code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''my_env'': conda)'
# language: python
# name: python391jvsc74a57bd0ab88f8d8cbbcde50b6847df9cf75f0481bc134e870de959eb908027e13484796
# ---
import pyspark
from pyspark.sql import SparkSession

# First session: a one-core local Spark application.
spark = (SparkSession.builder
         .master("local[1]")
         .appName('PySparkExampleDemo')
         .getOrCreate())
print("First SparkContext:")
print("APP Name :"+spark.sparkContext.appName)
print("Master :"+spark.sparkContext.master)
spark.stop()

# Second session: after stopping the first, getOrCreate builds a fresh context.
spark2 = (SparkSession.builder
          .master("local[1]")
          .appName("PySparkExampleDemo2")
          .getOrCreate())
print("Second SparkContext:")
print("APP Name :"+spark2.sparkContext.appName)
print("Master :"+spark2.sparkContext.master)
# spark.stop()

# Third session: newSession() shares spark2's underlying SparkContext,
# so the app name and master reported below are the same as spark2's.
spark3 = spark2.newSession()
print("Third SparkContext:")
print("APP Name :"+spark3.sparkContext.appName)
print("Master :"+spark3.sparkContext.master)
| pyspark/.ipynb_checkpoints/spark_session-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### This notebook is Final submission for the hackathon
#
# https://www.machinehack.com/hackathons/predicting_the_costs_of_used_cars_hackathon_by_imarticus_learning/overview
#
import lightgbm as lgbm
import pandas as pd
import numpy as np
# Load the partially preprocessed train/test splits produced by an earlier notebook.
train_df = pd.read_csv('train_somewhat_preprocessed_2.csv')
test_df = pd.read_csv('test_somewhat_preprocessed_2.csv')
train_df.shape
test_df.shape
from sklearn import preprocessing
# Stack train (minus the Price target) and test so categorical encodings and
# dummy columns are fitted on the combined vocabulary of both splits.
data = pd.concat([train_df.drop(['Price'],axis=1),test_df],ignore_index=True)
# NOTE: "lable" is a typo for "label"; the name is kept since later cells use it.
lable_encoder = preprocessing.LabelEncoder()
lable_encoder.fit(data['Brand'])
# Exploratory cell: turbo/eco variants mentioned in the car name (result not stored).
data.loc[data['Name'].str.contains('turbo|Turbo|TURBO|Eco')]
# Common trim-level ("badge") tokens to extract from the car name.
car_badge = ['DX','CE','LE','DLS','DLE','GLS','GLA','GLE','GL ','GT','LS','LT','LTD','LTZ','LTS','EX','RS','SE','SL','SLE','SLT']
for i in range(len(car_badge)):
    # Later tokens overwrite earlier matches when a name contains several badges.
    data.loc[data['Name'].str.contains(car_badge[i]),'Car Badge']=car_badge[i]
data.loc[data['Car Badge'].isna(),'Car Badge']= 'not_known'
# +
#label_encode_car_badge = preprocessing.LabelEncoder()
#label_encode_car_badge.fit(data['Car Badge'])
# -
# One-hot encode the extracted badge (reuses the name car_badge for the dummies).
car_badge = pd.get_dummies(data['Car Badge'])
# +
#data['Car_badge_encoding']= label_encode_car_badge.transform(data['Car Badge'])
# -
data['Car Badge'].value_counts()
# Append the badge dummies; drop 'not_known' as the redundant baseline category.
data = pd.concat([data,car_badge],axis=1).drop(['not_known'],axis=1)
data.Location.unique()
# Exploratory cells: body-style mentions inside the Name column.
data.loc[data['Name'].str.contains('Sedan'),'Name'].value_counts()
data.loc[data['Name'].str.contains('SUV|suv'),'Name'].value_counts()
data.loc[data['Name'].str.contains('Hatchback|hatchback'),'Name'].value_counts()
# Per-city fuel prices; patch in missing CNG values, then persist the fixes.
fuel_price = pd.read_csv('Fuel_charge.csv')
fuel_price.loc[fuel_price['City']=='Kochi','CNG'] = 50
fuel_price.loc[fuel_price['City']=='Chennai','CNG'] = 42.22
fuel_price.loc[fuel_price['City']=='Ahmedabad','CNG'] = 46.75
fuel_price.loc[fuel_price['City']=='Kolkata','CNG'] = 45.35
fuel_price.to_csv('Fuel_charge.csv',index=False)
data.Fuel_Type.value_counts()
data.Location.value_counts()
fuel_price_city = list(fuel_price['City'].values)
# +
# Map each (fuel type, city) pair to the local price of that fuel.
# NOTE(review): assumes Fuel_charge.csv has exactly 18 city rows — confirm.
# NOTE: 'Deisel' is the column's spelling in the CSV; kept as-is.
for i in range(18):
    data.loc[(data['Fuel_Type']=='Petrol') & (data['Location']==fuel_price['City'].loc[i]),
             'Fuel_price']=fuel_price['Petrol'].loc[i]
    data.loc[(data['Fuel_Type']=='Diesel') & (data['Location']==fuel_price['City'].loc[i]),
             'Fuel_price']=fuel_price['Deisel'].loc[i]
    data.loc[(data['Fuel_Type']=='CNG') & (data['Location']==fuel_price['City'].loc[i]),
             'Fuel_price']=fuel_price['CNG'].loc[i]
    data.loc[(data['Fuel_Type']=='LPG') & (data['Location']==fuel_price['City'].loc[i]),
             'Fuel_price']=fuel_price['LPG'].loc[i]
    data.loc[(data['Fuel_Type']=='Electric') & (data['Location']==fuel_price['City'].loc[i]),
             'Fuel_price']=fuel_price['Electric'].loc[i]
# -
data['Fuel_price'].value_counts()
# Rows whose city/fuel combination had no price in the table remain NaN.
data[data['Fuel_price'].isna()]
fuel_price['Petrol'].loc[0]
# +
#df_train['Brand_encoding'] =lable_encoder.transform(train_df['Brand'])
# +
#df_test['Brand_encoding'] = lable_encoder.transform(test_df['Brand'])
# -
data.columns
# Keep the selected raw columns plus the engineered city and badge dummies.
data_reduced =data[['Name', 'Location', 'Year', 'Kilometers_Driven', 'Fuel_Type',
                    'Transmission', 'Owner_Type', 'Mileage', 'Engine', 'Power', 'Seats',
                    'New_Price','Age','Fuel_price','Ahmedabad',
                    'Bangalore', 'Chennai', 'Coimbatore', 'Delhi', 'Hyderabad', 'Jaipur',
                    'Kochi', 'Kolkata', 'Mumbai', 'Pune','Brand','DLE', 'DX', 'EX', 'GL ', 'GLA', 'GLE', 'GT', 'LE', 'LS',
                    'LT', 'LTZ', 'RS', 'SE', 'SL', 'SLE']]
#label_fuel = preprocessing.LabelEncoder()
#data_reduced['Fuel_Type_Encoding'] = label_fuel.fit_transform(data_reduced['Fuel_Type'])
# Checkpoint to CSV: the first 6019 rows are the train split, the rest are test.
pd.concat([data_reduced[:6019],train_df['Price']],axis=1).to_csv("train_data_reduced.csv",index=False)
data_reduced[6019:].to_csv('test_data_reduced.csv',index=False)
# NOTE(review): data_reduced is a slice of `data`; these .loc assignments may
# emit pandas SettingWithCopyWarning — confirm whether a .copy() is needed.
data_reduced.loc[data_reduced['New_Price'].isna(),'New_Price']= '0 Lakh'
# "12.5 Lakh" -> 12.5 (missing values become 0.0 via the sentinel above).
data_reduced['New_Price_converted'] = data_reduced['New_Price'].apply(lambda x:x.split()[0]).astype(float)
##label_location = preprocessing.LabelEncoder()
##data_reduced['Location_Encoding'] = label_location.fit_transform(data_reduced['Location'])
# Integer-encode the low-cardinality categoricals.
label_transmission = preprocessing.LabelEncoder()
data_reduced['Transmission_Encoding'] = label_transmission.fit_transform(data_reduced['Transmission'])
label_owner = preprocessing.LabelEncoder()
data_reduced['Owner_Type_Encoding'] = label_owner.fit_transform(data_reduced['Owner_Type'])
# Model name = first two tokens of the car name, e.g. "Maruti_Swift".
data_reduced['Model_name'] = data_reduced['Name'].apply(lambda x: str(x.split()[0]+'_'+x.split()[1]))
model_name = pd.get_dummies(data_reduced['Model_name'])
data_reduced.columns
# The brand appears with two spellings; the duplicates are merged further below.
data_reduced[(data_reduced['Brand']=='ISUZU')|(data_reduced['Brand']=='Isuzu')]
data_reduced = pd.concat([data_reduced,model_name],axis=1)
brand = pd.get_dummies(data_reduced['Brand'])
data_reduced = pd.concat([data_reduced,brand],axis=1)
from sklearn.feature_extraction.text import TfidfVectorizer
v = TfidfVectorizer()
# TF-IDF of the brand strings (computed but not used downstream in this notebook).
x_brand = v.fit_transform(data_reduced['Brand'])
import geopy.location as geo_location
from geopy.geocoders import Nominatim
# +
# NOTE(review): geopy >= 2 requires Nominatim(user_agent=...) — confirm version.
geolocator = Nominatim()
cities = list(data_reduced['Location'].unique())
country ="India"
# Geocode each sale city once and broadcast its lat/lon to matching rows.
for city in cities:
    locate = geolocator.geocode(city+','+ country)
    data_reduced.loc[data_reduced['Location']==city,'Latitude']=locate.latitude
    data_reduced.loc[data_reduced['Location']==city,'Longitude']=locate.longitude
    print(city+":"+str(locate.latitude)+","+str(locate.longitude))
#geo_location.Location(address='Bangalore'
# -
# Merge the duplicate brand dummy columns, then drop the redundant one.
data_reduced['ISUZU'] = data_reduced['ISUZU']+data_reduced['Isuzu']
del data_reduced['Isuzu']
data_reduced.Brand.unique()
# +
#data_reduced['Model_name_Encoding'] = preprocessing.LabelEncoder().fit_transform(data_reduced['Model_name'])
# -
data_reduced['Kilometers_by_per_year'] = data_reduced['Kilometers_Driven']/data_reduced['Age']
# NOTE(review): named "Engine_litre" but computed as Kilometers_Driven/Engine —
# the name suggests engine displacement in litres; confirm the intent.
data_reduced['Engine_litre'] = data_reduced['Kilometers_Driven']/data_reduced['Engine']
# Two candidate feature sets; the second (data_reduced-based) overwrites the first.
temp = data.drop(['Fuel_Type','Owner_Type','Transmission','Location','New_Price','Name','Brand','Car Badge'],axis=1)
df_train = temp[:6019]
df_test = temp[6019:]
temp = data_reduced.drop(['Fuel_Type','Owner_Type','Transmission','Location','New_Price','Name','Brand','Model_name'],axis=1)
df_train = temp[:6019]
df_test = temp[6019:]
'''
df_train = train_df.drop(['Fuel_Type','Owner_Type','Transmission','Location','New_Price','Name','Brand','Price'],axis=1)
df_test = test_df.drop(['Fuel_Type','Owner_Type','Transmission','Location','New_Price','Name','Brand'],axis=1)
'''
df_target = train_df[['Price']]
# +
#del df_train['Year']
#del df_test['Year']
# +
from sklearn.model_selection import KFold,cross_val_score
from sklearn.metrics import mean_squared_log_error
def rmsle_cv(model):
    """5-fold cross-validated RMSLE of `model` on the module-level
    df_train / df_target frames.

    Returns the per-fold RMSLE scores as a numpy array (also printed).
    """
    # BUG FIX: the original called .get_n_splits(...) on the KFold, which
    # returns the plain integer 5, so shuffle=True / random_state=90 were
    # silently discarded and cross_val_score fell back to unshuffled folds.
    # Pass the splitter itself as `cv` so the configured shuffling is used.
    kf = KFold(n_splits=5, shuffle=True, random_state=90)
    rmse = np.sqrt(-cross_val_score(model, df_train.values, df_target.values,
                                    scoring="neg_mean_squared_log_error", cv=kf))
    print(rmse)
    return rmse
# -
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor,BaggingRegressor

# Candidate models, scored with the rmsle_cv helper defined above.
lgbmr = lgbm.LGBMRegressor(n_estimators=60,num_leaves=300,min_child_samples=15,learning_rate=0.095)
# BUG FIX: gbr was commented out but is still evaluated below, which raised
# a NameError at rmsle_cv(gbr); define it explicitly.
gbr = GradientBoostingRegressor()
rmsle_cv(lgbmr)
#0.17334768 0.16560297 0.17228156 0.16489429 0.16918989
#[0.16595 0.16381451 0.17946319 0.16543243 0.16725304]
#0.16403303 0.16438792 0.17861811 0.16727336 0.1654432
#0.16419121 0.16330832 0.17360209 0.16819709 0.17095877
#[0.16263038 0.16361457 0.17217961 0.16881051 0.16944638] after adding lattitude and longitude
rfr = RandomForestRegressor(n_estimators=70)
rmsle_cv(rfr)
#0.18618854 0.186489 0.19597522 0.18386189 0.18411782
#0.18463811 0.18499372 0.19798856 0.18180352 0.18835696
#0.18616785 0.18952343 0.19312987 0.17990713 0.18580735
rmsle_cv(gbr)
#0.20708859, 0.21013381, 0.21375848, 0.20480448, 0.211188
#0.20758252 0.20803649 0.214338 0.20676181 0.20952544
df_train.shape
# Fit the best model on the full training set and export predictions.
lgbmr.fit(df_train,df_target)
# BUG FIX: the original indexed df_test with a boolean Series built from a
# fresh 0..n-1 index, which does not align with df_test's 6019.. index; use
# a positional numpy mask to inspect any non-positive price predictions.
preds = lgbmr.predict(df_test)
df_test[preds <= 0]
pd.DataFrame({'Price':preds}).to_excel('fourth_submission.xlsx',index=False)
# Persist the fully engineered feature sets for later reuse.
pd.concat([data_reduced[:6019],train_df['Price']],axis=1).to_csv("train_somewhat_preprocessed_4.csv",index=False)
data_reduced[6019:].to_csv('test_somewhat_preprocessed_4.csv',index=False)
data_reduced.columns
| MachineHack/Participants_Data_Used_Cars/Final_preprocessing_and_Model_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Plotting N-D data
#
# Data with any number of dimensions can also be plotted in Scipp.
import numpy as np
import scipp as sc
# ## Default representation
#
# Data with 3 or more dimensions are by default represented by a 2-D image, accompanied by sliders to navigate the extra dimensions (one slider per dimension above 2).
# Grid sizes along the four dimensions x, y, z, Q_x.
N = 20
M = 30
L = 20
K = 10
xx = np.arange(N, dtype=np.float64)
yy = np.arange(M, dtype=np.float64)
zz = np.arange(L, dtype=np.float64)
qq = np.arange(K, dtype=np.float64)
x, y, z, q = np.meshgrid(xx, yy, zz, qq, indexing='ij')
# Scale and centre parameters for a radially symmetric test signal.
b = N/20.0
c = M/2.0
d = L/2.0
r = np.sqrt(((x-c)/b)**2 + ((y-c)/b)**2 + ((z-d)/b)**2 + ((q-d)/b)**2)
a = np.sin(r)
# NOTE: `d` is rebound here from the scalar above to the scipp Dataset.
d = sc.Dataset()
d['Some4Ddata'] = sc.Variable(dims=['x', 'y', 'z', 'Q_x'], values=a)
d.coords['x'] = sc.Variable(dims=['x'], values=xx)
d.coords['y'] = sc.Variable(dims=['y'], values=yy)
d.coords['z'] = sc.Variable(dims=['z'], values=zz)
d.coords['Q_x'] = sc.Variable(dims=['Q_x'], values=qq)
sc.plot(d)
# ### Slider controls
#
# - Each dimension comes with two sliders to control the position of the slice and its thickness.
#
# - Upon figure creation, the thickness is set to the first bin width.
# Only the data contained in that bin is displayed.
# The thickness can be increased by an integer number of bins, and the data inside those bins will either be summed or averaged (see note below).
#
# - Changing the slice thickness will also change the color range, and the `Rescale` button can be used to automatically rescale the colorbar to the limits of the currently displayed data.
#
# - Each dimension control comes with a `Continuous Update` checkbox, which is applied by default.
# If this is unselected, the plot will only update once the slider has been released.
#
# <div class="alert alert-info">
#
# *Note*
#
# You can automatically recalculate the intensity range using the `Rescale` button on the left of the plot.
# When zooming or changing thickness, the enclosed viewed region is used to calculate the new intensities.
# In the general case the intensities are calculated as the `mean` of the values within.
#
# **In the special case of your variable having units of counts, the intensities are summed.**
#
# </div>
#
# ### Changing axes dimensions
#
# By default, the two innermost dimensions are used for the image, and the rest will be allocated to a slider.
# This can be changed, either interactively using the buttons, or by changing the dimension order using `transpose`:
# Transpose so that different dimensions land on the image axes / sliders.
d['Some4Ddata'].transpose(dims=['z', 'y', 'Q_x', 'x']).plot()
# ### Profile picking
#
# Finally, each dimension also comes with a `Profile` button which allows to display one of the additional dimensions as a profile underneath the main plot.
#
# - When hovering the mouse over the top image, the profile below is updated according to the mouse position.
# - Clicking on the image will save the current profile with a random color.
# - Clicking on an existing marker on the image will delete the corresponding saved profile.
# ## 3-D scatter plots
# <div class="alert alert-info">
#
# **Note**
#
# 3-D visualization requires `pythreejs` to be installed. Use either `pip` or `conda`:
# ```
# - conda install -c conda-forge pythreejs
# - pip install pythreejs
# ```
#
# </div>
# 3-D scatter plots can be created using `plot(projection='3d', positions='xyz')`, where the mandatory `positions` keyword argument is used to set the name of the position coord (here `'xyz'`) to use as position vectors:
# +
# Random points on a thin spherical shell of radius ~10, with a time axis.
N = 1000
M = 100
theta = np.random.random(N) * np.pi
phi = np.random.random(N) * 2.0 * np.pi
r = 10.0 + (np.random.random(N) - 0.5)
x = r * np.sin(theta) * np.sin(phi)
y = r * np.sin(theta) * np.cos(phi)
z = r * np.cos(theta)
a = np.arange(M*N).reshape([M, N]) * np.sin(y)
da = sc.DataArray(
    data=sc.array(dims=['time', 'xyz'], values=a),
    coords={
        # One position vector per scatter point, plus a time coordinate.
        'xyz':sc.vectors(dims=['xyz'], unit='m', values=np.array([x, y, z]).T),
        'time':sc.array(dims=['time'], unit='s', values=np.arange(M).astype(float))})
da.plot(projection='3d', positions='xyz')
# -
# Cut surfaces to slice data in 3-D can be enabled using buttons below the scene.
# When using a cut surface, the upper value of the opacity slider controls the opacity of the slice, while the lower value of the slider controls the opacity of the background.
# The scatter-plot functionality can also be used to create 3-D plots of dense data with slicing functionality.
# In this case we must first create a coordinate with positions:
# Take the first 10 'y' slices and attach a synthetic position coordinate
# built from the x/y/z coordinates.
d2 = d['y', :10].copy()
d2.coords['dummy-pos'] = sc.geometry.position(*[d2.coords[dim] for dim in ['x', 'y', 'z']])
sc.plot(d2, projection='3d', positions='dummy-pos')
# In the above example creating the coordinate was simple, since all three coords (`x`, `y`, and `z`) had the same unit.
# In general you may need to:
# - Set a consistent fake unit before calling `sc.geometry.position`.
# - Convert bin-edge coordinates to normal coordinates.
#
# It may be simpler to use dummy ranges as coordinates in that case:
ranges = [sc.arange(dim=dim, start=0.0, stop=d2.sizes[dim]) for dim in ['x', 'y', 'z']]
d2.coords['dummy-pos'] = sc.geometry.position(*ranges)
sc.plot(d2, projection='3d', positions='dummy-pos')
# ## LAMP's Superplot
# A `1d` projection is also available for multi-dimensional data, with the possibility to keep/remove lines that are plotted, a behavior we copied from LAMP's [Superplot](https://github.com/mantidproject/documents/blob/master/Requirements/Visualisation_and_Analysis/superplot.md) which was very popular in the neutron physics community.
sc.plot(d, projection='1d')
| docs/visualization/plotting/plotting-nd-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 5 (Part II): Ingesting Streaming Data with Kinesis
# ### MACS 30123: Large-Scale Computing for the Social Sciences
#
# In this second part of the lab, we'll explore how we can use Kinesis to ingest streaming text data, of the sort we might encounter on Twitter.
#
# To avoid requiring you to set up Twitter API access, we will create Twitter-like text and metadata using the `testdata` package to perform this demonstration. It should be easy enough to plug your streaming Twitter feed into this workflow if you desire to do so as an individual exercise (for instance, as a part of a final project!). Additionally, once you have this pipeline running, you can scale it up even further to include many more producers and consumers, if you would like, as discussed in lecture and the readings.
#
# Recall from the lecture and readings that in a Kinesis workflow, "producers" send data into a Kinesis stream and "consumers" draw data out of that stream to perform operations on it (i.e. real-time processing, archiving raw data, etc.). To make this a bit more concrete, we are going to implement a simplified version of this workflow in this lab, in which we spin up Producer and Consumer (t2.nano) EC2 Instances and create a Kinesis stream. Our Producer instance will run a producer script (which writes our Twitter-like text data into a Kinesis stream) and our Consumer instance will run a consumer script (which reads the Twitter-like data and calculates a simple demo statistic -- the average unique word count per tweet, as a real-time running average).
#
# You can visualize this data pipeline, like so:
#
# <img src="simple_kinesis_architecture.png" width="800" align="left" />
#
# To begin implementing this pipeline, let's import `boto3` and initialize the AWS services we'll be using in this lab (EC2 and Kinesis).
# +
import boto3
import time
# One boto3 session shared by the Kinesis and EC2 clients below.
session = boto3.Session()
kinesis = session.client('kinesis')
ec2 = session.resource('ec2')
ec2_client = session.client('ec2')
# -
# Then, we need to create the Kinesis stream that our Producer EC2 instance will write streaming tweets to. Because we're only setting this up to handle traffic from one consumer and one producer, we'll just use one shard, but we could increase our throughput capacity by increasing the ShardCount if we wanted to do so.
# +
# Single-shard stream: enough throughput for one producer and one consumer.
response = kinesis.create_stream(StreamName = 'test_stream',
                                 ShardCount = 1
                                 )
# Is the stream active and ready to be written to/read from? Wait until it exists before moving on:
waiter = kinesis.get_waiter('stream_exists')
waiter.wait(StreamName='test_stream')
# -
# OK, now we're ready to set up our producer and consumer EC2 instances that will write to and read from this Kinesis stream. Let's spin up our two EC2 instances (specified by the `MaxCount` parameter) using one of the Amazon Linux AMIs. Notice here that you will need to specify your `.pem` file for the `KeyName` parameter, as well as create a custom security group/group ID. Designating a security group is necessary because, by default, AWS does not allow inbound ssh traffic into EC2 instances (they create custom ssh-friendly security groups each time you run the GUI wizard in the console). Thus, if you don't set this parameter, you will not be able to ssh into the EC2 instances that you create here with `boto3`. You can follow along in the lab video for further instructions on how you can set up one of these security groups.
#
# Also, we need to specify an IAM Instance Profile so that our EC2 instances will have the permissions necessary to interact with other AWS services on our behalf. Here, I'm using one of the profiles we create in Part I of Lab 5 (a default AWS profile for launching EC2 instances within an EMR cluster), as this gives us all of the necessary permissions
# +
# Launch two instances (MaxCount=2): index 0 = producer, index 1 = consumer.
# NOTE(review): both SecurityGroupIds and SecurityGroups are passed; boto3
# normally needs only one of the two — confirm this combination is intended.
instances = ec2.create_instances(ImageId='ami-0915e09cc7ceee3ab',
                                 MinCount=1,
                                 MaxCount=2,
                                 InstanceType='t2.micro',
                                 KeyName='Heather_Chen',
                                 SecurityGroupIds=['sg-0766f5a606dc4c8c5'],
                                 SecurityGroups=['Lab5'],
                                 IamInstanceProfile=
                                 {'Name': 'EMR_EC2_DefaultRole'},
                                 )
# Wait until EC2 instances are running before moving on
waiter = ec2_client.get_waiter('instance_running')
waiter.wait(InstanceIds=[instance.id for instance in instances])
# -
# While we wait for these instances to start running, let's set up the Python scripts that we want to run on each instance. First of all, we have to define a script for our Producer instance, which continuously produces Twitter-like data using the `testdata` package and puts that data into our Kinesis stream.
# +
# %%file producer.py
import boto3
import testdata
import json

kinesis = boto3.client('kinesis', region_name='us-east-1')

# Continuously push Twitter-like records into the Kinesis stream until the
# process is killed.
while True:
    fake_tweet = {'username': testdata.get_username(),
                  'tweet': testdata.get_ascii_words(280)}
    kinesis.put_record(StreamName="test_stream",
                       Data=json.dumps(fake_tweet),
                       PartitionKey="partitionkey")
# -
# Then, we can define a script for our Consumer instance that gets the latest tweet out of the stream, one at a time. After processing each tweet, we then print out the average unique word count per processed tweet as a running average, before jumping on to the next indexed tweet in our Kinesis stream shard to do the same thing for as long as our program is running.
# +
# %%file consumer.py
import boto3
import time
import json

kinesis = boto3.client('kinesis', region_name='us-east-1')

# Start reading at the tip of the stream's single shard.
shard_it = kinesis.get_shard_iterator(StreamName="test_stream",
                                      ShardId='shardId-000000000000',
                                      ShardIteratorType='LATEST')["ShardIterator"]

i = 0  # tweets consumed so far
s = 0  # running sum of per-tweet unique word counts
while True:
    out = kinesis.get_records(ShardIterator=shard_it,
                              Limit=1)
    for o in out['Records']:
        jdat = json.loads(o['Data'])
        s = s + len(set(jdat['tweet'].split()))
        i = i + 1
    # Skip reporting until at least one tweet has been consumed.
    if i != 0:
        print("Average Unique Word Count Per Tweet: " + str(s/i))
        print("Sample of Current Tweet: " + jdat['tweet'][:20])
        print("\n")
    shard_it = out['NextShardIterator']
    time.sleep(0.2)
# -
# As our final preparation step, we'll grab all of the public DNS names of our instances (web addresses that you normally copy from the GUI console to manually ssh into) and record the names of our code files, so that we can easily ssh/scp into the instances and pass them our Python scripts to run.
# +
# Public DNS names of all running instances (index 0 = producer, 1 = consumer).
instance_dns = [instance.public_dns_name
                for instance in ec2.instances.all()
                if instance.state['Name'] == 'running'
                ]
# Scripts to ship: producer.py to instance 0, consumer.py to instance 1.
code = ['producer.py', 'consumer.py']
# -
# To copy our files over to our instances and programmatically run commands on them, we can use Python's `scp` and `paramiko` packages. You'll need to install these via `pip install paramiko scp` if you have not already done so.
# ! pip install paramiko scp
# Once we have `scp` and `paramiko` installed, we can copy our producer and consumer Python scripts over to the EC2 instances (designating our first EC2 instance in `instance_dns` as the producer and second EC2 instance as the consumer instance). If you have a slower (or more unstable) internet connection, you might need to increase the time.sleep() time in the code and try to run this code several times in order for it to fully run.
#
# Note that, on each instance, we install `boto3` (so that we can access Kinesis through our scripts) and then copy our producer/consumer Python code over to our producer/consumer EC2 instance via `scp`. After we've done this, we install the `testdata` package on the producer instance (which it needs in order to create fake tweets) and instruct it to run our Python producer script. This will write tweets into our Kinesis stream until we stop the script and terminate the producer EC2 instance.
#
# We could also instruct our consumer to get tweets from the stream immediately after this command and this would automatically collect and process the tweets according to the consumer.py script. For the purposes of this demonstration, though, we'll manually ssh into that instance and run the code from the terminal so that we can see the real-time consumption a bit more easily.
# +
import paramiko
from scp import SCPClient

ssh_producer, ssh_consumer = paramiko.SSHClient(), paramiko.SSHClient()

# Initialization of SSH tunnels takes a bit of time; otherwise get connection
# error on first attempt.
time.sleep(5)

# Install boto3 on each EC2 instance and copy our producer/consumer code onto
# the producer/consumer EC2 instances (instance 0 = producer, 1 = consumer).
instance = 0
stdin, stdout, stderr = [[None, None] for i in range(3)]
for ssh in [ssh_producer, ssh_consumer]:
    # Auto-accept the unknown host key of the freshly launched instance.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(instance_dns[instance],
                username = 'ec2-user',
                key_filename='/Users/heatherchen/.ssh/Heather_Chen.pem')
    with SCPClient(ssh.get_transport()) as scp:
        scp.put(code[instance])
    if instance == 0:
        # The producer additionally needs `testdata` to fabricate tweets.
        stdin[instance], stdout[instance], stderr[instance] = \
            ssh.exec_command("sudo pip install boto3 testdata")
    else:
        stdin[instance], stdout[instance], stderr[instance] = \
            ssh.exec_command("sudo pip install boto3")
    instance += 1

# Block until Producer has installed boto3 and testdata, then start running Producer script:
producer_exit_status = stdout[0].channel.recv_exit_status()
if producer_exit_status == 0:
    ssh_producer.exec_command("python %s" % code[0])
    print("Producer Instance is Running producer.py\n.........................................")
else:
    print("Error", producer_exit_status)

# Close ssh and show connection instructions for manual access to Consumer Instance
# BUG FIX: the original read `ssh_consumer.close;` — a bare attribute access
# that never invoked close(), leaving the consumer connection open. Call both.
ssh_consumer.close()
ssh_producer.close()
print("Connect to Consumer Instance by running: ssh -i \"~/.ssh/Heather_Chen.pem\" ec2-user@%s" % instance_dns[1])
# -
# If you run the command above (with the correct path to your actual `.pem` file), you should be inside your Consumer EC2 instance. If you run `python consumer.py`, you should also see a real-time count of the average number of unique words per tweet (along with a sample of the text in the most recent tweet), as in the screenshot:
#
# 
#
# Cool! Now we can scale this basic architecture up to perform any number of real-time data analyses, if we so desire. Also, if we execute our consumer code remotely via paramiko as well, the process will be entirely remote, so we don't need to keep any local resources running in order to keep streaming/processing real-time data.
#
# As a final note, when you are finished observing the real-time feed from your consumer instance, **be sure to terminate your EC2 instances and delete your Kinesis stream**. You don't want to be paying for these to run continuously! You can do so programmatically by running the following `boto3` code:
# +
# Tear down all billable resources created above.
# Terminate EC2 Instances:
ec2_client.terminate_instances(InstanceIds=[instance.id for instance in instances])
# Confirm that EC2 instances were terminated:
waiter = ec2_client.get_waiter('instance_terminated')
waiter.wait(InstanceIds=[instance.id for instance in instances])
print("EC2 Instances Successfully Terminated")
# Delete Kinesis Stream (if it currently exists):
try:
    response = kinesis.delete_stream(StreamName='test_stream')
except kinesis.exceptions.ResourceNotFoundException:
    # Stream already gone (e.g. cell re-run); nothing to delete.
    pass
# Confirm that Kinesis Stream was deleted:
waiter = kinesis.get_waiter('stream_not_exists')
waiter.wait(StreamName='test_stream')
print("Kinesis Stream Successfully Deleted")
# -
| Labs/Lab 5 Ingesting and Processing Large-Scale Data/Part II Kinesis/Lab 5 Kinesis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scapy.all import *
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# Load the packet capture recorded while the Dask workload ran.
packets = rdpcap('./capture_3.pcapng')
# !free -h
# Static LAN addresses of the Dask scheduler and the three workers.
SCHEDULER = "192.168.1.116"
WORKER_1 = "192.168.1.117"
WORKER_2 = "192.168.1.118"
WORKER_3 = "192.168.1.115"
# +
def get_src_ip(packet):
    """Source IPv4 address of `packet` (requires an IP layer)."""
    ip_layer = packet[IP]
    return ip_layer.fields["src"]

def get_dst_ip(packet):
    """Destination IPv4 address of `packet` (requires an IP layer)."""
    ip_layer = packet[IP]
    return ip_layer.fields["dst"]

def get_time_size(packet):
    """Return (capture timestamp, payload length in bytes) of `packet`."""
    return packet.time, packet.payload.len
# -
def plot_txrx(packets, ip_a):
    """Plot per-second transmitted (TX) and received (RX) traffic, in Mbit,
    for the host with IP address `ip_a`.

    Packets without an IP layer or a payload length raise inside the helpers
    and are skipped, matching the original best-effort behavior.
    """
    tx_log, rx_log = {}, {}  # second (as str) -> total payload bytes
    for packet in packets:
        try:
            if get_src_ip(packet) == ip_a:
                ts, size = get_time_size(packet)
                key = str(int(ts))
                tx_log[key] = tx_log.get(key, 0) + size
        except Exception:
            pass
        try:
            if get_dst_ip(packet) == ip_a:
                ts, size = get_time_size(packet)
                key = str(int(ts))
                rx_log[key] = rx_log.get(key, 0) + size
        except Exception:
            pass
    # BUG FIX: the original appended bandwidth samples only for seconds present
    # in the corresponding log, so tx_bw/rx_bw could end up shorter than the
    # shared timeline and misalign (or fail) in plt.plot; fill gaps with 0.
    # BUG FIX: it also sorted the timeline lexicographically while building the
    # series but numerically afterwards, which could scramble the ordering;
    # sort numerically once and build both series in that order.
    timeline = sorted(set(tx_log) | set(rx_log), key=int)
    tx_bw = [8 * tx_log.get(t, 0) / 1e6 for t in timeline]
    rx_bw = [8 * rx_log.get(t, 0) / 1e6 for t in timeline]
    seconds = np.asarray([int(t) for t in timeline])
    seconds = seconds - seconds.min()  # shift so the capture starts at t=0
    plt.plot(seconds, tx_bw, label="TX")
    plt.plot(seconds, rx_bw, label="RX")
    plt.legend()
    plt.title("Node Traffic ("+ip_a+")")
    plt.ylim(0, 1700)
    plt.ylabel("Traffic (Mbit)")
    plt.xlabel("Time")
    plt.show()
# Per-node TX/RX traffic over time.
plot_txrx(packets, SCHEDULER)
plot_txrx(packets, WORKER_1)
plot_txrx(packets, WORKER_2)
plot_txrx(packets, WORKER_3)
def total_traffic(packets, ip_a_1, ip_a_2):
    """Total payload bytes exchanged between two hosts, split by direction.

    Returns a dict with keys "ip_1_to_2" (bytes sent from ip_a_1 to ip_a_2)
    and "ip_2_to_1" (the opposite direction). Packets without an IP layer or
    payload length are ignored.
    """
    forward_bytes = 0
    reverse_bytes = 0
    for pkt in packets:
        try:
            if get_src_ip(pkt) == ip_a_1 and get_dst_ip(pkt) == ip_a_2:
                forward_bytes += get_time_size(pkt)[1]
        except:
            pass
        try:
            if get_src_ip(pkt) == ip_a_2 and get_dst_ip(pkt) == ip_a_1:
                reverse_bytes += get_time_size(pkt)[1]
        except:
            pass
    return {
        "ip_1_to_2": forward_bytes,
        "ip_2_to_1": reverse_bytes
    }
# Directional traffic totals (MBytes) between each node pair.
traffic = total_traffic(packets, SCHEDULER, WORKER_1)
print("Scheduler to worker:", round(traffic["ip_1_to_2"]/1e6, 1), "MBytes")
print("Worker to scheduler:", round(traffic["ip_2_to_1"]/1e6, 1), "MBytes")
traffic = total_traffic(packets, SCHEDULER, WORKER_2)
print("Scheduler to worker:", round(traffic["ip_1_to_2"]/1e6, 1), "MBytes")
print("Worker to scheduler:", round(traffic["ip_2_to_1"]/1e6, 1), "MBytes")
traffic = total_traffic(packets, SCHEDULER, WORKER_3)
print("Scheduler to worker:", round(traffic["ip_1_to_2"]/1e6, 1), "MBytes")
print("Worker to scheduler:", round(traffic["ip_2_to_1"]/1e6, 1), "MBytes")
traffic = total_traffic(packets, WORKER_1, WORKER_2)
print("Worker 1 to worker 2:", round(traffic["ip_1_to_2"]/1e6, 1), "MBytes")
print("Worker 2 to worker 1:", round(traffic["ip_2_to_1"]/1e6, 1), "MBytes")
# BUG FIX: the two pairs below printed copy-pasted "Worker 1 to worker 2" /
# "Worker 2 to worker 1" labels; corrected to name the actual node pairs.
traffic = total_traffic(packets, WORKER_1, WORKER_3)
print("Worker 1 to worker 3:", round(traffic["ip_1_to_2"]/1e6, 1), "MBytes")
print("Worker 3 to worker 1:", round(traffic["ip_2_to_1"]/1e6, 1), "MBytes")
traffic = total_traffic(packets, WORKER_2, WORKER_3)
print("Worker 2 to worker 3:", round(traffic["ip_1_to_2"]/1e6, 1), "MBytes")
print("Worker 3 to worker 2:", round(traffic["ip_2_to_1"]/1e6, 1), "MBytes")
def tx_breakdown(ip_a, capture=None):
    """Classify Dask protocol messages sent from `ip_a` by op type.

    Parameters
    ----------
    ip_a : str
        Source IP whose outgoing packets are classified.
    capture : iterable of packets, optional
        Packets to scan. Defaults to the module-level `packets` capture,
        keeping the original single-argument usage backward compatible.

    Returns
    -------
    (type_counts, other_ops)
        type_counts maps message type -> packet count; other_ops collects the
        decoded payloads of unrecognized "op" packets for inspection.
    """
    if capture is None:
        capture = packets  # module-level capture the original relied on
    other_ops = []
    type_counts = {
        # dask worker heartbeat
        "heartbeat_worker": 0,
        # describes a compute task to worker
        "compute_task": 0,
        # reports task result to scheduler
        "task_finished": 0,
        # request for data
        "get_data": 0,
        # delete data (e.g. orphaned task)
        "delete_data": 0,
        # steal - distribute load at expense of data locality
        "steal_response": 0,
        "steal_request": 0,
        # register new data from external source
        "key_in_memory": 0,
        # BUG FIX: "add_keys" was detected below but missing from this dict, so
        # counting it raised KeyError (swallowed by the bare except) and those
        # packets were silently dropped from the tally.
        "add_keys": 0,
        # others - binary data (no op)
        "others": 0,
    }
    # Marker substrings in decoded payloads, checked in the original order.
    markers = [
        ("opheartbeat_worker", "heartbeat_worker"),
        ("optask-finished", "task_finished"),
        ("opget_data", "get_data"),
        ("opdelete-data", "delete_data"),
        ("opadd-keys", "add_keys"),
        ("opsteal-response", "steal_response"),
        ("opcompute-task", "compute_task"),
        ("opkey-in-memory", "key_in_memory"),
        ("opsteal-request", "steal_request"),
    ]
    for packet in capture:
        # Packets without an IP layer or a decodable load raise and are skipped.
        try:
            if get_src_ip(packet) != ip_a:
                continue
            packet_load = packet.load.decode(errors="ignore")
            packet_type = None
            for marker, name in markers:
                if marker in packet_load:
                    packet_type = name
                    break
            if packet_type is None:
                packet_type = "others"
                if "\x00\x00op" in packet_load:
                    # An op-message we do not recognize; keep it for inspection.
                    other_ops.append(packet_load)
            type_counts[packet_type] += 1
        except Exception:
            pass
    return type_counts, other_ops
# Message-type breakdown of traffic sent by each node.
type_counts, other_ops = tx_breakdown(SCHEDULER)
type_counts
sum(type_counts.values())
type_counts, other_ops = tx_breakdown(WORKER_1)
type_counts
sum(type_counts.values())
type_counts, other_ops = tx_breakdown(WORKER_2)
type_counts
type_counts, other_ops = tx_breakdown(WORKER_3)
type_counts
| pcap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Comparing two datasets
# One of the simplest analyses we can make with a dataset is splitting it into groups (perhaps by experimental condition), and comparing some statistic between them. This tutorial will cover such "first-pass" analyses when the data naturally breaks into groups, and relatively simple statistics can be calculated between them
#
# # The t-test
# ## Data Introduction
# Octopamine has been implicated in modulating feeding behaviors in both vertebrates and invertebrates. Pargyline has been shown to increase the levels of octopamine in the nervous system. The role of Pargyline in sucrose consumption was tested in blowflies. Two groups of blowflies were used in this study: one group was injected with Pargyline (n=295 flies) while the control group was injected with saline (n = 300 flies). The amount of sucrose consumed was then measured. [adapted from Samuels & Witmer, pg 220. Originally: Long & Murdock, PNAS 1983]
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import (ttest_ind, ttest_1samp, ttest_rel,
wilcoxon, ks_2samp)
from statsmodels.distributions.empirical_distribution import ECDF
import statsmodels.api as sm
# %matplotlib inline
# -
# First we'll load the data and take a look at it's structure. We'll note that each column is a variable, and each row is a data point. Some of the columns have `NaN`, because that variable doesn't apply to the particular datapoint.
# Load the blowfly feeding dataset and inspect its structure.
data = pd.read_csv('../data/fly_feeding.csv')
data.head()
data['feed_type'].unique()
# First, let's look at the distributions for feeding behavior under pargyline and saline
# +
# Pull the raw measurement values for each injection condition as numpy arrays.
data_par = data.query('feed_type == "pargyline"')['measurement'].values
data_sal = data.query('feed_type == "saline"')['measurement'].values
fig, ax = plt.subplots()
ax.hist(data_par)
# Assigning to _ just suppresses the notebook's echo of the return value.
_ = ax.hist(data_sal)
# -
# We'll also show the data as a boxplot, which is sometimes easier to interpret.
data.query('feed_type in ["pargyline", "saline"]').\
    boxplot('measurement', by='feed_type')
# We want to test for whether there is a statistical difference between the means of these two distributions. Because the two distributions look (relatively) normally distributed, a two-sample t-test seems like it may be useful. Let's perform this below.
res = ttest_ind(data_par, data_sal)
print(res)
# Here we can see that, according to the t-test, there is a highly significant difference in the feeding behavior for these groups.
# ## Follow-up experiment
# To further confirm octopamine positively modulates feeding behavior, an additional experiment was done with Yohimbine (an antagonist of octopamine receptors in insects). One group was injected with Pargyline and Yohimbine (n = 130) while an additional control group was injected with saline (n = 100). The amount of sucrose consumed was then measured.
#
# First, we'll once again visualize the two conditions as histograms and boxplots.
# NOTE: the condition label is spelled 'parglyine_w_yohimbine' (sic) in the
# query strings below — keep that spelling, it must match the label in the data.
data_par_yoh = data.query('feed_type == "parglyine_w_yohimbine"')
data_par_yoh = data_par_yoh['measurement'].values
fig, ax = plt.subplots()
ax.hist(data_sal)
_ = ax.hist(data_par_yoh)
# Boxplot of the same two groups for comparison.
data_box = data.query('feed_type in ["parglyine_w_yohimbine", "saline"]')
data_box.boxplot('measurement', by='feed_type')
# We'll run a t-test once again to test for any difference between these groups.
res = ttest_ind(data_sal, data_par_yoh)
print(res)
# Here it seems like there is *no* difference between the groups...their means are not far enough apart to conclude statistical significance. However, would you conclude that the two distributions are not different at all? That's hard to say. For example, there seems to be a strong difference in the **variance** between the two distributions. This is something you should always pay attention to.
#
# Finding more complex relationships in your data requires different kinds of tests. Next we'll look at a slightly different version of the t-test: the "paired" t-test.
# # Paired t-tests
# ## About the data
# Certain types of nerve cells have the ability to regenerate a part of the cell that has been amputated. In an early study of this process, measurements were made on the nerves in the spinal cord of rhesus monkeys. Nerves emanating from the left side of the cord were cut, while nerves from the right side were kept intact. During the regeneration process, the amount of creatine phosphate (CP) was measured in the left and right portions of the spinal cord. You are interested in whether CP levels are different between the cut and control sides of the spinal cord. [adapted from Samuels & Witmer, pg 387. Originally: Bodian (1947)]
#
# First, we'll visualize the data as a histogram + boxplot.
#
# Restrict to the creatine-phosphate measurements (cut vs. intact nerves).
bef_aft = data.query('measurement_type == "creatine_phosphate"')
bef_aft.head()
data_left = bef_aft.query('side == "Left"')['measurement'].values
data_right = bef_aft.query('side == "Right"')['measurement'].values
fig, ax = plt.subplots()
ax.hist(data_left)
_ = ax.hist(data_right)
# Per our earlier analysis, we'll perform a 2-sample t-test, splitting our data into the "left" (cut) and "right" (intact control) groups:
ttest_ind(data_left, data_right)
# There doesn't seem to be a statistical difference between the two groups. But wait one second! We have omitted an important component of our dataset. Rather than being two separate groups, there is a natural "pairing" of the data. *For each animal*, we have one recording from the left, and one from the right. This means that we can include this knowledge in our statistical test.
#
# As a start, let's visualize the difference between left and right for *each* animal. We'll use a line plot for this.
fig, ax = plt.subplots()
# With 2-D y-data matplotlib draws one line per column, i.e. one line per animal.
ax.plot([0, 1], [data_left, data_right], color='k')
plt.xticks([0, 1], ['left', 'right'])
_ = plt.setp(ax, xlim=[-1, 2])
# Perhaps there does seem to be something going on after all. The lines in general seem to go up from left to right. To run statistics on this, we'll use a "paired" t-test. This assumes the natural pairings that we're plotting above:
ttest_rel(data_left, data_right)
# We also could have calculated the *difference* between left and right for each animal, and then run a regular t-test for independence from 0 on this distribution:
diff = data_right - data_left
ttest_1samp(diff, 0)
# Thus far, we have assumed that our data is characterized by some well-defined distribution. Generally this means that we assume our data is gaussian-distributed.
#
# But this is obviously not always the case. What do we do in this situation? We'll finish this lesson by covering techniques that make less assumptions about the data, and don't require normal distributions.
# # Non-parametric Tests
# ## Wilcoxon Signed-Rank Test (one sample) and Wilcoxon-Mann-Whitney (two sample).
# We'll use the same dataset as above. If you look at the histograms, you might notice that the datasets were quite small. This is often reason enough to assume that you don't have a normally distributed dataset.
#
# To relax this assumption, we can use a *wilcoxon signed rank test*. This simply looks at whether the difference between two conditions is positive or negative, rather than the actual values of the difference.
# Nonparametric paired alternative: the Wilcoxon signed-rank test uses only
# the signs and ranks of the paired differences.
wilcoxon(data_left, data_right)
# Equivalent one-sample form on the per-animal differences. Note the sign
# convention is flipped relative to the ttest_1samp cell above; the two-sided
# Wilcoxon result is unaffected by that flip.
diff = data_left - data_right
wilcoxon(diff)
# At this point you might be noticing that whenever we calculate the statistical test on the "difference" between paired datapoints, the p-value tends to go down. This is because paired tests generally have more **statistical power** than unpaired ones. If your data has this natural paired structure to it, it's a good idea to use it.
# # When t-tests fail: the Kolmogorov-Smirnov test
# Generally speaking, parametric tests are more powerful than nonparametric tests. That’s because the assumptions that you make with a parameteric test allow you to make stronger statements with the data. However, there are some cases where nonparametric test can tell you more. Let's consider the first dataset we looked at in this notebook. We'll re-plot the distributions below:
fig, ax = plt.subplots()
ax.hist(data_sal, bins=20)
ax.hist(data_par_yoh, bins=20)
# Looking at the distributions, it seems that while they have the same mean, the variance of each distribution may be different. All of the tests covered so far focus exclusively on the difference in *means* between two distributions. However, sometimes the mean isn't the statistic of interest between two distributions. To visualize this let's plot the "cumulative distribution function" of the data. This is another useful way of comparing datasets to one another:
# +
# Empirical CDFs for both groups, evaluated on a common grid spanning the
# pooled range of the data (np.arange default step = 1).
ecdf_py = ECDF(data_par_yoh)
ecdf_sa = ECDF(data_sal)
data_test = np.arange(np.min([data_par_yoh.min(), data_sal.min()]),
                      np.max([data_par_yoh.max(), data_sal.max()]))
fig, ax = plt.subplots()
for i_ecdf in [ecdf_py, ecdf_sa]:
    ax.plot(data_test, i_ecdf(data_test))
# -
# These two lines don't look quite the same. One of them seems to be rising more sharply than the other, reflecting the fact that the distribution is clustered around a single value rather than spread out across many values. In other words, the clustered distribution has smaller variance. How can we test for this?
#
# One option is the Kolmogorov-Smirnov test, a non-parametric test used to investigate the “shape” of a distribution. Let's see what it finds in this data
ks_2samp(data_par_yoh, data_sal)
# Is it "significant"? No. But p-values are arbitrary constructions anyway. The important point is that this test makes a different kind of statement about the data than t-tests. Instead of asking "is the mean between the two distributions different?" it asks "is the shape of these distributions different?". This is a much more complicated question to ask, and there are many ways to test for this. Regardless, choosing the right test to fit your question requires careful consideration.
#
# # An aside on p-values and multiple comparisons
#
# We've performed a lot of tests in this notebook, and have often referred to p-values as some reflection of "significance". But is this the right thing to do?
#
# Whenever we find a significant result, it's important to ask "what's the likelihood that this was a false positive?" Let’s try a little computer simulation, generating “fake” random data to help us understand how often false positive can occur.
#
# 1. One trial is defined as follows: take two random samples (n = 10) from a normal distribution and run a two-sample t-test on them, taking note of the p-value.
# 1. One experiment is 100 trials. (you should have 100 p-values at the end of an experiment). In the course of one experiment, how often are the two random samples significantly different from each other (at the level of alpha = 0.05)? Take a note of that number.
# 1. Run the above experiment about 100 more times… Isn’t it interesting that the number of statistically different “data sets” is always very close to 5? Why do you think this may be the case? What implications does it have for t-tests?
# +
# Simulate the null hypothesis: both samples come from the same normal
# distribution, so every "significant" t-test below is a false positive.
n_iterations = 100
n_trials = 100
n_per_trial = 10
pvalues = np.zeros([n_iterations, n_trials])
for ii in range(n_iterations):
    for jj in range(n_trials):
        data1 = np.random.randn(n_per_trial)
        data2 = np.random.randn(n_per_trial)
        results = ttest_ind(data1, data2)
        pvalues[ii, jj] = results.pvalue
# -
# Histogram of p-values from one experiment; dashed line marks alpha = .05.
fig, ax = plt.subplots()
ax.hist(pvalues[0], bins=20)
ax.axvline(.05, c='r', ls='--', lw=3)
# Overlay the ECDF of p-values for every experiment; each faint curve is one
# experiment of 100 trials.
test_vals = np.arange(0, 1, .01)
fig, ax = plt.subplots(figsize=(4, 4))
for i_iteration in pvalues:
    i_ecdf = ECDF(i_iteration)
    ax.plot(test_vals, i_ecdf(test_vals), c='k', alpha=.1)
ax.axhline(.05, ls='--', c='r', lw=2)
ax.axvline(.05, ls='--', c='r', lw=2)
# Notice how, in every iteration, roughly the same proportion of the distribution lies under .05. This is the expected false positive rate (assuming all our assumptions about normality hold).
#
# Thus far we have focused on performing tests that use clever mathematical techniques. However, with the increasing computational power at our hands, there have evolved new ways for testing for differences between these groups. These computation-heavy methods include things like the statistical bootstrap and the permutation test.
# # Estimating the difference using a confidence interval
# Instead of using parametric statistics (or significance tests), another option is to simply give a confidence interval around the statistic of choice. For example, for the question "what is the expected mean of the difference between two distributions?", we might bootstrap the distribution of differences of the mean, and create confidence intervals around this value. See the [notebook on quantifying uncertainty](./simple_stats_and_uncertainty.ipynb) for a more thorough discussion.
# +
# Bootstrap the difference in group means: resample each group with
# replacement (same size as the original) and record the mean difference.
n_boots = 1000
n_sal = data_sal.shape[0]
n_py = data_par_yoh.shape[0]
differences = np.zeros(n_boots)
for ii in range(n_boots):
    sample_sal = data_sal[np.random.randint(0, n_sal, n_sal)]
    sample_py = data_par_yoh[np.random.randint(0, n_py, n_py)]
    differences[ii] = np.mean(sample_sal) - np.mean(sample_py)
# 95% bootstrap confidence interval (2.5th and 97.5th percentiles).
clo, chi = np.percentile(differences, [2.5, 97.5])
# -
fig, ax = plt.subplots()
ax.hist(differences, bins=np.arange(-2, 2, .1))
# Thick horizontal bar above the histogram marks the 95% CI.
ax.hlines(ax.get_ylim()[-1] + 5, clo, chi, lw=10, color='k')
# In a sense, this confidence interval represents the uncertainty in the difference in means of these two distributions. In a technical sense, a N% confidence interval means: "repeating the data collection and re-calculating the confidence interval many times will cause the resulting confidence interval to overlap with the "true" mean N% of the time.
#
# This seems a bit difficult to intuit, so let's simulate this below. We'll estimate a single value using our confidence interval: the mean of a distribution.
# +
n = 2000
mn = 4
# NOTE(review): 'std' is defined here but never used below — the simulated
# spread is hard-coded as sqrt(2) on the np.random.randn line. Confirm which
# value was intended.
std = 2
n_simulations = 200
n_boots = 1000
all_differences = np.zeros([n_simulations, n_boots])
for ii in range(n_simulations):
    # NOTE: this rebinding shadows the 'data' DataFrame loaded earlier.
    data = np.sqrt(2) * np.random.randn(n) + mn
    for jj in range(n_boots):
        sample = data[np.random.randint(0, n, n)]
        all_differences[ii, jj] = np.mean(sample)
# Now calculate the 95% CI for each simulation
clo, chi = np.percentile(all_differences, [2.5, 97.5], axis=1)
# -
# Above we've performed `n_simulations` bootstraps, so this gives us `n_simulations` confidence intervals. Let's see how many of them overlap with the "true" mean:
fig, ax = plt.subplots()
n_outside = 0
for ii, (iclo, ichi) in enumerate(zip(clo, chi)):
    # Red if the interval misses the true mean, black otherwise.
    if any([mn < iclo, mn > ichi]):
        color = 'r'
        n_outside += 1
    else:
        color = 'k'
    ax.hlines(ii, iclo, ichi, lw=1, color=color)
ax.set_title('Number of simulations outside of "true" mean:\n'
             '{} / {} ({:.3f}%)'.format(n_outside, n_simulations,
                                        100 * (float(n_outside) / n_simulations)))
# Pretty close to 5%. If we were to run an infinite number of simulations, we'd quickly converge to 5% of cases where the confidence interval did *not* overlap with the true mean.
#
# Another important thing to notice here is that the confidence interval itself does not tell you where *within the interval* the mean lies. It only says that the mean is likely somewhere within that interval. Many people intuitively want to say "the mean is most likely to be at the center of this interval" but this is not necessarily true.
| neurophysics-neuroscience/python/basics/comparing_two_datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.5.0-rc4
# language: julia
# name: julia-0.5
# ---
# # Tiago testcases
# ## Simulation of typical section aeroelastic response (based on setup at ITA LNCA)
#workspace()
include("../src/UNSflow.jl")
using UNSflow
# ### Structural and aerodynamic parameters
# +
# Typical-section (2-DOF pitch/plunge) structural and aerodynamic parameters.
c = 0.29; #m, Chord
mass = 6.5 # + 7.0; %kg, Mass from base and typical section
airdensity = 1.119 #kg/m^3 ### Structural definitions
pvt = 0.43105 #Elastic Axis, Percentage from LE Pivot(0-1)
w_h = 2.38*2*pi # rad/s Natural Frequency for H (plunge)
w_alpha = 4.29*2*pi #rad/s Natural Frequency for Alpha (pitch)
x_alpha = 0.0275/(c/2) #Distance between ea and cg divided by c %Static Unbalance
r_alpha = 0.064/(c/2) # 2*sqrt(Itheta/(m*c^2)) %Radius of Gyration (Adimensional)
kappa = pi*airdensity*c^2/(4*mass/0.75) # presumably an air/structure mass-ratio parameter — confirm against UNSflow docs
w_alphadot = 0.
w_hdot = 1.
# Cubic stiffness coefficients: linear term = 1, cubic term = 0, i.e. the
# springs in both plunge (h) and pitch (alpha) are purely linear here.
cubic_h_1 = 1.
cubic_h_3 = 0.
cubic_alpha_1 = 1.
cubic_alpha_3 = 0.
# -
# ### Initial condition
alpha_init = 10*pi/180 # rad (10 deg initial pitch)
alphadot_init = 0.
h_init = 0.
hdot_init = 0.
udot = 0
# ### Simulation control
# +
dt = 0.015
lespcrit = [0.3;] # High value, No LEV shedding
del = DelVortDef(1, 500, 10)
# -
# ### Case 1
# +
u = 16 # freestream velocity for this case
# Initial kinematic state; the trailing zeros fill the remaining state slots.
kinem = KinemPar2DOF(alpha_init, h_init, alphadot_init, hdot_init, u, udot, 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.)
strpar = TwoDOFPar(x_alpha, r_alpha, kappa, w_alpha, w_h, w_alphadot, w_hdot, cubic_h_1, cubic_h_3, cubic_alpha_1, cubic_alpha_3)
surf = TwoDSurf_2DOF(c, u, "FlatPlate", pvt, 70, 35, strpar, kinem, lespcrit)
curfield = TwoDFlowField()
nsteps = 15000
# @time prints the wall-clock time taken by the LDVM simulation.
@time mat, surf, curfield = ldvm(surf, curfield, nsteps, dt, del)
# -
plot(mat[:,1],mat[:,2])
surf.adot[3]
# + active=""
#
# -
| Notebooks/.ipynb_checkpoints/Typical section test (Tiago)-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Exploring Web Map Service (WMS)
#
# 1. WMS and OWSLib
# 2. Getting some information about the service
# 3. Getting the basic information we need to perform a GetMap request
# 4. More on GetMap request
# 5. TDS-ncWMS styles and extensions
# 6. WMS and basemap
#
#
#
# ## 1. WMS and OWSLib
# - WMS is the Open Geospatial Consortium (OGC) standard interface for requesting georeferenced __images__ through HTTP.
# - OWSLib is part of [geopython](http://geopython.github.io/), a GitHub organization comprised of Python projects related to geospatial.
# - OWSLib is a Python package for client programming with OGC Web Services (OWS) developed by [<NAME>](http://www.kralidis.ca/).
# - OWSLib supports several OGC standards: WFS, WCS, SOS...and of course WMS 1.1.1. [More](http://geopython.github.io/OWSLib/).
# - Does not come installed with canopy but is available in the community packages.
# - Installation with enpkg:
# * enpkg OWSLib
# * current version (07/09/2013) --> 0.4.0-1
# ## 2. Getting some information about the service
#
# * We will use OWSLib package and in particular the owslib.wms module.
# * Within the TDS context, if WMS is enabled and set up in the catalogs, each dataset has a WMS url.
#
# %matplotlib inline
from owslib.wms import WebMapService
#We just need a WMS url from one TDS dataset...
serverurl ='http://thredds.ucar.edu/thredds/wms/grib/NCEP/NAM/CONUS_12km/best'
# Constructing the WebMapService object issues the GetCapabilities request.
wms = WebMapService( serverurl, version='1.1.1')
# The WebMapService object gets all the information available about the service through a GetCapabilities request:
# +
#This is general information, common to all datasets in a TDS server
operations =[ op.name for op in wms.operations ]
print 'Available operations: '
print operations
print 'General information (common to all datasets):'
print wms.identification.type
print wms.identification.abstract
print wms.identification.keywords
print wms.identification.version
print wms.identification.title
# -
# - Bounding boxes, styles and dimensions are specific to each layer.
# - Each variable in a dataset translates into a layer in the WMS service.
# - Besides, the server creates virtual layers if it finds vector components in CF-1 or Grib conventions.
#Listing all available layers...
layers = list(wms.contents)
for l in layers:
    print 'Layer title: '+wms[l].title +', name:'+wms[l].name
# ## 3. Getting the basic information we need to perform a GetMap request
#
# - All the information clients need is available in the capabilities document, which is stored in the WebMapService object.
# - TDS-WMS only supports GetMap requests on one layer (variable).
# - We need to choose our layer, bounding box, spatial reference system (SRS), size and format of the image.
#
# +
#Values common to all GetMap requests: formats and http methods:
print wms.getOperationByName('GetMap').formatOptions
print wms.getOperationByName('GetMap').methods
#Let's choose: 'wind @ Isobaric surface' (the value in the parameter must be name of the layer)
wind = wms['wind @ Isobaric surface']
#What is its bounding box?
print wind.boundingBox
#available CRS
print wind.crsOptions
# --> NOT ALL THE AVAILABLE CRS OPTIONS ARE LISTED
# +
#Function that saves the layer as an image
def saveLayerAsImage(layer, inname):
    """Save the body of a WMS response (any object exposing .read()) to a file.

    layer  -- response object returned by wms.getmap() (has a .read() method)
    inname -- destination file path; the payload is written in binary mode
    """
    # 'with' guarantees the handle is closed even if layer.read() raises;
    # the previous open()/write()/close() sequence leaked the handle on error.
    with open(inname, 'wb') as out:
        out.write(layer.read())
#let's get the image...
# bbox order follows wind.boundingBox: (minx, miny, maxx, maxy) — presumably
# lon/lat for EPSG:4326.
img_wind = wms.getmap( layers=[wind.name], #only takes one layer
                       srs='EPSG:4326',
                       bbox=(wind.boundingBox[0],wind.boundingBox[1], wind.boundingBox[2], wind.boundingBox[3]),
                       size=(512, 512),
                       format='image/png'
                     )
#Save it..
saveLayerAsImage(img_wind, 'test_wind.png')
#Display the image we've just saved...
from IPython.core.display import Image
Image(filename='test_wind.png')
# -
# # 4. More on GetMap requests
#
# * Handling time and vertical dimensions
# * Changing styles
# * Changing the spatial reference system (SRS)
#
# ### Handling time and vertical dimensions
# * Getting available times for a layer:
#Times are available in the timepositions property of the layer
times= [time.strip() for time in wind.timepositions]
print times
# +
#We can choose any of the available times and make a request for it with the parameter time
#If no time is provided the default in TDS is the closest available time to the current time
img_wind = wms.getmap( layers=[wind.name],
                       srs='EPSG:4326',
                       bbox=(wind.boundingBox[0],wind.boundingBox[1], wind.boundingBox[2], wind.boundingBox[3]),
                       size=(600, 600),
                       format='image/png',
                       time= times[len(times)-1] #last available time
                     )
saveLayerAsImage(img_wind, 'test_wind.png')
Image(filename='test_wind.png')
# +
#We can also specify a time interval to get an animated gif
#Format must be image/gif
img_wind = wms.getmap( layers=[wind.name],
                       srs='EPSG:4326',
                       bbox=(wind.boundingBox[0],wind.boundingBox[1], wind.boundingBox[2], wind.boundingBox[3]),
                       size=(600, 600),
                       format='image/gif',
                       time= times[len(times)-4]+'/'+times[len(times)-1] #start/end interval
                     )
#Image(url='http://python.org/images/python-logo.gif')
#saveLayerAsImage(img_wind, 'test_anim_wind.gif')
Image(url=img_wind.url)
# -
# * Getting the available vertical levels:
# OWSLib does not support vertical levels, meaning the layer objects do not have a property "elevations" with the vertical levels. So, we need a little extra work to get the available vertical levels for a layer
# +
#Next version of OWSLib will support this...
#elevations = [el.strip() for el in wind.elevations]
#print elevations
#In the meantime...
def find_elevations_for_layer(wms, layer_name):
    """
    Scan the raw WMS capabilities document held by *wms* for the layer
    named *layer_name* and return its elevation dimension as a list of
    level strings. Returns None when the layer is absent or has no
    elevation extent.
    """
    # Locate the <Layer> element whose direct <Name> child matches.
    matching_layer = None
    for candidate in wms._capabilities.findall(".//Layer"):
        name_el = candidate.find("Name")
        if name_el is not None and name_el.text.strip() == layer_name:
            matching_layer = candidate
            break
    if matching_layer is None:
        return None
    # The vertical dimension lives in an <Extent name="elevation"> child.
    extent = matching_layer.find("Extent[@name='elevation']")
    if extent is None:
        return None
    return extent.text.strip().split(',')
elevations = find_elevations_for_layer(wms, wind.name)
print elevations
# +
#now we can change our vertical level with the parameter elevation
#If no elevation parameter is provided the default is the first vertical level in the dimension.
img_wind = wms.getmap( layers=['wind @ Isobaric surface'], #only takes one layer
                       srs='EPSG:4326',
                       bbox=(wind.boundingBox[0],wind.boundingBox[1], wind.boundingBox[2], wind.boundingBox[3]),
                       size=(600, 600),
                       format='image/png',
                       time= times[0],
                       elevation=elevations[len(elevations)-1 ] #last element of the elevation list
                     )
saveLayerAsImage(img_wind, 'test_wind.png')
Image(filename='test_wind.png')
# -
# ### Changing styles
# * We can specify the style (any from the available styles for a layer) in the param styles
# +
#available styles:
#print wind.styles
#Change the style of our layer
img_wind = wms.getmap( layers=[wind.name], #only takes one layer
                       styles=['barb/rainbow'], #one style per layer
                       srs='EPSG:4326',
                       bbox=(wind.boundingBox[0],wind.boundingBox[1], wind.boundingBox[2], wind.boundingBox[3]),
                       size=(600, 600),
                       format='image/png',
                       time= times[0]
                     )
saveLayerAsImage(img_wind, 'test_wind_barb.png')
Image(filename='test_wind_barb.png')
# -
# ### Changing the spatial reference system (SRS)
# * We can reproject to any of the available SRS.
# +
#Reproject the bounding box to a global mercator (EPSG:3857, projection used by Google Maps, OSM...) using pyproj
from mpl_toolkits.basemap import pyproj
epsg = '3857'
psproj = pyproj.Proj(init="epsg:%s" % epsg)
# Project the lon/lat corners of the bounding box into the target CRS.
xmin, ymin = psproj(wind.boundingBox[0], wind.boundingBox[1])
xmax, ymax = psproj(wind.boundingBox[2], wind.boundingBox[3])
img_wind = wms.getmap( layers=[wind.name],
                       srs='EPSG:'+ epsg,
                       bbox=(xmin, ymin, xmax, ymax),
                       size=(600, 600),
                       format='image/png',
                       time= times[0]
                     )
saveLayerAsImage(img_wind, 'test_wind_3857.png')
Image(filename='test_wind_3857.png')
# -
# Cool, we already know how to make get map requests. Let's change our layer...
# +
# Same request pattern for the temperature layer, with its default colour scale.
temp =wms['Temperature_isobaric']
img_temp = wms.getmap( layers=[temp.name],
                       styles=['boxfill/rainbow'],
                       srs='EPSG:4326',
                       bbox=(temp.boundingBox[0],temp.boundingBox[1], temp.boundingBox[2], temp.boundingBox[3]),
                       size=(600, 600),
                       format='image/png',
                       time= times[0]
                     )
saveLayerAsImage(img_temp, 'test_temp.png')
Image(filename='test_temp.png')
# -
# ...well not that cool.
# ## 5. TDS-ncWMS styles and extensions
#
# * ncWMS/THREDDS provides some __[non-standard WMS parameters](http://www.resc.rdg.ac.uk/trac/ncWMS/wiki/WmsExtensions)__ that allow clients some control on the styling.
#
# - Change the scale range:
# - Default is -50,50. Parameter colorscalerange allows us to use a different scale
#
# +
# NOTE(review): the bbox below is taken from the 'wind' layer rather than
# 'temp' — presumably the two layers share a grid; confirm.
img_temp = wms.getmap( layers=[temp.name],
                       styles=['boxfill/rainbow'],
                       srs='EPSG:4326',
                       bbox=(wind.boundingBox[0],wind.boundingBox[1], wind.boundingBox[2], wind.boundingBox[3]),
                       size=(600, 600),
                       format='image/png',
                       time= times[0],
                       colorscalerange='250,320'
                     )
saveLayerAsImage(img_temp, 'test_temp.png')
Image(filename='test_temp.png')
# -
# * abovemaxcolor, belowmincolor params give us control on how we want the values out of range to be displayed.
# * valid values for those params are: extend (will use the highest/lowest value of the palette for values larger/smaller than the maximun/minimun), transparent and a color in 0xRRGGBB format
# +
colorscalerange='290,310'
img_temp = wms.getmap( layers=[temp.name],
                       styles=['boxfill/rainbow'],
                       srs='EPSG:4326',
                       bbox=(wind.boundingBox[0],wind.boundingBox[1], wind.boundingBox[2], wind.boundingBox[3]),
                       size=(600, 600),
                       format='image/png',
                       time= times[0],
                       colorscalerange=colorscalerange,
                       abovemaxcolor='transparent',
                       belowmincolor='transparent'
                     )
saveLayerAsImage(img_temp, 'test_temp.png')
Image(filename='test_temp.png')
# -
# The GetLegendGraphic request gives us a legend for the map, but the request is not supported by OWSLib.
# +
# Build the GetLegendGraphic URL by hand, since OWSLib has no helper for it.
params ={'request': 'GetLegendGraphic',
         'colorbaronly':'False', #want the text in the legend
         'layer':temp.name,
         'colorscalerange':colorscalerange}
legendUrl=serverurl+'?REQUEST={request:s}&COLORBARONLY={colorbaronly:s}&LAYER={layer:s}&COLORSCALERANGE={colorscalerange:s}'.format(**params)
Image(url=legendUrl)
# -
# ## 6. WMS and basemap
# We can use basemap to overlay the layer with a coastline...
# +
import os
import urllib2
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
from matplotlib._png import read_png
# Basemap with the temp layer's bounding box (5 degrees of headroom at the top
# to leave space for the legend annotation).
m = Basemap(llcrnrlon=temp.boundingBox[0], llcrnrlat=temp.boundingBox[1],
            urcrnrlon=temp.boundingBox[2], urcrnrlat=temp.boundingBox[3]+5.0,
            resolution='l',epsg=4326)
plt.figure(1, figsize=(16,12))
plt.title(temp.title +' '+times[0] )
m.wmsimage(serverurl,xpixels=600, ypixels=600, verbose=False,
           layers=[temp.name],
           styles=['boxfill/rainbow'],
           time= times[0],
           colorscalerange=colorscalerange,
           abovemaxcolor='extend',
           belowmincolor='transparent'
           )
m.drawcoastlines(linewidth=0.25)
#Annotating the map with the legend
#Save the legend as image
# NOTE(review): 'cwd' is assigned but never used below.
cwd = os.getcwd()
legend = urllib2.urlopen(legendUrl)
saveLayerAsImage(legend, 'legend_temp.png')
#read the image as an array
arr = read_png('legend_temp.png')
imagebox = OffsetImage(arr, zoom=0.7)
xy =[ temp.boundingBox[2], temp.boundingBox[1] ]
#Gets the current axis
ax = plt.gca()
#Creates the annotation
ab = AnnotationBbox(imagebox, xy,
                    xybox=(-46.,100.),
                    xycoords='data',
                    boxcoords="offset points",
                    pad=0.)
#Adds the legend image as an AnnotationBbox to the map
ax.add_artist(ab)
plt.show()
# -
# ## Exercise:
# - Get the vertical levels for the layer temp.
# - Change the request for getting the highest level.
# - Change the color scale range to appropriate values.
| WMS/wms_sample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plot figure 7.3
#
# Theme Song: Caramel<br>
# Artist: Brontide<br>
# Album: Artery<br>
# Released: 2014
#
# - Author: <NAME>
# - Editor: <NAME>
# - Data: Norman Loeb
import numpy as np
import string
import matplotlib.pyplot as plt
import matplotlib
plt.rcParams['figure.figsize'] = (18/2.54, 18/2.54)
plt.rcParams['font.size'] = 11
plt.rcParams['font.family'] = 'Arial'
plt.rcParams['ytick.direction'] = 'out'
plt.rcParams['ytick.minor.visible'] = True
plt.rcParams['ytick.major.right'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['xtick.bottom'] = True
# +
# This version should be Python3 compliant..
def read_textfile(datadir='../data_input/Loeb_et_al_2020/',
                  filename='Global_Net_Anomaly_Timeseries_12monthMean.txt', skip=1):
    """Read a whitespace-delimited text table into a dict of column arrays.

    Parameters
    ----------
    datadir : str
        Directory holding the file. The path is formed by simple string
        concatenation, so it must end with a path separator.
    filename : str
        Name of the text file to read.
    skip : int
        Number of leading non-data lines; line ``skip - 1`` is the header
        whose whitespace-separated tokens become the dictionary keys.

    Returns
    -------
    dict
        Maps each header token to the corresponding 1-D numpy float column.
    """
    # 'with' guarantees the file is closed even if parsing below raises
    # (the old open()/close() pair leaked the handle on error).
    with open(datadir + filename, "r") as f:
        lines = f.readlines()
    keys = lines[skip - 1].split()
    # Column count is taken from the last line, matching the original logic.
    ncols = len(lines[-1].split())
    data = np.zeros((len(lines) - skip, ncols))
    for jj, line in enumerate(lines[skip:]):
        # Split each line once (previously each line was re-split per column).
        data[jj, :] = [float(tok) for tok in line.split()[:ncols]]
    return {key: data[:, kk] for kk, key in enumerate(keys)}
# +
# Load the 12-month-mean global anomaly time series for each flux component.
Net_dict = read_textfile()
SW_dict = read_textfile(filename='Global_SW_Anomaly_Timeseries_12monthMean.txt')
LW_dict = read_textfile(filename='Global_LW_Anomaly_Timeseries_12monthMean.txt')

# Include full model names as specified in Loeb et al (2020):
# https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2019GL086705
# Alias each short column name to its full CMIP6 model name in all three dicts.
_full_names = {'EC-Earth3': 'EC-Earth3-Veg',
               'ECHAM': 'ECHAM6.3',
               'GFDL': 'GFDL-AM4',
               'IPSL': 'IPSL-CM6A'}
for _short, _full in _full_names.items():
    for _d in (Net_dict, SW_dict, LW_dict):
        _d[_full] = _d[_short]

print(Net_dict.keys())
# +
yr = Net_dict['yr']
# Quick-look plot of the net anomalies using the short column names
# straight from the text files, with CERES observations overlaid in black.
for model in ['HadGEM3', 'ECHAM', 'EC-Earth3', 'CESM2', 'CanESM5','IPSL', 'GFDL']:
    plt.plot(yr, Net_dict[model], label=model)
plt.plot(yr, Net_dict['CERES'], label='CERES', color='k', linewidth=2.0)
plt.legend(ncol=2)
plt.show()
# +
# Use the IPCC CMIP6 model colours from "~/Documents/IPCC/AR6/CMIP6_color.xlsx" where possible..
# RGB triples are given on a 0-255 scale and normalised to 0-1 for matplotlib.
RGB_dict = {'CanESM5':np.array([30, 76, 36])/255.,
            'CESM2':np.array([67, 178, 216])/255.,
            'EC-Earth3-Veg':np.array([124, 99, 184])/255.,
            'ECHAM6.3':np.array([93, 161,162])/255., # Atmosphere for MPI model
            'GFDL-AM4':np.array([35, 54, 109])/255.,
            'HadGEM3':np.array([122, 139, 38])/255.,
            'IPSL-CM6A':np.array([91, 83, 174])/255.}
# +
# Three stacked panels (SW / LW / Net global-mean flux anomalies): each CMIP6
# model as a thin coloured line, the multi-model mean in dark red, and CERES
# observations in black. Saved as both PNG and PDF.
plotdir = '../figures/'
#plotfile = 'plot_AMIP_CERES_comparison_ModelMean_FGD.png'
plotfile = 'fig7.3.png'
c_obs = 'black' # Line color for CERES observations
c_mean = 'darkred' # Line color for multi-model mean
lw_model = 1.0 # Linewidth for individual CMIP models
text1 = """The ensemble mean of available CMIP6 climate models tracks the observed
energy budget changes when forced with observed sea surface temperatures"""
xmin, xmax = 2000., 2018
ymin, ymax = -1.3, 1.3
plt.figure(1)
f = plt.gcf()
#f.set_size_inches(6.,8.)
matplotlib.rcParams['font.size']=9
matplotlib.rcParams['axes.linewidth']=0.5 # set the value globally
yr = Net_dict['yr']
# In alphabetical order..
models = ['CanESM5','CESM2', 'EC-Earth3-Veg', 'ECHAM6.3', 'GFDL-AM4', 'HadGEM3', 'IPSL-CM6A']
# Panel A = SW radiation anomaly comparison
ax = plt.subplot(3, 1, 1)
plt.plot([xmin,xmax], [0., 0.], 'grey', linewidth=0.75) # Plot zero line
for model in models:
    color = RGB_dict[model]
    # Adopt Chapter 7 convention of +ve downwards
    plt.plot(yr, -SW_dict[model], label=None, color=color, linewidth=lw_model)
# Mean and observations are plotted once, outside the per-model loop.
plt.plot(yr, -SW_dict['multimodel'], label='Model mean', color=c_mean, linewidth=3.0)
plt.plot(yr, -SW_dict['CERES'], label='CERES observations', color=c_obs, linewidth=3.0)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
#ax.set_xticklabels('')
ax.yaxis.set_ticks_position('both')
ax.tick_params(width=0.5)
plt.ylabel('W m$^{-2}$')
#plt.title('a) Global mean reflected solar flux anomaly')
plt.title('a) Global mean solar flux anomaly')
plt.legend(loc='upper left', frameon=False, fontsize=8, ncol=4)
# Panel B = LW radiation anomaly comparison
ax = plt.subplot(3, 1, 2)
plt.plot([xmin,xmax], [0., 0.], 'grey', linewidth=0.75) # Plot zero line
for model in models:
    color = RGB_dict[model]
    # Adopt Chapter 7 convention of +ve downwards
    plt.plot(yr, -LW_dict[model], label=model, color=color, linewidth=lw_model)
plt.plot(yr, -LW_dict['multimodel'], label=None, color=c_mean, linewidth=3.0)
plt.plot(yr, -LW_dict['CERES'], label=None, color=c_obs, linewidth=3.0)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
#ax.set_xticklabels('')
ax.yaxis.set_ticks_position('both')
ax.tick_params(width=0.5)
plt.ylabel('W m$^{-2}$')
#plt.title('b) Global mean emitted thermal flux anomaly')
plt.title('b) Global mean thermal flux anomaly')
plt.legend(loc='upper left', frameon=False, fontsize=8, ncol=4)
#plt.legend(loc='lower left', frameon=False, fontsize=8, ncol=4)
# Panel C = Net radiation anomaly comparison
ax = plt.subplot(3, 1, 3)
plt.plot([xmin,xmax], [0., 0.], 'grey', linewidth=0.75) # Plot zero line
for model in models:
    color = RGB_dict[model]
    # Net flux is plotted without the sign flip used in panels a/b.
    plt.plot(yr, Net_dict[model], label=None, color=color, linewidth=lw_model)
plt.plot(yr, Net_dict['multimodel'], label=None, color=c_mean, linewidth=3.0)
plt.plot(yr, Net_dict['CERES'], label=None, color=c_obs, linewidth=3.0)
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
plt.text(2000.5, 1.22, text1, fontsize=8, va='top')
#plt.text(2000.5, 1.1, text2, fontsize=8)
#ax.set_xticklabels('')
ax.yaxis.set_ticks_position('both')
ax.tick_params(width=0.5)
plt.ylabel('W m$^{-2}$')
plt.title('c) Global mean net flux anomaly')
#plt.legend(loc='upper left', frameon=False, fontsize=8, ncol=4)
plt.tight_layout()
print("Saving file: ", plotdir+plotfile)
plt.savefig(plotdir+plotfile,dpi=300)
plt.savefig(plotdir+'fig7.3.pdf')
# -
| notebooks/300_chapter7_fig7.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random
import torch
import torch.nn as nn
import torch.optim as optim
from torchtext import data
from torchtext import datasets
import pytorch_lightning as pl
# +
# Fix RNG seeds so splits and weight initialisation are reproducible.
SEED = 1234
torch.manual_seed(SEED)
torch.backends.cudnn.deterministic = True
# +
# Field definitions: tokenise with spaCy and keep sequence lengths so padded
# batches can later be packed; labels are floats for BCEWithLogitsLoss.
TEXT = data.Field(tokenize = 'spacy', include_lengths = True)
LABEL = data.LabelField(dtype = torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL, root='../data/')
# +
train_data, valid_data = train_data.split(random_state = random.seed(SEED))
MAX_VOCAB_SIZE = 25_000
# Build the vocabulary from the training split only, seeded with GloVe
# vectors; unknown words receive a random normal embedding.
TEXT.build_vocab(train_data,
                 max_size = MAX_VOCAB_SIZE,
                 vectors = "glove.6B.300d",
                 unk_init = torch.Tensor.normal_)
LABEL.build_vocab(train_data)
# -
# Model hyper-parameters.
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100  # NOTE(review): the vocab above loads 300-d GloVe vectors -- confirm the intended embedding size.
HIDDEN_DIM = 256
OUTPUT_DIM = 1  # a single logit for binary sentiment
N_LAYERS = 2
BIDIRECTIONAL = True
DROPOUT = 0.5
BATCH_SIZE = 32
# Sanity-check the splits and the vocabularies built above.
print(f'Number of training examples: {len(train_data)}')
print(f'Number of testing examples: {len(test_data)}')
print(vars(train_data.examples[0]))
print(f'Number of training examples: {len(train_data)}')
print(f'Number of validation examples: {len(valid_data)}')
print(f'Number of testing examples: {len(test_data)}')
print(f"Unique tokens in TEXT vocabulary: {len(TEXT.vocab)}")
print(f"Unique tokens in LABEL vocabulary: {len(LABEL.vocab)}")
print(TEXT.vocab.freqs.most_common(100))
print(TEXT.vocab.itos[:10])
# +
# Use a GPU when available; BucketIterator batches similar-length examples
# together to reduce padding.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
    (train_data, valid_data, test_data),
    batch_size = BATCH_SIZE,
    sort_within_batch = True,
    device = device)
# +
# Peek at one batch; with include_lengths=True, batch.text is a
# (token indices, lengths) tuple.
it = next(iter(train_iterator))
text, ln = it.text
# label, ln = it.label
# print([TEXT.vocab.itos[token] for token in text.tolist()])
print([TEXT.vocab.itos[token] for token in it.text[0][0].tolist()])
print('label : ',LABEL.vocab.itos[int(it.label[0].item())])
# it.label[0].item()
# LABEL.vocab.itos
# +
# Inspect the first raw training example.
tr = train_data[0]
tr.text
# text, ln = it.text
# # label, ln = it.label
print([TEXT.vocab.itos[token] for token in tr.text])
# print([TEXT.vocab.itos[token] for token in it.text[0][0].tolist()])
# print('label : ',LABEL.vocab.itos[int(it.label[0].item())])
# it.label[0].item()
# LABEL.vocab.itos
# -
def binary_accuracy(preds, y):
    """Fraction of correct binary predictions for one batch.

    `preds` are raw logits; they are squashed through a sigmoid and rounded
    to {0, 1} before comparison with the 0/1 targets in `y`. Returns a
    0-dim float tensor in [0, 1] (e.g. 8/10 right -> 0.8, NOT 8).
    """
    predicted = torch.sigmoid(preds).round()
    hits = (predicted == y).float()  # float so the division below is fractional
    return hits.sum() / len(hits)
# +
class SentimentNetwork(nn.Module):
    """Minimal sentiment classifier: embedding -> vanilla RNN -> linear head."""

    def __init__(self, input_dim, embedding_dim, hidden_dim, output_dim):
        super().__init__()
        # Submodules are created in the same order as before so that seeded
        # weight initialisation stays reproducible.
        self.embedding = nn.Embedding(input_dim, embedding_dim)
        self.rnn = nn.RNN(embedding_dim, hidden_dim)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, text):
        """Map token indices of shape [sent len, batch] to one logit per sequence."""
        token_vectors = self.embedding(text)            # [sent len, batch, emb dim]
        states, final_state = self.rnn(token_vectors)   # states: [sent len, batch, hid dim]
        last_state = final_state.squeeze(0)             # [batch, hid dim]
        # For a single-layer RNN the last output step equals the final hidden state.
        assert torch.equal(states[-1, :, :], last_state)
        return self.fc(last_state)
class SentimentLSTM(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers,
bidirectional, dropout, pad_idx):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx = pad_idx)
self.lstm = nn.LSTM(embedding_dim,
hidden_dim,
num_layers=n_layers,
bidirectional=bidirectional,
dropout=dropout)
self.fc = nn.Linear(hidden_dim * 2, output_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, text, text_lengths):
#text = [sent len, batch size]
embedded = self.dropout(self.embedding(text))
#embedded = [sent len, batch size, emb dim]
#pack sequence
packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths)
packed_output, (hidden, cell) = self.lstm(packed_embedded)
#unpack sequence
output, output_lengths = nn.utils.rnn.pad_packed_sequence(packed_output)
#output = [sent len, batch size, hid dim * num directions]
#output over padding tokens are zero tensors
#hidden = [num layers * num directions, batch size, hid dim]
#cell = [num layers * num directions, batch size, hid dim]
#concat the final forward (hidden[-2,:,:]) and backward (hidden[-1,:,:]) hidden layers
#and apply dropout
hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))
#hidden = [batch size, hid dim * num directions]
return self.fc(hidden)
class TaskIMDB(pl.LightningModule):
    """LightningModule wrapper: runs `model` on (text, lengths) batches and
    logs loss/accuracy for training and validation."""

    def __init__(self, model, optimizers, criterion, accuracy):
        super().__init__()
        self.model = model
        # Bug fix: the original assigned the undefined name `optimizer`
        # (the parameter is called `optimizers`), which raised NameError.
        self.optimizer = optimizers
        self.criterion = criterion
        self.accuracy = accuracy

    def shared_step(self, batch, batch_idx):
        """One forward pass; returns (loss, accuracy) for the batch."""
        text, text_lengths = batch.text
        predictions = self.model(text, text_lengths).squeeze(1)
        # predictions = self.model(batch.text).squeeze(1)
        loss = self.criterion(predictions, batch.label)
        acc = self.accuracy(predictions, batch.label)
        return loss, acc

    def training_step(self, batch, batch_idx):
        loss, acc = self.shared_step(batch, batch_idx)
        # NOTE(review): pl.TrainResult/EvalResult existed only around
        # pytorch-lightning 0.9; newer releases use self.log() instead --
        # confirm the pinned lightning version before upgrading.
        result = pl.TrainResult(loss)
        result.log_dict({'trn_loss': loss, 'trn_acc':acc})
        return result

    def validation_step(self, batch, batch_idx):
        loss, acc = self.shared_step(batch, batch_idx)
        result = pl.EvalResult(checkpoint_on=loss)
        result.log_dict({'val_loss': loss, 'val_acc': acc})
        return result

    def configure_optimizers(self):
        return self.optimizer
# +
# model = SentimentNetwork(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)
PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
model = SentimentLSTM(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM, N_LAYERS, BIDIRECTIONAL, DROPOUT, PAD_IDX)
# Bug fix: the original zeroed the embedding rows *before* `model` and
# PAD_IDX existed, raising NameError. Build the model first, then zero the
# <unk> and <pad> rows so they start with no signal.
UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]
model.embedding.weight.data[UNK_IDX] = torch.zeros(EMBEDDING_DIM)
model.embedding.weight.data[PAD_IDX] = torch.zeros(EMBEDDING_DIM)
# NOTE(review): the pretrained GloVe vectors in TEXT.vocab.vectors are never
# copied into the embedding -- confirm whether
# model.embedding.weight.data.copy_(TEXT.vocab.vectors) was intended here.
# -
optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.BCEWithLogitsLoss()  # combines sigmoid + BCE; numerically stable
# +
from pytorch_lightning import loggers as pl_loggers

# Log training curves to TensorBoard under logs/imdb, then fit the task.
tb_logger = pl_loggers.TensorBoardLogger('logs/imdb')
task = TaskIMDB(model, optimizer, criterion, binary_accuracy)
trainer = pl.Trainer(gpus=1, logger=tb_logger)  # assumes one GPU is available
trainer.fit(task, train_iterator, valid_iterator)
# +
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in `model`."""
    trainable_sizes = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable_sizes)
# Report the model size using count_parameters (defined just above).
print(f'The model has {count_parameters(model):,} trainable parameters')
# -
def train(model, iterator, optimizer, criterion):
    """Run one optimisation epoch; returns (mean loss, mean accuracy).

    NOTE(review): this passes batch.text straight to the model, which matches
    the plain SentimentNetwork signature; with include_lengths=True
    batch.text is a (tokens, lengths) tuple -- confirm before using this
    helper with SentimentLSTM.
    """
    running_loss = 0
    running_acc = 0
    model.train()  # enable dropout etc. during optimisation
    for batch in iterator:
        optimizer.zero_grad()
        logits = model(batch.text).squeeze(1)
        loss = criterion(logits, batch.label)
        acc = binary_accuracy(logits, batch.label)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        running_acc += acc.item()
    return running_loss / len(iterator), running_acc / len(iterator)
def evaluate(model, iterator, criterion):
    """Evaluate without gradient tracking; returns (mean loss, mean accuracy)."""
    running_loss = 0
    running_acc = 0
    model.eval()  # disable dropout etc. for deterministic evaluation
    with torch.no_grad():
        for batch in iterator:
            logits = model(batch.text).squeeze(1)
            running_loss += criterion(logits, batch.label).item()
            running_acc += binary_accuracy(logits, batch.label).item()
    return running_loss / len(iterator), running_acc / len(iterator)
# +
import time
def epoch_time(start_time, end_time):
    """Split the elapsed time between two timestamps into (minutes, seconds)."""
    elapsed = end_time - start_time
    whole_minutes = int(elapsed / 60)
    leftover_seconds = int(elapsed - 60 * whole_minutes)
    return whole_minutes, leftover_seconds
# +
N_EPOCHS = 5
best_valid_loss = float('inf')
# Move model and loss to the selected device before training.
model = model.to(device)
criterion = criterion.to(device)
for epoch in range(N_EPOCHS):
    start_time = time.time()
    train_loss, train_acc = train(model, train_iterator, optimizer, criterion)
    valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)
    end_time = time.time()
    epoch_mins, epoch_secs = epoch_time(start_time, end_time)
    # Checkpoint the best model seen so far, by validation loss.
    if valid_loss < best_valid_loss:
        best_valid_loss = valid_loss
        torch.save(model.state_dict(), 'tut1-model.pt')
    print(f'Epoch: {epoch+1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
    print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc*100:.2f}%')
    print(f'\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:.2f}%')
# +
# Restore the best checkpoint and report held-out test performance.
model.load_state_dict(torch.load('tut1-model.pt'))
test_loss, test_acc = evaluate(model, test_iterator, criterion)
print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')
# -
| notebook/imdb_train.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++17
// language: C++17
// name: xcpp17
// ---
// # AlgoViz
// <div class="prereq">
// <h3>Voraussetzungen</h3>
// <div>
// <ul>
// <li><a class="prereq" href="/user-redirect/algoviz/lessons/01_Algorithmen/02_ImperativeProgramme.ipynb">Imperative Programme</a></li>
// <li><a class="prereq" href="/user-redirect/algoviz/lessons/02_Grundlagen/02_Variablen.ipynb">Variablen</a> und
// <a class="prereq" href="/user-redirect/algoviz/lessons/02_Grundlagen/03_Ints.ipynb">Integer-Typen.</a></li>
// </ul>
// </div>
// </div>
// In der Vorlesung nutzen wir die eigens entwickelte Bibliothek **AlgoViz**. Zu ihr gehören neben der bereits bekannten [Schildkröte](/user-redirect/algoviz/lessons/01_Algorithmen/01_Algorithmen.ipynb) auch einige andere Funktionalitäten. Eine davon - sogenannte [SVG-Grafiken](https://de.wikipedia.org/wiki/Scalable_Vector_Graphics) - wollen wir in diesem Notebook kennenlernen. Dazu müssen wir die richtige Bibliothek einbinden. Dies geschieht mit dem üblichen `#include`.
// AlgoViz graphics primitives (SVG drawing) -- note the exact capitalisation.
#include <algoviz/SVG.hpp>
// Beachten Sie die Groß- und Kleinschreibung!
// Jetzt können wir die Seitenleiste durch zwei Befehle ein- und ausblenden.
AlgoViz::show(); // Einblenden
AlgoViz::hide(); // Ausblenden
// Wir können aber auch eine Art **Zeichenbrett** hinzufügen. Dazu steht die **Klasse** `SVG` zur Verfügung. Die Erzeugung ist ähnlich zu dem von `Turtle`. Die beiden Parameter sind die Breite und Höhe der Zeichnung in Punkten.
SVG zeichnung = SVG(400,400);
// In der Klasse stehen eine Reihe von Befehlen zur Verfügung. Sie können Sie über die **interne Dokumentation** einsehen. Führen Sie dazu die nächste Zelle aus.
// xeus-cling help magic: `?NAME` opens the documentation pane for NAME.
?SVG
// Durch das vorangestellte Fragezeichen rufen Sie die Dokumentation der Klasse SVG auf. Sie erscheint in einem eigenen Bereich an der unteren Kante.
//
// Das Ganze geht auch mit den Standard C++ Funktionen. Probieren Sie mal Folgendes:
?std::cin
// Jetzt aber zurück zu `SVG`.
?SVG
// Scrollen Sie mal nach unten. Sie werden eine Reihe von Operationen entdecken, die wir verwenden können. Beispielsweise `drawCircle(x,y,r)` mit der Sie einen Kreis zeichnen können.
zeichnung.drawCircle(40,60,30); // circle: centre (40,60), radius 30
// Es gibt noch eine Reihe anderer Formen, die Sie zeichnen können. Die meisten sind relativ einfach.
// <div class="task">
// <h3>Aufgaben</h3>
// <div>
// <p>Probieren Sie die verschiedenen Befehle für das Zeichnen von Formen aus.</p>
// </div>
// </div>
// +
// Platz zum Experimentieren
// -
// ## Farbe
//
// Wir können auch Farben verwenden. Dazu stehen zwei Möglichkeiten zur Verfügung. Entweder verwendet man die sogenannten [CSS-Farbnamen](https://www.w3schools.com/colors/colors_names.asp)
// oder die [RGB-Komponenten](https://de.wikipedia.org/wiki/RGB-Farbraum). Durch die Operation `setColor()` wird die Linienfarbe für alle nach folgenden Zeichnungen gesetzt. `setFill()` setzt die Füllung der Objekte.
// +
zeichnung.clear(); // Lösche die Zeichnung
// setColor sets the outline colour and setFill the fill colour for all
// shapes drawn afterwards (CSS colour names).
zeichnung.setColor("red");
zeichnung.setFill("MediumSpringGreen");
zeichnung.drawRect(60,70,100,50);
zeichnung.setColor("blue");
zeichnung.setFill("PaleTurquoise");
zeichnung.drawCircle(100,30,20);
// -
// Die RGB-Komponenten geben die Rot-, Grün- und Blauanteile der Farben an. Sie können Werte zwischen 0 und 255 annehmen.
// +
zeichnung.clear();
// RGB components: each channel ranges from 0 to 255.
zeichnung.setColor(128,0,128); // "halb" rot, "halb" blau
zeichnung.setFill(0,255,128); // "ganz" grün und "halb" blau
zeichnung.drawRect(60,70,100,50);
zeichnung.drawCircle(100,30,20);
// -
// Man kann einen optionalen vierten Parameter angeben, der die Transparenz der Farbe angibt. Er kann Werte zwischen 0.0 (durchsichtig) und 1.0 (undurchsichtig) annehmen.
// +
zeichnung.clear();
zeichnung.setColor("black");
// Fourth argument is the alpha value: 0.0 fully transparent .. 1.0 opaque.
zeichnung.setFill(255,255,0,1.0); // undurchsichtig
zeichnung.drawRect(10,10,100,50);
zeichnung.setFill(255,0,255,0.5); // halb durchsichtig
zeichnung.drawRect(20,20,100,50);
zeichnung.setFill(0,255,0,0.25); // etwas durchscheinender
zeichnung.drawRect(30,30,100,50);
zeichnung.setFill(0,0,0,0.0); // unsichtbar
zeichnung.drawRect(40,40,100,50);
// -
// Es gibt auch die Farbe "transparent".
// +
// Same rectangles, but with a "transparent" outline colour.
zeichnung.clear();
zeichnung.setColor("transparent"); // Der Rand wird nicht gezeichnet.
zeichnung.setFill(255,255,0,1.0);
zeichnung.drawRect(10,10,100,50);
zeichnung.setFill(255,0,255,0.5);
zeichnung.drawRect(20,20,100,50);
zeichnung.setFill(0,255,0,0.25);
zeichnung.drawRect(30,30,100,50);
// -
// ## Variablen für Veränderung
//
// <a class="prereq" href="/user-redirect/algoviz/lessons/02_Grundlagen/02_Variablen.ipynb">Variablen</a> können dazu verwendet werden Dinge zu verändern. Um das zu sehen, verwenden wir als erstes Variablen, um Linien in verschiedenen Farben an verschiedenen Stellen zu zeichnen.
//
// Aber zuerst räumen wir auf.
// Clear the canvas before the next example.
zeichnung.clear();
// +
// The same variable determines both the colour and the position of the line.
int wert = 0; // Unsere Variable hat den Wert 0
zeichnung.setColor(wert,0,255); // Damit legen wir die Farbe ...
zeichnung.drawLine(0,wert,255,wert); // ... und die Position der Linie fest.
// -
// Wie man sieht wird an der oberen Kante eine Linie gezeichnet (man muss schon genau hinsehen).
//
// Verändern wir jetzt den Wert der Variable und führen dieselben Anweisungen nochmal durch, ergibt sich ein anderes Ergebnis.
// +
wert = 255; // Die Variable wird nicht nochmal deklariert!
zeichnung.setColor(wert,0,255); // Genau das gleiche wie oben.
zeichnung.drawLine(0,wert,255,wert);
// -
// Durch die Veränderung der Variable haben dieselben Anweisungen eine andere Wirkung. Und diesen Effekt kann man gezielt nutzen. Wir können jetzt z.B. eine <a class="perspective" href="/user-redirect/algoviz/lessons/02_Grundlagen/13_Zaehlschleifen.ipynb">Zählschleife</a> verwenden, die alle möglichen Werte der Variable "durchzählt".
// +
zeichnung.clear();
// The loop variable steps through every red value from 0 to 255, so the same
// two statements draw a differently coloured line on each iteration.
for ( int rot = 0; rot < 256; rot = rot+1 ) {
    zeichnung.setColor(rot,0,255);
    zeichnung.drawLine(0,rot,255,rot);
}
// -
// Die Veränderung des Variablenwerts geschieht in der dritten Komponente der Zählschleife. Sie können mal ausprobieren, was passiert, wenn Sie damit etwas spielen.
//
// <div class="task">
// <h3>Aufgabe</h3>
// <div><p>
// Zeichnen Sie einen Farbverlauf, wie er unten dargestellt ist. Eigentlich müssen Sie nur wenig ändern.</p><br/>
// <center><img src="../../img/Farbverlauf.png" width=60></img></center>
// </div>
// </div>
// +
// ...
// -
// # Bilder
//
// Sie können auch Bilder verwenden. Diejenigen, die im Verzeichnis `img` liegen, können Sie mit dem Pfad `/user-redirect/algoviz/img/<name>` verwenden.
for ( int winkel = 0; winkel <= 1440; winkel++) {
    zeichnung.clear();
    // Diese Zeile ist die Magie. Versuchen Sie sie mal zu verstehen.
    // SVG transform "rotate(angle,cx,cy)": rotates the drawing around (50,50).
    zeichnung.setTransform("rotate(" + std::to_string(winkel) + ",50,50)");
    // Hier wird das Bild gezeichnet.
    zeichnung.drawImage("/user-redirect/algoviz/img/tardis.png",0,0,100,100);
}
// Das "Flackern" entsteht durch das ständige Löschen des Bildes. Wir werden bald Möglichkeiten kennen lernen das Ganze etwas eleganter zu machen.
// Wollen Sie eigenen Bilder verwenden, müssen Sie sie in Ihr `myNotebooks` Verzeichnis hochladen. Dann können Sie sie über `/user-redirect/algoviz/myNotebooks/<name>` verwenden (ja ich weiß, der Pfad ist nicht schön).
// <div class="task">
// <h3>Aufgabe</h3>
// <div>
// Laden Sie ein Bild in Ihr <tt>myNotebooks</tt> Verzeichnis hoch und lassen Sie es über das Zeichenbrett wandern.
// </div>
// </div>
// +
// ....
// -
// <div class="followup">
// <h3>Was folgen könnte</h3>
// <div>
// Bevor es mit den <a class="perspective" href="/user-redirect/algoviz/lessons/02_Grundlagen/11_Entscheidungen.ipynb">Entscheidungsanweisungen</a>
// weitergeht, sollten Sie sich schon mal mit <a class="followup" href="/user-redirect/algoviz/lessons/02_Grundlagen/14_ErsterKontaktMitObjekten.ipynb">Objekten</a> beschäftigen.
// </div>
// </div>
| lessons/02_Grundlagen/10_AlgoViz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Doc2Vec for Document Clustering
#
# Dataset - http://mlg.ucd.ie/datasets/bbc.html
#
# Citation - <NAME> and <NAME>. "Practical Solutions to the Problem of Diagonal Dominance in Kernel Document Clustering", Proc. ICML 2006.
#
# Consists of 2225 documents from the BBC news website corresponding to stories in five topical areas from 2004-2005.
# Class Labels: 5 (business, entertainment, politics, sport, tech)
# - 510 business
# - 386 entertainment
# - 417 politics
# - 511 sports
# - 401 tech
# +
from pathlib import Path
# Module-level corpus state filled in by buildDocument():
documents = []          # cleaned document texts, in load order
documentReference = []  # per-document [category, file path] records
documentCount = 0       # number of documents loaded so far
def dataCleanup(text):
    """Lower-case text, flatten newlines, and pad punctuation with spaces.

    Padding each punctuation mark with spaces makes it a standalone token
    for the downstream whitespace-based processing.
    """
    cleaned = text.lower().replace('\n', ' ')
    for mark in ('.', '"', ',', '(', ')', '!', '?', ';', ':'):
        cleaned = cleaned.replace(mark, ' ' + mark + ' ')
    return cleaned
def buildDocument(folder, category):
    """Recursively read every file under `folder`, clean the text, and append
    it to the module-level corpus, recording [category, path] per document.

    Args:
        folder: Directory whose files are read recursively.
        category: Label stored alongside each document for later evaluation.
    """
    allDocs = Path(folder).glob('**/*')
    global documentCount
    for doc in allDocs:
        # Bug fix: use a context manager -- the original never closed the
        # file, leaking one handle per document.
        with open(doc, "r") as file:
            data = file.read()
        cleanData = dataCleanup(data)
        documents.append(cleanData)
        # Keep a record of category and filename
        documentReference.append([category, doc])
        documentCount = documentCount + 1
    # NOTE(review): '**/*' also yields sub-directories, on which open() would
    # fail; this assumes the corpus folders contain only plain files.
buildDocument("bbc-fulltext/business", "business")
buildDocument("bbc-fulltext/entertainment", "entertainment")
buildDocument("bbc-fulltext/politics", "politics")
buildDocument("bbc-fulltext/sport", "sport")
buildDocument("bbc-fulltext/tech", "tech")
# +
from gensim.models import doc2vec
from gensim.models.doc2vec import TaggedDocument
from collections import namedtuple
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk import download
# One-off NLTK resource downloads (cached locally after the first run).
download('stopwords') # For stopword removal
download('punkt') # For tokenizer
def removeStopwords(text):
    """Drop English stopwords and non-alphabetic tokens from a token list.

    Removing stopwords improved results with BBC news data, but test with
    and without stop words.
    """
    blocked = stopwords.words('english')
    return [token for token in text if token not in blocked and token.isalpha()]
def text2tokens(text):
    """Lower-case `text` and return its alphabetic, non-stopword tokens."""
    raw_tokens = word_tokenize(text.lower())
    return removeStopwords(raw_tokens)
doc2vec_corpus = []
# Each document becomes a TaggedDocument whose single tag is its corpus index.
for i, text in enumerate(documents):
    words = text2tokens(text)
    tag = [i]
    doc2vec_corpus.append(TaggedDocument(words=words, tags=tag))
# The model parameters below can impact the outcome.
# 1. Size - Vector size. 100 worked best with the BBC news data set. Tried various between 50 to 300 before choosing 100.
# 2. Window - context window, i.e. the number of words on the left and right of a word that
# defines a "context" for learning the meaning of the word. Context window of 1 gave the best result (tried between 1 and 10)
# ..... probably due to the very small size of documents / vocabulary.
# NOTE(review): `size` and `iter` are gensim 3.x parameter names; gensim 4
# renamed them to `vector_size` and `epochs` -- confirm the pinned version.
model = doc2vec.Doc2Vec(doc2vec_corpus, size = 100, negative = 5, window = 1, iter = 20, min_count = 2, workers = 4, alpha=0.025, min_alpha=0.025)
model.save("bbc_news_doc2vec.model")
print("Doc2Vec Model Saved")
# -
# Collect the learned document vectors in corpus order.
docVectors = []
count = 0
while (count < documentCount):
    docVectors.append(model.docvecs[count])
    count = count + 1
# +
from sklearn.cluster import KMeans

# One cluster per known BBC category; fixed random_state for reproducibility.
num_clusters = 5
km = KMeans(n_clusters = num_clusters, random_state = 99999)
km.fit(docVectors)
clusters = km.labels_.tolist()
# +
import nltk
from nltk.stem import WordNetLemmatizer
def wordFrequencyFilter(text, max_word_count, freq):
    """Return a space-joined string of the most frequent nouns in `text`.

    Args:
        text: Raw document text.
        max_word_count: How many of the most common noun lemmas to consider.
        freq: Minimum frequency a lemma must reach to be kept.

    Returns:
        A string (starting with a space) containing the kept lemmas.
    """
    text = text.lower()
    tokens = nltk.word_tokenize(text)
    tagged = nltk.pos_tag(tokens)
    lemmatizer = WordNetLemmatizer()
    wordList = []
    # Keep only nouns (common/proper, singular/plural), lemmatised.
    for tag in tagged:
        if tag[1] in ('NN', 'NNS', 'NNP', 'NNPS'):
            wordList.append(lemmatizer.lemmatize(tag[0]))
    freqDist = nltk.FreqDist(wordList)
    common = freqDist.most_common(max_word_count)
    mainText = ''
    for word, count in common:
        # Keep only alphabetic words longer than 2 characters whose frequency
        # is at least `freq`. (The original spelled this as the double
        # negative `(count < freq) == False` and its comment wrongly claimed
        # high-frequency words were excluded.)
        if word.isalpha() and len(word) > 2 and count >= freq:
            mainText = mainText + ' ' + word
    return mainText
def extractKeywords(file):
    """Return the key nouns of `file`: up to the 10 most frequent, with a
    minimum frequency of 2."""
    # Bug fix: context manager closes the handle (the original leaked it).
    with open(file, 'r') as myFile:
        data = myFile.read()
    # 10 most frequent words, minimum frequency 2
    return wordFrequencyFilter(data, 10, 2)
# Build one row per document: cluster id, true category, file path, keywords.
myAnalysis = []
count = 0
while (count < documentCount):
    # Cluster, Original Category, File Reference, Keywords
    myAnalysis.append([])
    myAnalysis[count].append(clusters[count])
    myAnalysis[count].append(documentReference[count][0])
    myAnalysis[count].append(documentReference[count][1])
    keywords = extractKeywords(documentReference[count][1])
    myAnalysis[count].append(keywords)
    count = count + 1

import pandas as pd

myLabels = ['Cluster', 'Category', 'File', 'Keywords']
df = pd.DataFrame(myAnalysis, columns=myLabels)
# -
outputFile = "ClusterAnalysis.csv"
df = df.sort_values('Cluster', ascending = True)
df.to_csv(outputFile, sep='\t', encoding='utf-8')
# ### Display the key phrases from each cluster
# +
def clusterKeyPatterns(text, wordcount):
    """Return a space-joined string of the `wordcount` most common tokens in `text`."""
    frequencies = nltk.FreqDist(nltk.word_tokenize(text))
    joined = ''
    for token, _count in frequencies.most_common(wordcount):
        joined = joined + ' ' + token
    return joined
# Accumulate all per-document keyword strings, grouped by cluster id.
clusterKeywords = []
for num in range(0, num_clusters):
    clusterKeywords.append([])
clusterKeywords[0].append('')
# NOTE(review): each entry starts as a list but is overwritten with a string
# below; str() of a list embeds brackets/quotes into the accumulated text --
# confirm this is intentional before relying on the exact keyword strings.
for num in range(0, documentCount):
    tmpStr = str(clusterKeywords[myAnalysis[num][0]]) + ' ' + str(myAnalysis[num][3])
    clusterKeywords[myAnalysis[num][0]] = tmpStr
for num in range(0, num_clusters):
    # Obtain the top xx words in each cluster
    listofwords = clusterKeyPatterns(str(clusterKeywords[num]), 50)
    print("Key words in cluster - ", num)
    print(listofwords, "\n")
# -
# The results above are excellent — the clusters align closely with the original BBC categories.
| Code/Chapter 6 - Keywords Summarisation Classification Clustering/Document Clustering/Document Clustering - Doc2Vec - FINAL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 2: D to P Excitation in Helium with Atomic Doppler Profile Averaging, Gaussian Beam Averaging, and Rotation
#
# This tutorial demonstrates how to set up a system and time evolve the system, averaging over the Gaussian beam profile and the Doppler profile of the atoms. We also use the Wigner-D matrices to rotate the system to a different reference frame and show that this model is physically consistent in all reference frames.
#
# Start by importing the libraries we will be using.
# +
import LASED as las
import numpy as np
import plotly.graph_objects as go
# -
# ## Setting up the system
#
# To set up a Laser-Atom system you must first declare the atomic states which you want to work with and label them. We are going to set up an example system for a D-state to a P-state transition for helium where the P-state is a high principle quantum number Rydberg state. Therefore, we will assume that the wavelength of this fictitious transition is the ionisation energy of helium as this is a very high lying Rydberg state. This system is for example purposes only and does not exist.
# Display the level diagram of the modelled transition (requires the
# LevelDiagrams folder shipped alongside the tutorials).
from IPython.display import Image
Image(filename = "LevelDiagrams/HeRydbergDtoP.png")
# A level diagram of the system we will model is shown above.
#
# The `LaserAtomSystem` is setup in the code block below. As the excited state is high-lying its non-radiative lifetime will be comparable to its radiative lifetime. This non-radiative decay to other states outside the system is characterised by the arrow from the upper states to state $|f\rangle$. This can be input into the `LaserAtomSystem` using the keyword `tau_f`.
#
# We will excite this system with simultaneous right-hand circular and left-hand circular polarised light in the natural frame with the laser beam travelling down the quantisation axis. This will be linearly-polarised light in the collision frame where the transverse E-field of the laser is oscillating along the quantisation axis. Therefore, we must set `Q = [-1, 1]` and set the `rabi_factors` to `[1, -1]` as noted in Tutorial 1 and scale the Rabi frequency in the system by using `rabi_scaling`. In this case set it to $1/{\sqrt{2}}$.
#
# Then, we create the sub-states and put them into either the ground or excited states.
# +
# System parameters
n = 8 # number of energy levels in system
laser_wavelength = 827e-9 # wavelength of transition
w_e = las.angularFreq(laser_wavelength)  # angular frequency of the transition
# Create states
# Lower level: D-state (L = 2) Zeeman sub-states m = -2 .. +2, all at w = 0.
one = las.State(label = 1, w = 0, m = -2, L = 2, S = 0)
two = las.State(label = 2, w = 0, m = -1, L = 2, S = 0)
three = las.State(label = 3, w = 0, m = 0, L = 2, S = 0)
four = las.State(label = 4, w = 0, m = 1, L = 2, S = 0)
five = las.State(label = 5, w = 0, m = 2, L = 2, S = 0)
# Upper level: P-state (L = 1) sub-states m = -1 .. +1 at the transition frequency.
six = las.State(label = 6, w = w_e, m = -1, L = 1, S = 0)
seven = las.State(label = 7, w = w_e, m = 0, L = 1, S = 0)
eight = las.State(label = 8, w = w_e, m = 1, L = 1, S = 0)
G = [one, two, three, four, five] # ground states
E = [six, seven, eight] # excited states
Q = [-1, 1] # laser radiation polarisation
# Simultaneous sigma+ and sigma- excitation: scale the Rabi frequency by
# 1/sqrt(2) and give the two polarisation components opposite signs, as
# explained in the markdown above.
rabi_scaling_he = 1/np.sqrt(2)
rabi_factors_he = [1, -1]
tau = 100e3 # lifetime in ns (estimated)
tau_f = 100e3 # non-radiative lifetime of rydberg upper state to other states in ns (estimated)
# -
# ### TEM$_{00}$ Gaussian Beam Profile
#
# A laser beam usually does not have a flat beam profile (known as a "top-hat" distribution) in intensity. As the beam has spatial variation in intensity the atoms being excited experience a non-uniform time evolution. To model the effects of the beam profile the beam can be split up into regions of approximate uniform intensity and each spatial portion of the beam is used to time-evolve a part of the system being illuminated. Then, each part of the system is summed together and normalised which results in the entire system being modelled.
#
# `LASED` supports the modelling of a Gaussian TEM$_{00}$ laser beam profile. The 2D standard deviation of the Gaussian must be declared with keyword `r_sigma` when performing the `timeEvolution()` of the `LaserAtomSystem`. The number of portions which the beam is split into must be chosen as well. This is declared with the keyword `n_intensity` when using `timeEvolution`. If these are left out then a "top-hat" distribution of laser intensity is assumed. Also, to use the Gaussian averaging over the beam profile, the keyword `laser_power` must be defined in the `LaserAtomSystem`. This is the total power which the laser delivers as opposed to the intensity over a mm$^2$.
#
# Below, the laser parameters are declared for this system.
#
# **Note**: If using this averaging the model will loop over the time evolution with the number defined in `n_intensity` so the model will be much slower if a larger number is input. The larger number also results in a more accurate representation of the beam profile. Usually, a `n_intensity` of around 50 is enough for most cases.
# Laser parameters
laser_power = 100 # total laser power in mW (the markdown above notes this is power, not an intensity)
r_sigma = 0.75 # radial distance to 2D standard deviation in mm
n_intensity = 20  # number of beam portions used in the Gaussian beam-profile average
# ### Doppler Detuning from the Atomic Velocity Profile
#
# When using `LASED` the atoms being excited are usually defined as being stationary unless specified. If the atoms are not stationary and have some velocity with respect to the laser beam then the frequency of the laser is detuned from resonance due to the fixed velocity. In experiments an atomic beam is sometimes used to provide the atoms to some interaction region where the laser-excitation takes place. If a velocity component is in (or opposite to) the direction of the laser beam then detuning occurs. The velocity component can be specified using the `atomic_velocity` keyword in the `timeEvolution()`. This is specified in units of m/s in the direction of the laser beam. If the direction is opposite to this then the `atomic_velocity` is negative.
#
# Detuning can also occur due to the Maxwell-Boltzmann distribution of atomic velocities. This results in a Gaussian detuning profile. This can be modelled by splitting the detuning due to the velocity distribution of atoms into uniform sections and time-evolving the system with these uniform detunings and then summing up the time evolution for each detuning and normalising. The detuning due to this Doppler broadening can be modelled in `LASED` by defining a `doppler_width` in Grad/s in `timeEvolution()` and a list with all the detunings to be used for the averaging process called `doppler_detunings`. The more elements in `doppler_detunings` the more the time evolution of the system is calculated and the more time it will take to model the system.
#
# **Note**: When using _both_ Doppler and Gaussian beam averaging the number of times the system is time evolved will be `n_intensity` multiplied by the number of elements in `doppler_detunings`.
# Doppler detuning parameters: sample the Gaussian Doppler profile on a
# uniform grid of detunings spanning +/- 3 standard deviations.
doppler_width = 0.1 * 2 * np.pi  # doppler width in Grad/s
doppler_steps = 20               # number of detuning samples
delta_upper = 3 * doppler_width
delta_lower = -delta_upper
doppler_detunings = np.linspace(delta_lower, delta_upper, num = doppler_steps)
atomic_velocity = 0  # velocity component of the atoms along the laser beam in m/s
# Set the simulation time for 1000 ns every 1 ns as follows:
# Simulation parameters: evolve for 1000 ns, sampled at 1000 points.
start_time, stop_time, time_steps = 0, 1000, 1000  # ns, ns, samples
time = np.linspace(start_time, stop_time, num = time_steps)
# Create the `LaserAtomSystem` object. To set the initial conditions of the density matrix at t = 0 ns $\rho(t = 0)$ we can use the `setRho_0(s1, s2, val)` where `s1` and `s2` are `State` objects denoting the element of the density matrix to be set as $\rho_{s1,s2}$ and `val` denotes the value assigned to this element.
#
# For this system we have set the populations of states $|1\rangle$, $|3\rangle$, and $|5\rangle$ as 1/3. So the density matrix elements $\rho_{11}$ = $\rho_{33}$ = $\rho_{55}$ = 1/3.
# Build the laser-atom system; tau_f adds non-radiative decay of the upper
# states out of the modelled system.
# NOTE(review): laser_intensity and laser_power are both passed the same
# value here, while the tutorial text says only laser_power is needed for
# Gaussian beam averaging — confirm against the LASED API whether the
# laser_intensity argument is redundant.
helium_system = las.LaserAtomSystem(E, G, tau, Q, laser_wavelength, tau_f = tau_f,
                                    laser_intensity = laser_power, laser_power = laser_power,
                                    rabi_scaling = rabi_scaling_he, rabi_factors = rabi_factors_he)
# Initial condition rho(t = 0): equal populations of 1/3 in the
# m = -2, 0, +2 ground sub-states.
helium_system.setRho_0(one, one, 1/3)
helium_system.setRho_0(three, three, 1/3)
helium_system.setRho_0(five, five, 1/3)
# Time evolve the system.
# Time evolve with both Gaussian beam-profile averaging and Doppler
# averaging enabled; the solver runs one evolution per (beam portion,
# detuning) pair, i.e. n_intensity * len(doppler_detunings) evolutions.
helium_system.timeEvolution(time,
                            r_sigma = r_sigma,
                            n_beam_averaging = n_intensity,
                            doppler_width = doppler_width,
                            doppler_detunings = doppler_detunings,
                            beam_profile_averaging = True,
                            doppler_averaging = True)
# Now, we can plot the populations using `Plotly` (or any other plotting package).
# +
las_sys = helium_system
# Magnitude of each excited-state density matrix element over time.
rho_66 = [abs(rho) for rho in las_sys.Rho_t(six, six)]
rho_77 = [abs(rho) for rho in las_sys.Rho_t(seven, seven)]
rho_88 = [abs(rho) for rho in las_sys.Rho_t(eight, eight)]
# One (data, mode, label, colour, symbol) entry per trace, in the same
# order the traces were originally added.
upper_traces = [
    (rho_66, 'markers', "Rho_66 (Upper State)", 'red', 'x'),
    (rho_77, 'lines', "Rho_77(Upper State)", 'blue', 'square'),
    (rho_88, 'lines', "Rho_88(Upper State)", 'green', 'circle'),
]
fig_upper = go.Figure()
for trace_data, trace_mode, trace_name, trace_colour, trace_symbol in upper_traces:
    fig_upper.add_trace(go.Scatter(x = time,
                                   y = trace_data,
                                   mode = trace_mode,
                                   name = trace_name,
                                   marker = dict(color = trace_colour,
                                                 symbol = trace_symbol)))
fig_upper.update_layout(title = "Upper Atomic Populations: J = 2 to J = 1 Rydberg He, P = 100 mW, σ+ & σ-, 100ns Lifetime",
                        xaxis_title = "Time (ns)",
                        yaxis_title = "Population",
                        font = dict(size = 11))
fig_upper.write_image("SavedPlots/tutorial2-HeFigUpperNatFrame.png")
Image("SavedPlots/tutorial2-HeFigUpperNatFrame.png")
# + tags=[]
# Magnitude of each ground-state density matrix element over time.
rho11 = [abs(rho) for rho in las_sys.Rho_t(one, one)]
rho22 = [abs(rho) for rho in las_sys.Rho_t(two, two)]
rho33 = [abs(rho) for rho in las_sys.Rho_t(three, three)]
rho44 = [abs(rho) for rho in las_sys.Rho_t(four, four)]
rho55 = [abs(rho) for rho in las_sys.Rho_t(five, five)]
# One (data, mode, label, colour, symbol) entry per trace, in the same
# order the traces were originally added.
lower_traces = [
    (rho11, 'lines', "Rho_11 (Lower State)", 'red', 'circle'),
    (rho22, 'markers', "Rho_22 (Lower State)", 'blue', 'x'),
    (rho33, 'lines', "Rho_33 (Lower State)", 'purple', 'x'),
    (rho44, 'lines', "Rho_44 (Lower State)", 'gold', 'x'),
    (rho55, 'lines', "Rho_55 (Lower State)", 'green', 'square'),
]
fig_lower = go.Figure()
for trace_data, trace_mode, trace_name, trace_colour, trace_symbol in lower_traces:
    fig_lower.add_trace(go.Scatter(x = time,
                                   y = trace_data,
                                   mode = trace_mode,
                                   name = trace_name,
                                   marker = dict(color = trace_colour,
                                                 symbol = trace_symbol)))
fig_lower.update_layout(title = "Lower Atomic Populations",
                        xaxis_title = "Time (ns)",
                        yaxis_title = "Population")
fig_lower.write_image("SavedPlots/tutorial2-HeFigLowerNatFrame.png")
Image("SavedPlots/tutorial2-HeFigLowerNatFrame.png")
# -
# ## Rotation
#
# With `LASED` we can rotate density matrices to different reference frames using the Wigner-D matrix.
#
# **Note**: The Wigner-D matrix is only defined for J-representation (when isospin I = 0) and therefore when in F-representation the rotation may not be correct. Also, density matrices can only be rotated for single atomic states so the optical coherences between ground and excited states cannot be rotated. To obtain optical coherences in a different reference frame the initial density matrix (at t = 0) must be rotated and time evolved with the polarisation rotated to that frame.
#
# The rotation is defined by Euler angles `alpha`, `beta`, and `gamma`. The frame is rotated with each angle in succession so that:
# * `alpha` is the rotation (in radians) around the initial z-axis to obtain the new frame Z'
# * `beta` is the rotation (in radians) about the new y'-axis to obtain the new frame Z''
# * `gamma` is the rotation (in radians) about the new z''-axis to obtain the final frame
#
# In `LASED` to rotate the initial density matrix use `rotateRho_0(alpha, beta, gamma)` on the `LaserAtomSystem`. In this helium system the atom is changed to be defined in the collision frame from the natural frame so the polarisation is changed to be purely linear with `Q = [0]` and scaled to 1.
#
# **Note**: To simulate a linear polarisation with an angle with respect to the x-axis we can initialise the density matrix in that frame, rotate to the collision frame (with the polarisation aligned with the x-axis), time evolve the system, and then rotate back to the frame where the polarisation is at an angle.
# Euler angles (ZYZ convention) taking the natural frame into the
# collision frame.
alpha = np.pi/2
beta = np.pi/2
gamma = -np.pi/2
# NOTE(review): this is an alias, not a copy — helium_system_rot and
# helium_system refer to the same object, so the mutations below also
# affect helium_system.
helium_system_rot = helium_system
helium_system_rot.rotateRho_0(alpha, beta, gamma)
# In the collision frame the light is purely linearly polarised, so the
# two-component scaling used in the natural frame is reset to 1.
# (Fixed: the original set rabi_scaling/rabi_factors through the
# helium_system name — same object, but inconsistent with the lines above.)
helium_system_rot.Q = [0]
helium_system_rot.rabi_scaling = 1
helium_system_rot.rabi_factors = [1]
# Now, we can time evolve this system in this new reference frame.
# Print a summary of the rotated system, then time evolve it in the
# collision frame with the same averaging settings as before.
print(helium_system_rot)
helium_system_rot.timeEvolution(time,
                                r_sigma = r_sigma,
                                n_beam_averaging = n_intensity,
                                doppler_width = doppler_width,
                                doppler_detunings = doppler_detunings,
                                beam_profile_averaging = True,
                                doppler_averaging = True)
# Now we can plot what the populations look like.
# +
las_sys = helium_system_rot
# Magnitude of each excited-state density matrix element over time.
rho_66 = [abs(rho) for rho in las_sys.Rho_t(six, six)]
rho_77 = [abs(rho) for rho in las_sys.Rho_t(seven, seven)]
rho_88 = [abs(rho) for rho in las_sys.Rho_t(eight, eight)]
# One (data, mode, label, colour, symbol) entry per trace, in the same
# order the traces were originally added.
upper_traces = [
    (rho_66, 'markers', "Rho_66 (Upper State)", 'red', 'x'),
    (rho_77, 'lines', "Rho_77(Upper State)", 'blue', 'square'),
    (rho_88, 'lines', "Rho_88(Upper State)", 'green', 'circle'),
]
fig_upper = go.Figure()
for trace_data, trace_mode, trace_name, trace_colour, trace_symbol in upper_traces:
    fig_upper.add_trace(go.Scatter(x = time,
                                   y = trace_data,
                                   mode = trace_mode,
                                   name = trace_name,
                                   marker = dict(color = trace_colour,
                                                 symbol = trace_symbol)))
fig_upper.update_layout(title = "Laser Frame Upper Atomic Populations: J = 2 to J = 1 Rydberg He, P = 100 mW, π, 100ns Lifetime",
                        xaxis_title = "Time (ns)",
                        yaxis_title = "Population",
                        font = dict(size = 11))
fig_upper.write_image("SavedPlots/tutorial2-HeFigUpperCollFrame.png")
Image("SavedPlots/tutorial2-HeFigUpperCollFrame.png")
# +
# Magnitude of each ground-state density matrix element over time.
rho11 = [abs(rho) for rho in las_sys.Rho_t(one, one)]
rho22 = [abs(rho) for rho in las_sys.Rho_t(two, two)]
rho33 = [abs(rho) for rho in las_sys.Rho_t(three, three)]
rho44 = [abs(rho) for rho in las_sys.Rho_t(four, four)]
rho55 = [abs(rho) for rho in las_sys.Rho_t(five, five)]
# One (data, mode, label, colour, symbol) entry per trace, in the same
# order the traces were originally added.
lower_traces = [
    (rho11, 'lines', "Rho_11 (Lower State)", 'red', 'circle'),
    (rho22, 'markers', "Rho_22 (Lower State)", 'blue', 'x'),
    (rho33, 'lines', "Rho_33 (Lower State)", 'purple', 'x'),
    (rho44, 'lines', "Rho_44 (Lower State)", 'gold', 'x'),
    (rho55, 'lines', "Rho_55 (Lower State)", 'green', 'square'),
]
fig_lower = go.Figure()
for trace_data, trace_mode, trace_name, trace_colour, trace_symbol in lower_traces:
    fig_lower.add_trace(go.Scatter(x = time,
                                   y = trace_data,
                                   mode = trace_mode,
                                   name = trace_name,
                                   marker = dict(color = trace_colour,
                                                 symbol = trace_symbol)))
fig_lower.update_layout(title = "Lower Atomic Populations",
                        xaxis_title = "Time (ns)",
                        yaxis_title = "Population")
fig_lower.write_image("SavedPlots/tutorial2-HeFigLowerCollFrame.png")
Image("SavedPlots/tutorial2-HeFigLowerCollFrame.png")
# -
# If we rotate the time-evolved density matrix `rho_t` back to the natural frame we should get the same populations as when we evolved in the natural frame as the excitation must be the same in all reference frames. We can do this rotation by using `rotateRho_t` on the `LaserAtomSystem`.
#
# **Note**: A message will be thrown saying that the optical coherences are not rotated.
# Rotate rho_t back to the natural frame.
# NOTE(review): the general inverse of a ZYZ rotation (alpha, beta, gamma)
# is (-gamma, -beta, -alpha); for these particular angles (alpha = -gamma =
# pi/2) the call below coincides with that inverse — presumably why it works
# here. Verify before reusing with other angle choices.
helium_system_rot.rotateRho_t(alpha, -beta, gamma)
print("Natural Frame final populations")
# Now we can plot the results and obtain the same plots as in the first time evolution.
# + tags=[]
las_sys = helium_system_rot
# Magnitude of each excited-state density matrix element over time.
rho_66 = [abs(rho) for rho in las_sys.Rho_t(six, six)]
rho_77 = [abs(rho) for rho in las_sys.Rho_t(seven, seven)]
rho_88 = [abs(rho) for rho in las_sys.Rho_t(eight, eight)]
# One (data, mode, label, colour, symbol) entry per trace, in the same
# order the traces were originally added.
upper_traces = [
    (rho_66, 'markers', "Rho_66 (Upper State)", 'red', 'x'),
    (rho_77, 'lines', "Rho_77(Upper State)", 'blue', 'square'),
    (rho_88, 'lines', "Rho_88(Upper State)", 'green', 'circle'),
]
fig_upper = go.Figure()
for trace_data, trace_mode, trace_name, trace_colour, trace_symbol in upper_traces:
    fig_upper.add_trace(go.Scatter(x = time,
                                   y = trace_data,
                                   mode = trace_mode,
                                   name = trace_name,
                                   marker = dict(color = trace_colour,
                                                 symbol = trace_symbol)))
fig_upper.update_layout(title = "Natural Frame Upper Atomic Populations: J = 2 to J = 1 Rydberg He, P = 100 mW, π, 100ns Lifetime",
                        xaxis_title = "Time (ns)",
                        yaxis_title = "Population",
                        font = dict(
                            size = 11))
fig_upper.write_image("SavedPlots/tutorial2-HeFigUpperRotatedToNatFrame.png")
Image("SavedPlots/tutorial2-HeFigUpperRotatedToNatFrame.png")
# + tags=[]
# Magnitude of each ground-state density matrix element over time.
rho11 = [abs(rho) for rho in las_sys.Rho_t(one, one)]
rho22 = [abs(rho) for rho in las_sys.Rho_t(two, two)]
rho33 = [abs(rho) for rho in las_sys.Rho_t(three, three)]
rho44 = [abs(rho) for rho in las_sys.Rho_t(four, four)]
rho55 = [abs(rho) for rho in las_sys.Rho_t(five, five)]
# One (data, mode, label, colour, symbol) entry per trace, in the same
# order the traces were originally added.
lower_traces = [
    (rho11, 'lines', "Rho_11 (Lower State)", 'red', 'circle'),
    (rho22, 'markers', "Rho_22 (Lower State)", 'blue', 'x'),
    (rho33, 'lines', "Rho_33 (Lower State)", 'purple', 'x'),
    (rho44, 'lines', "Rho_44 (Lower State)", 'gold', 'x'),
    (rho55, 'lines', "Rho_55 (Lower State)", 'green', 'square'),
]
fig_lower = go.Figure()
for trace_data, trace_mode, trace_name, trace_colour, trace_symbol in lower_traces:
    fig_lower.add_trace(go.Scatter(x = time,
                                   y = trace_data,
                                   mode = trace_mode,
                                   name = trace_name,
                                   marker = dict(color = trace_colour,
                                                 symbol = trace_symbol)))
fig_lower.update_layout(title = "Lower Atomic Populations",
                        xaxis_title = "Time (ns)",
                        yaxis_title = "Population")
fig_lower.write_image("SavedPlots/tutorial2-HeFigLowerRotatedToNatFrame.png")
Image("SavedPlots/tutorial2-HeFigLowerRotatedToNatFrame.png")
| docs/source/tutorials/Tutorial2-HeRotation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Decision Tree Data
# This notebook explores data taken from an Access Database and put into a graph format. The data itself represents a series of questions along with possible answers. There is additional data tying these questions and answers together to construct a tree-like structure.
#
# <div align="left">
# <img src="images/example_model.png" alt="Graph Model" width="700px" align="center"/>
# </div>
#
# The data within this notebook includes a sample decision tree revolving around choosing a drink as Starbucks. The questions and answers take us through a series of choices, eventually landing on a drink order.
#
# ## Let's Begin
# If you haven't already, please set up your computer by following instructions in the *README.md* file. To start, we need to set up some initial variables to ensure we can connect to our graph database. We will be able to use these variables later in our notebook.
# +
import os
# NOTE(review): neo4j.v1 is the legacy 1.x driver namespace; current drivers
# expose GraphDatabase directly from `neo4j` — confirm the installed version.
from neo4j.v1 import GraphDatabase
# Connect to our Graph database, ensure connectivity, and store connection in variable.
graph = GraphDatabase.driver("bolt://localhost", auth=("neo4j", "<PASSWORD>"));
# Set up a local path reference
rel_path = os.getcwd()
# If you want to clear your database and start fresh, uncomment the line below.
# BE SURE TO CHECK WHAT DATABASE YOU ARE RUNNING THIS AGAINST.
# MORE CAPITAL LETTERS TO EMPHASIZE THE POINT ABOVE.
# with graph.session() as session: print(session.run("MATCH (d) DETACH DELETE (d)").value())
# -
# ## Data CSV Files
#
# Here we are setting up variables pointing to the CSV files we have stored on our machine.
# These files contain all of the potential question, answers, and connections between nodes
# Absolute paths to the CSV exports holding the questions, the answers, and
# the links between them (all expected in the current working directory).
questions_file = os.path.join(rel_path, "questions.csv")
answers_file = os.path.join(rel_path, "answers.csv")
relationships_file = os.path.join(rel_path, "relationships.csv")
# Let's see what the structure of the data looks like by using the LOAD CSV comment in Neo4J. We will load each file and show the row of data as an example.
#
# **NOTE**: If you get an error running the command below, try commenting out the `dbms.directories.import` line in your graph databases configuration file. You can access settings by clicking _Manage_ on your database in Neo4J and selecting the settings tab. Make sure to uncomment the line when you are done as this is not secure.
# Preview the first five rows of each CSV file through Neo4j's LOAD CSV.
csv_query = """LOAD CSV FROM $file AS row RETURN row LIMIT 5 """
samples = (
    ("questions", questions_file),
    ("answers", answers_file),
    ("relationships", relationships_file),
)
with graph.session() as session:
    for label, csv_path in samples:
        print("Example " + label + ":")
        display(session.run(csv_query, {"file": "file:" + csv_path}).value())
# The question and answer files contain a simple list of IDs and values. The relationships file contains pointer IDs to and from either a question or answer. The third column in the relationships file signifies if the start node is a question.
# ## Importing Data
#
# Now we need to take the data inside our CSV files and connect them in a graph database. The following code will run through all files and create the nodes and relationships. We will rely on Python to open our CSV file and loop through each line and entry in our file. Note - there are various ways of doing this. This method should not be used in a production environment demanding performance. Try using the [`LOAD CSV`](https://neo4j.com/blog/bulk-data-import-neo4j-3-0/) command for bulk data importing.
#
# Let this block run until you see 'Data Loaded!'. It should only take a few seconds due to the low volume of data. You can run this as many times as you want. The queries generated utilize `MERGE` to ensure it only creates a node when it does not find one matching the properties list.
# +
from pandas import *
from string import Template
import multiprocessing.dummy as mp
q_csv = read_csv(questions_file, header=None);
a_csv = read_csv(answers_file, header=None);
r_csv = read_csv(relationships_file, header=None);
q_template = 'MERGE (n:Question { id: $id, value: "$val" }) ';
a_template = 'MERGE (n:Answer { id: $id, value: "$val" }) ';
q_to_a_template = 'MATCH (q:Question { id: $from_id }) MATCH (a:Answer { id: $to_id }) MERGE (q)-[:IS_CLASSIFIED_BY]->(a) ';
a_to_q_template = 'MATCH (q:Question { id: $to_id }) MATCH (a:Answer { id: $from_id }) MERGE (a)-[:RESULTS_IN]->(q) ';
create_queries = [];
relate_queries = [];
for i, row in enumerate(q_csv.values):
create_queries.append(Template(q_template).substitute(id=row[0], val=row[1]));
for i, row in enumerate(a_csv.values):
create_queries.append(Template(a_template).substitute(id=row[0], val=row[1]));
for i, row in enumerate(r_csv.values):
if row[2] is True:
relate_queries.append(Template(q_to_a_template).substitute(from_id=row[0], to_id=row[1]));
else:
relate_queries.append(Template(a_to_q_template).substitute(from_id=row[0], to_id=row[1]));
print('Queries created... Running.');
with graph.session() as session:
display(session.run("CREATE INDEX ON :Question(id,value)").summary().counters);
display(session.run("CREATE INDEX ON :Answer(id,value)").summary().counters);
display(session.run("CREATE CONSTRAINT ON (q:Question) ASSERT q.id IS UNIQUE").summary().counters);
display(session.run("CREATE CONSTRAINT ON (a:Answer) ASSERT a.id IS UNIQUE").summary().counters);
def run_queries(q):
with graph.session() as session:
for i, query in enumerate(q):
display(session.run(query).summary().counters)
run_queries(create_queries);
run_queries(relate_queries);
print('Data Loaded!');
# -
# ### Decision Tree Loaded
#
# We should now have our entire dataset loaded into our graph database. Let's run a quick snippet of code to check our import. Access your Neo4J browser by selecting 'Manage' on your database and clicking the 'Open Browser' button.
#
# Try running the following query: `MATCH (n) RETURN n`. This query gives us everything in the database, nodes and relationships included.
#
# You should get output resembling the following:
#
# <div align="left">
# <img src="images/data_loaded.png" alt="Graph Model" width="700px" align="center"/>
# </div>
# #### What's Next?
#
# Now that we have our base decision tree imported, we can start asking the data questions.
#
# [Go to the next module >>](2%20-%20Exploring%20the%20Decision%20Tree.ipynb)
| 1 - Loading Decision Tree Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
sentence = "I visited the US from the UK on 22-10-18"
def normalize(text):
    """Expand country abbreviations and the two-digit year suffix in *text*."""
    # Replacements are applied in order, exactly as the chained calls did.
    for abbreviation, expansion in (("US", "United States"),
                                    ("UK", "United Kingdom"),
                                    ("-18", "-2018")):
        text = text.replace(abbreviation, expansion)
    return text
# Normalise the sample sentence, then a second ad-hoc example.
normalized_sentence = normalize(sentence)
print(normalized_sentence)
normalized_sentence = normalize('The US and the UK are two superpowers')
print(normalized_sentence)
| Exercise05/Exercise05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 2020년 3월 29일 일요일
# ### 백준 : 11478번 서로 다른 부분 문자열의 개수
# ### 문제 : https://www.acmicpc.net/problem/11478
# ### 블로그 : https://somjang.tistory.com/entry/BaeKJoon-11478%EB%B2%88-%EC%84%9C%EB%A1%9C-%EB%8B%A4%EB%A5%B8-%EB%B6%80%EB%B6%84-%EB%AC%B8%EC%9E%90%EC%97%B4%EC%9D%98-%EA%B0%9C%EC%88%98-Python
# ### 첫번째 시도
# +
# BOJ 11478: print the number of distinct non-empty substrings of the input.
# Improvements: input() already returns str (redundant str() removed), and a
# set comprehension deduplicates directly instead of materialising every
# substring in a list first (same O(n^2) substrings, lower peak memory).
string = input()
substrings = {string[j:j + i + 1]
              for i in range(len(string))       # i+1 = substring length
              for j in range(len(string) - i)}  # j = start index
print(len(substrings))
| DAY 001 ~ 100/DAY052_[BaekJoon] 서로 다른 부분 문자열의 개수 (Python).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # ECEF to LLA Converter
# Convert **Earth-Centered, Earth-Fixed (ECEF)** coordinates to **Latitude, Longitude and Altitude (LLA)** coordinates.
#
# *Note: Geodetic latitude (WGS84) is used here.*
import ostk.physics
# Convenience aliases into the Open Space Toolkit physics package.
LLA = ostk.physics.coordinate.spherical.LLA
Earth = ostk.physics.environment.objects.celestial_bodies.Earth
# ---
# Cartesian ECEF position triplet — presumably metres; TODO confirm units with the data source.
x_ECEF = (2340841.98864643, -1351485.7522754, 5757737.03072958)
# Convert ECEF to geodetic latitude/longitude/altitude (WGS84, per the note above).
lla = LLA.cartesian(x_ECEF, Earth.equatorial_radius, Earth.flattening)
print(lla)
# ---
| notebooks/Coordinate Conversion/ECEF to LLA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic operations to optimize a Birch-Murnaghan EoS
# ## from *static energies* and vibrational frequencies, at different cell volumes, computed at the *ab initio* level
#
#
# This tutorial shows the basic usage of the BMx system to optimize the EoS of a mineral (*pyrope*, in the example) starting from a set of values of *static energy*, at different volumes ($V$), and *frequencies of vibrational normal modes* also computed at different values of the unit cell volume of the crystal.
#
# The calculation is done within the framework of the *Quasi-Harmonic Approximation* (**QHA**).
#
# ## A bit of theory
#
# The *Helmholtz free energy* $F$, as a function of $V$ and $T$ (*temperature*), is computed following its definition in statistical thermodynamics:
#
# $$F=U-TS = -k_BT \log Z$$
#
# where $U$ is the internal energy, $S$ the entropy, $Z$ the *partition function* and $k_B$ the Boltzmann's constant. In turn, $Z$ is defined as:
#
# $$Z = \sum_a e^{-\epsilon_a/k_BT}$$
#
# where $\epsilon_a$ is the energy of the $a^{th}$ *state* of the crystal and the *sum* is extended to all the possible *states*. In particular, at a fixed volume, the contribution of the $i^{th}$ normal mode with frequency $\nu_i$ to the *vibrational* Helmholtz free energy ($F^{vib}_{i}$) is given by
#
# $$F^{vib}_{i} = -k_BT\sum_n e^{-(n+1/2)h\nu_i/k_BT}$$
#
# where $n$ is the vibrational quantum number of the oscillator. The total *vibrational free energy* is:
#
# $$F^{vib}=\sum_i F^{vib}_{i}$$
#
# where the sum is extended to all the normal modes of the crystal. Note that:
#
# $$F = F^{static} + F^{vib} = U^{static} + U^{vib} - TS^{vib}$$
#
# where $U^{static}$ is the so named *static energy* of the crystal ($U^{static}\equiv F^{static}$) that depends on the volume of the cell only.
#
# ***Pressure*** ($P$) at a given volume and temperature is evaluated from its definition in term of free energy:
#
# $$P=-\left(\frac{\partial F}{\partial V}\right)_T = -\left(\frac{\partial U^{static}}{\partial V}\right)_T -\left(\frac{\partial F^{vib}}{\partial V}\right)_T = P^{static}(V) + P^{vib}(V,T)$$
#
# The *vibrational pressure* $P^{vib}$ is here defined as the partial derivative of $F^{vib}$ with respect to $V$ (at $T$ constant). In turn, $F^{vib}$ depends upon $V$ through the variation of the $\nu_i$'s by $V$; in a perfectly *harmonic* crystal, the vibrational frequencies are by definition independent of volume, so that the *vibrational pressure* is exactly *zero* and the total pressure is given by the static contribution only. This is the *harmonic* model.
#
# The *harmonic* model is usually *not* valid as the frequencies normally do depend of volume. The *Quasi harmonic approximation* (**QHA**) is thus generally used. In fact, to compute the *vibrational pressure*, the frequencies variation with the unit cell volume must be estimated, so that $F$ and its derivatives with $V$ can be computed at whatever temperature.
#
# Static energies and frequencies (at different volumes) must be calculated with some quantum-mechanical program; the one that was used here, in the case of pyrope, is [CRYSTAL17]( http://www.crystal.unito.it/index.php).
#
# A possible strategy to get the parameters of a chosen equation of state (EoS) is to calculate the pressure of the crystal (first derivative of $F$ with respect to $V$) in a range of volumes, at a fixed temperature. The EoS is then fitted to the pressure/volume set thus obtained.
#
# The strategy followed in the current project is to directly fit the *volume-integrated* EoS to the $F$ free energy/volume set. In fact, by defining with $P_{EoS}(V)$ the $P(V)$ EoS function (for instance, a Birch-Murnaghan equation), its volume integrated form follows by:
#
# $$P_{EoS}(V)=-\left(\frac{\partial F_{EoS}}{\partial V}\right)_T \ \rightarrow \ {\rm d}F_{EoS}(V)=-P_{EoS}(V){\rm d}V \
# \rightarrow \ F_{EoS}(V) = -\int_{V_0}^{V} P_{EoS}(V){\rm d}V + F_{EoS}(V_0)$$
#
# where $F_{EoS}(V_0)$ is the free energy at the equilibrium volume (at the fixed temperature).
#
#
#
#
# ## *Practice*
#
# 1. Prepare a folder containing;
# * a file of primitive unit cell volumes (in $\mathrm A^3$) and the corresponding energy values ($U^{static}$ in *a.u.*; see the *theoretical* section above). In the example provided, such file is 'pyrope_static.dat': the first column lists volumes; the second one lists energies;
# * a file of volumes at which frequencies of vibrational modes are computed. In the example provided, such files is 'volume.dat';
# * a file of set of frequencies computed at each volume listed in the *volume* file. Such file is named 'pyrope_freq.dat' in the example. Each column of the file lists the frequencies of all the modes computed at a given volume. **Note** that the first column lists the *degeneracy* of the modes. Frequency values are in $\rm{cm}^{-1}$;
# * an *input.txt* file with instructions to run the program. Have a look at the input file provided in the example, with the comments it contains about the meaning of each keyword.
#
#
# 2. For a *quick start*, in the master folder (the one with the program code) do provide a file *quick_start.txt* just containing the name of the data folder (*pyrope* in the example).
#
# Start the calculation by running the ***bm3_thermal_2*** Python code:
# %run bm3_thermal_2.py
# The parameters of a BM3 EoS fitted to the *static* data (*no vibrational contributions* considered) are printed.
#
# Next, issue the command ***eos_temp*** by giving a *temperature* (in K) as argument. This provides the BM3 EoS parameters at the specific temperature.
# BM3 EoS parameters at 300 K, vibrational contribution included
# (frequencies spline-fitted per the FITVOL/SPLINE input directives).
eos_temp(300)
# Besides other points, the output provides information concerning the method followed to deal with the frequencies change with the volume: in the present case, spline fits of $\nu_i(V)$ are performed for each mode $i$, as specified by the *SPLINE* keyword under the *FITVOL* directive in the *input.txt* file.
#
# To deactivate the fitting of the frequencies, so to compute the EoS by using just the set of frequencies provided in input, at the corresponding volumes, use the command ***fit_off***
# Disable frequency fitting: use only the raw computed frequencies.
fit_off()
eos_temp(300,prt=False)
# **Note** the *standard deviations* on each parameter, as determined by the fit, which are higher than those obtained before, with the spline fit of the frequencies.
#
# Beside *spline* fittings of the frequencies, *polynomial* fits can be used by issuing the command ***set_poly*** which accepts the degree of the polynomial as argument. Look at the example below, where the effect of the degree of the polynomial on the computed EoS parameters is evaluated:
# Compare EoS parameters for polynomial frequency fits of degree 2, 3 and 4
# on a 16-point volume grid.
set_poly(2)
set_volume_range(725,769,16)
eos_temp(300,prt=False)
set_poly(3)
eos_temp(300,prt=False)
set_poly(4)
eos_temp(300,prt=True)
# The volume range for the fitting of frequencies and EoS can be changed by the function ***set_volume_range***. The function takes, as arguments, the minimum volume (v_min), the maximum volume (v_max) and the number of points in the range:
# Reload the pyrope dataset, then refit with a degree-3 polynomial
# over a modified 16-point volume range.
reload_input("pyrope")
set_poly(3)
set_volume_range(725,768,16)
eos_temp(300)
# **Note** that the *set_volume_range* function has no effect with *fit_off*. To change the volume range if the frequencies are not fitted, just use the SET keyword in *input.txt*.
#
# EoS optimization can be performed by **keeping *Kp* fixed**. Use of the ***set_fix*** function can be used for the purpose:
# Optimize the EoS with Kp held fixed at 4.00.
set_fix(4.00)
set_poly(4)
eos_temp(300,prt=False)
# The EoS can also be evaluated by fitting the BM3 equation to the pressures computed for a set of volumes (at a given temperature). In turn, such pressures are calculated as the volume derivative of the $F(V,T)$ Helmholtz free energy:
#
# $$P=-\left(\frac{\partial F}{\partial V}\right)_T$$
#
# At each volume, the static energy contribution to $F$ comes from a V-BM3 fit to the static energy values $[U^{static}(V)]$, whereas all the vibrational contributions comes from the *vibrational partition function*.
#
# Setup the calculation by specifying a volume range for the frequency fit (and optionally check the EoS evaluated by the methods outlined above):
# Reset the volume grid and release the Kp constraint.
set_volume_range(725,768,20)
reset_fix()
eos_temp(300,prt=False)
# Then compute the EoS by fitting a BM3 function to the $P(V)$ data; the command to use is ***bulk_dir*** (which wants the temperature as argument):
# BM3 fitted directly to computed P(V) points at 300 K.
bulk_dir(300)
# ### 4^ order Birch-Murnaghan EoS
#
# To fit the *F(V)* curve by a volume integrated 4^ order Birch-Murnaghan EoS, just issue the commands *start_bm4()* and *eos_temp*:
# Switch to a volume-integrated 4th-order Birch-Murnaghan (BM4) fit.
reload_input("pyrope")
set_poly(3)
start_bm4()
eos_temp(300)
# Let's see the effect of a change in the volume range on a BM4 fit:
# Effect of a different volume range on the BM4 parameters.
set_volume_range(725,770,16)
eos_temp(300,prt=False)
# As in the case of BM3, also the BM4 EoS can be computed fitting P(V) instead of F(V) data. The function ***bm4_dir*** is to be used for the purpose:
# BM4 fitted directly to P(V) data at 300 K.
bm4_dir(300)
# Some information concerning volume ranges and fitting conditions can be gathered through the method ***info.show()***:
# Back to BM3; then show the current volume range and fitting settings.
bm4.off()
set_volume_range(725, 772)
eos_temp(300,prt=False)
info.show()
# ### Direct calculation of the bulk modulus
#
# Bulk modulus can also be computed from its definition
#
# $$K=-V\left(\frac{\partial P}{\partial V}\right)_T$$
#
# This is done by the function *bulk_modulus_p* in two different ways:
#
# 1. a V-EoS is fitted to the $F(V)$ data at a fixed temperature; pressures at any given volume are computed from the fitted EoS and $K$ is computed as the derivative of *P* with respect to *V*;
#
#
# 2. pressures are computed as derivatives of the *F* function, at a fixed temperature, and $K$ is obtained as the derivative of *P* with respect to *V*.
#
# At variance with the first case, no reference to any EoS function is present in the second route.
#
# Reload the input and start the calculation:
# Fresh start for the direct bulk-modulus calculations below.
reload_input('pyrope')
set_poly(3)
# The route (1) explained above is followed by specifying the keyword **noeos=False** as argument of the function *bulk_modulus_p* (that is the default). The function requires the temperature (300K in the example) and the pressure (0 GPa in the example) as mandatory arguments:
# Route 1: pressures from the fitted V-EoS (noeos=False), K from dP/dV.
bulk_modulus_p(300,0,noeos=False,prt=True)
# The second route is the choice when the keyword **noeos=True**:
# Route 2: pressures from numerical derivatives of F (noeos=True), no EoS involved.
bulk_modulus_p(300,0, noeos=True, prt=True)
# ### Note on numerical derivatives with respect to *V*
#
# As said above, in a function like *bulk_modulus_p*, pressures are evaluated through numerical derivatives of *F* with respect to *V* (if noeos=True) and, likewise, the same type of numerical derivatives are employed to compute *K*. The computation of numerical derivatives involves a volume range at which the given $f(V)$ is computed, then it is fit by some polynomials of $V$ and, finally, that polynomials is analytically derived. The file *parame.py* contains some convenient parameters to perform such a task. For instance, the power of the polynomials is set to 3 (*degree_v* value, imported in the program at runtime and stored in the *pr.degree_v* variable). The V-range (*pr.delta_v* variable) is also imported as a default value but it is usually recomputed by the program as a function of the equilibrium static volume (*v0*) of the crystal being investigated. To do that, the parameter *pr.v_frac* is used (default value: 0.0015); precisely: *delta=v0\*pr.v_frac*.
#
# The class *volume_delta_class* (whose instance is *vd*) is provided to redefine *delta*, as well as *v_frac* and *v0*, or to set *delta* to the default *pr.delta_v* value. As an example:
# Show the current settings used for numerical V-derivatives, then recompute K.
print("Flag: %r, v0: %6.3f, frac: %6.4f, delta: %5.3f" % (vd.flag, vd.v0, vd.frac, vd.delta))
bulk_modulus_p(300,0,noeos=False,prt=True)
# To use the default V-range, simply invoke the *vd.off* method:
# Revert to the default delta (from parame.py) for the volume derivative.
vd.off()
print("Default delta value: %6.3f" % pr.delta_v)
bulk_modulus_p(300,0,noeos=False,prt=True)
# In this case, a significant difference (about 2GPa) between the two computed $K$ values is observed.
# ### Bulk modulus as a function of temperature
#
# To compute the bulk modulus in a range of temperatures, the function *bulk_modulus_p_serie* can be used; its many arguments can be seen by requesting the help:
help(bulk_modulus_p_serie)
vd.on()
# K(T) over 10-500 K (24 points) at P = 0, with spline smoothing of the results.
bulk_modulus_p_serie(10, 500., 24, 0, noeos=True, fit=True, type='spline', deg=3, smooth=0.01)
# ### Interface to EoSFit
#
# The function ***eosfit_dir*** can be used to write a PVT file to be used as input to the [EoSFit](http://www.rossangel.com/text_eosfit.htm). Such function computes the pressure, at each volume and pressure, as the derivative of the $F$ function with respect to $V$ (at constant $T$).
#
# The function ***eosdir*** can also be used as an interface to EoSFit: it computes the pressure by using a BM3 EoS that was determined by fitting a volume-integrated BM3 EoS to $F(V)$ points.
#
# The set of temperatures at which P(V) data must be computed is specified in *input.txt*, under the keyword TEMP.
# Write a PVT data file readable by the external EoSFit program.
eosfit_dir("pyrope_eosfit_dir.dat")
| basic_eos_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# ## Objectives
# - Read tabular data into an IPython notebook
# - Access columns of the data
# - Isolate subsets of the data
# - Generate plots based on subsetted data
# ## Resources
# Pandas has lots of great documentation, tutorials and walkthroughs.
#
# This tutorial was based largely off of a SWC inspired lesson by <NAME> found at:
# https://nsoontie.github.io/2015-03-05-ubc/novice/python/Pandas-Lesson.html
#
# I adapted other parts from a great tutorial by <NAME>:
# http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/
#
# More can be found in the pandas documentation:
# http://pandas.pydata.org/pandas-docs/stable/
#
# A great youtube walkthrough from PyCon 2015:
# https://www.youtube.com/watch?v=5JnMutdy6Fw
#
# Lastly, a set of recent helpful blogposts for intermediate and advanced users can be found at:
# https://tomaugspurger.github.io/modern-1.html
# ## Working with dataframes
# pandas introduces two new data structures to Python - **Series** and **DataFrame**, both of which are built on top of NumPy.
#
# We can load in a tabular data set as a dataframe in a number of different ways.
# Load the tab-separated Gapminder file (read_table defaults to sep='\t').
df = pd.read_table('./gapminderDataFiveYear.txt')
df
type(df)
# Quick structural overview: dimensions, column names, first/last rows.
df.shape
df.columns
df.head()
df.head(6)
df.tail()
# Column dtypes, memory footprint and per-column summary statistics.
df.info()
df.dtypes
df.describe()
# ## Data selection
# Sometimes we need to look at only parts of the data. For example, we might want to look at the data for a particular country or in a particular year.
# ### Selecting columns
#select multiple columns with a list of column names
df[['year','lifeExp']]
#alternative selection with dot notation won't work if column names have spaces, uncommon characters or leading numbers
df.lifeExp
# ### Selecting rows
#by index location
df.iloc[[0]]
#you can provide a list of index values to select
df.iloc[[0,5,10]]
#or select with the slice notation
df[0:5]
#select by index label
#would require named index
country_index = df.set_index('country')
country_index.loc['Canada']
#boolean indexing
large_pop = df[df['pop'] > 300000000]
large_pop
large_pop['country'].unique()
# You can also chain together multiple criteria for boolean indexing:
# (each condition must be parenthesized — & binds tighter than the comparisons)
multi_criteria = df[(df['country']=='Canada') & (df['year'] > 1990)]
multi_criteria
# ### Q:
# How many unique countries are there in our dataframe? Years?
# ### Exercise
# Write a function 'print_stats()' that will print a given country's life expectancy, population and gdp per capita in a given year. (note data is available only for every 5 years between 1952 and 2007).
def print_stats(df,country,year):
    """ Prints the life expectancy, gdp per capita and population
    of country in year.

    Parameters: df — the Gapminder DataFrame (columns country, year,
    lifeExp, pop, gdpPercap); country — country name; year — census year.
    Prints a message (and returns None) when no matching row exists.
    """
    # Boolean mask: rows matching both the country and the year.
    match = df[(df['country'] == country) & (df['year'] == year)]
    if match.empty:
        print("No data for {} in {}".format(country, year))
        return
    row = match.iloc[0]
    print("{} in {}: life expectancy = {}, population = {}, gdp per capita = {}".format(
        country, year, row['lifeExp'], row['pop'], row['gdpPercap']))
print_stats(df, 'Canada', 2007)
# ## Groupby
# We can use the groupby method to split up the data according to repeated values in each column. For example, group the data by continent. This is helpful if we want to repeat an analysis on each group of data from a continent.
continents = df.groupby('continent')
continents
len(continents)
#helpful way to visualize the groupby object: gives first row of each group
continents.first()
# ### Q:
# List the names of the continents and the number of data points in each.
# ### Q:
# How many unique countries are there grouped together in the Americas continent?
# You can use an aggregate function to get the mean life expectancy in the different continents
continents.lifeExp.mean()
# The previous cell showed mean life expectancy values aggregated over all the years.
#
# Alternatively, we can groupby multiple columns and use an aggregate function to get the mean life expectancy/population/gdpPercap in a specific continent in a specific year of interest:
# NOTE(review): passing np.mean to .agg is deprecated in recent pandas — .mean() is the modern spelling.
df.groupby(['continent', 'year']).agg(np.mean)
# You can also retrieve a particular group with the get_group() command.
continents.get_group('Africa').describe()
# ### Q:
# What is the maximum life expectancy for a country in Asia?
# What country is this? When was the measurement taken? We can figure this out in a few different ways:
continents.get_group('Asia').lifeExp.idxmax()
#idxmax convenience function will return the index with max value
df[df['continent']=='Asia']['lifeExp'].idxmax()
df.loc[803]
# How can we rank each country based on their lifeExp?
#
# Let's create a new column 'lifeExp_rank' that creates an ordered ranking based on the longest life expectancy.
sorted_by_lifeExp = df.sort_values('lifeExp', ascending=False)
# Rows are already in descending lifeExp order, so rank is just 1..n down the frame.
sorted_by_lifeExp['lifeExp_rank'] = np.arange(len(sorted_by_lifeExp)) + 1
#lists all rows in order of lifeExp
sorted_by_lifeExp.head()
# ### split,apply and combine: the power of groupby
# What if we want to rank each country by max life expectancy for each year that data was collected?
#
# Applying a function on grouped selections can simplify this process:
def ranker(df):
    """Attach a 1-based ``lifeExp_rank`` column to *df* (mutated in place)
    and return it.

    Rank 1 is the highest life expectancy; the frame is assumed to be
    already sorted by ``lifeExp`` in descending order.
    """
    df['lifeExp_rank'] = np.arange(1, len(df) + 1)
    return df
#apply the ranking function on a per year basis:
sorted_by_lifeExp = sorted_by_lifeExp.groupby('year').apply(ranker)
# We can now subset my new dataframe by year to view the lifeExp ranks for each year
sorted_by_lifeExp[sorted_by_lifeExp.year == 2002].head()
# We can also subset by country=='Canada' to see how Canada's ranking has changed over the years:
sorted_by_lifeExp[(sorted_by_lifeExp['country']=='Canada')]
# ## Visualization
# Make sure you use the following %magic command to allow for inline plotting
# %matplotlib inline
# We can specify the type of plot with the kind argument. Also, choose the independent and dependent variables with x and y arguments.
#
# * Plot year vs life expectancy in a scatter plot.
df.plot(x='year',y='lifeExp',kind='scatter')
# - Plot gdp per capita vs life expectancy in a scatter plot
# alpha/s/marker tune point transparency, size and shape.
df.plot(x='gdpPercap',y='lifeExp',kind='scatter', alpha = 0.2, s=50, marker='o')
# What's going on with those points on the right?
#
# High gdp per capita, yet not particularly high lifeExp. We can use boolean selection to rapidly subset and check them out.
df[df['gdpPercap'] > 55000]
df.hist(column='lifeExp')
df.lifeExp.plot.hist(bins=200)
df['lifeExp'].plot(kind='kde')
# ## Exercise
# Write a function that will take two countries as an argument and plot the life expectancy vs year for each country on the same axis.
def compare_lifeExp(country1, country2):
    """Plot life expectancy vs year for country1 and country2"""
    # NOTE(review): exercise stub — the body is only a docstring, so the
    # call below produces no plot and returns None.
compare_lifeExp('Canada', 'Mexico')
# ## Exercises
#
# Suzy wrote some code to determine which country had the lowest life expectancy in 1982.
#
# What is wrong with her solution?
spec=['country','lifeExp']
df[df['year']==1982][spec].min()
# We can do a quick check to look up Afghanistan's life expectancy in 1982.
df[(df['year']==1982) & (df['country']=='Afghanistan')]
# This doesn't match with the answer above because the min() function was applied to each column (country and lifeExp) independently.
#
# She should have done this:
# ### Putting it together:
#
# We can use all of these ideas to generate a plot that looks at a subset of the data.
#
# * Plot GDP per capita vs life expectancy in 2007 for each continent.
# One scatter plot per continent for 2007, each on its own figure
# with a shared axis range for easy comparison.
continents = df.groupby(['continent'])
for continent in continents.groups:
    group = continents.get_group(continent)
    group[group['year']==2007].plot(kind='scatter', x='gdpPercap', y='lifeExp', title=continent)
    plt.axis([-10000,60000,30,90])
#Example
# All continents overlaid on a single axis, one colour per continent.
fig,ax = plt.subplots(1,1)
colours = ['m','b','r','g','y']
for continent, colour in zip(continents.groups, colours):
    group = continents.get_group(continent)
    group[group['year']==2007].plot(kind='scatter',x='gdpPercap',y='lifeExp',label=continent,ax=ax,color=colour,alpha=0.5)
ax.set_title(2007)
plt.legend(loc='lower right')
# ### Exercise
# Write a function that takes a country as an argument and plots the life expectancy against GDP per capita for all years in a scatter plot. Also print the year of the minimum/maximum lifeExp and the year of the minimum/maximum GDP per capita.
def compare_gdp_lifeExp(df,country):
    """ plot GDP per capita against life expectancy for a given country.
    print year of min/max gdp per capita and life expectancy
    """
    # NOTE(review): exercise stub — body is only a docstring; the calls
    # below therefore do nothing and return None.
compare_gdp_lifeExp(df,'Afghanistan')
compare_gdp_lifeExp(df,'Canada')
# ## Rapid plotting with seaborn
import seaborn as sns
df.head()
sns.set_context("talk")
# NOTE(review): factorplot and the size= argument were renamed to
# catplot/height in seaborn 0.9 — these calls require an older seaborn.
sns.factorplot(data=df, x='year', y='lifeExp', hue='continent', size=8)
sns.regplot(data=df, x='year', y='gdpPercap', fit_reg=True)
sns.lmplot(data=df, x='year', y='gdpPercap', row='continent')
sns.factorplot(data=df, x='continent', y='gdpPercap', kind='bar')
g = sns.FacetGrid(df, col='continent', row='year')
g.map(plt.hist, 'lifeExp')
| lessons/python/pandas2/UofT-pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bitcoin Mining Pool Classifier - Data from BigQuery
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
from google.cloud import bigquery
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.patheffects as PathEffects
import matplotlib.pylab as pylab
import numpy as np
import pandas as pd
import itertools
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import time
import seaborn as sns
from keras import utils, optimizers
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.losses import binary_crossentropy
# -
# BigQuery client; authenticates with the environment's default credentials.
client = bigquery.Client()
# ## Load data from BigQuery
#
# Note: I would be querying a subset of the data here due to Kaggle's resource constraints
# Per-class row caps, interpolated into the SQL query below via .format().
miner_limit = 5000
non_miner_limit = 5000
# SQL query adapted from https://gist.github.com/allenday/16cf63fb6b3ed59b78903b2d414fe75b
sql = '''
WITH
output_ages AS (
SELECT
ARRAY_TO_STRING(outputs.addresses,',') AS output_ages_address,
MIN(block_timestamp_month) AS output_month_min,
MAX(block_timestamp_month) AS output_month_max
FROM `bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(outputs) AS outputs
GROUP BY output_ages_address
)
,input_ages AS (
SELECT
ARRAY_TO_STRING(inputs.addresses,',') AS input_ages_address,
MIN(block_timestamp_month) AS input_month_min,
MAX(block_timestamp_month) AS input_month_max
FROM `bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(inputs) AS inputs
GROUP BY input_ages_address
)
,output_monthly_stats AS (
SELECT
ARRAY_TO_STRING(outputs.addresses,',') AS output_monthly_stats_address,
COUNT(DISTINCT block_timestamp_month) AS output_active_months,
COUNT(outputs) AS total_tx_output_count,
SUM(value) AS total_tx_output_value,
AVG(value) AS mean_tx_output_value,
STDDEV(value) AS stddev_tx_output_value,
COUNT(DISTINCT(`hash`)) AS total_output_tx,
SUM(value)/COUNT(block_timestamp_month) AS mean_monthly_output_value,
COUNT(outputs.addresses)/COUNT(block_timestamp_month) AS mean_monthly_output_count
FROM `bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(outputs) AS outputs
GROUP BY output_monthly_stats_address
)
,input_monthly_stats AS (
SELECT
ARRAY_TO_STRING(inputs.addresses,',') AS input_monthly_stats_address,
COUNT(DISTINCT block_timestamp_month) AS input_active_months,
COUNT(inputs) AS total_tx_input_count,
SUM(value) AS total_tx_input_value,
AVG(value) AS mean_tx_input_value,
STDDEV(value) AS stddev_tx_input_value,
COUNT(DISTINCT(`hash`)) AS total_input_tx,
SUM(value)/COUNT(block_timestamp_month) AS mean_monthly_input_value,
COUNT(inputs.addresses)/COUNT(block_timestamp_month) AS mean_monthly_input_count
FROM `bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(inputs) AS inputs
GROUP BY input_monthly_stats_address
)
,output_idle_times AS (
SELECT
address AS idle_time_address,
AVG(idle_time) AS mean_output_idle_time,
STDDEV(idle_time) AS stddev_output_idle_time
FROM
(
SELECT
event.address,
IF(prev_block_time IS NULL, NULL, UNIX_SECONDS(block_time) - UNIX_SECONDS(prev_block_time)) AS idle_time
FROM (
SELECT
ARRAY_TO_STRING(outputs.addresses,',') AS address,
block_timestamp AS block_time,
LAG(block_timestamp) OVER (PARTITION BY ARRAY_TO_STRING(outputs.addresses,',') ORDER BY block_timestamp) AS prev_block_time
FROM `bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(outputs) AS outputs
) AS event
WHERE block_time != prev_block_time
)
GROUP BY address
)
,input_idle_times AS (
SELECT
address AS idle_time_address,
AVG(idle_time) AS mean_input_idle_time,
STDDEV(idle_time) AS stddev_input_idle_time
FROM
(
SELECT
event.address,
IF(prev_block_time IS NULL, NULL, UNIX_SECONDS(block_time) - UNIX_SECONDS(prev_block_time)) AS idle_time
FROM (
SELECT
ARRAY_TO_STRING(inputs.addresses,',') AS address,
block_timestamp AS block_time,
LAG(block_timestamp) OVER (PARTITION BY ARRAY_TO_STRING(inputs.addresses,',') ORDER BY block_timestamp) AS prev_block_time
FROM `bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(inputs) AS inputs
) AS event
WHERE block_time != prev_block_time
)
GROUP BY address
)
--,miners AS (
--)
(SELECT
TRUE AS is_miner,
output_ages_address AS address,
UNIX_SECONDS(CAST(output_ages.output_month_min AS TIMESTAMP)) AS output_month_min,
UNIX_SECONDS(CAST(output_ages.output_month_max AS TIMESTAMP)) AS output_month_max,
UNIX_SECONDS(CAST(input_ages.input_month_min AS TIMESTAMP)) AS input_month_min,
UNIX_SECONDS(CAST(input_ages.input_month_max AS TIMESTAMP)) AS input_month_max,
UNIX_SECONDS(CAST(output_ages.output_month_max AS TIMESTAMP)) - UNIX_SECONDS(CAST(output_ages.output_month_min AS TIMESTAMP)) AS output_active_time,
UNIX_SECONDS(CAST(input_ages.input_month_max AS TIMESTAMP)) - UNIX_SECONDS(CAST(input_ages.input_month_min AS TIMESTAMP)) AS input_active_time,
UNIX_SECONDS(CAST(output_ages.output_month_max AS TIMESTAMP)) - UNIX_SECONDS(CAST(input_ages.input_month_max AS TIMESTAMP)) AS io_max_lag,
UNIX_SECONDS(CAST(output_ages.output_month_min AS TIMESTAMP)) - UNIX_SECONDS(CAST(input_ages.input_month_min AS TIMESTAMP)) AS io_min_lag,
output_monthly_stats.output_active_months,
output_monthly_stats.total_tx_output_count,
output_monthly_stats.total_tx_output_value,
output_monthly_stats.mean_tx_output_value,
output_monthly_stats.stddev_tx_output_value,
output_monthly_stats.total_output_tx,
output_monthly_stats.mean_monthly_output_value,
output_monthly_stats.mean_monthly_output_count,
input_monthly_stats.input_active_months,
input_monthly_stats.total_tx_input_count,
input_monthly_stats.total_tx_input_value,
input_monthly_stats.mean_tx_input_value,
input_monthly_stats.stddev_tx_input_value,
input_monthly_stats.total_input_tx,
input_monthly_stats.mean_monthly_input_value,
input_monthly_stats.mean_monthly_input_count,
output_idle_times.mean_output_idle_time,
output_idle_times.stddev_output_idle_time,
input_idle_times.mean_input_idle_time,
input_idle_times.stddev_input_idle_time
FROM
output_ages, output_monthly_stats, output_idle_times,
input_ages, input_monthly_stats, input_idle_times
WHERE TRUE
AND output_ages.output_ages_address = output_monthly_stats.output_monthly_stats_address
AND output_ages.output_ages_address = output_idle_times.idle_time_address
AND output_ages.output_ages_address = input_monthly_stats.input_monthly_stats_address
AND output_ages.output_ages_address = input_ages.input_ages_address
AND output_ages.output_ages_address = input_idle_times.idle_time_address
AND output_ages.output_ages_address IN
(
SELECT
ARRAY_TO_STRING(outputs.addresses,',') AS miner
FROM
`bigquery-public-data.crypto_bitcoin.blocks` AS blocks,
`bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(outputs) AS outputs
WHERE blocks.hash = transactions.block_hash
AND is_coinbase IS TRUE
AND ( FALSE
--
-- miner signatures from https://en.bitcoin.it/wiki/Comparison_of_mining_pools
--
OR coinbase_param LIKE '%4d696e656420627920416e74506f6f6c%' --AntPool
OR coinbase_param LIKE '%2f42434d6f6e737465722f%' --BCMonster
--BitcoinAffiliateNetwork
OR coinbase_param LIKE '%4269744d696e746572%' --BitMinter
--BTC.com
--BTCC Pool
--BTCDig
OR coinbase_param LIKE '%2f7374726174756d2f%' --Btcmp
--btcZPool.com
--BW Mining
OR coinbase_param LIKE '%456c6967697573%' --Eligius
--F2Pool
--GHash.IO
--Give Me COINS
--Golden Nonce Pool
OR coinbase_param LIKE '%2f627261766f2d6d696e696e672f%' --Bravo Mining
OR coinbase_param LIKE '%4b616e6f%' --KanoPool
--kmdPool.org
OR coinbase_param LIKE '%2f6d6d706f6f6c%' --Merge Mining Pool
--MergeMining
--Multipool
--P2Pool
OR coinbase_param LIKE '%2f736c7573682f%' --Slush Pool
--ZenPool.org
)
GROUP BY miner
HAVING COUNT(1) >= 20
)
LIMIT {})
UNION ALL
(SELECT
FALSE AS is_miner,
output_ages_address AS address,
UNIX_SECONDS(CAST(output_ages.output_month_min AS TIMESTAMP)) AS output_month_min,
UNIX_SECONDS(CAST(output_ages.output_month_max AS TIMESTAMP)) AS output_month_max,
UNIX_SECONDS(CAST(input_ages.input_month_min AS TIMESTAMP)) AS input_month_min,
UNIX_SECONDS(CAST(input_ages.input_month_max AS TIMESTAMP)) AS input_month_max,
UNIX_SECONDS(CAST(output_ages.output_month_max AS TIMESTAMP)) - UNIX_SECONDS(CAST(output_ages.output_month_min AS TIMESTAMP)) AS output_active_time,
UNIX_SECONDS(CAST(input_ages.input_month_max AS TIMESTAMP)) - UNIX_SECONDS(CAST(input_ages.input_month_min AS TIMESTAMP)) AS input_active_time,
UNIX_SECONDS(CAST(output_ages.output_month_max AS TIMESTAMP)) - UNIX_SECONDS(CAST(input_ages.input_month_max AS TIMESTAMP)) AS io_max_lag,
UNIX_SECONDS(CAST(output_ages.output_month_min AS TIMESTAMP)) - UNIX_SECONDS(CAST(input_ages.input_month_min AS TIMESTAMP)) AS io_min_lag,
output_monthly_stats.output_active_months,
output_monthly_stats.total_tx_output_count,
output_monthly_stats.total_tx_output_value,
output_monthly_stats.mean_tx_output_value,
output_monthly_stats.stddev_tx_output_value,
output_monthly_stats.total_output_tx,
output_monthly_stats.mean_monthly_output_value,
output_monthly_stats.mean_monthly_output_count,
input_monthly_stats.input_active_months,
input_monthly_stats.total_tx_input_count,
input_monthly_stats.total_tx_input_value,
input_monthly_stats.mean_tx_input_value,
input_monthly_stats.stddev_tx_input_value,
input_monthly_stats.total_input_tx,
input_monthly_stats.mean_monthly_input_value,
input_monthly_stats.mean_monthly_input_count,
output_idle_times.mean_output_idle_time,
output_idle_times.stddev_output_idle_time,
input_idle_times.mean_input_idle_time,
input_idle_times.stddev_input_idle_time
FROM
output_ages, output_monthly_stats, output_idle_times,
input_ages, input_monthly_stats, input_idle_times
WHERE TRUE
AND output_ages.output_ages_address = output_monthly_stats.output_monthly_stats_address
AND output_ages.output_ages_address = output_idle_times.idle_time_address
AND output_ages.output_ages_address = input_monthly_stats.input_monthly_stats_address
AND output_ages.output_ages_address = input_ages.input_ages_address
AND output_ages.output_ages_address = input_idle_times.idle_time_address
AND output_ages.output_ages_address NOT IN
(
SELECT
ARRAY_TO_STRING(outputs.addresses,',') AS miner
FROM
`bigquery-public-data.crypto_bitcoin.blocks` AS blocks,
`bigquery-public-data.crypto_bitcoin.transactions` AS transactions JOIN UNNEST(outputs) AS outputs
WHERE blocks.hash = transactions.block_hash
AND is_coinbase IS TRUE
AND ( FALSE
--
-- miner signatures from https://en.bitcoin.it/wiki/Comparison_of_mining_pools
--
OR coinbase_param LIKE '%4d696e656420627920416e74506f6f6c%' --AntPool
OR coinbase_param LIKE '%2f42434d6f6e737465722f%' --BCMonster
--BitcoinAffiliateNetwork
OR coinbase_param LIKE '%4269744d696e746572%' --BitMinter
--BTC.com
--BTCC Pool
--BTCDig
OR coinbase_param LIKE '%2f7374726174756d2f%' --Btcmp
--btcZPool.com
--BW Mining
OR coinbase_param LIKE '%456c6967697573%' --Eligius
--F2Pool
--GHash.IO
--Give Me COINS
--Golden Nonce Pool
OR coinbase_param LIKE '%2f627261766f2d6d696e696e672f%' --Bravo Mining
OR coinbase_param LIKE '%4b616e6f%' --KanoPool
--kmdPool.org
OR coinbase_param LIKE '%2f6d6d706f6f6c%' --Merge Mining Pool
--MergeMining
--Multipool
--P2Pool
OR coinbase_param LIKE '%2f736c7573682f%' --Slush Pool
--ZenPool.org
)
GROUP BY miner
HAVING COUNT(1) >= 20
)
LIMIT {})
'''.format(miner_limit, non_miner_limit)
# Run the mining-pool query and materialize the result as a DataFrame.
df = client.query(sql).to_dataframe()
df.info()
# Dropping the columns with null values
df.drop(labels = ['stddev_output_idle_time','stddev_input_idle_time'], axis = 1, inplace = True)
df.tail(5)
df.head(5)
df.shape
# Dropping the non-numeric features
# `features` keeps all numeric columns; `target` is the boolean miner label.
features = df.drop(labels = ['is_miner', 'address'], axis = 1)
target = df['is_miner'].values
indices = range(len(features))
# ## t-SNE
# Global seaborn theme for all plots below.
sns.set_style('darkgrid')
sns.set_palette('muted')
sns.set_context("notebook", font_scale = 1.5, rc = {"lines.linewidth": 2.5})
# Utility function to visualize the outputs of t-SNE
def bitcoin_scatter(x, colors):
    """Scatter-plot a 2-D embedding, colored and labelled by integer class.

    Parameters
    ----------
    x : ndarray of shape (n_samples, 2)
        2-D coordinates (e.g. a t-SNE embedding).
    colors : ndarray of shape (n_samples,)
        Integer class label per sample (values 0..num_classes-1).

    Returns
    -------
    tuple of (figure, axes, PathCollection, list of Text labels).
    """
    # choose a color palette with seaborn.
    num_classes = len(np.unique(colors))
    palette = np.array(sns.color_palette("hls", num_classes))

    # create a scatter plot.
    f = plt.figure(figsize = (8, 8))
    ax = plt.subplot(aspect = 'equal')
    # `np.int` was deprecated and removed in NumPy 1.24; the builtin `int`
    # is the equivalent (originally aliased) type.
    sc = ax.scatter(x[:,0], x[:,1], lw = 0, s = 40, c=palette[colors.astype(int)])
    plt.xlim(-25, 25)
    plt.ylim(-25, 25)
    ax.axis('off')
    ax.axis('tight')
    plt.title('t-SNE to visualize features')

    # add a text label for each class at the median of its points
    txts = []
    for i in range(num_classes):
        # Position of each label at median of data points.
        xtext, ytext = np.median(x[colors == i, :], axis = 0)
        txt = ax.text(xtext, ytext, str(i), fontsize = 24)
        # White outline keeps the label readable on top of the points.
        txt.set_path_effects([
            PathEffects.Stroke(linewidth = 5, foreground = "w"),
            PathEffects.Normal()])
        txts.append(txt)
    return f, ax, sc, txts
# Time the embedding computation.
time_start = time.time()
# Fixed random state so the t-SNE embedding is reproducible across runs.
RS = 123
bitcoin_tsne = TSNE(random_state = RS).fit_transform(features)
print('Time elapsed: {} seconds' .format(time.time() - time_start))
bitcoin_scatter(bitcoin_tsne, target)
# NOTE(review): if the figure was already rendered/closed by the notebook
# backend, this savefig may write an empty canvas -- confirm the output file.
plt.savefig('tSNE.jpg')
# ## Splitting the training and testing dataset
# Splitting the training and testing dataset
# `indices` is split alongside X/y so test rows can be mapped back to df later.
x_train, x_test, y_train, y_test, indices_train, indices_test = train_test_split(features, target, indices, test_size = 0.2)
x_train.head()
x_train.shape
y_train
# ## Artificial Neural Network
# Feature Scaling
# The scaler is fit on the training split only, then applied to the test
# split, so no test-set statistics leak into training.
sc_x = StandardScaler()
x_train_ann = sc_x.fit_transform(x_train)
x_test_ann = sc_x.transform(x_test)
# +
num_classes = 2
# Hyperparameters
learn_rate = 0.001
batch_size = 500
epochs = 270
# +
# Encoding the Dependent Variable
labelencoder_y = LabelEncoder()
y_train_ann = labelencoder_y.fit_transform(y_train)
# Converting to binary class matrix (one-hot, shape (n, 2))
y_train_ann = utils.to_categorical(y_train_ann, num_classes)
# -
y_train_ann.shape
# +
# Encoding the Dependent Variable
# NOTE(review): the encoder is re-fit on y_test; this only lines up with the
# training encoding if both classes appear in the test split -- confirm.
labelencoder_y = LabelEncoder()
y_test_ann = labelencoder_y.fit_transform(y_test)
# Converting to binary class matrix
y_test_ann = utils.to_categorical(y_test_ann, num_classes)
# -
y_test_ann.shape
# +
# Fixed seed for reproducible weight initialization (NumPy side only).
seed = 1
np.random.seed(seed)

# Creating model: 26 -> 11 -> dropout(0.5) -> 6 -> softmax(2)
ann = Sequential()
ann.add(Dense(26, activation = 'tanh', kernel_initializer = 'glorot_uniform'))
ann.add(Dense(11, activation = 'tanh'))
ann.add(Dropout(0.5))
ann.add(Dense(6, activation = 'tanh'))
ann.add(Dense(num_classes, activation = 'softmax'))
# -

rmsprop = optimizers.RMSprop(learn_rate)
ann.compile(loss = 'binary_crossentropy', optimizer = rmsprop, metrics = ['binary_accuracy']) # Compiling the model

# Model fitting
# NOTE(review): the test set is used as validation_data, so test performance
# is visible during training -- there is no held-out third split.
ann.fit(np.array(x_train_ann), y_train_ann, batch_size = batch_size, epochs = epochs, validation_data = (x_test_ann, y_test_ann))
ann.summary()

scores = ann.evaluate(x_test_ann, y_test_ann, verbose = 0)
print("Test Accuracy (Artificial Neural Network): {}%" .format(scores[1] * 100))
scores

# +
y_pred = ann.predict(x_test_ann)
# Compute confusion matrix (argmax converts one-hot / softmax back to class ids)
matrix = confusion_matrix(y_test_ann.argmax(axis = 1), y_pred.argmax(axis = 1)) # Building the confusion matrix
# -
matrix
# ## Random Forest Classification
# Training the model
# class_weight='balanced' compensates for the miner/non-miner class imbalance.
rf = RandomForestClassifier(n_estimators = 100, class_weight = 'balanced')
rf.fit(x_train, y_train)

# Model predictions
y_pred = rf.predict(x_test)
probs = rf.predict_proba(x_test)[:, 1] # Positive class probabilities

# Shrink plot fonts for the confusion-matrix figures below.
params = {'legend.fontsize': 'small',
          'axes.labelsize': 'x-small',
          'axes.titlesize':'small',
          'xtick.labelsize':'x-small',
          'ytick.labelsize':'x-small'}
pylab.rcParams.update(params)
# +
# Confusion matrix code adapted from https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes, normalize = False, title = 'Confusion matrix', cmap = plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize = True`.

    Parameters
    ----------
    cm : 2-D array, confusion matrix as returned by sklearn's confusion_matrix.
    classes : list of str, tick labels for the true/predicted axes.
    normalize : bool, if True each row is scaled to sum to 1.
    title : str, figure title.
    cmap : matplotlib colormap for the background image.
    """
    if normalize:
        # Row-normalize: each true-class row sums to 1.
        cm = cm.astype('float') / cm.sum(axis = 1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)

    # A constant background is drawn instead of `cm` so the black cell text
    # stays readable regardless of the counts.  zeros_like generalizes the
    # original hard-coded 2x2 array to any number of classes.
    dummy = np.zeros_like(cm)
    plt.figure(figsize = (6, 6))
    plt.imshow(dummy, interpolation = 'nearest', cmap = cmap)
    plt.title(title)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation = 45)
    plt.yticks(tick_marks, classes)

    # Counts print as integers; normalized values with two decimals.
    fmt = '.2f' if normalize else 'd'
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment = "center",
                 color = "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
class_names = ['not mining pool', 'mining pool']
np.set_printoptions(precision = 2)

# Plot confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes = class_names, normalize = False, title = 'Bitcoin Mining Pool Detector using Random Forest - Confusion Matrix')
# Save before plt.show(): once the figure has been shown and closed by the
# backend, a later savefig writes out a new, blank canvas.
plt.savefig('RF_CM.jpg')
plt.show()
# -

# Calculating Accuracy: (TN + TP) / total predictions
acc = (cnf_matrix[0][0] + cnf_matrix[1][1]) / (cnf_matrix[0][0] + cnf_matrix[1][1] + cnf_matrix[0][1] + cnf_matrix[1][0])
print("Test Accuracy (Random Forest Classification): {}%" .format(acc * 100))
# ### What is the contribution of each feature?
params = {'legend.fontsize': 'small',
          'axes.labelsize': 'small',
          'axes.titlesize':'small',
          'xtick.labelsize':'small',
          'ytick.labelsize':'small'
         }
pylab.rcParams.update(params)

# +
x_pos = np.arange(len(features.columns))
btc_importances = rf.feature_importances_
# Sort features from most to least important.
inds = np.argsort(btc_importances)[::-1]
btc_importances = btc_importances[inds]
cols = features.columns[inds]
bar_width = .8

# How many features to plot?
n_features = 26
# Reverse positions so the most important feature ends up at the top bar.
x_pos = x_pos[:n_features][::-1]
btc_importances = btc_importances[:n_features]

# Plot
plt.figure(figsize = (26, 10))
plt.barh(x_pos, btc_importances, bar_width, label = 'BTC model')
# NOTE(review): `cols` is not sliced to n_features; this only lines up with
# x_pos because n_features equals the full feature count here -- confirm.
plt.yticks(x_pos, cols, rotation = 0, fontsize = 14)
plt.xlabel('feature importance', fontsize = 14)
plt.title('Mining Pool Detector', fontsize = 20)
plt.tight_layout()
# -
plt.savefig('RFfeatureImportance.jpg')
# ### Are False Positives associated with dark mining pools?
# Data points where model predicts true, but are labelled as false
false_positives = (y_test == False) & (y_pred == True)

# +
# Subset to test set data only (indices_test maps test rows back to df rows).
df_test = df.iloc[indices_test, :]
print('False Positive addresses')
# Subset test set to false positives only
df_test.iloc[false_positives].head(15)
# -

# # Comparison between ANN and RF
# All importance-ranked feature indices except the last (least important) one;
# the ablation loop below will drop these one at a time.
index = inds[: -1]
index
data_top = x_train.columns
# Wrap the scaled numpy arrays back into DataFrames so columns can be dropped
# by (stringified) feature name, mirroring the unscaled RF frames.
xann = pd.DataFrame(data = x_train_ann[0:, 0:], index = [i for i in range(x_train_ann.shape[0])], columns = [str(i) for i in data_top])
xtann = pd.DataFrame(data = x_test_ann[0:, 0:], index = [i for i in range(x_test_ann.shape[0])], columns = [str(i) for i in data_top])
xrf = x_train
xtrf = x_test
# Accuracy history, seeded with the all-features results computed above.
i = 1
ann_acc = [scores[1] * 100]
rf_acc = [acc * 100]
print("{}- All 26 features taken as input:" .format(i))
print("ANN test accuracy = {}%, RF test accuracy = {}%" .format(ann_acc[0], rf_acc[0]))
# Ablation study: drop one feature per iteration, from least important to
# most important (index is sorted descending, so reversed() walks upward),
# retraining both models each time and recording their test accuracies.
for x in reversed(index):
    xann = xann.drop(columns = [data_top[x]])
    xtann = xtann.drop(columns = [data_top[x]])
    xrf = xrf.drop(columns = [data_top[x]])
    xtrf = xtrf.drop(columns = [data_top[x]])
    # Creating model (same architecture as the full-feature ANN above)
    ann = Sequential()
    ann.add(Dense(26, activation = 'tanh', kernel_initializer = 'glorot_uniform'))
    ann.add(Dense(11, activation = 'tanh'))
    ann.add(Dropout(0.5))
    ann.add(Dense(6, activation = 'tanh'))
    ann.add(Dense(num_classes, activation = 'softmax'))
    rmsprop = optimizers.RMSprop(learn_rate)
    ann.compile(loss = 'binary_crossentropy', optimizer = rmsprop, metrics = ['binary_accuracy']) # Compiling the model
    # Model fitting (verbose=0: silent; one full retrain per dropped feature)
    ann.fit(np.array(xann), y_train_ann, batch_size = batch_size, verbose = 0, epochs = epochs)
    scores_ann = ann.evaluate(xtann, y_test_ann, verbose = 0)
    # Training the model
    rf = RandomForestClassifier(n_estimators = 100, class_weight = 'balanced')
    rf.fit(xrf, y_train)
    # Model predictions
    y_pred = rf.predict(xtrf)
    cnf_matrix = confusion_matrix(y_test, y_pred)
    # Accuracy = (TN + TP) / total
    acc = (cnf_matrix[0][0] + cnf_matrix[1][1]) / (cnf_matrix[0][0] + cnf_matrix[1][1] + cnf_matrix[0][1] + cnf_matrix[1][0])
    i += 1
    print("{}- Dropping {}:" .format(i, data_top[x]))
    print("ANN test accuracy = {}%, RF test accuracy = {}%" .format(scores_ann[1] * 100, acc * 100))
    ann_acc.append(scores_ann[1] * 100)
    rf_acc.append(acc * 100)
# +
# Create plots with pre-defined labels
f = plt.figure(figsize = (10, 10))
ax = f.add_subplot(121)
# x-axis: number of features remaining, from 26 down to 1, matching the
# order in which accuracies were appended by the ablation loop.
t = list(np.arange(1., 27., 1))
t.reverse()
ax.plot(t, rf_acc, 'r-', label = 'Random Forest')
ax.plot(t, ann_acc, 'b--', label = 'Artificial Neural Network')
legend = ax.legend(loc = 'lower right', shadow = True, fontsize = 'x-small')
# Put a nicer background color on the legend.
legend.get_frame().set_facecolor('C6')
plt.ylabel('Test Accuracy (%)')
plt.xlabel('Number of features')
plt.title('RF vs ANN (Accuracy Comparison)')
plt.show()
# -
# NOTE(review): savefig after plt.show() typically writes a blank image once
# the backend has closed the figure -- consider saving before show().
plt.savefig('RFvsANN.jpg')
# # Trying other approaches
# ## Naive Bayes
# Fitting Naive Bayes to the Training set
nb = GaussianNB()
nb.fit(x_train, y_train)

# Predicting the Test set results
y_pred = nb.predict(x_test)

# +
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred)
class_names = ['not mining pool', 'mining pool']
np.set_printoptions(precision = 3)

# Plot confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes = class_names, normalize = False, title = 'Bitcoin Mining Pool Detector using Naive Bayes - Confusion Matrix')
# Save before plt.show(): once the figure has been shown and closed by the
# backend, a later savefig writes out a new, blank canvas.
plt.savefig('NaiveBayesCM.jpg')
plt.show()
# -

# Calculating Accuracy: (TN + TP) / total predictions
acc = (cnf_matrix[0][0] + cnf_matrix[1][1]) / (cnf_matrix[0][0] + cnf_matrix[1][1] + cnf_matrix[0][1] + cnf_matrix[1][0])
print("Test Accuracy (Naive Bayes Classification): {}%" .format(acc * 100))
| ml-in-blockchain.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# +
import math

# --- Design inputs ---------------------------------------------------------
thrust = 1000                        # required thrust [N] (unused until mass flow is defined; see TODO)
gammaAir = 1.4                       # ratio of specific heats for air
inletTotalPressure = 30*100*10**6    # chamber total pressure [Pa]; note '^' is XOR in Python, so the original 10^6 was a bug
inletTotalTemperature = 2000         # chamber total temperature [K]
airGasConstant = 287                 # specific gas constant of air [J/(kg*K)]
freeStreamPressure = 100*10**6       # ambient static pressure [Pa] (fixed 'freeStremPressure' typo)

# Perfectly expanded nozzle: exit static pressure equals ambient pressure.
exitPressure = freeStreamPressure

# Exit Mach number from the isentropic total/static pressure ratio.
# (Original used undefined `gamma` and the XOR operator '^'.)
exitMach = math.sqrt((2/(gammaAir - 1))*((exitPressure/inletTotalPressure)**((1 - gammaAir)/gammaAir) - 1))

# Exit static temperature and exit velocity from the isentropic relations.
exitTemperature = inletTotalTemperature*(1 + ((gammaAir - 1)/2)*exitMach**2)**(-1)
exitVelocity = exitMach*math.sqrt(gammaAir*airGasConstant*exitTemperature)

# Area ratio A_exit/A_throat from the isentropic area-Mach relation.
areaRatio = (((gammaAir + 1)/2)**((gammaAir + 1)/(2*(1 - gammaAir))))*(1/exitMach)*(1 + ((gammaAir - 1)/2)*exitMach**2)**((gammaAir + 1)/(2*(gammaAir - 1)))

# TODO: the original script referenced `massFlowRate` and `throatArea`, which
# were never defined anywhere, and assigned `trust` (typo for `thrust`) before
# its inputs existed.  Once those design inputs are chosen:
#   exitArea = areaRatio*throatArea
#   thrustProduced = massFlowRate*exitVelocity + (exitPressure - freeStreamPressure)*exitArea
# (The pressure term is zero here because the nozzle is perfectly expanded.)
| Nozzle/nozzle-beta.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# 멀티-GPU 예제
# ==================
#
# 데이터 병렬 처리(Data Parallelism)는 미니-배치를 여러 개의 더 작은 미니-배치로
# 자르고 각각의 작은 미니배치를 병렬적으로 연산하는 것입니다.
#
# 데이터 병렬 처리는 ``torch.nn.DataParallel`` 을 사용하여 구현합니다.
# ``DataParallel`` 로 감쌀 수 있는 모듈은 배치 차원(batch dimension)에서
# 여러 GPU로 병렬 처리할 수 있습니다.
#
#
# DataParallel
# -------------
#
# +
import torch
import torch.nn as nn
class DataParallelModel(nn.Module):
    """Toy three-layer network whose middle layer is data-parallel.

    Only ``block2`` is wrapped in ``nn.DataParallel``, so its batch is split
    across the available GPUs while ``block1`` and ``block3`` run as ordinary
    modules (on CPU-only machines DataParallel simply falls through).
    """

    def __init__(self):
        super().__init__()
        self.block1 = nn.Linear(10, 20)
        # The middle layer alone is wrapped in DataParallel.
        self.block2 = nn.DataParallel(nn.Linear(20, 20))
        self.block3 = nn.Linear(20, 20)

    def forward(self, x):
        out = self.block1(x)
        out = self.block2(out)
        return self.block3(out)
# -
# CPU 모드인 코드를 바꿀 필요가 없습니다.
#
# DataParallel에 대한 문서는 `여기 <http://pytorch.org/docs/nn.html#dataparallel>`_
# 에서 확인하실 수 있습니다.
#
# **래핑된 모듈의 속성**
#
# 모듈을 ``DataParallel`` 로 감싼 후에는 모듈의 속성(예. 사용자 정의 메소드)에
# 접근할 수 없게 됩니다. 이는 ``DataParallel`` 이 몇몇 새로운 멤버를 정의하기 떄문에
# 다른 속성에 접근을 허용하는 것이 충돌을 일으킬 수도 있기 때문입니다.
# 그래도 속성에 접근하고자 한다면 아래와 같이 ``DataParallel`` 의 서브클래스를
# 사용하는 것이 좋습니다.
#
#
class MyDataParallel(nn.DataParallel):
    """DataParallel wrapper that forwards unknown attribute lookups to the
    wrapped module, so custom methods/attributes stay reachable.

    Bug fix: the naive ``return getattr(self.module, name)`` recurses
    forever -- ``nn.Module`` stores submodules (including ``module`` itself)
    in ``self._modules`` and resolves them via ``__getattr__``, so reading
    ``self.module`` re-enters this method.  Resolving through
    ``super().__getattr__`` first finds ``module`` (and any regular
    DataParallel attribute) and only then delegates the rest.
    """

    def __getattr__(self, name):
        try:
            # Parameters/buffers/submodules registered on the wrapper itself
            # (this is how `self.module` is found without recursing).
            return super().__getattr__(name)
        except AttributeError:
            # Everything else is delegated to the wrapped module.
            return getattr(self.module, name)
# **DataParallel이 구현된 기본형(Primitive):**
#
#
# 일반적으로, PyTorch의 `nn.parallel` 기본형은 독립적으로 사용할 수 있습니다.
# 간단한 MPI류의 기본형을 구현해보겠습니다:
#
# - 복제(replicate): 여러 기기에 모듈을 복제합니다.
# - 분산(scatter): 첫번째 차원에서 입력을 분산합니다.
# - 수집(gather): 첫번째 차원에서 입력을 수집하고 합칩니다.
# - 병렬적용(parallel\_apply): 이미 분산된 입력의 집합을 이미 분산된 모델의
# 집합에 적용합니다.
#
# 더 명확히 알아보기 위해, 위 요소 사용하여 구성한 ``data_parallel``
# 함수를 살펴보겠습니다.
#
#
def data_parallel(module, input, device_ids, output_device=None):
    """Run ``module`` on ``input`` split across ``device_ids``.

    Replicates the module onto each device, scatters the input along its
    first dimension, applies each replica to its shard in parallel, and
    gathers the results on ``output_device`` (first device by default).
    With an empty device list the module is simply applied as-is.
    """
    # Nothing to parallelize over: plain forward pass on the current device.
    if not device_ids:
        return module(input)

    target = device_ids[0] if output_device is None else output_device

    shards = nn.parallel.scatter(input, device_ids)
    # scatter can produce fewer shards than devices (small batches), so keep
    # only as many replicas as there are shards.
    copies = nn.parallel.replicate(module, device_ids)[:len(shards)]
    results = nn.parallel.parallel_apply(copies, shards)
    return nn.parallel.gather(results, target)
# 모델의 일부는 CPU, 일부는 GPU에서
# --------------------------------------------
#
# 일부는 CPU에서, 일부는 GPU에서 신경망을 구현한 짧은 예제를 살펴보겠습니다
#
#
# +
device = torch.device("cuda:0")
class DistributedModel(nn.Module):
    """Model whose embedding runs on the CPU and whose linear layer runs on
    the GPU; the activation is moved between devices inside ``forward``.

    Bug fix: ``nn.Module.__init__`` does not accept submodules as keyword
    arguments (it raises TypeError) -- submodules must be assigned as
    attributes after calling ``super().__init__()``.
    """

    def __init__(self):
        super().__init__()
        self.embedding = nn.Embedding(1000, 10)   # stays on the CPU
        self.rnn = nn.Linear(10, 10).to(device)   # lives on the GPU

    def forward(self, x):
        # Compute on the CPU.
        x = self.embedding(x)

        # Transfer to the GPU.
        x = x.to(device)

        # Compute on the GPU.
        x = self.rnn(x)
        return x
# -
# 지금까지 기존 Torch 사용자를 위한 간단한 PyTorch 개요를 살펴봤습니다.
# 배울 것은 아주 많이 있습니다.
#
# ``optim`` 패키지, 데이터 로더 등을 소개하고 있는 더 포괄적인 입문용 튜토리얼을
# 보시기 바랍니다: :doc:`/beginner/deep_learning_60min_blitz`.
#
# 또한, 다음의 내용들도 살펴보세요.
#
# - :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>`
# - `Train a state-of-the-art ResNet network on imagenet`_
# - `Train an face generator using Generative Adversarial Networks`_
# - `Train a word-level language model using Recurrent LSTM networks`_
# - `다른 예제들 참고하기`_
# - `더 많은 튜토리얼 보기`_
# - `포럼에서 PyTorch에 대해 얘기하기`_
# - `Slack에서 다른 사용자와 대화하기`_
#
#
#
| docs/_downloads/d09aa12e86820d87b65f73c77c443514/parallelism_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#import the library used to query a website
from urllib.request import urlopen

# #### Going scrape the following URL of makaan.com (Property listing site for Buy, Sell and Rent) to get average rates for rent in apartment in various locality in kolkata
#specify the url to be scraped
site_url = "https://www.makaan.com/price-trends/property-rates-for-rent-in-kolkata"

#Query the website and return the html to the variable 'page'
# NOTE: network I/O -- requires connectivity and depends on the live page layout.
page = urlopen(site_url)

#import the Beautiful soup functions to parse the data returned from the website
from bs4 import BeautifulSoup

#Parse the html in the 'page' variable, and store it in Beautiful Soup format
# (the 'lxml' parser requires the lxml package to be installed)
soup = BeautifulSoup(page, "lxml")

#Display the page title
soup.title.string
# ### Finding our required table from where data to be retrieved.
right_table=soup.find('table', id='locality_apartment')
#right_table

# ### Storing the table column values to different lists
#Generate lists
# A = locality name, B/C/D = average rents for 1/2/3 BHK (columns 0, 2, 4, 6).
A=[]
B=[]
C=[]
D=[]
for row in right_table.findAll("tr"):
    states = row.findAll('th') #To store second column data (currently unused)
    cells = row.findAll('td')
    if len(cells)==7: #Only extract table body not heading
        # NOTE(review): find(text=True) is the legacy bs4 spelling of
        # find(string=True) -- confirm against the installed bs4 version.
        A.append(cells[0].find(text=True))
        B.append(cells[2].find(text=True))
        C.append(cells[4].find(text=True))
        D.append(cells[6].find(text=True))
# ### Make a Pandas Dataframe from the above lists
#import pandas to convert list to data frame
import pandas as pd
df=pd.DataFrame(A,columns=['Locality'])
df['BHK_1_Avg_Rent']=B
df['BHK_2_Avg_Rent']=C
df['BHK_3_Avg_Rent']=D
df

# #### Let's drop those rows where BHK_1_Avg_Rent, BHK_2_Avg_Rent & BHK_3_Avg_Rent all are blank
# Rows with '-' in every rent column carry no information.
df = df.drop(df[(df.BHK_1_Avg_Rent == '-') & (df.BHK_2_Avg_Rent == '-') & (df.BHK_3_Avg_Rent == '-')].index)
# reset index, because we droped two rows
df.reset_index(drop = True, inplace = True)
df

# #### Going to drop another two rows where the locality is absolutely out of Kolkata or seems irrelevant or Foursquare API does not provide results (row 1, 8, 30, 46)
# NOTE: these positions are hand-picked against one specific scrape; they
# will shift if the source page changes.
df = df.drop([1, 8, 30, 46])
# reset index, because we droped two rows
df.reset_index(drop = True, inplace = True)
df

# #### Need to rename some localities
# Normalize names so the geocoder can find them.
# NOTE(review): calling .replace(..., inplace=True) on a column selection is
# chained assignment; confirm it still takes effect on the installed pandas.
df['Locality'].replace(['Dum Dum Cantonment Kolkata'], 'Dum Dum Cantonment', inplace=True)
df['Locality'].replace(['E M Bypass'], 'EM Bypass', inplace=True)
df['Locality'].replace(['Durganagar'], 'Durga Nagar', inplace=True)
df['Locality'].replace(['birati'], 'Birati', inplace=True)
df['Locality'].replace(['Phool Bagan'], 'Phoolbagan', inplace=True)
df['Locality'].replace(['Bonhooghly on BT Road'], 'BT Road', inplace=True)
df['Locality'].replace(['Sector 1 Salt Lake City'], 'Sector 1', inplace=True)
df['Locality'].replace(['New Town Action Area I'], 'New Town AA1', inplace=True)
df
# !conda install -c conda-forge geopy --yes # uncomment this line if you haven't completed the Foursquare API lab
from geopy.geocoders import Nominatim # convert an address into latitude and longitude values

# +
address = 'Kolkata, WB'

# NOTE(review): geopy >= 2.0 requires Nominatim(user_agent=...); confirm the
# installed geopy version.  Network I/O.
geolocator = Nominatim()
location = geolocator.geocode(address)
kol_latitude = location.latitude
kol_longitude = location.longitude
print('The geograpical coordinate of {} are {}, {}.'.format(address, kol_latitude, kol_longitude))
# -
# #### Define Foursquare Credentials and Version
# SECURITY: API credentials are hard-coded in the notebook; they should be
# loaded from an environment variable or config file and rotated if this
# file has ever been shared.
CLIENT_ID = 'FUXKTDIP0GJ5PJD43PQZBMVKCRQQ240MZDC0IAQBWNRSIZHY' # your Foursquare ID
CLIENT_SECRET = '<KEY>' # your Foursquare Secret
VERSION = '20180605' # Foursquare API version

import requests # library to handle requests
# NOTE(review): pandas deprecated this import path in favor of
# pd.json_normalize (pandas >= 1.0) -- confirm the installed version.
from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe
# function that extracts the category of the venue
def get_category_type(row):
    """Return the name of the first category in a venue row, or None.

    Accepts either a flat ``'categories'`` key or the flattened
    ``'venue.categories'`` key produced by ``json_normalize``.
    """
    try:
        cats = row['categories']
    except:
        cats = row['venue.categories']

    # An empty category list means the venue is uncategorized.
    return cats[0]['name'] if len(cats) > 0 else None
# function to fetch venues for different locality from Latlng
def get_venues(latitude, longitude, radius, limit):
    """Query the Foursquare 'explore' endpoint around (latitude, longitude)
    and return a DataFrame of per-category venue counts.

    Parameters: radius in metres; limit caps the number of venues returned.
    Returns a DataFrame with columns ['categories', 'Total'].
    Uses the module-level CLIENT_ID/CLIENT_SECRET/VERSION credentials.
    Network I/O: depends on the live Foursquare v2 API.
    """
    url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
        CLIENT_ID,
        CLIENT_SECRET,
        VERSION,
        latitude,
        longitude,
        radius,
        limit)
    results = requests.get(url).json()
    venues = results['response']['groups'][0]['items']
    nearby_venues = json_normalize(venues) # flatten JSON
    # filter columns
    filtered_columns = ['venue.name', 'venue.categories', 'venue.location.lat', 'venue.location.lng']
    nearby_venues =nearby_venues.loc[:, filtered_columns]
    # filter the category for each row
    nearby_venues['venue.categories'] = nearby_venues.apply(get_category_type, axis=1)
    # clean columns (strip the 'venue.' prefix)
    nearby_venues.columns = [col.split(".")[-1] for col in nearby_venues.columns]
    # Count venues per category: 'Total' is a dummy column for groupby/count.
    nearby_venues['Total'] = 0
    df_final = nearby_venues.groupby(['categories'], as_index=False)['Total'].count()
    return df_final

# Category counts for the Kolkata city-centre coordinates found above.
kolkata_cats = get_venues(kol_latitude, kol_longitude, 2000, 100)
kolkata_cats
# ### Now let's try to loop through the Localities to get their Latitude and Longitude
# function that extracts the Latlng of the Locality
def get_latlng(index, locality):
    """Geocode *locality* and write its coordinates into row *index* of the
    global ``df`` (columns 'Latitude'/'Longitude').

    Falls back to a state-level query ('<locality>, WB') when the
    Kolkata-scoped query returns no match.

    NOTE(review): geopy >= 2.0 requires Nominatim(user_agent=...); confirm
    the installed geopy version.  Network I/O per call.
    """
    address = locality + ', Kolkata, WB'
    geolocator = Nominatim()
    location = geolocator.geocode(address)
    if location is None:
        # Retry with a broader query when the city-scoped lookup fails.
        address = locality + ', WB'
        location = geolocator.geocode(address)
    # Use .loc instead of chained indexing (df['col'][index]), which can
    # assign to a temporary copy and silently drop the update.
    df.loc[index, 'Latitude'] = location.latitude
    df.loc[index, 'Longitude'] = location.longitude
print('Done!')

# #### Adding two columns Latitude and Longitude to the df
# Placeholder float columns; get_latlng fills them in row by row below.
df['Latitude'] = 0.00
df['Longitude'] = 0.00
df

# #### Now let's loop through the Localities and update their Latitude and Longitude
# One geocoding request per locality (slow; network I/O).
for index, row in df.iterrows():
    #print(row['Locality'], row['BHK_1_Avg_Rent'], row['BHK_2_Avg_Rent'], row['BHK_3_Avg_Rent'])
    get_latlng(index, row['Locality'])
df

# ### Let's insert Kolkata's common top categories of facilities as column into the dataframe set initial count value to 0
for index, row in kolkata_cats.iterrows():
    df[row['categories']] = 0
df
# #### Now let's loop through the Localities and update the count of each facilities of that locality. Added new column if not already exists! Also adding and updating a column TotalFacilities which show total number of facilities (from all categories) available for that locality.
df['TotalFacilities'] = 0
for index, row in df.iterrows():
    # One Foursquare query per locality, same radius/limit as for Kolkata.
    loc_cats = get_venues(row['Latitude'], row['Longitude'], 2000, 100)
    for index2, row2 in loc_cats.iterrows():
        # Create the category column on first sight, then record the count.
        if row2['categories'] not in df.columns:
            df[row2['categories']] = 0
        # .loc replaces the original chained indexing (df[col][index]),
        # which can assign to a temporary copy and lose the update.
        df.loc[index, row2['categories']] = row2['Total']
    df.loc[index, 'TotalFacilities'] = sum(loc_cats['Total'])
df
# #### Now let's loop through the Localities to calculate and update the corresponding distance of each locality from Sector V, Kolkata.
import geopy.distance

# +
address = 'Sector V, Kolkata, WB'

# NOTE: this overwrites kol_latitude/kol_longitude (previously the Kolkata
# city-centre coordinates) with the Sector V coordinates.
geolocator = Nominatim()
location = geolocator.geocode(address)
kol_latitude = location.latitude
kol_longitude = location.longitude
print('The geograpical coordinate of {} are {}, {}.'.format(address, kol_latitude, kol_longitude))
# -
# ### Calculate and update the distance from each locality to Sector V in KM
# Sector V reference point (hard-coded; matches the geocoded value above).
SECTOR_V_COORDS = (22.5798447, 88.4376736)

# Create the column up front: the original assigned into df['Sector_V_Dist']
# without ever creating it, which raises a KeyError on the first row.
df['Sector_V_Dist'] = 0.00
for index, row in df.iterrows():
    locality_coords = (row['Latitude'], row['Longitude'])
    # geodesic replaces vincenty, which was removed in geopy 2.0.
    dist = round(geopy.distance.geodesic(SECTOR_V_COORDS, locality_coords).km, 2)
    # .loc avoids chained-indexing assignment, which can write to a copy.
    df.loc[index, 'Sector_V_Dist'] = dist
# ### Yes, we have achieved our final Dataframe with BHK_1_Avg_Rent, BHK_2_Avg_Rent and BHK_3_Avg_Rent, total number of each facilities, total number of facilities (from all categories) and the distance to the Locality from Sector V
df

# ### You are now able to choose the appropiate locality to rent out as your requirement and pocket!
# Compact view: rent columns plus the two derived metrics.
# (Variable name keeps the original 'summerized' spelling for compatibility.)
df_summerized = df[['Locality', 'BHK_1_Avg_Rent', 'BHK_2_Avg_Rent', 'BHK_3_Avg_Rent', 'Sector_V_Dist', 'TotalFacilities']]
df_summerized

# # Thank You!!!
| Capstone_Project_The_Battle_of_Neighborhoods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Pre-introduction to the class
#
# You should start by having your class go to [url](url) and clone the materials for this class, either by:
#
# 1. Cloning this repo with `git clone https://`
# 2. Clicking the 'download zip' button on the righthand side of the page
#
# While people are typing in urls and waiting for downloads, you can move on to:
#
# ## Introduction to the class
#
# Python is an interpreted, imperative, object oriented programming language whose primary motivation is to be easy to understand. We'll spend today talking about what each of those things mean, starting with:
#
# ### Interpreted
#
# Python is an interpreted language, as opposed to a compiled language. This means that, instead of being translated into a string of bits or bytes that is submitted directly to the machine, python code is submitted line by line to a program that decides what to do with each line. There are many ways to interact with this program. The simplest is:
#
# #### 1. Running files with python
#
# > Open up a terminal window and type this command exactly:
# ```
# python scripts/simple.py
# ```
# ! python ../scripts/simple.py
# > Python is reading the lines in from the file simple.py, interpreting them, and then executing them. If you've taken our introduction to UNIX class, you know that to a computer, there is essentially no difference between reading commands from a file and reading them from a REPL loop.
#
# #### 2. You can submit commands to python via a terminal-interpreter
#
# > Python ships with a basic interpreter that you can enter by typing `python` in a terminal. This should land you in a python environment with an introductory message and a prompt that look like this:
#
# ```
# Python 3.4.3 |Anaconda 2.3.0 (x86_64)| (default, Mar 6 2015, 12:07:41)
# [GCC 4.2.1 (Apple Inc. build 5577)] on darwin
# Type "help", "copyright", "credits" or "license" for more information.
# >>>
# ```
#
# > You can run the file by typing `from scripts import simple`. If you look in the file, you'll see that `simple` is just running the print function. We can do this ourselves by typing:
# Demo: print writes its argument to stdout, same as running simple.py did.
print('IOKN2K!')
# > A more popular terminal interpreter is iPython (which is developed here at Berkeley). Type `quit()` or press CNTRL+D to leave vanilla python, and once you are back in your bash terminal, type `ipython`. You should see a prompt that looks like this:
#
# ```
# Python 3.4.3 |Anaconda 2.3.0 (x86_64)| (default, Mar 6 2015, 12:07:41)
# Type "copyright", "credits" or "license" for more information.
#
# IPython 4.0.0 -- An enhanced Interactive Python.
# ? -> Introduction and overview of IPython's features.
# # %quickref -> Quick reference.
# help -> Python's own help system.
# object? -> Details about 'object', use 'object??' for extra details.
#
# In [1]:
# ```
#
# > IPython is a popular option for developers who are prototyping code, and need to try out different implentations in real time. This is also true of the people who develop IPython, who report adopting a two-window setup where one window is IPython and the other is a text editor (like Vi, Sublime, or Atom). Two fantastic features of IPython are tab complete and the documentation lookup operator. Try typing `pri <tab>` into your interpreter. It should auto-complete to `print`. Add a `?` immediately after `print` and hit enter. You should see the docstring like this:
#
# ```
# In [1]: print?
# Docstring:
# print(value, ..., sep=' ', end='\n', file=sys.stdout, flush=False)
#
# Prints the values to a stream, or to sys.stdout by default.
# Optional keyword arguments:
# file: a file-like object (stream); defaults to the current sys.stdout.
# sep: string inserted between values, default a space.
# end: string appended after the last value, default a newline.
# flush: whether to forcibly flush the stream.
# Type: builtin_function_or_method
# ```
#
# #### 3. You can run python as a kernel in another program
#
# > The same people who make IPython also make Jupyter, which provides a notebook-like format for python similar to Mathematica or Rmd, where code, code output, text, and graphics can be combined into a single filetype that can be viewed and run by others in real time. Quit IPython (do you remember how?) and type `jupyter notebook` into your terminal. It will display some output like this:
#
# ```
# [I 13:59:34.497 NotebookApp] Serving notebooks from local directory: /Users/dillonniederhut/python-for-everything
# [I 13:59:34.497 NotebookApp] 0 active kernels
# [I 13:59:34.497 NotebookApp] The IPython Notebook is running at: http://localhost:8888/
# [I 13:59:34.497 NotebookApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
# ```
#
# > And then will open up your default browser (or open a tab in the browser you already have running) and display the local filesystem. From here, you can start a new notebook by clicking the 'new' button on the righthand side of the page.
#
# **Important! You can't do anything in the terminal while the notebook is running.**
#
# > Notebooks are not typically used for development or production, but are very common in teaching environments. For example, this teaching materials for this class were all created in Jupyter.
#
# #### 4. You can run python in an IDE
#
# > IDE stands for 'Integrated Development Environment', and is a graphical user interface that typically includes an output window, a text editor with built-in `run` and `debug` functions, display windows for plots and filesystems, and some amount of declaration tracking. In this class, we'll be using [Rodeo](http://blog.yhathq.com/posts/introducing-rodeo.html), a lean IDE that uses IPython as its interpreter. There are many other choices, including:
# * IDLE - [Python's built-in IDE](https://docs.python.org/3.5/library/idle.html) (no one really uses this)
# * Spyder - [IDE that ships with Anaconda](https://github.com/spyder-ide/spyder) (similar to Rodeo)
# * PyCharm - [JetBrain's IDE](https://www.jetbrains.com/pycharm/) (feature-heavy; includes VCS support and cross-referencing)
# You should already have Rodeo installed - double click the icon (wherever you put it) to start up the program.
# ## Object Orientation
#
# Earlier, we said that python is an object oriented language. This means that python thinks about its code the same way that you think about the stuff around you. In the grand scheme of computer software, object orientation is a way of organizing code such that it is easy to update without breaking. This means grouping functions that serve a similar purpose into hierarchies. However, stating it this way is confusing and abstract.
#
# You can think about it this way: a soccer ball is an object. So is a basketball. They share a lot of things in common. It's simpler to know that balls generally bounce than to explicitly declare for every ball I ever see in my entire life whether it bounces or not. I can't bounce you, for example, but you didn't need to tell me that when I met you. If I came to believe that people were bounce-able, I would update my idea of people generally, not every person specifically.
type(4)  # the class of the literal 4 is int
# We call things like you and basketball objects, and they are in classes like human and ball. If I want to create a new object, like a football, I don't have to declare every single thing there is to know about footballs. I can say it inherits attributes from the class ball, except that it's an oblate spheroid instead of a sphere. Easy.
#
# In python, things like numbers are a class of objects. A specific number, however, needs a name. In much the same way, if I want to talk to you about the deflated Patriots' football, I can't just ask you about 'the ball' and expect you to know what I mean. In python, we call the specific ball under question an instance, and it needs a unique name for the duration of our discussion.
four = 4       # bind the name 'four' to an int instance
type(four)
four + 4       # evaluates to 8
# If I assign something else the name `four`, it overwrites the instance that `four` previously referred to.
four = 5       # rebinds the name; it no longer refers to 4
four + 4       # now evaluates to 9
# Because everything in python needs to have a unique name, managing what names are defined at any time becomes very important. Python comes with built in names like `print` and `open` that are already taken. Other functions and libraries don't exist in Python on their own, but need to be brought in with a function called `import`. As a simple example, let's import a library for math called `math`.
import math
# Now type in `math.` and press tab.
# ```
# math.acos math.acosh math.asin math.asinh
# math.atan math.atan2 math.atanh math.ceil
# math.copysign math.cos math.cosh math.degrees
# math.e math.erf math.erfc math.exp
# math.expm1 math.fabs math.factorial math.floor
# math.fmod math.frexp math.fsum math.gamma
# math.hypot math.isfinite math.isinf math.isnan
# math.ldexp math.lgamma math.log math.log10
# math.log1p math.log2 math.modf math.pi
# math.pow math.radians math.sin math.sinh
# math.sqrt math.tan math.tanh math.trunc
# ```
# What happened? What is the purpose of hiding `log10` behind `math`?
#
# All the names in use at any time are called your 'namespace'. Keeping functions in dot notation behind their library keeps you from polluting your namespace, or accidentally overriding other variables.
#
# > side note - if you are coming from R, the dot naming convention, e.g. `my.data`, can never be used because of this
#
# Dot notation doesn't only apply to objects in a library, it is also used for functions that are attached to an object (these are called 'methods'). Try tab complete on `four`.
#
# ```
# four.bit_length four.conjugate four.denominator four.from_bytes
# four.imag four.numerator four.real four.to_bytes
# ```
#
# You won't see `+` or `-` in the methods (they are actually there, just hidden from the user) because `four.add(5).add(6)` is less easy to read and understand than `four + 5 + 6`. Easy-to-understand code is the main design principle behind the python language. In fact, you can import the python philosophy into your session the same way you would import anything else.
import this
# Guido's insight in creating python was that code is read more frequently than it is written, so writing code that is easy to read should be a major principle in the design of the language. All that stuff about favoring explicit actions is so that someone reading your code isn't missing important stuff that is happening, but not written into the code in an obvious way.
#
# > side note - if you are a cool cat, you abbreviate Guido's name as `GvR`
#
# > side note - if you are a really cool cat, you call yourself a pythonista
#
# > side note - GvR named the python language after Monty Python, which should tell you something about pythonistas
#
# Likewise, the line about having only one way to perform an action makes code much easier to read. For example, you'll learn tomorrow about how to read and write to disk (so don't worry about taking notes on this). There are many ways that you *could* do this, but in python the *correct* way is:
#
# ```
# with open(filepath, 'r') as f:
# my_data = f.read()
# ```
#
# Any time you see code that looks something like this, you know exactly what it is doing, even if you haven't seen it before. For example, what do you think this code does?
#
# ```
# with open(filepath, 'r') as f:
# my_data = json.load(f)
# ```
#
# While we've assigned objects to names, we haven't really made them do much yet. Any object that modifies data, whether this is returned to the user or happens behind the scenes, is called a function. In python, functions are designated by parens attached to the object name.
math.sqrt(four)
# If you call a function without parens, python will print something about the function.
math.sqrt
# ## Data types
#
# Programming is all about data, and any given programming language will have different ways of dealing with different kinds of data. The constraints on how a programming language deals with data come from both the hardware and the users. On the hardware side, a computer operates on data at the binary level, so everything needs to be fundamentally composed of `1`s and `0`s. On the user side, manipulating numbers is (and should be!) very different from manipulating words. Python has five basic types of data.
#
# > side note - unlike many other languages, you do not need to tell python what type your data is (although you can anyway, and it is often a good idea to be 'defensive' about typing).
#
# ### 1. Integers
#
# We've already seen some of these. An integer in Python is exactly what it sounds like:
type(1)
# You can perform all of the basic operations on integers that you expect, like basic arithmetic:
3 + 2
3 - 2
3 * 2
3 / 2
# This last result should be very surprising to you if you come from a language like C++ or Java (or even an older version of Python!) - we just divided two integers and got something else! As of python 3, float division is standard even when the datatypes are integers. If you want integer division or integer modulus, you need to use `//` and `%`:
3 // 2
3 % 2
# You can also perform logical comparisons on integers, which return another kind of value (note that equality testing is done with two equal signs):
3 > 2
3 == 2
3 != 2
# Integers are often used in programming to count the number of times something has happened. In this case, you would initialize a variable with a value of zero:
counter = 0
# and then increment it:
counter += 1
print(counter)
# Run the code again. What happened? How do you think you would decrement a value?
# ### 2. Booleans
#
# Since everything in python is an object, and every object must belong to a class, the `True`s and `False`s that we are getting also have a type and associated methods.
type(True)
# Principally, bools are used for decision making, which you'll learn about tomorrow. They are also often used to indicate whether an attempt at doing something was successful or not. Bools can be evaluated in logic tables:
not True
True and False #or True & False
True or False #or True | False
# Internally, python stores values for `bool` type objects as a binary value, which means you can do some weird things with `True` and `False`
True * 3
4 / False
# Sometimes this works the way you want:
1 and True
# But sometime it does not:
True and 1
# ### 3. Floats and type coercion
#
# Floating point numbers are python's way of handling numbers that can't efficiently be represented as integers, either because they are very large or because they are not whole numbers. In python, you indicate that you want a number to be a float with a `.`
type(1.)
# Most of the numerical data you'll process will be as floating point numbers, which behave pretty much the same as integers in mathematical operations, but come with a few extra methods.
3.5 + 2.5
3.5 + 2.5
math.pi.as_integer_ratio()
# The ability to efficiently represent fractional numbers comes with a risk of imprecision, which grows for larger numbers.
100.2 - 100
1000000000.2 - 1000000000
# This can land you in trouble when making comparisons:
100.2 - 100 == 0.2
# When you mix integers and floating point number in a calculation, python casts the result as a float, even if the result is an integer
type(0.5 * 2)
type(3/2)
# You can coerce floating point numbers into integers, but note that you lose information when you do this.
int(4.5)
# What you might not have guessed is that you can also convert floating point numbers into True and False. Like JavaScript, Python has 'truthiness', which means that non-Boolean values can evaluate to `True` and `False` in certain situations. This is done to avoid obtuse syntax, like:
#
# ```
# if number_of_students != 0:
# have class
# ```
#
# You'll see this more tomorrow, but just to introduce it now:
number_of_students = 0.
if number_of_students:
    # never reached: 0.0 is falsy
    print('Class is in session!')
# Floating truthiness is that 0 is always `False`, but everything else (including negative numbers) is `True`.
number_of_students = -1.
if number_of_students:
    # reached: any non-zero float (even a negative one) is truthy
    print('Class is in session!')
# #### Now let's try a small challenge
#
# To check that you've understood this conversation about data types, objects, and ways to interact with python, we're going to have you do a small test challenge. Partner up with the person next to you - we're going to do this as a pair coding exercise - and choose which computer you are going to use.
#
# In a text editor or IDE on that computer, open `challenges/00_introduction/A_objects.py`. This is a python script file that you can run from the command line.
#
# In the file are comments describing some tasks. When you think you've completed them successfully, open a terminal window and navigate to `challenges/00_introduction`, then type `py.test test_A.py` and hit enter.
#
# > students may need to install pytest with `conda install pytest` or `pip install pytest`
#
# If you have completed everything successfully you will see:
#
# ```
# ============================== test session starts ===============================
# platform darwin -- Python 3.5.1, pytest-2.8.1, py-1.4.30, pluggy-0.3.1
# rootdir: /Users/dillon/Dropbox/dlab/workshops/pyintensive/challenges/00_introduction, inifile:
# collected 2 items
#
# test_A.py ..
#
# ============================ 2 passed in 0.01 seconds ============================
# ```
#
# If you have not, you'll see something like this:
#
# ```
# ============================== test session starts ===============================
# platform darwin -- Python 3.5.1, pytest-2.8.1, py-1.4.30, pluggy-0.3.1
# rootdir: /Users/dillon/Dropbox/dlab/workshops/pyintensive/challenges/00_introduction, inifile:
# collected 2 items
#
# test_A.py .F
#
# ==================================== FAILURES ====================================
# __________________________________ test_dillon ___________________________________
#
# def test_dillon():
# > assert isinstance(float, A.dillon)
# E AttributeError: module 'A_objects' has no attribute 'dillon'
#
# test_A.py:12: AttributeError
# ======================= 1 failed, 1 passed in 0.01 seconds =======================
# ```
#
# with information about which test failed and why. In this case, testing the object `dillon` failed because A_objects.py does not contain an object with the name `dillon`.
# ### 4. Strings and containers, part -1
#
# Strings are how Python represents character data like "Dillon" or "It was the best of times, it was the worst of times".
"A string can be in double quotes"
'Or single quotes'
'As long as ya\'ll are careful with "apostrophes" and quotations'  # the apostrophe must be escaped inside single quotes
# Just like with integers and floats, you can specify types with a function call. Just about anything can be coerced to a string:
str(4.0)
str(True)
# Internally, these are represented as bytes (which you can also access, but probably don't want to). Translating from bytes to string literals is known as "decoding", and translation in the other direction is called "encoding".
#
# Why am I telling you this? Because if you are here for web scraping or any kind of text analysis, you will immediately run into encode/decode errors. The issue here is that there are approximately one bajillion ways to convert between machine readable bytes and human readable characters. This means that some characters don't exist in some encodings:
'é'.encode('ascii')
# It also means that the the same character has a one-to-many mapping with bytes:
'é'.encode('utf-8')
'é'.encode('iso-8859-1')
# The encoding for any kind of string data depends on a combination of:
#
# 1. The program that created it
# 2. The operating system that hosts the program
# 3. The filetype where it was written to disk
#
# Infuriatingly, the encoding of characters is not always declared in a file, especially if the file was written some time before 2005.
#
# As a general rule, the characters on English keyboard keys are the same in all encodings. Most things `UNIX` and Python are either `ascii` or `utf-8`, which is forwards-compatible with ascii. If the file doesn't declare its encoding anywhere or it is really old, it is probably `iso-8859-1`, which is the American/Western-Europe encoding in Microsoft.
#
# Python has rich methods for string manipulation, even in the standard library, which makes it a popular language for text analysis. To get started, what do you think will happen if we use the `+` operator on two strings?
'Juan' + 'Shishido'
# + active=""
# Or the `*` operator?
# -
'Juan' * 3
# The `-` operator won't work. Pretend you are `GvR` and tell me why.
'Andrew' - 'Chong'
# There isn't a clear meaning behind subtracting a string from another string. Do we want one 'Chong' removed from Andrew? All of them? Or all of the individual characters in 'Chong'? This would need to be implicit in the code somewhere - no good!
#
# If you want to remove part of a string, you'll need to use a substitution method like:
# +
my_string = '<NAME> wears a beret'
my_string.replace('beret', '')
# -
# Of course, you could replace beret with something else
my_string = my_string.replace('beret', 'speedo')
print(my_string)
# Just like floats, strings are also truthy. In this case, a true string is just one that isn't empty:
bool(my_string)
bool('')
# Simple string transformations are easy in Python
my_string.lower()
my_string.title()
# Each transformation has an associated test
my_string.isupper()
# You can count the number of substrings in a string
my_string.count('e')
# Which means you can say
bool(my_string.count('e'))
# But that's weird to read, so instead we would want to write:
'e' in my_string
# This works because strings in Python are technically containers for characters (an empty string -- `''` -- is just a container with no characters in it. Because strings are containers, this means that each character has an index value. You can get the index value of a substring with:
my_string.find('speedo')
# The `18` here is giving you the index of 'speedo'. If we look for a string that isn't there, we see something a little unexpected.
my_string.find('Dillon')
# This tells us two things:
#
# 1. Dillon does not wear speedos
# 2. We can't use str.find() as a truthy value
#
# To find out why (why 2; why 1 should be self-explanatory), let's see how to grab things by index.
my_string[18]
# This gives us the `s` in `speedo`. You can grab more than one character by specifying a beginning and an end to the index like this:
#
# ```
# [start:end]
# ```
#
# Let's imagine we wanted to grab the whole word. How would we do that?
my_string[18:18+len('speedo')] # or my_string[18:18+6]
# You might have tried the following, which does not work:
my_string[18:18+len('peedo')] # or my_string[18:18+5]
# The reason is that python indices are only inclusive on one end. Mathematically, this is written as `[x,y)`. This keeps you from getting overlapping parts of a string when subsetting more than once, and makes it really easy to grab substrings just with
#
# ```
# [i : i + len(s)]
# ```
#
# because the distance between two points of an index is the same as the length of the object.
# Grab 'Dav' from `my_string`.
my_string[:3]
# The index starts at zero! Python is a 'zero-indexed' language, like most computer languages (but unlike R). This lets us grab items out of the start of a container just by knowing how long they are. Unfortunately, it means that if we call `str.find()` on 'Dav', it returns a position of `0`, so we can't coerce these results into a bool.
#
# For text analysis, you typically don't analyze entire containers of characters. More likely, you'll want to split strings on one of two features:
"It was the best of times \nIt was the worst of times".split('\n')
# If you don't specify what character to split on, Python uses whitespace by default.
my_string.split()
# These both turn strings (which remember, are containers), into a container of containers called a `list`. You'll learn more about these tomorrow.
# ### 5. Functions, the not-exactly-data-datatype
#
# As an object oriented language, functions in Python are also objects that can be assigned to names and passed to other functions.
type(max)
# Unlike other datatypes, functions in python need to be created with a keyword -- `def`. This is a normal thing in OOLs, but seems odd in Python because it lacks the `val` and `var` keywords for creating data.
# +
def increment(x):
    """Return *x* increased by one."""
    return 1 + x
increment
# -
increment(4)
# When you run a function, it creates its own namespace to keep object names in the function insulated from object names in the global environment. Imagine if every time you wanted to have a conversation, you had to invent new words for everything you wanted to talk about -- super dangerous! Namespaces help to enforce modularity in software, to keep functions from breaking when other things change.
#
# To see how this works, let's modify that increment function a little bit
def increment(x):
    # n is local to this call: it lives in the function's own namespace,
    # so rebinding n in the global namespace (see the cells below) has
    # no effect on the result.
    n = 1
    return x + n
increment(1)
n = 9000
increment(1)
# You can also use a function to create other functions.
# +
def make_incrementor(n):
    """Return a new function that adds *n* to its argument.

    The inner function keeps a reference to n (a closure), so every
    function built here remembers the n it was created with.
    """
    def incrementor(x):
        return x + n
    return incrementor
chapman = make_incrementor(-2)
chapman
# -
chapman(5)
# You can also give functions to other functions, just like any other kind of data. We have done this already by calling `type` on a function, but we can do this ourselves as well.
def my_apply(x, fun):
    """Call *fun* on the value *x* and return whatever it returns."""
    result = fun(x)
    return result
my_apply
my_apply(-1, chapman)
# #### Now let's try another small challenge!
#
# Pair up with your partner again - but this time, use the other person's computer. You are going to try the next challenge for today, which is in `challenges/00_introduction/B_syntax.py`.
#
# When you think you have met the challenge, run `py.test test_B.py`. If you don't pass the tests, be sure to pay attention to the error messages!
| instructor/day_zero.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Leaky Aquifer Test
# **This example is taken from AQTESOLV examples.**
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from ttim import *
# Set basic parameters:
# Pumping-test parameters (lengths in metres, discharge in m^3/d).
# Elevations are measured from the ground surface (z = 0) and are
# negative downward, hence the minus signs on zt and zb.
Q = 24464.06 # constant discharge in m^3/d
b1 = 6.096 # overlying aquitard thickness in m
b2 = 15.24 # aquifer thickness in m
zt = -b1 # elevation of the top of the aquifer in m
zb = -b1 - b2 # elevation of the bottom of the aquifer in m
rw = 0.1524 # well radius in m
# Load dataset of observation wells:
# +
# Each file holds one observation-well time series: column 0 is time,
# column 1 is head (units per the plot axis labels below -- d and m).
data1 = np.loadtxt('data/texas40.txt')
t1 = data1[:, 0]
h1 = data1[:, 1]
r1 = 12.191 #distance between obs1 to pumping well in m
data2 = np.loadtxt('data/texas80.txt')
t2 = data2[:, 0]
h2 = data2[:, 1]
r2 = 24.383 #distance between obs2 to pumping well in m
data3 = np.loadtxt('data/texas160.txt')
t3 = data3[:, 0]
h3 = data3[:, 1]
r3 = 48.766 #distance between obs3 to pumping well in m
# -
# Create conceptual model:
# One aquifer below a leaky aquitard (topboundary='semi').  kaq, Saq and
# c are placeholder values here; they are estimated by calibration below.
ml_0 = ModelMaq(kaq=10, z=[0, zt, zb], Saq=0.001, Sll=0, c=10, tmin=0.001, \
                tmax=1, topboundary='semi')
# Constant discharge Q starting at t=0; the well screen is in layer 0.
w_0 = Well(ml_0, xw=0, yw=0, rw=rw, tsandQ=[(0, Q)], layers=0)
ml_0.solve()
# Calibrate with three datasets simultaneously:
#unknown parameters: kaq, Saq, c, Sll
# Joint least-squares fit of kaq, Saq, Sll and c against all three
# observation series at once.
ca_0 = Calibrate(ml_0)
ca_0.set_parameter(name='kaq0', initial=10)
ca_0.set_parameter(name='Saq0', initial=1e-4)
# Sll has no named parameter, so it is tied directly to the model
# attribute; pmin=0 keeps it non-negative.
ca_0.set_parameter_by_reference(name='Sll0', parameter=ml_0.aq.Sll, \
                                initial=1e-4, pmin=0)
ca_0.set_parameter(name='c0', initial=100)
ca_0.series(name='OW1', x=r1, y=0, t=t1, h=h1, layer=0)
ca_0.series(name='OW2', x=r2, y=0, t=t2, h=h2, layer=0)
ca_0.series(name='OW3', x=r3, y=0, t=t3, h=h3, layer=0)
ca_0.fit(report=True)
display(ca_0.parameters)
print('RMSE:', ca_0.rmse())
# Modeled vs. observed heads on a semi-log time axis; the [0] index
# takes the first row (layer 0) of the head array returned by head().
hm1_0 = ml_0.head(r1, 0, t1)
hm2_0 = ml_0.head(r2, 0, t2)
hm3_0 = ml_0.head(r3, 0, t3)
plt.figure(figsize = (8, 5))
plt.semilogx(t1, h1, '.', label = 'OW1')
plt.semilogx(t1, hm1_0[0], label = 'ttim OW1')
plt.semilogx(t2, h2, '.', label = 'OW2')
plt.semilogx(t2, hm2_0[0], label = 'ttim OW2')
plt.semilogx(t3, h3, '.', label = 'OW3')
plt.semilogx(t3, hm3_0[0], label = 'ttim OW3')
plt.xlabel('time(d)')
plt.ylabel('head(m)')
plt.legend();
# Since the value of Sll is very close to the minimum limit (zero), Sll is set to 0 and removed from the calibration of the following model.
# Refit with Sll fixed at 0 (its default): the previous fit pushed Sll
# to its lower bound, so it is dropped from the calibration here.
ml_1 = ModelMaq(kaq=10, z=[0, zt, zb], Saq=0.001, Sll=0, c=10, tmin=0.001, \
                tmax=1, topboundary='semi')
w_1 = Well(ml_1, xw=0, yw=0, rw=rw, tsandQ=[(0, Q)], layers=0)
ml_1.solve()
#unknown parameters: kaq, Saq, c
ca_1 = Calibrate(ml_1)
ca_1.set_parameter(name='kaq0', initial=10)
ca_1.set_parameter(name='Saq0', initial=1e-4)
ca_1.set_parameter(name='c0', initial=100)
ca_1.series(name='OW1', x=r1, y=0, t=t1, h=h1, layer=0)
ca_1.series(name='OW2', x=r2, y=0, t=t2, h=h2, layer=0)
ca_1.series(name='OW3', x=r3, y=0, t=t3, h=h3, layer=0)
ca_1.fit(report=True)
display(ca_1.parameters)
print('RMSE:', ca_1.rmse())
# Same comparison plot as above, for the Sll-fixed model.
hm1_1 = ml_1.head(r1, 0, t1)
hm2_1 = ml_1.head(r2, 0, t2)
hm3_1 = ml_1.head(r3, 0, t3)
plt.figure(figsize = (8, 5))
plt.semilogx(t1, h1, '.', label = 'OW1')
plt.semilogx(t1, hm1_1[0], label = 'ttim OW1')
plt.semilogx(t2, h2, '.', label = 'OW2')
plt.semilogx(t2, hm2_1[0], label = 'ttim OW2')
plt.semilogx(t3, h3, '.', label = 'OW3')
plt.semilogx(t3, hm3_1[0], label = 'ttim OW3')
plt.xlabel('time(d)')
plt.ylabel('head(m)')
plt.legend();
# The model with fixed Sll has performance similar to the former model. The second model has an AIC value of -432.269, which is two units lower than that of the former model (-430.268). Thus, Sll should be set to zero (the default value) and kept out of the calibration.
# Try adding res & rc:
# Third variant: allow wellbore storage at the pumping well.
ml_2 = ModelMaq(kaq=10, z=[0, zt, zb], Sll=0, Saq=0.001, c=10, tmin=0.001, \
                tmax=1, topboundary='semi')
# res=0 fixes the well-skin resistance at zero (a trial fit gave ~3e-8,
# i.e. negligible); rc=None is replaced by a calibrated radius below.
w_2 = Well(ml_2, xw=0, yw=0, rw=rw, res=0, rc=None, tsandQ=[(0, Q)], layers=0)
ml_2.solve()
# Calibrate with three datasets simultaneously:
# When adding both res and rc into calibration and set the minimum limits as zero, the optimized res value is about 2.8e-08, which means adding res in the conceptual model has little effect on improving the performance. Thus, res is removed from the calibration.
#unknown parameters: kaq, Saq, c, rc
# Fit kaq, Saq, c and the wellbore-storage radius rc jointly.
ca_2 = Calibrate(ml_2)
ca_2.set_parameter(name='kaq0', initial=10)
ca_2.set_parameter(name='Saq0', initial=1e-4)
ca_2.set_parameter(name='c0', initial=10)
# rc is not a named model parameter, so tie it to the Well attribute.
ca_2.set_parameter_by_reference(name='rc', parameter=w_2.rc, initial=0)
ca_2.series(name='OW1', x=r1, y=0, t=t1, h=h1, layer=0)
ca_2.series(name='OW2', x=r2, y=0, t=t2, h=h2, layer=0)
ca_2.series(name='OW3', x=r3, y=0, t=t3, h=h3, layer=0)
ca_2.fit(report=True)
display(ca_2.parameters)
print('RMSE:', ca_2.rmse())
# Comparison plot for the wellbore-storage model.
hm1_2 = ml_2.head(r1, 0, t1)
hm2_2 = ml_2.head(r2, 0, t2)
hm3_2 = ml_2.head(r3, 0, t3)
plt.figure(figsize = (8, 5))
plt.semilogx(t1, h1, '.', label='OW1')
plt.semilogx(t1, hm1_2[0], label='ttim OW1')
plt.semilogx(t2, h2, '.', label='OW2')
plt.semilogx(t2, hm2_2[0], label='ttim OW2')
plt.semilogx(t3, h3, '.', label='OW3')
plt.semilogx(t3, hm3_2[0], label='ttim OW3')
plt.xlabel('time(d)')
plt.ylabel('head(m)')
plt.legend();
# ## Summary of values simulated by AQTESOLV
# Summary table: AQTESOLV's reported optimum vs. the two ttim fits.
t = pd.DataFrame(columns=['k [m/d]', 'Ss [1/m]', 'c [d]', 'rc'], \
                 index=['AQTESOLV', 'ttim', 'ttim-rc'])
t.loc['AQTESOLV'] = [224.726, 2.125e-4, 43.964, '-']
# the plain ttim fit has no rc, so its row is padded with '-'
t.loc['ttim'] = np.append(ca_1.parameters['optimal'].values, '-')
t.loc['ttim-rc'] = ca_2.parameters['optimal'].values
t['RMSE'] = [0.059627, ca_1.rmse(), ca_2.rmse()]
t
| pumpingtest_benchmarks/9_test_of_texas_hill.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Kosh Environment
# language: python
# name: kosh
# ---
# # Adding Data To Datasets
#
# This tutorial is a sequel to [Tutorial 00](Example_00_Open_Store_And_Add_Datasets.ipynb#Connect-to-store-(using-sina-local-file)) which should have been successfully run before this tutorial.
#
#
# ## Connect to store (using sina local file and asynchronous mode)
#
# +
from kosh import KoshStore
import os
# local tutorial sql file (created by the previous tutorial)
kosh_example_sql_file = "kosh_example.sql"
# connect to store in asynchronous mode
# NOTE(review): no asynchronous flag is actually passed here -- confirm
# whether KoshStore defaults to async or the markdown above is stale.
store = KoshStore(db_uri=kosh_example_sql_file)
# -
# ## Adding Files to Datasets
#
# Let's search datasets containing param1
from sina.utils import DataRange
# We're setting a min value less than the known min, to ensure all dataset come back
datasets = store.search(param1=DataRange(-1.e20))
print(len(datasets))
# Let's scan the directories and add relevant files to the datasets
# +
import os
import glob
# Optional progress bar: fall back to a plain list (no progress display)
# when tqdm is not installed.  Catch only ImportError -- a bare except
# would also hide unrelated errors raised during the import.
try:
    from tqdm.autonotebook import tqdm
except ImportError:
    tqdm = list
pth = "sample_files"
pbar = tqdm(datasets[:10])  # progress over the first 10 datasets only
for i, dataset in enumerate(pbar):
    # each dataset is expected to have a matching <name>.hdf5 sample file
    hdf5 = dataset.name+".hdf5"
    # NOTE(review): always true -- dataset.name + ".hdf5" is never empty
    if len(hdf5)>0:
        try:
            dataset.associate(os.path.join(pth,hdf5), mime_type="hdf5")
        except Exception: # file already here
            # deliberate best-effort: re-associating an existing file raises
            pass
# -
# List ids of data URIs associated with this dataset
dataset._associated_data_
# Let's search this datasets for all data with mimetype `hdf5`
dataset.search(mime_type="hdf5")
file = store._load(dataset._associated_data_[0])
file.uri
h5 = dataset.open(dataset._associated_data_[0])
h5
h5 = store.open(dataset._associated_data_[0])
h5
# You can associate many sources to a dataset
dataset.associate("some_other_file", mime_type="netcdf")
dataset._associated_data_
# Or many sources at once
dataset.associate(["file2", "file3"], mime_type="png")
dataset._associated_data_
# They do NOT have to be of the same type and/or have the same metadata
dataset.associate(["file5", "file6"], mime_type=["tiff", "jpg"], metadata=[{"name":"some"}, {"age":21}])
dataset._associated_data_
# ## Removing associated data
#
# Sometimes you might need to remove an association; this can be done via the `dissociate` command.
dataset.dissociate("file5")
dataset._associated_data_
| docs/source/jupyter/Example_01_Add_Data_To_Datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## CHANGELOG
# # 0.6.0
#
# * Support Python 3.8
# * `from_filename` replaced with `load`
# # 0.5.0
#
# * Mostly stable
# # 0.4.0
#
# * Fuzzy name completion.
# * A configurable extension system for magics.
# * `Interactive(shell=False)` is the default loader.
# # 0.3.2
#
# * Add `remote` loader. Load notebooks from remote urls.
# * Support a fuzzy name import system. Files with special characters and numbers are importable.
# * An IPython magic to allow relative imports during interactive computing.
# # 0.3.1
#
# * In loaders `Notebook`, `Interactive`, `Execute`, and `Parameterize`
# * Remove `Partial`, `Lazy`, and `NotebookTest` loaders.
# * The first Markdown cell imports as a docstrings, permitting doctests on markdown cells.
# * `Notebook(globals={})` passes global values to the module
# * `Notebook(dir="..")` will change the working directory and path.
# * The code is pure python and uses IPython when possible.
# * `ipython -m importnb notebook.ipynb` runs a notebook.
# # 0.2.9
#
# * Include `Partial`, `Lazy`, and `NotebookTest` loaders.
# * Transform markdown cells to literate block strings so they are included in the ast.
# * `__doc__`'s are extracted from the first markdown cell or normal source code from a code cell.
# * Export the python source code with `black`.
# * `Notebook.from_filename` is a loader for paths and strings.
# * Add `importnb.nbtest` for notebook testing tools.
# * Benchmark `importnb` against existing notebooks.
# * Include a `watchdog` trick to watch tests.
# * Extend the project to >= 3.4
# * Use nbviewer/github hierarchy for the docs.
# # 0.2.4
#
# * Use `tox` for testing
# * Use a source directory folder structure for pytest and tox testing.
# * Create a pytest plugin that discovers notebooks as tests. With `importnb` notebooks can be used as fixtures in pytest.
# * Install `importnb` as an IPython extension.
# * Support running notebooks as modules from the `ipython` command line
# * Create a `setuptools` command to allow notebooks as packages.
# # 0.2.1
#
# * `importnb` supports notebook inputs from pure python environments. Two compatible compilers were created from IPython and Python
# * `importnb.Partial` works appropriately by improving exceptions.
# * All of the IPython magic syntaxes were removed to support Pure Python.
# * The generated Python files are formatted with black.
# * Tests were added to:
#
# * Validate the line number in tracebacks
# * Test someone else's notebooks
# ### 0.1.4
# - Pypi supports markdown long_description with the proper mimetype in long_description_content_type.
# ### 0.1.3
# - Include the RST files in the `MANIFEST.in`.
# ### 0.1.2 (Unreleased)
# - Use RST files to improve the literacy of the pypi description.
# ### 0.1.1
# - Released on PyPi
# ### 0.0.2
# - Initial Testing Release
| changelog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#hide
#all_examples
# -
# # Examples on how to use tiling utilities
# ## Tiling dataset for object detection or instance segmentation
from pathlib import Path
from drone_detector.processing.tiling import *
import os, sys
import geopandas as gpd
import rasterio as rio
from rasterio import plot as rioplot
import random
import matplotlib.pyplot as plt
# This example uses deadwood data from Hiidenportti. For this purpose, we use only 5 tiles to speed things up.
# +
# Input: one GeoTIFF per virtual plot plus a matching GeoJSON of deadwood
# polygons; only the first 5 pairs are used to keep the example fast.
tile_folder = Path('../data/hiidenportti/raw/envelope_patches/')
vector_folder = Path('../data/hiidenportti/raw/envelope_vectors/')
outpath = Path('../data/hiidenportti/processed_example/')
tiles = sorted(os.listdir(tile_folder))[:5]
vectors = sorted([f for f in os.listdir(vector_folder) if f.endswith('geojson')])[:5]
assert len(tiles) == len(vectors)
# -
# These virtual plots are tiled into a 512 × 512 pixel grid. Then, the vector files are tiled using the same grid. By setting `min_area_pct` to 0.25, we discard all polygons that are cut so that their area is less than 25% of the original area. `Tiler.tile_vector` discards all grid cells that don't contain any polygons.
for t in tiles:
    # one output folder per virtual plot, named after the tif (minus '.tif')
    if not os.path.exists(outpath/t[:-4]): os.makedirs(outpath/t[:-4])
    shp_fname = t.replace('tif', 'geojson')
    tilesize = 512
    tiler = Tiler(outpath=outpath/t[:-4], gridsize_x=tilesize, gridsize_y=tilesize, overlap=(0,0))
    tiler.tile_raster(str(tile_folder/t))
    # polygons cut to less than 25% of their original area are discarded
    tiler.tile_vector(vector_folder/shp_fname, min_area_pct=0.25)
for d in os.listdir(outpath):
    print(f"""{d} was split into {len(os.listdir(outpath/d/'raster_tiles'))} raster cells and {len(os.listdir(outpath/d/"vector_tiles"))} vector cells""")
    # show one random cell: plain image left, polygons overlaid right
    ex_file = random.sample(os.listdir(outpath/d/'vector_tiles'), 1)[0]
    fig, axs = plt.subplots(1,2, figsize=(11,5))
    for a in axs:
        a.set_xticks([])
        a.set_yticks([])
    # NOTE(review): indentation reconstructed -- the dataset must stay
    # open for both show() calls, so they are kept inside the with block
    with rio.open(outpath/d/f"raster_tiles/{ex_file.replace('geojson', 'tif')}") as im:
        rioplot.show(im, ax=axs[0])
        mask = gpd.read_file(outpath/d/'vector_tiles'/ex_file)
        rioplot.show(im, ax=axs[1])
        mask.plot(ax=axs[1], column='groundwood')
# ## Tiling dataset for semantic segmentation
# +
# Same 5 input pairs as above, written to a separate output folder for
# the semantic-segmentation variant.
tile_folder = Path('../data/hiidenportti/raw/envelope_patches/')
vector_folder = Path('../data/hiidenportti/raw/envelope_vectors/')
outpath = Path('../data/hiidenportti/processed_unet_example/')
tiles = sorted(os.listdir(tile_folder))[:5]
vectors = sorted([f for f in os.listdir(vector_folder) if f.endswith('geojson')])[:5]
assert len(tiles) == len(vectors)
# -
# For semantic segmentation, we split the data into a 256 × 256 pixel grid. Vector files are then tiled and rasterized to the same grid, in such a way that raster images are saved. This method saves a target mask for each cell even if it doesn't contain any polygons.
for t in tiles:
    if not os.path.exists(outpath/t[:-4]): os.makedirs(outpath/t[:-4])
    shp_fname = t.replace('tif', 'geojson')
    # smaller 256 px cells for semantic segmentation
    tilesize = 256
    tiler = Tiler(outpath=outpath/t[:-4], gridsize_x=tilesize, gridsize_y=tilesize, overlap=(0,0))
    tiler.tile_raster(str(tile_folder/t))
    # rasterize the 'groundwood' column into one target mask per cell
    tiler.tile_and_rasterize_vector(vector_folder/shp_fname, column='groundwood')
for d in os.listdir(outpath):
    print(f"""{d} was split into {len(os.listdir(outpath/d/'raster_tiles'))} raster cells and {len(os.listdir(outpath/d/"rasterized_vector_tiles"))} vector cells""")
    # show one random cell: image left, rasterized target mask right
    ex_file = random.sample(os.listdir(outpath/d/'rasterized_vector_tiles'), 1)[0]
    fig, axs = plt.subplots(1,2, figsize=(11,5))
    for a in axs:
        a.set_xticks([])
        a.set_yticks([])
    with rio.open(outpath/d/"raster_tiles"/ex_file) as im:
        rioplot.show(im, ax=axs[0])
    with rio.open(outpath/d/"rasterized_vector_tiles"/ex_file) as mask:
        rioplot.show(mask, ax=axs[1])
# ## Tiling non-geospatial data
# So far `Tiler` doesn't really support data without a sensible geotransform. It is, however, possible to work around this, and proper support might be added in the future.
# Historic map example: plain PNGs with no (meaningful) georeferencing.
map_tile = '../data/historic_map/raw/kartta.png'
mask_tile = '../data/historic_map/raw/swampbin.png'
outpath = Path('../data/historic_map/processed')
tilesize = 224
map_tiler = Tiler(outpath=outpath, gridsize_x=tilesize, gridsize_y=tilesize, overlap=(0,0))
map_tiler.tile_raster(map_tile)
# (the mask is tiled later, after the GCP translation of the map tiles)
#map_tiler.raster_path = outpath/'mask_tiles'
#map_tiler.tile_raster(mask_tile)
from osgeo import gdal
# We have to manually set pixel coordinates as geocoordinates, provide GCPs and change the order of `projWin`
raster = gdal.Open(map_tile)
for row in (map_tiler.grid.itertuples()):
coords = list(row.geometry.exterior.coords)[:-1]
gcp_list = []
gcp_list.append(gdal.GCP(coords[0][0],coords[0][1],1,0,-0))
gcp_list.append(gdal.GCP(coords[1][0],coords[1][1],1,224,-0))
gcp_list.append(gdal.GCP(coords[2][0],coords[2][1],1,224,-224))
gcp_list.append(gdal.GCP(coords[3][0],coords[3][1],1,0,-224))
translate_kwargs = {'GCPs': gcp_list}
tempraster = gdal.Translate(f'{map_tiler.raster_path}/{row.cell}.tif', raster,
projWin=[row.geometry.bounds[0], row.geometry.bounds[1],
row.geometry.bounds[2], row.geometry.bounds[3]],
**translate_kwargs
)
tempraster = None
raster = None
map_tiler.raster_path = outpath/'mask_tiles'
map_tiler.tile_raster(mask_tile)
raster = gdal.Open(mask_tile)
for row in (map_tiler.grid.itertuples()):
coords = list(row.geometry.exterior.coords)[:-1]
gcp_list = []
gcp_list.append(gdal.GCP(coords[0][0],coords[0][1],1,0,-0))
gcp_list.append(gdal.GCP(coords[1][0],coords[1][1],1,224,-0))
gcp_list.append(gdal.GCP(coords[2][0],coords[2][1],1,224,-224))
gcp_list.append(gdal.GCP(coords[3][0],coords[3][1],1,0,-224))
translate_kwargs = {'GCPs': gcp_list,
'outputType': gdal.GDT_Byte}
tempraster = gdal.Translate(f'{map_tiler.raster_path}/{row.cell}.tif', raster,
projWin=[row.geometry.bounds[0], row.geometry.bounds[1],
row.geometry.bounds[2], row.geometry.bounds[3]],
**translate_kwargs
)
tempraster = None
raster = None
# Example data here is historical map from Evo area, and the target mask is for marshes and swamps.
ex_file = random.sample(os.listdir(outpath/'raster_tiles'), 1)[0]
fig, axs = plt.subplots(1,2, figsize=(11,5))
for a in axs:
a.set_xticks([])
a.set_yticks([])
with rio.open(outpath/"raster_tiles"/ex_file) as im:
rioplot.show(im, ax=axs[0])
with rio.open(outpath/"mask_tiles"/ex_file) as mask:
rioplot.show(mask, ax=axs[1])
| nbs/examples.tiling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Z-T0_TbLTPUL"
# IMPORT LIBRARY
# + id="wTGyBWRC_IoG"
import warnings
warnings.filterwarnings("ignore")
# Start with loading all necessary libraries
import numpy as np
import pandas as pd
from os import path
from PIL import Image
from wordcloud import WordCloud, ImageColorGenerator
import matplotlib.pyplot as plt
% matplotlib inline
# + [markdown] id="sSIhDJtwTVnw"
# INSTALL & IMPORT LIBRARY SASTRAWI UNTUK STOPWORDS BAHASA INDONESIA
# + id="GAlWkZc4TEsg" colab={"base_uri": "https://localhost:8080/"} outputId="1c337dc3-9219-4c72-fedc-b24b34ac2ef6"
# !pip install Sastrawi
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
# + [markdown] id="7uvfTnAVTgrx"
# DOWNLOAD FILE BERISI KUMPULAN KOMENTAR TERHADAP PILKADA DKI 2017
# + id="U-Ft3TjLAvMp"
# Load in the dataframe of tweet sentiments about the 2017 Jakarta (DKI) election.
df = pd.read_csv("https://raw.githubusercontent.com/rizalespe/Dataset-Sentimen-Analisis-Bahasa-Indonesia/master/dataset_tweet_sentiment_pilkada_DKI_2017.csv", index_col=0)
# + [markdown] id="5VU2WCcBTrDZ"
# HEAD
# + id="xBn97O1qA0IJ" colab={"base_uri": "https://localhost:8080/", "height": 219} outputId="b0416c36-2cfc-4f96-9c95-54401993945f"
# Looking at first 5 rows of the dataset
df.head()
# + [markdown] id="vcHFBzyhTuOU"
# RENAME THE 'Text Tweet' ATTRIBUTE TO 'comment'
# + id="jNg9yLV4CUhm" colab={"base_uri": "https://localhost:8080/", "height": 219} outputId="9d193de8-3398-4c16-b097-e39eb92527d9"
df = df.rename(columns={'Text Tweet': 'comment'})
df.head()
# + [markdown] id="B6vFvpsOT6U7"
# JUMLAH KATA
# + id="QhiFbXFBBvqy" colab={"base_uri": "https://localhost:8080/"} outputId="4615eba8-0d3e-4205-cc13-9e4662aeb1d7"
# Join every comment into one string for the word cloud.
text = " ".join(review for review in df.comment)
# Bug fix: len(df) counted ROWS, not words — count whitespace-separated tokens instead.
print ("There are {} words in the combination of all review.".format(len(text.split())))
# + [markdown] id="ziT7Lh-WUDle"
# BUAT VARIABEL STOPWORDS BAHASA INDONESIA
# + id="iGUmU2b6IArr" colab={"base_uri": "https://localhost:8080/"} outputId="4c7bb3de-723a-4fa7-b17e-8770c79272a2"
# Build the Indonesian stop-word list via Sastrawi.
factory = StopWordRemoverFactory()
stopwords = factory.get_stop_words()
print(stopwords)
# + [markdown] id="uGDwgE2pUOP0"
# PLOT THE IMAGE
# + id="VblAtHKhDAlW" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="bef9ea86-3d10-4847-f0ba-68da89670852"
# Generate a word cloud image, excluding the stop words collected above.
wordcloud = WordCloud(stopwords=stopwords, background_color="white").generate(text)
# Display the generated image:
# the matplotlib way:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
# + id="4mEwt8vdRN8L"
| Data_Visualization_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Given an array nums of n integers and an integer target, find three integers in nums such that the sum is closest to target. Return the sum of the three integers. You may assume that each input would have exactly one solution.
#
# Example:
#
# Given array nums = [-1, 2, 1, -4], and target = 1.
#
# The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
# +
nums = [1, 2, 4, 8, 16, 32, 64, 128]  # expected answer: 82
target = 82

def threeSumClosest(nums, target):
    """Return the sum of the three numbers in nums whose total is closest to target.

    Classic sort + two-pointer scan: O(n^2) time, sorts nums in place.
    Assumes len(nums) >= 3 and exactly one solution exists.
    """
    nums.sort()
    best = 0
    best_gap = float('inf')
    n = len(nums)
    for anchor in range(n - 2):
        # Identical anchors can only reproduce sums already examined.
        if anchor > 0 and nums[anchor] == nums[anchor - 1]:
            continue
        lo, hi = anchor + 1, n - 1
        while lo < hi:
            total = nums[anchor] + nums[lo] + nums[hi]
            if total == target:
                return total  # can't get closer than exact
            gap = abs(target - total)
            if gap < best_gap:
                best, best_gap = total, gap
            # Move the pointer that brings the sum toward the target.
            if total < target:
                lo += 1
            else:
                hi -= 1
    return best

threeSumClosest(nums, target)
| 16. 3Sum Closest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# - $(C)^{'}=0$
# - $(x^\mu)^{'}=\mu x^{\mu -1}$
# - $(\sin x)^{'}=\cos x$
# - $(\cos x)^{'}=-\sin x$
# - $(\tan x)^{'}=\sec^2x$
# - $(\cot x)^{'}=-\csc^2x$
# - $(\sec x)^{'}=\sec x\tan x$
# - $(\csc x)^{'}=-\csc x\cot x$
# - $(a^x)^{'}=a^x\ln a$
# - $(e^x)^{'}=e^x$
# - $(\log_{a}x)^{'}=\frac{1}{x\ln a}$
# - $(\ln x)^{'}=\frac{1}{x}$
# - $(\arcsin x)^{'}=\frac{1}{\sqrt{1-x^2}}$
# - $(\arccos x)^{'}=-\frac{1}{\sqrt{1-x^2}}$
# - $(\arctan x)^{'}=\frac{1}{1+x^2}$
# - $(arccot{x})^{'}=-\frac{1}{1+x^2}$
# #### 2-2
# - 6.
# - (1) $y^{'}=4(2x+5)\cdot (2x+5)^{'}=8(2x+5)^3$
# - (2) $y^{'}=-\sin(4-3x)\cdot (4-3x)^{'}=3\sin(4-3x)$
# - (3) $y^{'}=(-3x^2)^{'} e^{-3x^2}=-6xe^{-3x^2}$
# - (4) $y^{'}=\frac{1}{1+x^2}\cdot(1+x^2)^{'}=\frac{2x}{1+x^2}$
# - (5) $y^{'}=2\sin x\cdot(\sin x)^{'}=2\sin x\cos x = \sin 2x$
# - (6) $y^{'}=((a^2-x^2)^{\frac{1}{2}})^{'}=\frac{1}{2} (a^2-x^2)^{-\frac{1}{2}}\cdot (a^2-x^2)^{'}=-\frac{x}{\sqrt{a^2-x^2}}$
# - (7) $y^{'}=2x\sec ^2(x^2)$
# - (8) $y^{'}=\frac{e^x}{1+e^{2x}}$
# - (9) $y^{'}=2\arcsin x\cdot (\arcsin x ^ {'})=\frac{2\arcsin x}{\sqrt{1-x^2}}$
# - (10) $y^{'}=\frac{1}{\cos x} \cdot (\cos x)^{'}=-\tan x$
| Doc/Jupyter Notebook/Math_Save.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing Necessary Modules
import cv2
import numpy as np
import os
from matplotlib import pyplot as plt
import time
import mediapipe as mp
# ## Extract keypoints using MP Holistics
# MediaPipe Holistic model (face + pose + hands) and its drawing helpers.
mp_holistic = mp.solutions.holistic
mp_drawing = mp.solutions.drawing_utils
def mediapipe_detection(image, model):
    """Run a MediaPipe model on one BGR frame; return (BGR frame, results)."""
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # MediaPipe expects RGB
    rgb.flags.writeable = False                    # read-only hint speeds up process()
    detections = model.process(rgb)
    rgb.flags.writeable = True
    bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)     # back to OpenCV's BGR convention
    return bgr, detections
def draw_landmarks(image, results):
    """Overlay face, pose, and both hand landmarks on the frame (default style)."""
    parts = (
        (results.face_landmarks, mp_holistic.FACEMESH_CONTOURS),
        (results.pose_landmarks, mp_holistic.POSE_CONNECTIONS),
        (results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS),
        (results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS),
    )
    for landmarks, connections in parts:
        mp_drawing.draw_landmarks(image, landmarks, connections)
def draw_styled_landmarks(image, results):
    """Overlay landmarks with per-part colours, thicknesses, and circle radii."""
    # (landmarks, connections, landmark spec args, connection spec args)
    styles = (
        (results.face_landmarks, mp_holistic.FACEMESH_CONTOURS,
         ((80, 100, 10), 1, 1), ((80, 250, 120), 1, 1)),
        (results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,
         ((80, 20, 10), 2, 4), ((80, 40, 120), 2, 2)),
        (results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
         ((120, 20, 80), 2, 4), ((120, 40, 250), 2, 2)),
        (results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
         ((250, 120, 70), 2, 4), ((250, 70, 230), 2, 2)),
    )
    for landmarks, connections, (lm_c, lm_t, lm_r), (cn_c, cn_t, cn_r) in styles:
        mp_drawing.draw_landmarks(
            image, landmarks, connections,
            mp_drawing.DrawingSpec(color=lm_c, thickness=lm_t, circle_radius=lm_r),
            mp_drawing.DrawingSpec(color=cn_c, thickness=cn_t, circle_radius=cn_r),
        )
# ## Detecting Landmarks
# Live demo: read frames from the default webcam, run Holistic, draw landmarks.
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=0.5,min_tracking_confidence=0.5) as holistic:
    while cap.isOpened():
        ret,frame = cap.read()
        image,results = mediapipe_detection(frame, holistic)
        print(results)
        draw_styled_landmarks(image,results)
        cv2.imshow('check',image)
        # press 'q' to stop the capture loop
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
# Inspect the last results object and the captured frame.
results
len(results.face_landmarks.landmark)
mp_holistic.POSE_CONNECTIONS
frame
plt.imshow(frame)
# OpenCV frames are BGR; convert for matplotlib's RGB display.
plt.imshow(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
draw_styled_landmarks(frame,results)
plt.imshow(cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
results.pose_landmarks
# ## Extract Keypoint Values
# Exploration: flatten one landmark into a 4-vector (x, y, z, visibility).
for res in results.pose_landmarks.landmark:
    test = np.array([res.x, res.y, res.z, res.visibility])
test
results.pose_landmarks.landmark[-1]
# Collect every pose landmark the loop way.
pose = []
for res in results.pose_landmarks.landmark:
    values = np.array([res.x, res.y, res.z, res.visibility])
    pose.append(values)
pose
len(results.pose_landmarks.landmark)
len(pose)
len(results.right_hand_landmarks.landmark)
len(results.face_landmarks.landmark)
# Fixed-length vectors: missing parts are zero-padded (pose 33*4, face 468*3, hands 21*3).
pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(132)
face = np.array([[res.x, res.y, res.z] for res in results.face_landmarks.landmark]).flatten() if results.face_landmarks else np.zeros(1404)
left_hand = np.array([[res.x, res.y, res.z] for res in results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(63)
right_hand = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(63)
pose
face
left_hand
right_hand
def extract_keypoints(results):
    """Flatten all holistic landmarks into one 1662-value feature vector.

    Missing parts are replaced by zeros so the length is constant:
    pose 33*4 = 132, face 468*3 = 1404, each hand 21*3 = 63.
    """
    def _flatten(part, fields, width):
        # fields maps one landmark to its list of coordinates
        if not part:
            return np.zeros(width)
        return np.array([fields(p) for p in part.landmark]).flatten()

    pose = _flatten(results.pose_landmarks,
                    lambda p: [p.x, p.y, p.z, p.visibility], 132)
    face = _flatten(results.face_landmarks, lambda p: [p.x, p.y, p.z], 1404)
    left_hand = _flatten(results.left_hand_landmarks, lambda p: [p.x, p.y, p.z], 63)
    right_hand = _flatten(results.right_hand_landmarks, lambda p: [p.x, p.y, p.z], 63)
    return np.concatenate([pose, face, left_hand, right_hand])
extract_keypoints(results)
extract_keypoints(results).shape
# ## Folder setup to collect keypoints for each frame
# +
# Where keypoint arrays are stored; one subfolder per action per sequence.
DATA_PATH = os.path.join('Feature_Extraction')
actions = np.array(['hello', 'thanks', 'iloveyou'])
number_sequences = 30   # videos collected per action
sequence_length = 30    # frames per video
# -
# Create one folder per (action, sequence) to hold per-frame keypoint arrays.
for action in actions:
    for sequence in range(number_sequences):
        # exist_ok=True replaces the previous bare `except: pass`, which silently
        # swallowed every error; now only "already exists" is tolerated.
        os.makedirs(os.path.join(DATA_PATH, action, str(sequence)), exist_ok=True)
# ## Feature Extraction
# +
# Data collection: for each action, record `number_sequences` clips of
# `sequence_length` frames each, saving the keypoint vector of every frame.
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
    for action in actions:
        for sequence in range(number_sequences):
            for frame_num in range(sequence_length):
                ret, frame = cap.read()
                image, results = mediapipe_detection(frame, holistic)
                draw_styled_landmarks(image, results)
                # Wait
                if frame_num == 0:
                    # Pause 2 s at the start of each clip so the user can reset pose.
                    cv2.putText(image, 'START NEW ACTION', (120,200),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255, 0), 4, cv2.LINE_AA)
                    cv2.putText(image, 'Collecting frames for {} Video Number {}'.format(action, sequence), (15,12),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
                    cv2.imshow('Action Detection', image)
                    cv2.waitKey(2000)
                else:
                    cv2.putText(image, 'Collecting frames for {} Video Number {}'.format(action, sequence), (15,12),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
                    cv2.imshow('Action Detection', image)
                # Save this frame's 1662-value keypoint vector as <frame_num>.npy.
                keypoints = extract_keypoints(results)
                keypoint_path = os.path.join(DATA_PATH, action, str(sequence), str(frame_num))
                np.save(keypoint_path, keypoints)
                if cv2.waitKey(10) & 0xFF == ord('q'):
                    break
    cap.release()
    cv2.destroyAllWindows()
# -
cap.release()
cv2.destroyAllWindows()
# ## Data Preprocessing
# Map each action label to an integer class id.
classes = {label:num for num, label in enumerate(actions)}
classes
# Rebuild sequences from disk: each sample is a (30, 1662) window of frames.
sequences, labels = [], []
for action in actions:
    for sequence in range(number_sequences):
        window = []
        for frame_num in range(sequence_length):
            res = np.load(os.path.join(DATA_PATH, action, str(sequence), "{}.npy".format(frame_num)))
            window.append(res)
        sequences.append(window)
        labels.append(classes[action])
np.array(sequences).shape
np.array(labels).shape
X = np.array(sequences)
from tensorflow.keras.utils import to_categorical
# One-hot encode the integer labels.
y= to_categorical(labels).astype(int)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.05)
X_train.shape
X_test.shape
# ## LSTM Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
# Stacked LSTM classifier over 30-frame windows of 1662 keypoint features.
model = Sequential()
model.add(LSTM(64, return_sequences=True, activation='relu', input_shape=(30,1662)))
model.add(LSTM(128, return_sequences=True, activation='relu'))
# Last LSTM returns only the final state, feeding the dense head.
model.add(LSTM(64, return_sequences=False, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
# Softmax over the 3 action classes.
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])
model.summary()
model.fit(X_train, y_train, epochs=100)
# ## Predictions
# Predict on the held-out set and decode class ids back to action names.
final_result = model.predict(X_test)
actions[np.argmax(final_result[0])]
actions[np.argmax(y_test[0])]
# ## Save our model
model.save('lstm_model.h5')
# Convert one-hot rows to integer class lists for sklearn metrics.
y_pred = model.predict(X_test)
y_true = np.argmax(y_test, axis=1).tolist()
y_pred = np.argmax(y_pred, axis=1).tolist()
y_true
y_pred
# ## Performance Evaluation
# +
from sklearn.metrics import multilabel_confusion_matrix
multilabel_confusion_matrix(y_true,y_pred)
# +
from sklearn.metrics import accuracy_score
accuracy_score(y_true,y_pred)
# -
# ## Real Time Testing
# One BGR colour per action class for the probability bars.
colors = [(255,0,0), (0,255,0), (0,0,255)]
def prob_viz(res, actions, input_frame, colors):
    """Return a copy of the frame with one probability bar + label per action."""
    frame = input_frame.copy()
    for idx, prob in enumerate(res):
        top = 60 + idx * 40
        # Bar width scales with probability (0..1 -> 0..100 px).
        cv2.rectangle(frame, (0, top), (int(prob * 100), top + 30), colors[idx], -1)
        cv2.putText(frame, actions[idx], (0, top + 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
    return frame
# +
# Real-time inference: keep a rolling 30-frame keypoint window, predict on it,
# and append a new word to `sentence` only for confident, non-repeated actions.
sequence = []
sentence = []
threshold = 0.8  # minimum softmax probability before a prediction is accepted
cap = cv2.VideoCapture(0)
with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:
    while cap.isOpened():
        ret, frame = cap.read()
        image, results = mediapipe_detection(frame, holistic)
        print(results)
        draw_styled_landmarks(image, results)
        keypoints = extract_keypoints(results)
        sequence.append(keypoints)
        sequence = sequence[-30:]  # keep only the last 30 frames
        if len(sequence) == 30:
            res = model.predict(np.expand_dims(sequence, axis=0))[0]
            print(actions[np.argmax(res)])
            if res[np.argmax(res)] > threshold:
                # avoid repeating the same word back-to-back
                if len(sentence) > 0:
                    if actions[np.argmax(res)] != sentence[-1]:
                        sentence.append(actions[np.argmax(res)])
                else:
                    sentence.append(actions[np.argmax(res)])
            if len(sentence) > 5:
                sentence = sentence[-5:]  # cap the on-screen sentence length
            image = prob_viz(res, actions, image, colors)
        cv2.rectangle(image, (0,0), (640, 40), (245, 117, 16), -1)
        cv2.putText(image, ' '.join(sentence), (3,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
        cv2.imshow('Action_Recognition', image)
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
# -
cap.release()
cv2.destroyAllWindows()
| Demo/Action_Detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: sql
# name: python36
# ---
# # Train a model and use it for prediction
#
# Before running this notebook, run the auto-ml-sql-setup.ipynb notebook.
# 
# ## Set the default database
-- Work in the automl database created by the setup notebook.
USE [automl]
GO
# ## Use the AutoMLTrain stored procedure to create a forecasting model for the nyc_energy dataset.
-- Train on data before 2017-02-01; rows from 2017-01-01 on are flagged as the
-- validation split via is_validate_column. The trained model is stored in aml_model.
INSERT INTO dbo.aml_model(RunId, ExperimentName, Model, LogFileText, WorkspaceName)
EXEC dbo.AutoMLTrain @input_query='
SELECT CAST(timeStamp as NVARCHAR(30)) as timeStamp,
       demand,
       precip,
       temp,
       CASE WHEN timeStamp < ''2017-01-01'' THEN 0 ELSE 1 END AS is_validate_column
FROM nyc_energy
WHERE demand IS NOT NULL AND precip IS NOT NULL AND temp IS NOT NULL
and timeStamp < ''2017-02-01''',
@label_column='demand',
@task='forecasting',
@iterations=10,
@iteration_timeout_minutes=5,
@time_column_name='timeStamp',
@is_validate_column='is_validate_column',
@experiment_name='automl-sql-forecast',
@primary_metric='normalized_root_mean_squared_error'
# ## Use the AutoMLPredict stored procedure to predict using the forecasting model for the nyc_energy dataset.
# +
-- Fetch the newest stored model for this experiment and score the hold-out
-- period (timeStamp >= 2017-02-01) with it.
DECLARE @Model NVARCHAR(MAX) = (SELECT TOP 1 Model FROM dbo.aml_model
                                WHERE ExperimentName = 'automl-sql-forecast'
                                ORDER BY CreatedDate DESC)

EXEC dbo.AutoMLPredict @input_query='
SELECT CAST(timeStamp AS NVARCHAR(30)) AS timeStamp,
       demand,
       precip,
       temp
FROM nyc_energy
WHERE demand IS NOT NULL AND precip IS NOT NULL AND temp IS NOT NULL
AND timeStamp >= ''2017-02-01''',
@label_column='demand',
@model=@model
WITH RESULT SETS ((timeStamp NVARCHAR(30), actual_demand FLOAT, precip FLOAT, temp FLOAT, predicted_demand FLOAT))
# -
# ## List all the metrics for all iterations for the most recent training run.
# +
-- RunId column stores more than the run id; the first 43 characters are the id.
DECLARE @RunId NVARCHAR(43)
DECLARE @ExperimentName NVARCHAR(255)

SELECT TOP 1 @ExperimentName=ExperimentName, @RunId=SUBSTRING(RunId, 1, 43)
FROM aml_model
ORDER BY CreatedDate DESC

EXEC dbo.AutoMLGetMetrics @RunId, @ExperimentName
| how-to-use-azureml/automated-machine-learning/sql-server/energy-demand/auto-ml-sql-energy-demand.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="y78ktKU26fUY"
# # Rasterize tree canopy
#
# This notebook (re-)casts the tree canopy areas of each Census Tract as a raster.
#
# The outputs are binary tiffs for each Tract for 2010 and 2017 at 1-ft resolution with values for 'canopy', 'no canopy', and 'no data' (indicating area outside the tract boundary.
#
#
# #### Reusable components:
# - clip and combine vector geometries
# - create new vector geometry to fill 'empty' area within a boundary
# - recast vector geometry as xarray raster
# - interatively open sections of a larger geodatabase (within a mask area), processs, save as raster
#
# #### ToC:
#
# 1. install packages and read in data
# 2. test process and rasterize a single Census Tract
# 3. loop through all Census Tracts
#
# [ here the tiffs go over to R to be processesd with FRAGSTATS ]
#
# 4. read in FRAC results
#
# + [markdown] id="FeB1ZvBn8NbG"
# ## install packages and read in data
# + id="ZpxAoog1ddT2" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1647656847811, "user_tz": 240, "elapsed": 7968, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="c479deb7-0342-4def-852e-368a20c5e98b"
# Install gdal
# !apt install gdal-bin python-gdal python3-gdal
# Install rtree - Geopandas requirment
# !apt install python3-rtree
# Install Geopandas
# !pip install git+git://github.com/geopandas/geopandas.git
# Install descartes - Geopandas requirment
# !pip install descartes
# + colab={"base_uri": "https://localhost:8080/"} id="A8O7yF3MARQn" executionInfo={"status": "ok", "timestamp": 1647656916473, "user_tz": 240, "elapsed": 8701, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="ea803852-2dd5-481b-e0eb-5cafa113ace5"
pip install geopandas
# + id="6To1-_KPda71"
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import shapely
import numpy as np
# + id="4f9kAFYyAs26" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1646492789924, "user_tz": 300, "elapsed": 29019, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="48809e4b-accf-4235-a970-7b4ea86bf9b5"
pip install geocube
# + colab={"base_uri": "https://localhost:8080/"} id="M_SmCaNnz0QN" executionInfo={"status": "ok", "timestamp": 1646492791615, "user_tz": 300, "elapsed": 1696, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="536e06df-b044-4ea2-fa7d-8235f6184472"
from geocube.api.core import make_geocube
# + id="OS9zJ2Y6A1lu" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1646492796070, "user_tz": 300, "elapsed": 4458, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="e35c6263-e74f-4255-96db-e3d4e6c7529e"
pip install xarray
# + id="I231x81oA53x"
import xarray as xr
# + [markdown] id="8lDKiLBMDbBB"
# ## read in census tracts
# + id="52iZ58WwDi8z"
# downloaded from Census TIGER - all of NYS
ct = gpd.read_file('/content/drive/MyDrive/Advanced Spatial/project 1/tl_2010_36_tract10/tl_2010_36_tract10.shp')
# + id="vJ_BcL_pwxLA"
# filter to NYC (by counties): Bronx, Kings, New York, Queens, Richmond
ct = ct.loc[ct['COUNTYFP10'].isin(['005','047', '061', '081', '085'])]
# + id="XdQV4qQKuixA"
# to local projection (spec in feet) — NY State Plane Long Island
ct.to_crs('EPSG:2263', inplace=True)
# + id="jCBAuLBtxhcC"
ct = ct.rename(columns={'GEOID10':'GEOID'})
# + id="SEejCNhWYVeo"
# index by tract GEOID for easy .loc access later
ct = ct.set_index('GEOID')
# + [markdown] id="uxD8ClNKtmr_"
# ## test rasterize one census tract
# + id="rVyzxvZVRwRg"
# thank you !! https://www.linkedin.com/pulse/rasterize-polygons-geopandas-geocube-chonghua-yin
# + id="cgOqCP1hwSJr"
# Test the whole pipeline on a single tract before looping over all of them.
i = '36061007000'
# set tract geometry for maks and clip
masker = ct.loc[i,'geometry']
# open canopy file intersecting (within) tract boundary
ct_canopy = gpd.read_file('/content/drive/MyDrive/Advanced Spatial/project 1/NYC_TreeCanopyChange_2010_2017.gdb',
                          mask=masker)
# clip canopy to tract boundary
ct_canopy = ct_canopy.clip(masker)
# + id="1WvtqGuowssz"
# classify canopy areas for each year (2010 = no change + loss ; 2017 = no change + gain). Binary (1) as value for canopy present.
ct_canopy['cp_2010'] = ct_canopy['Class'].isin(['No Change','Loss']).astype(int)
ct_canopy['cp_2017'] = ct_canopy['Class'].isin(['No Change','Gain']).astype(int)
# + id="PmsF1k2fw11V"
# dissolve all geomotries where canopy is present into a single geometry
all_canopy_2010 = ct_canopy.loc[ct_canopy['cp_2010'].astype(bool)].unary_union
all_canopy_2017 = ct_canopy.loc[ct_canopy['cp_2017'].astype(bool)].unary_union
# + id="NY5Wt7lyxCMJ"
# create new geometry for no-canopy area from difference
no_canopy_2010 = ct.loc[[i]].difference(all_canopy_2010)
no_canopy_2017 = ct.loc[[i]].difference(all_canopy_2017)
# + id="Goy7VJDEw2oI"
# create new geodataframes for each year with just geometries for canopy and no canopy areas
canopy_2010_shapes = gpd.GeoSeries([all_canopy_2010, no_canopy_2010[0]], crs='EPSG:2263')
canopy_2017_shapes = gpd.GeoSeries([all_canopy_2017, no_canopy_2017[0]], crs='EPSG:2263')
# + id="7CbTV8i-xh-R"
# value 1 = canopy, 0 = no canopy
canopy_2010 = gpd.GeoDataFrame(data={'canopy_2010':[1,0]}, geometry=canopy_2010_shapes)
canopy_2017 = gpd.GeoDataFrame(data={'canopy_2017':[1,0]}, geometry=canopy_2017_shapes)
# + id="1wfzA8nsxjP1"
# create raster
raster_cp_2010 = make_geocube( # make_geocube to make xarray raster
    vector_data = canopy_2010,
    measurements = ['canopy_2010'], # variable value
    resolution = (1,1), # speced in crs units (feet)
    fill = -999 # fill NaNs (outside of tract boundary)
)
# + id="rFw8TMZZ8Yj8"
raster_cp_2017 = make_geocube(
    vector_data = canopy_2017,
    measurements = ['canopy_2017'],
    resolution = (1,1),
    fill = -999
)
# + id="ajpOjaFxyLp5"
# save files
filename_2010 = f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{i}_2010.tiff'
raster_cp_2010.rio.to_raster(filename_2010)
filename_2017 = f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{i}_2017.tiff'
raster_cp_2017.rio.to_raster(filename_2017)
# + id="7n7P6KNABKC7"
# plot
fig = plt.figure(figsize=(15, 13))
ax = fig.add_subplot(1, 1, 1)
# use xarray to plot raster; mask the -999 fill so it renders as NaN
da_grib = xr.where(raster_cp_2010['canopy_2010']<0, np.nan, raster_cp_2010['canopy_2010'])
da_grib.plot(ax=ax)
# + [markdown] id="-UfC4VaIC1zD"
# ## test loop on list of census tracts
# + id="QhGCjolrEES3"
import random
# + id="8KxysYXgEFdi"
# fixed seed so the sampled tracts are reproducible
random.seed(1)
# + id="VeAA9JLPEIkW"
# trial run on 10 random tracts before processing all of them
ct_sample = ct.sample(10)
# + id="k5EMVdbwYcOW"
ct_list = ct_sample.index.to_list()
# + colab={"base_uri": "https://localhost:8080/"} id="HHwytLHF_1hK" executionInfo={"status": "ok", "timestamp": 1646432052353, "user_tz": 300, "elapsed": 109, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="4527c083-eea4-4be7-980e-c2f67c66b779"
ct_list
# + id="0DZ2WEyfXIin" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1646432121401, "user_tz": 300, "elapsed": 60342, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="80b7c532-d09f-46d9-c612-34e5d80f5e11"
# %%time
# Same pipeline as the single-tract test above, applied to each sampled tract.
for i in ct_list:
    # set tract geomety for maks and clip
    geom = ct.loc[i,'geometry']
    # open canopy file intersecting (within) tract boundary
    ct_canopy = gpd.read_file('/content/drive/MyDrive/Advanced Spatial/project 1/NYC_TreeCanopyChange_2010_2017.gdb',
                              mask=geom)
    # clip canopy to tract boundary
    ct_canopy = ct_canopy.clip(geom)
    # compute 2010 and 2017 areas
    ct_canopy['cp_2010'] = ct_canopy['Class'].isin(['No Change','Loss']).astype(int)
    ct_canopy['cp_2017'] = ct_canopy['Class'].isin(['No Change','Gain']).astype(int)
    # merge canopy areas
    all_canopy_2010 = ct_canopy.loc[ct_canopy['cp_2010'].astype(bool)].unary_union
    all_canopy_2017 = ct_canopy.loc[ct_canopy['cp_2017'].astype(bool)].unary_union
    # compute non-canopy area
    no_canopy_2010 = ct.loc[[i]].difference(all_canopy_2010)
    no_canopy_2017 = ct.loc[[i]].difference(all_canopy_2017)
    # make geodataframes
    canopy_2010_shapes = gpd.GeoSeries([all_canopy_2010, no_canopy_2010[0]], crs='EPSG:2263')
    canopy_2017_shapes = gpd.GeoSeries([all_canopy_2017, no_canopy_2017[0]], crs='EPSG:2263')
    canopy_2010 = gpd.GeoDataFrame(data={'canopy_2010':[1,0]}, geometry=canopy_2010_shapes)
    canopy_2017 = gpd.GeoDataFrame(data={'canopy_2017':[1,0]}, geometry=canopy_2017_shapes)
    # rasterize (1 ft resolution; -999 marks area outside the tract)
    raster_cp_2010 = make_geocube(
        vector_data = canopy_2010,
        measurements = ['canopy_2010'],
        resolution = (1,1),
        fill = -999)
    raster_cp_2017 = make_geocube(
        vector_data = canopy_2017,
        measurements = ['canopy_2017'],
        resolution = (1,1),
        fill = -999)
    # save tiffs
    filename_2010 = f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{i}_2010.tiff'
    raster_cp_2010.rio.to_raster(filename_2010)
    filename_2017 = f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{i}_2017.tiff'
    raster_cp_2017.rio.to_raster(filename_2017)
# + [markdown] id="6V3NTS6QLoIq"
# ## now _all_ the census tracts
# + id="EQXUt-9zg0sJ"
ct_list = ct.index.to_list()
# + id="O9XSMaEnLzGQ"
# %%time
# keep track of successes and errors
cts_processed = []
errors = []
for i in ct_list:
# set tract geomety for mask and clip
masker = ct.loc[i,'geometry']
# open canopy file intersecting (within) tract boundary
ct_canopy = gpd.read_file('/content/drive/MyDrive/Advanced Spatial/project 1/NYC_TreeCanopyChange_2010_2017.gdb',
mask=masker)
# clip canopy to tract boundary
ct_canopy = ct_canopy.clip(masker)
# compute 2010 and 2017 areas
ct_canopy['cp_2010'] = ct_canopy['Class'].isin(['No Change','Loss']).astype(int)
ct_canopy['cp_2017'] = ct_canopy['Class'].isin(['No Change','Gain']).astype(int)
# merge canopy areas
all_canopy_2010 = ct_canopy.loc[ct_canopy['cp_2010'].astype(bool)].unary_union
all_canopy_2017 = ct_canopy.loc[ct_canopy['cp_2017'].astype(bool)].unary_union
# compute non-canopy area
# to catch errors where there is no canopy so the difference fails
try:
no_canopy_2010 = ct.loc[[i]].difference(all_canopy_2010)
no_canopy_2017 = ct.loc[[i]].difference(all_canopy_2017)
except TypeError:
errors.append(i)
continue
# make geodataframes
canopy_2010_shapes = gpd.GeoSeries([all_canopy_2010, no_canopy_2010[0]], crs='EPSG:2263')
canopy_2017_shapes = gpd.GeoSeries([all_canopy_2017, no_canopy_2017[0]], crs='EPSG:2263')
canopy_2010 = gpd.GeoDataFrame(data={'canopy_2010':[1,0]}, geometry=canopy_2010_shapes)
canopy_2017 = gpd.GeoDataFrame(data={'canopy_2017':[1,0]}, geometry=canopy_2017_shapes)
# rasterize
raster_cp_2010 = make_geocube(
vector_data = canopy_2010,
measurements = ['canopy_2010'],
resolution = (1,1),
fill = -999)
raster_cp_2017 = make_geocube(
vector_data = canopy_2017,
measurements = ['canopy_2017'],
resolution = (1,1),
fill = -999)
# save tiffs
filename_2010 = f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{i}_2010.tiff'
raster_cp_2010.rio.to_raster(filename_2010)
filename_2017 = f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{i}_2017.tiff'
raster_cp_2017.rio.to_raster(filename_2017)
cts_processed.append(i)
# + id="DaK-w3BTm3pm"
errors
# + id="w7Fb8pEgp8Y0"
# when completed, check which files were (and weren't created)
from os.path import exists
# + id="GXTgXuwR3vrz"
# For each tract id, record whether BOTH the 2010 and 2017 rasters were written to disk
cts_processed = [
    exists(f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{tract}_2010.tiff')
    and exists(f'/content/drive/MyDrive/Advanced Spatial/project 1/rasters/{tract}_2017.tiff')
    for tract in ct_list
]
# + id="OckN3eLF5beA"
ct['tiff processed'] = cts_processed
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="ZdznKIPl6M0g" executionInfo={"status": "ok", "timestamp": 1646497700804, "user_tz": 300, "elapsed": 2019, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="e543bd27-4064-4970-b7cc-b8579f333f53"
ct[ct['tiff processed']].plot()
# hm just a problem with the bronx?
# + colab={"base_uri": "https://localhost:8080/"} id="oHJkyCOfHwTQ" executionInfo={"status": "ok", "timestamp": 1646501314121, "user_tz": 300, "elapsed": 334, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="8829b17a-cf38-4ce8-d259-849bd0d726df"
ct[ct['COUNTYFP10'] == '061']['tiff processed'].all()
# all on Manhattan is there - -I'll use that
# + id="-hYyEPNyHRRb"
ct[ct['COUNTYFP10'] == '061'].index.to_list()
# + [markdown] id="RAhWg5nnvMni"
# ... here I process the PARFRAC in R ...
# ...
# + [markdown] id="3AXuwbQyvXX-"
# ## read in FRAC results
# + id="HoZt_w20vbcZ"
# Load the PARFRAC results computed externally in R; the first column holds tract ids
frac = pd.read_csv('/content/drive/MyDrive/Advanced Spatial/project 1/frac_result.csv', index_col=0)
# + id="EaQY3D0Zv0kB"
# Rename the tract-id column to the census GEOID convention used elsewhere in this notebook
frac = frac.rename(columns={'ct_list':'GEOID'})
# + id="i0N6v3kCxuDh"
# Cast GEOIDs to strings — presumably to align with `ct`'s string index (confirm)
frac['GEOID'] = frac['GEOID'].astype(str)
# + id="jJW1EJiZx0p7"
frac = frac.set_index('GEOID')
# + id="v8JUoe6fxGP-"
# Attach tract geometries (aligned on the shared GEOID index) so `frac` can be mapped
frac = gpd.GeoDataFrame(data = frac, geometry = ct['geometry'])
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="mTJWGLn4x83O" executionInfo={"status": "ok", "timestamp": 1647656951612, "user_tz": 240, "elapsed": 686, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="cfcf57b6-595d-4955-bd3f-1e6ab04c3df1"
frac.plot(column = 'canopy_2010_results', legend=True)
# + colab={"base_uri": "https://localhost:8080/"} id="kJlvMScoyI2Q" executionInfo={"status": "ok", "timestamp": 1647656957092, "user_tz": 240, "elapsed": 153, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="ae49054a-1f31-4622-9edf-866b646244f9"
frac.isna().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="YavagNZxy9Cd" executionInfo={"status": "ok", "timestamp": 1647656975298, "user_tz": 240, "elapsed": 144, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "06243718382745054029"}} outputId="df93029e-938a-464e-9e19-7e18e3a85772"
len(ct[ct['COUNTYFP10'] == '061'].index.to_list())
| notebooks to publish/3. rasterize tree canopy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append("../")
from math import sqrt
from sklearn.metrics import mean_squared_error, mean_absolute_error
import pandas as pd
import numpy as np
from reco_utils.dataset.python_splitters import python_stratified_split
# -
# Ratings CSV is read headerless — column names are supplied explicitly
data = pd.read_csv('../Data/ratings_compressed.csv', names=['user_id', 'profile_id', 'rating'])
data['rating'] = data['rating'].astype(np.float32)
# 75/25 split stratified per user (see reco_utils docs); fixed seed for reproducibility
train, test = python_stratified_split(data, ratio=0.75, col_user='user_id', col_item='profile_id', seed=45)
# Baseline model: predict the global mean training rating for every test row
test['baseline'] = train['rating'].mean()
# RMSE of the constant-mean baseline
sqrt(mean_squared_error(test['rating'].values, test['baseline'].values))
# MAE of the constant-mean baseline
mean_absolute_error(test['rating'].values, test['baseline'].values)
| baseline/baseline_avg_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) <NAME>, <NAME> 2015. Thanks to NSF for support via CAREER award #1149784.
# -
# [@LorenaABarba](https://twitter.com/LorenaABarba)
# 12 steps to Navier-Stokes
# =====
# ***
# You see where this is going ... we'll do 2D diffusion now and next we will combine steps 6 and 7 to solve Burgers' equation. So make sure your previous steps work well before continuing.
# Step 7: 2D Diffusion
# ----
# ***
# And here is the 2D-diffusion equation:
#
# $$\frac{\partial u}{\partial t} = \nu \frac{\partial ^2 u}{\partial x^2} + \nu \frac{\partial ^2 u}{\partial y^2}$$
#
# You will recall that we came up with a method for discretizing second order derivatives in Step 3, when investigating 1-D diffusion. We are going to use the same scheme here, with our forward difference in time and two second-order derivatives.
# $$\frac{u_{i,j}^{n+1} - u_{i,j}^n}{\Delta t} = \nu \frac{u_{i+1,j}^n - 2 u_{i,j}^n + u_{i-1,j}^n}{\Delta x^2} + \nu \frac{u_{i,j+1}^n-2 u_{i,j}^n + u_{i,j-1}^n}{\Delta y^2}$$
#
# Once again, we reorganize the discretized equation and solve for $u_{i,j}^{n+1}$
# $$u_{i,j}^{n+1} = u_{i,j}^n + \frac{\nu \Delta t}{\Delta x^2}(u_{i+1,j}^n - 2 u_{i,j}^n + u_{i-1,j}^n) + \frac{\nu \Delta t}{\Delta y^2}(u_{i,j+1}^n-2 u_{i,j}^n + u_{i,j-1}^n)$$
# +
import numpy
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D  ##library for 3d projection plots
from matplotlib import cm  ##cm = "colormap" for changing the 3d plot color palette
# %matplotlib inline

###variable declarations
nx = 31        # grid points in x
ny = 31        # grid points in y
nt = 17        # number of timesteps
nu = .05       # diffusion coefficient (viscosity)
dx = 2/(nx-1)
dy = 2/(ny-1)
sigma = .25    # stability safety factor
dt = sigma*dx*dy/nu   # timestep chosen for numerical stability

x = numpy.linspace(0,2,nx)
y = numpy.linspace(0,2,ny)
u = numpy.ones((ny,nx))   ##ny x nx array of 1's holding the solution
un = numpy.ones((ny,nx))  ##scratch array for the previous timestep

###Assign initial conditions
# BUG FIX: slice indices must be integers — float slices such as
# u[.5/dy:1/dy+1, ...] raise TypeError on modern NumPy/Python.
u[int(.5/dy):int(1/dy+1), int(.5/dx):int(1/dx+1)] = 2  ##set hat function I.C. : u(.5<=x<=1 && .5<=y<=1 ) is 2

fig = pyplot.figure()
# fig.gca(projection='3d') was removed in Matplotlib 3.6; add_subplot is the supported API
ax = fig.add_subplot(projection='3d')
X,Y = numpy.meshgrid(x,y)
surf = ax.plot_surface(X,Y,u, rstride=1, cstride=1, cmap=cm.coolwarm,
        linewidth=0, antialiased=False)
ax.set_xlim(0,2)
ax.set_ylim(0,2)
ax.set_zlim(1,2.5);
# -
# $$u_{i,j}^{n+1} = u_{i,j}^n + \frac{\nu \Delta t}{\Delta x^2}(u_{i+1,j}^n - 2 u_{i,j}^n + u_{i-1,j}^n) + \frac{\nu \Delta t}{\Delta y^2}(u_{i,j+1}^n-2 u_{i,j}^n + u_{i,j-1}^n)$$
# +
###Run through nt timesteps
def diffuse(nt):
    """Re-set the hat-function initial condition, advance the 2D diffusion
    equation nt+1 forward-time/centered-space steps, and plot the result.

    Uses the module-level grid/state globals (u, un, nu, dt, dx, dy, X, Y).
    """
    # BUG FIX: slice indices must be integers — the original float slices
    # (u[.5/dy:1/dy+1, ...]) raise TypeError on modern NumPy/Python.
    u[int(.5/dy):int(1/dy+1), int(.5/dx):int(1/dx+1)] = 2

    for n in range(nt+1):
        un = u.copy()
        # Vectorized interior update: explicit Euler in time, 2nd-order centered in space
        u[1:-1,1:-1]=un[1:-1,1:-1]+nu*dt/dx**2*(un[1:-1,2:]-2*un[1:-1,1:-1]+un[1:-1,0:-2])+\
            nu*dt/dy**2*(un[2:,1:-1]-2*un[1:-1,1:-1]+un[0:-2,1:-1])
        # Dirichlet boundary conditions: u = 1 along all four edges
        u[0,:]=1
        u[-1,:]=1
        u[:,0]=1
        u[:,-1]=1

    fig = pyplot.figure()
    # fig.gca(projection='3d') was removed in Matplotlib 3.6; add_subplot is the supported API
    ax = fig.add_subplot(projection='3d')
    surf = ax.plot_surface(X,Y,u[:], rstride=1, cstride=1, cmap=cm.coolwarm,
        linewidth=0, antialiased=True)
    ax.set_zlim(1,2.5)
# -
diffuse(10)
diffuse(14)
diffuse(50)
# ## Learn More
# The video lesson that walks you through the details for Steps 5 to 8 is **Video Lesson 6** on YouTube:
from IPython.display import YouTubeVideo
YouTubeVideo('tUg_dE3NXoY')
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom stylesheet and return it as a displayable HTML object."""
    # Use a context manager so the file handle is closed deterministically
    # (the original left the anonymous handle open until garbage collection).
    with open("../styles/custom.css", "r") as css_file:
        return HTML(css_file.read())
css_styling()
# > (The cell above executes the style for this notebook.)
| lessons/09_Step_7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Perform regionalization when no parameter set is available
#
# Here we call the Regionalization WPS service to provide estimated streamflow (best estimate and ensemble) at an ungauged site using three pre-calibrated hydrological models and a large hydrometeorological database with catchment attributes (Extended CANOPEX). Multiple regionalization strategies are allowed.
# +
from birdy import WPSClient
from ravenpy.utilities.testdata import get_file
import datetime as dt
from urllib.request import urlretrieve
import xarray as xr
import numpy as np
from matplotlib import pyplot as plt
import json
import os
# Set environment variable WPS_URL to "http://localhost:9099" to run on the default local server
url = os.environ.get(
"WPS_URL", "https://pavics.ouranos.ca/twitcher/ows/proxy/raven/wps"
)
wps = WPSClient(url)
# -
# Get the documentation for the method's usage:
help(wps.regionalisation)
# +
# Forcing files. This file should only contain weather data (tmin, tmax, rain, snow, pet (if desired), etc.
# No streamflow is required. This is a link to a string, but you can submit a string to your netcdf file directly.
ts = str(
    get_file("raven-gr4j-cemaneige/Salmon-River-Near-Prince-George_meteo_daily.nc")
)

# Model configuration parameters for the ungauged site
config = dict(
    start_date=dt.datetime(2000, 1, 1),
    end_date=dt.datetime(2002, 1, 1),
    area=4250.6,       # catchment area (presumably km² — confirm with Raven docs)
    elevation=843.0,   # mean elevation (presumably metres — confirm with Raven docs)
    latitude=54.4848,
    longitude=-123.3659,
    method="PS", # One of the regionalization methods described above
    model_name="HMETS", # One of the three allowed models: HMETS, GR4JCN and MOHYSE
    min_nse=0.7, # Minimum calibration NSE required to be considered a donor (for selecting good donor catchments)
    ndonors=5, # Number of donors we want to use. Usually between 4 and 8 is a robust number.
    properties=json.dumps({"latitude": 54.4848, "longitude": -123.3659, "forest": 0.4}),
)

# Let's call the model with the timeseries, model parameters and other configuration parameters
resp = wps.regionalisation(ts=ts, **config)
# -
# And get the response
# With `asobj` set to False, only the reference to the output is returned in the response.
# Setting `asobj` to True will retrieve the actual files and copy the locally.
[hydrograph, ensemble] = resp.get(asobj=True)
# The `hydrograph` and `ensemble` outputs are netCDF files storing the time series. These files are opened by default using `xarray`, which provides convenient and powerful time series analysis and plotting tools.
print(hydrograph.q_sim)
# +
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
hydrograph.q_sim.plot()
# -
print("Max: ", hydrograph.q_sim.max())
print("Mean: ", hydrograph.q_sim.mean())
print("Monthly means: ", hydrograph.q_sim.groupby("time.month").mean(dim="time"))
# Now we can also see the results coming from the 5 donors using the 'ensemble' variable
# Plot the simulations from the 5 donor parameter sets
ensemble.q_sim.isel(nbasins=0).plot.line(hue="realization")
plt.show()
# You can also obtain the data in netcdf format directly by changing asobj to False:
[hydrograph_path, ensemble_path] = resp.get(asobj=False)
# BUG FIX: the original printed the previously retrieved xarray objects
# (`hydrograph`/`ensemble`), so the netCDF paths fetched just above were never shown.
print(hydrograph_path)
print(ensemble_path)
print(ensemble.q_sim)
| docs/source/notebooks/Perform_Regionalization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab={} colab_type="code" id="RiWOqVGJpnR5" outputId="fdde8e36-e354-4943-d027-e8a691e184ec"
# # Introduction to NumPy
# + [markdown] colab_type="text" id="kJQgCx-EpnRm"
# Adapted by [Nimblebox Inc.](https://www.nimblebox.ai/) from the `Data-X: Introduction to Numpy` tutorial by [<NAME>](https://alex.fo/) and [<NAME>](https://vcresearch.berkeley.edu/faculty/ikhlaq-sidhu), [`Python Data Science Handbook`](http://shop.oreilly.com/product/0636920034919.do) by [<NAME>](https://github.com/jakevdp/PythonDataScienceHandbook) and [`NumPy Documentation`](https://numpy.org/doc/1.17/index.html).
#
# <img style="float:left; margin-left: 50px" src="https://user-images.githubusercontent.com/50221806/86498175-86c40400-bd39-11ea-90de-1315a043fd45.png" alt="Numpy Logo" width="300" height="400">
#
# <img style="float:right; margin-right: 50px" src="https://media-exp1.licdn.com/dms/image/C4E1BAQH3ErUUfLXoHQ/company-background_10000/0?e=2159024400&v=beta&t=9Z2hcX4LqsxlDd2BAAW8xDc-Obfvk_rziT1AkPKBcCc" alt="Nimblebox Logo" width="500" height="600">
# + [markdown] colab_type="text" id="pobAbXhxpnRm"
# ## Introduction:
#
# NumPy stands for **Numerical Python** and it is the fundamental package for scientific computing in Python. It is a package that lets you efficiently store and manipulate numerical arrays. It contains among other things:
#
# * a powerful N-dimensional array object
# * sophisticated (broadcasting) functions
# * tools for integrating C/C++ and Fortran code
# * useful linear algebra, Fourier transform, and random number capabilities
#
# -
# In this tutorial, we will cover:
#
# * **Basics**: Different ways to create NumPy Arrays and Basics of NumPy
# * **Computation**: Computations on NumPy arrays using Universal Functions and other NumPy Routines
# * **Aggregations**: Various function used to aggregate for NumPy arrays
# + [markdown] colab_type="text" id="_0903dnnpnRn"
# ### NumPy contains an array object that is "fast"
#
#
# <img src="https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/threefundamental.png">
#
#
# **It stores / consists of**:
# * location of a memory block (allocated all at one time)
# * a shape (3 x 3 or 1 x 9, etc)
# * data type / size of each element
#
# The core feature that NumPy supports is its multi-dimensional arrays. In NumPy, dimensions are called axes and the number of axes is called a rank.
# -
# ### NumPy Array Anatomy
# <img src= "https://github.com/ikhlaqsidhu/data-x/raw/master/imgsource/anatomyarray.png">
#
# We'll start with the standard NumPy import, under the alias `np`
# + colab={} colab_type="code" id="COn0-kdqpnRo"
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 35} colab_type="code" executionInfo={"elapsed": 919, "status": "ok", "timestamp": 1536166399848, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-7vNzR2sdPLY/AAAAAAAAAAI/AAAAAAAAAq0/4KRHUM8RY9w/s50-c-k-no/photo.jpg", "userId": "114693381777103020132"}, "user_tz": -330} id="5aEOPPawpnRs" outputId="97d42dca-becc-45eb-a974-4e43c5ca3c0e"
print(np.__version__)
# + [markdown] colab_type="text" id="EydwFoX4pnRw"
# ### Basics of NumPy Array
# -
# #### 1. Creating a NumPy Array
# ##### From Python List
# We use `np.array` to create a numpy array object from python list.
# + colab={} colab_type="code" id="_hvfZRmZpnRx" outputId="935b7f78-269a-4a7e-aca7-d8e7e47a924c"
# Create array from Python list
list1 = [1, 2, 3, 4]
data = np.array(list1)
print(data)
# + colab={} colab_type="code" id="lctvElXspnR1" outputId="f16981c4-3851-4fc6-a159-d45d1217c5e6"
# Find out object type
print(type(list1))
print(type(data))
# + [markdown] colab={} colab_type="code" id="XEBBotr_pq_u"
# Python being a dynamically typed language, Python lists can contain elements with heterogeneous data-types. But NumPy arrays are constrained to homogeneous data-types. If the supplied data types are not homogeneous, NumPy will upcast (if possible) to the most logical common data type
# + colab={} colab_type="code" id="j9amYbD0pnSA" outputId="5135d974-2085-4c47-abd2-420cdbed54df"
# NumPy converts to most logical data type
data1 = np.array([1.2, 2, 3, 4])
print(data1)
print(data1.dtype) # all values will be converted to floats
# + colab={} colab_type="code" id="ZvMwNHXGpnR9" outputId="3ad01835-470a-411f-fab1-f60c8cc7624f"
# Here if we store a float in an int array, the float will be up-casted to an int
list2 = [1, 2, 3, 4]
data2 = np.array(list2)
data2[0] = 3.14159
print(data2)
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" executionInfo={"elapsed": 1101, "status": "ok", "timestamp": 1536166422913, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-7vNzR2sdPLY/AAAAAAAAAAI/AAAAAAAAAq0/4KRHUM8RY9w/s50-c-k-no/photo.jpg", "userId": "114693381777103020132"}, "user_tz": -330} id="QKxF5TZYpnSF" outputId="eefffa7f-bd73-4a74-8258-dc19f165a413"
# We can manually specify the datatype
data3 = np.array([1, 2, 3], dtype="str")
print(data3)
print(data3.dtype)
# -
# In order to perform any mathematical operations on NumPy arrays, all the elements must be of a type that is valid to perform these mathematical operations.
# + colab={} colab_type="code" id="ZfjZnOSrpnSU" outputId="8c34690b-e4cf-43cc-c7d7-71ecfd3c4a5d"
#This will give you a TypeError
a = np.random.normal(0,1,1000)
b = np.arange(1000, dtype=np.int8)
c = np.arange(1000, dtype=np.int16)
c += a + b
print(c)
# -
# The error is resolved by explicitly casting 'a' to a matching integer dtype
a = np.random.normal(0,1,10)
a = a.astype(np.int16)
b = np.arange(10, dtype=np.int16)
c = np.arange(10, dtype=np.int16)
c += a + b
print(c)
# Unlike python list, we can create multi-dimensional arrays using NumPy.
# nested lists result in multi-dimensional arrays
x1 = np.array([range(i, i + 3) for i in [2, 4, 6]])
print(x1)
# For more information and other NumPy operations based on Python list, refer to the [NumPy documentation](http://numpy.org/).
# ##### Using NumPy routines
# When dealing with very large array, it is more efficient to create arrays from scratch using routines built into NumPy. Here are several examples:
# Create a length-10 integer array filled with zeros
print(np.zeros(10, dtype=int))
# Create a 3x5 floating-point array filled with ones
print(np.ones((3, 5), dtype=float))
# Create a 3x5 array filled with 3.14
print(np.full((3, 5), 3.14))
# Create an array filled with a linear sequence
# Starting at 0, ending at 20, stepping by 2
# (this is similar to the built-in range() function)
print(np.arange(0, 20, 2))
# Create a 3x3 array of uniformly distributed
# random values between 0 and 1
print(np.random.random((3, 3)))
# Create a 3x3 array of normally distributed random values
# with mean 0 and standard deviation 1
print(np.random.normal(0, 1, (3, 3)))
# Create a 3x3 array of random integers in the interval [0, 10)
print(np.random.randint(0, 10, (3, 3)))
# Returns the identity matrix of specific squared size
print(np.eye(5))
# You can always explore the [documentation](http://numpy.org/) for more.
# #### 2. Basics of NumPy Array
# ##### Attributes of NumPy Array
# Each NumPy array has the following attributes,
# +
x3 = np.random.randint(10, size=(3, 4, 5)) # Create a 3-D array
print("x3 ndim: ", x3.ndim) # np.ndim yields the number of dimensions
print("x3 shape:", x3.shape) # np.shape yields the size of each dimension
print("x3 size: ", x3.size) # np.size yields the total size of the array
print("dtype:", x3.dtype) # np.dtype yields the data type of the array
print("itemsize:", x3.itemsize, "bytes") # np.itemsize yields the size (in bytes) of each array element
print("nbytes:", x3.nbytes, "bytes") # np.nbytes yields the total size (in bytes) of the array
# -
# For more information, refer the [documentation](http://numpy.org/).
# + [markdown] colab_type="text" id="8nAlWhr8pnSc"
# ##### Accessing elements: Slicing and Indexing
# -
# Slicing and Indexing of NumPy Arrays is quite similar to that of Python lists
# + colab={} colab_type="code" id="xkrmJM2OpnSd" outputId="664d3a4f-62b7-4227-8c37-05d287b8f6e4"
data = np.arange(10) # Create a 1-D array
print("Original Data:\n", data, "\n")
# Indexing
print("Indexing NumPy Array:")
print(" ", data[4]) # 4th element of the numpy array
print(" ", data[-1], "\n") # 1st element from right side of the numpy array
# Slicing: To access a slice of an array 'data', we use this `data[start:stop:step]`
print("Slicing NumPy Array:")
print(" ", data[:5]) # First 5 element of the numpy array
print(" ", data[::-1]) # All the elements of the numpy array but in reverse order
# -
# <u><i>Indexing in a multi-dimensional NumPy Array</i></u>: Multi-dimensional indices work in the same way, with multiple indices separated by commas
# Let's create a 3-D array
x3 = np.random.randint(10, size=(3, 4, 5))
print(x3)
print(x3[1]) # prints the 2nd 4x5 array in the generated 3-D array
print(x3[1,2]) # prints the 3rd row of the x3[1] array
print(x3[1,2,3]) # prints the 4th element of the x3[1,2] array
# <u><i>Slicing in a multi-dimensional NumPy Array</i></u>: Multi-dimensional slices work in the same way, with multiple slices separated by commas
# Let's create a 3-D array
x3 = np.random.randint(10, size=(3, 4, 5))
print(x3)
print(x3[:2, :3, :4]) # prints a 3x4x5 array is sliced into 2x3x4 array
# <u><i>Mask Indexing and Boolean Slicing</i></u>: These technique are used to filter and get quick inference about the nature of the dataset that we have
# +
# Mask Indexing
numpy_array = np.random.randint(1, 11, size=(10))
print("NumPy Array:\n", numpy_array, "\n")
# Let's create a mask for the 'numpy_array' such that we can filter out the elements that are 'greater than 5'
mask = numpy_array > 5
print("Masked Array:\n", mask, "\n")
# Now let's just print the elements that follow our condition
print("Interested Array:\n", numpy_array[mask])
# -
# For further exploration, refer the [documentation](https://numpy.org/doc/1.17/user/basics.indexing.html) of NumPy
# + [markdown] colab_type="text" id="_lF7_W83pnSp"
# ##### Python Lists and NumPy Arrays
# -
# NumPy utilizes efficient pointers to a location in memory and it will store the full array. Lists on the other hand are pointers to many different objects in memory.
# <u><i>Subarray (default returns)</i></u>: Slicing returns a view for a NumPy Array, where as Python Lists returns a copy the list
# + colab={} colab_type="code" id="ZYjzWMhOpnSq" outputId="e0b4f769-796d-45fb-c494-191decd1c3a2"
# Let's create a NumPy Array and slice it
data_numpy = np.random.randint(10, size=(10))
print("Pre-slicing NumPy Array: ", data_numpy)
slicing_numpy = data_numpy[0:3]
print("Slice of NumPy Array: ", slicing_numpy)
# Let's create a Python List and slice it
import random
data_list = random.sample(range(0, 10), 10)
print("\nPre-slicing Python List: ", data_list)
slicing_list = data_list[0:3]
print("Slice of Python List: ", slicing_list)
# + colab={} colab_type="code" id="0MKQXKtgpnSy" outputId="92a7cfc2-d420-4fea-b1af-d46f70efc531"
# Let's change the first element of both array and list
slicing_numpy[0] = -1
print("Slice of NumPy Array: ", slicing_numpy)
slicing_list[0] = -1
print("Slice of Python List: ", slicing_list)
# + colab={} colab_type="code" id="wKrkBwF2pnS2" outputId="0774c345-7ace-4573-d604-89b26fdc506f"
print("Post-slicing NumPy array: ", data_numpy) # has changed
print("Post-slicing Python list: ", data_list) # has not changed
# -
# <u><i>Subarray (custom)</i></u>: Slicing of NumPy Array should create a copy of the array just like Python Lists
# + colab={} colab_type="code" id="FmLdcrTXpnS5" outputId="2432ceba-3501-4b9f-fa06-44fafbe7622a"
# Creating copies of the array instead of views
data_numpy = np.random.randint(10, size=(10))
print("Pre-slicing NumPy Array: ", data_numpy)
slicing_numpy_copy = data_numpy[0:3].copy()
print("Slice of NumPy Array: ", slicing_numpy_copy)
# + colab={} colab_type="code" id="wfIdJgLApnS7" outputId="e6163bd6-063f-426d-b875-dd78a62d3fca"
# Let's change the first element of our numpy array and observe
slicing_numpy_copy[0] = -1
print("Post-slicing NumPy Array: ", slicing_numpy_copy)
print("Pre-slicing NumPy Array: ", data_numpy) # now it is not a view any more but we created a copy of data_numpy
# + [markdown] colab_type="text" id="dC5uIhS-pnTv"
# ### Computation
# -
# #### 1. Universal Function
# A universal function (or ufunc) that is applied on an `ndarray` in an element-by-element fashion. That is, a ufunc is a “vectorized” wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs.
# + colab={} colab_type="code" id="OXKDS1blpnTv" outputId="c2bce9ef-f972-4fa6-8202-a615c846130d"
# Let's define two NumPy Arrays
x = np.random.randint(1, 11, size=(10))
y = np.random.randint(1, 11, size=(10))
print ("Array 'x' = ", x)
print ("Array 'y' = ", y)
# + colab={} colab_type="code" id="sMmkVzCnpnTy" outputId="966e28a8-1034-4fe2-e17c-1c61f7ec5fbf"
# Let's perform some arithmetic on these arrays
print(x + y)
print(x - y)
print(x * y)
print(x / y)
print(x // y) # floor division
print(x % y)
# -
# Each of these arithmetic operations are simply convenient wrappers around specific functions built into NumPy, for example, the `+` operator is a wrapper for the `add` function
# + colab={} colab_type="code" id="tk6s0qqopnT7" outputId="d3202563-8765-41f9-a64e-867d2da99448"
print(np.add(x, y))
print(np.subtract(x, y))
print(np.multiply(x, y))
print(np.mod(x, y))
# + [markdown] colab={} colab_type="code" id="jHhRzVT7pnUA" outputId="7818293f-8297-4e3a-f7f6-996956eed422"
# The following table lists some of the `ufunc` implemented in NumPy:
#
#
# | Universal Functions | Operator (if any) | Description |
# |:-----------------------:|:------------------:|:--------------------------------------------------------------:|
# |``np.add`` | ``+`` |Addition (e.g., ``[10 6 8] + [3 10 6] = [13 16 14]``) |
# |``np.subtract`` | ``-`` |Subtraction (e.g., ``[10 6 8] - [3 10 6] = [ 7 -4 2]``) |
# |``np.negative`` | ``-`` |Unary negation (e.g., ``[-10 -6 -8]``) |
# |``np.multiply`` | ``*`` |Multiplication (e.g., ``[10 6 8] * [3 10 6] = [30 60 48]``) |
# |``np.divide`` | ``/`` |Division (e.g., ``[10 6 8] / [3 10 6] = [3.33 0.6 1.33]``) |
# |``np.floor_divide`` | ``//`` |Floor division (e.g., ``[10 6 8] // [3 10 6] = [3 0 1]``) |
# |``np.mod`` | ``%`` |Modulus/remainder (e.g., ``[10 6 8] % [3 10 6] = [1 6 2]``) |
# |``np.log`` | |Natural logarithm, element-wise |
# |``np.log2`` | |Base-2 logarithm of x |
#
# -
# More information on universal functions (including the full list of available functions) can found in the NumPy [documentation](https://numpy.org/doc/1.17/reference/ufuncs.html).
# #### 2. NumPy Routines
#
# NumPy being a the scientific computing package, it has several in-build routines/functions to aid mathematical and scientific computing. Some of the common routines used in Machine Learning are discussed below.
# +
# NumPy allows use to concatenate or append different NumPy Arrays
a = np.random.randint(1, 11, size=(3, 3, 2))
b = np.random.randint(1, 11, size=(3, 3, 3))
c = np.ones((1, 3, 2), dtype="int32")
d = np.ones((3, 1, 2), dtype="int32")
print("'a':\n", a, "\n")
print("'b':\n", b, "\n")
print("'c':\n", c, "\n")
print("'d':\n", d, "\n")
# Let's concatenate 'a' and 'b' together along axis=2
print("Concatenate:\n", np.concatenate((a, b), axis=2), "\n")
# Let's append 'c' to 'a' vertically
print("Vertical Append:\n", np.vstack((a, c)), "\n") # try appending 'd' to 'a' vertically
# Let's append 'd' to 'a' horizontally
print("Horizontal Append:\n", np.hstack((a, d))) # try appending 'c' to 'a' horizontally
# +
# Let's create a random NumPy Array
numpy_array = np.random.randint(1, 11, size=(9))
print("Original Array Shape: ", numpy_array.shape)
print("Original Array: ", numpy_array, "\n")
# Using np.reshape() routine to reshape an array
numpy_array = numpy_array.reshape(3,3)
print("New Array Shape: ", numpy_array.shape)
print("New Array:\n", numpy_array)
# +
# We can also flatten matrices using ravel()
numpy_array = np.random.randint(1, 11, size=(24))
numpy_array = numpy_array.reshape(4,6)
print("Original Array Shape: ", numpy_array.shape)
print("Original Array:\n", numpy_array, "\n")
# Flattening an unflattened array
numpy_array = numpy_array.ravel()
print("Flattened Array Shape: ", numpy_array.shape)
print ("Flattened Array:\n", numpy_array)
# +
# Other useful routines for data analysis using NumPy
numpy_array = np.random.randint(1, 11, size=(3, 4))
print(numpy_array, "\n")
print ("Sum of all Elements:", numpy_array.sum())
print("Smallest Element:", numpy_array.min())
print("Highest Element:", numpy_array.max())
print("Cumulative Sum of Elements:", numpy_array.cumsum())
print ("Column-wise Sum:", numpy_array.sum(axis=0))
print ("Row-wise Sum:",numpy_array.sum(axis=1))
# -
# You can do matrix multiplication and matrix manipulations
# +
# Dot products of two "arrays"
a = np.random.randint(1, 11, size=(3, 3))
b = np.random.randint(1, 11, size=(3, 3))
print("'a':\n", a, "\n")
print("'b':\n", b, "\n")
print("Dot Product of 'a' and 'b' (arrays):\n", np.dot(a, b))
# +
# Matrix product of two "arrays"
a = np.random.randint(1, 11, size=(3, 4))
b = np.random.randint(1, 11, size=(4, 2))
print("'a':\n", a, "\n")
print("'b':\n", b, "\n")
print("Matrix Product of 'a' and 'b' (arrays):\n", np.matmul(a, b))
# +
# Taking the transpose of an array Matrix
a = np.random.randint(1, 11, size=(3, 4))
print("'a':\n", a, "\n")
# You can take transpose in two ways
print("'a' Transpose (using 'array.T'):\n", a.T, "\n")
print("'a' Transpose (using 'np.transpose()''):\n", np.transpose(a), "\n")
# -
# There so many more routines available in this package. To explore all the NumPy routines, refer the [documentation](https://numpy.org/doc/1.17/reference/routines.html).
print("Thank you for the Joining")
| Day 2 - Introduction to NumPy/intro-to-numpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
# +
import datacomp as dc
# %matplotlib inline
# -
# # Data Inputs
# +
# load Data: three dataset groups from the simulated CSV, with two categorical
# features declared and "DATASET" as the grouping column
datacol = dc.get_data("data/simulated.csv", ["Dataset1", "Dataset2", "Dataset3"], ["CATSIG1", "CATNON2"], "DATASET")
# exclude the following features from comparison (identifier columns, not measurements)
exclude_feats = ["DATASET", "ENTITY"]
# feature subset of interest
feats = ["NDSIG1", "LONG1", "FEAT1"]
# -
# # Visualize Number Of Entities Per Time Point
dc.plot_entities_per_timepoint(datacol, "TIME", "DATASET")
# # Longitudinal Analysis w.o. Normalization
#
# In this section the longitudinal comparison is applied to unnormalized raw data values.
result, time_dfs = datacol.analyze_longitudinal_feats("TIME", 0, include=feats)
result
# ### Visualization
dc.plot_signf_progs(time_dfs, result)
# # Calculate Progression Scores; Z-score Normalization
#
# Here the z-score normalization procedure is applied prior to the comparison.
progcol = datacol.create_progression_tables(feats, "TIME", "ENTITY", "z-score", 0)
# # Analyze Progression Scores
result, time_dfs = progcol.analyze_longitudinal_feats("TIME", 0, include=feats)
result
# ### Visualization
dc.plot_signf_progs(time_dfs, result)
# # Calculate Progression Scores; Folds Ratio Normalization
#
# Now the raw values will be turned into fold ratios with respect to baseline (longitudinal value 0). Any other longitudinal value could be chosen as reference.
progcol = datacol.create_progression_tables(feats, "TIME", "ENTITY", "robl", 0)
# # Analyze Progression Scores
result, time_dfs = progcol.analyze_longitudinal_feats("TIME", 0, include=feats)
result
# ### Visualization
dc.plot_signf_progs(time_dfs, result)
| longitudinal_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
from release_info import get_github_info
releases = get_github_info()
releases
# +
# Build pip-style minimum-version pins from the GitHub release info,
# removing the "v" prefix from release tags (e.g. "v1.2.0" -> "1.2.0").
requirements = [
    f'{package}>={releases[package]["version"].replace("v", "")}'
    for package in releases
]

with open('frozen.txt', 'w') as f:
    f.write("\n".join(requirements))
# -
import pickle
# Use a context manager so the file handle is closed (and data flushed) deterministically,
# instead of relying on garbage collection of the anonymous file object.
with open("releases.p", "wb") as release_file:
    pickle.dump(releases, release_file)
| tools/frozen.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# +
using DataFrames
# Sample records: name, favourite fruit/vegetable, favourite treat,
# sweet tooth ("yes"/"no"), and age.
person_data = [
    ("Alice", "apple", "apple pie", "yes", 10),
    ("Balin", "banana", "bananas foster", "yes", 53),
    ("Cal", "carrot", "carrot cake", "no", 34),
    ("Dara", "dates", "doughnut", "yes", 83),
    ("Ezekiel", "elderberry", "elderberry tart", "no", 4),
    ("Frances", "fig", "fig bars", "no", 34)
]
# Start from an empty, explicitly typed DataFrame and append one row per tuple.
person = DataFrame(name = String[], fav_fruit_veg = String[], fav_treat = String[], sweet_tooth = String[], age = Int[])
foreach(row -> push!(person, row), person_data)
# -
person
| gettingstarted/rowwiseinit-julia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="RvlKvoEVNbJ3" colab_type="code" colab={}
"""
We are running these lines because we are operating on Google Colab
"""
from google.colab import drive
drive.mount('/content/gdrive')
import os
os.chdir('/content/gdrive/My Drive/finch/framework/official_fasttext/text_classification/imdb')
# + id="8xYiRmfLOTVb" colab_type="code" colab={}
def get_idx2word(_index_from=3):
    """Build an index -> word lookup for the Keras IMDB dataset.

    Keras reserves the first ``_index_from`` ids for special tokens, so every
    raw word index is shifted by that offset before the mapping is inverted.
    """
    raw_index = tf.keras.datasets.imdb.get_word_index()
    # Invert the shifted word->index mapping; shifted ids start at
    # _index_from + 1, so the special ids below can never collide.
    idx2word = {idx + _index_from: word for word, idx in raw_index.items()}
    idx2word[0] = "<pad>"
    idx2word[1] = "<start>"
    idx2word[2] = "<unk>"
    return idx2word
# + id="eR0laIWuOUWL" colab_type="code" outputId="32ee8275-0d74-42c5-e91b-41c022195a66" executionInfo={"status": "ok", "timestamp": 1555659666951, "user_tz": -480, "elapsed": 80819, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/-cJ4VJthuDc0/AAAAAAAAAAI/AAAAAAAABAw/iwZyEawePbs/s64/photo.jpg", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 87}
import tensorflow as tf
# Load the IMDB review dataset, keeping only the 20k most frequent words.
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.imdb.load_data(num_words=20000)
idx2word = get_idx2word()
# Write the training set in fastText supervised format: "__label__<y> <text>".
# x[1:] skips the leading <start> token of each encoded review.
with open('train.txt', 'w') as f:
    for x, y in zip(X_train, y_train):
        text = ' '.join([idx2word[i] for i in x[1:]])
        f.write(f'__label__{y} {text}\n')
# + id="bKm_YmapQa9K" colab_type="code" outputId="cbcf6f2f-1a6a-485d-fca8-419b3ad76fa0" executionInfo={"status": "ok", "timestamp": 1555659696152, "user_tz": -480, "elapsed": 110005, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/-cJ4VJthuDc0/AAAAAAAAAAI/AAAAAAAABAw/iwZyEawePbs/s64/photo.jpg", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 1077}
# Download and compile the fastText 0.2.0 CLI binary (the "!" lines run as
# shell commands inside the notebook).
# !wget https://github.com/facebookresearch/fastText/archive/v0.2.0.zip
# !unzip -q v0.2.0.zip
os.chdir('/content/gdrive/My Drive/finch/framework/official_fasttext/text_classification/imdb/fastText-0.2.0')
# !make
# + id="CAbd9BxKR5TE" colab_type="code" outputId="d1b4f2eb-f1e1-4fc9-c96f-150767e6b223" executionInfo={"status": "ok", "timestamp": 1555659781583, "user_tz": -480, "elapsed": 195408, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/-cJ4VJthuDc0/AAAAAAAAAAI/AAAAAAAABAw/iwZyEawePbs/s64/photo.jpg", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 87}
# !./fasttext supervised -input ../train.txt -output model -epoch 30 -wordNgrams 2
# + id="f56JiRT3UsJY" colab_type="code" outputId="bf996d3e-b92d-4b5a-c230-5dbedb5a175f" executionInfo={"status": "ok", "timestamp": 1555659839005, "user_tz": -480, "elapsed": 252812, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/-cJ4VJthuDc0/AAAAAAAAAAI/AAAAAAAABAw/iwZyEawePbs/s64/photo.jpg", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 329}
# Install the fastText Python bindings from source (needed for load_model below).
os.chdir('/content/gdrive/My Drive/finch/framework/official_fasttext/text_classification/imdb')
# !git clone https://github.com/facebookresearch/fastText.git
os.chdir('/content/gdrive/My Drive/finch/framework/official_fasttext/text_classification/imdb/fastText')
# !pip install .
# + id="QukcJlpaVWAJ" colab_type="code" outputId="7914da88-f1bc-4068-b17a-4ddfbb3ea2a5" executionInfo={"status": "ok", "timestamp": 1555659848594, "user_tz": -480, "elapsed": 262382, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/-cJ4VJthuDc0/AAAAAAAAAAI/AAAAAAAABAw/iwZyEawePbs/s64/photo.jpg", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
import numpy as np
from fastText import load_model
os.chdir('/content/gdrive/My Drive/finch/framework/official_fasttext/text_classification/imdb')
# Load the model trained by the CLI above and score the raw-text test set.
classifier = load_model("fastText-0.2.0/model.bin")
texts = [' '.join([idx2word[i] for i in x]) for x in X_test]
# predict() returns (labels, probabilities); labels look like "__label__0".
y_preds = classifier.predict(texts)
label2label = {'__label__0': 0, '__label__1':1}
y_preds = [label2label[l[0]] for l in y_preds[0]]
print('Testing Accuracy: {:.3f}'.format((np.array(y_preds)==y_test).mean()))
| finch/framework/official_fasttext/text_classification/imdb/bigram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 02 - ML Experimentation with Custom Model
#
# The purpose of this notebook is to use [custom training](https://cloud.google.com/ai-platform-unified/docs/training/custom-training) to train a keras classifier to predict whether a given trip will result in a tip > 20%. The notebook covers the following tasks:
# 1. Preprocess the data locally using Apache Beam.
# 2. Train and test custom model locally using a Keras implementation.
# 3. Submit a Dataflow job to preprocess the data at scale.
# 4. Submit a custom training job to Vertex AI using a [pre-built container](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
# 5. Upload the trained model to Vertex AI.
# 6. Track experiment parameters from [Vertex AI Metadata](https://cloud.google.com/vertex-ai/docs/ml-metadata/introduction).
#
# We use [Vertex TensorBoard](https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-overview)
# and [Vertex ML Metadata](https://cloud.google.com/vertex-ai/docs/ml-metadata/introduction) to track, visualize, and compare ML experiments.
# ## Setup
# ### Import libraries
# +
import os
import time
import logging
from datetime import datetime
import numpy as np
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow.keras as keras
from google.cloud import aiplatform as vertex_ai
from google.cloud import aiplatform_v1beta1 as vertex_ai_beta
from src.common import features, datasource_utils
from src.model_training import data, model, defaults, trainer, exporter
from src.preprocessing import etl
# Show INFO-level logs from both Python logging and TensorFlow for this session.
logging.getLogger().setLevel(logging.INFO)
tf.get_logger().setLevel('INFO')
print(f"TensorFlow: {tf.__version__}")
print(f"TensorFlow Transform: {tft.__version__}")
# -
# ### Setup Google Cloud project
# +
# Project configuration; placeholders are auto-filled from gcloud below.
PROJECT = '[your-project-id]' # Change to your project id.
REGION = 'us-central1' # Change to your region.
BUCKET = '[your-bucket-name]' # Change to your bucket name.
SERVICE_ACCOUNT = "[your-service-account]"
if PROJECT == "" or PROJECT is None or PROJECT == "[your-project-id]":
    # Get your GCP project id from gcloud
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    # NOTE: `shell_output` is assigned by the "!" cell magic above when this runs
    # as a notebook; executed as a plain .py script it is undefined (NameError).
    PROJECT = shell_output[0]
if SERVICE_ACCOUNT == "" or SERVICE_ACCOUNT is None or SERVICE_ACCOUNT == "[your-service-account]":
    # Get your GCP account e-mail from gcloud (same cell-magic caveat as above).
    # shell_output = !gcloud config list --format 'value(core.account)' 2>/dev/null
    SERVICE_ACCOUNT = shell_output[0]
if BUCKET == "" or BUCKET is None or BUCKET == "[your-bucket-name]":
    # Default the bucket name to the GCP project id
    BUCKET = PROJECT
    # Try to create the bucket if it doesn't exist
    # ! gsutil mb -l $REGION gs://$BUCKET
    print("")
PARENT = f"projects/{PROJECT}/locations/{REGION}"
print("Project ID:", PROJECT)
print("Region:", REGION)
print("Bucket name:", BUCKET)
print("Service Account:", SERVICE_ACCOUNT)
print("Vertex API Parent URI:", PARENT)
# -
# ### Set configurations
# +
# Naming/config constants shared by all later cells.
VERSION = 'v01'
DATASET_DISPLAY_NAME = 'chicago-taxi-tips'
MODEL_DISPLAY_NAME = f'{DATASET_DISPLAY_NAME}-classifier-{VERSION}'
WORKSPACE = f'gs://{BUCKET}/{DATASET_DISPLAY_NAME}'
EXPERIMENT_ARTIFACTS_DIR = os.path.join(WORKSPACE, 'experiments')
RAW_SCHEMA_LOCATION = 'src/raw_schema/schema.pbtxt'
TENSORBOARD_DISPLAY_NAME = f'tb-{PROJECT}'
EXPERIMENT_NAME = f'{MODEL_DISPLAY_NAME}-experiment'
# -
# ## Create Vertex TensorBoard instance
# !gcloud beta ai tensorboards create --display-name={TENSORBOARD_DISPLAY_NAME} \
#  --project={PROJECT} --region={REGION}
# +
# Look up the resource name of the TensorBoard instance created above by
# filtering the list on its display name.
tensorboard_client_beta = vertex_ai_beta.TensorboardServiceClient(
    client_options={"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
)
tensorboard = [
    resource for resource in tensorboard_client_beta.list_tensorboards(parent=PARENT)
    if resource.display_name == TENSORBOARD_DISPLAY_NAME][0]
tensorboard_resource_name = tensorboard.name
print("TensorBoard resource name:", tensorboard_resource_name)
# -
# ## Initialize workspace
# +
# Optionally wipe, then (re)create, the GCS experiment-artifacts directory.
REMOVE_EXPERIMENT_ARTIFACTS = False
if tf.io.gfile.exists(EXPERIMENT_ARTIFACTS_DIR) and REMOVE_EXPERIMENT_ARTIFACTS:
    print("Removing previous experiment artifacts...")
    tf.io.gfile.rmtree(EXPERIMENT_ARTIFACTS_DIR)
if not tf.io.gfile.exists(EXPERIMENT_ARTIFACTS_DIR):
    print("Creating new experiment artifacts directory...")
    tf.io.gfile.mkdir(EXPERIMENT_ARTIFACTS_DIR)
print("Workspace is ready.")
# -
# ## Initialize Vertex AI experiment
# +
# Initialize the Vertex AI SDK and start a new, timestamped local experiment run.
vertex_ai.init(
    project=PROJECT,
    location=REGION,
    staging_bucket=BUCKET,
    experiment=EXPERIMENT_NAME
)
run_id = f"run-local-{datetime.now().strftime('%Y%m%d%H%M%S')}"
vertex_ai.start_run(run_id)
EXPERIMENT_RUN_DIR = os.path.join(EXPERIMENT_ARTIFACTS_DIR, EXPERIMENT_NAME, run_id)
print("Experiment run directory:", EXPERIMENT_RUN_DIR)
# -
# ## 1. Preprocess the data using Apache Beam
#
# The Apache Beam pipeline of data preprocessing is implemented in the [preprocessing](src/preprocessing) directory.
# Output locations for this run's exported raw data, transformed data, and
# tf.Transform artifacts.
EXPORTED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'exported_data')
TRANSFORMED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'transformed_data')
TRANSFORM_ARTIFACTS_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'transform_artifacts')
# ### Get Source Query from Managed Dataset
# +
# Small LIMIT for the local (DirectRunner) smoke test below.
ML_USE = 'UNASSIGNED'
LIMIT = 5120
raw_data_query = datasource_utils.get_training_source_query(
    project=PROJECT,
    region=REGION,
    dataset_display_name=DATASET_DISPLAY_NAME,
    ml_use=ML_USE,
    limit=LIMIT
)
print(raw_data_query)
# -
# ### Test Data Preprocessing Locally
# Pipeline arguments for a local DirectRunner execution of the Beam
# preprocessing pipeline; logged to the experiment run for reproducibility.
args = {
    'runner': 'DirectRunner',
    'raw_data_query': raw_data_query,
    'write_raw_data': True,
    'exported_data_prefix': EXPORTED_DATA_PREFIX,
    'transformed_data_prefix': TRANSFORMED_DATA_PREFIX,
    'transform_artifact_dir': TRANSFORM_ARTIFACTS_DIR,
    'temporary_dir': os.path.join(WORKSPACE, 'tmp'),
    'gcs_location': f'gs://{BUCKET}/bq_tmp',
    'project': PROJECT
}
vertex_ai.log_params(args)
print("Data preprocessing started...")
etl.run_transform_pipeline(args)
print("Data preprocessing completed.")
# !gsutil ls {EXPERIMENT_RUN_DIR}
# ## 2. Train a custom model locally using a Keras
#
# The `Keras` implementation of the custom model is in the [model_training](src/model_training) directory.
# TensorBoard logs and exported-model locations for the local training run.
LOG_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'logs')
EXPORT_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'model')
# ### Read transformed data
# Load the tf.Transform output to recover the transformed feature spec.
tft_output = tft.TFTransformOutput(TRANSFORM_ARTIFACTS_DIR)
transform_feature_spec = tft_output.transformed_feature_spec()
transform_feature_spec
# +
train_data_file_pattern = os.path.join(TRANSFORMED_DATA_PREFIX,'train/data-*.gz')
eval_data_file_pattern = os.path.join(TRANSFORMED_DATA_PREFIX,'eval/data-*.gz')
# Peek at a single 3-record batch to sanity-check features and target.
for input_features, target in data.get_dataset(
    train_data_file_pattern, transform_feature_spec, batch_size=3).take(1):
    for key in input_features:
        print(f"{key} {input_features[key].dtype}: {input_features[key].numpy().tolist()}")
    print(f"target: {target.numpy().tolist()}")
# -
# ### Create hyperparameters
# +
# Start from the model's defaults and override the hidden-layer sizes.
hyperparams = {
    "hidden_units": [64, 32]
}
hyperparams = defaults.update_hyperparams(hyperparams)
hyperparams
# -
# ### Create and test model inputs and outputs
classifier = model.create_binary_classifier(tft_output, hyperparams)
classifier.summary()
keras.utils.plot_model(
    classifier,
    show_shapes=True,
    show_dtype=True
)
# Forward pass on the batch peeked above to verify input/output wiring.
classifier(input_features)
# ### Train the model locally.
# +
logging.getLogger().setLevel(logging.INFO)
hyperparams["learning_rate"] = 0.001
hyperparams["num_epochs"] = 5
hyperparams["batch_size"] = 512
vertex_ai.log_params(hyperparams)
# -
# Train the classifier locally and log the TensorBoard events under LOG_DIR.
classifier = trainer.train(
    train_data_dir=train_data_file_pattern,
    eval_data_dir=eval_data_file_pattern,
    # BUG FIX: the notebook defines TRANSFORM_ARTIFACTS_DIR (see the
    # preprocessing cells); TRANSFORM_ARTEFACTS_DIR was never defined and
    # raised a NameError here.
    tft_output_dir=TRANSFORM_ARTIFACTS_DIR,
    hyperparams=hyperparams,
    log_dir=LOG_DIR,
)

# Evaluate on the eval split and record the metrics in the experiment run.
val_loss, val_accuracy = trainer.evaluate(
    model=classifier,
    data_dir=eval_data_file_pattern,
    raw_schema_location=RAW_SCHEMA_LOCATION,
    tft_output_dir=TRANSFORM_ARTIFACTS_DIR,  # same NameError fix as above
    hyperparams=hyperparams,
)
vertex_ai.log_metrics(
    {"val_loss": val_loss, "val_accuracy": val_accuracy})
# !tb-gcp-uploader --tensorboard_resource_name={tensorboard_resource_name} \
# --logdir={LOG_DIR} \
# --experiment_name={EXPERIMENT_NAME} --one_shot=True
# ### Export the trained model
# +
# os.path.join with a single argument is a no-op; EXPORT_DIR is already the
# full destination path.
saved_model_dir = EXPORT_DIR
exporter.export_serving_model(
    classifier=classifier,
    serving_model_dir=saved_model_dir,
    raw_schema_location=RAW_SCHEMA_LOCATION,
    # BUG FIX: TRANSFORM_ARTEFACTS_DIR is undefined (NameError); the notebook
    # defines TRANSFORM_ARTIFACTS_DIR in the preprocessing cells.
    tft_output_dir=TRANSFORM_ARTIFACTS_DIR,
)
# -
# ### Inspect model serving signatures
# !saved_model_cli show --dir={saved_model_dir} --tag_set=serve --signature_def=serving_tf_example
# !saved_model_cli show --dir={saved_model_dir} --tag_set=serve --signature_def=serving_default
# ### Test the exported SavedModel
# Reload the exported SavedModel and smoke-test both serving signatures.
serving_model = tf.saved_model.load(saved_model_dir)
file_names = tf.data.TFRecordDataset.list_files(EXPORTED_DATA_PREFIX + '/data-*.tfrecord')
# serving_tf_example accepts serialized tf.Example records directly.
for batch in tf.data.TFRecordDataset(file_names).batch(3).take(1):
    predictions = serving_model.signatures['serving_tf_example'](batch)
    for key in predictions:
        print(f"{key}: {predictions[key]}")
# +
import tensorflow_data_validation as tfdv
from tensorflow_transform.tf_metadata import schema_utils
# Recover the raw feature spec so the sample instance below can be cast to
# the dtypes the serving_default signature expects.
raw_schema = tfdv.load_schema_text(RAW_SCHEMA_LOCATION)
raw_feature_spec = schema_utils.schema_as_feature_spec(raw_schema).feature_spec
# +
# One hand-written taxi-trip instance, then each value is wrapped as a
# [[value]] tensor of the schema's dtype.
instance = {
    "dropoff_grid": "POINT(-87.6 41.9)",
    "euclidean": 2064.2696,
    "loc_cross": "",
    "payment_type": "Credit Card",
    "pickup_grid": "POINT(-87.6 41.9)",
    "trip_miles": 1.37,
    "trip_day": 12,
    "trip_hour": 6,
    "trip_month": 2,
    "trip_day_of_week": 4,
    "trip_seconds": 555,
}
for feature_name in instance:
    dtype = raw_feature_spec[feature_name].dtype
    instance[feature_name] = tf.constant([[instance[feature_name]]], dtype)
# -
predictions = serving_model.signatures['serving_default'](**instance)
for key in predictions:
    print(f"{key}: {predictions[key].numpy()}")
# ## Start a new Vertex AI experiment run
# +
# Start a second, cloud-scale experiment run.
vertex_ai.init(
    project=PROJECT,
    # CONSISTENCY FIX: the first vertex_ai.init call in this notebook passes
    # location=REGION; omitting it here made this run fall back to the SDK's
    # default region instead of the configured one.
    location=REGION,
    staging_bucket=BUCKET,
    experiment=EXPERIMENT_NAME)
run_id = f"run-gcp-{datetime.now().strftime('%Y%m%d%H%M%S')}"
vertex_ai.start_run(run_id)
EXPERIMENT_RUN_DIR = os.path.join(EXPERIMENT_ARTIFACTS_DIR, EXPERIMENT_NAME, run_id)
print("Experiment run directory:", EXPERIMENT_RUN_DIR)
# -
# ## 3. Submit a Data Processing Job to Dataflow
# Fresh output locations under the new (cloud) experiment run directory.
EXPORTED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'exported_data')
TRANSFORMED_DATA_PREFIX = os.path.join(EXPERIMENT_RUN_DIR, 'transformed_data')
TRANSFORM_ARTIFACTS_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'transform_artifacts')
# +
# Full-scale extract (1M rows) processed on Dataflow rather than locally.
ML_USE = 'UNASSIGNED'
LIMIT = 1000000
raw_data_query = datasource_utils.get_training_source_query(
    project=PROJECT,
    region=REGION,
    dataset_display_name=DATASET_DISPLAY_NAME,
    ml_use=ML_USE,
    limit=LIMIT
)
args = {
    'runner': 'DataflowRunner',
    'raw_data_query': raw_data_query,
    'exported_data_prefix': EXPORTED_DATA_PREFIX,
    'transformed_data_prefix': TRANSFORMED_DATA_PREFIX,
    'transform_artifact_dir': TRANSFORM_ARTIFACTS_DIR,
    'write_raw_data': False,
    'temporary_dir': os.path.join(WORKSPACE, 'tmp'),
    'gcs_location': os.path.join(WORKSPACE, 'bq_tmp'),
    'project': PROJECT,
    'region': REGION,
    'setup_file': './setup.py'
}
# -
vertex_ai.log_params(args)
# +
# Quiet the logs; run_transform_pipeline blocks until the Dataflow job finishes.
logging.getLogger().setLevel(logging.ERROR)
print("Data preprocessing started...")
etl.run_transform_pipeline(args)
print("Data preprocessing completed.")
# -
# !gsutil ls {EXPERIMENT_RUN_DIR}
# ## 4. Submit a Custom Training Job to Vertex AI
# Cloud-run locations for TensorBoard logs and the exported model.
LOG_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'logs')
EXPORT_DIR = os.path.join(EXPERIMENT_RUN_DIR, 'model')
# ### Test the training task locally
# !python -m src.model_training.task \
#  --model-dir={EXPORT_DIR} \
#  --log-dir={LOG_DIR} \
#  --train-data-dir={TRANSFORMED_DATA_PREFIX}/train/* \
#  --eval-data-dir={TRANSFORMED_DATA_PREFIX}/eval/* \
#  --tft-output-dir={TRANSFORM_ARTIFACTS_DIR} \
#  --num-epochs=5 \
#  --hidden-units=32,32 \
#  --experiment-name={EXPERIMENT_NAME} \
#  --run-name={run_id} \
#  --project={PROJECT} \
#  --region={REGION} \
#  --staging-bucket={BUCKET}
# ### Prepare training package
TRAINER_PACKAGE_DIR = os.path.join(WORKSPACE, 'trainer_packages')
TRAINER_PACKAGE_NAME = f'{MODEL_DISPLAY_NAME}_trainer'
print("Trainer package upload location:", TRAINER_PACKAGE_DIR)
# +
# Package src/ + setup.py as a tar.gz and upload it to GCS for the custom job.
# !rm -r src/__pycache__/
# !rm -r src/.ipynb_checkpoints/
# !rm -r src/raw_schema/.ipynb_checkpoints/
# !rm -f {TRAINER_PACKAGE_NAME}.tar {TRAINER_PACKAGE_NAME}.tar.gz
# !mkdir {TRAINER_PACKAGE_NAME}
# !cp setup.py {TRAINER_PACKAGE_NAME}/
# !cp -r src {TRAINER_PACKAGE_NAME}/
# !tar cvf {TRAINER_PACKAGE_NAME}.tar {TRAINER_PACKAGE_NAME}
# !gzip {TRAINER_PACKAGE_NAME}.tar
# !gsutil cp {TRAINER_PACKAGE_NAME}.tar.gz {TRAINER_PACKAGE_DIR}/
# !rm -r {TRAINER_PACKAGE_NAME}
# !rm -r {TRAINER_PACKAGE_NAME}.tar.gz
# -
# ### Prepare the training job
# Pre-built CPU training container image for the custom job.
TRAIN_RUNTIME = 'tf-cpu.2-4'
TRAIN_IMAGE = f"gcr.io/cloud-aiplatform/training/{TRAIN_RUNTIME}:latest"
print("Training image:", TRAIN_IMAGE)
# +
# Hyperparameters passed on the command line to the trainer module.
num_epochs = 10
learning_rate = 0.001
hidden_units = "64,64"
trainer_args = [
    f'--train-data-dir={TRANSFORMED_DATA_PREFIX + "/train/*"}',
    f'--eval-data-dir={TRANSFORMED_DATA_PREFIX + "/eval/*"}',
    f'--tft-output-dir={TRANSFORM_ARTIFACTS_DIR}',
    f'--num-epochs={num_epochs}',
    f'--learning-rate={learning_rate}',
    f'--project={PROJECT}',
    f'--region={REGION}',
    f'--staging-bucket={BUCKET}',
    f'--experiment-name={EXPERIMENT_NAME}'
]
# NOTE(review): `hidden_units` is defined above but not forwarded in
# trainer_args — the trainer will use its default; confirm this is intended.
# +
# Single-replica CPU worker pool running the uploaded Python package.
package_uri = os.path.join(TRAINER_PACKAGE_DIR, f'{TRAINER_PACKAGE_NAME}.tar.gz')
training_spec = [
    {
        "replica_count": 1,
        "machine_spec": {
            "machine_type": 'n1-standard-4',
            "accelerator_count": 0
        },
        "python_package_spec": {
            "executor_image_uri": TRAIN_IMAGE,
            "package_uris": [package_uri],
            "python_module": "src.model_training.task",
            "args": trainer_args,
        }
    }
]
# -
# ### Submit the training job
# +
print("Submitting a custom training job...")
job_display_name = f"{TRAINER_PACKAGE_NAME}_{datetime.now().strftime('%Y%m%d%H%M%S')}"
# Custom job spec: worker pools from training_spec plus the run's output dir,
# service account, and TensorBoard instance for live log streaming.
custom_job = {
    "display_name": job_display_name,
    "job_spec": {
        "worker_pool_specs": training_spec,
        "base_output_directory": {"output_uri_prefix": EXPERIMENT_RUN_DIR},
        "service_account": SERVICE_ACCOUNT,
        "tensorboard": tensorboard_resource_name,
    }
}
job_client_beta = vertex_ai_beta.JobServiceClient(
    client_options={"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
)
# Fire-and-forget submission; completion is polled in the next cell.
job = job_client_beta.create_custom_job(
    parent=PARENT,
    custom_job=custom_job
)
print(f"Job {job.name} submitted.")
# -
# ### Monitor job state
# Poll the job once a minute until it terminates (succeeds or fails).
while True:
    response = job_client_beta.get_custom_job(name=job.name)
    if response.state.name == 'JOB_STATE_SUCCEEDED':
        # update_time - create_time includes queueing/provisioning overhead,
        # while end_time - start_time is the actual training duration.
        print("Training job completed. - Training Elapsed Time:", response.update_time - response.create_time)
        print("Training Job Time:", response.end_time - response.start_time)
        break
    elif response.state.name == 'JOB_STATE_FAILED':
        print("Training job failed!")
        break
    else:
        print(f"Training job state is: {response.state.name}.")
        time.sleep(60)
# ## 5. Upload exported model to Vertex AI Models
# !gsutil ls {EXPORT_DIR}
# ### Generate the Explanation metadata
# Build the explanation metadata/parameters from the feature definitions.
explanation_config = features.generate_explanation_config()
explanation_config
# ### Upload model
# Pre-built CPU serving container image.
SERVING_RUNTIME='tf2-cpu.2-4'
SERVING_IMAGE = f"gcr.io/cloud-aiplatform/prediction/{SERVING_RUNTIME}:latest"
print("Serving image:", SERVING_IMAGE)
# +
explanation_metadata = vertex_ai.explain.ExplanationMetadata(
    inputs=explanation_config["inputs"],
    outputs=explanation_config["outputs"],
)
explanation_parameters = vertex_ai.explain.ExplanationParameters(
    explanation_config["params"]
)
# Register the exported SavedModel as a Vertex AI Model with explanations.
vertex_model = vertex_ai.Model.upload(
    display_name=MODEL_DISPLAY_NAME,
    artifact_uri=EXPORT_DIR,
    serving_container_image_uri=SERVING_IMAGE,
    parameters_schema_uri=None,
    instance_schema_uri=None,
    explanation_metadata=explanation_metadata,
    explanation_parameters=explanation_parameters,
)
# -
vertex_model.gca_resource
# ## 6. Extract experiment run parameters
# Pull all experiment runs and keep only this notebook's experiment;
# transpose so each run is a column.
experiment_df = vertex_ai.get_experiment_df()
experiment_df = experiment_df[experiment_df.experiment_name == EXPERIMENT_NAME]
experiment_df.T
print("Vertex AI Experiments:")
print(
f"https://console.cloud.google.com/vertex-ai/locations{REGION}/experiments/{EXPERIMENT_NAME}/metrics?project={PROJECT}"
)
| 02-experimentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.6 64-bit
# name: python3
# ---
import os
import pandas as pd
from socket import getservbyname, getservbyport
from tqdm import tqdm
from mawiparse.tool import week_split, pp
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
from mawiparse.plot2 import plot_factory
import plotly.express as px
# +
# Destination ports of interest (well-known services plus a few VPN/streaming
# ports); the commented-out entries were considered but excluded.
selected = [
    21.0,
    22.0,
    25.0,
    80.0,
    143.0,
    209.0,
    443.0,
    993.0,
    1194.0, # openVPN
    1935.0, # RTMP
    2408.0, # Cloudflare
    # 3074.0, # Xbox
    # 3478.0, #
    # 3479.0,
    # 3480.0,
    # 3481.0,
    # 4500.0,
    # 5223.0,
    # 8200.0,
    # 8801.0
]
# NOTE(review): `dir` and (below) `all` shadow Python builtins; `dir` is read
# again by a later cell, so renaming must be coordinated across cells.
dir = 'result'
month = '202007'
# One per-port traffic table per month, Dec 2019 .. Sep 2020.
dec2019 = pd.read_csv(os.path.join(dir, '201912'+'_port.csv'), index_col = 0)
jan2020 = pd.read_csv(os.path.join(dir, '202001'+'_port.csv'), index_col = 0)
feb2020 = pd.read_csv(os.path.join(dir, '202002'+'_port.csv'), index_col = 0)
mar2020 = pd.read_csv(os.path.join(dir, '202003'+'_port.csv'), index_col = 0)
apr2020 = pd.read_csv(os.path.join(dir, '202004'+'_port.csv'), index_col = 0)
may2020 = pd.read_csv(os.path.join(dir, '202005'+'_port.csv'), index_col = 0)
jun2020 = pd.read_csv(os.path.join(dir, '202006'+'_port.csv'), index_col = 0)
jul2020 = pd.read_csv(os.path.join(dir, '202007'+'_port.csv'), index_col = 0)
aug2020 = pd.read_csv(os.path.join(dir, '202008'+'_port.csv'), index_col = 0)
sep2020 = pd.read_csv(os.path.join(dir, '202009'+'_port.csv'), index_col = 0)
pd_list = [dec2019, jan2020, feb2020, mar2020, apr2020, may2020, jun2020, jul2020, aug2020, sep2020]
# Restrict every month to the selected ports, then take the per-port mean
# across that month's columns.
pd_list = [ x.loc[selected] for x in pd_list ]
series_list = []
all = []
for i in pd_list:
    # i =i.loc[selected]
    series_list.append(i.T.mean())
all = pd.concat(pd_list, axis=1)
summary = pd.concat(series_list, axis=1)
summary.columns = ['2019 Dec', '2020 Jan', '2020 Feb', '2020 Mar', '2020 Apr', '2020 May', '2020 Jun', '2020 Jul', '2020 Aug', '2020 Sep']
# Log-scale bar chart of monthly per-port traffic volume.
px.bar(summary.T, log_y=True, width = 600, height = 400,
    labels={ # replaces default labels by column name
        "value": "Port Traffic Volume (in Bytes)", "index": "Date"
    },
)
# summary.T.plot(loglog=True)
# +
# Pie chart of per-port mean traffic for the first month in pd_list (Dec 2019).
a = pd_list[0].mean(axis=1)  # row-wise mean; equivalent to .T.mean()
fig = px.pie(
    values=a,
    names=a.index,
    width=400,
    height=400,
    color_discrete_sequence=px.colors.qualitative.Set3,
)
fig.update_traces(
    textinfo='label+percent',
    marker={'line': {'color': '#000000', 'width': 1}},
)
fig.update_layout(margin={'l': 20, 'r': 20, 't': 20, 'b': 20})
# +
# Same pie chart for the eighth month in pd_list (Jul 2020).
a = pd_list[7].mean(axis=1)  # row-wise mean; equivalent to .T.mean()
fig = px.pie(
    values=a,
    names=a.index,
    width=400,
    height=400,
    color_discrete_sequence=px.colors.qualitative.Set3,
)
fig.update_traces(
    textinfo='label+percent',
    marker={'line': {'color': '#000000', 'width': 1}},
)
fig.update_layout(margin={'l': 20, 'r': 20, 't': 20, 'b': 20})
# +
# Pie chart for the seventh month in pd_list (Jun 2020).
a = pd_list[6].T.mean()
fig = px.pie(values=a, names=a.index,
    width = 400, height = 400, color_discrete_sequence=px.colors.qualitative.Set3
)
fig.update_traces(textinfo='label+percent', marker=dict( line=dict(color='#000000', width=1)))
fig.update_layout(
    margin=dict(l=20, r=20, t=20, b=20),
)
# +
# NOTE(review): this cell is an exact duplicate of the one above (also
# pd_list[6]); it may have been intended for a different month — confirm.
a = pd_list[6].T.mean()
fig = px.pie(values=a, names=a.index,
    width = 400, height = 400, color_discrete_sequence=px.colors.qualitative.Set3
)
fig.update_traces(textinfo='label+percent', marker=dict( line=dict(color='#000000', width=1)))
fig.update_layout(
    margin=dict(l=20, r=20, t=20, b=20),
)
# +
# Dec 2020 is loaded separately (it is not part of pd_list), filtered to the
# same selected ports, then charted like the months above.
dec2020 = pd.read_csv(os.path.join(dir, '202012'+'_port.csv'), index_col = 0)
dec2020 =dec2020.loc[selected]
a = dec2020.T.mean()
fig = px.pie(values=a, names=a.index,
    width = 400, height = 400, color_discrete_sequence=px.colors.qualitative.Set3
)
fig.update_traces(textinfo='label+percent', marker=dict( line=dict(color='#000000', width=1)))
fig.update_layout(
    margin=dict(l=20, r=20, t=20, b=20),
)
# -
| part3_plt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ENGR 1330 – Computational Thinking and Data Science Fall 2021
#
# ## <font color=navy>Project: Streamflow Forecasting Final Project</font>
# <img src="https://c.tenor.com/9nEacavIPZsAAAAC/flowing-water.gif" width="50%"> <br>
#
# ### Background
#
# Why is Streamflow Analysis Important?
#
# Information gained from streamflow data is used for many different purposes:
# - ***Water supply plans***
# - Municipalities
# - Agriculture
# - Industries
# - ***Engineering design***
# - Reservoirs
# - Bridges, Roads, Culverts
# - Treatment Plans
# - ***Operations***
# - Reservoirs
# - Power plants
# - Navigation
# - ***Identifying changes in streamflows***
# - Climate change
# - Water use
# - Land use
# - ***Flood planning and warning***
# - Floodplain mapping
# - Flood forecasts
# - ***Streamflow forecasting***
# - ***Characterizing and evaluating in-stream conditions***
# - Habitat assessment
# - Environmental flow requirements
# - Recreation
# - ***Support of water quality sampling***
# - Water quality conditions
# - Contaminant transport
#
# <br>
# <br>
#
# <img src="https://www.lrp.usace.army.mil/portals/72/siteimages/Recreation/conemauglake.jpg" width="50%"> <br>
#
# In this project, you will analyse a streamflow dataset and build a Machine Learning Model to predict the flow status and flowrate in a river.
# ___
# ### How Streamflow is Measured?
#
# USGS describes the process at [https://www.usgs.gov/special-topic/water-science-school/science/how-streamflow-measured?qt-science_center_objects=0#qt-science_center_objects](https://www.usgs.gov/special-topic/water-science-school/science/how-streamflow-measured?qt-science_center_objects=0#qt-science_center_objects)
#
#  <br>
#
# Streamgaging generally involves 3 steps:
#
# 1. ***Measuring stream stage***—obtaining a continuous record of stage—the height of the water surface at a location along a stream or river
# 2. ***The discharge measurement***—obtaining periodic measurements of discharge (the quantity of water passing a location along a stream)
# 3. ***The stage-discharge relation***—defining the natural but often changing relation between the stage and discharge; using the stage-discharge relation to convert the continuously measured stage into estimates of streamflow or discharge
#
#
# <img src="https://prd-wret.s3.us-west-2.amazonaws.com/assets/palladium/production/s3fs-public/styles/full_width/public/thumbnails/image/streamgage-graphics%20-%20Copy.jpg" width="50%"> <br>
# ___
#
# ## Case Study: The Colorado River in Texas
#
# The Colorado River is an approximately 862-mile (1,387 km) long river in the U.S. state of Texas. It is the 18th longest river in the United States and the longest river with both its source and its mouth within Texas.
#
# The Colorado River originates south of Lubbock, on the Llano Estacado near Lamesa. It flows generally southeast out of the Llano Estacado and through the Texas Hill Country, then through several reservoirs including Lake <NAME>, E.V. Spence Reservoir, and O.H. Ivie Lake. The river flows through several more reservoirs before reaching Austin, including Lake Buchanan, Inks Lake, Lake <NAME> (commonly referred to as Lake LBJ), and Lake Travis. The Llano River joins the Colorado at Lake LBJ near Kingsland, and the Pedernales River joins at Lake Travis near Briarcliff. After passing through Austin, the Colorado River continues flowing southeast until emptying into Matagorda Bay on the Gulf of Mexico, near Matagorda. The Colorado is the largest river lying entirely within Texas; it drains an area of about 39,900 square miles (103,350 square km) and receives several forks of the Concho River, the Pecan Bayou, and the San Saba, Llano, and Pedernales rivers.
#
#  <br>
#
# The river is an important source of water for farming, cities, and electrical power production. In addition to power plants operating on each of the major lakes, waters of the Colorado are used for cooling the South Texas Nuclear Project near Bay City. Altogether, there are over 7,500 miles of creeks, streams, and rivers in our basin, and well over 2 million people live and work here. The Colorado’s watershed includes several major metropolitan areas, including Midland-Odessa, San Angelo, and Austin, and there are hundreds of smaller towns and communities as well. Many communities, like Austin, rely on the Colorado River for 100% of their municipal water. Because of its importance to the state’s economy, environment, industry, and agriculture it is recognized as the lifeblood of Texas.
# ___
#
# <img src="https://cdn.vox-cdn.com/thumbor/z67Uw_p2JYudnXeeouxgil4XtoQ=/0x0:5999x3685/1200x800/filters:focal(2521x1364:3479x2322)/cdn.vox-cdn.com/uploads/chorus_image/image/64045588/Mount_Bonnell_shutterstock.0.jpg" width="70%"> <br>
#
#
# ### The Dataset | Streamflow Data
#
# Streamflow data is downloaded for the most upstream streamflow monitoring station from "waterdata.usgs.gov". USGS Streamflow Monitoring Station 08117995 located in Borden County, Texas (Latitude 32°37'43", Longitude 101°17'06" NAD27) provided monthly streamflow records from March 1988 until May 2021.
#
# <img src="https://deeply-assets.thenewhumanitarian.org/20170616094245/Idaho-gauge.jpg?w=640&fit=max&q=60" width="70%"> <br>
#
#
# ### The Dataset | Meteorological Data
#
# Meteorological data is downloaded for the same period of time and the same time scale from "prism.oregonstate.edu". This data includes monthly precipitation and temperature (max, mean, and min) records.
#
#
# <img src="https://stjohn23pburg.files.wordpress.com/2013/06/river-rain.jpg" width="70%"> <br>
# ---
#
# ### The Dataset | Overview
#
# The dataset [http://54.243.252.9/engr-1330-webroot/4-Databases/ColoradoRiverData.csv](http://54.243.252.9/engr-1330-webroot/4-Databases/ColoradoRiverData.csv) contains the following information:
#
# |Columns|Info.|
# |---:|---:|
# |Date | The date of a measurement in YYYY-MM format|
# |ppt (inches) |The total recorded precipitation in inches for each month|
# |tmin (degrees F) |The minimum recorded temperature in degrees Fahrenheit for each month|
# |tmean (degrees F) |The average recorded temperature in degrees Fahrenheit for each month|
# |tmax (degrees F) |The maximum recorded temperature in degrees Fahrenheit for each month|
# |Flowrate (cfs) |The average recorded streamflow in cubic feet per second for each month|
#
#
# A script to get and download the database is provided below:
import requests
remote_url="http://54.243.252.9/engr-1330-webroot/4-Databases/ColoradoRiverData.csv" # set the url
rget = requests.get(remote_url, allow_redirects=True) # get the remote resource, follow imbedded links
# Write the payload to disk; 'with' guarantees the file handle is closed
# even if the write fails (the original relied on implicit garbage collection).
with open('ColoradoRiverData.csv', 'wb') as f:
    f.write(rget.content)
# ___
#
# ## Problem Statement:
# - Literature scan on the importance and approaches to streamflow forecasting.
# - Analyse an existing hydro-meteorological database and build a data model to predict the streamflow based on various features.
# - Build an interface to allow users to enter relative input features (e.g., precipitation) and return an estimated flowrate and an assessment of the uncertainty in the estimate
# - Build an interface to allow users to enter relative input features (e.g., precipitation) and return an estimated flow state.
# - Document the project in an interim and final report; prepare videos of use cases and project management issues.
# ___
# ### **Literature Research:**
# In a short essay (1-2 pages):
# - Describe the importance and challenges of streamflow forecasting.
# - Summarize the value of a data model in the context of the conventional approach to streamflow forecasting
#
# Some places to start are:
# - <NAME>., <NAME>., & <NAME>. (2011). A Review of Quantitative Precipitation Forecasts and Their Use in Short- to Medium-Range Streamflow Forecasting, Journal of Hydrometeorology, 12(5), 713-728. Retrieved Oct 21, 2021, from https://journals.ametsoc.org/view/journals/hydr/12/5/2011jhm1347_
#
# - <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2015). Artificial intelligence based models for stream-flow forecasting: 2000–2015. Journal of Hydrology, 530, 829-844. available at https://www.sciencedirect.com/science/article/abs/pii/S0022169415008069
#
#
# - <NAME>, <NAME> & <NAME> (2018) Univariate streamflow forecasting using commonly used data-driven models: literature review and case study, Hydrological Sciences Journal, 63:7, 1091-1111, DOI: 10.1080/02626667.2018.1469756 available at https://www.tandfonline.com/doi/full/10.1080/02626667.2018.1469756
#
#
# - <NAME>, <NAME>, <NAME>. Flood Prediction Using Machine Learning Models: Literature Review. Water. 2018; 10(11):1536. https://doi.org/10.3390/w10111536 available at https://www.mdpi.com/2073-4441/10/11/1536
#
#
# - <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020). Long lead-time daily and monthly streamflow forecasting using machine learning methods. Journal of Hydrology, 590, 125376. available at https://www.sciencedirect.com/science/article/abs/pii/S0022169420308362
#
# ### **Exploratory Data Analysis (EDA):**
# Provide a summary (description) of the dataset in 2-3 pages. This summary should appropriately present the essential information about the dataset in a concise, well-written and clear manner. Things you may want to include ...
#
# - An overall description of the dataset
# - A summary of the information extracted from the important statistics of different parameters in the dataset
# - A summary of the distributional characteristics of different parameters in the dataset
# - A summary of the relationship status within the different parameters in the dataset
#
# Your EDA section must include your answers for the following questions:
# - Which parameter (between precipitation and temperature values) can be a better predictor for streamflow at the station of study? why?
# - What can you infer from comparing the maximum recorded floods in the 90s, 2000s, and 2010s? Based on the records available in the dataset, would you expect to see more or less extreme floods in the future?
# - Periods with the flowrate of 0 (No-flow periods) can be viewed as indicators of drought. What can you understand from comparing the number of no-flow days in the 90s, 2000s, and 2010s? Is the upstream of the Colorado River becoming more or less prone to drought?
#
# ### **Model Building: Part 1 | Forecasting flowrates**
# In this part, the goal is to make models to predict flowrates in the Colorado River, and then evaluate their performance using appropriate goodness-of-fit measures, and analyze the outcomes. Use the first 75% of the dataset for training your models and the remaining 25% for testing.
#
# - Build 3 data models that you see appropriate for this task. (*Please note that these models should be unique and this uniqueness can be defined based on using different algorithms, inputs, or both.*)
# - Assess data model quality (decide which model is best)
# - Build the input data interface for using the "best" model
# - Using your best model determine the estimated flowrate for the hydro-meteorological conditions in the table below:
#
# |ppt|tmax|tmean|tmin|last month flowrate|
# |:---|:---|:---|:---|:---|
# |0.0|113.0|99.0|85.0|0.0|
# |4.5|95.0|85.0|75.0|74.5|
# |2.2|20.0|10.0|0.0|55.0|
# |1.0|80.0|60.0|40.0|36.3|
# |0.0|80.0|60.0|40.0|12.0|
# *note that you may not need all the values for each case, depending on your best model.*
#
# Your "Model Building: Part 1" section must include your answers for the following questions:
# - What are the most important assumptions in your modeling?
# - Is it beneficial to use the streamflow recorded in the previous step (a lagged streamflow value) as an input feature? why?
# - Which parameter (between precipitation and temperature values) was a better predictor for streamflow at the station of study? why?
# - Is there a specific range of streamflow values that are harder to capture accurately for your data models? If yes, what range and why?
#
# ### **Model Building: Part 2 | Forecasting flow states**
# In this part, the goal is to make models to predict whether the Colorado River's flowstate is in the "Flow" or the "No-Flow" state. Then, evaluate their performance using appropriate goodness-of-fit measures, and analyze the outcomes. Use the first 75% of the dataset for training your models and the remaining 25% for testing.
#
# - Add a column to the dataframe for the flow state: It should be 0 when the flowrate is equal to 0 and 1 when the flowrate is non-zero.
# - Build 3 data models that you see appropriate for predicting the flow state. (*Please note that these models should be unique and this uniqueness can be defined based on using different algorithms, inputs, or both.*)
# - Assess data model quality (decide which model is best)
# - Build the input data interface for using the "best" model
# - Using your best model determine the estimated flow state for the hydro-meteorological conditions in the table below:
#
# |ppt|tmax|tmean|tmin|
# |:---|:---|:---|:---|
# |0.0|113.0|99.0|85.0|
# |4.5|95.0|85.0|75.0|
# |2.2|20.0|10.0|0.0|
# |1.0|80.0|60.0|40.0|
# |0.0|80.0|60.0|40.0|
# *note that you may not need all the values for each case, depending on your best model.*
#
# Your "Model Building: Part 2" section must include your answers for the following questions:
# - What are the most important assumptions in your modeling?
# - Is it more difficult for the model to capture one flow state more than the other? If yes, which one? why?
# - Which parameter (between precipitation and temperature values) was a better predictor for flow state at the station of study? why?
# - What can be some applications of this kind of streamflow state modeling?
#
#
# ___
# ## **Deliverables:**
#
# #### Effort Sheets (due every week on Friday):
# Each team must submit an effort sheet which is a table with a clear description of the tasks undertaken by each member and has the signature of all team members. The effort sheets should be submitted digitally via email.
#
#
# #### Interim report (due November 24):
# This report must include:
# - The "Literature Research" section
# - A description of the Colorado River database
# - A plan of work for how you want to handle the project and solve the modeling tasks.
# - Break down each task into manageable subtasks and describe how you intend to solve the subtasks and how you will test each task. (Perhaps make a simple Gantt Chart)
# - Address the responsibilities of each team member for tasks completed and tasks to be completed until the end of the semester. (Perhaps make explicit subtask assignments)
#
# Your report should be limited to 7 pages, 12 pt font size, double linespacing (exclusive of references which are NOT included in the page count). You need to cite/reference all sources you used. This report must be submitted by Midnight November 24th in PDF format.
#
# #### Final report (due December 7):
# This report must include:
# - All the required parts, including the ones from the Interim report as well as the sections on EDA and Model Building parts.
# - All the filled effort sheets with the signatures of the team members with a clear description of all the tasks performed by each member.
# - All the references used in the entire length of the project. <br>
# This report must be submitted by Midnight December 7th in PDF format, along with the following documents:
# - A well-documented Jupyter Notebook (.ipynb file) for the analysis and implementation of the data models.
# - A well-documented Jupyter Notebook (.ipynb file) for the implementation of the data model user interface.
#
# **Above items can reside in a single notebook; but clearly identify sections that perform different tasks.**
#
# - A how-to video demonstrating the application, performance and description of what you did for the project, including the problems that you solved as well as those that you were not able to solve.
# - A project management video (up to 5 minutes) in which you explain how you completed the project and how you worked as a team.
#
# **Above items can reside in a single video; but structure the video into the two parts; use an obvious transition when moving from "how to ..." into the project management portion.**
# **Keep the total video length to less than 10 minutes; submit as an "unlisted" YouTube video, and just supply the link (someone on each team is likely to have a YouTube creator account). Keep in mind a 10 minute video can approach 100MB file size before compression, so it won't upload to Blackboard and cannot be emailed.**
| 6-Projects/P-StreamflowAnalysis/src/.ipynb_checkpoints/ENGR-1330-Streamflow-Project-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="RIOpmuPsAfVC"
# Examen Medio Curso
#
# <NAME> <NAME>
#
# 1799790- Martes N4
#
# + [markdown] id="GiOb2lEfAtrh"
# 1.1 El usuario dará un número al azar y el código calculará la suma de todos los números desde el 1 hasta el número dado por el usuario.
# Ejemplo: el usuario dió el número 4, el código deberá de dar como resultado 10
# + colab={"base_uri": "https://localhost:8080/"} id="C7oqvMbMAs8B" outputId="d4534974-7b14-4f6f-875b-e31e2a6e142b"
# Read the user's number and add up every integer from 1 through that number.
num = int(input('Ingresa un número al azar: '))  # number chosen by the user
# Use the built-in sum() over a range instead of a manual while-loop.
# This avoids shadowing the built-in name `sum` (the original rebound it),
# and an empty range (num < 1) correctly yields 0, matching the old loop.
total = sum(range(1, num + 1))
print('La suma es: ', total)  # report the result
# + [markdown] id="FtyePV1nBDIp"
# 1.2 Dados el inicio y final de un rango de números, guardar ese rango de números en una lista. Después, imprimir los números que son pares en la lista por medio de uno de los ciclos que vimos en clase.
# Inicio = 6, final = 31
# + colab={"base_uri": "https://localhost:8080/"} id="TXzQAXdhBF6K" outputId="f1851aee-3dac-48ae-e607-f3d1ad4884ca"
# Ask the user for the inclusive start and end of the range.
start = int(input('Ingresa el numero de inicio para el rango: '))
end = int(input('Ingresa el numero final para el rango: '))
# Store every integer of the inclusive range in a list
# (end + 1 so the final number itself is included).
numbers = list(range(start, end + 1))
print('Los numeros pares de la lista son:')
# Keep only the values divisible by 2 and print them one per line.
for value in [n for n in numbers if n % 2 == 0]:
    print(value)
| Examen-Medio-Curso-IA/EMC_IA_KarenValdez_1799790.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Eclipsing binary: `pymc3` solution for the maps
# In this notebook, we're going to do MCMC to infer the surface maps of two stars in an eclipsing binary given the light curve of the system. We generated the data in [this notebook](EclipsingBinary_Generate.ipynb). Note that here we assume we know everything else about the system (the orbital parameters, the limb darkening coefficients, etc.), so the only unknown parameters are the maps of the two stars, which are expressed in `starry` as vectors of spherical harmonic coefficients. In a future tutorial we'll explore a more complex inference problem where we have uncertainties on all the parameters.
#
# Let's begin with some imports. **Note that in order to do inference with pymc3, we need to enable lazy evaluation.** That's because `pymc3` requires derivatives of the likelihood function, so we need to use the fancy `theano` computational graph to perform backpropagation on the `starry` model. All this means in practice is that we'll have to call `.eval()` in some places to get numerical values out of the parameters.
# + tags=["hide_input"]
# %matplotlib inline
# + tags=["hide_input"]
# %run notebook_setup.py
# +
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import exoplanet as xo
import os
import starry
from corner import corner
np.random.seed(12)  # reproducible random draws throughout the notebook
starry.config.lazy = True  # lazy (theano) evaluation -- required so pymc3 can backpropagate
starry.config.quiet = True  # silence starry's informational output
# -
# ## Load the data
#
# Let's load the EB dataset:
# + tags=["hide_input", "hide_output"]
# Run the Generate notebook if needed
# (programmatically executes EclipsingBinary_Generate.ipynb to create eb.npz).
if not os.path.exists("eb.npz"):
    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor
    with open("EclipsingBinary_Generate.ipynb") as f:
        nb = nbformat.read(f, as_version=4)
    # Execute every cell of the Generate notebook (10-minute timeout).
    ep = ExecutePreprocessor(timeout=600, kernel_name="python3")
    ep.preprocess(nb);
# -
# Unpack the simulated dataset.
data = np.load("eb.npz", allow_pickle=True)
A = data["A"].item()  # primary-star parameter dict (ydeg, udeg, inc, r, m, prot, u, y, amp)
B = data["B"].item()  # secondary-star parameter dict (adds porb, t0)
t = data["t"]  # observation times
flux = data["flux"]  # observed system flux
sigma = data["sigma"]  # per-point Gaussian flux uncertainty (used as sd in the likelihood)
# Next, we instantiate the primary, secondary, and system objects. Recall that we assume we know the true values of all the orbital parameters and star properties, *except* for the two surface maps. Note that we are instantiating the `starry` objects within a `pm.Model()` context so that `pymc3` can keep track of all the variables.
# Assemble the starry system inside a pm.Model() context so that pymc3
# can keep track of the variables attached to the maps later on.
with pm.Model() as model:
    # Primary
    pri = starry.Primary(
        starry.Map(ydeg=A["ydeg"], udeg=A["udeg"], inc=A["inc"]),
        r=A["r"],
        m=A["m"],
        prot=A["prot"],
    )
    pri.map[1:] = A["u"]  # limb-darkening coefficients (assumed known)
    # Secondary
    sec = starry.Secondary(
        starry.Map(ydeg=B["ydeg"], udeg=B["udeg"], inc=B["inc"]),
        r=B["r"],
        m=B["m"],
        porb=B["porb"],
        prot=B["prot"],
        t0=B["t0"],
        inc=B["inc"],
    )
    sec.map[1:] = B["u"]  # limb-darkening coefficients (assumed known)
    # System
    sys = starry.System(pri, sec)  # note: this name shadows the stdlib `sys` module
# Here's the light curve we're going to do inference on:
# Quick look at the light curve we will do inference on.
fig, ax = plt.subplots(1, figsize=(12, 5))
ax.plot(t, flux, "k.", alpha=0.5, ms=4)  # data as small black points
ax.set_xlabel("time [days]", fontsize=24)
ax.set_ylabel("normalized flux", fontsize=24);
# ## Define the `pymc3` model
#
# Now we define the full `pymc3` model. If you've never used `pymc3` before, <NAME>'s [exoplanet package documentation](https://exoplanet.dfm.io/en/stable/) has lots of nice tutorials on how to use `pymc3` to do inference. The basic idea here is we define our variables by assigning priors to them; we use a `pm.MvNormal` for both the primary and secondary maps. This is a multi-variate normal (Gaussian) distribution, which happens to be a convenient prior to place on spherical harmonic coefficients because of its close relationship to the *power spectrum* of the map. In particular, if the Gaussian prior is zero-mean and its covariance is diagonal with constant entries for each degree $l$ (as we assume below), this is equivalent to an isotropic prior whose power spectrum is given by those entries on the diagonal. Note that for simplicity we are assuming a *flat* power spectrum, meaning we place the same prior weight on all spatial scales. So the covariance of our Gaussian is as simple as it can be: it's just $\lambda I$, where $\lambda = 10^{-2}$ is the prior variance of the spherical harmonic coefficients and $I$ is the identity matrix. The scalar $\lambda$ is essentially a regularization parameter: by making it small, we ensure that the spherical harmonic coefficients stay close to zero, which is usually what we want for physical maps.
#
# You'll note there's also a call to `pm.Deterministic`, which just keeps track of variables for later (in this case, we'll have access to the value of `flux_model` for every iteration of the chain once we're done; this is useful for plotting). And finally, there's a call to `pm.Normal` in which we specify our `observed` values, their standard deviation `sd`, and the mean vector `mu`, which is our `starry` flux model. This normal distribution is our chi-squared term: we're telling `pymc3` that our data is normally distributed about our model with some (observational) uncertainty.
with pm.Model() as model:
    # The amplitude of the primary
    pri.map.amp = pm.Normal("pri_amp", mu=1.0, sd=0.1)
    # The Ylm coefficients of the primary
    # with a zero-mean isotropic Gaussian prior.
    # A diagonal covariance lambda*I with lambda = 1e-2 corresponds to a flat
    # power spectrum; lambda regularizes the coefficients toward zero.
    ncoeff = pri.map.Ny - 1  # number of Ylm terms above Y_{0,0}
    pri_mu = np.zeros(ncoeff)
    pri_cov = 1e-2 * np.eye(ncoeff)
    pri.map[1:, :] = pm.MvNormal("pri_y", pri_mu, pri_cov, shape=(ncoeff,))
    # The amplitude of the secondary
    sec.map.amp = pm.Normal("sec_amp", mu=0.1, sd=0.01)
    # The Ylm coefficients of the secondary
    # with a zero-mean isotropic Gaussian prior (same flat power spectrum)
    ncoeff = sec.map.Ny - 1
    sec_mu = np.zeros(ncoeff)
    sec_cov = 1e-2 * np.eye(ncoeff)
    sec.map[1:, :] = pm.MvNormal("sec_y", sec_mu, sec_cov, shape=(ncoeff,))
    # Compute the flux
    flux_model = sys.flux(t=t)
    # Track some values for plotting later
    pm.Deterministic("flux_model", flux_model)
    # Save our initial guess
    # See http://exoplanet.dfm.io/en/stable/user/api/#exoplanet.eval_in_model
    flux_model_guess = xo.eval_in_model(flux_model)
    # The likelihood function assuming known Gaussian uncertainty
    pm.Normal("obs", mu=flux_model, sd=sigma, observed=flux)
# Now that we've specified the model, it's a good idea to run a quick gradient descent to find the MAP (maximum a posteriori) solution. This will give us a decent starting point for the inference problem.
# %%time
# Gradient descent to the maximum a posteriori (MAP) point; this gives
# the sampler a good starting location.
with model:
    map_soln = xo.optimize()
# Note the dramatic increase in the value of the log posterior!
# Let's plot the MAP model alongside the data and the initial guess (note that we're doing quite well).
plt.figure(figsize=(12, 5))
plt.plot(t, flux, "k.", alpha=0.3, ms=2, label="data")
plt.plot(t, flux_model_guess, "C1--", lw=1, alpha=0.5, label="Initial")
plt.plot(
    # evaluate the (lazy) flux model at the MAP parameter values
    t, xo.eval_in_model(flux_model, map_soln, model=model), "C1-", label="MAP", lw=1
)
plt.legend(fontsize=10, numpoints=5)
plt.xlabel("time [days]", fontsize=24)
plt.ylabel("relative flux", fontsize=24);
# We can also plot the corresponding maps: note that we recover the spots *really well*!
# Visualize the MAP surface maps for both stars.
# (Named `map_vis` rather than `map` so we don't shadow the Python builtin.)
map_vis = starry.Map(ydeg=A["ydeg"])
map_vis.inc = A["inc"]
map_vis.amp = map_soln["pri_amp"]
map_vis[1:, :] = map_soln["pri_y"]
map_vis.show(theta=np.linspace(0, 360, 50))  # animated rotation of the primary
map_vis = starry.Map(ydeg=B["ydeg"])
map_vis.inc = B["inc"]
map_vis.amp = map_soln["sec_amp"]
map_vis[1:, :] = map_soln["sec_y"]
map_vis.show(theta=np.linspace(0, 360, 50))  # animated rotation of the secondary
# ## MCMC sampling
#
# We have an optimum solution, but we're really interested in the *posterior* over surface maps (i.e., an understanding of the uncertainty of our solution). We're therefore going to do MCMC sampling with `pymc3`. This is easy: within the `model` context, we just call `pm.sample`. The number of tuning and draw steps below are quite small since I wanted this notebook to run quickly; try increasing them by a factor of a few to get more faithful posteriors.
#
# You can read about the `get_dense_nuts_step` convenience function (which *really* helps the sampling when degeneracies are present) [here](http://exoplanet.dfm.io/en/stable/user/api/#exoplanet.get_dense_nuts_step).
# %%time
# Draw posterior samples with NUTS; the dense mass matrix helps because the
# Ylm coefficients are strongly correlated (degenerate).
with model:
    trace = pm.sample(
        tune=500,
        draws=500,
        start=map_soln,
        chains=4,
        step=xo.get_dense_nuts_step(target_accept=0.9),
    )
# We can look at `pm.summary` to check if things converged. In particular, we're looking for a large number of effective samples `ess` for all parameters and a value of `r_hat` that is very close to one.
varnames = ["pri_amp", "pri_y", "sec_amp", "sec_y"]
# `display` is provided by the IPython notebook environment; show the first
# and last rows of the convergence summary table.
display(pm.summary(trace, var_names=varnames).head())
display(pm.summary(trace, var_names=varnames).tail())
# The number of effective samples for some of the parameters is quite small, so in practice we should run this chain for longer (an exercise for the reader!) But let's carry on for now, keeping in mind that our posteriors will be quite noisy.
# Let's plot the model for 24 random samples from the chain. Note that the lines are so close together that they're indistinguishable!
# Overplot the flux model for 24 random posterior draws on the data.
plt.figure(figsize=(12, 5))
plt.plot(t, flux, "k.", alpha=0.3, ms=2, label="data")
label = "samples"
for i in np.random.choice(range(len(trace["flux_model"])), 24):
    plt.plot(t, trace["flux_model"][i], "C0-", alpha=0.3, label=label)
    label = None  # only label the first line so the legend shows a single entry
plt.legend(fontsize=10, numpoints=5)
plt.xlabel("time [days]", fontsize=24)
plt.ylabel("relative flux", fontsize=24);
# Let's compare the *mean* map and a *random* sample to the true map for each star:
# +
# Posterior summary maps: compare the truth, the posterior mean, and a single
# random posterior draw for each star. The original repeated the same
# build/render boilerplate six times and shadowed the builtin `map`.
np.random.seed(0)
i = np.random.randint(len(trace["pri_y"]))  # index of the random draw


def _render_rect(ydeg, y, amp):
    """Render a map with the given Ylm coefficients and amplitude on a lat-lon grid."""
    m = starry.Map(ydeg=ydeg)
    m[1:, :] = y
    m.amp = amp
    return m.render(projection="rect").eval()


# Primary: posterior mean, random draw, and the true input map.
pri_mu = _render_rect(A["ydeg"], np.mean(trace["pri_y"], axis=0), np.mean(trace["pri_amp"]))
pri_draw = _render_rect(A["ydeg"], trace["pri_y"][i], trace["pri_amp"][i])
pri_true = _render_rect(A["ydeg"], A["y"], A["amp"])

# Secondary: the same three renderings.
sec_mu = _render_rect(B["ydeg"], np.mean(trace["sec_y"], axis=0), np.mean(trace["sec_amp"]))
sec_draw = _render_rect(B["ydeg"], trace["sec_y"][i], trace["sec_amp"][i])
sec_true = _render_rect(B["ydeg"], B["y"], B["amp"])

# 3x2 grid: rows = (true, mean, draw), columns = (primary, secondary).
# Each column keeps its own color ceiling (0.4 vs 0.04), matching the
# roughly 10x fainter secondary amplitude.
fig, ax = plt.subplots(3, 2, figsize=(8, 7))
for row, (pri_img, sec_img) in enumerate(
    [(pri_true, sec_true), (pri_mu, sec_mu), (pri_draw, sec_draw)]
):
    for col, (img, vmax) in enumerate([(pri_img, 0.4), (sec_img, 0.04)]):
        ax[row, col].imshow(
            img,
            origin="lower",
            extent=(-180, 180, -90, 90),
            cmap="plasma",
            vmin=0,
            vmax=vmax,
        )
ax[0, 0].set_title("primary")
ax[0, 1].set_title("secondary")
ax[0, 0].set_ylabel("true", rotation=0, labelpad=20)
ax[1, 0].set_ylabel("mean", rotation=0, labelpad=20)
ax[2, 0].set_ylabel("draw", rotation=0, labelpad=20);
# -
# Looks pretty good! There are obvious artifacts (there are tons of degeneracies in this problem), but we've definitely recovered the spots, with some uncertainty. Recall that our chains weren't well converged! Run this notebook for longer to get more faithful posteriors.
#
# Finally, here's a corner plot for the first several coefficients of the primary map. You can see that all the posteriors are nice and Gaussian, with some fairly strong correlations (the degeneracies I mentioned above):
# Joint posteriors for the primary amplitude and its first 8 Ylm coefficients.
fig, ax = plt.subplots(9, 9, figsize=(7, 7))  # 9 parameters -> 9x9 corner grid
labels = [r"$\alpha$"] + [
    r"$Y_{%d,%d}$" % (l, m)
    for l in range(1, pri.map.ydeg + 1)
    for m in range(-l, l + 1)
]
# First column: amplitude samples; remaining columns: first 8 Ylm samples.
samps = np.hstack((trace["pri_amp"].reshape(-1, 1), trace["pri_y"][:, :8]))
corner(samps, fig=fig, labels=labels)
# Shrink tick/label sizes so the dense grid stays readable.
for axis in ax.flatten():
    axis.xaxis.set_tick_params(labelsize=6)
    axis.yaxis.set_tick_params(labelsize=6)
    axis.xaxis.label.set_size(12)
    axis.yaxis.label.set_size(12)
    axis.xaxis.set_label_coords(0.5, -0.6)
    axis.yaxis.set_label_coords(-0.6, 0.5)
# That's it! While sampling with `pymc3` is fairly fast, the problem of inferring a surface map when all other parameters are known is a **linear problem**, which means it actually has an *analytic* solution! In the following [notebook](EclipsingBinary_Linear.ipynb), we show how to take advantage of this within `starry` to do extremely fast inference.
| notebooks/EclipsingBinary_PyMC3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "skip"}
# <table>
# <tr align=left><td><img align=left src="./images/CC-BY.png">
# <td>Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved MIT license. (c) <NAME></td>
# </table>
# + slideshow={"slide_type": "skip"}
from __future__ import print_function
# + [markdown] slideshow={"slide_type": "slide"}
# # Introduction to NumPy
#
# [NumPy](https://numpy.org/) is the basic library in Python that defines a number of essential data structures and routines for doing numerical computing (among other things). Many of the semantics for manipulating the most basic data structure, the `ndarray`, are identical to manipulating `list`s with a few key exceptions. Other commands are similar to matlab commands and work in a similar manner. We will cover those and some of the other important points when working with NumPy.
#
# Topics:
# - The `ndarray`
# - Mathematical functions
# - Array manipulations
# - Common array functions
# - Math Functions in NumPy
# + [markdown] slideshow={"slide_type": "slide"}
# ## `ndarray`
#
# The `ndarray` forms the most basic type of data-structure for NumPy. As the name suggests the `ndarray` is an array that can have as many dimensions as you specify. For matlab users this should be familiar although note that the `ndarray` does not exactly behave as you might expect the same object to in matlab. Here are some examples usages:
# + slideshow={"slide_type": "fragment"}
import numpy
# + [markdown] slideshow={"slide_type": "fragment"}
# Define a 2x2 array, note that unlike MATLAB we need commas everywhere:
# + slideshow={"slide_type": "-"}
# A nested list of lists becomes a 2x2 two-dimensional array.
my_array = numpy.array([[1, 2], [3, 4]])
print(my_array)
# + [markdown] slideshow={"slide_type": "subslide"}
# Get the `(0, 1)` component of the array:
# + slideshow={"slide_type": "-"}
print(my_array)
# + slideshow={"slide_type": "-"}
my_array[0, 1]  # row 0, column 1 -- indices are zero-based
# + [markdown] slideshow={"slide_type": "fragment"}
# Fetch the second row of the matrix:
# + slideshow={"slide_type": "-"}
my_array[1,:]  # ":" selects every column of row 1
# + [markdown] slideshow={"slide_type": "subslide"}
# Fetch the first column of the matrix:
# + slideshow={"slide_type": "-"}
print(my_array)
# + slideshow={"slide_type": "-"}
my_array[:,0]  # every row, column 0
# + [markdown] slideshow={"slide_type": "subslide"}
# Define a column vector:
# + slideshow={"slide_type": "-"}
my_vec = numpy.array([[1], [2]])  # shape (2, 1)
print(my_vec)
# + [markdown] slideshow={"slide_type": "fragment"}
# Multiply `my_array` by the vector `my_vec` in the usual linear algebra sense (equivalent to MATLAB's `*`)
# + slideshow={"slide_type": "-"}
print(my_array)
# + slideshow={"slide_type": "-"}
print(numpy.dot(my_array, my_vec))  # matrix-vector product, function form
# + slideshow={"slide_type": "-"}
print(my_array.dot(my_vec))  # same product via the array method
# + [markdown] slideshow={"slide_type": "subslide"}
# Multiply `my_array` and `my_vec` by "broadcasting" the matching dimensions, equivalent to MATLAB's `.*` form:
# + slideshow={"slide_type": "-"}
print(my_array)
print()
print(my_vec)
# + slideshow={"slide_type": "-"}
my_array * my_vec  # element-wise product; my_vec is broadcast across the columns
# + [markdown] slideshow={"slide_type": "slide"}
# ## Common Array Constructors
# Along with the most common constructor for `ndarray`s above (`array`) there are number of other ways to create arrays with particular values inserted in them. Here are a few that can be useful.
# + [markdown] slideshow={"slide_type": "subslide"}
# The `linspace` command (similar to MATLAB's `linspace` command) take three arguments, the first define a range of values and the third how many points to put in between them. This is great if you want to evaluate a function at evently space points between two numbers.
# + slideshow={"slide_type": "-"}
print(numpy.linspace(-1, 1, 10))  # 10 evenly spaced points from -1 to 1, endpoints included
# numpy.linspace?
# + [markdown] slideshow={"slide_type": "subslide"}
# Another useful set of functions are `zeros` and `ones` which create an array of zeros and ones respectively (again equivalent to the functions in MATLAB). Note that you can explicitly define the data type.
# + slideshow={"slide_type": "-"}
numpy.zeros([3, 3])  # 3x3 array of (float) zeros
# + slideshow={"slide_type": "-"}
numpy.ones([3, 3, 2], dtype=int)  # 3x3x2 array of integer ones
# + [markdown] slideshow={"slide_type": "subslide"}
# Another common array is the identity matrix. The `identity` command can be used to define an identity matrix of a given dimension.
# + slideshow={"slide_type": "-"}
I = numpy.identity(3)  # 3x3 identity matrix
print(I)
# + [markdown] slideshow={"slide_type": "subslide"}
# Note that NumPy arrays can be reshaped and expanded after they are created but this can be computational expense and may be difficult to fully understand the consequences of (`reshape` in particular can be difficult). One way to avoid these issues is to create an empty array of the right size and storing the calculated values as you find them. The array constructor to do this is called `empty`:
# + slideshow={"slide_type": "-"}
numpy.empty([2,3])  # uninitialized memory: the values shown are arbitrary
# + [markdown] slideshow={"slide_type": "skip"}
# Note that here the IPython notebook is displaying zeros (or something close to this). The values are almost always not zero but the display of values is truncated to help with displaying long numbers. This can be controlled using `%precision 3` where 3 is upto the number of decimal points to display
# + slideshow={"slide_type": "skip"}
# %precision 3
numpy.empty([2,3]) + 2
# + [markdown] slideshow={"slide_type": "slide"}
# ## Array Manipulations
# Sometimes, despite our best efforts, we will need to manipulate the size or shape of our already created arrays.
# - Note that these functions can be complex to use and can be computationally expensive so use sparingly!
# - That being said, often these can still be a great way to avoid using too much memory and still may be faster than creating multiple arrays.
# - Check out the [NumPy Docs](http://docs.scipy.org/doc/numpy/reference/routines.array-manipulation.html) for more functions beyond these basic ones
# + [markdown] slideshow={"slide_type": "subslide"}
# One of the important aspects of an array is its `shape`.
# + slideshow={"slide_type": "-"}
A = numpy.array([[1, 2, 3], [4, 5, 6]])  # a 2x3 array
print(A)
# + slideshow={"slide_type": "-"}
print("A Shape = ", A.shape)  # shape is a tuple: (rows, columns)
# + [markdown] slideshow={"slide_type": "fragment"}
# We can reshape an array.
# + slideshow={"slide_type": "-"}
B = A.reshape((6,1))  # same 6 elements, rearranged into a 6x1 column
print("A Shape = ", A.shape)  # A itself is unchanged
print("B Shape = ", B.shape)
print(B)
# #numpy.reshape?
# + [markdown] slideshow={"slide_type": "subslide"}
# Take the matrix `A` and make a larger matrix by tiling the old one the number of times specified.
# + slideshow={"slide_type": "-"}
A
# + slideshow={"slide_type": "-"}
B=numpy.tile(A, (2,3))  # repeat A 2x vertically and 3x horizontally -> (4, 9)
print(B.shape)
# + slideshow={"slide_type": "-"}
A.flatten()  # a copy of A collapsed to one dimension
# + [markdown] slideshow={"slide_type": "slide"}
# ## Array Operations
#
# The numpy library also includes a number of basic operations on arrays. For example, a common operation is to determine the transpose of an array.
# + slideshow={"slide_type": "-"}
B = numpy.array([[1,2,3],[1,4,9],[1,8,27]])  # a 3x3 example matrix
print(B)
# + slideshow={"slide_type": "-"}
print(B.transpose())  # rows and columns swapped
B.mean()  # mean of all nine entries (shown as the cell's displayed value)
# + [markdown] slideshow={"slide_type": "subslide"}
# One nice aspect of the numpy libary is that scalar multiplication is defined in the usual way.
# + slideshow={"slide_type": "-"}
v = numpy.array([[1],[2],[3]])  # 3x1 column vector
print(v)
# + slideshow={"slide_type": "-"}
print(2*v)  # every entry is doubled
# + [markdown] slideshow={"slide_type": "subslide"}
# Another common operation is to multiply two arrays. Be careful to make sure that an operation is defined. In the example below the operation that is commented out is not defined. You should uncomment the line and execute the commands. It is important to learn how to read and interpret error messages.
# + slideshow={"slide_type": "-"}
# Matrix multiplication requires compatible inner dimensions.
A = numpy.array([[1],[-1],[1]])              # shape (3, 1) column vector
B = numpy.array([[1,2,3],[1,4,9],[1,8,27]])  # shape (3, 3)
print(numpy.matmul(B,A))               # (3,3) @ (3,1) -> (3,1): defined
print(numpy.matmul(A.transpose(),B))   # (1,3) @ (3,3) -> (1,3): defined
# The next product is NOT defined -- (3,1) @ (3,3) has mismatched inner
# dimensions and raises a ValueError. Uncomment it to read the error message
# (as the text above suggests); leaving it active crashes the cell.
# print(numpy.matmul(A,B))
# + [markdown] slideshow={"slide_type": "fragment"}
# Note: Matrix-Matrix (and by extension Matrix-vector) multiplication can also be done using the array method `dot`
# + slideshow={"slide_type": "-"}
print(B.dot(A))  # same as numpy.matmul(B, A)
print(A.transpose().dot(B))  # same as numpy.matmul(A.transpose(), B)
# + [markdown] slideshow={"slide_type": "subslide"}
# An element within an array can be changed using the same notation above that is used to get the value of an entry within an array.
# + slideshow={"slide_type": "-"}
B = numpy.array([[1,2,3],[1,4,9],[1,8,27]])
print(B)
# + slideshow={"slide_type": "-"}
B[0,0] = -5  # overwrite a single entry in place
print(B)
# + [markdown] slideshow={"slide_type": "fragment"}
# or even whole slices or sub-arrays can be changed
# + slideshow={"slide_type": "-"}
B[:,1] = numpy.array([1, 2, 3])  # replace the entire second column
print(B)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mathematical Functions
# Similar to the built-in Python module `math`, NumPy also provides a number of common math functions such as `sqrt`, `sin`, `cos`, and `tan` along with a number of useful constants, the most important of which is $\pi$. The benefit of using NumPy's versions is that they can be used on entire arrays.
# + slideshow={"slide_type": "subslide"}
# Sample x on [-2*pi, 2*pi]; NumPy's math functions accept whole arrays.
x = numpy.linspace(-2.0 * numpy.pi, 2.0 * numpy.pi, 62)
print(x)
# + slideshow={"slide_type": "-"}
y = numpy.sin(x)  # element-wise sine of the entire array
print(y)
# The math module's sin only accepts scalars, so calling it on an array
# raises a TypeError. Catch it so the notebook keeps running while still
# demonstrating the failure.
import math
try:
    print(math.sin(x))
except TypeError as err:
    print("math.sin cannot handle arrays:", err)
# + [markdown] slideshow={"slide_type": "subslide"}
# This is often useful for plotting functions easily or setting up a problem (we will cover plotting later).
# + hide_input=true slideshow={"slide_type": "-"}
import matplotlib.pyplot as plt
# %matplotlib inline
fig = plt.figure(figsize=(8,6))
plt.plot(x,y,linewidth=2)
plt.grid()
plt.xlabel('$x$',fontsize=16)
plt.ylabel('$y$',fontsize=16)
plt.title('$\sin{x}$',fontsize=18)
plt.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# One thing to watch out for (and this is true of the `math` module) is that contrary to what you might expect:
# + slideshow={"slide_type": "-"}
x = numpy.linspace(-1, 1, 20)
print(x)
# + slideshow={"slide_type": "-"}
numpy.sqrt(x)
# + [markdown] slideshow={"slide_type": "subslide"}
# The problem is that if you take the `sqrt` of a negative number NumPy does not automatically use the `Complex` variable type to represent the output. Unlike lists, NumPy requires the data stored within to be uniform (of the same type or record structure). By default NumPy assumes we want `float`s which obey the IEEE compliant floating point rules for arithmetic (more on this later) and generates `nan`s instead (`nan` stands for "not-a-number", see more about this special value [here]()).
#
# If we want to deal with complex numbers there is still a way to tell NumPy that we want the `Complex` data type instead by doing the following:
# + slideshow={"slide_type": "-"}
x = numpy.linspace(-1, 1, 20, dtype=complex)
numpy.sqrt(x)
print(x)
# + [markdown] slideshow={"slide_type": "fragment"}
# There are a number of other data types that NumPy understands, the most important one being `int` for integers.
| 02_NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''amberdata'': venv)'
# language: python
# name: python37764bitamberdatavenv6eb9815684054413851f0606c6f4ebfd
# ---
# # Market Rankings Notebook
# Here is the main notebook for the Market Rankings blog post.
# ## Goal
# The goal is to use Amberdata's free API services to select assets for intraday trading. Use this notebook to explore and play around with the data, or the package in this repo to customize the framework for your needs.
# +
# load packages
import requests
import os
import json
from datetime import datetime
from dotenv import load_dotenv
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# utility functions
def get_key():
    """Load the Amberdata API key, prompting for it once if no .env exists."""
    env_missing = ".env" not in os.listdir("./")
    if env_missing:
        # First run: ask the user for the key and persist it.
        print("Configuring API Key...")
        entered = input("Amberdata API Key: ")
        with open(".env", "w") as env_file:
            env_file.write(f"AMBERDATA_API_KEY={entered}\n")
    load_dotenv(verbose=True)
    return {"AMBERDATA_API_KEY": os.getenv("AMBERDATA_API_KEY")}
def get_response(url, headers=None, queryString=None):
    """Return the `payload` of a GET request to the Amberdata REST API.

    Parameters
    ----------
    url : str
        Full endpoint URL.
    headers : dict, optional
        Request headers; defaults to a header built from the module-level
        API key.
    queryString : dict, optional
        Query parameters to send with the request.

    Returns
    -------
    dict or None
        The response `payload` when the API replies with title "OK",
        otherwise None (the raw response is printed on unexpected shapes).
    """
    if not headers:
        # BUG FIX: the module-level `api_key` is already a plain string
        # (api_key = get_key()["AMBERDATA_API_KEY"]), so the previous
        # api_key["AMBERDATA_API_KEY"] lookup raised TypeError.
        headers = {'x-api-key': api_key}
    # requests treats params=None the same as omitting it, so one call
    # covers both the with- and without-querystring cases.
    response = requests.request("GET", url, headers=headers, params=queryString)
    response = json.loads(response.text)
    try:
        if response["title"] == "OK":
            return response["payload"]
    except Exception:
        # Unexpected response shape (e.g. an error object) -- show it.
        print(response)
    return None
api_key = get_key()["AMBERDATA_API_KEY"]
# -
# Our first endpoint and the most powerful one for our purposes is [Address Rankings Latest](https://docs.amberdata.io/reference#market-rankings). This gives us key information about available Cryptoassets, and the ability to sort them based on some criteria, set by using `sortType`. Here are our options:
# * changeInPrice
# * currentPrice
# * liquidMarketCap
# * marketCap
# * tokenVelocity
# * tradeVolume
# * transactionVolume
# * uniqueAddresses
#
# Since our goal is intraday trading, we want to make sure we are using high Volume assets, so let us sort by `tradeVolume`.
# the market rankings endpoint
url = "https://web3api.io/api/v2/market/rankings"
# our api key
headers = {'x-api-key': api_key}
# the column to sort by, and number of results to return
querystring = {
    "sortType": "tradeVolume",
    "size": 250
}
# get the results with our utility function
# NOTE(review): get_response returns None on API errors, so the ["data"]
# subscript below would raise TypeError in that case -- confirm acceptable.
payload = get_response(url, headers, querystring)["data"]
# save the results in a dataframe
# drop not used or redundant columns
df = pd.DataFrame(payload).drop([
    "icon",
    "maxSupply",
    "totalSupply",
    "tokenVelocity",
    "uniqueAddresses",
    "transactionVolume",
    "specifications",
    "address",
    "decimals",
    "circulatingSupply",
    "changeInPrice",
    "rank"
], axis=1)
# make sense of the blockchain column: each entry is a dict, keep its "name"
df["blockchain"] = df.blockchain.map(lambda x: x["name"])
# show results
df.head()
# ### Data cleaning
# Looks like the data is highly variable in precision. Let's standardize it to `float64`.
# +
non_num = ["name", "symbol", "blockchain"] # non-numeric columns
# changing numeric columns to float64
df = pd.concat([df[non_num], df.drop(non_num, axis=1).apply(pd.to_numeric, axis=1)], axis=1)
# display results
df.head(10)
# -
# ### EDA
# Let's take a look at hourly change in price.
# plotting hourly change in price
df.changeInPriceHourly.plot.hist(bins=20)
plt.title("Histogram of hourly change in price")
plt.savefig("../plots/hour_price_hist.png")
# ## Define our Universe
# Now we have our crucial information to define our asset universe for trading. We care most about:
# 1. Liquidity, and
# 2. Volatility [1](https://www.investopedia.com/day-trading/pick-stocks-intraday-trading/).
#
# Here, we use volume as a proxy for measuring liquidity. We can easily determine volatile stocks with `changeInPrice`. The criteria I developed for picking assets for trading is as follows:
# * Assets with `tradeVolume`> $10^6$ to satisfy liquidity.
# * Assets with $|$`changeInPriceWeekly`$| > 5\%$ to get assets which have made large moves recently.
# * Assets with $|$`changeInPriceHourly`$| > \sigma_H$, where $\sigma_H$ is the standard deviation of `changeInPriceHourly`, to get assets more active than the market.
# * Assets with price $< 15\$ $ to reduce barrier of entry.
# get the standard deviation in hourly price change in our data
s = df.changeInPriceHourly.std()
# selecting our asset universe
universe = df.query(f"abs(changeInPriceWeekly) > 5 & tradeVolume > 10**6 & abs(changeInPriceHourly) > {1*s} & currentPrice < 15")
# display our selection
universe
# ## Preparing to Trade
# Now that we have our day trading assets in order, let's see which are most available to trade. My methodology here is to find out which we can simply buy with USD, which has the lowest transaction fees and barrier to enter. You also may want to consider the VWAP/TWAP of the base you are purchasing.
#
# First, we get match available pairs to our base's in our asset universe. For more information on a base and quote currencies, check [2](https://www.investopedia.com/terms/b/basecurrency.asp).
# lets get the pairs available to trade
url = "https://web3api.io/api/v2/market/prices/pairs"
# recieve the payload
payload = get_response(url, headers)
# get the base and quote
pairs = [pair.split("_") for pair in payload if len(pair.split("_")) == 2]
# +
# matching the quotes to our selected universe
universe_pairs = {}
for symbol in universe.symbol:
universe_pairs[symbol.lower()] = [[c for c in p if c != symbol.lower()][0] for p in pairs if symbol.lower() in p]
universe_pairs
# -
# ### Note
# the code below is only necessary to prove to yourself that the best way to buy alt-coins - if not with USD - is with BTC.
# get a list of all possible
quotes_all = list(set([item for sublist in list(universe_pairs.values()) for item in sublist]))
# +
currency = "usd"
def get_quotes_data(quotes_all):
    """Return {quote: WAP payload} for every quote purchasable with `currency`.

    For each candidate quote asset, fetch its latest TWAP/VWAP data and keep
    it only when a ``<quote>_<currency>`` trading pair exists.  Progress is
    printed per quote.  Relies on the module-level `currency` and `headers`.
    """
    # storing TWAP/VWAP
    quotes_data = {}
    for quote in quotes_all:
        # skip our native currency
        if quote == currency:
            continue
        # getting the WAP data
        url = f"https://web3api.io/api/v2/market/prices/{quote}/wap/latest"
        wap = get_response(url, headers)
        # try to find a pair for our currency.
        # BUG FIX: get_response returns None on API errors, which used to
        # raise an uncaught TypeError here -- treat it like a missing pair.
        try:
            quotes_data[quote] = wap[f"{quote}_{currency}"]
        except (KeyError, TypeError):
            print(f"Unable to purchase {quote} with {currency.upper()}")
            continue
        print(f"Able to purchase {quote} with {currency.upper()}")
    return quotes_data
quotes_data = get_quotes_data(quotes_all)
# -
# Now, we rank the quotes by VWAP to see which quotes it makes most sense to use to buy
# the underlying we would like to trade
quotes_df = pd.DataFrame(quotes_data).T.sort_values(by="vwap1m", ascending=False).reset_index().rename({"index": "symbol"}, axis=1)
quotes_df
# Now, we continue with our analysis.
# ### Finally, let's identify our base's
# We are assuming that you do not have cryptocurrency to trade for these bases, and you would either have to buy them directly or buy them with Bitcoin.
def get_wap_data(base, quote):
    """Return the latest TWAP/VWAP payload for the ``<base>_<quote>`` pair.

    Returns an empty dict (and prints a diagnostic) when no WAP data exists
    for *base* or when that specific pair is not traded.  Relies on the
    module-level `headers`.
    """
    # getting the WAP data
    url = f"https://web3api.io/api/v2/market/prices/{base}/wap/latest"
    wap = get_response(url, headers)
    if not wap:
        print(f"No WAP data for {base}")
        return {}
    # try to find a pair for our currency
    try:
        return wap[f"{base}_{quote}"]
    except KeyError:
        print(f"Error getting WAP data for {base}")
        return {}
# +
# assume BTC is the best way to buy a coin if we cannot do
# so with our native currency
## arrays to store results
curr_avail, curr_wap = [], []
best_symb, best_wap = [], []
# which we have to buy with BTC, and which to buy with our currency
for base, quotes in universe_pairs.items():
    if currency in quotes:
        curr_avail.append(base)
        curr_wap.append(get_wap_data(base, currency))
    else:
        if "btc" in quotes:
            best_symb.append(base)
            best_wap.append(get_wap_data(base, "btc"))
# display the results
cols = ["name", "symbol", "changeInPriceHourly", "currentPrice", "tradeVolume"]
if curr_wap:
    curr_wap = pd.DataFrame(curr_wap)
if best_wap:
    best_wap = pd.DataFrame(best_wap)
# BUG FIX: `fiat` was previously left undefined when no fiat bases were
# found, making the save step below raise NameError.
fiat = None
if curr_avail:
    print("\nBase available to buy with fiat:")
    fiat = df[df.symbol.map(lambda x: x.lower()).isin(curr_avail)][cols].reset_index(drop=True).join(curr_wap)
    print(fiat)
else:
    print("\nUnable to purchase any bases with fiat")
print(f"\nAble to purchase with btc:")
btc = df[df.symbol.map(lambda x: x.lower()).isin(best_symb)][cols].reset_index(drop=True).join(best_wap)
print(btc)
# -
# save the results (skip the fiat file when nothing was purchasable with fiat)
now = datetime.now().strftime("%y-%m-%d_%H-%M")
if fiat is not None:
    fiat.to_csv(f"../results/{now}_fiat.csv", index=False)
btc.to_csv(f"../results/{now}_btc.csv", index=False)
| market-rankings/notebooks/1.0-ea-market-rankings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test the subsequence widths code
#
# Saturday, Nov 24, 2018
# %load_ext autoreload
# %autoreload 2
import numpy as np
import subsequence_widths as ssw
# ## Test the counting sort function
#
# Use counting sort to sort the following array by the unicode value of the first character in the string. Strings that start with the same character should remain in the same order that they appear in the original list. (I.e. the counting sort function should be implemented as a stable sort.)
items = ['a', 'bat', 'c', 'x', 'aldk', 'yak', 'smurf', 'bark', 'gorf', 'b']
#ord() is a built-in function that returns an integer representing the Unicode code point of a character.
#its inverse is chr()
ord('a')
max(ord(item[0]) for item in items) #max works on generators. Woo hoo!
[ord(item[0]) for item in items]
np.log2(20000)
key = lambda s: ord(s[0])
key('a')
[key(item) for item in items]
print(items)
ssw.counting_sort(items, key = lambda s: ord(s[0]))
sorted_items = ssw.counting_sort(items, key = lambda s: ord(s[0]))
print([key(item) for item in sorted_items])
sorted_items = ssw.counting_sort(items, key=key, max_key=121, min_key=97)
print(sorted_items)
print([key(item) for item in sorted_items])
# ### Try sorting a list of numbers using counting sort
#
# Specifying `min_key=None` tells the function to find the minimum key, requiring one extra pass of the input but reducing the size of the counts array.
#
# Leaving `min_key` unspecified tells the function to use 0 as the minimum key, eliminating the need to scan the array but possibly wasting space (and time) by creating a larger counts array than necessary.
numbers = [3948, 923774, 2938, 293875, 28377, 9247, 29277, 202998, 3763, 34846, 2738, 3371]
sorted_numbers = ssw.counting_sort(numbers, min_key=None)
print(sorted_numbers)
sorted_numbers = ssw.counting_sort(numbers)
print(sorted_numbers)
a = b = 4
a
b
print(numbers)
sorted_numbers = ssw.counting_sort_integers(numbers)
print(sorted_numbers)
# ## Try timing things on the last successful input
#
# I still exceeded the time limit, even using counting sort.
# !ls
# +
# NOTE(review): eval() executes arbitrary code from the file -- acceptable
# for a private scratch file, but prefer ast.literal_eval for literal data.
with open('last_input.txt') as f:
    integer_list = eval(f.read())
type(integer_list)
# -
integer_list[:10]
len(integer_list)
max(integer_list)
min(integer_list)
# %timeit sorted(integer_list)
# %timeit ssw.counting_sort_integers(integer_list)
# %timeit ssw.counting_sort_integers(integer_list, max_val=20000,min_val=0)
# %timeit ssw.counting_sort(integer_list, max_key=20000,min_key=0)
# %timeit ssw.counting_sort(integer_list)
solution = ssw.Solution()
# %timeit solution.sumSubseqWidths(integer_list) #using counting sort
# %timeit solution.sumSubseqWidths(integer_list) #using built-in function sorted()
# ### Ok, so my implementation of counting sort is slower than the built-in `sorted()`, and the time to sort is trivial compared to the overall runtime...
# %timeit solution.sumSubseqWidths(integer_list) #using built-in function sorted() and explicit bit-shifting
# ### Ugh, that's ridiculous. When I timed the bit-shifting by itself, it was actually *slower* than exponentiation, so I assumed Python automatically optimized the code. Let's try submitting again...
6*(2<<(27-1) % 30007)
6*((2<<(27-1)) % 30007)
2<<3 % 5
2<<(3 % 5)
(2<<3) % 5
2<<3
1<<3+1
1<<(4-1)+1
(1<<4)+1
# %timeit solution.sumSubseqWidths(integer_list) #using built-in function sorted() and explicit bit-shifting, corrected
# ### Woo hoo, that did it
# %timeit solution.sumSubseqWidths(integer_list) #using built-in function sorted(), explicit bit-shifting, and fewer mod's
# %timeit solution.sumSubseqWidths(integer_list) #using built-in function sorted() and explicit bit-shifting, corrected,
| LeetCode/0891_sum_subsequence_widths/test_subsequence_widths.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img alt="QuantRocket logo" src="https://www.quantrocket.com/assets/img/notebook-header-logo.png">
#
# <a href="https://www.quantrocket.com/disclaimer/">Disclaimer</a>
# # Data Collection - US Stocks
#
# Our machine learning strategy will run on the universe of all US stocks.
#
# Start by collecting US stock data from Sharadar. Fundamental and price data are collected separately but can be run simultaneously.
# ## Collect Sharadar fundamentals
#
# To collect the fundamentals:
from quantrocket.fundamental import collect_sharadar_fundamentals
collect_sharadar_fundamentals(country="US")
# This runs in the background, monitor flightlog for a completion message:
#
# ```
# quantrocket.fundamental: INFO Collecting Sharadar US fundamentals
# quantrocket.fundamental: INFO Collecting updated Sharadar US securities listings
# quantrocket.fundamental: INFO Finished collecting Sharadar US fundamentals
# ```
# ## Collect Sharadar prices
#
# First, create a database for Sharadar stock prices:
from quantrocket.history import create_sharadar_db
# Create the local daily-bar price database for Sharadar US stocks.
create_sharadar_db("sharadar-us-stk-1d", sec_type="STK", country="US")
# Then collect the data:
from quantrocket.history import collect_history
# Runs in the background; monitor flightlog for the completion message.
collect_history("sharadar-us-stk-1d")
# This runs in the background, monitor flightlog for a completion message:
#
# ```
# quantrocket.history: INFO [sharadar-us-stk-1d] Collecting Sharadar US STK prices
# quantrocket.history: INFO [sharadar-us-stk-1d] Collecting updated Sharadar US securities listings
# quantrocket.history: INFO [sharadar-us-stk-1d] Finished collecting Sharadar US STK prices
# ```
# ***
#
# ## *Next Up*
#
# Part 2: [Data Collection - Indexes](Part2-Data-Collection-Indexes.ipynb)
| kitchensink_ml/Part1-Data-Collection-US-Stocks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="HE1poVfmnURm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="fa71ca81-8389-4a6a-8364-5f16ea0ab6f3"
import os
import nltk
import wave
from glob import glob
swah = " : غ ظ ض ذ خ ث ت ش ر ق ص ف ع س ن م ل ك ي ط ح ز و ة ه د ج ب ا ى إ ء ئ آ ؤ , "" ـ ھ أ "
# swah = "A B C D E F G H I J K L M N O P R S T U V W Y Z "
#swah = 'a b d e f g h i j k l m n o p r s t v y z 0 1 2 3 4 5 6 7 8 9 à c q u w x ô ì ỳ òا'
character_to_index = {j:i for i,j in enumerate(swah.split())}
def remove_punct(text):
    """Return *text* with punctuation stripped.

    Tokenizes on word characters (``\\w+``) and re-joins the tokens so that
    every token -- including the last -- is followed by a single space.
    Returns '' when the input contains no word characters.
    """
    tokenizer = nltk.RegexpTokenizer(r"\w+")
    words = tokenizer.tokenize(text)
    # ''.join preserves the original trailing-space behaviour ('' stays '').
    return ''.join(word + ' ' for word in words)
# path = '/home/safa/Documents/SR/Project/safa'
path = "/home/safa/Documents/SR/Project/safa"
# sessions = glob("data/*/*/file*.txt")
# linkers = glob("data/*/*/*linker.txt")
linkers = glob('/home/safa/Documents/SR/Project/safa/*_elicit/*_linker.txt')
files = []
# Collect session transcript files sr1..sr19, one glob per index so they
# stay in numeric order (a single glob would sort sr10 before sr2).
for n in range(1,20,1):
    info1 = glob(f'/home/safa/Documents/SR/Project/safa/*_elicit/*sr{n}.txt')
    files.append(info1)
sessions = []
for file in files:
    # NOTE(review): file[0] raises IndexError if any srN glob matched
    # nothing -- confirm all 19 session files exist on disk.
    sessions.append(file[0])
print(len(sessions))
print(len(linkers))
# print('Session files:',len(sessions), 'Linker files:',len(linkers))
print('Linker files:',len(linkers))
# Build the master label file: one line per usable wav file, containing the
# wav basename followed by the character indices of its transcript.
# FIX: use context managers -- the output file and the per-session
# transcript/linker files were previously never closed.
with open(path + '/all_sessions.txt', 'w') as all_session_text:
    for linker, sess in zip(linkers, sessions):
        with open(sess, 'r') as sess_read, open(linker, 'r') as link_read:
            for link, sentence in zip(link_read.readlines(), sess_read.readlines()):
                wav_name = os.path.basename(link).replace('\n', '')
                wav_path = glob("/home/safa/Documents/SR/Project" + "/safa/*/{0}".format(wav_name))
                if len(wav_path) == 0:
                    # No matching audio file for this transcript line.
                    continue
                wav_path = wav_path[0]
                try:
                    # Opening and reading all frames verifies the wav is intact.
                    w = wave.open(wav_path, 'r')
                    d = w.readframes(w.getnframes())
                except Exception:
                    print('corrupted audio: {0} -- skipped: '.format(wav_name))
                    # uncomment if you want to delete the corrupted file
                    # os.remove(wav_path)
                    continue
                indices = ''
                sentence = sentence.replace('##', '')
                sentence = remove_punct(sentence)
                for c in sentence:
                    if not c.isspace():
                        indices += str(character_to_index[c.lower()]) + ' '
                all_session_text.writelines(wav_name[:-4] + ' ' + indices + '\n')
# + id="rgtwD4LCnURr" colab_type="code" colab={}
| Reprocessing_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''fastai2'': conda)'
# language: python
# name: python37664bitfastai2condaf3e9781124be45a78083b472977e8c5c
# ---
# # PyTorch Exercises
#
# > It's collection of exercises to hone your numerical computing skills. The goal of this collection is to offer quick reference for several torch operations.
#
# - toc:true
# - badges:true
# - branch: master
# - comments:true
# - author: <NAME>
# - categories: [pytorch]
# - image: images/pytorch-logo.png
# Inspired by [100-numpy-exercises](https://github.com/rougier/numpy-100/)
# #### 1. Import `PyTorch` and print version (★☆☆)
# +
#collapse
import torch
torch.__version__
# -
# #### 2. Create a null vector of size 10 (★☆☆)
#collapse
torch.zeros(10)
# #### 3. Create a vector with values ranging from 10 to 49 (★☆☆)
#collapse
torch.arange(10,50)
# #### 4. Reverse a vector & tensor (★☆☆)
# +
#collapse
#hide_output
print("Vector")
x = torch.arange(10)
print("Original: ",x)
print("Reversed: ", x.flip(0))
print("Tensor")
x = x.view(5,2)
print("Original: \n",x)
print("Reversed(rows): \n", x.flip(0))
print("Reversed(cols): \n", x.flip(1))
# -
# #### 5. Create a 3x3 matrix with values ranging from 0 to 8 (★☆☆)
#collapse
torch.arange(9).view(3,3)
# #### 6. Create a 3x3 Identity Matrix (★☆☆)
#collapse
torch.eye(3)
# #### 7. Create a 3x3x3 matrix with random values (★☆☆)
# (output hidden to avoid verbosity)
# +
#collapse
#hide_output
# Random Uniform
print(torch.rand(3,3))
# Random Normal
print(torch.randn(3,3))
# Random int (low to high)
print(torch.randint(1,10,(3,3)))
# Random Permutations of given range
print(torch.randperm(9).view(3,3))
# -
# #### 8. Create a random vector of size 20 and find following stats: (★☆☆)
# - min,max, sum
# - mean, variance, standard deviation
#collapse
# Draw 20 integers uniformly from [0, 20) and cast to float so the
# floating-point reductions (mean/std/var) below are well-defined.
x = torch.randint(20,(20,)).float()
print(x)
x.min() , x.max(), x.sum(), x.mean(), x.std(), x.var()
# #### 9. Create a 2d array with 1 on the border and 0 inside (★☆☆)
#collapse
x = torch.ones(5,5)
x[1:-1,1:-1] = 0
x
# #### 10. How to add a border (filled with 0's) around an existing array? (★☆☆)
#collapse
import torch.nn.functional as F
x = torch.randn(3,3)
F.pad(x,(1,1,1,1),'constant',0)
| _notebooks/2020-04-22-PyTorch-Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit
# language: python
# name: python3
# ---
# <h1 align="center">
# Quantum Computer Music
# </h1>
# <h4 align="right">
# <NAME> <br>
# Habib University <br>
# CS/PHY 314/300 <br>
# Quantum Computing <br>
# Fall 2021 <br>
# </h4>
# <h2 align="center">
# Introduction
# </h2>
#
# Since the earliest use of computers, algorithms have been created and used in order to create musical compositions. The kind of algorithm this paper will be focusing on is sequencing rules. A computer is programmed with a certain set of rules that generate a sequence of notes, and they can be in the form of Graphs, Finite State Automata, Markov Chains, etc.
#
# ## Random Walk
#
# A random walk can be defined as a path that consists of a succession of random steps over some space.
#
# <figure align = "center">
# <img src="images/Picture1.png" alt="Random Walk">
# <figcaption>Figure #1</figcaption>
# <figcaption align="center">Source: Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)</figcaption>
# </figure>
#
# Figure 1 depicts a graph of a connected set of notes. A random walk over this graph would results in a sequence of notes that can be considered a musical composition. This is a simple example of algorithmic music generation.
#
# ## Markov Chains
#
# A Markov Chain is a stochastic model that describes a sequence of possible events where the probability of each event depends only on the previous event. The terms 'Markov Chain' and 'Random Walk' can be used interchangeably.
#
# <figure align = "center">
# <img src="images/Picture2.png" alt="Markov Chain">
# <figcaption>Figure #2</figcaption>
# <figcaption align="center">Source: Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)</figcaption>
# </figure>
#
# Figure 2 depicts the Markov Chain representation of the random walk shown in Figure 1.
#
# ## Quantum Random Walk
#
# Quantum randomness is equivalent to true randomness. Thus, a Quantum Random Walk is truly random. To implement a Quantum Random Walk we can use the idea of a Quantum Die, whose design depends on the number of qubits. For a simple one-dimensional random walk such as depicted by Figure 1, we can represent our Quantum Die as such:
#
# <figure align = "center">
# <img src="images/Picture4.png" alt="One-Dimensional Quantum Die">
# <figcaption>Figure #3</figcaption>
# <figcaption align="center">Source: Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)</figcaption>
# </figure>
# <h2 align="center">
# Implementation
# </h2>
from qiskit import QuantumCircuit,QuantumRegister,ClassicalRegister,execute,Aer
shot_count = 64
# When implementing a quantum walk, we want to 'save' the value we receive at the end of each walk. We need each successive circuit to be influenced by this previous value, and this is the necessary circuit that does so. Since a Quantum Circuit starts in the state of $|0⟩$, we add an X gate to every qubit that was previously had a measured value of $|1⟩$.
def updateCircuit(xbar, bits, cbits=0):
    """Build a circuit that re-creates a previously measured state.

    Qiskit circuits always start in |0...0>, so to 'resume' a walk we apply
    an X gate to every qubit whose previous measurement was '1'.

    Parameters
    ----------
    xbar : str
        Previous measurement bitstring, e.g. '01'.  NOTE(review): qubit i is
        matched with character i of the string -- confirm the intended bit
        ordering, since Qiskit result strings are little-endian.
    bits : int
        Number of qubits in the new circuit.
    cbits : int, optional
        Number of classical bits; 0 means the circuit has none.

    Returns
    -------
    QuantumCircuit
        Circuit with the X gates applied, wrapped in barriers.
    """
    qc1 = QuantumCircuit(bits) if cbits == 0 else QuantumCircuit(bits, cbits)
    qc1.barrier()
    for index, bit in enumerate(xbar):
        if bit == '1':
            qc1.x(index)
    qc1.barrier()
    return qc1
# <h2 align="center">
# Quantum Walk over a Tetrahedron
# </h2>
# ## Creating Sequencing Rules for the Random Walk Corresponding to Vertices on a Tetrahedron
#
# A tetrahedron is defined as a triangular pyramid composed of 4 faces and 4 vertex corners. A quantum Walk over these 4 vertices can be represented by 2 qubits. The corresponding notes for these 4 vertices are derived from the Tetratonic C Scale. An image describing a walk over a tetrahedron is shown below:
#
# <figure align = "center">
# <img src="images/Picture7.png" alt="Random Walk">
# <figcaption>Figure #4</figcaption>
# </figure>
# +
# Pitch per tetrahedron vertex (2-qubit bitstring) -- Tetratonic C scale.
vertices = {}
vertices['00'] = 'C'
vertices['01'] = 'Eb'
vertices['10'] = 'G'
vertices['11'] = 'A'
# Note duration (in beats) keyed by the same vertex bitstrings.
rythym = {}
rythym['00'] = 1.0
rythym['01'] = 0.8
rythym['10'] = 0.6
rythym['11'] = 0.4
# -
# ## Quantum Walk Circuit over a Tetrahedron
#
# The Quantum Walk corresponding to Figure 4 can be implemented as a Quantum circuit with 2 qubits corresponding to each of the 4 vertices and 2 for dice qubits. The dice qubits have 4 possible options:
#
# 1. The first qubit is flipped.
# 2. The second qubit is flipped.
# 3. Both qubits are flipped.
# 4. No qubit is flipped.
#
# To implement this, we first put our dice qubits in a superposition, this enables the 'random' in the 'random walk'. We use controlled-X gates to conditionally flip each, both, or none of the qubits.
def tetrahedron_measure_dice(input):
    # One step of the tetrahedron walk: 2 'position' qubits (0,1) plus
    # 2 'dice' qubits (2,3), measured into 2 classical bits.
    q_dice = QuantumCircuit(4,2)
    # Re-apply the previously measured vertex to the position qubits.
    q_inp = updateCircuit(input, 4)
    q_dice += q_inp
    # Put both dice qubits into superposition; each CX below then flips its
    # target position qubit with probability 1/2, giving the 4 equally
    # likely moves (flip none / first / second / both).
    q_dice.h(2)
    q_dice.h(3)
    q_dice.cx(2, 0)
    q_dice.cx(3, 1)
    q_dice.barrier()
    return q_dice
# The circuit will look like this with an input state of $|01⟩$:
#
# <figure align = "center">
# <img src="images/Picture5.png" alt="Random Walk">
# <figcaption>Figure #5</figcaption>
# <figcaption align="center">Source: Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)</figcaption>
# </figure>
# ## Implementing the Simple Quantum Walk over a Tetrahedron
#
# Starting off with the vertex '00', we do a Quantum Random Walk 10 times, passing the current vertex as an argument into our circuit implementation function. At the end of these iterations we convert the vertices to their respective notes and rythyms, as represnted in the dictionaries given above.
# +
def tetrahedron_quantum_walk():
    """Run two independent quantum walks over the tetrahedron.

    The first walk (10 steps) picks pitches, the second (5 steps) picks
    note durations.  Each step builds the dice circuit for the current
    vertex, runs it on the QASM simulator, and moves to the most frequently
    measured vertex.

    Returns
    -------
    tuple
        (note names, rhythm values); both lists are also printed.
    """
    def _walk(steps):
        # One walk starting at vertex '00'; returns the visited bitstrings.
        vertex = "00"
        visited = []
        for _ in range(steps):
            circuit = tetrahedron_measure_dice(vertex)
            circuit.measure([0, 1], [0, 1])
            job = execute(circuit, Aer.get_backend('qasm_simulator'), shots=shot_count)
            counts = list(job.result().get_counts(circuit).items())
            counts.sort(key=lambda x: x[1])
            # Move to the most frequently measured outcome.
            vertex = counts[-1][0]
            visited.append(vertex)
        return visited

    # Map the visited vertices through the module-level lookup tables.
    note_lst = [vertices[v] for v in _walk(10)]
    print(note_lst)
    rythym_lst = [rythym[v] for v in _walk(5)]
    print(rythym_lst)
    return note_lst, rythym_lst
notes, rythyms = tetrahedron_quantum_walk()
# -
# ## Sonic Pi code created Using Quantum Walk over a Tetrahedron
#
# We save the code usable with the application, SonicPi, into text files. The sleep function is added in place of the rhythm, and some sustain and bass is also added.
# +
# Render each (note, rhythm) pair as a Sonic Pi play/sample/sleep stanza.
sonic_note_lst = []
for i in zip(notes, rythyms):
    sonic_note_lst.append("""play :{},sustain:({}),sustain_level:({})\nsample :bass_hit_c\nsleep({})""".format(i[0], i[1], i[1], i[1]))
sonic_note_lst = '\n'.join(sonic_note_lst)
# FIX: 'with' guarantees the file is closed even if the write fails.
with open("SonicPiCode/tetrahedron_notefile.txt", "w") as a:
    a.write(sonic_note_lst)
# -
# <h2 align="center">
# Quantum Walk over a Cube
# </h2>
# ## Creating Sequencing Rules for the Random Walk Corresponding to Vertices on a Cube
#
# A cube is composed of 6 faces and 8 vertex corners. A quantum Walk over these 8 vertices can be represented by 3 qubits. The corresponding notes, chords and rhythms for these 8 vertices are derived from the Persian C Octatonic Scale. An image describing a walk over a cube is shown below:
#
# <figure align = "center">
# <img src="images/Picture3.png" alt="Random Walk">
# <figcaption>Figure #6</figcaption>
# <figcaption align="center">Source: Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)</figcaption>
# </figure>
#
# +
# Pitch per cube vertex (3-qubit bitstring).
# NOTE(review): '000' and '111' both map to 'C' -- presumably an octave
# wrap-around; confirm against the source composition.
vertices = {}
vertices['000'] = 'C'
vertices['001'] = 'Db'
vertices['010'] = 'F'
vertices['011'] = 'Gb'
vertices['100'] = 'E'
vertices['101'] = 'B'
vertices['110'] = 'Ab'
vertices['111'] = 'C'
# Chord (root + quality) per vertex, used for the chord walk.
chords = {}
chords['000'] = 'C4 major7'
chords['001'] = 'D4 minor7'
chords['010'] = 'E4 minor7'
chords['011'] = 'F4 major7'
chords['100'] = 'G4 major7'
chords['101'] = 'A4 minor7'
chords['110'] = 'B4 diminished7'
chords['111'] = 'C4 major7'
# Note duration (in beats) per vertex.
rythym = {}
rythym['000'] = 1.0
rythym['001'] = 0.9
rythym['010'] = 0.8
rythym['011'] = 0.7
rythym['100'] = 0.6
rythym['101'] = 0.5
rythym['110'] = 0.4
rythym['111'] = 0.3
# -
# ## Quantum Walk Circuit over a Cube
#
# A 3-qubit Quantum Walk can be implemented as a Quantum circuit with 3 qubits for measurement and 2 for dice qubits. The dice qubits have 4 possible options:
#
# 1. The first qubit is flipped.
# 2. The second qubit is flipped.
# 3. The third qubit is flipped.
# 4. No qubit is flipped.
#
# To implement this<sup>[1](#f1)</sup>, we first put our dice qubits in a superposition, this enables the 'random' in the 'random walk'. We use controlled-X gates to conditionally flip each of the qubits, However, we must also ensure that 2 or more bits are not flipped at the same time. This order or gates ensured these conditions and allows us to walk over the defined vertices of our cube.
#
# <b id="f1">1</b> The circuit implementation has been taken from "Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)".
shot_count = 64
def cube_measure_dice(input):
    # One step of the cube walk: 3 'position' qubits (0-2) plus 2 'dice'
    # qubits (3,4), measured into 3 classical bits.
    q_dice = QuantumCircuit(5,3)
    # Re-apply the previously measured vertex to the position qubits.
    q_inp = updateCircuit(input, 5)
    q_dice += q_inp
    # Two dice qubits in superposition give 4 equally likely outcomes; the
    # gate sequence below (taken from Miranda & Basak) maps each outcome to
    # flipping exactly one position qubit, or none -- never two at once --
    # so each step moves along a single edge of the cube.
    # NOTE(review): the exact gate order is load-bearing; do not reorder.
    q_dice.h(3)
    q_dice.h(4)
    q_dice.cx(4, 0)
    q_dice.x(4)
    q_dice.cx(4, 1)
    q_dice.cx(3, 2)
    q_dice.mcx([4, 3], 1)
    q_dice.x(4)
    q_dice.mcx([4, 3], 0)
    q_dice.x(4)
    q_dice.mcx([4, 3], 2)
    q_dice.barrier()
    return q_dice
# The circuit will look like this with an input state of $|000⟩$:
#
# <figure align = "center">
# <img src="images/Picture8.png" alt="Random Walk">
# <figcaption>Figure #7</figcaption>
# <figcaption align="center">Source: Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)</figcaption>
# </figure>
# ## Implementing the Simple Quantum Walk over a Cube
#
# Starting off with the vertex '000', we do a Quantum Random Walk 10 times, passing the current vertex as an argument into our circuit implementation function. At the end of these iterations we convert the vertices to their respective notes, chords, and rythyms, as represnted in the dictionaries given above.
# +
def cube_quantum_walk():
    """Run three independent 10-step quantum walks over the cube.

    Each walk starts at vertex '000'; at every step the dice circuit is
    simulated with `shot_count` shots and the most frequently measured
    vertex becomes the new position.  The three walks are mapped through
    the module-level `vertices`, `chords` and `rythym` tables.

    Returns:
        tuple: (note list, chord list, rhythm list), each of length 10.
    """
    def _walk(steps=10, start="000"):
        # One quantum random walk over the cube; returns visited vertices.
        # (The original repeated this loop three times verbatim.)
        path = []
        vertex = start
        for _ in range(steps):
            circuit = cube_measure_dice(vertex)
            circuit.measure([0, 1, 2], [0, 1, 2])
            job = execute(circuit, Aer.get_backend('qasm_simulator'), shots=shot_count)
            counts = list(job.result().get_counts(circuit).items())
            counts.sort(key=lambda x: x[1])
            # Most frequently measured outcome becomes the next vertex.
            vertex = counts[-1][0]
            path.append(vertex)
        return path

    note_lst = [vertices[v] for v in _walk()]
    print(note_lst)
    chord_lst = [chords[v] for v in _walk()]
    print(chord_lst)
    rythym_lst = [rythym[v] for v in _walk()]
    print(rythym_lst)
    return note_lst, chord_lst, rythym_lst

notes, chords, rythyms = cube_quantum_walk()
# -
# ## Sonic Pi code created Using Quantum Walk over a Cube
#
# We save the code usable with the application, SonicPi, into text files. The sleep function is added in place for the rhythm, and some sustain and bass is also added. The dictionary that maintains the chord list for C major is also used, using the same rhythm values as for the note file.
# +
# Build the SonicPi note program: one play/sample/sleep stanza per step,
# the rhythm value doubling as sustain and sleep duration.
sonic_note_lst = []
for note, beat in zip(notes, rythyms):
    sonic_note_lst.append("""play :{},sustain:({}),sustain_level:({})\nsample :bass_hit_c\nsleep({})""".format(note, beat, beat, beat))
sonic_note_lst = '\n'.join(sonic_note_lst)
# `with` guarantees the handle is flushed and closed -- the original
# never called f.close(), leaking the note-file handle.
with open("SonicPiCode/cube_notefile.txt", "w") as f:
    f.write(sonic_note_lst)

# Build the SonicPi chord program from "<root> <quality>" chord names.
sonic_chord_lst = []
for chord_name, beat in zip(chords, rythyms):
    chord_major = chord_name.split()
    sonic_chord_lst.append("""play chord(:{}, :{});sample :bass_hit_c\nsleep({})""".format(chord_major[0], chord_major[1], beat))
sonic_chord_lst = '\n'.join(sonic_chord_lst)
with open("SonicPiCode/cube_chordfile.txt", "w") as x:
    x.write(sonic_chord_lst)
# -
# <h2 align="center">
# Basak-Miranda Algorithm
# </h2>
#
# The previous examples for the Quantum Random Walk only work in certain spaces given certain conditions. Cubes and Tetrahedrons have a predefined structure that makes the dice representation simple. However, if we try to extend this to other shapes such as a dodecahedron, the dice complexity will increase significantly. To generalize a Quantum Random Walk over any set of sequencing rules, the Basak-Miranda Algorithm makes use of Grover's Search Algorithm to not only construct an equivalent quantum circuit, but also provide a quadratic time speedup.
#
# ## Grover's Search Algorithm
#
# Grover's Search Algorithm is used to provide a quadratic speedup for the following problem statement: Given f: {0,...,N-1} -> {0,1} such that f(x)=1 for exactly one x, find x.
#
# In other words, we use this algorithm to return a target state given a large unstructured list. The first step is called Phase Inversion, followed by Inversion about the Mean. The process can be shown in the following figure:
#
# <figure align = "center">
# <img src="images/Picture10.png" alt="Grovers Algorithm">
# <figcaption>Figure #8</figcaption>
# <figcaption align="center">Source: Intrigano (https://www.youtube.com/watch?v=9WAxOlYBE3g&ab_channel=intrigano)</figcaption>
# </figure>
#
# The first diagram depicts a superposition of all states. Next, the target state has its amplitude flipped. Finally, all values are inverted about the mean value. Thus, the target value has the highest amplitude, and after enough repetitions of this process, it is very likely that it will be the measured outcome.
#
# Grover's Search Algorithm can be applied in the case of algorithmic music generation via sequencing rules. Firstly, we create a Markov Chain based on our chosen sequencing rules. After assigning each state a vertex and maintaining relevant dictionaries, we can choose the target states that have been predefined by our Markov Chain based on the current vertex we are present on. The output of Grover's Search Algorithm will be only the states that have been defined in our sequencing rules. Repeating this process with a new current vertex will give us a musical composition for any kind of sequencing rules. Not just that, but it provides a quadratic time speedup, as well as naturally assigning equal amplitudes, and thus probabilities, to our target states.
# ## Creating Complex Sequencing Rules
#
# These sequencing rules are defined in the paper, "Quantum Computer Music: Foundations and Initial Experiments" by Miranda and Basak. The rules themselves are maintained in a dictionary, as well as the note to vertex and vertex to note correspondence. The vertex to note dictionary is also maintained in decimal form for convenience.
#
# +
# Twelve pitch classes used by the walk; each note's index in this list
# fixes its 4-bit vertex encoding below.
notes = ["E", "F", "G", "C#", "F#", "D#", "G#", "D", "B", "C", "A", "A#"]

# Allowed successor notes for each note (the piece's sequencing rules),
# keyed in the same order as `notes`.
sequencing_rules = {
    "E": ["F", "D#"],
    "F": ["E", "G", "D", "C"],
    "G": ["F", "C#", "D"],
    "C#": ["G", "F#", "D#"],
    "F#": ["C#", "D#", "C", "A"],
    "D#": ["F", "C#", "F#", "G#"],
    "G#": ["D#", "D", "B", "A"],
    "D": ["F", "G", "G#", "B"],
    "B": ["G#", "D", "C", "A#"],
    "C": ["F", "F#", "B", "A"],
    "A": ["F#", "G#", "C", "A#"],
    "A#": ["B", "A"],
}

# Each note's vertex is its index in `notes`, written as a 4-bit string.
note_to_vertex = {name: format(i, "04b") for i, name in enumerate(notes)}

# Inverse map over all 16 four-bit strings; the four codes with no note
# of their own (1100-1111) wrap around to the first four notes, exactly
# as in the original hand-written table.
vertex_to_note = {format(i, "04b"): notes[i % 12] for i in range(16)}

# Decimal-indexed variant of the vertex-to-note correspondence.
vertices = dict(enumerate(notes))
# -
# ## Creating a Markov Chain
def markov(rules=None, states=None):
    """Build the transition matrix implied by the sequencing rules.

    Args:
        rules: mapping note -> list of allowed successor notes.
            Defaults to the module-level `sequencing_rules`.
        states: mapping row/column index -> note name.
            Defaults to the module-level `vertices`.

    Returns:
        list[list[float]]: square matrix whose entry [i][x] is the
        uniform transition probability (rounded to 2 decimals) from
        states[i] to states[x], or 0 when the transition is not allowed.
    """
    if rules is None:
        rules = sequencing_rules
    if states is None:
        states = vertices
    n = len(rules)
    chain = [[0 for _ in range(n)] for _ in range(n)]
    for i in range(n):
        allowed = rules[states[i]]
        if not allowed:
            continue  # no outgoing transitions: leave the row all-zero
        p = round(1 / len(allowed), 2)
        for x in range(n):
            if states[x] in allowed:
                chain[i][x] = p
    return chain
# ## Building the Circuit for the Basak-Miranda Algorithm
#
# The first step of Grover's Search Algorithm, Phase Inversion, can be implemented with the use of X Gates and MCX Gates. After initializing the circuit in a superposition of states, wherever there is a 1, an X gate is applied. We have an extra qubit than measured qubits, which is initialized in the $|->$ state, and the target qubit of the MCX gate is on this extra qubit. X gates are reapplied in the same way, as well as Hadamard gates. What this circuit will do is flip the target states. To do it for multiple target states, we repeat it as many times as target states.
#
# Next, we create the circuit to implement Inversion about the Mean. The source of this circuit was taken from Qiskit<sup>[1](#f1)</sup>, and the idea is to implement a Diffuser that can be created by Hadamard and X gates, followed by a Hadamard, MCT, and Hadamard gate on the second last qubit, finally reapplying the X and Hadamard gates.
#
# <b id="f1">1</b> Source: https://qiskit.org/textbook/ch-algorithms/grover.html.
def basak_miranda_circuit(input):
    """Build one Grover iteration that amplifies the given target vertices.

    Qubits 0-3 hold the 4-bit walk position; qubit 4 is the Grover
    ancilla, prepared so the multi-controlled-X oracle kicks a phase
    back onto marked states. The diffuser follows the Qiskit textbook
    construction.

    Args:
        input: iterable of 4-character bit strings -- the vertices
            (allowed next states) to mark. NOTE: the parameter name
            shadows the builtin `input`.

    Returns:
        QuantumCircuit: 5-qubit / 4-classical-bit circuit *without*
        final measurements; the caller adds them.
    """
    qc = QuantumCircuit(5,4)
    # Ancilla to |1>, then Hadamards: qubits 0-3 in uniform
    # superposition, ancilla in |->.
    qc.x(4)
    for qubit in range(5):
        qc.h(qubit)
    qc.barrier()
    # --- Phase inversion (oracle): one pass per target state ---
    for i in input:
        # X the qubits where the target bit is '1' so the MCX fires on
        # exactly that basis state, then undo the Xs.
        # NOTE(review): character j of the string drives wire j (leftmost
        # char -> qubit 0) -- confirm this matches Qiskit's little-endian
        # measurement strings used by the caller.
        flip = []
        for j in range(len(i)):
            if i[j] == '1':
                flip.append(j)
        for j in flip:
            qc.x(j)
        if len(flip) >= 1:
            # NOTE(review): the all-zero target '0000' yields an empty
            # control list and is silently skipped here.
            qc.mcx(flip, 4)
        for j in flip:
            qc.x(j)
        qc.barrier()
    for qubit in range(4):
        qc.h(qubit)
    qc.barrier()
    # --- Diffuser (inversion about the mean) ---
    # NOTE(review): this H layer immediately follows the one above, and
    # H.H = I, so the two layers cancel; kept as in the source paper's
    # listing -- confirm against the reference implementation.
    for qubit in range(4):
        qc.h(qubit)
    for qubit in range(4):
        qc.x(qubit)
    qc.barrier()
    # Multi-controlled phase flip about |0000> via H-MCT-H on qubit 3.
    qc.h(3)
    qc.mct([0,1,2], 3)
    qc.h(3)
    qc.barrier()
    for qubit in range(4):
        qc.x(qubit)
    for qubit in range(4):
        qc.h(qubit)
    qc.barrier()
    return qc
# ## Implementing the Basak-Miranda Algorithm
#
# Starting off with the vertex '0000', we do a Quantum Random Walk 10 times, passing the target states as an argument into our circuit implementation function. We find these target states by referring to positive values in our Markov chain for each current vertex. At the end of these iterations we convert the vertices to their respective notes and rhythms, as represented in the dictionaries given above.
# +
def quantum_walk():
    """10-step walk over the 12-note graph via the Basak-Miranda circuit.

    At each step the Grover circuit amplifies the vertices the Markov
    chain allows from the current vertex; the most frequent *valid*
    measurement becomes the next vertex. Uses the module-level `notes`,
    `note_to_vertex`, `vertex_to_note` tables and `markov()`.

    Returns:
        list[str]: the sequence of note names visited.
    """
    start = "0000"
    vertex_lst = []
    note_lst = []
    markov_chain = markov()
    for _ in range(10):
        # Vertices reachable from `start` according to the Markov chain.
        row = markov_chain[notes.index(vertex_to_note[start])]
        target_states = [note_to_vertex[notes[j]] for j, p in enumerate(row) if p > 0]
        temp = basak_miranda_circuit(target_states)
        temp.measure(range(4), range(4))
        job = execute(temp, Aer.get_backend('qasm_simulator'), shots=50000)
        counts = list(job.result().get_counts(temp).items())
        counts.sort(key=lambda x: x[1])  # ascending frequency
        # Walk *down* from the most frequent outcome, skipping the four
        # codes that carry no note of their own. Bug fix: the original
        # did `i += 1` from -1, which jumped to the least frequent
        # outcome instead of the next most frequent one.
        k = -1
        while counts[k][0] in ['1111', '1110', '1101', '1100']:
            k -= 1
        start = counts[k][0]
        vertex_lst.append(start)
    for v in vertex_lst:
        if v in vertex_to_note:
            note_lst.append(vertex_to_note[v])
    print(vertex_lst)
    return note_lst

notes = quantum_walk()
# -
# ## Sonic Pi code created Using Basak-Miranda Algorithm
#
# We save the code usable with the application, SonicPi, into text files. The sleep function is added in place for the rhythm, and some sustain and bass is also added.
# +
# Emit the SonicPi program: one play/sample/sleep stanza per note, the
# matching rhythm value doubling as sustain and sleep duration.
sonic_note_lst = []
for note, beat in zip(notes, rythyms):
    stanza = """play :{},sustain:({}),sustain_level:({})\nsample :bass_hit_c\nsleep({})""".format(note, beat, beat, beat)
    sonic_note_lst.append(stanza)
sonic_note_lst = '\n'.join(sonic_note_lst)
# Context manager closes the file, matching the original's explicit close.
with open("SonicPiCode/basak_miranda_notefile.txt", "w") as f:
    f.write(sonic_note_lst)
# -
# <h2 align="center">
# Conclusion
# </h2>
#
# To conclude this paper, I will first summarize the main results and takeaways from this project. The main goal throughout all this was to generate a musical composition. This comprises, but is not limited to, notes and rhythms. As an extra result, a musical composition containing purely chords was also created for a Quantum Random Walk over a Cube. Moving on to the more complex sequencing rules, Grover's Algorithm was implemented to generate a musical composition. However, there were issues in implementation, and the resulting circuit produces incorrect transitions between states, based on predefined sequencing rules.
# One of the main results from this project, however, is that a quantum circuit was able to be generated for a Tetrahedron. The cited and followed research paper creates a quantum dice for a cube, and it is possible to create quantum dice for other figures. Further research could be conducted on generalising the idea of a quantum die over platonic solids (tetrahedron, cube, etc.).
# Using SonicPi for music creation allows for much more creativity, especially over the traditional method of 'beep-ing' the frequency corresponding to a note. SonicPi allows for chords to be played without mentioning the corresponding notes. It allows simultaneous playback, and interesting elements such as sustain and bass.
#
# Overall, Quantum Computer Music is a new and exciting way of using a well-known algorithm to allow convenience and quadratic time speedup in something one would not expect a Quantum Computer to provide specific use. Miranda and Basak have created an algorithm and exposed the world to a new and creative use of Quantum Circuits. The use of Quantum Dice and Quantum Random Walks could further have use in many different, and possibly unrelated, fields.
# <h2 align="center">
# References
# </h2>
#
# 1. Quantum Computer Music: Foundations and Initial Experiments (Miranda, Basak)
# 2. Intrigano (https://www.youtube.com/watch?v=9WAxOlYBE3g&ab_channel=intrigano)
# 3. Grover's Search Algorithm- Qiskit (https://qiskit.org/textbook/ch-algorithms/grover.html)
#
| QCM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from libraries.import_export_data_objects import import_export_data as Import_Export_Data
from libraries.altair_renderings import AltairRenderings
from libraries.utility import Utility
import pandasql as psql
import numpy as np
import pandas as pd
import os
import altair as alt
# Chart and data helpers from the project's local `libraries` package.
my_altair = AltairRenderings()
my_data = Import_Export_Data()
# Renders the pre-built EU domestic trading chart (Altair side effect).
my_altair.get_eu_domestic_trading_chart()
# Countries considered in this analysis.
# NOTE(review): hand-picked subset, not the full EU-27; includes the UK.
eu_countries=['Austria','Belgium','Croatia','Czech Republic','Denmark','Finland','France','Germany','Greece','Hungary',
              'Italy','Netherlands','Poland','Portugal','Spain','Sweden','United Kingdom']
top_20 = my_data.load_and_clean_up_WTO_file()
# NOTE(review): `eu_df` is not defined anywhere in this notebook chunk --
# presumably created in a cell removed from this checkpoint; as written
# this cell raises NameError. Confirm where eu_df is built.
eu_df_filtered = eu_df[['Trading Partner','country','Total Trade ($M)','year']]
eu_df_filtered.head()
# Wide table: one row per (partner, country) pair, one column per year.
exam = eu_df_filtered.pivot_table('Total Trade ($M)', ['Trading Partner','country'], 'year').reset_index()
# Canonical sorted pair key, used to drop symmetric A-B / B-A duplicates.
exam['ordered-cols'] = exam.apply(lambda x: '-'.join(sorted([x['Trading Partner'],x['country']])), axis=1)
exam = exam.drop_duplicates(['ordered-cols'])
# Altair needs string column names (year columns are not strings after pivot).
exam.columns = exam.columns.astype(str)
# Heatmap of 2020 gross service-trade volume between the pairs.
alt.Chart(exam).mark_rect().encode(
    x="Trading Partner:N",
    y="country:N",
    color='2020:Q'
).resolve_scale(color="independent",).properties(
    title='EU Domestic Service Trading Gross Volume'
).configure_axisY(
    titleAngle=30
)
dt = my_altair.my_data_object
# ISO-labelled world-country reference table from the data helper.
world_source = dt.get_world_countries_by_iso_label()
# NOTE(review): `df` and `eu_service_import_2020` are not defined in this
# chunk -- this loop depends on earlier (missing) cells and raises
# NameError as written.
# Fills df[i, j] with the 2020 service imports that `source` reports
# from `partner`.
for i in range(len(df)):
    for j in range(len(df)):
        source = df.index[i]
        partner = df.columns[j]
        df.iloc[i,j] = eu_service_import_2020[(eu_service_import_2020['Reporting Economy'] == source) & (eu_service_import_2020['Partner Economy'] == partner)]['2020'].sum()
| .ipynb_checkpoints/eu_analysis_fz-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Variables and assignment
#
# - toc:false
# - branch: master
# - badges: true
# - comments: false
# - categories: [python]
# - hide: true
# ---
#
# Questions:
# - How can I store data in programs?
#
# Objectives:
# - Write programs that assign scalar values to variables and perform calculations with those values.
# - Correctly trace value changes in programs that use scalar assignment.
#
# Keypoints:
# - Use variables to store values.
# - Use `print` to display values.
# - Variables must be created before they are used.
# - Variables persist between cells.
# - Variables can be used in calculations.
# - Python is case-sensitive.
# - Use valid and meaningful variable names.
# ---
#
#
# ### Use variables to store values.
#
# * Variables are names for values.
# * In Python the `=` symbol assigns the value on the right to the name on the left.
# * The variable is created when a value is assigned to it.
# * Here, Python assigns an age to a variable `age` and a name in quotes to a variable `first_name`.
# Assignment creates the variable: an integer for `age`,
# a quoted string for `first_name`.
age = 42
first_name = 'Ahmed'
#
#
#
#
# ### Use `print` to display values.
#
# * Python has a built-in function called `print` that prints things as text.
# * Call the function (i.e., tell Python to run it) by using its name.
# * Provide values to the function (i.e., the things to print) in parentheses.
# * To add a string to the printout, wrap the string in single or double quotes.
# * The values passed to the function are called 'arguments'
# `print` separates its arguments with single spaces.
print(first_name, 'is', age, 'years old')
#
#
# * `print` automatically puts a single space between items to separate them.
# * And wraps around to a new line at the end.
#
#
# ### Variables must be created before they are used.
#
# * Unlike some languages, which "guess" a default value, if a variable doesn't exist yet, or if the name has been mis-spelled, Python reports an error.
#
#
#
# Deliberate error for the lesson: `last_name` was never assigned,
# so Python raises NameError here.
print(last_name)
#
# * The last line of an error message is usually the most informative.
# * We will look at error messages in detail [later]({{ page.root }}/15-scope/#reading-error-messages).
#
#
# ### Variables Persist Between Cells
#
# Be aware that it is the order of **execution** of cells that is important in a Jupyter notebook, not the order
# in which they appear. Python will remember **all** the code that was run previously, including any variables you have
# defined, irrespective of the order in the notebook. Therefore if you define variables lower down the notebook and then
# (re)run cells further up, those defined further down will still be present. As an example, we can create 2 cells with the following content, in this order:
#
# Deliberately out of order for the lesson: run top-to-bottom this raises
# NameError; re-running the print after the assignment prints 1.
print(myval)
myval = 1
#
# If you execute this in order, the first cell will give an error. However, if you run the first cell **after** the second
# cell it will print out ‘1’. To prevent confusion, it can be helpful to use the `Kernel` -> `Restart & Run All` option which
# clears the interpreter and runs everything from a clean slate going top to bottom.
#
#
#
# ### Variables can be used in calculations.
#
# * We can use variables in calculations just as if they were values.
#
#
age = 42
# A variable can appear on both sides of its own assignment.
age = age + 3
print('Age in three years:', age)
#
#
#
# ### Python is case-sensitive.
#
# * Python thinks that upper- and lower-case letters are different,
# so `Name` and `name` are different variables.
# * There are conventions for using upper-case letters at the start of variable names so we will use lower-case letters for now.
#
#
# > Tip: In programming an eye for detail is important. If you include an extra full-stop, or forget a space, then you may get an error message or unexpected behaviour.
# ### Use valid and meaningful variable names.
#
# * Python doesn't care what you call variables as long as they obey the following rules:
#
# * can **only** contain letters, digits, and underscore `_` (typically used to separate words in long variable names)
# * cannot start with a digit
#
# * Variable names that start with underscores like `__bobbins_real_age` have a special meaning so we won't do that until we understand the convention.
#
#
# Valid but meaningless names -- legal to Python, unhelpful to readers
# (the point of this lesson section).
flabadab = 42
ewr_422_yY = 'Ahmed'
print(ewr_422_yY, 'is', flabadab, 'years old')
# * Use meaningful variable names to help other people understand what the program does.
# * The most important "other person" is your future self.
#
#
# ---
#
# Do [the quick-test](https://nu-cem.github.io/CompPhys/2021/08/02/02-Variables-Assignment-Qs.html).
#
# Back to [Python part one](https://nu-cem.github.io/CompPhys/2021/08/02/Python_basics_one.html).
#
# ---
| _notebooks/2021-08-02-02-Variables-Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ivan-Nebogatikov/HumanActivityRecognitionOutliersDetection/blob/main/Processing.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Qacy3ORv0ggu"
# Скачиваем данные, преобразуем их в одну таблицу
# + colab={"base_uri": "https://localhost:8080/"} id="60b4shfxz8lT" outputId="e4adab33-4b31-4455-a27b-d838480029df"
import numpy as np
import pandas as pd
import json
from datetime import datetime
from datetime import date
from math import sqrt
from zipfile import ZipFile
from os import listdir
from os.path import isfile, join
filesDir = "/content/drive/MyDrive/training_data"
# Every CSV/zip file directly inside the training-data folder.
csvFiles = [join(filesDir, f) for f in listdir(filesDir) if (isfile(join(filesDir, f)) and 'csv' in f)]
# Collect each chest-mounted accelerometer table, then concatenate once:
# growing a DataFrame with repeated .append() is quadratic and the
# method is removed in pandas >= 2.0.
frames = []
for file in csvFiles:
    if 'acc' in file:
        with ZipFile(file, 'r') as zipObj:
            for fileName in zipObj.namelist():
                if 'chest' in fileName:
                    with zipObj.open(fileName) as csvFile:
                        newData = pd.read_csv(csvFile)
                        # Activity label: second '_'/'.'-separated token
                        # of the archive member's name.
                        newData['type'] = str(csvFile.name).replace('_',' ').replace('.',' ').split()[1]
                        frames.append(newData)
data = pd.concat(frames) if frames else pd.DataFrame()
# Chronological order by the device timestamp.
data = data.sort_values(by=['attr_time'])
print(data)
# heart = pd.read_csv('https://raw.githubusercontent.com/Ivan-Nebogatikov/HumanActivityRecognition/master/datasets/2282_3888_bundle_archive/heart.csv')
# heart['timestamp'] = heart['timestamp'].map(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f"))
# heart = heart.sort_values(by='timestamp')
# def getHeart(x):
# dt = datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f")
# f = heart[heart['timestamp'] < dt]
# lastValue = f.iloc[[-1]]['values'].tolist()[0]
# intValue = list(json.loads(lastValue.replace('\'', '"')))[0]
# return intValue
# acc = pd.read_csv('https://raw.githubusercontent.com/Ivan-Nebogatikov/HumanActivityRecognition/master/datasets/2282_3888_bundle_archive/acc.csv')
# acc['heart'] = acc['timestamp'].map(lambda x: getHeart(x))
# print(acc)
# def change(x):
# if x == 'Pause' or x == 'Movie':
# x = 'Watching TV'
# if x == 'Shop':
# x = 'Walk'
# if x == 'Football':
# x = 'Running'
# if x == 'Meeting' or x == 'Work' or x == 'Picnic ' or x == 'In vehicle' or x == 'In bus' :
# x = 'Sitting'
# if x == 'On bus stop':
# x = 'Walk'
# if x == 'Walking&party' or x == 'Shopping& wearing' or x == 'At home':
# x = 'Walk'
# return x
# acc['act'] = acc['act'].map(lambda x: change(x))
# labels = np.array(acc['act'])
# arrays = acc['values'].map(lambda x: getValue(x))
# x = getDiff(list(arrays.map(lambda x: np.double(x[0]))))
# y = getDiff(list(arrays.map(lambda x: np.double(x[1]))))
# z = getDiff(list(arrays.map(lambda x: np.double(x[2]))))
# dist = list(map(lambda a, b, c: sqrt(a*a+b*b+c*c), x, y, z))
# + id="IIGslAHCFzYS"
# Target vector: one activity label per accelerometer sample.
labels = np.array(data['type'])
# + [markdown] id="t7mBH65j0e8m"
#
# + colab={"base_uri": "https://localhost:8080/"} id="ncaq7RTju04e" outputId="5c58d941-a174-4821-9fed-e64c2dbf717b"
# Time between consecutive samples; used to locate the smallest gap.
data['time_diff'] = data['attr_time'].diff()
indMin = int(data[['time_diff']].idxmin())
print(indMin)
# t_j / t_j1: timestamps bracketing the smallest gap, used as the
# reference interval for the linear interpolation below.
t_j = data.iloc[indMin]['attr_time']
print(t_j)
t_j1 = data.iloc[indMin+1]['attr_time']
diff = t_j1 - t_j
print(diff)
# interpolated = []
# NOTE(review): both terms in each expression below use the *same* row's
# reading, so they algebraically reduce to that reading -- no actual
# interpolation happens (attr_*_i == attr_*). The author's own comment
# ("the +1 row is needed here", translated from Russian) flags exactly
# this: the second term should use the next row's value.
data['attr_x_i'] = data.apply(lambda row: (t_j1 - row['attr_time']) * row['attr_x'] / diff + (row['attr_time'] - t_j) * row['attr_x'] / diff, axis=1) # !!! the next (+1) row is needed here
data['attr_y_i'] = data.apply(lambda row: (t_j1 - row['attr_time']) * row['attr_y'] / diff + (row['attr_time'] - t_j) * row['attr_y'] / diff, axis=1)
data['attr_z_i'] = data.apply(lambda row: (t_j1 - row['attr_time']) * row['attr_z'] / diff + (row['attr_time'] - t_j) * row['attr_z'] / diff, axis=1)
# # for i, row in data.iterrows():
# #     t_i = row['attr_time']
# #     def axis(value): (t_j1 - t_i) * value / (t_j1 - t_j) + (t_i + t_j) * value / (t_j1 + t_j)
# #     interpolated.append([row["id"], row['attr_time'], axis(row['attr_x']), axis(row['attr_y']), axis(row['attr_z']), row['type'], row['time_diff']])
print(data)
# + colab={"base_uri": "https://localhost:8080/"} id="9IFPROCM8phq" outputId="14cabcd5-f6e5-43ee-9d3b-457642a77b29"
# Gravity estimate per axis: 5-sample moving average of the interpolated
# acceleration (the first 4 rows of each column are NaN).
data['g_x'] = data['attr_x_i'].rolling(window=5).mean()
data['g_y'] = data['attr_y_i'].rolling(window=5).mean()
data['g_z'] = data['attr_z_i'].rolling(window=5).mean()
print(data['g_x'])
# + colab={"base_uri": "https://localhost:8080/"} id="Lrz3g-1D_hsJ" outputId="3e66c807-479a-4e40-c0e8-57cfb9568f1e"
# NOTE(review): exact duplicate of the previous cell (recomputes the
# same rolling means); harmless but redundant.
data['g_x'] = data['attr_x_i'].rolling(window=5).mean()
data['g_y'] = data['attr_y_i'].rolling(window=5).mean()
data['g_z'] = data['attr_z_i'].rolling(window=5).mean()
print(data['g_x'])
# + colab={"base_uri": "https://localhost:8080/"} id="9edtt4xHAfHn" outputId="1eff2533-e3d2-4936-f503-9685bebe1215"
import numpy as np
def acc(a, g):
    # Vector triple product ((a x g) / |g|^2) x g, which equals
    # proj_g(a) - a, i.e. minus the component of `a` perpendicular
    # to `g` (the gravity estimate in this notebook).
    g_norm_sq = np.dot(g, g)
    lateral = np.cross(a, g) / g_norm_sq
    return np.cross(lateral, g)
# a_tv: triple-product component of each sample relative to the gravity
# estimate (see acc above); a_th: the per-axis remainder (sample - a_tv).
data['a_tv'] = data.apply(lambda row: acc([row.attr_x_i, row.attr_y_i, row.attr_z_i], [row.g_x, row.g_y, row.g_z]), axis=1)
data['a_th'] = data.apply(lambda row: [row.attr_x_i - row.a_tv[0], row.attr_y_i - row.a_tv[1], row.attr_z_i - row.a_tv[2]], axis=1)
print(data['a_tv'])
# + colab={"base_uri": "https://localhost:8080/"} id="a0do9XkHC7_C" outputId="d7b1bb8d-478f-4aff-dee3-28d2d17426b3"
print(data['a_th'])
# + [markdown] id="MgKGdPNtEruD"
# Вспомогательная функция для вывода результатов
# + id="SHg9rEeUEFX0"
import pandas as pd
import numpy as np
from scipy import interp
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import LabelBinarizer
def class_report(y_true, y_pred, y_score=None, average='micro'):
    """Print a per-class precision/recall/F1 table, optionally with AUC.
    Args:
        y_true: ground-truth label array, shape (n_samples,).
        y_pred: predicted label array, same shape as y_true.
        y_score: optional class-probability matrix whose columns follow
            the label order of np.unique(y_pred); enables the AUC column.
        average: 'micro' or 'macro' -- how the aggregate AUC row is built.
    Returns:
        float: overall accuracy (also printed); None on shape mismatch.
    """
    if y_true.shape != y_pred.shape:
        print("Error! y_true %s is not the same shape as y_pred %s" % (
              y_true.shape,
              y_pred.shape)
        )
        return
    accuracy = accuracy_score(y_true, y_pred)
    print("Accuracy:", accuracy)
    lb = LabelBinarizer()
    if len(y_true.shape) == 1:
        lb.fit(y_true)
    #Value counts of predictions
    labels, cnt = np.unique(
        y_pred,
        return_counts=True)
    # NOTE(review): hard-coded class count, used only to select the
    # binary-vs-multiclass micro-ROC branch below -- confirm it matches
    # the actual number of classes in the data.
    n_classes = 5
    pred_cnt = pd.Series(cnt, index=labels)
    metrics_summary = precision_recall_fscore_support(
            y_true=y_true,
            y_pred=y_pred,
            labels=labels)
    # Weighted averages appended as the 'avg / total' row.
    avg = list(precision_recall_fscore_support(
            y_true=y_true,
            y_pred=y_pred,
            average='weighted'))
    metrics_sum_index = ['precision', 'recall', 'f1-score', 'support']
    class_report_df = pd.DataFrame(
        list(metrics_summary),
        index=metrics_sum_index,
        columns=labels)
    support = class_report_df.loc['support']
    total = support.sum()
    class_report_df['avg / total'] = avg[:-1] + [total]
    class_report_df = class_report_df.T
    class_report_df['pred'] = pred_cnt
    # NOTE(review): chained-assignment write; pandas may warn or (in
    # copy-on-write mode) ignore it.
    class_report_df['pred'].iloc[-1] = total
    if not (y_score is None):
        fpr = dict()
        tpr = dict()
        roc_auc = dict()
        # One-vs-rest ROC per class, using the matching score column.
        for label_it, label in enumerate(labels):
            fpr[label], tpr[label], _ = roc_curve(
                (y_true == label).astype(int),
                y_score[:, label_it])
            roc_auc[label] = auc(fpr[label], tpr[label])
        if average == 'micro':
            if n_classes <= 2:
                fpr["avg / total"], tpr["avg / total"], _ = roc_curve(
                    lb.transform(y_true).ravel(),
                    y_score[:, 1].ravel())
            else:
                fpr["avg / total"], tpr["avg / total"], _ = roc_curve(
                        lb.transform(y_true).ravel(),
                        y_score.ravel())
            roc_auc["avg / total"] = auc(
                fpr["avg / total"],
                tpr["avg / total"])
        elif average == 'macro':
            # First aggregate all false positive rates
            all_fpr = np.unique(np.concatenate([
                fpr[i] for i in labels]
            ))
            # Then interpolate all ROC curves at this points
            mean_tpr = np.zeros_like(all_fpr)
            for i in labels:
                # NOTE(review): scipy's `interp` is deprecated;
                # numpy.interp is the drop-in replacement.
                mean_tpr += interp(all_fpr, fpr[i], tpr[i])
            # Finally average it and compute AUC
            mean_tpr /= n_classes
            fpr["macro"] = all_fpr
            tpr["macro"] = mean_tpr
            roc_auc["avg / total"] = auc(fpr["macro"], tpr["macro"])
        class_report_df['AUC'] = pd.Series(roc_auc)
    print(class_report_df)
    return accuracy
# + [markdown] id="IxSsJfXOEvZK"
# Определяем функции для предсказания с использованием классификатора и с использованием нескольких классификаторов
# + id="E_ivs39sExi6"
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.metrics import plot_confusion_matrix
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
def Predict(x, classifier = None):
    """Fit a classifier on `x` against the module-level `labels` and report.
    Prints hold-out metrics (via class_report), 7-fold CV accuracy, any
    feature importances, and a confusion-matrix plot.
    Args:
        x: feature matrix, one row per sample.
        classifier: sklearn-style estimator; a fresh balanced
            RandomForest is created when omitted.
    Returns:
        list: [hold-out accuracy, CV mean accuracy, CV accuracy std].
    """
    # Build the default per call: a default-argument instance would be a
    # single shared object, silently refit by every call to Predict.
    if classifier is None:
        classifier = RandomForestClassifier(n_estimators = 400, random_state = 3, class_weight='balanced')
    train_features, test_features, train_labels, test_labels = train_test_split(x, labels, test_size = 0.15, random_state = 242)
    print('Training Features Shape:', train_features.shape)
    print('Testing Features Shape:', test_features.shape)
    print("\n")
    classifier.fit(train_features, train_labels);
    # Cross-validate on a shuffled copy of the full data set.
    x_shuffled, labels_shuffled = shuffle(np.array(x), np.array(labels))
    scores = cross_val_score(classifier, x_shuffled, labels_shuffled, cv=7)
    print("%f accuracy with a standard deviation of %f" % (scores.mean(), scores.std()))
    predictions = list(classifier.predict(test_features))
    pred_prob = classifier.predict_proba(test_features)
    accuracy = class_report(
        y_true=test_labels,
        y_pred=np.asarray(predictions),
        y_score=pred_prob, average='micro')
    if hasattr(classifier, 'feature_importances_'):
        print(classifier.feature_importances_)
    plot_confusion_matrix(classifier, test_features, test_labels)
    plt.xticks(rotation = 90)
    # NOTE(review): this expression only *reads* the style entry -- it is
    # a no-op; plt.style.use('seaborn-darkgrid') was probably intended.
    plt.style.library['seaborn-darkgrid']
    plt.show()
    return [accuracy, scores.mean(), scores.std()]
def PredictWithClassifiers(data, classifiers):
    """Run Predict with each named classifier and print a summary table.
    Args:
        data: feature matrix, passed straight through to Predict.
        classifiers: mapping of display name -> estimator instance.
    """
    accuracies = {}
    for name, value in classifiers.items():
        accuracy = Predict(data, value)
        accuracies[name] = accuracy
        print("\n")
    # List comprehension (the original used a *set*, which randomized
    # the row order of the summary table) keeps insertion order.
    df = pd.DataFrame([(k, v[0], v[1], v[2]) for k, v in accuracies.items()], columns=["Method", "Accuracy", "Mean", "Std"])
    print(df)
# + [markdown] id="LaoOX5Z_E3vH"
# Определяем набор используемых классификаторов
# + id="mp0AvYxRE0t3"
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.ensemble import AdaBoostClassifier
# Named classifier line-up compared by PredictWithClassifiers below.
methods = {
    "MLP" : MLPClassifier(random_state=1, max_iter=300),
    "K-neigh" : KNeighborsClassifier(), # default k = 5
    "Random Forest" : RandomForestClassifier(n_estimators = 400, random_state = 3, class_weight='balanced'),
    "Bayes" : GaussianNB(),
    "AdaBoost" : AdaBoostClassifier(),
    "SVM" : svm.SVC(probability=True, class_weight='balanced')
    }
# + colab={"background_save": true, "base_uri": "https://localhost:8080/", "height": 1000} id="HRSLqDLYE7mq" outputId="7eb071bc-cdff-4d9e-b954-b7fef34dc924"
# Feature matrix: the three components of a_th; NaNs from the rolling
# gravity-estimate warm-up are replaced with 0.
frame = pd.DataFrame(data['a_th'].to_list(), columns=['x','y','z']).fillna(0)
print(frame)
feature_list = list(frame.columns)
print(frame)
PredictWithClassifiers(frame, methods)
| Processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ungraded Lab: Build a Multi-output Model
#
# In this lab, we'll show how you can build models with more than one output. The dataset we will be working on is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Energy+efficiency). It is an Energy Efficiency dataset which uses the bulding features (e.g. wall area, roof area) as inputs and has two outputs: Cooling Load and Heating Load. Let's see how we can build a model to train on this data.
# + [markdown] colab={} colab_type="code" id="0p84I7yFHRT2"
# ## Imports
# +
try:
    # # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    # `pass` keeps the try-body non-empty: with the Colab magic commented
    # out by jupytext, the original body was comments only, which is a
    # SyntaxError when the file is run as plain Python.
    pass
except Exception:
    pass
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
from sklearn.model_selection import train_test_split
# -
# ## Utilities
#
# We define a few utilities for data conversion and visualization to make our code more neat.
# + colab={} colab_type="code" id="04Y-C9RYUTes"
def format_output(data):
    """Remove the 'Y1' and 'Y2' target columns from *data* (mutates the
    frame in place) and return them as a pair of NumPy arrays (y1, y2)."""
    extracted = [np.array(data.pop(target)) for target in ('Y1', 'Y2')]
    return extracted[0], extracted[1]
def norm(x):
    # Z-score normalisation using the *training* statistics (module-level
    # `train_stats`), so train and test share the same scaling.
    return (x - train_stats['mean']) / train_stats['std']
def plot_diff(y_true, y_pred, title=''):
    """Scatter predictions against true values on a square, equal-axis plot.

    The diagonal line marks perfect predictions (y_pred == y_true).
    """
    plt.scatter(y_true, y_pred)
    plt.title(title)
    plt.xlabel('True Values')
    plt.ylabel('Predictions')
    plt.axis('equal')
    plt.axis('square')
    # Freeze the autoscaled limits so the diagonal drawn below does not rescale the view.
    plt.xlim(plt.xlim())
    plt.ylim(plt.ylim())
    plt.plot([-100, 100], [-100, 100])
    plt.show()
def plot_metrics(metric_name, title, ylim=5):
    """Plot train (blue) vs. validation (green) curves for one metric,
    read from the module-level Keras `history` object."""
    plt.title(title)
    plt.ylim(0, ylim)
    plt.plot(history.history[metric_name], color='blue', label=metric_name)
    plt.plot(history.history['val_' + metric_name], color='green', label='val_' + metric_name)
    plt.show()
# -
# ## Prepare the Data
#
# We download the dataset and format it for training.
# +
# Specify data URI
URI = 'local_data/ENB2012_data.xls'
# link for dataset excel: https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx
# Use pandas excel reader
df = pd.read_excel(URI)
# df.drop(columns=['Unnamed: 10', 'Unnamed: 11'], inplace=True)
# -
df.head()
# +
# Shuffle all rows (frac=1) before splitting so train/test are randomly mixed
df = df.sample(frac=1).reset_index(drop=True)
# Split the data into train and test with 80 train / 20 test
train, test = train_test_split(df, test_size=0.2)
# Per-column statistics of the training set, used by norm() below
train_stats = train.describe()
# Get Y1 and Y2 as the 2 outputs and format them as np arrays.
# Drop the targets from train_stats so only feature columns get normalised.
train_stats.pop('Y1')
train_stats.pop('Y2')
train_stats = train_stats.transpose()
# format_output pops Y1/Y2 from the frames IN PLACE, leaving features only
train_Y = format_output(train)
test_Y = format_output(test)
# Normalize the training and test data (z-score with training statistics)
norm_train_X = norm(train)
norm_test_X = norm(test)
# -
train
# ## Build the Model
#
# Here is how we'll build the model using the functional syntax. Notice that we can specify a list of outputs (i.e. `[y1_output, y2_output]`) when we instantiate the `Model()` class.
# +
# Define model layers.
# NOTE: Dense `units` must be integers; the original passed strings ('128'),
# which only worked because older Keras coerced them. Also fixed the stray
# space in `train .columns`.
input_layer = Input(shape=(len(train.columns),))
first_dense = Dense(units=128, activation='relu')(input_layer)
second_dense = Dense(units=128, activation='relu')(first_dense)
# Y1 output is fed directly from the second dense layer
y1_output = Dense(units=1, name='y1_output')(second_dense)
third_dense = Dense(units=64, activation='relu')(second_dense)
# Y2 output comes via the third dense layer
y2_output = Dense(units=1, name='y2_output')(third_dense)
# Define the model with the input layer and a list of output layers
model = Model(inputs=input_layer, outputs=[y1_output, y2_output])
print(model.summary())
# -
# ## Configure parameters
#
# We specify the optimizer as well as the loss and metrics for each output.
# Specify the optimizer, and compile the model with loss functions for both outputs.
# `lr` is deprecated in tf.keras optimizers; `learning_rate` is the supported name.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
# One MSE loss and one RMSE metric per named output head.
model.compile(optimizer=optimizer,
              loss={'y1_output': 'mse', 'y2_output': 'mse'},
              metrics={'y1_output': tf.keras.metrics.RootMeanSquaredError(),
                       'y2_output': tf.keras.metrics.RootMeanSquaredError()})
# ## Train the Model
# Train the model (10 epochs here, not the 500 the original comment claimed;
# increase epochs for a better fit)
history = model.fit(norm_train_X, train_Y,
                    epochs=10, batch_size=10, validation_data=(norm_test_X, test_Y))
# ## Evaluate the Model and Plot Metrics
# Test the model and print loss and RMSE for both outputs.
# The compiled metrics are RootMeanSquaredError, so label the printed values
# as RMSE — the original format string mislabelled them as "mse".
loss, Y1_loss, Y2_loss, Y1_rmse, Y2_rmse = model.evaluate(x=norm_test_X, y=test_Y)
print("Loss = {}, Y1_loss = {}, Y1_rmse = {}, Y2_loss = {}, Y2_rmse = {}".format(loss, Y1_loss, Y1_rmse, Y2_loss, Y2_rmse))
# Plot predictions vs. truth for each output, then the RMSE training curves
Y_pred = model.predict(norm_test_X)
plot_diff(test_Y[0], Y_pred[0], title='Y1')
plot_diff(test_Y[1], Y_pred[1], title='Y2')
plot_metrics(metric_name='y1_output_root_mean_squared_error', title='Y1 RMSE', ylim=6)
plot_metrics(metric_name='y2_output_root_mean_squared_error', title='Y2 RMSE', ylim=7)
| C1_functional_API/C1_W1_Lab_2_multi-output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Finding a minimum in a flat neighborhood
# =========================================
#
# An exercise in finding a minimum. This exercise is hard because the
# function is very flat around the minimum (all its derivatives are zero).
# Thus gradient information is unreliable.
#
# The function admits a minimum in [0, 0]. The challenge is to get within
# 1e-7 of this minimum, starting at x0 = [1, 1].
#
# The solution that we adopt here is to give up on using gradient or
# information based on local differences, and to rely on the Powell
# algorithm. With 162 function evaluations, we get to 1e-8 of the
# solution.
#
#
# +
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
def f(x):
    """Ill-conditioned objective with a very flat minimum at (0, 0)."""
    return np.exp(-1 / (.01 * x[0] ** 2 + x[1] ** 2))


def g(x):
    """Well-conditioned version of f: rescales the first coordinate by 10."""
    return f([10 * x[0], x[1]])


def g_prime(x):
    """Analytic gradient of g (not used by the Powell optimisation below)."""
    radius = np.sqrt(x[0] ** 2 + x[1] ** 2)
    return 2 / radius ** 3 * g(x) * x / radius
# Powell is derivative-free, so it copes with the flat region where
# gradient-based methods stall; tol=1e-10 pushes it close to the optimum.
result = optimize.minimize(g, [1, 1], method="Powell", tol=1e-10)
x_min = result.x
# -
# Some pretty plotting
#
#
# +
plt.figure(0)
plt.clf()
t = np.linspace(-1.1, 1.1, 100)
# 1-D slice of f along the second coordinate (through the minimum at 0)
plt.plot(t, f([0, t]))
plt.figure(1)
plt.clf()
X, Y = np.mgrid[-1.5:1.5:100j, -1.1:1.1:100j]
# Grayscale image of f over the plane, with contour lines on top
plt.imshow(f([X, Y]).T, cmap=plt.cm.gray_r, extent=[-1.5, 1.5, -1.1, 1.1],
           origin='lower')
plt.contour(X, Y, f([X, Y]), cmap=plt.cm.gnuplot)
# Plot the gradient (subsampled every 5th grid point)
dX, dY = g_prime([.1*X[::5, ::5], Y[::5, ::5]])
# Adjust for our preconditioning
dX *= .1
plt.quiver(X[::5, ::5], Y[::5, ::5], dX, dY, color='.5')
# Plot our solution
plt.plot(x_min[0], x_min[1], 'r+', markersize=15)
plt.show()
| _downloads/plot_exercise_flat_minimum.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature Scaling Example
# You have now seen how feature scaling might change the clusters we obtain from the kmeans algorithm, but it is time to try it out!
#
# First let's get some data to work with. The first cell here will read in the necessary libraries, generate data, and make a plot of the data you will be working with throughout the rest of the notebook.
#
# The dataset you will work with through the notebook is then stored in data.
#
#
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from IPython.display import Image
from sklearn.datasets.samples_generator import make_blobs
import tests2 as t
# %matplotlib inline
# DSND colors: UBlue, Salmon, Gold, Slate
plot_colors = ['#02b3e4', '#ee2e76', '#ffb613', '#2e3d49']
# Light colors: Blue light, Salmon light
plot_lcolors = ['#88d0f3', '#ed8ca1', '#fdd270']
# Gray/bg colors: Slate Dark, Gray, Silver
plot_grays = ['#1c262f', '#aebfd1', '#fafbfc']
def create_data():
    """Generate the synthetic 2-feature dataset used throughout the notebook.

    Samples points uniformly in [-3, 3]^2, removes an inner square and an
    outer L1-ball to leave a ring of points, then affinely rescales the two
    axes to mimic '5k completion time (min)' and 'raw test score'. Saves a
    scatter plot of the data as a side effect.

    Returns:
        pd.DataFrame with columns ['5k_Time', 'Raw_Test_Score'].
    """
    n_points = 120
    # Fixed seed -> reproducible dataset
    X = np.random.RandomState(3200000).uniform(-3, 3, [n_points, 2])
    X_abs = np.absolute(X)
    # Drop points too close to the origin (inner square) or too far out (|x|+|y| > 5.3)
    inner_ring_flag = np.logical_and(X_abs[:,0] < 1.2, X_abs[:,1] < 1.2)
    outer_ring_flag = X_abs.sum(axis = 1) > 5.3
    keep = np.logical_not(np.logical_or(inner_ring_flag, outer_ring_flag))
    X = X[keep]
    X = X[:60] # keep at most the first 60 points (original comment said 100)
    # Rescale: x -> 2.5x + 22.5 (minutes), y -> 100y + 500 (score)
    X1 = np.matmul(X, np.array([[2.5, 0], [0, 100]])) + np.array([22.5, 500])
    plt.figure(figsize = [15,15])
    plt.scatter(X1[:,0], X1[:,1], s = 64, c = plot_colors[-1])
    plt.xlabel('5k Completion Time (min)', size = 30)
    plt.xticks(np.arange(15, 30+5, 5), fontsize = 30)
    plt.ylabel('Test Score (raw)', size = 30)
    plt.yticks(np.arange(200, 800+200, 200), fontsize = 30)
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    [side.set_linewidth(2) for side in ax.spines.values()]
    ax.tick_params(width = 2)
    plt.savefig('C18_FeatScalingEx_01.png', transparent = True)
    data = pd.DataFrame(X1)
    data.columns = ['5k_Time', 'Raw_Test_Score']
    return data
data = create_data()
plt.style.use('dark_background')
# -
data.describe()
data.info()
# +
# Use the dictionary to match the values to the corresponding statements
a = 0
b = 60
c = 22.9
d = 4.53
e = 511.7
q1_dict = {
    'number of missing values': a,
    'the mean 5k time in minutes': c,
    'the mean test score as a raw value': e,
    'number of individuals in the dataset': b
}
# check your answer against ours here
t.check_q1(q1_dict)
# -
# Cluster the raw (unscaled) data into two groups
n_clusters = 2
model = KMeans(n_clusters = n_clusters)
preds = model.fit_predict(data)
# +
# Run this to see your results
def plot_clusters(data, preds, n_clusters):
    """Scatter the dataset coloured by KMeans cluster assignment
    (one colour from plot_colors per cluster)."""
    plt.figure(figsize = [15,15])
    for k, col in zip(range(n_clusters), plot_colors[:n_clusters]):
        my_members = (preds == k)
        plt.scatter(data['5k_Time'][my_members], data['Raw_Test_Score'][my_members], s = 64, c = col)
    plt.xlabel('5k Completion Time (min)', size = 30)
    plt.xticks(np.arange(15, 30+5, 5), fontsize = 30)
    plt.ylabel('Test Score (raw)', size = 30)
    plt.yticks(np.arange(200, 800+200, 200), fontsize = 30)
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    [side.set_linewidth(2) for side in ax.spines.values()]
    ax.tick_params(width = 2)
plot_clusters(data, preds, 2)
# -
# Standardise the test score (z-score) and convert the 5k time to seconds,
# deliberately changing the relative scales of the two features.
data['test_scaled'] = (data['Raw_Test_Score'] - np.mean(data['Raw_Test_Score']))/np.std(data['Raw_Test_Score'])
data['5k_time_sec'] = data['5k_Time']*60
# NOTE(review): KMeans is re-fit on ALL columns (original + scaled copies) —
# presumably intentional for the exercise; confirm against the lesson text.
n_clusters = 2
model = KMeans(n_clusters = n_clusters)
preds = model.fit_predict(data)
| Unsupervised-Learning/Clustering/Feature Scaling Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
from tensorflow.python.framework import ops
import sys
import os
# NOTE(review): '__file__' is a string literal here, so abspath resolves it
# relative to the current working directory — works in a notebook, but this
# is not the module's real path; confirm that is intended.
BASE_DIR = os.path.dirname(os.path.abspath('__file__'))
sys.path.append(BASE_DIR)
# Load the custom TF grouping op from its compiled shared library.
# grouping_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_grouping_so.so'))
grouping_module=tf.load_op_library(os.path.join(BASE_DIR, 'tf_grouping_so_hk.so'))
# !python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())'
# !cat /etc/*-release
import os
os.environ
| utils/spidercnn/tf_ops/grouping/tf_grouping_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
import seaborn as sns
import os
import requests
import zipfile
import re
# -
# Weather features ranked by importance (most important first). The model
# input list drops the last-ranked feature ('Cloud Type'), keeping 11.
ranked_list = [
    'Relative Humidity', 'Solar Zenith Angle', 'GHI', 'Surface Albedo',
    'Wind Direction', 'Temperature', 'Wind Speed', 'DNI', 'DHI',
    'Dew Point', 'Pressure', 'Cloud Type',
]
feature_list = ranked_list[:-1]
feature_list
# BMS station:
# Latitude: 39.742o North
# Longitude: 105.18o West
#
# STAC station:
# Latitude: 39.75685o North
# Longitude: 104.62025o West
#
# University of Oregon (SRML)
# Latitude: 44.0467o North
# Longitude: 123.0743o West
#
# VTIF NREL Vehicle Testing and Integration Facility RSR
# Latitude: 39.74211o North
# Longitude: 105.17572o West
# acceptable location code: BMS, STAC, UOSMRL, VTIF
location='VTIF'
url = 'https://midcdmz.nrel.gov/apps/daily.pl?site='+location+'&live=1'
response = requests.get(url)
if response.status_code == 200:
    print ('Website accessed!')
else:
    # Report the ACTUAL HTTP status — the original always printed "Error code
    # 200" in the failure branch, which is misleading.
    print ('Error code ' + str(response.status_code) + '. Rerun this code block or check website!')
# Context manager guarantees the file is closed even if the write fails.
with open("delete_after.txt", "w") as outfile:
    outfile.write(response.text)
feature_dict=dict()
# Parse the saved MIDC daily page: data rows contain the HTML fragment
# "<TD nowrap><DIV ALIGN=right>" followed by the numeric value.
with open('delete_after.txt','r') as readfile:
    for line in readfile:
        result=re.search('<TD nowrap><DIV ALIGN=right>',line)
        if result is not None:
            for feature in ['Global','Direct','Diffuse','Wind Direction','Wind Speed','Zenith','Albedo','Relative Humidity','Air Temperature','Pressure']:
                if feature in line:
                    # NOTE(review): pattern should be a raw string r"[-+]?\d+\.\d+"
                    # to avoid invalid-escape warnings; matching is unchanged.
                    result=re.findall("[-+]?\d+\.\d+",line)
                    # NOTE(review): findall returns a list, never None — an empty
                    # match would raise IndexError at result[0] below.
                    if result is not None:
                        # keep only the FIRST occurrence of each feature
                        if feature not in feature_dict:
                            feature_dict[feature]=float(result[0])
# Rename the scraped keys to the feature names used by the model.
keys = list(feature_dict)
for key in keys:
    if key == "Global":
        feature_dict["GHI"] = feature_dict["Global"]
        del feature_dict["Global"]
    elif key == "Direct":
        feature_dict["DNI"] = feature_dict["Direct"]
        del feature_dict["Direct"]
    elif key == "Diffuse":
        feature_dict["DHI"] = feature_dict["Diffuse"]
        del feature_dict["Diffuse"]
    elif key == "Zenith":
        feature_dict["Solar Zenith Angle"] = feature_dict["Zenith"]
        del feature_dict["Zenith"]
    elif key == "Albedo":
        feature_dict["Surface Albedo"] = feature_dict["Albedo"]
        del feature_dict["Albedo"]
    elif key == 'Air Temperature':
        feature_dict["Temperature"] = feature_dict['Air Temperature']
        del feature_dict['Air Temperature']
    elif key == 'Pressure':
        # presumably converts mmHg to hPa (1 mmHg = 1.33322 hPa) — TODO confirm units
        feature_dict['Pressure'] = feature_dict['Pressure']*1.33322
    else:
        pass
feature_dict
# ! rm delete_after.txt
| examples/NREL_weather_webscrape/NREL_weather_data_webscrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: feedforward
# language: python
# name: feedforward
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="fgrno2xpSQEG" executionInfo={"status": "ok", "timestamp": 1644642142830, "user_tz": 300, "elapsed": 23453, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="1bf6ff3f-44d1-45b7-f9df-e44b65e746a0"
from google.colab import drive
# Mount Google Drive; force_remount avoids stale mounts on re-runs.
drive.mount('/content/drive',force_remount=True)
# + id="wPVKtoviSV4v" executionInfo={"status": "ok", "timestamp": 1644642145037, "user_tz": 300, "elapsed": 2215, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
import os
# Work from the course folder in Drive so relative data paths resolve.
os.chdir('/content/drive/MyDrive/Colab_Notebooks/coursera/coursera_Probabilistic_Deep_Learning_TF2/Week2')
# + colab={"base_uri": "https://localhost:8080/"} id="kALAGUDGSXGn" executionInfo={"status": "ok", "timestamp": 1644642215161, "user_tz": 300, "elapsed": 70140, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="fed262b5-ff84-4ecc-a8cd-b9156ccab027"
# !pip install tensorflow=='2.1.0'
# + colab={"base_uri": "https://localhost:8080/"} id="Xmv4WuT_SbL4" executionInfo={"status": "ok", "timestamp": 1644642226605, "user_tz": 300, "elapsed": 11473, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="aeff6d2b-014a-4bb7-dba5-bd7065978737"
# !pip install tensorflow_probability=='0.9.0'
# + [markdown] id="jABhOvG_oFV5"
# # Programming Assignment
# + [markdown] id="BOfScRJ_oFV6"
# ## Bayesian convolutional neural network
# + [markdown] id="1kTxcfwxoFV6"
# ### Instructions
#
# In this notebook, you will create a Bayesian convolutional neural network to classify the famous MNIST handwritten digits. This will be a probabilistic model, designed to capture both aleatoric and epistemic uncertainty. You will test the uncertainty quantifications against a corrupted version of the dataset.
#
# Some code cells are provided for you in the notebook. You should avoid editing provided code, and make sure to execute the cells in order to avoid unexpected errors. Some cells begin with the line:
#
# `#### GRADED CELL ####`
#
# Don't move or edit this first line - this is what the automatic grader looks for to recognise graded cells. These cells require you to write your own code to complete them, and are automatically graded when you submit the notebook. Don't edit the function name or signature provided in these cells, otherwise the automatic grader might not function properly.
#
# ### How to submit
#
# Complete all the tasks you are asked for in the worksheet. When you have finished and are happy with your code, press the **Submit Assignment** button at the top of this notebook.
#
# ### Let's get started!
#
# We'll start running some imports, and loading the dataset. Do not edit the existing imports in the following cell. If you would like to make further Tensorflow imports, you should add them here.
# + id="GdB-aS_aoFV7" executionInfo={"status": "ok", "timestamp": 1644642251243, "user_tz": 300, "elapsed": 2552, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### PACKAGE IMPORTS ####
# Run this cell first to import all required packages. Do not make any imports elsewhere in the notebook
import tensorflow as tf
import tensorflow_probability as tfp
import os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import RMSprop
tfd = tfp.distributions
tfpl = tfp.layers
# If you would like to make further imports from tensorflow, add them here
# + [markdown] id="tLoEIz_5oFV-"
# #### The MNIST and MNIST-C datasets
#
# In this assignment, you will use the [MNIST](http://yann.lecun.com/exdb/mnist/) and [MNIST-C](https://github.com/google-research/mnist-c) datasets, which both consist of a training set of 60,000 handwritten digits with corresponding labels, and a test set of 10,000 images. The images have been normalised and centred. The MNIST-C dataset is a corrupted version of the MNIST dataset, to test out-of-distribution robustness of computer vision models.
#
# - <NAME>, <NAME>, <NAME>, and <NAME>. "Gradient-based learning applied to document recognition." Proceedings of the IEEE, 86(11):2278-2324, November 1998.
# - <NAME> and <NAME>. "MNIST-C: A Robustness Benchmark for Computer Vision" https://arxiv.org/abs/1906.02337
#
# Your goal is to construct a neural network that classifies images of handwritten digits into one of 10 classes.
# + [markdown] id="67o1JQ9yqWuv"
# #### Import the data
#
# The datasets required for this project can be downloaded from the following links:
#
# https://drive.google.com/file/d/10VhBL5zo4cOA_28trFCu3WtxFBHbj3yV/view?usp=sharing
#
# https://drive.google.com/file/d/11013-Bk-iJjVZ1rPn1TFPut12WNhMu5q/view?usp=sharing
#
# You should store these files in Drive for use in this Colab notebook.
# + id="Nn-dqkOBqxxg"
# Run this cell to connect to your Drive folder
#from google.colab import drive
#drive.mount('/content/gdrive')
# + [markdown] id="Wdq_bGwyoFV-"
# #### Load the datasets
#
# We'll start by importing two datasets. The first is the MNIST dataset of handwritten digits, and the second is the MNIST-C dataset, which is a corrupted version of the MNIST dataset. This dataset is available on [TensorFlow datasets](https://www.tensorflow.org/datasets/catalog/mnist_corrupted). We'll be using the dataset with "spatters". We will load and inspect the datasets below. We'll use the notation `_c` to denote `corrupted`. The images are the same as in the original MNIST, but are "corrupted" by some grey spatters.
# + id="hoZrjcjsoFV_" executionInfo={"status": "ok", "timestamp": 1644642272701, "user_tz": 300, "elapsed": 184, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Function to load training and testing data, with labels in integer and one-hot form
def load_data(name):
    """Load MNIST-style .npy arrays from directory `name`.

    Images are inverted and rescaled to [0, 1] (1 - x/255) as float32;
    labels are returned both as integer arrays and one-hot matrices.

    Returns:
        ((x_train, y_train, y_train_oh), (x_test, y_test, y_test_oh))
    """
    data_dir = os.path.join(name)
    x_train = 1 - np.load(os.path.join(data_dir, 'x_train.npy')) / 255.
    x_train = x_train.astype(np.float32)
    y_train = np.load(os.path.join(data_dir, 'y_train.npy'))
    y_train_oh = tf.keras.utils.to_categorical(y_train)
    x_test = 1 - np.load(os.path.join(data_dir, 'x_test.npy')) / 255.
    x_test = x_test.astype(np.float32)
    y_test = np.load(os.path.join(data_dir, 'y_test.npy'))
    y_test_oh = tf.keras.utils.to_categorical(y_test)
    return (x_train, y_train, y_train_oh), (x_test, y_test, y_test_oh)
# + id="ICc0b73GoFWB" executionInfo={"status": "ok", "timestamp": 1644642287564, "user_tz": 300, "elapsed": 202, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Function to inspect dataset digits
def inspect_images(data, num_images):
    """Show the first `num_images` digits of `data` side by side in grayscale."""
    fig, ax = plt.subplots(nrows=1, ncols=num_images, figsize=(2*num_images, 2))
    for i in range(num_images):
        # data is (N, H, W, 1); drop the channel axis for imshow
        ax[i].imshow(data[i, ..., 0], cmap='gray')
        ax[i].axis('off')
    plt.show()
# + id="7Q_akHFvoFWF" colab={"base_uri": "https://localhost:8080/", "height": 126} executionInfo={"status": "ok", "timestamp": 1644642291335, "user_tz": 300, "elapsed": 2731, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="8f257e3f-ed61-4b5c-85ee-5de7ec04f4ff"
# Load and inspect the MNIST dataset
(x_train, y_train, y_train_oh), (x_test, y_test, y_test_oh) = load_data('/content/drive/MyDrive/Colab_Notebooks/coursera/coursera_Probabilistic_Deep_Learning_TF2/Week2/MNIST')
inspect_images(data=x_train, num_images=8)
# + id="ER5aLNPnoFWI" colab={"base_uri": "https://localhost:8080/", "height": 126} executionInfo={"status": "ok", "timestamp": 1644642303714, "user_tz": 300, "elapsed": 2700, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="4b38dabc-b4bf-4451-e8d2-f1689a1740c6"
# Load and inspect the MNIST-C dataset
(x_c_train, y_c_train, y_c_train_oh), (x_c_test, y_c_test, y_c_test_oh) = load_data('/content/drive/MyDrive/Colab_Notebooks/coursera/coursera_Probabilistic_Deep_Learning_TF2/Week2/MNIST_corrupted')
inspect_images(data=x_c_train, num_images=8)
# + [markdown] id="ATtylddtoFWK"
# #### Create the deterministic model
#
# We will first train a standard deterministic CNN classifier model as a base model before implementing the probabilistic and Bayesian neural networks. You should now build the deterministic model using the Sequential API according to the following specifications:
#
# * The first layer should be Conv2D layer with 8 filters, 5x5 kernel size, ReLU activation and `'VALID'` padding.
# * This layer should set the `input_shape` according to the function argument
# * The second layer should be a MaxPooling2D layer with a 6x6 window size.
# * The third layer should be a Flatten layer
# * The final layer should be a Dense layer with 10 units and softmax activation
#
# In total, the network should have 4 layers.
#
# The model should then be compiled with the loss function, optimiser and list of metrics supplied in the function arguments.
# + id="mwu2unFloFWK" executionInfo={"status": "ok", "timestamp": 1644642573711, "user_tz": 300, "elapsed": 181, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### GRADED CELL ####
# Complete the following function.
# Make sure to not change the function name or arguments.
def get_deterministic_model(input_shape, loss, optimizer, metrics):
    """
    This function should build and compile a CNN model according to the above specification.
    The function takes input_shape, loss, optimizer and metrics as arguments, which should be
    used to define and compile the model.
    Your function should return the compiled model.
    """
    model=Sequential([
        # 8 filters, 5x5 kernel, VALID (no) padding
        Conv2D(input_shape=(input_shape),filters=8,kernel_size=(5,5),activation='relu',padding='VALID'),
        MaxPooling2D(pool_size=(6,6)),
        Flatten(),
        # 10-way softmax over the digit classes
        Dense(units=10,activation='softmax')
    ])
    model.compile(loss=loss,optimizer=optimizer,metrics=metrics)
    return model
# + id="m7MPJousoFWN" executionInfo={"status": "ok", "timestamp": 1644642575351, "user_tz": 300, "elapsed": 181, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Run your function to get the benchmark model.
# The fixed seed makes the initial weights reproducible (and comparable with
# the probabilistic model built later under the same seed).
tf.random.set_seed(0)
deterministic_model = get_deterministic_model(
    input_shape=(28, 28, 1),
    loss=SparseCategoricalCrossentropy(),
    optimizer=RMSprop(),
    metrics=['accuracy']
)
# + id="bHH61DKIoFWP" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644642576409, "user_tz": 300, "elapsed": 29, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="179fd335-ef1e-4db4-c976-529b38da49e0"
# Print the model summary
deterministic_model.summary()
# + id="TKQwGmx4oFWR" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644642709661, "user_tz": 300, "elapsed": 132160, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="e7a53c84-9145-4fbe-ef5a-9e10f469bf8a"
# Train the model (integer labels match SparseCategoricalCrossentropy)
deterministic_model.fit(x_train, y_train, epochs=5)
# + id="QH4evy33oFWT" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644642713721, "user_tz": 300, "elapsed": 4068, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="5d4fca52-d2d2-4e14-fde6-fa642a677746"
# Evaluate the model; evaluate() returns [loss, accuracy], so [1] is accuracy
print('Accuracy on MNIST test set: ',
      str(deterministic_model.evaluate(x_test, y_test, verbose=False)[1]))
print('Accuracy on corrupted MNIST test set: ',
      str(deterministic_model.evaluate(x_c_test, y_c_test, verbose=False)[1]))
# + [markdown] id="6gL4XmbLoFWW"
# As you might expect, the pointwise performance on the corrupted MNIST set is worse. This makes sense, since this dataset is slightly different, and noisier, than the uncorrupted version. Furthermore, the model was trained on the uncorrupted MNIST data, so has no experience with the spatters.
# + [markdown] id="IgADj27goFWX"
# ### Probabilistic CNN model
#
# You'll start by turning this deterministic network into a probabilistic one, by letting the model output a distribution instead of a deterministic tensor. This model will capture the aleatoric uncertainty on the image labels. You will do this by adding a probabilistic layer to the end of the model and training using the negative loglikelihood.
#
# You should first define the negative loss likelihood loss function below. This function has arguments `y_true` for the correct label (as a one-hot vector), and `y_pred` as the model prediction (a `OneHotCategorical` distribution). It should return the negative log-likelihood of each sample in `y_true` given the predicted distribution `y_pred`. If `y_true` is of shape `[B, E]` and `y_pred` has batch shape `[B]` and event shape `[E]`, the output should be a Tensor of shape `[B]`.
# + id="aBLyvWOtoFWX" executionInfo={"status": "ok", "timestamp": 1644642786246, "user_tz": 300, "elapsed": 194, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function name or arguments.
def nll(y_true, y_pred):
    """
    This function should return the negative log-likelihood of each sample
    in y_true given the predicted distribution y_pred. If y_true is of shape
    [B, E] and y_pred has batch shape [B] and event_shape [E], the output
    should be a Tensor of shape [B].
    """
    # y_pred is a tfp distribution object; log_prob evaluates per batch element.
    return -y_pred.log_prob(y_true)
# + [markdown] id="AtXrZEWpoFWZ"
# You should now build your probabilistic model according to the following specification:
#
# * The first three layers are the same as for the deterministic model above
# * The fourth layer should be a Dense layer with no activation function, and the correct number of units needed to parameterise the probabilistic layer that follows
# * The final layer should be a probabilistic layer that outputs a `OneHotCategorical` distribution with an event shape of `[10]`, corresponding to the 10 digits
# * The `convert_to_tensor_fn` in the categorical layer should be set to the mode
#
# In total, your model should have 5 layers.
#
# The model should then be compiled with the loss function, optimiser and list of metrics supplied in the function arguments.
# + id="aRtkNN_loFWa" executionInfo={"status": "ok", "timestamp": 1644643255928, "user_tz": 300, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function name or arguments.
def get_probabilistic_model(input_shape, loss, optimizer, metrics):
    """
    This function should return the probabilistic model according to the
    above specification.
    The function takes input_shape, loss, optimizer and metrics as arguments, which should be
    used to define and compile the model.
    Your function should return the compiled model.
    """
    model=Sequential([
        Conv2D(input_shape=(input_shape),filters=8,kernel_size=(5,5),activation='relu',padding='VALID'),
        MaxPooling2D(pool_size=(6,6)),
        Flatten(),
        # Logits layer: 10 units, sized to parameterise the OneHotCategorical below
        Dense(units=10),
        # Output is a distribution; convert_to_tensor_fn=mode yields the most
        # likely class whenever a plain tensor is needed (e.g. accuracy metric)
        tfpl.OneHotCategorical(10,convert_to_tensor_fn=tfd.Distribution.mode)
    ])
    model.compile(loss=loss,optimizer=optimizer,metrics=metrics)
    return model
# + id="fCLuN7lpoFWc" executionInfo={"status": "ok", "timestamp": 1644643258300, "user_tz": 300, "elapsed": 227, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Run your function to get the probabilistic model.
# Same seed as the deterministic model, so both start (and, per the check
# below, end) with identical weights.
tf.random.set_seed(0)
probabilistic_model = get_probabilistic_model(
    input_shape=(28, 28, 1),
    loss=nll,
    optimizer=RMSprop(),
    metrics=['accuracy']
)
# + id="WITIcnsyoFWe" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644643259467, "user_tz": 300, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="c141cde8-eefc-4341-e261-194f59447500"
# Print the model summary
probabilistic_model.summary()
# + [markdown] id="SN6VkX4-oFWg"
# Now, you can train the probabilistic model on the MNIST data using the code below.
#
# Note that the target data now uses the one-hot version of the labels, instead of the sparse version. This is to match the categorical distribution you added at the end.
# + id="KAW94TLLoFWh" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644643394469, "user_tz": 300, "elapsed": 132065, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="6dc5cdd3-877d-4665-f04f-0a7822c67b21"
# Train the model (one-hot targets match the OneHotCategorical output / nll loss)
probabilistic_model.fit(x_train, y_train_oh, epochs=5)
# + id="vczPrd_8oFWj" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644643448582, "user_tz": 300, "elapsed": 4433, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="7acac4f8-e163-4d7b-fd0b-04f4cd3149a5"
# Evaluate the model on clean and corrupted test sets ([1] = accuracy)
print('Accuracy on MNIST test set: ',
      str(probabilistic_model.evaluate(x_test, y_test_oh, verbose=False)[1]))
print('Accuracy on corrupted MNIST test set: ',
      str(probabilistic_model.evaluate(x_c_test, y_c_test_oh, verbose=False)[1]))
# + [markdown] id="KMILWQefoFWl"
# Note that the test accuracy of the probabilistic model is identical to the deterministic model. This is because the model architectures for both are equivalent; the only difference being that the probabilistic model returns a distribution object. Since we have also set the same random seed for both models, the trained variables are in fact identical, as the following cell shows.
# + id="SiznxCoVoFWl" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644643458781, "user_tz": 300, "elapsed": 202, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="cbb4b473-02e9-497c-b554-2971f88ff7c1"
# Check all the weights of the deterministic and probabilistic models are identical
# (every line should print True, confirming the two models trained identically)
for deterministic_variable, probabilistic_variable in zip(deterministic_model.weights, probabilistic_model.weights):
    print(np.allclose(deterministic_variable.numpy(), probabilistic_variable.numpy()))
# + [markdown] id="QlTlUeeroFWo"
# #### Analyse the model predictions
#
# We will now do some deeper analysis by looking at the probabilities the model assigns to each class instead of its single prediction.
#
# The function below will be useful to help us analyse the probabilistic model predictions.
# + id="iOeY0WmboFWp" executionInfo={"status": "ok", "timestamp": 1644643467801, "user_tz": 300, "elapsed": 344, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Function to make plots of the probabilities that the model estimates for an image
def analyse_model_prediction(data, true_labels, model, image_num, run_ensemble=False):
    """Plot one input image next to the model's estimated class probabilities.

    Args:
        data: Array of images indexed as data[image_num]; assumed shape
            (num_images, height, width, channels) -- TODO confirm against caller.
        true_labels: Integer label array of shape (num_images, 1).
        model: Callable returning a distribution object with a .mean() method
            (e.g. a Keras model ending in a OneHotCategorical layer).
        image_num: Index of the image to analyse.
        run_ensemble: If True, run 200 forward passes so that weight
            uncertainty shows up as a 95% interval of the probabilities;
            if False, use a single forward pass.
    """
    # One pass suffices for deterministic weights; an ensemble of passes is
    # needed to sample the weight posterior of a Bayesian model.
    ensemble_size = 200 if run_ensemble else 1
    image = data[image_num]
    true_label = true_labels[image_num, 0]
    predicted_probabilities = np.empty(shape=(ensemble_size, 10))
    for i in range(ensemble_size):
        predicted_probabilities[i] = model(image[np.newaxis, :]).mean().numpy()[0]
    # NOTE: the original computed an extra, unused forward pass here
    # (model_prediction); removed as dead code.
    fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(10, 2),
                                   gridspec_kw={'width_ratios': [2, 4]})

    # Show the image and the true label
    ax1.imshow(image[..., 0], cmap='gray')
    ax1.axis('off')
    ax1.set_title('True label: {}'.format(str(true_label)))

    # Show a 95% prediction interval of model predicted probabilities:
    # draw the 97.5th-percentile bar, then paint over its bottom in white up
    # to just below the 2.5th percentile, leaving the interval visible.
    pct_2p5 = np.array([np.percentile(predicted_probabilities[:, i], 2.5) for i in range(10)])
    pct_97p5 = np.array([np.percentile(predicted_probabilities[:, i], 97.5) for i in range(10)])
    bar = ax2.bar(np.arange(10), pct_97p5, color='red')
    bar[int(true_label)].set_color('green')  # highlight the true class
    ax2.bar(np.arange(10), pct_2p5 - 0.02, color='white', linewidth=1, edgecolor='white')
    ax2.set_xticks(np.arange(10))
    ax2.set_ylim([0, 1])
    ax2.set_ylabel('Probability')
    ax2.set_title('Model estimated probabilities')
    plt.show()
# + id="3c8vqn9PoFWq" colab={"base_uri": "https://localhost:8080/", "height": 329} executionInfo={"status": "ok", "timestamp": 1644643468595, "user_tz": 300, "elapsed": 588, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="d9754279-2353-4a72-fa38-67cf9f4b15d1"
# Prediction examples on MNIST: index 0 is an easy example, 1577 an ambiguous one
for i in [0, 1577]:
    analyse_model_prediction(x_test, y_test, probabilistic_model, i)
# + [markdown] id="DHdUTb7UoFWt"
# The model is very confident that the first image is a 6, which is correct. For the second image, the model struggles, assigning nonzero probabilities to many different classes.
#
# Run the code below to do the same for 2 images from the corrupted MNIST test set.
# + id="9--Bv_ZpoFWt" colab={"base_uri": "https://localhost:8080/", "height": 329} executionInfo={"status": "ok", "timestamp": 1644643478631, "user_tz": 300, "elapsed": 650, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="5b542663-9d3f-4410-cfa6-27a5c00642fb"
# Prediction examples on MNIST-C (the spatter-corrupted test set)
for i in [0, 3710]:
    analyse_model_prediction(x_c_test, y_c_test, probabilistic_model, i)
# + [markdown] id="Li3vTiOYoFWv"
# The first is the same 6 as you saw above, but the second image is different. Notice how the model can still say with high certainty that the first image is a 6, but struggles for the second, assigning an almost uniform distribution to all possible labels.
#
# Finally, have a look at an image for which the model is very sure on MNIST data but very unsure on corrupted MNIST data:
# + id="zD9Qj3izoFWw" colab={"base_uri": "https://localhost:8080/", "height": 329} executionInfo={"status": "ok", "timestamp": 1644643482241, "user_tz": 300, "elapsed": 1105, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="499e3870-0708-41f7-a7b1-7fcdbbc63581"
# Prediction examples from both datasets: same index, clean vs corrupted image
for i in [9241]:
    analyse_model_prediction(x_test, y_test, probabilistic_model, i)
    analyse_model_prediction(x_c_test, y_c_test, probabilistic_model, i)
# + [markdown] id="o6OLDYt-oFWx"
# It's not surprising what's happening here: the spatters cover up most of the number. You would hope a model indicates that it's unsure here, since there's very little information to go by. This is exactly what's happened.
# + [markdown] id="zKPz3vv-oFWy"
# #### Uncertainty quantification using entropy
#
# We can also make some analysis of the model's uncertainty across the full test set, instead of for individual values. One way to do this is to calculate the [entropy](https://en.wikipedia.org/wiki/Entropy_%28information_theory%29) of the distribution. The entropy is the expected information (or informally, the expected 'surprise') of a random variable, and is a measure of the uncertainty of the random variable. The entropy of the estimated probabilities for sample $i$ is defined as
#
# $$
# H_i = -\sum_{j=1}^{10} p_{ij} \text{log}_{2}(p_{ij})
# $$
#
# where $p_{ij}$ is the probability that the model assigns to sample $i$ corresponding to label $j$. The entropy as above is measured in _bits_. If the natural logarithm is used instead, the entropy is measured in _nats_.
#
# The key point is that the higher the value, the more unsure the model is. Let's see the distribution of the entropy of the model's predictions across the MNIST and corrupted MNIST test sets. The plots will be split between predictions the model gets correct and incorrect.
# + id="b0Ev31u7oFWy" executionInfo={"status": "ok", "timestamp": 1644643492025, "user_tz": 300, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Functions to plot the distribution of the information entropy across samples,
# split into whether the model prediction is correct or incorrect
def get_correct_indices(model, x, labels):
    """Split sample indices by whether the model's top class matches the label.

    Returns a pair of Python lists: (correct_indices, incorrect_indices).
    """
    predictions = np.argmax(model(x).mean(), axis=1)
    is_correct = predictions == np.squeeze(labels)
    correct_indices, incorrect_indices = [], []
    for index in range(x.shape[0]):
        bucket = correct_indices if is_correct[index] else incorrect_indices
        bucket.append(index)
    return correct_indices, incorrect_indices
def plot_entropy_distribution(model, x, labels):
    """Histogram the predictive entropy (in bits), split into correctly and
    incorrectly labelled samples."""
    probs = model(x).mean().numpy()
    # Shannon entropy in bits of each sample's predicted class distribution
    entropy = -np.sum(probs * np.log2(probs), axis=1)
    fig, axes = plt.subplots(1, 2, figsize=(10, 4))
    for i, category in enumerate(['Correct', 'Incorrect']):
        entropy_category = entropy[get_correct_indices(model, x, labels)[i]]
        mean_entropy = np.mean(entropy_category)
        num_samples = entropy_category.shape[0]
        percentage = num_samples / x.shape[0] * 100
        ax = axes[i]
        # Weight each sample by 1/num_samples so bar heights sum to 1
        ax.hist(entropy_category, weights=(1 / num_samples) * np.ones(num_samples))
        ax.annotate('Mean: {:.3f} bits'.format(mean_entropy), (0.4, 0.9), ha='center')
        ax.set_xlabel('Entropy (bits)')
        ax.set_ylim([0, 1])
        ax.set_ylabel('Probability')
        ax.set_title('{}ly labelled ({:.1f}% of total)'.format(category, percentage))
    plt.show()
# + id="6dYsfnipoFW1" colab={"base_uri": "https://localhost:8080/", "height": 313} executionInfo={"status": "ok", "timestamp": 1644643496529, "user_tz": 300, "elapsed": 3446, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="35f27b1f-a32c-44cf-c1fd-2bfc9bcdd11a"
# Entropy plots for the (clean) MNIST test set
print('MNIST test set:')
plot_entropy_distribution(probabilistic_model, x_test, y_test)
# + id="txpCTZEsoFW3" colab={"base_uri": "https://localhost:8080/", "height": 313} executionInfo={"status": "ok", "timestamp": 1644643500065, "user_tz": 300, "elapsed": 3546, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="02f73c2e-276a-4eb4-8042-d25ca508b167"
# Entropy plots for the corrupted MNIST-C test set
print('Corrupted MNIST test set:')
plot_entropy_distribution(probabilistic_model, x_c_test, y_c_test)
# + [markdown] id="-h18QpMRoFW5"
# There are two main conclusions:
# - The model is more unsure on the predictions it got wrong: this means it "knows" when the prediction may be wrong.
# - The model is more unsure for the corrupted MNIST test than for the uncorrupted version. Furthermore, this is more pronounced for correct predictions than for those it labels incorrectly.
#
# In this way, the model seems to "know" when it is unsure. This is a great property to have in a machine learning model, and is one of the advantages of probabilistic modelling.
# + [markdown] id="yTHlkzWvoFW5"
# ### Bayesian CNN model
#
# The probabilistic model you just created considered only aleatoric uncertainty, assigning probabilities to each image instead of deterministic labels. The model still had deterministic weights. However, as you've seen, there is also 'epistemic' uncertainty over the weights, due to uncertainty about the parameters that explain the training data.
#
# You'll now be adding weight uncertainty to the model you just created. Your new model will again have the following layers:
# - 2D convolution
# - Max pooling
# - Flatten
# - Dense
# - OneHotCategorical
#
# but where the convolutional and dense layers include weight uncertainty. You'll embed weight uncertainty as follows:
# - The 2D convolution layer will be replaced by a `Convolution2DReparameterization` layer
# - The Dense layer will be replaced by a `DenseVariational` layer.
# + [markdown] id="Hl5EroY8oFW6"
# You should start by creating the convolutional layer in the function below. The function should return an instance of the `Convolution2DReparameterization` layer, according to the following specification:
#
# * The function takes the `input_shape` and `divergence_fn` as arguments
# * The layer should set the input shape in its constructor using the `input_shape` argument
# * This layer should have 8 filters, a kernel size of `(5, 5)`, a ReLU activation, and `"VALID"` padding
# * The prior for both the kernel and bias should be the standard `default_multivariate_normal_fn`, as seen in the coding tutorial
# * The posterior for each parameter in both the kernel and bias should be an independent normal distribution with trainable mean and variance (_hint: use the_ `default_mean_field_normal_fn`_)_
# * The divergence function should be set using the `divergence_fn` argument for both the kernel and the bias
#
# _HINT: Review the arguments you used in the coding tutorial on Reparameterization layers._
# + id="lhcswiqxoFW6" executionInfo={"status": "ok", "timestamp": 1644643852749, "user_tz": 300, "elapsed": 189, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function name or arguments.
def get_convolutional_reparameterization_layer(input_shape, divergence_fn):
    """
    This function should create an instance of a Convolution2DReparameterization
    layer according to the above specification.
    The function takes the input_shape and divergence_fn as arguments, which should
    be used to define the layer.
    Your function should then return the layer instance.
    """
    # 8 filters of size 5x5 with ReLU and no padding. The priors are standard
    # normals and the posteriors are trainable independent (mean-field) normals,
    # for both the kernel and the bias; the KL term is supplied by divergence_fn.
    # (Removed a commented-out, dead definition of divergence_fn that shadowed
    # the function argument.)
    return tfpl.Convolution2DReparameterization(
        input_shape=input_shape,
        filters=8,
        kernel_size=(5, 5),
        activation='relu',
        padding='VALID',
        kernel_prior_fn=tfpl.default_multivariate_normal_fn,
        kernel_posterior_fn=tfpl.default_mean_field_normal_fn(is_singular=False),
        kernel_divergence_fn=divergence_fn,
        bias_prior_fn=tfpl.default_multivariate_normal_fn,
        bias_posterior_fn=tfpl.default_mean_field_normal_fn(is_singular=False),
        bias_divergence_fn=divergence_fn,
    )
# + [markdown] id="6Qegf6tkoFW8"
# You'll use this function to create your model a little bit later on.
# + [markdown] id="fNVmkwyToFW8"
# #### Custom prior
#
# For the parameters of the `DenseVariational` layer, we will use a custom prior: the "spike and slab" (also called a *scale mixture prior*) distribution. This distribution has a density that is the weighted sum of two normally distributed ones: one with a standard deviation of 1 and one with a standard deviation of 10. In this way, it has a sharp spike around 0 (from the normal distribution with standard deviation 1), but is also more spread out towards far away values (from the contribution from the normal distribution with standard deviation 10). The reason for using such a prior is that it is like a standard unit normal, but makes values far away from 0 more likely, allowing the model to explore a larger weight space. Run the code below to create a "spike and slab" distribution and plot its probability density function, compared with a standard unit normal.
# + id="i26mlCWooFW9" executionInfo={"status": "ok", "timestamp": 1644643857900, "user_tz": 300, "elapsed": 257, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Function to define the spike and slab distribution
def spike_and_slab(event_shape, dtype):
    """Return a 50/50 mixture of N(0, 1) and N(0, 10) over `event_shape` dims.

    The narrow component forms the 'spike' at zero; the wide component is the
    'slab' that keeps mass on values far from zero.
    """
    distribution = tfd.Mixture(
        cat=tfd.Categorical(probs=[0.5, 0.5]),
        components=[
            # Spike: narrow unit normal centred at zero
            tfd.Independent(tfd.Normal(
                loc=tf.zeros(event_shape, dtype=dtype),
                scale=1.0*tf.ones(event_shape, dtype=dtype)),
                reinterpreted_batch_ndims=1),
            # Slab: wide normal (scale 10) spreading mass to large values
            tfd.Independent(tfd.Normal(
                loc=tf.zeros(event_shape, dtype=dtype),
                scale=10.0*tf.ones(event_shape, dtype=dtype)),
                reinterpreted_batch_ndims=1)],
        name='spike_and_slab')
    return distribution
# + id="WFDxViE4oFW_" colab={"base_uri": "https://localhost:8080/", "height": 279} executionInfo={"status": "ok", "timestamp": 1644643860942, "user_tz": 300, "elapsed": 482, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="76cf30b5-bddd-48f0-c374-b24d651d27b1"
# Plot the spike and slab distribution pdf against a standard unit normal
x_plot = np.linspace(-5, 5, 1000)[:, np.newaxis]
plt.plot(x_plot, tfd.Normal(loc=0, scale=1).prob(x_plot).numpy(), label='unit normal', linestyle='--')
plt.plot(x_plot, spike_and_slab(1, dtype=tf.float32).prob(x_plot).numpy(), label='spike and slab')
plt.xlabel('x')
plt.ylabel('Density')
plt.legend()
plt.show()
# + [markdown] id="eqlCcUdLoFXB"
# You should now complete the function below to create the prior distribution for the `DenseVariational` layer, using the spike and slab distribution above.
#
# * The function has the required signature for the `make_prior_fn` argument of the `DenseVariational` layer
# * The prior will have no trainable parameters
# * It should use the spike and slab distribution for both the kernel and the bias, setting the `dtype` according to the function argument
# * The distribution should have the correct event shape, according to the `kernel_size` and `bias_size` arguments
# * The function should return a callable, that returns the spike and slab distribution
#
# _Hints:_
# * _Refer to the lecture video and/or coding tutorial to review the_ `DenseVariational` _layer arguments_
# * _Use the_ `Sequential` _API with a_ `DistributionLambda` _layer to create the callable that is returned by the function_
# + id="zByNMRX-oFXB" executionInfo={"status": "ok", "timestamp": 1644643987388, "user_tz": 300, "elapsed": 192, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function name or arguments.
def get_prior(kernel_size, bias_size, dtype=None):
    """Build the (non-trainable) prior for a DenseVariational layer.

    The prior is the "spike and slab" mixture defined above, over all
    kernel_size + bias_size parameters. Returns a callable (a Sequential
    wrapping a DistributionLambda) that yields the prior distribution.
    """
    event_size = kernel_size + bias_size
    return Sequential([
        tfpl.DistributionLambda(lambda t: spike_and_slab(event_size, dtype))
    ])
# + [markdown] id="FPQ1S25woFXD"
# You'll use this function when you create the `DenseVariational` layer later on.
# + [markdown] id="rCy-J9eDoFXE"
# You should now complete the function below to create the variational posterior distribution for the `DenseVariational` layer. This distribution will be an independent Gaussian with trainable mean and standard deviation for each parameter in the layer.
#
# * The function has the required signature for the `make_posterior_fn` argument of the `DenseVariational` layer
# * The posterior will have 2 trainable variables for each layer parameter, one for the mean and one for the standard deviation
# * The distribution should have the correct event shape, according to the `kernel_size` and `bias_size` arguments
# * The function should return a callable, that returns the trainable independent Gaussian distribution
#
# _Hints:_
# * _Refer to the lecture video and/or coding tutorial to review the_ `DenseVariational` _layer arguments_
# * _Use the_ `Sequential` _API with a_ `VariableLayer` and an `IndependentNormal` _layer to create the callable that is returned by the function_
# + id="zspebbazoFXE" executionInfo={"status": "ok", "timestamp": 1644644120004, "user_tz": 300, "elapsed": 201, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function name or arguments.
def get_posterior(kernel_size, bias_size, dtype=None):
    """Build the trainable mean-field posterior for a DenseVariational layer.

    Returns a callable (a Sequential model) yielding an independent normal
    over all kernel_size + bias_size parameters, with one trainable mean and
    one trainable standard deviation per parameter.
    """
    event_size = kernel_size + bias_size
    parameter_count = tfpl.IndependentNormal.params_size(event_size)
    return Sequential([
        tfpl.VariableLayer(parameter_count, dtype=dtype),
        tfpl.IndependentNormal(event_shape=event_size),
    ])
# + [markdown] id="_qklM5W5oFXG"
# You should now use your `prior` and `posterior` functions to complete the function below to create the `DenseVariational` layer.
#
# * The function has `prior_fn`, `posterior_fn` and `kl_weight` arguments, to be used in the constructor of the `DenseVariational` layer
# * The layer should have the correct number of units in order to parameterize a `OneHotCategorical` layer with 10 categories
# * The `make_prior_fn`, `make_posterior_fn` and `kl_weight` arguments should be set with the corresponding function arguments
# * An exact KL-divergence is unavailable for this choice of prior and posterior, so the layer should not attempt to use an analytical expression for this
# * Your function should then return an instance of the `DenseVariational` layer
# + id="VKVzcCPdoFXG" executionInfo={"status": "ok", "timestamp": 1644644317798, "user_tz": 300, "elapsed": 198, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
#### GRADED CELL ####
# Complete the following functions.
# Make sure to not change the function name or arguments.
def get_dense_variational_layer(prior_fn, posterior_fn, kl_weight):
    """Build the DenseVariational output layer (10 units, one per class).

    kl_use_exact is False because no closed-form KL divergence exists between
    the spike-and-slab prior and the mean-field normal posterior, so the KL
    term is estimated by sampling.
    """
    return tfpl.DenseVariational(
        units=10,
        make_prior_fn=prior_fn,
        make_posterior_fn=posterior_fn,
        kl_weight=kl_weight,
        kl_use_exact=False,
    )
# + [markdown] id="NhsF2-0boFXI"
# Now, you're ready to use the functions you defined to create the convolutional reparameterization and dense variational layers, and use them in your Bayesian convolutional neural network model.
# + id="vazOUoF8oFXI" executionInfo={"status": "ok", "timestamp": 1644644321285, "user_tz": 300, "elapsed": 225, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}}
# Create the layers
tf.random.set_seed(0)
# Scale the KL divergence by the training-set size so the per-example loss is balanced
divergence_fn = lambda q, p, _ : tfd.kl_divergence(q, p) / x_train.shape[0]
convolutional_reparameterization_layer = get_convolutional_reparameterization_layer(
    input_shape=(28, 28, 1), divergence_fn=divergence_fn
)
dense_variational_layer = get_dense_variational_layer(
    get_prior, get_posterior, kl_weight=1/x_train.shape[0]
)
# + id="PnvIoaxyoFXL" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644644323988, "user_tz": 300, "elapsed": 487, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="b5a0b80b-161a-4011-8544-d8d6d95fecdf"
# Build and compile the Bayesian CNN model
bayesian_model = Sequential([
    convolutional_reparameterization_layer,
    MaxPooling2D(pool_size=(6, 6)),
    Flatten(),
    dense_variational_layer,
    # Output a OneHotCategorical distribution; tensor conversion takes the mode
    tfpl.OneHotCategorical(10, convert_to_tensor_fn=tfd.Distribution.mode)
])
bayesian_model.compile(loss=nll,
                       optimizer=RMSprop(),
                       metrics=['accuracy'],
                       experimental_run_tf_function=False)
# + id="VqH-uyc4oFXN" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644644327896, "user_tz": 300, "elapsed": 204, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="35308119-fa71-40a5-edcd-c599215b2edb"
# Print the model architecture and parameter counts
bayesian_model.summary()
# + id="U_Xy3mCsoFXP" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644644590164, "user_tz": 300, "elapsed": 250339, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="5b923fa0-eed7-4c2e-b920-a00aee57be6b"
# Train the Bayesian model for 10 epochs on the one-hot encoded labels
bayesian_model.fit(x=x_train, y=y_train_oh, epochs=10, verbose=True)
# + id="cy6zxkbBoFXR" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1644644594737, "user_tz": 300, "elapsed": 4582, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="18576ad0-0ca3-40d1-aae9-e03cf3f721ed"
# Evaluate the Bayesian model; evaluate() returns [loss, accuracy], index [1] is accuracy
print('Accuracy on MNIST test set: ',
      str(bayesian_model.evaluate(x_test, y_test_oh, verbose=False)[1]))
print('Accuracy on corrupted MNIST test set: ',
      str(bayesian_model.evaluate(x_c_test, y_c_test_oh, verbose=False)[1]))
# + [markdown] id="e1DORpGooFXT"
# #### Analyse the model predictions
#
# Now that the model has trained, run the code below to create the same plots as before, starting with an analysis of the predicted probabilities for the same images.
#
# This model now has weight uncertainty, so running the forward pass multiple times will not generate the same estimated probabilities. For this reason, the estimated probabilities do not have single values. The plots are adjusted to show a 95% prediction interval for the model's estimated probabilities.
# + id="qtBvyfPMoFXU" colab={"base_uri": "https://localhost:8080/", "height": 329} executionInfo={"status": "ok", "timestamp": 1644644604403, "user_tz": 300, "elapsed": 9671, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="ff948319-001b-4c52-bfdf-28dcec08ff2d"
# Prediction examples on MNIST; run_ensemble=True samples 200 forward passes
for i in [0, 1577]:
    analyse_model_prediction(x_test, y_test, bayesian_model, i, run_ensemble=True)
# + [markdown] id="FflDcYZgoFXW"
# For the first image, the model assigns a probability of almost one for the 6 label. Furthermore, it is confident in this probability: this probability remains close to one for every sample from the posterior weight distribution (as seen by the horizontal green line having very small height, indicating a narrow prediction interval). This means that the epistemic uncertainty on this probability is very low.
#
# For the second image, the epistemic uncertainty on the probabilities is much larger, which indicates that the estimated probabilities may be unreliable. In this way, the model indicates whether estimates may be inaccurate.
# + id="_rApHmbNoFXW" colab={"base_uri": "https://localhost:8080/", "height": 329} executionInfo={"status": "ok", "timestamp": 1644644614019, "user_tz": 300, "elapsed": 9620, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="04c16303-aeec-44f0-b7ba-77500b673812"
# Prediction examples on MNIST-C with the ensemble of forward passes
for i in [0, 3710]:
    analyse_model_prediction(x_c_test, y_c_test, bayesian_model, i, run_ensemble=True)
# + [markdown] id="K0G-_2tyoFXZ"
# Even with the spatters, the Bayesian model is confident in predicting the correct label for the first image above. The model struggles with the second image, which is reflected in the range of probabilities output by the network.
# + id="RjQcz_lroFXa" colab={"base_uri": "https://localhost:8080/", "height": 329} executionInfo={"status": "ok", "timestamp": 1644644623435, "user_tz": 300, "elapsed": 9426, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="0fe19c9a-7603-443e-a850-bfe16b52030b"
# Prediction examples from both datasets: same index, clean vs corrupted image
for i in [9241]:
    analyse_model_prediction(x_test, y_test, bayesian_model, i, run_ensemble=True)
    analyse_model_prediction(x_c_test, y_c_test, bayesian_model, i, run_ensemble=True)
# + [markdown] id="tLp9mTcpoFXc"
# Similar to before, the model struggles with the second number, as it is mostly covered up by the spatters. However, this time it is clear to see the epistemic uncertainty in the model.
# + [markdown] id="mH6wmfNAoFXc"
# #### Uncertainty quantification using entropy
#
# We also again plot the distribution of distribution entropy across the different test sets below. In these plots, no consideration has been made for the epistemic uncertainty, and the conclusions are broadly similar to those for the previous model.
# + id="jbz3LvrhoFXc" colab={"base_uri": "https://localhost:8080/", "height": 313} executionInfo={"status": "ok", "timestamp": 1644644628057, "user_tz": 300, "elapsed": 4641, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="f46fd813-9c2c-4108-93ac-4e4d016ca203"
# Entropy plots for the (clean) MNIST test set, Bayesian model
print('MNIST test set:')
plot_entropy_distribution(bayesian_model, x_test, y_test)
# + id="AhswTB_boFXf" colab={"base_uri": "https://localhost:8080/", "height": 313} executionInfo={"status": "ok", "timestamp": 1644644630794, "user_tz": 300, "elapsed": 2773, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gj4upAirXBdUG7CfOYDUxKNavh6gFLZguho2tWC7w=s64", "userId": "07633353867867375638"}} outputId="dfe35d68-793d-47f1-b5f0-c6d47f68d6af"
# Entropy plots for the corrupted MNIST-C test set, Bayesian model
print('Corrupted MNIST test set:')
plot_entropy_distribution(bayesian_model, x_c_test, y_c_test)
# + [markdown] id="9jIdp-sKoFXh"
# Congratulations on completing this programming assignment! In the next week of the course we will look at the bijectors module and normalising flows.
| Week2/Week 2 Programming Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from matplotlib import pyplot as plt
import numpy as np
% matplotlib inline
# Parameters of a damped single-degree-of-freedom oscillator m*x'' + c*x' + k*x = F(t)
# (units presumably SI -- confirm against the source of the parameters)
x0 = 0.0     # initial displacement
x0p = 0.0    # initial velocity
m = 10.0     # mass
c = 5.0      # damping coefficient
k = 1000.0   # stiffness
wn = np.sqrt(k/m)          # undamped natural frequency
xi = c/(2*m*wn)            # damping ratio
wd = wn*np.sqrt(1-xi**2)   # damped natural frequency
F0 = 100.    # peak force amplitude used by force(t)
def unit_impulse(t):
    """Unit impulse response h(t) of the damped oscillator (Duhamel kernel)."""
    return np.exp(-xi*wn*t)*(1./(m*wd))*np.sin(wd*t)
def force(t):
    """Triangular force pulse.

    Ramps linearly from 0 to F0 over [0, t1), back down to 0 over [t1, t2),
    and is zero afterwards. F0 is a module-level constant.
    """
    t1 = 2.0  # end of the ramp-up
    t2 = 4.0  # end of the pulse
    if t < t1:
        return F0 * (t / t1)
    # Chained comparison replaces the original bitwise `&` on booleans,
    # which worked for scalars but is not the idiomatic boolean test.
    elif t1 <= t < t2:
        return F0 * (t2 - t) / t1
    else:
        return 0.
NP = 1000            # number of time samples
tf = 20.             # final time of the simulation
t = np.linspace(0,tf,NP, endpoint = True)
x = np.zeros(NP)
h = t[1]-t[0]        # uniform time step, used by the trapezoidal rule
# Duhamel (convolution) integral evaluated with the trapezoidal rule:
# x(t_i) = integral of F(tau) * h_impulse(t_i - tau) over tau in [0, t_i]
for i in range(1,NP):
    for j in range(1,i+1):
        x[i] = x[i] + (force(t[j-1])*unit_impulse(t[i]-t[j-1]) + force(t[j])*unit_impulse(t[i]-t[j]))*h/2
# Add the homogeneous (free-vibration) response from the initial conditions
x = x + np.exp(-xi*wn*t)*(x0*np.cos(wd*t) + (x0p + xi*wn*x0)*np.sin(wd*t)/wd)
plt.rcParams['figure.figsize'] = 15, 5
plt.plot(t,x)
plt.ylabel(r'Time evolution of $x(t)$ (m)')
plt.xlabel(r'Time (s)')
plt.grid(True)
# -
| Duhamel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > **DO YOU USE GITHUB?**
# If True: print('Remember to make your edits in a personal copy of this notebook')
# Else: print("You don't have to understand. Continue your life.")
# # Module 6: Web Scraping 1
#
# In this module you will be introduced to `web scraping`:
# - What it web scraping?
# - How to web scrape?
# - Why is web scraping important to master as a data scientist?
#
# Readings for `session 6+7+8`:
# - [Python for Data Analysis, chapter 6](https://bedford-computing.co.uk/learning/wp-content/uploads/2015/10/Python-for-Data-Analysis.pdf)
# - [A Practical Introduction to Web Scraping in Python](https://realpython.com/python-web-scraping-practical-introduction/)
# - [An introduction to web scraping with Python](https://towardsdatascience.com/an-introduction-to-web-scraping-with-python-a2601e8619e5)
# - [Introduction to Web Scraping using Selenium](https://medium.com/the-andela-way/introduction-to-web-scraping-using-selenium-7ec377a8cf72)
#
# Video material from `ISDS 2020`:
# - [Web Scraping 1](https://bit.ly/ISDS2021_6)
# - [Web Scraping 2](https://bit.ly/ISDS2021_7)
# - [Web Scraping 3](https://bit.ly/ISDS2021_8)
#
# Other ressources:
# - [Nicklas Webpage](https://nicklasjohansen.netlify.app/)
# - [Data Driven Organizational Analysis, Fall 2021](https://efteruddannelse.kurser.ku.dk/course/2021-2022/ASTK18379U)
# - [Master of Science (MSc) in Social Data Science](https://www.socialdatascience.dk/education)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Ethical Considerations
# * If a regular user can’t access it, we shouldn’t try to get it [That is considered hacking](https://www.dr.dk/nyheder/penge/gjorde-opmaerksom-paa-cpr-hul-nu-bliver-han-politianmeldt-hacking).
# * Don't hit it too fast: that is essentially a DENIAL OF SERVICE attack (DOS). [Again considered hacking](https://www.dr.dk/nyheder/indland/folketingets-hjemmeside-ramt-af-hacker-angreb).
# * Add headers stating your name and email with your requests to ensure transparency.
# * Be careful with copyrighted material.
# * Fair use (take only the stuff you need)
# * If monetizing on the data, be careful not to be in direct competition with whom you are taking the data from.
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="https://github.com/snorreralund/images/raw/master/Sk%C3%A6rmbillede%202017-08-03%2014.46.32.png"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ## The Web Scraping Recipe
#
# To scrape information from the web is:
# 1. **MAPPING**: Finding URLs of the pages containing the information you want.
# 2. **DOWNLOAD**: Fetching the pages via HTTP.
# 3. **PARSE**: Extracting the information from HTML.
#
#
# You could also add `connection`, `storing`, `logging`, etc.
#
#
#
# ### Packages used
# Today we will mainly build on the python skills you have gotten so far, and tomorrow we will look into more specialized packages.
#
# * for connecting to the internet we use: **requests**
# * for parsing: **beautifulsoup** and **regex**
# * for automatic browsing / screen scraping: **selenium**
# * for mitigating errors we use: **time**
#
# We will write our scrapers with basic python, for larger projects consider looking into the packages **scrapy**
# + slideshow={"slide_type": "subslide"}
# check that you can import these libraries
# otherwise they can easily be installed using pip
# example: https://pypi.org/project/beautifulsoup4/
import requests
from bs4 import BeautifulSoup
import re
import selenium
import time
import pandas as pd
# + [markdown] slideshow={"slide_type": "slide"}
# ## Connecting to the Internet
#
#
# **Connecting to the internet** **HTTP**
#
# *URL*: the address line in our browser.
#
# Via HTTP we send a **get** request to an *address* with *instructions* ( - or rather our dns service provider redirects our request to the right address)
# *Address / Domain*: www.google.com
#
# *Instructions*: /search?q=who+is+mister+miyagi
#
# *Header*: information sent along with the request, including user agent (operating system, browser), cookies, and preferred encoding.
#
# *HTML*: HyperTextMarkupLanguage the language of displaying web content. More on this tomorrow.
#
# + slideshow={"slide_type": "fragment"}
# Minimal HTTP GET: fetch the front page of google.com into a Response object.
import requests
response = requests.get('https://www.google.com')
#response.text
# + slideshow={"slide_type": "fragment"}
# Same pattern against the course website; uncomment response.text to inspect the raw HTML.
import requests
response = requests.get('https://isdsucph.github.io/isds2021/')
#response.text
# + [markdown] slideshow={"slide_type": "slide"}
# ## Static Webpage Example
#
# Visit the following website (https://www.basketball-reference.com/leagues/NBA_2018.html).
#
# The page displays tables of data that we want to collect.
# Tomorrow you will see how to parse such a table, but for now I want to show you a neat function that has already implemented this.
# + slideshow={"slide_type": "subslide"}
# pd.read_html returns a list of DataFrames, one per <table> element found on the page.
url = 'https://www.basketball-reference.com/leagues/NBA_2018.html' # link to the website
dfs = pd.read_html(url) # parses all tables found on the page.
# Display the first table parsed from the page.
dfs[0]
# -
# If we did not have a neat function we would have to navigate the website to point at the data we wanted to collect. Below I show how to find the headline of the table. This is something you will learn about in session 7.
# Manual alternative: fetch the raw HTML and parse it with BeautifulSoup.
url = 'https://www.basketball-reference.com/leagues/NBA_2018.html'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# Text of the first <h2> element — presumably a table headline; verify in the browser.
soup.find_all('h2')[0].text
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Navigating websites to collect links
# Now I will show you a few common ways of finding the links to the pages you want to scrape.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Building URLS using a recognizable pattern.
# A nice trick is to understand how urls are constructed to communicate with a server.
#
# Lets look at how [jobindex.dk](https://www.jobindex.dk/) does it. We simply click around and take note at how the addressline changes.
#
# This will allow us to navigate the page, without having to parse information from the html or click any buttons.
#
# * / is like folders on your computer.
# * ? entails the start of a query with parameters
# * = defines a variable: e.g. page=1000 or offset = 100 or showNumber=20
# * & separates different parameters.
# * \+ is html for whitespace
# -
# Mapping exercise
# Fetch one results page of the jobindex search for 'python' jobs in greater Copenhagen.
url = 'https://www.jobindex.dk/jobsoegning/storkoebenhavn?page=2&q=python'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
soup
# Parse the total number of hits from the first <span class="d-md-none"> element.
# NOTE(review): text[0:3] assumes the count has at most 3 digits and starts the
# string — fragile if the hit count or page layout changes; confirm on the live page.
jobs = int(soup.find('span',attrs={'class':'d-md-none'}).text[0:3])
jobs
# 20 jobs per page
# Number of result pages needed to cover all hits.
for i in range(round(jobs/20)+1):
    print(i)
# Build the URL for every result page by substituting the page parameter.
for i in range(round(jobs/20)+1):
    print('https://www.jobindex.dk/jobsoegning/storkoebenhavn?page=' + str(i) +'&q=python')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Good practices
# * Transparency: send your email and name in the header so webmasters will know you are not a malicious actor.
# * Ratelimiting: Make sure you don't hit their servers too hard.
# * Reliability:
# * Make sure the scraper can handle exceptions (e.g. bad connection) without crashing.
# * Keep a log.
# * Store raw data.
#
# -
# ## Logging
# Even if logging is not important for the below exercises, get in the habit of using this class for connecting to the internet, to practice logging your activity.
#
# You should run `pip install scraping_class` to install the module to be used.
# Route all HTTP calls through the Connector so every request is logged to log.csv.
import scraping_class
logfile = 'log.csv'## name your log file.
connector = scraping_class.Connector(logfile)
# # Exercise Set 6: Web Scraping 1
#
# In this Exercise Set we shall practice our webscraping skills utilizing only basic python.
# We shall cover variations between static and dynamic pages and build our own scrapers.
# ## Exercise Section 6.1: Scraping Jobnet.dk
#
# In this exercise you get to practice locating the request that the JavaScript sends to get the job data that it builds the joblistings from. You should use the **>Network Monitor<** tool in your browser. I recommend using Chrome.
#
# Furthermore you practice spotting how the pagination is done, without clicking on the next page button, but instead changing a small parameter in the URL.
# > **Ex. 6.1.1:** Go to www.jobnet.dk and investigate the page. Start your `mapping`. Figure out what URL you need to scrape to collect job posting data. Sometimes this can be hard and requires you to inspect the page.
#
# > **Ex. 6.1.2.:** Use the `request` module to collect the first 20 results and unpack the relevant `json` data into a `pandas` DataFrame.
#
# > **Ex. 6.1.3.:** How many results do you find in total? Store this number as 'n_listings' for later use.
# > **Ex. 6.1.4:** This exercise is about paging the results. We need to understand the websites pagination scheme.
#
# > Now scroll down the webpage and press the next page button. See how the parameters of the url changes as you turn the pages.
#
# > **Ex. 6.1.5:** Design a `for` loop using the `range` function that changes this paging parameter in the URL. Use 'n_listings' from before to define the limits of the range function. Store these urls in a container.
# > **Ex.6.1.6:** Pick 20 random links using the `random.sample()` function and scrape their content. Use the `time.sleep()` function to limit the rate of your calls. Load all the results into a DataFrame. ***extra***: monitor the time left to completing the loop by using `tqdm.tqdm()` function.
#
# > **Ex.6.1.7:** <NAME>, a researcher at SODAS, has built a connector class. Repeat 6.1.6 but try to use his connector to log your activity. You can either download it from [pip](https://pypi.org/project/scraping-class/) using `pip install scraping-class` or you can use the posted code below.
# +
### <NAME>und Connector class
import requests,os,time
def ratelimit(dt):
    """Throttle outgoing calls by sleeping *dt* seconds before the next request."""
    time.sleep(dt)
class Connector():
    def __init__(self,logfile,overwrite_log=False,connector_type='requests',session=False,path2selenium='',n_tries = 5,timeout=30,waiting_time=0.5):
        """Reliable internet connection with retries, rate limiting and CSV logging.

        Handles simple errors due to connection problems, and logs a range of
        information for basic quality assessments.

        Keyword arguments:
        logfile -- path to the logfile.
        overwrite_log -- bool, clear an existing logfile instead of appending (rarely needed).
        connector_type -- 'requests' or 'selenium'. The selenium webdriver has no
            response object, so monitoring cannot be automated in the same way.
        session -- requests.session object, for custom headers and proxies.
        path2selenium -- str, path to the geckodriver (required when using selenium).
        n_tries -- int, number of retries the *get* method makes to absorb random connection errors.
        timeout -- int, seconds the get request waits for the server to respond.
        waiting_time -- float, seconds slept before each call (simple rate limit).
        """
        self.n_tries = n_tries  # retries for trivial errors, e.g. connection errors.
        self.timeout = timeout  # maximum time to wait for a server response.
        self.waiting_time = waiting_time  # simple rate-limit parameter.
        if connector_type=='selenium':
            assert path2selenium!='', "You need to specify the path to you geckodriver if you want to use Selenium"
            from selenium import webdriver
            ## download the latest geckodriver here: https://github.com/mozilla/geckodriver/releases
            assert os.path.isfile(path2selenium),'You need to insert a valid path2selenium the path to your geckodriver. You can download the latest geckodriver here: https://github.com/mozilla/geckodriver/releases'
            self.browser = webdriver.Firefox(executable_path=path2selenium)  # start the browser.
        self.connector_type = connector_type
        if session:  # use the caller-supplied session (custom headers, proxies).
            self.session = session
        else:
            self.session = requests.session()
        self.logfilename = logfile
        ## header for the ';'-separated CSV logfile.
        header = ['id','project','connector_type','t', 'delta_t', 'url', 'redirect_url','response_size', 'response_code','success','error']
        if os.path.isfile(logfile):
            if overwrite_log==True:
                self.log = open(logfile,'w')
                self.log.write(';'.join(header))
            else:
                self.log = open(logfile,'a')
        else:
            self.log = open(logfile,'w')
            self.log.write(';'.join(header))
        ## load the existing log to determine the next unique call id.
        with open(logfile,'r') as f:
            l = f.read().split('\n')
        if len(l)<=1:  # only the header (or an empty file): start from 0.
            self.id = 0
        else:
            # FIX: parse the full id field of the last row. The original used
            # l[-1][0] (only the first character), which breaks once id >= 10.
            self.id = int(l[-1].split(';')[0]) + 1

    def get(self,url,project_name):
        """Connect reliably to *url* with retries, simple error handling and logging.

        Keyword arguments:
        url -- str, url to fetch.
        project_name -- str, label used when analyzing the log, e.g.
            'Mapping of domain', 'Meta_data_collection', 'main data collection'.

        Returns (response, call_id) with requests, or (None, call_id) with selenium
        (use connector.browser.page_source for the html in that case).
        """
        project_name = project_name.replace(';','-')  # keep the CSV separator out of the field.
        if self.connector_type=='requests':
            for _ in range(self.n_tries):  # retry loop for transient connection errors.
                ratelimit(self.waiting_time)
                t = time.time()
                try:
                    response = self.session.get(url,timeout = self.timeout)  # make the get call.
                    err = ''  # empty python error, assuming success.
                    success = True
                    redirect_url = response.url  # final url after potential redirects.
                    # FIX: elapsed time is now - start; the original computed
                    # t - time.time(), which logged a negative delta_t.
                    dt = time.time() - t
                    size = len(response.text)  # size of the html content of the response.
                    response_code = response.status_code
                    call_id = self.id  # current unique identifier for the call.
                    self.id+=1
                    row = [call_id,project_name,self.connector_type,t,dt,url,redirect_url,size,response_code,success,err]
                    self.log.write('\n'+';'.join(map(str,row)))  # write the log row.
                    self.log.flush()
                    return response,call_id
                except Exception as e:  # connection error: log the failure and retry.
                    err = str(e)
                    response_code = ''
                    success = False
                    size = 0
                    redirect_url = ''
                    dt = time.time() - t  # FIX: positive elapsed time (was t - time.time()).
                    call_id = self.id
                    self.id+=1
                    row = [call_id,project_name,self.connector_type,t,dt,url,redirect_url,size,response_code,success,err]
                    self.log.write('\n'+';'.join(map(str,row)))
                    self.log.flush()
        else:
            t = time.time()
            ratelimit(self.waiting_time)
            self.browser.get(url)  # use the selenium get method.
            call_id = self.id
            self.id+=1
            err = ''
            success = ''  # success unknown: selenium gives no status code.
            redirect_url = self.browser.current_url
            # FIX: positive elapsed time; NOTE: not necessarily the complete load time.
            dt = time.time() - t
            size = len(self.browser.page_source)  # NOTE: selenium may still be loading; size can be off.
            response_code = ''
            row = [call_id,project_name,self.connector_type,t,dt,url,redirect_url,size,response_code,success,err]
            self.log.write('\n'+';'.join(map(str,row)))
            self.log.flush()
            # selenium returns no response object; call connector.browser.page_source for the html.
            return None,call_id
# -
# ## Exercise Section 6.2: Scraping Trustpilot.com
# Now for a slightly more elaborate, yet still simple scraping problem. Here we want to scrape trustpilot for user reviews. This data is very nice since it provides free labeled data (rating) to train a machine learning model to understand positive and negative sentiment.
#
# Here you will practice crawling a website collecting the links to each company review page, and finally locate another behind the scenes JavaScript request that gets the review data in a neat json format.
# > **Ex. 6.2.1:** Visit the https://www.trustpilot.com/ website and locate the categories page.
# From this page you find links to company listings.
#
# > **Ex. 6.2.2:**
# Get the category page using the `requests` module and extract each link to a specific category page from the HTML. This can be done using the basic python `.split()` string method. Make sure only links within the ***/categories/*** section are kept, checking each string using the ```if 'pattern' in string``` condition.
#
# *(Hint: The links are relative. You need to add the domain name)*
#
# > **Ex. 6.2.3:** Get one of the category section links. Write a function to extract the links to the company review page from the HTML.
#
# > **Ex. 6.2.4:** Figure out how the pagination is done, by following how the url changes when pressing the **next page**-button to obtain more company listings. Write a function that builds links to paging all the company listing results of each category. This includes parsing the number of subpages of each category and changing the correct parameter in the url.
#
# (Hint: Find the maximum number of result pages, right before the next page button and make a loop change the page parameter of the url.)
#
# > **Ex. 6.2.5:** Loop through all categories and build the paging links using the above defined function.
#
# > **Ex. 6.2.6:** Randomly pick one of category listing links you have generated, and get the links to the companies listed using the other function defined.
#
# > **Ex. 6.2.7:** Visit one of these links and inspect the **>Network Monitor<** to locate the request that loads the review data. Use the requests module to retrieve this link and unpack the json results to a pandas DataFrame.
#
| teaching_material/module_6/module_6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append("./../..")
# +
# %reload_ext yellowbrick
# %matplotlib inline
# Imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import precision_recall_curve
from yellowbrick.style.palettes import get_color_cycle, PALETTES
from yellowbrick.style.colors import resolve_colors
from yellowbrick.base import ModelVisualizer
from yellowbrick.classifier import ThresholdVisualizer, thresholdviz
# +
# Retrieve Data Set
# Spambase CSV has no header row; column 57 holds the spam/ham label.
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/spambase/spambase.data', header=None)
df.rename(columns={57:'is_spam'}, inplace=True)
# Build the classifier and get the predictions
# NOTE(review): BernoulliNB(3) sets the first positional parameter (alpha
# smoothing in scikit-learn's API) to 3 — confirm this value was intended.
model = BernoulliNB(3)
X = df[[col for col in df.columns if col != 'is_spam']]
y = df['is_spam']
# -
# Visualize metric trade-offs across decision thresholds over n_trials shuffled runs.
viz = ThresholdVisualizer(model, n_trials=100, title="Spam vs Ham Thresholds", quantiles=(0.10, 0.5, .9))
viz.fit_show(X, y)
# One-call functional equivalent of the visualizer above.
thresholdviz(model, X, y)
| examples/ndanielsen/ThresholdVisualizer Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Pembukaan
#
# Assalamualaikum warahmatullahi wabarakatuh. Mohon ijin pimpinan 🙏🏽 . Dengan ini saya sampaikan data mengenai persekolahan di Indonesia, wabil khusus perbandingan antara kondisi nasional dan Papua (Provinsi Papua dan Provinsi Papua Barat). Data diperoleh dari situs [Data Pokok Pendidikan Dasar dan Menengah](https://dapo.dikdasmen.kemdikbud.go.id/).
#
# Maksud dan tujuan dari ini adalah untuk belajar apa yang sekiranya terjadi di Papua (meskipun hanya secuil), setidaknya dari segi pendidikan. Kode, slide presentasi, dan notebook dapat ditengok di https://github.com/ledwindra/pendidikan-papua. Silakan gunakan sesuka hati, klik `Star ⭐️` jika suka dengan projek ini, atau lempar kritik maupun caciannya di `Issues ⚠️`.
#
# <img src="https://media.giphy.com/media/87gYYkSC09QetBBHge/giphy.gif" width="500" align="center">
# + slideshow={"slide_type": "skip"}
import matplotlib.pyplot as plt
import pandas as pd
import re
import seaborn as sns
from bs4 import BeautifulSoup
# + slideshow={"slide_type": "skip"}
# Load the per-school master list; the zip contains a headerless CSV, so name the columns.
df = pd.read_csv('./data/subdistrict.zip', header=None)
df.columns = [
    'nama',
    'sekolah_id',
    'kode_wilayah_induk_kecamatan',
    'induk_provinsi',
    'kode_wilayah_induk_provinsi',
    'bentuk_pendidikan',
    'status_sekolah',
    'sekolah_id_enkrip'
]
# Strip stray spaces from the encrypted school id so it can be used as a join key.
df['sekolah_id_enkrip'] = df.apply(lambda x: x['sekolah_id_enkrip'].replace(' ', ''), axis=1)
# + slideshow={"slide_type": "skip"}
# Sanity check: the encrypted id should uniquely identify each school (expects True).
df.shape[0] == df['sekolah_id_enkrip'].nunique()
# + slideshow={"slide_type": "skip"}
# Count of schools in Papua and West Papua provinces combined.
len(df[df['induk_provinsi'] == 'Prov. Papua']) + len(df[df['induk_provinsi'] == 'Prov. Papua Barat'])
# + slideshow={"slide_type": "skip"}
sch = pd.read_csv('./data/school.zip', header=None)
sch.columns = [
'rombel',
'guru_kelas',
'guru_matematika',
'guru_bahasa_indonesia',
'guru_bahasa_inggris',
'guru_sejarah_indonesia',
'guru_pkn',
'guru_penjaskes',
'guru_agama_budi_pekerti',
'guru_seni_budaya',
'ptk_laki',
'ptk_perempuan',
'pegawai_laki',
'pegawai_perempuan',
'pd_kelas_1_laki',
'pd_kelas_1_perempuan',
'pd_kelas_2_laki',
'pd_kelas_2_perempuan',
'pd_kelas_3_laki',
'pd_kelas_3_perempuan',
'pd_kelas_4_laki',
'pd_kelas_4_perempuan',
'pd_kelas_5_laki',
'pd_kelas_5_perempuan',
'pd_kelas_6_laki',
'pd_kelas_6_perempuan',
'pd_kelas_7_laki',
'pd_kelas_7_perempuan',
'pd_kelas_8_laki',
'pd_kelas_8_perempuan',
'pd_kelas_9_laki',
'pd_kelas_9_perempuan',
'pd_kelas_10_laki',
'pd_kelas_10_perempuan',
'pd_kelas_11_laki',
'pd_kelas_11_perempuan',
'pd_kelas_12_laki',
'pd_kelas_12_perempuan',
'pd_kelas_13_laki',
'pd_kelas_13_perempuan',
'jumlah_kirim',
'ptk',
'pegawai',
'pd',
'pd_laki',
'pd_perempuan',
'jml_rk',
'jml_lab',
'jml_perpus',
'identitas_valid',
'ptk_valid',
'pd_valid',
'prasarana_valid',
'total_valid',
'kecukupan_air',
'memproses_air',
'minum_siswa',
'siswa_bawa_air',
'toilet_siswa_kk',
'sumber_air_str',
'ketersediaan_air',
'tipe_jamban',
'jml_wastafel',
'a_sabun_air_mengalir',
'jml_jamban_digunakan',
'jml_jamban_tidak_digunakan',
'sekolah_id_enkrip'
]
# + slideshow={"slide_type": "skip"}
# Uniqueness of the encrypted id must hold in both tables before merging (expects True).
df.shape[0] == df['sekolah_id_enkrip'].nunique() == sch['sekolah_id_enkrip'].nunique()
# + slideshow={"slide_type": "skip"}
# Drop exact duplicate rows, then inner-join the school facts onto the master list.
sch = sch.drop_duplicates()
df = pd.merge(df, sch, how='inner', on='sekolah_id_enkrip')
df.shape
# + slideshow={"slide_type": "skip"}
# Helper: mean of column *x* per value of the 'groups' column (defined in a later cell).
get_mean = lambda x: df.groupby('groups')[x].agg('mean').reset_index()
# + slideshow={"slide_type": "skip"}
def get_percentage(metrics):
    """Return a two-row DataFrame ('Papua', 'Non-Papua') with the share of
    schools in each group for which the boolean column *metrics* is True.

    Group codes in the global ``df``: 1 = Papua/West Papua, 2 = Non-Papua.
    The share column is named '<metrics>_pct'.
    """
    rows = []
    for label, code in (('Papua', 1), ('Non-Papua', 2)):
        group = df[df['groups'] == code]
        share = len(group[group[metrics] == True]) / len(group)
        rows.append({'groups': label, f'{metrics}_pct': share})
    return pd.DataFrame(rows)
# + slideshow={"slide_type": "skip"}
def get_bar(metrics, ycol, title):
    """Draw a bar chart comparing the Papua and Non-Papua groups for one metric.

    Keyword arguments:
    metrics -- DataFrame with one row per group holding the values to plot.
    ycol -- str, name of the column in *metrics* plotted on the y-axis.
    title -- str, chart title.
    """
    # seaborn catplot: x labels are fixed to the two groups, bar heights come from ycol.
    ax = sns.catplot(
        x=['Papua', 'Non-Papua'],
        y=ycol,
        data=metrics,
        kind='bar'
    )
    # Blank the axis labels; only the title is shown.
    ax.set(
        xlabel='',
        ylabel='',
        title=title
    )
# + slideshow={"slide_type": "skip"}
def get_bar_multiple(metrics, title):
    """Plot, for every distinct value of the categorical column *metrics*,
    the per-group share of schools having that value.

    Side effect: adds one boolean indicator column to the global ``df`` per
    distinct value (column name = the value sanitized to snake_case);
    the 'nan' value is skipped.

    Keyword arguments:
    metrics -- str, name of a categorical column in the global ``df``.
    title -- str, title prefix; each chart is titled '<title>: <value>'.
    """
    values = sorted(str(x) for x in set(df[metrics]))
    # Map sanitized column name -> original value, e.g. 'Air kemasan' gets key 'air_kemasan'.
    # FIX: raw string for the regex pattern — the original ' |\/' raised an
    # invalid escape-sequence warning; '/' needs no escaping in a regex.
    data = {re.sub(r' |/', '_', v).lower(): v for v in values}
    for key, value in data.items():
        if key != 'nan':  # missing values get no indicator column and no chart.
            df[key] = df.apply(lambda x: x[metrics] == value, axis=1)
            # NOTE: the original also computed an unused groupby count here; removed as dead code.
            get_bar(get_percentage(key), f'{key}_pct', f'{title}: {value}')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Perbandingan jumlah rata-rata nasional dan Papua
# Perlu dicatat bahwa dalam konteks ini Provinsi Papua dan Papua Barat dijadikan satu.
# + slideshow={"slide_type": "skip"}
# Boolean flag: is the school in Papua or West Papua province?
df['is_papua'] = df.apply(lambda x: (x['induk_provinsi'] == 'Prov. Papua') | (x['induk_provinsi'] == 'Prov. Papua Barat'), axis=1)
# School counts for Papua (True) vs the rest of the country (False).
pd.DataFrame(df.groupby('is_papua').size(), columns=['count']).reset_index()
# + slideshow={"slide_type": "slide"}
def recode_province(x):
    """Label a school row as 'Papua or West Papua' or 'Non-Papua' from its parent province."""
    papua_provinces = ('Prov. Papua', 'Prov. Papua Barat')
    return 'Papua or West Papua' if x['induk_provinsi'] in papua_provinces else 'Non-Papua'
# + slideshow={"slide_type": "slide"}
# Label every school, then encode the labels numerically (1 = Papua, 2 = Non-Papua).
df['groups'] = df.apply(recode_province, axis=1)
groups_num = {'Papua or West Papua': 1, 'Non-Papua': 2}
df = df.replace({'groups': groups_num})
# School counts per numeric group code.
pd.DataFrame(df.groupby('groups').size(), columns=['count']).reset_index()
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru kelas
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_kelas')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru matematika
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_matematika')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru bahasa Indonesia
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_bahasa_indonesia')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru bahasa Inggris
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_bahasa_inggris')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru sejarah Indonesia
# + slideshow={"slide_type": "skip"}
# meaningless data from soure
get_mean('guru_sejarah_indonesia')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru PKN
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_pkn')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru penjaskes
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_penjaskes')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru agama budi pekerti
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_agama_budi_pekerti')
# + [markdown] slideshow={"slide_type": "skip"}
# ### Guru seni budaya
# + slideshow={"slide_type": "skip"}
# meaningless data from source
get_mean('guru_seni_budaya')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Pendidik dan tenaga kependidikan (PTK)
# + slideshow={"slide_type": "slide"}
get_mean('ptk')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('ptk'), 'ptk', 'Rata-rata jumlah PTK')
# + slideshow={"slide_type": "slide"}
get_mean('ptk_laki')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('ptk_laki'), 'ptk_laki', 'Rata-rata jumlah PTK laki-laki')
# + slideshow={"slide_type": "slide"}
get_mean('ptk_perempuan')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('ptk_perempuan'), 'ptk_perempuan', 'Rata-rata jumlah PTK perempuan')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Pegawai
# + slideshow={"slide_type": "slide"}
get_mean('pegawai')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('pegawai'), 'pegawai', 'Rata-rata jumlah pegawai')
# + slideshow={"slide_type": "slide"}
get_mean('pegawai_laki')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('pegawai_laki'), 'pegawai_laki', 'Rata-rata jumlah pegawai laki')
# + slideshow={"slide_type": "slide"}
get_mean('pegawai_perempuan')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('pegawai_perempuan'), 'pegawai_perempuan', 'Rata rata jumlah pegawai perempuan')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Peserta didik (PD)
# + slideshow={"slide_type": "slide"}
get_mean('pd')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('pd'), 'pd', 'Rata-rata jumlah PD')
# + slideshow={"slide_type": "slide"}
get_mean('pd_laki')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('pd_laki'), 'pd_laki', 'Rata-rata jumlah PD laki-laki')
# + slideshow={"slide_type": "slide"}
get_mean('pd_perempuan')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('pd_perempuan'), 'pd_perempuan', 'Rata-rata jumlah PD perempuan')
# + slideshow={"slide_type": "slide"}
# Average number of male pupils per school, one chart per grade (kelas 1-12).
# NOTE(review): the data also has pd_kelas_13_* columns, which these loops skip — confirm intended.
for i in range(1, 13):
    pd_laki = get_mean(f'pd_kelas_{i}_laki')
    get_bar(pd_laki, f'pd_kelas_{i}_laki', f'Jumlah PD laki-laki kelas {i}')
# + slideshow={"slide_type": "slide"}
# Same per-grade charts for female pupils.
for i in range(1, 13):
    pd_perempuan = get_mean(f'pd_kelas_{i}_perempuan')
    get_bar(pd_perempuan, f'pd_kelas_{i}_perempuan', f'Rata-rata jumlah PD perempuan kelas {i}')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Ruang kelas
# + slideshow={"slide_type": "slide"}
get_mean('jml_rk')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('jml_rk'), 'jml_rk', 'Rata-rata ruang kelas')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Laboratorium
# + slideshow={"slide_type": "slide"}
get_mean('jml_lab')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('jml_lab'), 'jml_lab', 'Rata-rata jumlah laboratorium')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Perpustakaan
# + slideshow={"slide_type": "slide"}
get_mean('jml_perpus')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('jml_perpus'), 'jml_perpus', 'Rata-rata jumlah perpustakaan')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Kecukupan air
# + slideshow={"slide_type": "slide"}
set(df['kecukupan_air'])
# + slideshow={"slide_type": "skip"}
df['is_water_enough'] = df.apply(lambda x: x['kecukupan_air'] == 'Cukup', axis=1)
# + slideshow={"slide_type": "slide"}
is_water_enough = df.groupby(['groups', 'is_water_enough'])['sekolah_id_enkrip'].agg('count').to_frame().reset_index()
is_water_enough
# + slideshow={"slide_type": "slide"}
get_percentage('is_water_enough')
# + slideshow={"slide_type": "slide"}
get_bar(get_percentage('is_water_enough'), 'is_water_enough_pct', 'Persentase sekolah yang memiliki kecukupan air')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Memproses air
# Tidak jelas maksudnya apa. Mohon pencerahannya 🙏🏽
# + slideshow={"slide_type": "slide"}
set(df['memproses_air'])
# + slideshow={"slide_type": "skip"}
df['is_processing_water'] = df.apply(lambda x: x['memproses_air'] == 'Ya', axis=1)
# + slideshow={"slide_type": "slide"}
is_processing_water = df.groupby(['groups', 'is_processing_water'])['sekolah_id_enkrip'].agg('count').to_frame().reset_index()
is_processing_water
# + slideshow={"slide_type": "slide"}
get_percentage('is_processing_water')
# + slideshow={"slide_type": "slide"}
get_bar(get_percentage('is_processing_water'), 'is_processing_water_pct', 'Persentase sekolah yang memproses air')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Minum siswa
# + slideshow={"slide_type": "slide"}
set(df['minum_siswa'])
# + slideshow={"slide_type": "skip"}
df['provided_by_school'] = df.apply(lambda x: x['minum_siswa'] == 'Disediakan sekolah', axis=1)
# + slideshow={"slide_type": "slide"}
provided_by_school = df.groupby(['groups', 'provided_by_school'])['sekolah_id_enkrip'].agg('count').to_frame().reset_index()
provided_by_school
# + slideshow={"slide_type": "slide"}
get_percentage('provided_by_school')
# + slideshow={"slide_type": "slide"}
get_bar(get_percentage('provided_by_school'), 'provided_by_school_pct', 'Persentase sekolah yang menyediakan air minum kepada murid')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Siswa bawa air
# Maksudnya murid bawa air minum kali ya? Mohon pencerahannya. 🙏🏽
# + slideshow={"slide_type": "slide"}
set(df['siswa_bawa_air'])
# + slideshow={"slide_type": "skip"}
df['students_bring_water'] = df.apply(lambda x: x['siswa_bawa_air'] == 'Ya', axis=1)
# + slideshow={"slide_type": "slide"}
students_bring_water = df.groupby(['groups', 'students_bring_water'])['sekolah_id_enkrip'].agg('count').to_frame().reset_index()
students_bring_water
# + slideshow={"slide_type": "slide"}
get_percentage('students_bring_water')
# + slideshow={"slide_type": "slide"}
get_bar(get_percentage('students_bring_water'), 'students_bring_water_pct', 'Persentase murid yang membawa air')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Toilet siswa berkebutuhan khusus
# + slideshow={"slide_type": "slide"}
get_mean('toilet_siswa_kk')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('toilet_siswa_kk'), 'toilet_siswa_kk', 'Rata-rata jumlah toilet bagi murid berkebutuhan khusus')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sumber air
# + slideshow={"slide_type": "slide"}
set(df['sumber_air_str'])
# + slideshow={"slide_type": "slide"}
get_bar_multiple('sumber_air_str', 'Persentase sumber air sekolah')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Ketersediaan air
# + slideshow={"slide_type": "slide"}
set(df['ketersediaan_air'])
# + slideshow={"slide_type": "skip"}
df['access_to_water'] = df.apply(lambda x: x['ketersediaan_air'] == 'Ya', axis=1)
# + slideshow={"slide_type": "slide"}
# Count schools per (group, water-availability) combination.
access_to_water = df.groupby(['groups', 'access_to_water'])['sekolah_id_enkrip'].agg('count').to_frame().reset_index()
access_to_water
# + slideshow={"slide_type": "slide"}
get_percentage('access_to_water')
# + slideshow={"slide_type": "slide"}
get_bar(get_percentage('access_to_water'), 'access_to_water_pct', 'Persentase sekolah yang memiliki ketersediaan air')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Tipe jamban
# + slideshow={"slide_type": "slide"}
set(df['tipe_jamban'])
# + slideshow={"slide_type": "slide"}
get_bar_multiple('tipe_jamban', 'Persentase tipe jamban sekolah')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Wastafel
# + slideshow={"slide_type": "slide"}
get_mean('jml_wastafel')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('jml_wastafel'), 'jml_wastafel', 'Rata-rata jumlah wastafel')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Sabun dan air
# + slideshow={"slide_type": "slide"}
set(df['a_sabun_air_mengalir'])
# + slideshow={"slide_type": "skip"}
# Vectorized comparison replaces the original row-wise df.apply(..., axis=1):
# identical boolean Series, one C-level pass instead of a Python call per row.
df['water_and_soap'] = df['a_sabun_air_mengalir'] == 'Ya'
# + slideshow={"slide_type": "slide"}
# Count schools per (group, soap-and-water availability) combination.
water_and_soap = df.groupby(['groups', 'water_and_soap'])['sekolah_id_enkrip'].agg('count').to_frame().reset_index()
water_and_soap
# + slideshow={"slide_type": "slide"}
get_percentage('water_and_soap')
# + slideshow={"slide_type": "slide"}
get_bar(get_percentage('water_and_soap'), 'water_and_soap_pct', 'Persentase sekolah yang memiliki ketersediaan air dan sabun')
# + [markdown] slideshow={"slide_type": "slide"}
# ### Jamban
# + slideshow={"slide_type": "slide"}
get_mean('jml_jamban_digunakan')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('jml_jamban_digunakan'), 'jml_jamban_digunakan', 'Rata-rata jumlah jamban yang digunakan')
# + slideshow={"slide_type": "slide"}
get_mean('jml_jamban_tidak_digunakan')
# + slideshow={"slide_type": "slide"}
get_bar(get_mean('jml_jamban_tidak_digunakan'), 'jml_jamban_tidak_digunakan', 'Rata-rata jumlah jamban yang tidak digunakan')
# + [markdown] slideshow={"slide_type": "slide"}
# # Kesimpulan
# Seperti yang sudah kita lihat, kondisi persekolahan di Papua dan Papua Barat jelas tertinggal jika dibandingkan dengan nasional dari beberapa aspek. Semoga dengan ini saya sebagai masyarakat kelas menengah ngehe dapat mengakui privilese hidup yang dikaruniai Tuhan YME. Demikian saya ucapkan wabillahi taufiq wal hidayah, wassalamualikum warahmatullahi wabarakatuh. 🙏🏽
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Installation
# :label:`chap_installation`
#
# In order to get you up and running for hands-on learning experience,
# we need to set you up with an environment for running Python,
# Jupyter notebooks, the relevant libraries,
# and the code needed to run the book itself.
#
# ## Installing Miniconda
#
# The simplest way to get going will be to install
# [Miniconda](https://conda.io/en/latest/miniconda.html). The Python 3.x version
# is required. You can skip the following steps if conda has already been installed.
# Download the corresponding Miniconda sh file from the website
# and then execute the installation from the command line
# using `sh <FILENAME> -b`. For macOS users:
#
# ```bash
# # The file name is subject to changes
# sh Miniconda3-latest-MacOSX-x86_64.sh -b
# ```
#
# For Linux users:
#
# ```bash
# # The file name is subject to changes
# sh Miniconda3-latest-Linux-x86_64.sh -b
# ```
#
# Next, initialize the shell so we can run `conda` directly.
#
# ```bash
# ~/miniconda3/bin/conda init
# ```
#
# Now close and re-open your current shell. You should be able to create a new
# environment as follows:
#
# ```bash
# conda create --name d2l python=3.8 -y
# ```
#
# ## Downloading the D2L Notebooks
#
# Next, we need to download the code of this book. You can click the "All
# Notebooks" tab on the top of any HTML page to download and unzip the code.
# Alternatively, if you have `unzip` (otherwise run `sudo apt install unzip`) available:
#
# ```bash
# # # mkdir d2l-en && cd d2l-en
# curl https://d2l.ai/d2l-en.zip -o d2l-en.zip
# unzip d2l-en.zip && rm d2l-en.zip
# ```
#
# Now we will want to activate the `d2l` environment.
#
# ```bash
# conda activate d2l
# ```
#
# ## Installing the Framework and the `d2l` Package
#
# Before installing the deep learning framework, please first check
# whether or not you have proper GPUs on your machine
# (the GPUs that power the display on a standard laptop
# do not count for our purposes).
# If you are installing on a GPU server,
# proceed to :ref:`subsec_gpu` for instructions
# to install a GPU-supported version.
#
# Otherwise, you can install the CPU version as follows.
# That will be more than enough horsepower to get you
# through the first few chapters but you will want
# to access GPUs before running larger models.
#
# + [markdown] origin_pos=3 tab=["tensorflow"]
# You can install TensorFlow with both CPU and GPU support via the following:
#
# ```bash
# pip install tensorflow tensorflow-probability
# ```
#
# + [markdown] origin_pos=4
# We also install the `d2l` package that encapsulates frequently used
# functions and classes in this book.
#
# ```bash
# # -U: Upgrade all packages to the newest available version
# pip install -U d2l
# ```
#
# Once they are installed, we now open the Jupyter notebook by running:
#
# ```bash
# jupyter notebook
# ```
#
# At this point, you can open http://localhost:8888 (it usually opens automatically) in your Web browser. Then we can run the code for each section of the book.
# Please always execute `conda activate d2l` to activate the runtime environment
# before running the code of the book or updating the deep learning framework or the `d2l` package.
# To exit the environment, run `conda deactivate`.
#
#
# ## GPU Support
# :label:`subsec_gpu`
#
# + [markdown] origin_pos=6 tab=["tensorflow"]
# By default, the deep learning framework is installed with GPU support.
# If your computer has NVIDIA GPUs and has installed [CUDA](https://developer.nvidia.com/cuda-downloads),
# then you are all set.
#
# + [markdown] origin_pos=7
# ## Exercises
#
# 1. Download the code for the book and install the runtime environment.
#
# + [markdown] origin_pos=10 tab=["tensorflow"]
# [Discussions](https://discuss.d2l.ai/t/436)
#
| d2l-en/tensorflow/chapter_installation/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''misc'': conda)'
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
from glob import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches
from scipy.interpolate import splprep, splev
import json
import cv2
# -
# Dataset root on the cluster; CT slices are stored one PNG per slice in one
# directory per scanning procedure (patient_study_series).
ROOT_DIR = '/scratche/users/sansiddh/DeepLesion/'
IMG_DIR = ROOT_DIR+'Images_png/'
# Every slice image across all procedures, and the procedure-directory count.
images = glob(IMG_DIR+'*/*')
len(images)
len(glob(f'{IMG_DIR}/*'))
# Lesion-level metadata: one row per annotated lesion (DeepLesion DL_info.csv).
df_metadata = pd.read_csv(ROOT_DIR+'DL_info.csv')
df_metadata
# Look up one specific annotated slice by file name.
df_metadata[df_metadata['File_name'] == '002136_04_01_027.png']
df_metadata.dtypes
# +
# Fit a B-spline through the first lesion's measurement points and sample it.
# NOTE(review): 'Measurement_coordinates' is converted from string to floats
# only by the loop below -- this cell appears to rely on out-of-order notebook
# execution; confirm the loop ran first, otherwise np.array() gets a raw string.
points2d = np.array(df_metadata.loc[0, 'Measurement_coordinates']).reshape((-1, 2)).T
tck, u = splprep(points2d)
unew = np.linspace(0, 1, 100)
basic_form = splev(unew, tck)
basic_form
# -
# These CSV columns hold comma-separated numbers; parse each into list[float].
columns = ['Measurement_coordinates', 'Bounding_boxes', 'Lesion_diameters_Pixel_', 'Normalized_lesion_location',
           'Slice_range', 'Spacing_mm_px_', 'Image_size', 'DICOM_windows']
for colname in columns:
    print(colname)
    df_metadata[colname] = df_metadata[colname].apply(lambda x : list(map(float, x.split(', '))))
# Lesion counts per CT scanning procedure (patient, study, series).
df_temp = df_metadata.groupby(['Patient_index', 'Study_index', 'Series_ID']).count()
# Procedures with more than 5 identified lesions, most lesions first.
df_temp[(df_temp > 5)['File_name']].sort_values('File_name', ascending=False)
# Distribution of lesions over the coarse lesion types.
df_metadata.groupby('Coarse_lesion_type').count()
df_temp = df_metadata.groupby(['Patient_index', 'Study_index', 'Series_ID']).count()
df_temp = df_temp.reset_index()
# (Here each row in the dataframe denotes 1 CT scanning procedure, note that 1 study can have multiple procedures (multiple contrasts))
# And the count in each row is total number of IDENTIFIED lesions in that procedure
# Total number of CT scanning procedures (14601)
df_temp2 = df_temp.groupby(['Patient_index', 'Study_index']).count()
df_temp2 = df_temp2.sort_values('File_name', ascending=True)
# df_temp2 is the total number of CT scanning studies (10594).
df_temp2
# Studies comprising more than 2 scanning procedures.
df_temp2[(df_temp2 > 2)['File_name']].sort_values('File_name', ascending=False)
# There are a total of 574 studies with multiple CT scanning procedures
# Text-mined body-part/finding labels plus the official train/val/test split.
with open(ROOT_DIR+'text_mined_labels_171_and_split.json', 'r') as f:
    labels_json = json.load(f)
labels_json.keys()
# +
# Sorted lesion indices of the validation split (display only; result unused).
np.sort(labels_json['val_lesion_idxs'])
# Concatenate the per-split relevant-label lists into one flat list.
full_array = []
full_array += labels_json['val_relevant_labels']
full_array += labels_json['test_relevant_labels']
full_array += labels_json['train_relevant_labels']
# Collect (and print) the label sets that mention 'pancreas'.
subset_array = []
for labels in full_array:
    labels_list = [labels_json['term_list'][x] for x in labels]
    if 'pancreas' in labels_list:
        subset_array.append(labels_list)
        print(labels_list)
# -
len(labels_json['val_relevant_labels'])
# Total number of lesion indices across all three splits.
idx_arr = []
idx_arr.extend(labels_json['train_lesion_idxs'])
idx_arr.extend(labels_json['val_lesion_idxs'])
idx_arr.extend(labels_json['test_lesion_idxs'])
len(idx_arr)
# Per-split sizes of the training label lists.
len(labels_json['train_lesion_idxs'])
len(labels_json['train_relevant_labels'])
len(labels_json['train_irrelevant_labels'])
len(labels_json['train_uncertain_labels'])
# # Visualise the images
# +
# Load a 16-bit DeepLesion slice; stored pixel value = HU + 32768, so subtract
# the offset to recover Hounsfield units.
img = cv2.imread(f'{IMG_DIR}/000001_01_01/109.png', cv2.IMREAD_UNCHANGED)
img = img.astype('int32')
img = img - 32768
print(np.unique(img))
print(img)

# Clip to a soft-tissue HU window and rescale to [0, 1].
min_hu, max_hu = (-175, 275)
img[img > max_hu] = max_hu
img[img < min_hu] = min_hu
print(np.unique(img))
img = (img - min_hu)/(max_hu - min_hu)
img_pr = img

# Copy the bounding box before adjusting it: 'Bounding_boxes' holds a list
# object that lives *inside* df_metadata, so the original in-place += / -=
# mutated the DataFrame and corrupted the coordinates that the later cells
# read back via df_metadata.loc[0, 'Bounding_boxes'].
bboxes = list(df_metadata.loc[0, 'Bounding_boxes'])
# Remove the 5-pixel padding around the annotated lesion box.
bboxes[0] += 5
bboxes[1] += 5
bboxes[2] -= 5
bboxes[3] -= 5

rect = patches.Rectangle((bboxes[0], bboxes[1]),
                         bboxes[2] - bboxes[0], bboxes[3] - bboxes[1],
                         linewidth=1, edgecolor='r', facecolor='none')
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(img, cmap='gray')
ax.add_patch(rect)
# -
# Adjacent slice for comparison; difference against the windowed slice above.
# (plt.imread returns PNG data as float32 in [0, 1] -- scales differ from img_pr's
# HU-windowed normalization, so this is only a rough comparison.)
img = plt.imread(f'{IMG_DIR}/000001_01_01/110.png')
img - img_pr
# +
# Same neighbouring slice with the raw (unshrunken) lesion bounding box drawn.
img = plt.imread(f'{IMG_DIR}/000001_01_01/110.png')
bboxes = df_metadata.loc[0, 'Bounding_boxes']
rect = patches.Rectangle((bboxes[0], bboxes[1]),
                         bboxes[2] - bboxes[0], bboxes[3] - bboxes[1],
                         linewidth=1, edgecolor='r', facecolor='none')
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(img, cmap='gray')
ax.add_patch(rect)
# +
# A slice from a different procedure (series 02_01) of the same patient with
# the row-0 bbox overlaid. NOTE(review): that bbox was annotated on series
# 01_01, so drawing it here is only a rough sanity check -- confirm intent.
img = plt.imread(f'{IMG_DIR}/000001_02_01/014.png')
bboxes = df_metadata.loc[0, 'Bounding_boxes']
rect = patches.Rectangle((bboxes[0], bboxes[1]),
                         bboxes[2] - bboxes[0], bboxes[3] - bboxes[1],
                         linewidth=1, edgecolor='r', facecolor='none')
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(img, cmap='gray')
ax.add_patch(rect)
# -
| notebooks/[TRY] visualise-deeplesion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Environment report: print the interpreter and key library versions so the
# notebook's results can be reproduced later.
import sys
import scipy
import numpy
import matplotlib
import pandas
import sklearn

print('Python: {}'.format(sys.version))
for _module in (scipy, numpy, matplotlib, pandas, sklearn):
    print('{}: {}'.format(_module.__name__, _module.__version__))
# +
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import cross_validate
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import model_selection
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import pandas as pd
# -
# Load Dataset
# Wisconsin breast-cancer data fetched straight from the UCI repository
# (requires network access); the file has no header row, so names are supplied.
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data"
names = ['id', 'clump_thickness', 'uniform_cell_size', 'uniform_cell_shape',
         'marginal_adhesion', 'single_epithelial_size', 'bare_nuclei',
         'bland_chromatin', 'normal_nucleoli', 'mitoses', 'class']
df = pd.read_csv(url, names=names)
# +
# Preprocess the data
# '?' marks missing 'bare_nuclei' values; replace with a large sentinel so the
# column stays usable by the classifiers. NOTE(review): an outlier-sized
# sentinel is crude -- imputation would be cleaner; kept for parity.
df.replace('?',-99999, inplace=True)
print(df.axes)
# Drop the sample-id column. Keyword form: the positional `axis` argument
# (df.drop(['id'], 1)) was removed in pandas 2.0 and now raises TypeError.
df.drop(columns=['id'], inplace=True)
# +
# Let's explore the dataset and do a few visualizations
print(df.loc[10])
# Print the shape of the dataset
print(df.shape)
# -
# Describe the dataset
print(df.describe())
# Plot histograms for each variable
df.hist(figsize = (10, 10))
plt.show()
# Create scatter plot matrix
scatter_matrix(df, figsize = (18,18))
plt.show()
# +
# Create X and Y datasets for training
# Keyword `columns=`: the positional axis argument was removed in pandas 2.0.
X = np.array(df.drop(columns=['class']))
y = np.array(df['class'])

# train_test_split was never imported by name anywhere in this notebook, so
# the original bare call raised NameError; call it through the already
# imported model_selection module instead.
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2)
# -
# Testing Options
seed = 8
scoring = 'accuracy'
# +
# Define models to train
models = []
models.append(('KNN', KNeighborsClassifier(n_neighbors = 5)))
models.append(('SVM', SVC(gamma='auto')))

# Evaluate each model with 10-fold cross-validation on the training split.
results = []
names = []
for name, model in models:
    # shuffle=True is required here: scikit-learn >= 0.24 raises ValueError
    # when random_state is set on an unshuffled KFold.
    kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state = seed)
    cv_results = model_selection.cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    # Report mean accuracy and its standard deviation across folds.
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
# +
# Make predictions on validation dataset
# Fit each candidate on the training split and report held-out accuracy plus
# a per-class precision/recall/F1 breakdown.
for name, model in models:
    model.fit(X_train, y_train)
    predictions = model.predict(X_test)
    print(name)
    print(accuracy_score(y_test, predictions))
    print(classification_report(y_test, predictions))
# +
# Final model: refit the SVM and classify one hand-crafted example
# (nine feature values in the same column order as X).
clf = SVC(gamma='auto')
clf.fit(X_train, y_train)
accuracy = clf.score(X_test, y_test)
print(accuracy)
example_measures = np.array([[4,2,1,1,1,2,3,2,1]])
example_measures = example_measures.reshape(len(example_measures), -1)
prediction = clf.predict(example_measures)
print(prediction)
# -
| BreastCancerDetection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import geopandas as gpd
from pyspark.sql import SparkSession
from geo_pyspark.register import GeoSparkRegistrator
from geo_pyspark.utils import GeoSparkKryoRegistrator, KryoSerializer
from geo_pyspark.data import csv_point_input_location, mixed_wkt_geometry_input_location,\
    mixed_wkb_geometry_input_location, geojson_input_location
from geo_pyspark.data import data_path
# -
# Local Spark session using all cores; GeoSpark requires Kryo serialization
# (with its own registrator) for its geometry types.
spark = SparkSession.builder.\
    master("local[*]").\
    appName("TestApp").\
    config("spark.serializer", KryoSerializer.getName).\
    config("spark.kryo.registrator", GeoSparkKryoRegistrator.getName) .\
    getOrCreate()
# Register the ST_* SQL functions on this session.
GeoSparkRegistrator.registerAll(spark)
# ## Geometry Constructors
# ### ST_Point
# +
# Build point geometries from a headerless CSV of lon/lat columns.
point_csv_df = spark.read.format("csv").\
    option("delimiter", ",").\
    option("header", "false").\
    load(csv_point_input_location)
point_csv_df.createOrReplaceTempView("pointtable")
point_df = spark.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)), cast(pointtable._c1 as Decimal(24,20))) as arealandmark from pointtable")
point_df.show(5)
# -
# ### ST_GeomFromText
# +
# Parse WKT strings (column 0) into geometries; column 6 holds the name.
polygon_wkt_df = spark.read.format("csv").\
    option("delimiter", "\t").\
    option("header", "false").\
    load(mixed_wkt_geometry_input_location)
polygon_wkt_df.createOrReplaceTempView("polygontable")
polygon_df = spark.sql("select polygontable._c6 as name, ST_GeomFromText(polygontable._c0) as countyshape from polygontable")
polygon_df.show(5)
# -
# ### ST_GeomFromWKB
# +
# Same dataset encoded as WKB instead of WKT.
polygon_wkb_df = spark.read.format("csv").\
    option("delimiter", "\t").\
    option("header", "false").\
    load(mixed_wkb_geometry_input_location)
polygon_wkb_df.createOrReplaceTempView("polygontable")
polygon_df = spark.sql("select polygontable._c6 as name, ST_GeomFromWKB(polygontable._c0) as countyshape from polygontable")
polygon_df.show(5)
# -
# ### ST_GeomFromGeoJSON
# +
# One GeoJSON feature per line.
polygon_json_df = spark.read.format("csv").\
    option("delimiter", "\t").\
    option("header", "false").\
    load(geojson_input_location)
polygon_json_df.createOrReplaceTempView("polygontable")
polygon_df = spark.sql("select ST_GeomFromGeoJSON(polygontable._c0) as countyshape from polygontable")
polygon_df.show(5)
# -
# ## Spatial Operations
# ### Spatial Join - Distance Join
# +
# Self-join of the point dataset with itself: pair up points closer than 2
# (in the data's native coordinate units).
point_csv_df_1 = spark.read.format("csv").\
    option("delimiter", ",").\
    option("header", "false").load(csv_point_input_location)
point_csv_df_1.createOrReplaceTempView("pointtable")
point_df1 = spark.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape1 from pointtable")
point_df1.createOrReplaceTempView("pointdf1")
# Second copy of the same points for the join's right side.
point_csv_df2 = spark.read.format("csv").\
    option("delimiter", ",").\
    option("header", "false").load(csv_point_input_location)
point_csv_df2.createOrReplaceTempView("pointtable")
point_df2 = spark.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape2 from pointtable")
point_df2.createOrReplaceTempView("pointdf2")
distance_join_df = spark.sql("select * from pointdf1, pointdf2 where ST_Distance(pointdf1.pointshape1,pointdf2.pointshape2) < 2")
# Show the optimized physical plan (GeoSpark turns this into a distance join).
distance_join_df.explain()
distance_join_df.show(5)
# -
# For more examples please refer to http://geospark.datasyslab.org/
# ### Converting GeoPandas to GeoSpark
# +
gdf = gpd.read_file(os.path.join(data_path, "gis_osm_pois_free_1.shp"))
osm_points = spark.createDataFrame(
gdf
)
# -
osm_points.printSchema()
osm_points.show(5)
osm_points.createOrReplaceTempView("points")
transformed_df = spark.sql(
"""
SELECT osm_id,
code,
fclass,
name,
ST_Transform(geometry, 'epsg:4326', 'epsg:2180') as geom
FROM points
""")
transformed_df.show(5)
transformed_df.createOrReplaceTempView("points_2180")
neighbours_within_1000m = spark.sql("""
SELECT a.osm_id AS id_1,
b.osm_id AS id_2,
a.geom
FROM points_2180 AS a, points_2180 AS b
WHERE ST_Distance(a.geom,b.geom) < 50
""")
neighbours_within_1000m.show()
# ## Converting GeoSpark to GeoPandas
df = neighbours_within_1000m.toPandas()
gdf = gpd.GeoDataFrame(df, geometry="geom")
gdf
| ShowCase Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Color legend
# +
import os,sys,inspect
# Absolute directory of this notebook, resolved through the inspect stack so
# it works regardless of the CWD Jupyter was launched from.
pwdpath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# sys.path.insert(0, pwdpath)
from pathlib import Path
# Make the repo root and its train/ package importable.
sys.path.insert(0, str(Path(pwdpath).parent))
sys.path.insert(0, str(Path(pwdpath).parent / 'train'))
from util.utils_dataset import get_color_encoding, color_dict_to_text_color_lists
# color_dict = get_color_encoding('openrooms')
# +
import colorsys
import collections
import matplotlib.pyplot as plt

# Render the dataset's label->colour mapping as two side-by-side tables and
# save the figure under data/<dataset>/color_mapping.png.
# dataset_name = 'openrooms'
# dataset_name = 'InteriorNet'
dataset_name = 'scannet'
font_size = 150
fig = plt.figure(figsize=(30, 10))

# Left panel: rows in label-id order.
ax = plt.subplot(121)
color_dict = get_color_encoding(dataset_name)
text_list, color_list = color_dict_to_text_color_lists(color_dict)
plt.axis('tight')
plt.axis('off')
the_table = ax.table(cellText=text_list, cellColours=color_list, bbox=[0., 0.0, 1.2, 1.2])
the_table.set_fontsize(font_size)
the_table.scale(1.5, 6)
# ax.title.set_text('sort by label IDs')

# Right panel: same rows re-sorted by colour (HSV order) for visual grouping.
ax = plt.subplot(122)
# ax.title.set_text('sort by colors')
color_dict = get_color_encoding(dataset_name)
color_dict = collections.OrderedDict({k: v for k, v in sorted(color_dict.items(), key=lambda item: colorsys.rgb_to_hsv(item[1][0], item[1][1], item[1][2]))})
text_list, color_list = color_dict_to_text_color_lists(color_dict)
plt.axis('tight')
plt.axis('off')
# ax.title.set_text('Sorted by label')
the_table = ax.table(cellText=text_list, cellColours=color_list, bbox=[0.1, 0.0, 1.2, 1.2])
the_table.set_fontsize(font_size)
the_table.scale(1.5, 6)

plt.subplots_adjust(left=0.125,
                    bottom=0.1,
                    right=0.9,
                    top=0.9,
                    wspace=0.2,
                    hspace=0.35)
plt.show()
fig.savefig('data/%s/color_mapping.png'%dataset_name, bbox_inches='tight')
# +
import random

# Toy data for experimenting with random.sample.
list_a = [1, 2, 3, 4, 5, 31, 412, 11, 0]
index_list = range(len(list_a))
sample_num = len(index_list)
# Restored: this assignment was commented out, but `index_sample` is displayed
# in the next cell, which raised NameError. Draws a full-length random
# permutation of the indices (sampling without replacement).
index_sample = random.sample(index_list, sample_num)
# Display a 4-element random sample of the values themselves.
random.sample(list_a, 4)
# +
index_sample
# +
import numpy as np

# Lookup table: (NYU-40 id + 1) -> OpenRooms id. 255 marks labels with no
# OpenRooms counterpart; ids absent from the table map to themselves.
nyu_or_dict = {0: 255, 1: 40, 2: 41, 3: 24, 4: 15, 5: 18, 6: 8, 7: 4, 8: 38,
               9: 27, 10: 7, 11: 255, 12: 4, 13: 255, 14: 4, 15: 7, 16: 1,
               17: 255, 18: 13, 19: 255, 20: 255, 21: 255, 22: 42, 23: 255,
               24: 255, 25: 20, 26: 255, 27: 18, 28: 255, 29: 255, 30: 255,
               31: 255, 32: 255, 33: 255, 34: 32, 35: 28, 36: 21, 37: 33,
               38: 5, 39: 3, 40: 6}

def _lookup(label):
    """Translate one label id, falling back to the id itself when unmapped."""
    return nyu_or_dict.get(label + 1, label)

# Vectorized form usable on whole numpy arrays.
nyu_or_map = np.vectorize(_lookup)
# -
nyu_or_map(np.zeros((2, 2)))
nyu_or_map
# +
import torch

# 3x3 zero tensor of uint8 labels used to exercise the mapping in-place.
a = torch.zeros((3, 3), dtype=torch.uint8)

import numpy as np

# Lookup table: (NYU-40 id + 1) -> OpenRooms id; 255 marks "unmapped" and
# ids absent from the table pass through unchanged.
nyu_or_dict = {0: 255, 1: 40, 2: 41, 3: 24, 4: 15, 5: 18, 6: 8, 7: 4, 8: 38,
               9: 27, 10: 7, 11: 255, 12: 4, 13: 255, 14: 4, 15: 7, 16: 1,
               17: 255, 18: 13, 19: 255, 20: 255, 21: 255, 22: 42, 23: 255,
               24: 255, 25: 20, 26: 255, 27: 18, 28: 255, 29: 255, 30: 255,
               31: 255, 32: 255, 33: 255, 34: 32, 35: 28, 36: 21, 37: 33,
               38: 5, 39: 3, 40: 6}

def _remap(label):
    """Translate one label id (identity when the id is unmapped)."""
    return nyu_or_dict.get(label + 1, label)

nyu_or_map = _remap
# torch has no np.vectorize equivalent; Tensor.apply_ walks a CPU tensor
# element-wise and rewrites it in place.
print(a.apply_(nyu_or_map))
# -
from PIL import Image
# Load one NYU-v2 seg40 label map as a grayscale array (requires the dataset
# to be available locally).
label_path = '../dataset/nyu/seg40/train/01017.png'
label = np.array(Image.open(label_path).convert('L'))
import matplotlib.pyplot as plt
plt.figure(figsize=(15, 8))
plt.imshow(label)
plt.colorbar()
plt.show()
# NOTE(review): this dict maps in the opposite direction to the earlier cells
# (here: source id -> NYU-40 id, with 0 as "unlabeled") and shadows the
# previous nyu_or_dict -- confirm which mapping later cells expect.
nyu_or_dict = {43:1, 44:2, 28:3, 42:3, 18:4, 21:5, 11:6, 4:7, 41:8, 31:9, 10:10, 7:12, 5:14, 1:16, 16:18, 45:22, 24:30, 32:35, 25:36, 37:37, 8:38, 26:38, \
               3:39, 6:39, 13:39, 14:39, 40:39, \
               9:40, 12:40, 15:40, 17:40, 19:40, 20:40, 22:40, 23:40, 27:40, 30:40, 33:40, 34:40, 35:40, 36:40, 38:40, 39:40, 2:0, 29:0}
# 0:255, 1:40, 2:41, 3:24, 4:15, 5:18, 6:8, 7:4, 8:38, 9:27,
# 10:7, 11:255, 12:4, 13:255, 14:4, 15:7, 16:1, 17: 255, 18:13, 19:255,
# 20: 255, 21:255, 22:42, 23: 255, 24: 255, 25:20, 26: 255, 27: 18, 28: 255, 29: 255,
# 30:255, 31:255, 32:255, 33:255, 34:32, 35:28, 36: 21, 37:33, 38:5, 39:3, 40:6}
keys = list(nyu_or_dict.keys())
keys.sort()
print(keys)
# Dump the visualization palette: each line holds comma-separated floats in
# [0, 1]; print them scaled to 0-255 RGB triplets.
list_file = 'visualization_palette.txt'
with open(list_file, 'r') as f:
    a = f.read().splitlines()
for line in a:
    rgb = line.split(', ')
    rgb = [int(float(x)*255.) for x in rgb]
    print(rgb)
| train/debug_inside_train.ipynb |
# # The California housing dataset
#
# In this notebook, we will quickly present the dataset known as the
# "California housing dataset". This dataset can be fetched from internet using
# scikit-learn.
# +
from sklearn.datasets import fetch_california_housing
# Downloads (and caches) the dataset on first use; as_frame=True returns
# pandas objects instead of raw numpy arrays.
california_housing = fetch_california_housing(as_frame=True)
# -
# We can have a first look at the available description
print(california_housing.DESCR)
# Let's have an overview of the entire dataset.
california_housing.frame.head()
# As written in the description, the dataset contains aggregated data regarding
# each district in California. Let's have a close look at the features that can
# be used by a predictive model.
california_housing.data.head()
# In this dataset, we have information regarding the demography (income,
# population, house occupancy) in the districts, the location of the districts
# (latitude, longitude), and general information regarding the house in the
# districts (number of rooms, number of bedrooms, age of the house). Since
# these statistics are at the granularity of the district, so they correspond to
# averages or medians.
#
# Now, let's have a look to the target to be predicted.
california_housing.target.head()
# The target contains the median of the house value for each district.
# Therefore, this problem is a regression problem.
#
# We can now look in more detail at the data types and check whether the
# dataset contains any missing values.
california_housing.frame.info()
# We can see that:
#
# * the dataset contains 20,640 samples and 8 features;
# * all features are numerical features encoded as floating numbers;
# * there are no missing values.
#
# Let's have a quick look at the distribution of these features by plotting
# their histograms.
# +
import matplotlib.pyplot as plt
# One histogram per column of the full frame (features and target).
california_housing.frame.hist(figsize=(12, 10), bins=30, edgecolor="black")
plt.subplots_adjust(hspace=0.7, wspace=0.4)
# -
# We can first focus on features for which their distributions would be more or
# less expected.
#
# The median income is a distribution with a long tail. It means that the
# salary of people is more or less normally distributed, but there are some
# people earning very high salaries.
#
# Regarding the average house age, the distribution is more or less uniform.
#
# The target distribution has a long tail as well. In addition, we have a
# threshold-effect for high-valued houses: all houses with a price above 5 are
# given the value 5.
#
# Focusing on the average rooms, average bedrooms, average occupation, and
# population, the range of the data is large with unnoticeable bin for the
# largest values. It means that there are very high and few values (maybe they
# could be considered as outliers?). We can see this specificity looking at the
# statistics for these features:
# Summary statistics highlighting the heavy right tails of these features.
features_of_interest = ["AveRooms", "AveBedrms", "AveOccup", "Population"]
california_housing.frame[features_of_interest].describe()
# For each of these features, comparing the `max` and `75%` values, we can see
# a huge difference. It confirms the intuitions that there are a couple of
# extreme values.
#
# Up to now, we have discarded the longitude and latitude that carry geographical
# information. In short, the combination of this feature could help us to
# decide if there are locations associated with high-valued houses. Indeed,
# we could make a scatter plot where the x- and y-axis would be the latitude
# and longitude and the circle size and color would be linked with the house
# value in the district.
# +
import seaborn as sns
# Map view: position = district location, circle size and colour = median
# house value.
sns.scatterplot(data=california_housing.frame, x="Longitude", y="Latitude",
                size="MedHouseVal", hue="MedHouseVal",
                palette="viridis", alpha=0.5)
plt.legend(title="MedHouseVal", bbox_to_anchor=(1.05, 0.95),
           loc="upper left")
_ = plt.title("Median house value depending of\n their spatial location")
# If you are not familiar with the state of California, it is interesting to
# notice that all datapoints show a graphical representation of this state.
# We note that the high-valued houses tend to be located on the coast, where
# the big cities of California are found: San Diego, Los Angeles, San Jose,
# and San Francisco.
#
# We can do a random subsampling to have fewer data points to plot but that
# could still allow us to see these specificities.
# +
import numpy as np
# Fixed-seed subsample of 500 districts (without replacement) so the plot
# stays readable and reproducible.
rng = np.random.RandomState(0)
indices = rng.choice(np.arange(california_housing.frame.shape[0]), size=500,
                     replace=False)
# -
sns.scatterplot(data=california_housing.frame.iloc[indices],
                x="Longitude", y="Latitude",
                size="MedHouseVal", hue="MedHouseVal",
                palette="viridis", alpha=0.5)
plt.legend(title="MedHouseVal", bbox_to_anchor=(1.05, 1),
           loc="upper left")
_ = plt.title("Median house value depending of\n their spatial location")
# We can make a final analysis by making a pair plot of all features and the
# target but dropping the longitude and latitude. We will quantize the target
# such that we can create proper histogram.
# +
import pandas as pd

# Drop the unwanted columns
columns_drop = ["Longitude", "Latitude"]
subset = california_housing.frame.iloc[indices].drop(columns=columns_drop)
# Quantize the target and keep the midpoint for each interval
subset["MedHouseVal"] = pd.qcut(subset["MedHouseVal"], 6, retbins=False)
subset["MedHouseVal"] = subset["MedHouseVal"].apply(lambda x: x.mid)
# -
# Pairwise feature relationships coloured by the binned target value.
_ = sns.pairplot(data=subset, hue="MedHouseVal", palette="viridis")
# While it is always complicated to interpret a pairplot since there is a lot
# of data, here we can get a couple of intuitions. We can confirm that some
# features have extreme values (outliers?). We can as well see that the median
# income is helpful to distinguish high-valued from low-valued houses.
#
# Thus, creating a predictive model, we could expect the longitude, latitude,
# and the median income to be useful features to help at predicting the median
# house values.
#
# If you are curious, we created a linear predictive model below and show the
# values of the coefficients obtained via cross-validation
# +
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_validate
# Ridge regression with the regularization strength chosen by internal
# cross-validation over a log-spaced grid; features are standardized first.
alphas = np.logspace(-3, 1, num=30)
model = make_pipeline(StandardScaler(), RidgeCV(alphas=alphas))
cv_results = cross_validate(
    model, california_housing.data, california_housing.target,
    return_estimator=True, n_jobs=2)
# -
score = cv_results["test_score"]
print(f"R2 score: {score.mean():.3f} +/- {score.std():.3f}")
# +
import pandas as pd
# One row of fitted coefficients per cross-validation fold.
coefs = pd.DataFrame(
    [est[-1].coef_ for est in cv_results["estimator"]],
    columns=california_housing.feature_names
)
# -
# Box plot of per-fold coefficients; the spread across folds hints at how
# stable each coefficient is.
color = {"whiskers": "black", "medians": "black", "caps": "black"}
coefs.plot.box(vert=False, color=color)
plt.axvline(x=0, ymin=-1, ymax=1, color="black", linestyle="--")
_ = plt.title("Coefficients of Ridge models\n via cross-validation")
# It seems that the three features that we spotted earlier are also found
# important by this model. But be careful when interpreting these
# coefficients. See the module "Interpretation" for an in-depth treatment
# of such experiments.
| notebooks/datasets_california_housing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import json
import requests
import time
from scipy.stats import linregress
# Import API key
# (a local config.py must define api_key with a valid OpenWeatherMap key.)
from config import api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "../output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# +
# Build the list of cities to query: sample random coordinates across the
# globe and keep the nearest city to each, de-duplicated.
lat_lngs = []
cities = []

# Random (lat, lng) pairs spanning the full coordinate ranges.
lats = np.random.uniform(low=-90.000, high=90.000, size=1500)
lngs = np.random.uniform(low=-180.000, high=180.000, size=1500)
lat_lngs = zip(lats, lngs)

# Nearest city per coordinate pair; skip cities already collected.
for lat, lng in lat_lngs:
    nearest = citipy.nearest_city(lat, lng).city_name
    if nearest not in cities:
        cities.append(nearest)

# How many unique cities we ended up with.
print(len(cities))
# +
# Per-city result columns; they are kept strictly parallel, one entry per
# successfully fetched city.
city = []
cloudiness = []
country = []
date = []
humidity = []
lat = []
lng = []
max_temp = []
wind_speed = []

# Base url for the OpenWeatherMap current-weather endpoint.
url = "http://api.openweathermap.org/data/2.5/weather?"
units = "imperial"

# Counter for progress logging.
count = 0

for citi in cities:
    # Build query URL
    query_url = f"{url}appid={api_key}&q={citi}&units={units}"
    # weather data
    weather_json = requests.get(query_url).json()
    # increase count
    count += 1
    try:
        # Read every field BEFORE appending anything: the original appended as
        # it went, so a missing later key (bare except) left the parallel
        # lists with different lengths and silently misaligned the columns.
        name = weather_json["name"]
        clouds = weather_json["clouds"]["all"]
        ctry = weather_json["sys"]["country"]
        dt_val = weather_json["dt"]
        hum = weather_json["main"]["humidity"]
        tmax = weather_json["main"]["temp_max"]
        wind = weather_json["wind"]["speed"]
        latitude = weather_json["coord"]["lat"]
        longitude = weather_json["coord"]["lon"]
    except KeyError:
        # City not found (or response missing a field) -- skip it entirely.
        print("n/a")
        continue
    print(f"Processing Record {count} of {len(cities)}: {name}")
    # Append atomically so every list stays the same length.
    city.append(name)
    cloudiness.append(clouds)
    country.append(ctry)
    date.append(dt_val)
    humidity.append(hum)
    max_temp.append(tmax)
    wind_speed.append(wind)
    lat.append(latitude)
    lng.append(longitude)
# +
#Convert timestamp to date
from datetime import datetime
new_date = []
for dt in date:
    new_date.append(datetime.fromtimestamp(dt))
# Assemble the results into a dataframe (one row per successfully fetched city).
df = pd.DataFrame({
    "City": city,
    "Country": country,
    "Date": new_date,
    "Latitude": lat,
    "Longitude": lng,
    "Cloudiness": cloudiness,
    "Humidity": humidity,
    "Max Temperature": max_temp,
    "Wind Speed": wind_speed
})
# view number of items per column
df.count()
#Save dataframe as csv
df.to_csv("../output_data/cities.csv", encoding='utf-8', index=False)
# -
df
# +
# Latitude vs. max temperature for every fetched city.
#Scatterplot
plt.scatter(df["Latitude"], df["Max Temperature"])
#x and y axis labels & title
plt.title(f"City Latitude vs. Max Temperature {new_date[0]}")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature (F)")
#grid lines
plt.grid()
#show and save graph as picture
plt.savefig("../output_data/LatitudevsTemperature.png")
plt.show()
# +
# Latitude vs. humidity.
#Scatterplot
plt.scatter(df["Latitude"], df["Humidity"])
#x and y axis labels & title
plt.title(f"City Latitude vs. Humidity {new_date[0]}")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
#grid lines
plt.grid()
# show and save graph as picture
plt.savefig("../output_data/LatitudevsHumidity.png")
plt.show()
# +
# Latitude vs. cloudiness.
#Scatterplot
plt.scatter(df["Latitude"], df["Cloudiness"])
#x and y axis labels & title
plt.title(f"City Latitude vs. Cloudiness {new_date[0]}")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
#grid lines
plt.grid()
#show and save graph as picture
plt.savefig("../output_data/LatitudevsCloudiness.png")
plt.show()
# +
# Latitude vs. wind speed.
#Scatterplot
plt.scatter(df["Latitude"], df["Wind Speed"])
#x and y axis labels & title
plt.title(f"City Latitude vs. Wind Speed {new_date[0]}")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
#grid lines
plt.grid()
#show and save graph as picture
plt.savefig("../output_data/LatitudevsWindspeed.png")
plt.show()
# +
# Split rows into Northern (lat >= 0) and Southern (lat < 0) hemispheres and
# extract the columns used by the regression plots as int arrays.
# Vectorized with boolean masks: the original walked every row in Python and
# appended into ten lists -- same truncated-int values, one pass in pandas.
north = df[df["Latitude"] >= 0]
south = df[df["Latitude"] < 0]

# Kept for compatibility with any later cell that referenced it.
indexes = range(0, len(df["City"]))

#Northern Hemisphere Arrays (X-Value,Temp,Humidity,Cloudiness,Windspeed)
n_x_values = np.array(north["Latitude"], dtype="int")
n_y_values = np.array(north["Max Temperature"], dtype="int")
n_hmdy_values = np.array(north["Humidity"], dtype="int")
n_cldnss_values = np.array(north["Cloudiness"], dtype="int")
n_windspd_values = np.array(north["Wind Speed"], dtype="int")

#Southern Hemisphere Arrays (X-Value,Temp,Humidity,Cloudiness,Windspeed)
s_x_values = np.array(south["Latitude"], dtype="int")
s_y_values = np.array(south["Max Temperature"], dtype="int")
s_hmdy_values = np.array(south["Humidity"], dtype="int")
s_cldnss_values = np.array(south["Cloudiness"], dtype="int")
s_windspd_values = np.array(south["Wind Speed"], dtype="int")
# +
def _plot_lat_regression(x_values, y_values, title, ylabel, annot_xy, outfile, fontsize=15):
    """Scatter one weather variable against latitude, overlay a linear fit,
    annotate the fit equation, report r-squared, and save the figure.

    Parameters
    ----------
    x_values, y_values : arrays of latitudes and the variable to fit
    title, ylabel      : plot labels (the x label is always "Latitude")
    annot_xy           : (x, y) data coordinates for the equation annotation
    outfile            : path of the saved PNG
    fontsize           : annotation font size
    """
    plt.title(title)
    plt.xlabel("Latitude")
    plt.ylabel(ylabel)
    plt.scatter(x_values, y_values)  # scatter once (three originals plotted the points twice)
    (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
    regress_values = x_values * slope + intercept
    line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
    plt.plot(x_values, regress_values, "r-")
    plt.annotate(line_eq, annot_xy, fontsize=fontsize, color="red")
    # linregress returns the correlation coefficient r; square it for r-squared
    # (the original printed r itself while calling it "r-squared").
    print(f"The r-squared is: {rvalue**2}")
    plt.savefig(outfile)
    plt.show()

# Northern / Southern hemisphere fits. Output file names are kept exactly as
# before (including historical typos) so existing references keep working.
_plot_lat_regression(n_x_values, n_y_values,
                     "Northern Latitude Cities vs. Max Temperature",
                     "Max Temperature (F)", (6, 10),
                     "../output_data/NorthLatvsMaxTemp.png")
_plot_lat_regression(s_x_values, s_y_values,
                     "Southern Latitude Cities vs. Max Temperature",
                     "Max Temperature (F)", (-30, 50),
                     "../output_data/SouthLatvsTemp.png")
_plot_lat_regression(n_x_values, n_hmdy_values,
                     "Northern Latitude Cities vs. Humidity",
                     "Humidity (%)", (45, 10),
                     "../output_data/NorthLatvsHumidity.png")
_plot_lat_regression(s_x_values, s_hmdy_values,
                     "Southern Latitude Cities vs. Humidity",
                     "Humidity (%)", (-35, 55),
                     "../output_data/SouthLatvsHumidity.png", fontsize=20)
_plot_lat_regression(n_x_values, n_cldnss_values,
                     "Northern Latitude Cities vs. Cloudiness",
                     "Cloudiness (%)", (45, 55),
                     "../output_data/NorthLatvsCloudiness.png")
_plot_lat_regression(s_x_values, s_cldnss_values,
                     "Southern Latitude Cities vs. Cloudiness",
                     "Cloudiness (%)", (-45, 30),
                     "../output_data/SouthLatvCloudiness.png")
_plot_lat_regression(n_x_values, n_windspd_values,
                     "Northern Latitude Cities vs. Wind Speed",
                     "Wind Speed (mph)", (30, 25),
                     "../output_data/NorhtLatvsWind.png")
_plot_lat_regression(s_x_values, s_windspd_values,
                     "Southern Latitude Cities vs. Wind Speed",
                     "Wind Speed (mph)", (-30, 20),
                     "../output_data/SouthLatvsWind.png")
# -
| WeatherPy/WeatherPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercises
#
# This will be a notebook for you to work through the exercises during the workshop. Feel free to work on these at whatever pace you feel works for you, but I encourage you to work together! Edit the title of this notebook with your name because I will ask you to upload your final notebook to our shared github repository at the end of this workshop.
#
# Feel free to google the documentation for numpy, matplotlib, etc.
#
# Don't forget to start by importing any libraries you need.
import numpy as np
import astropy
import matplotlib.pyplot as plt
from scipy import integrate
# ### Day 1
#
# #### Exercise 1
#
# A. Create an array with 10 evenly spaced values in logspace ranging from 0.1 to 10,000.
#
# B. Print the following values: The first value in the array, the final value in the array, and the range of 5th-8th values.
#
# C. Append the numbers 10,001 and 10,002 (as floats) to the array. Make sure you define this!
#
# D. Divide your new array by 2.
#
# E. Reshape your array to be 3 x 4.
#
# F. Multiply your array by itself.
#
# G. Print out the number of dimensions and the maximum value.
# +
# A: 10 logarithmically spaced values from 0.1 (=10**-1) to 10,000 (=10**4).
array = np.logspace(np.log10(0.1), np.log10(10000), 10)
print(array)
# B: first value, final value, and the 5th-8th slice.
print(array[0])
print(array[-1])
print(array[5:8])
# C: append 10,001 and 10,002 as floats.
newarray = np.append(array, [10001., 10002.])
print(newarray)
# D: divide the new array by 2.
half = newarray / 2
print(half)
# E: reshape to 3 x 4.
reshaped = newarray.reshape(3, 4)
print(reshaped)
# F: multiply the array by itself, element-wise. (np.dot would collapse
# the array to a single scalar dot product, which is not "the array
# multiplied by itself".)
mult = newarray * newarray
print(mult)
# G: number of dimensions and maximum value. (.size is the element count;
# .ndim is the dimensionality the exercise asks for.)
print(newarray.ndim)
print(np.max(newarray))
# -
# ### Day 2
# #### Exercise 1
#
# A. Create an array containing the values 4, 0, 6, 5, 11, 14, 12, 14, 5, 16.
# B. Create a 10x2 array of zeros.
# C. Write a for loop that checks if each of the numbers in the first array squared is less than 100. If the statement is true, change that row of your zeros array to equal the number and its square. Hint: you can change the value of an array by stating "zerosarray[i] = [a number, a number squared]".
# D. Print out the final version of your zeros array.
#
# Hint: should you loop over the elements of the array or the indices of the array?
# +
# your solutions here
# -
# #### Exercise 2
#
# A. Write a function that takes an array of numbers and spits out the Gaussian distribution. Yes, there is a function for this in Python, but it's good to do this from scratch! This is the equation:
#
# $$ f(x) = \frac{1}{\sigma \sqrt{2\pi}} \exp{\frac{-(x - \mu)^2}{2\sigma^2}} $$
#
# (Pi is built into numpy, so call it as np.pi.)
#
# B. Call the function a few different times for different values of mu and sigma, between -10 < x < 10.
#
# C. Plot each version, making sure they are differentiated with different colors and/or linestyles and include a legend. Btw, here's a list of the customizations available in matplotlib:
#
# https://matplotlib.org/3.1.0/api/_as_gen/matplotlib.pyplot.plot.html
#
# https://matplotlib.org/gallery/color/named_colors.html
#
# D. Save your figure.
#
# If you have multiple lines with plt.plot(), Python will plot all of them together, unless you write plt.show() after each one. I want these all on one plot.
# +
# your solutions here
# -
# ### Day 3
#
# #### Exercise 1
#
# There is a file in this directory called "histogram_exercise.dat" which consists of of randomly generated samples from a Gaussian distribution with an unknown $\mu$ and $\sigma$. Using what you've learned about fitting data, load up this file using np.genfromtxt, fit a Gaussian curve to the data and plot both the curve and the histogram of the data. As always, label everything, play with the colors, and choose a judicious bin size.
#
# Hint: if you attempt to call a function from a library or package that hasn't been imported, you will get an error.
# +
# your solution here
# -
# #### Exercise 2
#
# Create a 1D interpolation along these arrays. Plot both the data (as points) and the interpolation (as a dotted line). Also plot the value of the interpolated function at x=325. What does the function look like to you?
# +
x = np.array([0., 50., 100., 150., 200., 250., 300., 350., 400., 450., 500])
y = np.array([0., 7.071, 10., 12.247, 14.142, 15.811, 17.321, 18.708, 20., 21.213, 22.361])
# solution here
# -
# ### Day 4
#
# #### Exercise 1
#
# Let's practice some more plotting skills, now incorporating units.
#
# A. Write a function that takes an array of frequencies and spits out the Planck distribution. That's this equation:
#
# $$ B(\nu, T) = \frac{2h\nu^3/c^2}{e^{\frac{h\nu}{k_B T}} - 1} $$
#
# This requires you to use the Planck constant, the Boltzmann constant, and the speed of light from astropy. Make sure they are all in cgs.
#
# B. Plot your function in log-log space for T = 25, 50, and 300 K. The most sensible frequency range is about 10^5 to 10^15 Hz. Hint: if your units are correct, your peak values of B(T) should be on the order of 10^-10. Make sure everything is labelled.
# +
# solution here
# -
# #### Exercise 2
#
# Let's put everything together now! Here's a link to the full documentation for FITSFigure, which will tell you all of the customizable options: http://aplpy.readthedocs.io/en/stable/api/aplpy.FITSFigure.html. Let's create a nice plot of M51 with a background optical image and X-ray contours overplotted.
#
# The data came from here if you're interested: http://chandra.harvard.edu/photo/openFITS/multiwavelength_data.html
#
# A. Using astropy, open the X-RAY data (m51_xray.fits). Flatten the data array and find its standard deviation, and call it sigma.
#
# B. Using aplpy, plot a colorscale image of the OPTICAL data. Choose a colormap that is visually appealing (list of them here: https://matplotlib.org/2.0.2/examples/color/colormaps_reference.html). Show the colorbar.
#
# C. Plot the X-ray data as contours above the optical image. Make the contours spring green with 80% opacity and dotted lines. Make the levels go from 2$\sigma$ to 10$\sigma$ in steps of 2$\sigma$. (It might be easier to define the levels array before show_contours, and set levels=levels.)
# +
# solution here
| Exercises_Nolan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyTorch simple example
#
# * article: <https://towardsdatascience.com/how-to-code-a-simple-neural-network-in-pytorch-for-absolute-beginners-8f5209c50fdd>
# * dataset: <https://www.kaggle.com/c/titanic/data>
# +
import pandas as pd
df = pd.read_csv("dat/titanic/train.csv")
df.head()
# -
from torch.utils.data import Dataset
import torch
class TitanicDataset(Dataset):
    """Titanic CSV dataset.

    In ``"train"`` mode the first column is treated as the label and the
    remaining columns as features; rows containing missing values are
    dropped. In any other mode the whole file is treated as features only.
    """

    def __init__(self, csvpath, mode="train"):
        self.mode = mode
        df = pd.read_csv(csvpath)
        """
        <------Some Data Preprocessing---------->
        Removing Null Values, Outliers and Encoding the categorical labels etc
        """
        if self.mode == "train":
            df = df.dropna()
            self.inp = df.iloc[:, 1:].values
            # reshape(-1, 1): dropna() changes the row count, so the original
            # hard-coded reshape(891, 1) crashed on any input that did not end
            # up with exactly 891 rows after cleaning.
            self.oup = df.iloc[:, 0].values.reshape(-1, 1)
        else:
            self.inp = df.values

    def __len__(self):
        return len(self.inp)

    def __getitem__(self, idx):
        if self.mode == "train":
            inpt = torch.Tensor(self.inp[idx])
            oupt = torch.Tensor(self.oup[idx])
            return {
                "inp": inpt,
                "oup": oupt,
            }
        else:
            inpt = torch.Tensor(self.inp[idx])
            return {
                "inp": inpt
            }
## initialize and load the dataSet
# DataLoader was never imported and BATCH_SIZE was undefined in the original,
# so this cell raised a NameError.
from torch.utils.data import DataLoader

BATCH_SIZE = 64

data = TitanicDataset("dat/titanic/train.csv")
data_train = DataLoader(dataset=data, batch_size=BATCH_SIZE, shuffle=False)
| wip/nn3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Class exercise #2:
# Creating a data frame
import pandas as pd
# Raw attributes for six friends, one parallel list per column.
names = ["Tomás", "Pauline", "Pablo", "Bjork", "Alan", "Juana"]
woman = [False, True, False, False, False, True]
ages = [32, 33, 28, 30, 32, 27]
country = ["Chile", "Senegal", "Spain", "Norway", "Peru", "Peru"]
education = ["Bach", "Bach", "Master", "PhD", "Bach", "Master"]

# Assemble the data frame column by column.
friends = pd.DataFrame({
    "Names": names,
    "Woman": woman,
    "Ages": ages,
    "Country": country,
    "Education": education,
})
friends
# Creating a list of (name, country) tuples — two equivalent approaches.
listofTuples=[(n,c) for (n,c) in zip(friends.Names, friends.Country)] # approach 1: comprehension over zip
listofTuples
NameCountryList = list(zip(names,country)) # approach 2: materialize the zip iterator directly
NameCountryList
# Implementation of a for loop to count how many peruvian there are in the data frame
# +
counterOfPeruvians = 0  # counter: start from 0 before accumulating
# Loop variable renamed: the original `for country in friends.Country`
# silently clobbered the `country` list defined in an earlier cell.
for nation in friends.Country:
    if nation == 'Peru':
        counterOfPeruvians += 1  # updating counter
# to see the results:
counterOfPeruvians
# -
friends.where(friends.Country=='Peru').dropna().shape[0] #shape is (rows, columns); [0] selects the row count
# Implementing of a for loop to get the count of men (using not in one solution and ~ in another one)
#
# +
# Variant 1: explicit comparison against False.
counterOfMen=0 #counter
for x in friends.Woman:
    if x==False:  # NOTE(review): `if not x` is the more idiomatic form
        counterOfMen +=1 #updating counter
#to see the results:
counterOfMen
# +
# Variant 2: idiomatic truthiness test with `not`.
counterOfMen=0 #counter
for woman in friends.Woman:
    if not woman:
        counterOfMen +=1 #updating counter
#to see the results:
counterOfMen
# +
# Variant 3: invert the whole boolean Series up front.
counterOfMen=0 #counter
for val in ~friends.Woman: # ~ (tilde) inverts every boolean in the Series
    if val: # no explicit comparison needed: `val` is already a boolean, so `if val` selects the True entries
        print(val) # prints True once per man found
        counterOfMen +=1 #updating counter
#to see the results:
counterOfMen
# -
| ex_controlOfEx.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Various Figure methods in the FRB Repository
# %matplotlib inline
# suppress warnings for these examples
import warnings
warnings.filterwarnings('ignore')
# +
# imports
from matplotlib import pyplot as plt
from frb import frb
from frb.figures import dm as ffdm
from frb.figures import utils as ffutils
from frb.figures import galaxies as ffgalaxies
# -
# # Load up
# Load two FRB objects by name from the frb package's internal catalog.
frb121102 = frb.FRB.by_name('FRB121102')
frb180924 = frb.FRB.by_name('FRB180924')
frbs = [frb121102, frb180924]
# # DM Cartoon
plt.clf()
# Fonts
ffutils.set_mplrc()
# NOTE(review): `fig` is created here but plt.subplots() below opens its own
# figure, so this one looks unused — confirm before removing.
fig = plt.figure(figsize=(7., 5))
f, (ax1,ax2) = plt.subplots(1,2,sharey=True)
# Dispersion-measure cartoon for FRB 180924; host_DM=50. is a hard-coded
# host-galaxy contribution (units per the frb package — verify).
ffdm.sub_cartoon(ax1, ax2, frb180924.coord, frb180924.z, host_DM=50., halos=False,
                 FRB_DM=frb180924.DM.value, yscl=0.88)
#
plt.show()
# # BPT
# Requires that SDSS_DR14_PM.fits has been downloaded
# ## Host galaxies
hosts = [ifrb.grab_host() for ifrb in frbs]
# ## Figure
plt.clf()
# Fonts
ffutils.set_mplrc()
#
fig = plt.figure(figsize=(7., 5))
ax = plt.gca()
#
ffgalaxies.sub_bpt(ax, hosts, ['b','r'], ['s','o'], SDSS_clr='Greys')
#
plt.show()
# # SFR vs. M*
# Requires PRIMUS data
plt.clf()
# Fonts
ffutils.set_mplrc()
#
fig = plt.figure(figsize=(7., 5))
ax = plt.gca()
#
ffgalaxies.sub_sfms(ax, hosts, ['b','r'], ['s','o'])
#
plt.show()
# # Color-magnitude
# Requires PRIMUS
plt.clf()
# Fonts
ffutils.set_mplrc()
#
fig = plt.figure(figsize=(7., 5))
ax = plt.gca()
#
ffgalaxies.sub_color_mag(ax, [hosts[1]], ['r'], ['o'])
#
plt.show()
| docs/nb/Figures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CS171 - Spring 2019 - Assignment 1
# ### Instructor: <NAME>
# ##<NAME>
# ##SID: 862012234
#
# In this first assignment you will explore a dataset, visualizing the dataset in various ways, and doing a preliminary analysis on the data.
#
# For this assignment we are going to use the functionality of Pandas (the library, *not* the unbearably cute animal): https://pandas.pydata.org/ in order to manipulate datasets.
# In addition to Pandas, we are going to use Matplotlib (https://matplotlib.org/) and Numpy (http://www.numpy.org/) and you may also find Seaborn (https://seaborn.pydata.org/) useful for some data visualization.
#
# Unless you are explicitly asked to *implement* a particular functionality, you may assume that you may use an existing implementation from the libraries above (or some other library that you may find, as long as you *document* it).
#
# Before you start, make sure you have installed all those packages in your local Jupyter instance, as follows:
#
# conda install numpy pandas matplotlib seaborn
#
# ## Academic Integrity
# Each assignment should be done individually. You may discuss general approaches with other students in the class, and ask questions to the TAs, but you must only submit work that is yours . If you receive help by any external sources (other than the TA and the instructor), you must properly credit those sources, and if the help is significant, the appropriate grade reduction will be applied. If you fail to do so, the instructor and the TAs are obligated to take the appropriate actions outlined at http://conduct.ucr.edu/policies/academicintegrity.html . Please read carefully the UCR academic integrity policies included in the link.
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import random as rand
import math
# ## Question 0: Getting real data [0%]
#
# In this assignment you are going to use data from the UCI Machine Learning repository ( https://archive.ics.uci.edu/ml/index.php ). In particular, you are going to use the famous Iris dataset: https://archive.ics.uci.edu/ml/datasets/Iris
#
data_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'label']
data = pd.read_csv('iris.data', names = data_names)
data.head()
# ## Question 1: Data Visualization [20%]
#
# ### Question 1a: Scatterplots [10%]
# 1. Plot the scatterplot of all pairs of features and color the points by class label [5%]
# 2. Which pair of features is (visually) the most correlated? [2.5%]
# 3. Can you think of a reason why looking at this plot would be useful in a task where we would have to classify flowers by label? [2.5%]
# +
# The iris file stores 50 rows per class, in order: setosa, versicolor,
# virginica — so positional slicing splits the classes.
setosa = data[0:50]
versicolor = data[50:100]
virginica = data[100:150]
sIndex = 1  # running subplot index (1-based, row-major)
plt.figure(1, figsize=(25,25))
# 4 numeric features -> 4x4 grid of all pairwise scatter plots,
# one color per class.
for yname in data_names:
    if yname != 'label':
        for xname in data_names:
            if xname != 'label':
                plt.subplot(4,4, sIndex)
                plt.scatter(setosa[xname], setosa[yname],s=50, c='red')
                plt.scatter(versicolor[xname], versicolor[yname],s=50, c = 'green')
                plt.scatter(virginica[xname], virginica[yname],s=50, c='blue')
                sIndex+=1
plt.show()
# -
# -
# Your answer here:
# 2. Judging from the scatter plots above, the most strongly correlated pair of features is petal width and petal length.
# 3. This plot is useful for a classification task: the classes form visually separable clusters in several feature pairs, so those features can be used to predict a flower's label.
# ### Question 1b: Boxplot and Histogram [10%]
#
# 1. Plot the boxplot for each feature of the dataset (you can put all boxplots on a single figure) [4%]
# 2. Plot the histogram only for petal length [4%]
# 3. Does the histogram for petal length give more information than the boxplot? If so, what information? [2%]
# +
#your code here
#1
# Style used for outlier markers in every boxplot below.
green_diamond = dict(markerfacecolor='g', marker='D')

def boxplot(data_set, title):
    """Draw one horizontal boxplot of `data_set` in the top-left cell of a
    4x4 grid, with outliers rendered as green diamonds, and show it."""
    plt.figure(1, figsize=(10,10))
    # The original drove this with an `Index` counter that was reset to 1 on
    # every call and incremented after use — dead code, now inlined.
    plt.subplot(4, 4, 1)
    plt.boxplot(data_set, vert=False, flierprops=green_diamond)
    plt.title(title, fontsize=10)
    plt.tight_layout()
    plt.show()
#Setosa
boxplot(setosa['sepal_length'], "Setosa Sepal Length")
boxplot(setosa['sepal_width'], "Setosa Sepal Width")
boxplot(setosa['petal_length'], "Setosa Petal Length ")
boxplot(setosa['petal_width'], "Setosa Petal Width")
#
boxplot(versicolor['sepal_length'], "Versicolor Sepal Length")
boxplot(versicolor['sepal_width'],"Versicolor Sepal Width")
boxplot(versicolor['petal_length'], "Versicolor Petal Length ")
boxplot(versicolor['petal_width'], "Versicolor Petal Width")
#
boxplot(virginica['sepal_length'], "virginica Sepal Length")
boxplot(virginica['sepal_width'], "virginica Sepal Width")
boxplot(virginica['petal_length'], "virginica Petal Length ")
boxplot(virginica['petal_width'], "virginica Petal Width")
# data_1=[setosa['sepal_length'],setosa['sepal_width'], setosa['petal_length'], setosa['petal_width']]
# plt.boxplot(data_1,flierprops=green_diamond)
# plt.show()
##Question 1 Part2
# Petal-length histogram per species: 10 bins, normalized to a density.
n, bins, patches = plt.hist(setosa['petal_length'], 10, density=True, facecolor='r', label="Setosa Petal length")
plt.ylabel("Setosa Petal Length")
plt.grid(True)
plt.show()
n, bins, patches = plt.hist(versicolor['petal_length'], 10, density=True, facecolor='g', label="Versicolor Petal length")
# Copy-paste bug fixed: this plot shows versicolor, not setosa.
plt.ylabel("Versicolor Petal Length")
plt.grid(True)
plt.show()
n, bins, patches = plt.hist(virginica['petal_length'], 10, density=True, facecolor='b', label="Virginica Petal length")
# Copy-paste bug fixed: this plot shows virginica, not setosa.
plt.ylabel("Virginica Petal Length")
plt.grid(True)
plt.show()
# -
# ## Your answer here:
#
# 3. The histogram plot of the petal length helps us visualize the rising and falling edges of data much easily while the boxplot help us find the outliers in the data much easily, but since the histogram is much more easy to visualize, I would say histogram does show better information then boxplot.
# ## Question 2: Distance computation [40%]
#
#
# ### Question 2a: Implement the Lp distance function [20%]
# 1. Write code that implements the Lp distance function between two data points as we saw it in class [15%]
# 2. Verify that it is correct by comparing it for p=2 against an existing implementation in Numpy for the two selected data points below. Note that the difference of the distances may not be exactly 0 due to numerical precision issues. [5%]
# +
#your code here
def distance_lp(x, y, p):
    """Lp (Minkowski) distance between two points x and y.

    Non-numeric components (e.g. the string class label in the iris rows)
    are skipped. The original only skipped the pair when BOTH sides were
    strings: a string paired with a number either raised a TypeError or
    was silently dropped depending on which side the string was on.
    """
    total = 0.0
    for a, b in zip(x, y):
        if isinstance(a, str) or isinstance(b, str):
            continue  # ignore non-numeric features such as the label column
        total += abs(a - b) ** p
    return total ** (1 / p)
# -
# ### Question 2b: Compute the distance matrix between all data points [20%]
# 1. Compute an $N\times N$ distance matrix between all data points (where $N$ is the number of data points) [5%]
# 2. Plot the above matrix and include a colorbar. [5%]
# 3. What is the minimum number of distance computations that you can do in order to populate every value of this matrix? (note: it is OK if in the first two questions you do all the $N^2$ computations) [5%]
# 4. Note that the data points in your dataset are sorted by class. What do you observe in the distance matrix? [5%]
# +
#your code here
def LPMatrix(data_set, p):
    """Return the N x N matrix of pairwise Lp distances between the rows
    of `data_set` (N^2 calls; the matrix is symmetric with a zero diagonal)."""
    return np.array([[distance_lp(row, other, p) for other in data_set]
                     for row in data_set])
I_matrix = LPMatrix(data.values,1)
plt.figure(1, figsize=(7,7))
plt.imshow(I_matrix, cmap='viridis')
plt.colorbar()
plt.show()
# -
# Your answer here:
# 3. No. of times : 150 x 150 : 22500 distance function calls as total elements in iris data is 150
# 4. I observe a symmetrical data
# ## Question 3: Data Sampling [40%]
#
# Sometimes datasets are too big, or come in a streaming fashion, and it is impossible for us to process every single data point, so we have to resort to sampling methods. In this question, you will implement the popular "reservoir sampling" method, which is mostly used to obtain a uniform random sample of a data stream. Subsequently, you will experiment with sampling directly all the data and conducting stratified sampling (by class label) and observe the results in the data distribution.
# ### Question 3a: Reservoir Sampling [20%]
# 1. Implement reservoir sampling as we saw it in class. Create a 'reservoir_sampling' function because it will be useful for the next question. [15%]
# 2. Run reservoir sampling with reservoir size $M = 15$ and plot the histogram of the petal length feature for the sampled dataset [5%]
# + active=""
# #your code here
# #def reservoir_sampling(stream,M):
# +
#Code exaple used from c++ geeks for geeks : https://www.geeksforgeeks.org/reservoir-sampling/
def reservoir_sampling(stream, M, n_res):
    """Uniform random sample of M items from stream[0:n_res] (Algorithm R).

    Bug fixed: the original resumed its while-loop with i == M-1 (left over
    from the fill loop), so stream[M-1] was re-inserted with probability 1
    and the inclusion probabilities were skewed. The candidate index now
    starts at M, giving every item the correct M/n_res chance of selection.
    """
    reservoir = [0] * M
    for i in range(M):          # fill the reservoir with the first M items
        reservoir[i] = stream[i]
    for i in range(M, n_res):   # each later item replaces a slot with prob M/(i+1)
        j = rand.randrange(i + 1)
        if j < M:
            reservoir[j] = stream[i]
    return reservoir
res= reservoir_sampling(data.values,15 ,len(data.values))
temp = [0] * 15
for i in range(0, len(res)):
temp[i]=res[i][2]
print(temp)
plt.xlabel('petal_length')
plt.hist(temp)
plt.grid(True)
plt.show()
# -
# ### Question 3b: Stratified Sampling [20%]
# 1. Implement stratified sampling by class label, and within each stratum use the reservoir sampling function you implemented. [15%]
# 2. Run your stratified sampler with $M=5$ samples per class (so that we have 15 data points in total) and plot the histogram of the petal length feature for the sampled dataset [2.5%]
# 3. Do you observe any difference between the stratified and the non-stratified histograms? Which one resembles the original petal length distribution more closely? In order to answer this question you may want to run both sampling procedures a few times and observe which one gives a more accurate result on average. [2.5%]
# +
#your code here
def strat_sample(stream, stream_size, M):
    # Thin wrapper: draws a reservoir sample from a single stratum.
    # NOTE(review): the parameter names look swapped relative to
    # reservoir_sampling(stream, M, n_res) — `stream_size` lands in the
    # reservoir-size slot and `M` in the stream-length slot. Call sites pass
    # (stream, 5, 50), i.e. 5 samples from 50 items, so behavior is correct
    # as written; only the naming is misleading.
    return reservoir_sampling(stream, stream_size, M)
# setosa = data[0:50]
# versicolor = data[50:100]
# virginica = data[100:150]
# Reload the raw iris rows; the file stores each class in a block of 50.
data_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'label']
data = pd.read_csv('iris.data', names=data_names)
data = data.values
# Petal length (column index 2) per class.
setosa = [0] * 50
versicolor = [0] * 50
virginica = [0] * 50
# range(50) — the original range(0, 49) stopped at index 48 and left the
# 50th petal length of every class stuck at the 0 placeholder.
for i in range(50):
    setosa[i] = data[i][2]
    versicolor[i] = data[i + 50][2]
    virginica[i] = data[i + 100][2]
# 5 reservoir samples per stratum -> 15 points total.
temp2 = strat_sample(setosa, 5, 50) + strat_sample(versicolor, 5, 50) + strat_sample(virginica, 5, 50)
print(temp2)
plt.hist(temp2)
plt.grid(True)
plt.show()
# -
# Your answer here:
# 3. The major difference between the stratified and reservoir sampling is the fact that for stratified data sampling, we are actually using the values only from the data while not worrying much about the random values.
| Assignment1/Assignment-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="VCunpkHJVliv"
# # Layers and Blocks
#
# ## Construct a MLP
# + attributes={"classes": [], "id": "", "n": "1"} id="kY3e65dfVliw"
import torch
import torch.nn as nn
net = nn.Sequential(nn.Linear(20, 256),
nn.ReLU(),
nn.Linear(256,10)
)
# + [markdown] id="kWjfJwnNVliz"
# ### Forward
# + id="ObD0glGNVli0" outputId="4ded6100-82d8-4de2-c582-038efae7b475" colab={"base_uri": "https://localhost:8080/", "height": 102}
x = torch.rand(size=(2, 20))
def init_weights(m):
    """Initialize every Linear layer: weights ~ N(0, 0.01), bias zeroed.

    Intended for use with ``net.apply(init_weights)``, which visits every
    submodule.
    """
    # isinstance() (rather than `type(m) ==`) is the idiomatic check and
    # also covers nn.Linear subclasses.
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight.data, std=0.01)
        # Zero the bias explicitly (nn.Linear does NOT default it to zero).
        m.bias.data.fill_(0.0)
net.apply(init_weights)
net(x)
# + [markdown] id="oh2zrQYDVli4"
# ## Implement the Same MLP with A Custom Block
# + attributes={"classes": [], "id": "", "n": "1"} id="VsrAG-zSVli4"
class MLP(nn.Module):
    """Two-layer perceptron: 20 -> 256 -> ReLU -> 10."""

    def __init__(self):
        super(MLP, self).__init__()
        # Layers are created in this exact order so RNG-based weight
        # initialization stays reproducible under a fixed seed.
        self.hidden = nn.Linear(20, 256)
        self.relu = nn.ReLU()
        self.output = nn.Linear(256, 10)

    def forward(self, x):
        hidden_act = self.relu(self.hidden(x))
        return self.output(hidden_act)
# + [markdown] id="U_hixkLGVli7"
# ### Forward
# + attributes={"classes": [], "id": "", "n": "2"} id="UQh3i0QSVli7" outputId="b4ec3374-b9ac-48d7-d7a5-ae7d6f162d77" colab={"base_uri": "https://localhost:8080/", "height": 85}
net = MLP()
net.apply(init_weights)
net(x)
# + [markdown] id="iv0YBgsyVljE"
# ## Blocks with Code
# + attributes={"classes": [], "id": "", "n": "5"} id="cq3aMvDAVljE"
class FancyMLP(nn.Module):
    """Demo block: a fixed random weight matrix, layer reuse, and plain
    Python control flow inside forward()."""

    def __init__(self):
        super(FancyMLP, self).__init__()
        # Random weight parameters are not iterated during training —
        # NOTE(review): wrapping in nn.Parameter actually registers this
        # tensor with the module's parameters; it stays effectively frozen
        # only because forward() goes through .data (no gradient flows).
        self.rand_weight = nn.Parameter(torch.empty(20, 20).uniform_(0, 1))
        self.fc1 = nn.Linear(20, 20)
        self.fc2 = nn.Linear(20, 256)
        self.relu = nn.ReLU()

    def forward(self, x):
        x = self.fc1(x)
        # This layer will not be updated during training (.data detaches it).
        x = self.relu(torch.matmul(x, torch.autograd.Variable(self.rand_weight).data) + 1)
        # Reuse the fully connected layer.
        x = self.fc2(x)
        # Scale the activations into a fixed norm band before reducing.
        while x.norm().item() > 1:
            x /= 2
        if x.norm().item() < 0.8:
            x *= 10
        return x.sum()
# + [markdown] id="DM5jh_ijVljH"
# ### Forward
# + attributes={"classes": [], "id": "", "n": "6"} id="PCrQadDiVljI" outputId="0f29300c-0dc8-42ea-91b4-9b5a93400a83" colab={"base_uri": "https://localhost:8080/", "height": 34}
net = FancyMLP()
net.apply(init_weights)
net(x)
# + [markdown] id="0j1MapC7VljL"
# ## Mix Things Together
# + attributes={"classes": [], "id": "", "n": "7"} id="BBpFp0JdVljM" outputId="a738ee07-e985-4311-c0f6-964389994ca3" colab={"base_uri": "https://localhost:8080/", "height": 306}
class NestMLP(nn.Module):
    """A block that nests an nn.Sequential inside a custom Module.

    Pipeline: Sequential(20 -> 64 -> ReLU -> 32 -> ReLU) -> Linear(32 -> 16) -> ReLU.
    """

    def __init__(self, **kwargs):
        super(NestMLP, self).__init__(**kwargs)
        # Attribute names (net/fc/relu) are preserved so state_dict keys match.
        self.net = nn.Sequential(
            nn.Linear(20, 64), nn.ReLU(),
            nn.Linear(64, 32), nn.ReLU(),
        )
        self.fc = nn.Linear(32, 16)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Run x through the nested Sequential, project to 16 dims, ReLU."""
        features = self.net(x)
        projected = self.fc(features)
        return self.relu(projected)
chimera = NestMLP()
chimera.apply(init_weights)  # initializes all nested submodules in place
print(chimera)  # prints the module hierarchy, including the nested Sequential
chimera(x)
| L10 Layers, Parameters, GPUs/L10_2_Blocks_and_Layers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import os
from IPython.display import display, Markdown, HTML
# %matplotlib inline
import tkinter as tk
from tkinter import filedialog, messagebox
from openpyxl import load_workbook
# Hidden Tk root window so only the file dialogs are shown.
root = tk.Tk()
root.withdraw()
root.attributes("-topmost", True)  # keep the dialogs in front of other windows
messagebox.showinfo('OnSSET', 'Open the input file with calibrated GIS data')
input_file = filedialog.askopenfilename()
df = pd.read_csv(input_file)
# Sort settlements by 2018 population (largest first) so the cumulative
# population share below selects the most-populous settlements first.
df.sort_values(by=['Pop2018'], ascending=False, inplace=True)
cumulative_pop = df['Pop2018'].cumsum()/df['Pop2018'].sum()
# First, update the population and urbanization rate
# Projection years, and the number of years elapsed since the previous
# projection year (2025 is 7 years after the 2018 base year).
years = [2025, 2030, 2040, 2050, 2060, 2070]
time_step = {2025: 7,
             2030: 5,
             2040: 10,
             2050: 10,
             2060: 10,
             2070: 10}
# +
# National share of population living in urban settlements, per year.
urbanization_rate = {2018: 0.2006,  # Do not change the 2018 value, update the rest
                     2025: 0.263,
                     2030: 0.300,
                     2040: 0.375,
                     2050: 0.450,
                     2060: 0.525,
                     2070: 0.600}
# National population totals (people), per year.
population = {2018: 107535000,  # Do not change the 2018 value, update the rest
              2025: 134208459,
              2030: 149774797,
              2040: 181383802,
              2050: 211590946,
              2060: 237724184,
              2070: 261168812}
# Average number of people per household, per year.
household_size = {2018: 4.53,  # Do not change the 2018 value, update the rest
                  2025: 4.26,
                  2030: 4.07,
                  2040: 3.69,
                  2050: 3.30,
                  2060: 3.00,
                  2070: 2.96}
# -
# Project settlement populations forward year by year: scale each
# settlement's previous projection by the national urban/rural growth
# factor implied by the totals and urbanization rates above.
for year in years:
    # National urban population growth factor since the previous projection year.
    urban_growth = (urbanization_rate[year] * population[year])/(urbanization_rate[year-time_step[year]] * population[year-time_step[year]])
    # National rural population growth factor since the previous projection year.
    rural_growth = ((1-urbanization_rate[year]) * population[year])/((1-urbanization_rate[year-time_step[year]]) * population[year-time_step[year]])
    # NOTE(review): IsUrban == 2 appears to encode urban settlements and
    # values < 2 rural — confirm against the GIS calibration step.
    df.loc[df['IsUrban'] == 2, 'Pop{}'.format(year)] = df['Pop{}'.format(year-time_step[year])] * urban_growth
    df.loc[df['IsUrban'] < 2, 'Pop{}'.format(year)] = df['Pop{}'.format(year-time_step[year])] * rural_growth
    df['NumPeoplePerHH' + '{}'.format(year)] = household_size[year]
# +
# Annual residential electricity demand per household (kWh/household/year)
# for each consumption tier, urban and rural.
tier_1_urban = 19.1 # kWh/household/year
tier_2_urban = 229
tier_3_urban = 619
tier_4_urban = 1915
tier_5_urban = 3785
tier_1_rural = 5.9 # kWh/household/year
tier_2_rural = 181
tier_3_rural = 572
tier_4_rural = 1867
tier_5_rural = 3737
# -
# Cumulative population shares per tier: the value at key k is the share of
# the (population-ranked) cumulative population that ends up in tier k or higher.
tier_split_2025 = {5: 0.03, # Share of population in Tier 5
                   4: 0.14, # Share of population in Tier 5 or Tier 4
                   3: 0.14, # Share of population in Tier 5 or Tier 4 or Tier 3
                   2: 0.29, # Share of population in Tier 5 or Tier 4 or Tier 3 or Tier 2
                   1: 1} # Share of population in Tier 5 or Tier 4 or Tier 3 or Tier 2 or Tier 1 (Always = 1)
tier_split_2030 = {5: 0.03,
                   4: 0.20,
                   3: 0.21,
                   2: 0.37,
                   1: 1}
tier_split_2040 = {5: 0.05,
                   4: 0.38,
                   3: 0.45,
                   2: 0.63,
                   1: 1}
tier_split_2050 = {5: 0.07,
                   4: 0.49,
                   3: 0.90,
                   2: 1,
                   1: 1}
tier_split_2060 = {5: 0.11,
                   4: 0.59,
                   3: 1,
                   2: 1,
                   1: 1}
tier_split_2070 = {5: 0.16,
                   4: 0.67,
                   3: 1,
                   2: 1,
                   1: 1}
tier_splits = {2025: tier_split_2025,
               2030: tier_split_2030,
               2040: tier_split_2040,
               2050: tier_split_2050,
               2060: tier_split_2060,
               2070: tier_split_2070}
# Assign each settlement a residential demand tier per projection year.
# Settlements are sorted by 2018 population (descending), so
# `cumulative_pop <= share` selects the most-populous settlements first.
# Tiers are applied in ascending order and higher tiers overwrite lower
# ones, exactly as the original copy-pasted per-tier blocks did.
urban_demand = {1: tier_1_urban, 2: tier_2_urban, 3: tier_3_urban,
                4: tier_4_urban, 5: tier_5_urban}
rural_demand = {1: tier_1_rural, 2: tier_2_rural, 3: tier_3_rural,
                4: tier_4_rural, 5: tier_5_rural}
for year in years:
    tier_split = tier_splits[year]
    demand_col = 'ResidentialDemandTierCustom{}'.format(year)
    tier_col = 'Tier{}'.format(year)
    # Tier 1 is the default for every settlement (tier_split[1] is always 1).
    df.loc[df['IsUrban'] == 2, demand_col] = urban_demand[1] / household_size[year]
    df.loc[df['IsUrban'] < 2, demand_col] = rural_demand[1] / household_size[year]
    df[tier_col] = 1
    # Tiers 2-5: each applies to the top tier_split[tier] share of the
    # cumulative population, overwriting the previous (lower) tier.
    for tier in (2, 3, 4, 5):
        in_tier = cumulative_pop <= tier_split[tier]
        df.loc[(df['IsUrban'] == 2) & in_tier, demand_col] = urban_demand[tier] / household_size[year]
        df.loc[(df['IsUrban'] < 2) & in_tier, demand_col] = rural_demand[tier] / household_size[year]
        df.loc[in_tier, tier_col] = tier
scenario_name = 'BAU_Updated_Test'  # used as the output file name
messagebox.showinfo('OnSSET', 'Browse to the folder where you want to save the outputs')
# Restore the original row order before writing (rows were sorted by Pop2018 above).
df.sort_index(inplace=True)
output_dir = filedialog.askdirectory()
output_dir_results = os.path.join(output_dir, '{}.csv'.format(scenario_name))
df.to_csv(output_dir_results, index=False)
| Demand_update.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Question 5:Customer segmentation RFM analysis
# #### Submitted by <NAME>
# #### Load the required libraries and read data from Online retail excel file
import pandas as pd
import numpy as np
# BUG FIX: the original `import matplotlib as plt` bound the top-level
# matplotlib package to `plt`, which does not expose figure()/plot();
# the conventional pyplot alias is what the notebook intends.
import matplotlib.pyplot as plt
import seaborn as sn
# %matplotlib inline
# Load the UCI Online Retail transactions workbook from the working directory.
Retail_df = pd.read_excel("Online Retail.xlsx")
# #### Convert the series invoice date data in datetime format and count the unique number of attributes
# +
#Convert the date in YYYY-mm-dd HH:MM format and store that date in 'Date' column
Retail_df['Date']=pd.to_datetime(Retail_df['InvoiceDate'], format = '%Y-%m-%d %H:%M:%S')
# Alternative string-formatted representation of the Date column, kept for reference:
#Retail_df['Date']=Retail_df['Date'].apply(lambda x: x.strftime('%Y-%d-%m %H:%M'))
# Helper used throughout the notebook to summarize column cardinalities.
def unique_counts(Retail_df):
    """Print the number of distinct values in every column of *Retail_df*."""
    for column in Retail_df.columns:
        print(column, ": ", Retail_df[column].nunique())
unique_counts(Retail_df)
# -
# #### Calculate Total Price by multiplying units and quantity and store that value in separate column Total_Price
# +
# Line-item revenue = quantity x unit price.
Retail_df['Total_Price']=Retail_df['Quantity']*Retail_df['UnitPrice']
Retail_df.head(10)
# -
# #### Only consider rows without NaN values and store the data in dataset Online_retail_df
# +
# np.isfinite keeps rows whose CustomerID is a finite number, i.e. drops NaN
# (presumably guest/anonymous transactions — verify against the data source).
Online_retail_df = Retail_df[np.isfinite(Retail_df['CustomerID'])]
# -
#
# #### Count the unique no of attributes in Retail data
# +
unique_counts(Online_retail_df)
# -
#For the sake of calculating recency and frequency, drop the rows with negative values of Quantity and store the data in final_df
# Negative quantities are returns/cancellations and would distort RFM scores.
final_retail = Online_retail_df[Online_retail_df['Quantity'] > 0]
final_retail.shape
unique_counts(final_retail)
type(final_retail['Date'].max())
final_retail['Date'].min()
# ### Q5.a ) Calculate RFM value of each customer
# #### Assumption: In this final dataset , I have removed all the transaction with negative quantity value
#Calculate recency and frequency
import datetime as dt
# Reference date for recency — presumably chosen just after the last invoice
# date in the dataset; verify against final_retail['Date'].max().
NOW = dt.datetime(2011,12,10)
# Per customer: recency = days since most recent purchase, frequency = number
# of invoice line items, monetary value = total spend.
# NOTE(review): len(x) on InvoiceNo counts line items, not distinct invoices —
# use nunique() if distinct invoices are intended.
rfmTable = final_retail.groupby('CustomerID').agg({'Date': lambda x: (NOW - x.max()).days, 'InvoiceNo': lambda x: len(x),'Total_Price': lambda x: x.sum()})
rfmTable['Date'] = rfmTable['Date'].astype(int)
rfmTable.rename(columns={'Date': 'recency',
                         'InvoiceNo': 'frequency',
                         'Total_Price': 'monetary_value'}, inplace=True)
#rfmTable = pd.merge(mTable, rfTable, on="CustomerID",how = 'inner')
rfmTable.shape
rfmTable.head(10)
# ### Q5.b) Find top 10 customers based on frequency and monetary values
# #### Sorting first on frequency and then on Monetary value
rfmTable.sort_values(['frequency', 'monetary_value'], ascending=[False, False], inplace=True)
# #### Below are top ten customers after sorting
# +
rfmTable.head(10)
# -
# ### Q5.c) Find optimal number of segments using dendrograms and elbow methods
# #### Segregate the customers into 3 segments
# ### Normalising the features
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
# Standardize recency/frequency/monetary so no single feature dominates KMeans.
scaler = StandardScaler()
X_scaled = scaler.fit_transform( rfmTable )
clusters = KMeans(3) # 3 clusters
clusters.fit( X_scaled )
import random
random.seed(9008)
# Random sample of 20 scaled points — a full clustermap on all customers
# would be unreadable and slow.
X_sample = np.array(random.sample(X_scaled.tolist(),20))
#type(X_scaled)
rfmTable["cluster_new"] = clusters.labels_
rfmTable
type(X_scaled)
rfmTable.groupby('cluster_new' ).mean()
# +
#Dendrogram built with random samples from X_scaled
# -
rfmTable.drop( 'cluster_new', axis = 1, inplace = True )
cmap = sn.cubehelix_palette(as_cmap=True, rot=-.3, light=1)
g = sn.clustermap(X_sample, cmap=cmap, linewidths=.5)
#Let's take one more (larger) sample to validate the dendrogram
random.seed(9005)
X_sample = np.array(random.sample(X_scaled.tolist(),30))
cmap = sn.cubehelix_palette(as_cmap=True, rot=-.3, light=1)
g = sn.clustermap(X_sample, cmap=cmap, linewidths=.5)
# ### The dendrogram shows there are 3-6 distinct clusters.
# ### I have taken a random sample of 20-30 data points to build the dendrogram
# ### Consider Elbow method to verify the cluster segmentation
# +
# Fit KMeans for k = 1..9 and record inertia (within-cluster SSE) for the elbow plot.
cluster_range = range( 1, 10 )
cluster_errors = []
for num_clusters in cluster_range:
    clusters = KMeans( num_clusters )
    clusters.fit( X_scaled )
    cluster_errors.append( clusters.inertia_ )
# -
clusters_df = pd.DataFrame( { "num_clusters":cluster_range, "cluster_errors": cluster_errors } )
# +
clusters_df.head(10)
# -
import matplotlib.pyplot as plt
plt.figure(figsize=(12,6))
plt.plot( clusters_df.num_clusters, clusters_df.cluster_errors, marker = "o" )
# Final model with k = 3, as suggested by the dendrogram and elbow plot.
# NOTE(review): KMeans uses random initialization, so cluster label numbers
# (0/1/2) are not stable across runs — the per-cluster descriptions below
# assume one particular labeling.
clusters = KMeans(3) # 3 clusters
clusters.fit( X_scaled )
rfmTable["cluster_label"] = clusters.labels_
rfmTable.groupby('cluster_label').mean()
rfmTable_0 = rfmTable[rfmTable.cluster_label == 0]
rfmTable_0.head(10)
# ### All the customers with high recency and low frequency and low monetary value are segmented in this cluster.These are the least profitable customers for the company.
rfmTable_1 = rfmTable[rfmTable.cluster_label == 1]
rfmTable_1.head(10)
# #### Each customer is assigned with the cluster label.
# #### This cluster has customers that are potential customers with decent frequency and monetary value.Company should work towards them to convert them to most profitable customers
rfmTable_2 = rfmTable[rfmTable.cluster_label == 2]
rfmTable_2.head(10)
# #### Each customer is assigned with the cluster label.
# #### All the customers with low recency and high frequency and and monetary value are segmented in this Cluster .These are the most profitable and highly valued customers company should look at.
#
rfmTable_0.mean()
rfmTable_1.mean()
rfmTable_2.mean()
clusters = KMeans(3) # 3 clusters
clusters.fit( X_scaled )
rfmTable.head(10)
rfmTable.groupby('cluster_label').mean()
| RFM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# See:
# https://aws.amazon.com/blogs/machine-learning/building-training-and-deploying-fastai-models-with-amazon-sagemaker/
#
# for alternative way to do Realtime Predictor
# # Deploying and Monitoring
#
# In this notebook we will deploy the network traffic classification model that we have trained in the previous steps to Amazon SageMaker hosting, which will expose a fully-managed real-time endpoint to execute inferences.
#
# Amazon SageMaker is adding new capabilities that monitor ML models while in production and detect deviations in data quality in comparison to a baseline dataset (e.g. training data set). They enable you to capture the metadata and the input and output for invocations of the models that you deploy with Amazon SageMaker. They also enable you to analyze the data and monitor its quality.
#
# We will deploy the model to a real-time endpoint with data capture enabled and start collecting some inference inputs/outputs. Then, we will create a baseline and finally enable model monitoring to compare inference data with respect to the baseline and analyze the quality.
# First, we set some variables, including the AWS region we are working in, the IAM execution role of the notebook instance and the Amazon S3 bucket where we will store data and outputs.
# +
import os
import boto3
import sagemaker
# AWS region, notebook execution role and default bucket used for all
# artifacts in this notebook.
region = boto3.Session().region_name
role = sagemaker.get_execution_role()
sagemaker_session = sagemaker.Session()
bucket_name = sagemaker_session.default_bucket()
prefix = 'aim362'  # S3 key prefix for everything this notebook writes
print(region)
print(role)
print(bucket_name)
# -
# ## Deployment with Data Capture
#
# We are going to deploy the latest network traffic classification model that we have trained. To deploy a model using the SM Python SDK, we need to make sure we have the Amazon S3 URI where the model artifacts are stored and the URI of the Docker container that will be used for hosting this model.
#
# First, let's determine the Amazon S3 URI of the model artifacts by using a couple of utility functions which query Amazon SageMaker service to get the latest training job whose name starts with 'nw-traffic-classification-xgb' and then describing the training job.
# +
import boto3
def get_latest_training_job_name(base_job_name):
    """Return the name of the most recent *completed* training job whose
    name contains *base_job_name*; raise if none exists."""
    sm_client = boto3.client('sagemaker')
    summaries = sm_client.list_training_jobs(
        NameContains=base_job_name,
        SortBy='CreationTime',
        SortOrder='Descending',
        StatusEquals='Completed',
    )['TrainingJobSummaries']
    if not summaries:
        raise Exception('Training job not found.')
    # Results are sorted newest-first, so the first summary is the latest job.
    return summaries[0]['TrainingJobName']
def get_training_job_s3_model_artifacts(job_name):
    """Describe training job *job_name* and return the S3 URI of its model artifacts."""
    description = boto3.client('sagemaker').describe_training_job(TrainingJobName=job_name)
    return description['ModelArtifacts']['S3ModelArtifacts']
# Resolve the newest completed training job and the S3 location of its model.
latest_training_job_name = get_latest_training_job_name('nw-traffic-classification-xgb')
print(latest_training_job_name)
model_path = get_training_job_s3_model_artifacts(latest_training_job_name)
print(model_path)
# -
# For this model, we are going to use the same XGBoost Docker container we used for training, which also offers inference capabilities. As a consequence, we can just create the XGBoostModel object of the Amazon SageMaker Python SDK and then invoke its .deploy() method to execute deployment.
# We will also provide an entrypoint script to be invoked at deployment/inference time. The purpose of this code is deserializing and loading the XGB model. In addition, we are re-defining the output functions as we want to extract the class value from the default array output. For example, for class 3 the XGB container would output [3.] but we want to extract only the 3 value.
# !pygmentize source_dir/deploy_xgboost.py
# Now we are ready to create the XGBoostModel object.
# +
from time import gmtime, strftime
from sagemaker.xgboost import XGBoostModel
# Timestamped model name keeps repeated notebook runs from colliding.
model_name = 'nw-traffic-classification-xgb-model-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
code_location = 's3://{0}/{1}/code'.format(bucket_name, prefix)
# deploy_xgboost.py provides model_fn/output_fn for the hosting container.
xgboost_model = XGBoostModel(model_data=model_path,
                             entry_point='deploy_xgboost.py',
                             source_dir='source_dir/',
                             name=model_name,
                             code_location=code_location,
                             framework_version='0.90-2',
                             role=role,
                             sagemaker_session=sagemaker_session)
# -
# Finally we create an endpoint with data capture enabled, for monitoring the model data quality.
# Data capture is enabled at enpoint configuration level for the Amazon SageMaker real-time endpoint. You can choose to capture the request payload, the response payload or both and captured data is stored in JSON format.
# +
from time import gmtime, strftime
from sagemaker.model_monitor import DataCaptureConfig
# S3 destination where request/response payloads will be captured.
s3_capture_upload_path = 's3://{}/{}/monitoring/datacapture'.format(bucket_name, prefix)
print(s3_capture_upload_path)
endpoint_name = 'nw-traffic-classification-xgb-ep-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
# sampling_percentage=100 captures every invocation (fine for a demo;
# lower it for high-traffic production endpoints).
pred = xgboost_model.deploy(initial_instance_count=1,
                            instance_type='ml.m5.xlarge',
                            endpoint_name=endpoint_name,
                            data_capture_config=DataCaptureConfig(
                                enable_capture=True,
                                sampling_percentage=100,
                                destination_s3_uri=s3_capture_upload_path))
# -
# After the deployment has been completed, we can leverage on the RealTimePredictor object to execute HTTPs requests against the deployed endpoint and get inference results.
# +
from sagemaker.predictor import RealTimePredictor
# NOTE(review): RealTimePredictor and attribute-style content_type/accept are
# the SageMaker Python SDK v1 API; SDK v2 renamed them — confirm the pinned
# SDK version before upgrading.
pred = RealTimePredictor(endpoint_name)
pred.content_type = 'text/csv'
pred.accept = 'text/csv'
# Each test_values string is one CSV row of network-flow features; the
# expected class comes from the labeled dataset.
# Expecting class 4
test_values = "80,1056736,3,4,20,964,20,0,6.666666667,11.54700538,964,0,241.0,482.0,931.1691850999999,6.6241710320000005,176122.6667,\
431204.4454,1056315,2,394,197.0,275.77164469999997,392,2,1056733,352244.3333,609743.1115,1056315,24,0,0,0,0,72,92,\
2.8389304419999997,3.78524059,0,964,123.0,339.8873763,115523.4286,0,0,1,1,0,0,0,1,1.0,140.5714286,6.666666667,\
241.0,0.0,0.0,0.0,0.0,0.0,0.0,3,20,4,964,8192,211,1,20,0.0,0.0,0,0,0.0,0.0,0,0,20,2,2018,1,0,1,0"
result = pred.predict(test_values)
print(result)
# Expecting class 7
test_values = "80,10151,2,0,0,0,0,0,0.0,0.0,0,0,0.0,0.0,0.0,197.0249237,10151.0,0.0,10151,10151,10151,10151.0,0.0,10151,10151,0,0.0,\
0.0,0,0,0,0,0,0,40,0,197.0249237,0.0,0,0,0.0,0.0,0.0,0,0,0,0,1,0,0,0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,2,0,0,0,32738,\
-1,0,20,0.0,0.0,0,0,0.0,0.0,0,0,21,2,2018,2,0,1,0"
result = pred.predict(test_values)
print(result)
# Expecting class 0
test_values = "80,54322832,2,0,0,0,0,0,0.0,0.0,0,0,0.0,0.0,0.0,0.0368169318,54322832.0,0.0,54322832,54322832,54322832,54322832.0,0.0,\
54322832,54322832,0,0.0,0.0,0,0,0,0,0,0,40,0,0.0368169318,0.0,0,0,0.0,0.0,0.0,0,0,0,0,1,0,0,0,0.0,0.0,0.0,0.0,0.0,0.0,\
0.0,0.0,0.0,0.0,2,0,0,0,279,-1,0,20,0.0,0.0,0,0,0.0,0.0,0,0,23,2,2018,4,0,1,0"
result = pred.predict(test_values)
print(result)
# -
# Now let's list the data capture files stored in S3. You should expect to see different files from different time periods organized based on the hour in which the invocation occurred.
#
# **Note that the delivery of capture data to Amazon S3 can require a couple of minutes so next cell might error. If this happens, please retry after a minute.**
# +
s3_client = boto3.Session().client('s3')
# Captured payloads land under <prefix>/monitoring/datacapture/<endpoint>/...
current_endpoint_capture_prefix = '{}/monitoring/datacapture/{}'.format(prefix, endpoint_name)
result = s3_client.list_objects(Bucket=bucket_name, Prefix=current_endpoint_capture_prefix)
capture_files = ['s3://{0}/{1}'.format(bucket_name, capture_file.get("Key")) for capture_file in result.get('Contents')]
print("Capture Files: ")
print("\n ".join(capture_files))
# -
# We can also read the contents of one of these files and see how capture records are organized in JSON lines format.
# !aws s3 cp {capture_files[0]} datacapture/captured_data_example.jsonl
# !head datacapture/captured_data_example.jsonl
# In addition, we can better understand the content of each JSON line like follows:
# +
import json
# Capture files are JSON Lines; pretty-print the first record to show its structure.
with open ("datacapture/captured_data_example.jsonl", "r") as myfile:
    data=myfile.read()
print(json.dumps(json.loads(data.split('\n')[0]), indent=2))
# -
# For each inference request, we get input data, output data and some metadata like the inference time captured and saved.
# ## Baselining
# From our validation dataset let's ask Amazon SageMaker to suggest a set of baseline constraints and generate descriptive statistics for our features. Note that we are using the validation dataset for this workshop to make sure baselining time is short, and that file extension needs to be changed since the baselining jobs require .CSV file extension as default.
# In reality, you might be willing to use a larger dataset as baseline.
# +
import boto3
s3 = boto3.resource('s3')
# NOTE(review): this hard-codes 'aim362' instead of re-using the `prefix`
# variable defined above — keep them in sync.
bucket_key_prefix = "aim362/data/val/"
bucket = s3.Bucket(bucket_name)
# Copy the validation set into the baselining folder, renaming .part -> .csv
# because the baselining job expects the .csv extension by default.
for s3_object in bucket.objects.filter(Prefix=bucket_key_prefix):
    target_key = s3_object.key.replace('data/val/', 'monitoring/baselining/data/').replace('.part', '.csv')
    print('Copying {0} to {1} ...'.format(s3_object.key, target_key))
    copy_source = {
        'Bucket': bucket_name,
        'Key': s3_object.key
    }
    s3.Bucket(bucket_name).copy(copy_source, target_key)
# +
# Input data and output location for the baselining job.
baseline_data_path = 's3://{0}/{1}/monitoring/baselining/data'.format(bucket_name, prefix)
baseline_results_path = 's3://{0}/{1}/monitoring/baselining/results'.format(bucket_name, prefix)
print(baseline_data_path)
print(baseline_results_path)
# -
# Please note that running the baselining job will require 8-10 minutes. In the meantime, you can take a look at the Deequ library, used to execute these analyses with the default Model Monitor container: https://github.com/awslabs/deequ
# +
from sagemaker.model_monitor import DefaultModelMonitor
from sagemaker.model_monitor.dataset_format import DatasetFormat
# Default (Deequ-based) monitor; the instance settings below size the
# processing job that computes statistics and constraints.
my_default_monitor = DefaultModelMonitor(
    role=role,
    instance_count=1,
    instance_type='ml.c5.4xlarge',
    volume_size_in_gb=20,
    max_runtime_in_seconds=3600,
)
# -
# Blocking call (~8-10 minutes): writes statistics.json and constraints.json
# to baseline_results_path.
my_default_monitor.suggest_baseline(
    baseline_dataset=baseline_data_path,
    dataset_format=DatasetFormat.csv(header=True),
    output_s3_uri=baseline_results_path,
    wait=True
)
# Let's display the statistics that were generated by the baselining job.
# +
import pandas as pd
baseline_job = my_default_monitor.latest_baselining_job
# NOTE(review): pd.io.json.json_normalize is deprecated since pandas 1.0 —
# prefer pd.json_normalize once the environment's pandas version allows it.
schema_df = pd.io.json.json_normalize(baseline_job.baseline_statistics().body_dict["features"])
schema_df.head(10)
# -
# Then, we can also visualize the constraints.
constraints_df = pd.io.json.json_normalize(baseline_job.suggested_constraints().body_dict["features"])
constraints_df.head(10)
# #### Results
#
# The baselining job has inspected the validation dataset and generated constraints and statistics, that will be used to monitor our endpoint.
# ## Generating violations artificially
# In order to get some result relevant to monitoring analysis, we are going to generate artificially some inferences with feature values causing specific violations, and then invoke the endpoint with this data.
#
# This requires about 2 minutes for 1000 inferences.
# +
import time
import numpy as np
dist_values = np.random.normal(1, 0.2, 1000)
# Tot Fwd Pkts -> set to float (expected integer) [second feature]
# Flow Duration -> set to empty (missing value) [third feature]
# Fwd Pkt Len Mean -> sampled from random normal distribution [ninth feature]
artificial_values = "22,,40.3,0,0,0,0,0,{0},0.0,0,0,0.0,0.0,0.0,0.0368169318,54322832.0,0.0,54322832,54322832,54322832,54322832.0,0.0,\
54322832,54322832,0,0.0,0.0,0,0,0,0,0,0,40,0,0.0368169318,0.0,0,0,0.0,0.0,0.0,0,0,0,0,1,0,0,0,0.0,0.0,0.0,0.0,0.0,0.0,\
0.0,0.0,0.0,0.0,2,0,0,0,279,-1,0,20,0.0,0.0,0,0,0.0,0.0,0,0,23,2,2018,4,0,1,0"
# Send 1000 inferences, throttled so the endpoint is not overwhelmed.
for i in range(1000):
    pred.predict(artificial_values.format(str(dist_values[i])))
    time.sleep(0.15)
    if i > 0 and i % 100 == 0 :
        print('Executed {0} inferences.'.format(i))
# -
# ## Monitoring
# Once we have built the baseline for our data, we can enable endpoint monitoring by creating a monitoring schedule.
# When the schedule fires, a monitoring job will be kicked-off and will inspect the data captured at the endpoint with respect to the baseline; then it will generate some report files that can be used to analyze monitoring results.
# ### Create Monitoring Schedule
# Let's create the monitoring schedule for the previously created endpoint. When we create the schedule, we can also specify two scripts that will preprocess the records before the analysis takes place and execute post-processing at the end.
# For this example, we are not going to use a record preprocessor, and we are just specifying a post-processor that outputs some text for demo purposes.
# !pygmentize postprocessor.py
# We copy the script to Amazon S3 and specify the path where the monitoring reports will be saved to.
# +
import boto3
# Upload the post-analytics script and define where monitoring reports go.
monitoring_code_prefix = '{0}/monitoring/code'.format(prefix)
print(monitoring_code_prefix)
boto3.Session().resource('s3').Bucket(bucket_name).Object(monitoring_code_prefix + '/postprocessor.py').upload_file('postprocessor.py')
postprocessor_path = 's3://{0}/{1}/monitoring/code/postprocessor.py'.format(bucket_name, prefix)
print(postprocessor_path)
reports_path = 's3://{0}/{1}/monitoring/reports'.format(bucket_name, prefix)
print(reports_path)
# -
# Finally, we create the monitoring schedule with hourly schedule execution.
# +
from sagemaker.model_monitor import CronExpressionGenerator
from time import gmtime, strftime
endpoint_name = pred.endpoint
mon_schedule_name = 'nw-traffic-classification-xgb-mon-sch-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# Hourly schedule comparing captured traffic against the suggested baseline;
# CloudWatch metrics allow alarming on detected violations.
my_default_monitor.create_monitoring_schedule(
    monitor_schedule_name=mon_schedule_name,
    endpoint_input=endpoint_name,
    post_analytics_processor_script=postprocessor_path,
    output_s3_uri=reports_path,
    statistics=my_default_monitor.baseline_statistics(),
    constraints=my_default_monitor.suggested_constraints(),
    schedule_cron_expression=CronExpressionGenerator.hourly(),
    enable_cloudwatch_metrics=True
)
# -
# ### Describe Monitoring Schedule
desc_schedule_result = my_default_monitor.describe_schedule()  # current schedule status/config
desc_schedule_result
# ### Delete Monitoring Schedule
#
# Once the schedule is created, it will kick of jobs at specified intervals. Note that if you are kicking this off after creating the hourly schedule, you might find the executions empty.
# You might have to wait till you cross the hour boundary (in UTC) to see executions kick off. Since we don't want to wait for the hour in this example we can delete the schedule and use the code in next steps to simulate what will happen when a schedule is triggered, by running an Amazon SageMaker Processing Job.
# Note: this is just for the purpose of running this example.
my_default_monitor.delete_monitoring_schedule()
# ### Triggering execution manually
#
# In order to trigger the execution manually, we first get all paths to data capture, baseline statistics, baseline constraints, etc.
# Then, we use a utility function, defined in <a href="./monitoringjob_utils.py">monitoringjob_utils.py</a>, to run the processing job.
# +
result = s3_client.list_objects(Bucket=bucket_name, Prefix=current_endpoint_capture_prefix)
capture_files = ['s3://{0}/{1}'.format(bucket_name, capture_file.get("Key")) for capture_file in result.get('Contents')]
print("Capture Files: ")
print("\n ".join(capture_files))
# Parent "folder" of the most recent capture file (strip the file name).
data_capture_path = capture_files[len(capture_files) - 1][: capture_files[len(capture_files) - 1].rfind('/')]
statistics_path = baseline_results_path + '/statistics.json'
constraints_path = baseline_results_path + '/constraints.json'
print(data_capture_path)
print(postprocessor_path)
print(statistics_path)
print(constraints_path)
print(reports_path)
# +
from monitoringjob_utils import run_model_monitor_job_processor
# Simulates a scheduled execution by launching the monitoring Processing Job directly.
run_model_monitor_job_processor(region, 'ml.m5.xlarge', role, data_capture_path, statistics_path, constraints_path, reports_path,
                                postprocessor_path=postprocessor_path)
# -
# ### Analysis
# When the monitoring job completes, monitoring reports are saved to Amazon S3. Let's list the generated reports.
# +
s3_client = boto3.Session().client('s3')
monitoring_reports_prefix = '{}/monitoring/reports/{}'.format(prefix, pred.endpoint)
result = s3_client.list_objects(Bucket=bucket_name, Prefix=monitoring_reports_prefix)
# list_objects returns no 'Contents' key when the prefix is empty, so
# result.get('Contents') is None and the comprehension raises. Catch
# Exception explicitly instead of a bare `except:` (which would also
# swallow KeyboardInterrupt/SystemExit).
try:
    monitoring_reports = ['s3://{0}/{1}'.format(bucket_name, capture_file.get("Key")) for capture_file in result.get('Contents')]
    print("Monitoring Reports Files: ")
    print("\n ".join(monitoring_reports))
except Exception:
    print('No monitoring reports found.')
# -
# We then copy monitoring reports locally.
# !aws s3 cp {monitoring_reports[0]} monitoring/
# !aws s3 cp {monitoring_reports[1]} monitoring/
# !aws s3 cp {monitoring_reports[2]} monitoring/
# Let's display the violations identified by the monitoring execution.
# +
import pandas as pd

# None (not the deprecated -1) disables column-width truncation in pandas >= 1.0.
pd.set_option('display.max_colwidth', None)

# Use a context manager so the report file handle is closed deterministically
# (the original left it open), and the non-deprecated pd.json_normalize.
with open('monitoring/constraint_violations.json', 'r') as violations_file:
    data = violations_file.read()

violations_df = pd.json_normalize(json.loads(data)['violations'])
violations_df.head(10)
# -
# We can see that the violations identified correspond to the ones that we artificially generated and that there is a feature that is generating some drift from the baseline.
# ### Advanced Hints
# You might be asking yourself what are the type of violations that are monitored and how drift from the baseline is computed.
#
# The types of violations monitored are listed here: https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-interpreting-violations.html. Most of them use configurable thresholds, that are specified in the monitoring configuration section of the baseline constraints JSON. Let's take a look at this configuration from the baseline constraints file:
# !aws s3 cp {statistics_path} baseline/
# !aws s3 cp {constraints_path} baseline/
# +
import json
# Pretty-print the monitoring_config section (thresholds, comparison method)
# from the suggested baseline constraints.
with open ("baseline/constraints.json", "r") as myfile:
    data=myfile.read()
print(json.dumps(json.loads(data)['monitoring_config'], indent=2))
# -
# This configuration is intepreted when the monitoring job is executed and used to compare captured data to the baseline. If you want to customize this section, you will have to update the **constraints.json** file and upload it back to Amazon S3 before launching the monitoring job.
#
# When data distributions are compared to detect potential drift, you can choose to use either a _Simple_ or _Robust_ comparison method, where the latter has to be preferred when dealing with small datasets. Additional info: https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-byoc-constraints.html.
# ## Delete Endpoint
# Finally we can delete the endpoint to free-up resources.
pred.delete_endpoint()  # tear down the real-time endpoint (stops hosting charges)
pred.delete_model()  # remove the model resource created at deployment time
# ## References
#
# A Realistic Cyber Defense Dataset (CSE-CIC-IDS2018) https://registry.opendata.aws/cse-cic-ids2018/
| 09_deploy/wip/deploy_and_monitor/deploy_and_monitor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="jdJhs7VHWJiO" colab_type="code" colab={}
# + id="mQaRtOALWrF-" colab_type="code" colab={}
# + id="7sazlWPAVwVj" colab_type="code" colab={}
from google.colab import drive
import pandas as pd
import numpy as np
import datadotworld as dw
# + id="6vRmtJZgWyhA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="f5f438ab-80b6-4812-e204-9df1247c79c3" executionInfo={"status": "ok", "timestamp": 1582735068204, "user_tz": -60, "elapsed": 28549, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAloXx03jmJimASQtdBNUlhrg7jX1jPcGZw8kkP=s64", "userId": "13921298845918023581"}}
# Mount Google Drive so the notebook can read/write files under /content/drive.
drive.mount("/content/drive")
# + id="PuIGdcXBXBxr" colab_type="code" colab={}
# + id="VADuONmjXSos" colab_type="code" colab={}
# !mkdir data
# + id="rATXh4GOXU0X" colab_type="code" colab={}
# !echo 'data' > .gitignore
# + id="IrqlKqHoXlXN" colab_type="code" colab={}
# !git add .gitignore
# + id="UWEZ4rUHXy8n" colab_type="code" colab={}
# Load the Men's Shoe Prices dataset from data.world and inspect it.
data = dw.load_dataset('datafiniti/mens-shoe-prices')

# +
df = data.dataframes['7004_1']
df.shape

# +
df.sample(5)

# +
df.columns

# +
df.prices_currency.unique()

# +
df.prices_currency.value_counts(normalize=True)

# +
# Keep only the rows priced in US dollars.
df_usd = df[ df.prices_currency == 'USD' ].copy()
df_usd.shape

# +
# NOTE: the np.float alias was removed in NumPy 1.24 -- use the builtin float.
df_usd['prices_amountmin'] = df_usd.prices_amountmin.astype(float)
df_usd['prices_amountmin'].hist()

# +
filter_max = np.percentile( df_usd['prices_amountmin'], 99)  # 99% of shoes cost this much or less (~$895)
filter_max

# +
# Drop the top-1% price outliers before plotting the distribution.
df_usd_filter = df_usd[ df_usd['prices_amountmin'] < filter_max ]

# +
df_usd_filter.prices_amountmin.hist(bins=100)
# + id="mTzCeoo8akx7" colab_type="code" colab={}
# !git add matrix_one/day3.ipynb
# + id="s8df1_sfbSbl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="0260fab2-912e-4e48-cc76-487cdb5712f7" executionInfo={"status": "ok", "timestamp": 1582736335303, "user_tz": -60, "elapsed": 8097, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAloXx03jmJimASQtdBNUlhrg7jX1jPcGZw8kkP=s64", "userId": "13921298845918023581"}}
# !git commit -m "Read Men's Shoe Prices dataset from data.world"
# + id="MCiJDHNybkBP" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "Daninik"
# + id="JQwP6Ji1b68F" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="b6412141-4ac1-4746-b138-f98b14018cfb" executionInfo={"status": "ok", "timestamp": 1582736593995, "user_tz": -60, "elapsed": 9774, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAloXx03jmJimASQtdBNUlhrg7jX1jPcGZw8kkP=s64", "userId": "13921298845918023581"}}
# !git push -u origin master
# + id="Krz2FPRRcDjc" colab_type="code" colab={}
| matrix_one/day3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + run_control={"frozen": false, "read_only": false}
import file_transfer.datamover as dm
# + run_control={"frozen": false, "read_only": false}
import os
import h5py
# + run_control={"frozen": false, "read_only": false}
# Connect to the "lw-enram" S3 bucket with the matching AWS profile.
s3handle = dm.S3EnramHandler(bucket_name="lw-enram", profile_name="lw-enram")
# + run_control={"frozen": false, "read_only": false}
s3handle.bucket.objects.all()
# -
# #### Temp
from file_transfer.creds import URL, LOGIN, PASSWORD
# Transfer a small sample (limit=5) of vertical-profile ("_vp_") files from
# Baltrad to S3, overwriting any existing copies.
btos = dm.BaltradToS3(URL, LOGIN, PASSWORD, "<PASSWORD>", profile_name="lw-enram")
btos.transfer(name_match="_vp_", overwrite=True,
              limit=5, verbose=True)
# Files that were transferred in the call above.
btos.transferred
# Repackage the transferred files into zipped archives on S3.
s3handle.create_zip_version(btos.transferred)
import shutil
# Clean up the local download directories created during the transfer.
shutil.rmtree(os.path.join(".", "cz"))
# NOTE(review): "./cz" was already removed by the line above, so this call
# will raise FileNotFoundError if the cells are run in order -- confirm intent.
shutil.rmtree(os.path.join(".", "cz", "brd", "2017", "09"))
#
os.removedirs(os.path.join(".", "cz", "brd", "2017"))
# ## Remove corrupted h5 files on the repo and remove all ZIP-files on the data repo
# + run_control={"frozen": false, "read_only": false}
# Walk every object in the bucket: delete unreadable .h5 files (outside the
# excluded countries and radars) and delete every .zip archive so the
# archives can be rebuilt afterwards.
corrupted = []
country_exclude = ["be", "ch", "cz", "dk", "es"]
sat_exclude = ["boo", "drs", "eis", "emd", "ess", "fld", "hnr", "mem", "neu", "nhb", "anj", "ika", "kes"]
for file in s3handle.bucket.objects.all():
    if file.key.endswith(".h5") and not file.key.split("/")[0] in country_exclude and not file.key.split("/")[1] in sat_exclude:
        # Download the file locally so h5py can try to open it.
        s3handle.download_file(file.key)
        try:
            # Context manager closes the handle; the original leaked it, which
            # can make the os.remove below fail on some platforms.
            with h5py.File(file.key, mode="r"):
                pass
        except Exception:  # narrowed from bare `except:` so Ctrl-C still aborts
            corrupted.append(file.key)
            file.delete()
        # Always remove the local download, readable or not.
        os.remove(file.key)
    elif file.key.endswith(".zip"):
        file.delete()

# + run_control={"frozen": false, "read_only": false}
corrupted[-10:]
# -
# ## Recreate all the ZIP folders
# Load the existing h5 files and use them to create the zip files; we can use the coverage file information to do so, as explained in the datamover documentation
# https://github.com/enram/data-repository/blob/master/file_transfer/tutorial_datamover.ipynb
# + run_control={"frozen": false, "read_only": false}
# Rebuild the monthly ZIP archives from the h5 files currently on the bucket,
# using the coverage counts to know which months exist.
s3enram = dm.S3EnramHandler("lw-enram", profile_name="lw-enram")
# + run_control={"frozen": false, "read_only": false}
s3enram.create_zip_version(s3enram.count_enram_coverage(level="month"))
# + run_control={"frozen": false, "read_only": false}
| file_transfer/ad_hoc_management/remove_corrupted_h5_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mn-Salinity relationship
#
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import netCDF4 as nc
import pandas as pd
import glob
import warnings
warnings.filterwarnings('ignore')
import datetime
import xarray as xr
# %matplotlib inline
# -
# #### Parameters:
# +
# domain dimensions:
# Index bounds of the Arctic sub-domain within the full ANHA12 grid; the
# extracted fields have shape (isize, jsize) = (700, 640).
imin, imax = 1479, 2179
jmin, jmax = 159, 799
isize = imax - imin
jsize = jmax - jmin

# NOTE(review): year/month do not appear to be used later in this notebook.
year = 2015
month = 8

# Mn model results location and ANHA12 salinity (http://knossos.eas.ualberta.ca/anha/anhatable.php)
folder_salinity = f'/data/brogalla/ANHA12/salinity_ave/'
folder_ref_2009 = f'/data/brogalla/run_storage/Mn-reference-202110/ANHA12_ref-2009_20211012/'
folder_ref_2015 = f'/data/brogalla/run_storage/Mn-reference-202110/ANHA12_ref-2015_20211012/'
folder_cleanice_2009 = f'/data/brogalla/run_storage/Mn-clean-ice-202110/ANHA12_clean-ice-2009_20211023/'
folder_cleanice_2015 = f'/data/brogalla/run_storage/Mn-clean-ice-202110/ANHA12_clean-ice-2015_20211023/'

# colours:
obs_CB = '#b23333'     # observations, Canada Basin
obs_CAA = '#d89999'    # observations, CAA
mod_CB = '#006f99'     # model, Canada Basin
mod_CAA = '#99c5d6'    # model, CAA
land_color = "#8b7765"
# -
# #### Load files:
# ANHA12 grid:
# Mesh file provides coordinates, bathymetry, and the land/ocean mask.
mesh = nc.Dataset('/ocean/brogalla/GEOTRACES/data/ANHA12/ANHA12_mesh1.nc')
mesh_lon = np.array(mesh.variables['nav_lon'])
mesh_lat = np.array(mesh.variables['nav_lat'])
mesh_bathy = np.array(mesh.variables['hdept'][0])
# tmask: assumed 1 = ocean, 0 = land (NEMO t-point mask) -- TODO confirm.
tmask = np.array(mesh.variables['tmask'][0,:,:,:])
# Masks out ocean cells, leaving only land values (useful for plotting land).
Z_masked = np.ma.masked_where((tmask > 0.1), tmask)
# Model results
# Mn model results format:
c = nc.Dataset(f'{folder_ref_2009}ANHA12_EXH006_2009_monthly.nc', 'r')
lat_model = np.array(c.variables['nav_lat'])
lon_model = np.array(c.variables['nav_lon'])
depths_model = np.array(c.variables['deptht'])
# Observations
# +
# Concentration of dMn in 2015 from Colombo et al., 2020
Manuel = pd.read_csv('/ocean/brogalla/GEOTRACES/data/DMn_nmolL-1_BR-edited.csv')
Manuel_station_names = Manuel['Station'].astype('string')
Manuel_depths = Manuel['Depth'].astype('float').values
Manuel_dMn_nmolkg = Manuel['nmol kg-1'].astype('float').values
Manuel_dMn_nmolL = Manuel['nmol L-1'].astype('float')
Manuel_salinity = Manuel['Salinity'].astype('float')
Manuel_density = Manuel_dMn_nmolL / Manuel_dMn_nmolkg
# Fit of Mn:salinity from river observations in Colombo et al. 2019:
S_obs = np.arange(0, 40, 1)
dmn_obs_CB = -0.601*S_obs + 21.9
dmn_obs_CAA = -1.26*S_obs + 45.2
# -
# GEOTRACES stations information from Colombo et al., 2020
Pb_data = pd.read_csv('/ocean/brogalla/GEOTRACES/data/Pb-paper-data.csv')
stn_names = Pb_data['Station names'].dropna().astype('str')
lons = Pb_data['station lon'].astype('float').dropna().values
lats = Pb_data['Station lat'].astype('float').dropna().values
# +
# Concentration of dMn in the Beaufort Sea in 2009.
### Thesis doesn't have a list of lats and lons of station locations --- Jeffrey Charter's thesis does (2012)
IPY = pd.read_csv('/ocean/brogalla/GEOTRACES/data/Nari_Sim_dMn_data.csv')
IPY_names_full = IPY['Station name'].astype('string').values
IPY_depths = IPY['Depth [m]'].astype('float').values
IPY_dMn_nmolkg = IPY['DMn [ nmol/kg]'].astype('float').values
IPY_lons_full = -1*IPY['Longitude'].astype('float').values
IPY_lats_full = IPY['Latitude'].astype('float').values
IPY_salinity = IPY['Salinity [psu]'].astype('float').values
# Unique entries (maintaining order)
IPY_lons = np.array(list(dict.fromkeys(IPY_lons_full)))
IPY_lats = np.array(list(dict.fromkeys(IPY_lats_full)))
IPY_names = np.array(list(dict.fromkeys(IPY_names_full)))
# Convert nmol / kg to nmol / L
IPY_density = IPY['Density'].astype('float').values / 1000 # kg / L
IPY_depths_cruise_sheet = IPY['Depth [m] cruise sheet'].astype('float').values
IPY_dMn_nmolL = IPY_dMn_nmolkg * IPY_density
# -
# #### Define functions:
def calculate_average_2009(folder_ref_2009, folder_salinity):
    """Average dissolved Mn and salinity fields over the 2009 cruise period.

    Parameters
    ----------
    folder_ref_2009 : str
        Folder containing the 5-day Mn model output files for 2009.
    folder_salinity : str
        Folder containing the 5-day ANHA12 salinity (gridT) files.

    Returns
    -------
    tuple of ndarray
        (mn_dis_ref_2009, sal_ref_2009): cruise-period means on the
        (50, 700, 640) sub-domain grid.

    Raises
    ------
    FileNotFoundError
        If no files fall within the cruise window (the original code raised
        ZeroDivisionError instead).
    """
    # 2009 GEOTRACES cruise: 29 August - 8 September
    dmn_files_2009 = np.sort(glob.glob(f'{folder_ref_2009}ANHA12_EXH006_5d_20090101_20091231*'))
    sal_files_2009 = np.sort(glob.glob(f'{folder_salinity}ANHA12-EXH006_5d_gridT_y2009m*'))
    start_date_2009 = datetime.datetime.strptime('2009-08-29', '%Y-%m-%d')
    end_date_2009 = datetime.datetime.strptime('2009-09-08', '%Y-%m-%d')

    # ---- Manganese: keep files whose window (chars 42:50 / 51:59 of the
    # filename) falls inside the cruise period.
    dmn_file_list_2009 = []
    for file in dmn_files_2009:
        if (datetime.datetime.strptime(file.split('/')[-1][42:50], '%Y%m%d') >= start_date_2009) and \
           (datetime.datetime.strptime(file.split('/')[-1][51:59], '%Y%m%d') < end_date_2009):
            dmn_file_list_2009.append(file)
    if not dmn_file_list_2009:
        raise FileNotFoundError(f'No 2009 Mn output files found in {folder_ref_2009}')

    dmn_2009 = np.zeros((50, 700, 640))
    for file in dmn_file_list_2009:
        with xr.open_dataset(file) as ds:  # close each dataset after reading
            dmn_2009 = dmn_2009 + ds['dissolmn'].values[0, :, :, :]
    mn_dis_ref_2009 = dmn_2009 / len(dmn_file_list_2009)

    # ---- Salinity: date parsed from chars 24:34 of the filename (yYYYYmMMdDD).
    sal_file_list_2009 = []
    for file in sal_files_2009:
        if (datetime.datetime.strptime(file.split('/')[-1][24:34], '%Ym%md%d') >= start_date_2009) and \
           (datetime.datetime.strptime(file.split('/')[-1][24:34], '%Ym%md%d') + datetime.timedelta(days=5) < end_date_2009):
            sal_file_list_2009.append(file)
    if not sal_file_list_2009:
        raise FileNotFoundError(f'No 2009 salinity files found in {folder_salinity}')

    sal_2009 = np.zeros((50, 700, 640))
    for file in sal_file_list_2009:
        with xr.open_dataset(file) as ds:
            sal_2009 = sal_2009 + ds['vosaline'].values[0, :, imin:imax, jmin:jmax]
    sal_ref_2009 = sal_2009 / len(sal_file_list_2009)

    return mn_dis_ref_2009, sal_ref_2009
def calculate_average_2015(folder_ref_2015, folder_salinity):
    """Average dissolved Mn and salinity fields over the 2015 cruise period.

    Parameters
    ----------
    folder_ref_2015 : str
        Folder containing the 5-day Mn model output files for 2015.
    folder_salinity : str
        Folder containing the 5-day ANHA12 salinity (gridT) files.

    Returns
    -------
    tuple of ndarray
        (mn_dis_ref_2015, sal_ref_2015): cruise-period means on the
        (50, 700, 640) sub-domain grid.

    Raises
    ------
    FileNotFoundError
        If no files fall within the cruise window (the original code raised
        ZeroDivisionError instead).
    """
    # 2015 GEOTRACES cruise: 9 August - 15 September
    dmn_files_2015 = np.sort(glob.glob(f'{folder_ref_2015}ANHA12_EXH006_5d_20150101_20151231*'))
    sal_files_2015 = np.sort(glob.glob(f'{folder_salinity}ANHA12-EXH006_5d_gridT_y2015m*'))
    start_date_2015 = datetime.datetime.strptime('2015-08-09', '%Y-%m-%d')
    end_date_2015 = datetime.datetime.strptime('2015-09-15', '%Y-%m-%d')

    # ---- Manganese: keep files whose window (chars 42:50 / 51:59 of the
    # filename) falls inside the cruise period.
    dmn_file_list_2015 = []
    for file in dmn_files_2015:
        if (datetime.datetime.strptime(file.split('/')[-1][42:50], '%Y%m%d') >= start_date_2015) and \
           (datetime.datetime.strptime(file.split('/')[-1][51:59], '%Y%m%d') < end_date_2015):
            dmn_file_list_2015.append(file)
    if not dmn_file_list_2015:
        raise FileNotFoundError(f'No 2015 Mn output files found in {folder_ref_2015}')

    dmn_2015 = np.zeros((50, 700, 640))
    for file in dmn_file_list_2015:
        with xr.open_dataset(file) as ds:  # close each dataset after reading
            dmn_2015 = dmn_2015 + ds['dissolmn'].values[0, :, :, :]
    mn_dis_ref_2015 = dmn_2015 / len(dmn_file_list_2015)

    # ---- Salinity: date parsed from chars 24:34 of the filename (yYYYYmMMdDD).
    sal_file_list_2015 = []
    for file in sal_files_2015:
        if (datetime.datetime.strptime(file.split('/')[-1][24:34], '%Ym%md%d') >= start_date_2015) and \
           (datetime.datetime.strptime(file.split('/')[-1][24:34], '%Ym%md%d') + datetime.timedelta(days=5) < end_date_2015):
            sal_file_list_2015.append(file)
    if not sal_file_list_2015:
        raise FileNotFoundError(f'No 2015 salinity files found in {folder_salinity}')

    sal_2015 = np.zeros((50, 700, 640))
    for file in sal_file_list_2015:
        with xr.open_dataset(file) as ds:
            sal_2015 = sal_2015 + ds['vosaline'].values[0, :, imin:imax, jmin:jmax]
    sal_ref_2015 = sal_2015 / len(sal_file_list_2015)

    return mn_dis_ref_2015, sal_ref_2015
def mask_data(mask_ini, S_model, mn_model):
    """Flatten model salinity and Mn over the upper 50 levels of a region.

    Cells outside the region (mask_ini == 0) and values below 0.1 become
    NaN; Mn is converted from mol/L to nmol/L (* 1e9).

    Returns a (salinity, manganese) pair of flat 1-D arrays.
    """
    n_levels = 50  # upper 50 model levels
    region3d = np.tile(mask_ini, (n_levels, 1, 1))

    salinity = np.ma.masked_where(region3d == 0, S_model[:n_levels, :, :])
    manganese = np.ma.masked_where(region3d == 0, mn_model[:n_levels, :, :] * 1e9)

    flat_S = salinity.filled(fill_value=np.nan).ravel()
    flat_mn = manganese.filled(fill_value=np.nan).ravel()

    # Hide near-zero values (land / fill) from the scatter fits.
    flat_S[flat_S < 0.1] = np.nan
    flat_mn[flat_mn < 0.1] = np.nan
    return flat_S, flat_mn
def plot_mnsal(S_CAA, mn_CAA, S_CB, mn_CB, reso=200, title='', savefig=False):
    """Plot modelled vs observed dissolved Mn against salinity (3 panels).

    Panels: (a) CAA with 2015 observations and fits, (b) Canada Basin with
    2009/2015 observations and fits, (c) Canada Basin model density only.

    Parameters
    ----------
    S_CAA, mn_CAA : array-like
        Flattened model salinity / dMn values for the CAA region.
    S_CB, mn_CB : array-like
        Flattened model salinity / dMn values for the Canada Basin.
    reso : int
        Subsampling stride applied to the model points before plotting.
    title : str
        Unused -- kept for call-site compatibility.
    savefig : bool
        If True, write PNG and SVG versions of the figure to disk.

    Notes
    -----
    Relies on module-level observation data (Manuel_*, IPY_*,
    S_CB_obs_combined, mn_CB_obs_combined) defined earlier in the notebook.
    """
    fig, ax = plt.subplots(1,3, figsize=(4.5*2,3.74*0.7), dpi=300)
    st = sns.axes_style("whitegrid")
    # Common axis limits and tick formatting for all three panels.
    for axis in ax.ravel():
        axis.set(xlim=(22, 36), ylim=(0, 20))
        axis.tick_params(axis='both', labelsize=7.5)
    with st:
        CAA_color='#e58203' # '#e5ae03'
        level_step = 0.1
        linewidth_fit = 2.7
        # Kernel-density shading of the (subsampled) model points.
        sns.kdeplot(x=S_CAA[::reso], y=mn_CAA[::reso], levels=np.arange(level_step,1+level_step,level_step), \
                    fill=True, ax=ax[0], cmap="YlOrBr_r")
        sns.kdeplot(x=S_CB[::reso], y=mn_CB[::reso], levels=np.arange(level_step,1+level_step,level_step), \
                    fill=True, ax=ax[1], cmap='Blues_r')
        sns.kdeplot(x=S_CB[::reso], y=mn_CB[::reso], levels=np.arange(level_step,1+level_step,level_step), \
                    fill=True, ax=ax[2], cmap='Blues_r')
        # CAA
        sns.regplot(Manuel_salinity[Manuel_station_names.str.find('CAA')!=-1], \
                    Manuel_dMn_nmolL[Manuel_station_names.str.find('CAA')!=-1], \
                    dropna=True, ci=None, \
                    scatter_kws={"s": 15, 'color':'w', 'edgecolor':CAA_color, 'linewidths':1.0, 'alpha':1.0},\
                    line_kws={"linewidth":linewidth_fit, 'color':CAA_color, 'linestyle':'--'}, truncate=False, ax=ax[0], \
                    label='2015 observations')
        # Canada Basin (scatter only: line_kws linewidth 0 hides the fit line)
        sns.regplot(Manuel_salinity[Manuel_station_names.str.find('CB')!=-1], \
                    Manuel_dMn_nmolL[Manuel_station_names.str.find('CB')!=-1], \
                    dropna=True, ci=None, \
                    scatter_kws={"s": 15, 'color':'w', 'edgecolor':'#6f818d', 'linewidths':1.0, 'alpha':1.0},\
                    line_kws={"linewidth":0}, truncate=False, ax=ax[1], \
                    label='2015 observations')
        # IPY data
        sns.regplot(IPY_salinity, IPY_dMn_nmolL, \
                    dropna=True, ci=None, \
                    scatter_kws={"s": 15, 'color':'w', 'edgecolor':'#39454d', 'linewidths':1.0, 'alpha':1.0},\
                    line_kws={"linewidth":0}, truncate=False, ax=ax[1], \
                    label='2009 observations')
        # Observations combined fit
        sns.regplot(S_CB_obs_combined, mn_CB_obs_combined, \
                    dropna=True, ci=None, \
                    scatter_kws={"s": 0}, line_kws={"linewidth":linewidth_fit, 'color':'#607381', 'linestyle':'--'}, \
                    truncate=False, ax=ax[1])
        # Model data: each fit is drawn twice -- a wider white line underneath
        # for contrast, then the coloured line on top.
        sns.regplot(S_CAA[::reso], mn_CAA[::reso], dropna=True, ci=None, \
                    line_kws={"linewidth":linewidth_fit, 'color':'w'}, scatter_kws={"s": 0.0}, ax=ax[0])
        sns.regplot(S_CAA[::reso], mn_CAA[::reso], dropna=True, ci=None, \
                    line_kws={"linewidth":2.0, 'color':CAA_color}, scatter_kws={"s": 0.0}, ax=ax[0])
        sns.regplot(S_CB[::reso] , mn_CB[::reso], dropna=True, ci=None, \
                    line_kws={"linewidth":linewidth_fit, 'color':'w'}, scatter_kws={"s": 0.0}, ax=ax[1])
        sns.regplot(S_CB[::reso] , mn_CB[::reso], dropna=True, ci=None, \
                    line_kws={"linewidth":2.0, 'color':'#607381'}, scatter_kws={"s": 0.0}, ax=ax[1])
        # Legend entries: zero-length proxy lines added only so the legend has
        # entries; the hard-coded lines[...] indices restyle them as dashed.
        # NOTE(review): these indices depend on the exact draw order above --
        # adding/removing a plot call will silently restyle the wrong line.
        sns.lineplot([0,0],[0,0], lw=2, c=CAA_color, ax=ax[0], label='Model fit')
        sns.lineplot([0,0],[0,0], lw=2, c=CAA_color, ax=ax[0], label='Observations fit')
        ax[0].lines[4].set_linestyle("--")
        sns.lineplot([0,0],[0,0], lw=2, c='#607381', ax=ax[1], label='Model fit')
        sns.lineplot([0,0],[0,0], lw=2, c='#607381', ax=ax[1], label='Observations fit')
        ax[1].lines[6].set_linestyle("--")
        ax[0].legend(loc=(0.35,0.76), frameon=False, fontsize=7)
        ax[1].legend(loc=(0.35,0.68), frameon=False, fontsize=7)
        ax[0].set_xlabel('Salinity [psu]', fontsize=8)
        ax[1].set_xlabel('Salinity [psu]', fontsize=8)
        ax[2].set_xlabel('Salinity [psu]', fontsize=8)
        ax[0].set_ylabel('Dissolved Mn [nM]', fontsize=8)
        ax[1].set_ylabel('')
        ax[2].set_ylabel('')
        ax[0].set_title('CAA', fontsize=8)
        ax[1].set_title('Canada Basin', fontsize=8)
        ax[2].set_title('Canada Basin', fontsize=8)
        # Panel labels.
        fig.text(0.07, 0.94, 'a)', fontsize=9)
        fig.text(0.35, 0.94, 'b)', fontsize=9)
        fig.text(0.65, 0.94, 'c)', fontsize=9)
        # fig.text(0.07, 0.94, 'd)', fontsize=9)
        # fig.text(0.35, 0.94, 'e)', fontsize=9)
        # fig.text(0.65, 0.94, 'f)', fontsize=9)
    if savefig:
        fig.savefig('/ocean/brogalla/GEOTRACES/figures/paper1-202110/S9-model-mn-salinity-relationship_reference.png', \
                    bbox_inches='tight', dpi=300)
        fig.savefig('/ocean/brogalla/GEOTRACES/figures/paper1-202110/S9-model-mn-salinity-relationship_reference.svg', \
                    format='svg', bbox_inches='tight', dpi=300)
        # fig.savefig('/ocean/brogalla/GEOTRACES/figures/paper1-202110/S9-model-mn-salinity-relationship_clean-ice.png', \
        #             bbox_inches='tight', dpi=300)
        # fig.savefig('/ocean/brogalla/GEOTRACES/figures/paper1-202110/S9-model-mn-salinity-relationship_clean-ice.svg', \
        #             format='svg', bbox_inches='tight', dpi=300)
    return
# Calculate average Mn concentrations during time period of interest
# +
# Cruise-period mean fields for the reference and clean-ice experiments.
# NOTE(review): sal_2009/sal_2015 are recomputed identically by the clean-ice
# calls and overwrite the earlier results -- harmless but redundant work.
mn_ref_2009, sal_2009 = calculate_average_2009(folder_ref_2009, folder_salinity)
mn_ref_2015, sal_2015 = calculate_average_2015(folder_ref_2015, folder_salinity)
mn_cleanice_2009, sal_2009 = calculate_average_2009(folder_cleanice_2009, folder_salinity)
mn_cleanice_2015, sal_2015 = calculate_average_2015(folder_cleanice_2015, folder_salinity)
# -
# # Salinity:Mn relationship
# Separate the Canada Basin and the CAA
# +
# Build index masks that split the sub-domain into Canada Basin (CB),
# Baffin Bay (BB), and the Canadian Arctic Archipelago (CAA = everything else).

# Canada Basin: ------
# All points to the right of the line j = -(7/8)*i + 2217 in full-grid indices.
x_ind = np.arange(1621, 2100, 1)
y_ind = (-7/8)*x_ind + 1517 + 700
cb_x_parts = []
cb_y_parts = []
for index in range(0, len(x_ind)):
    CB_x = np.arange(x_ind[index], 2179, 1)
    cb_x_parts.append(CB_x)
    cb_y_parts.append(np.ones(CB_x.shape)*y_ind[index])
# Concatenate once instead of np.append per iteration (the original loop
# re-allocated the arrays each time, i.e. accidental O(n^2)); reversed to
# preserve the original element order and cast to float for the same dtype.
CB_indx = np.concatenate(cb_x_parts[::-1]).astype(float)
CB_indy = np.concatenate(cb_y_parts[::-1]).astype(float)

# Baffin Bay: ------
# Rectangular index box, vectorized with meshgrid ('ij' keeps x-major order,
# matching the original nested loop).
x_ind = np.arange(1400, 1550, 1)
y_ind = np.arange(515, 799, 1)
BB_xx, BB_yy = np.meshgrid(x_ind, y_ind, indexing='ij')
BB_indx = BB_xx.ravel().astype(float)
BB_indy = BB_yy.ravel().astype(float)

# Separate Canada Basin and the CAA: -------
mask_ini_CB = np.zeros((isize,jsize))
mask_ini_CAA = np.ones((isize,jsize))
mask_ini_CBBB = np.zeros((isize,jsize))
for i, j in zip(CB_indx, CB_indy):
    mask_ini_CB[int(i-imin),int(j-jmin)] = 1
    mask_ini_CBBB[int(i-imin),int(j-jmin)] = 1
    mask_ini_CAA[int(i-imin),int(j-jmin)] = 0
for i, j in zip(BB_indx, BB_indy):
    mask_ini_CBBB[int(i-imin),int(j-jmin)] = 1
    mask_ini_CAA[int(i-imin),int(j-jmin)] = 0
# Force the strip near the domain edge into the Canada Basin masks.
mask_ini_CB[150:-1 ,-8:-1] = 1
mask_ini_CBBB[150:-1 ,-8:-1] = 1
mask_ini_CAA[150:-1,-8:-1] = 0
# -
# Separate salinity and Mn results based on these definitions.
# +
# Subsample every 5th horizontal point, then flatten with the regional masks.
reso = 5
ref_S_CB_2015, ref_mn_CB_2015 = mask_data(mask_ini_CBBB[::reso,::reso], sal_2015[:,::reso,::reso], mn_ref_2015[:,::reso,::reso])
ref_S_CB_2009, ref_mn_CB_2009 = mask_data(mask_ini_CBBB[::reso,::reso], sal_2009[:,::reso,::reso], mn_ref_2009[:,::reso,::reso])
ref_S_CAA_2015, ref_mn_CAA_2015 = mask_data(mask_ini_CAA[::reso,::reso], sal_2015[:,::reso,::reso], mn_ref_2015[:,::reso,::reso])
# NOTE(review): the reference runs use mask_ini_CBBB (Canada Basin + Baffin
# Bay) while the clean-ice runs use mask_ini_CB (Canada Basin only) --
# confirm this asymmetry is intentional.
cleanice_S_CB_2015, cleanice_mn_CB_2015 = mask_data(mask_ini_CB[::reso,::reso], sal_2015[:,::reso,::reso], \
                                                    mn_cleanice_2015[:,::reso,::reso])
cleanice_S_CB_2009, cleanice_mn_CB_2009 = mask_data(mask_ini_CB[::reso,::reso], sal_2009[:,::reso,::reso], \
                                                    mn_cleanice_2009[:,::reso,::reso])
cleanice_S_CAA_2015, cleanice_mn_CAA_2015 = mask_data(mask_ini_CAA[::reso,::reso], sal_2015[:,::reso,::reso], \
                                                      mn_cleanice_2015[:,::reso,::reso])
# Combine Canada Basin 2009 and 2015 model data:
ref_S_CB_combined = np.append(ref_S_CB_2009, ref_S_CB_2015)
ref_mn_CB_combined = np.append(ref_mn_CB_2009, ref_mn_CB_2015)
cleanice_S_CB_combined = np.append(cleanice_S_CB_2009, cleanice_S_CB_2015)
cleanice_mn_CB_combined = np.append(cleanice_mn_CB_2009, cleanice_mn_CB_2015)
# Combine Canada Basin 2009 and 2015 observation data:
S_CB_obs_combined = np.append(IPY_salinity, Manuel_salinity[Manuel_station_names.str.find('CB')!=-1])
mn_CB_obs_combined = np.append(IPY_dMn_nmolL, Manuel_dMn_nmolL[Manuel_station_names.str.find('CB')!=-1])
# -
# Mn:salinity figures for the reference and clean-ice experiments.
# NOTE(review): both calls pass savefig=True and plot_mnsal writes to the same
# hard-coded "reference" file names (the clean-ice paths are commented out
# inside the function), so the second figure overwrites the first -- confirm.
plot_mnsal(ref_S_CAA_2015, ref_mn_CAA_2015, ref_S_CB_combined, ref_mn_CB_combined, \
           reso=1, title='', savefig=True)
plot_mnsal(cleanice_S_CAA_2015, cleanice_mn_CAA_2015, cleanice_S_CB_combined, cleanice_mn_CB_combined, \
           reso=1, title='', savefig=True)
| paper-materials/S9-salinity-mn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="BMWnNRNfXewe"
# # Import Necessary Packages
# + id="wR1S2OghXjzO"
import pandas as pd
# + [markdown] id="UUTZIZ3CEyId"
# # Creating complex_words.csv
# + [markdown] id="iJZorl7HaPFF"
# Comparing the list of words to get the school level column to populate correctly did not work right. I opted to do it the long and difficult way.
# + [markdown] id="fUTU3SO3WPM_"
# Each word seperated into the school levels they belong to.
# + id="jxwC46vWExtl"
# Raw vocabulary, one comma-separated string per school level; split into
# Python lists in a later cell.  Complexity scores assigned downstream:
# middle school = 14, high school = 16, college = 18, post college = 20.
middle_school_level = "abate, abide, abode, abrupt, absorb, abstain, absurd, abyss, accord, anathema, annex, bane, cogent, conduit, crass, dearth, debacle, defunct, dispel, edict, elicit, extant, foiled, foil, inane, mores, pert, quaint, rife, tome, trite, yoke, tutor, clout, accrue, acta, aft, akin, amid, ana, asp, aura, avid, awe, brute, chic, clad, coup, daft, dap, dire, duly, eon, ergo, eve, flux, heed, helm, hex, hind, hue, ire, keen, kiln, leer, lest, lug, lush, moot, ode, omit, poise, prod, prone, reel, riff, romp, rout, scant, sect, seep, sheen, shunt, silt, skew, skirr, sly, smelt, snare, sod, spiel, stern, stint, stout, stow, strut, subtle, sync, tacit, tact, taut, tether, thwart, toil, vat, vie, vigor, vivid, vying, wane, wary, weary, whim, woe, zeal"
high_school_level = "aback, abalone, abased, abbot, abet, abettor, abeyance, abject, abhorrent, abridge, abrogate, abstruse, acarid, accede, accolade, accredit, acumen, admonish, alacrity, arboreal, ascetic, clamor, cleave, cobbler, consign, contrite, covet, despot, didactic, dirge, disrepute, dogmatic, dour, emollient, ephemeral, espouse, evince, fallacious, flagrant, fortuitous, fractious, gratuitous, hapless, impinge, impute, inimical, inoculate, instigate, inure, invective, knell, linchpin, litigant, maelstrom, maudlin, maverick, morass, nadir, panacea, paucity, pernicious, portent, profligate, prurient, puerile, relegate, remiss, reprieve, sanguine, sobriety, solicitous, staid, swarthy, travesty, trenchant, turpitude, veracity, vestige, vitriolic, winsome, zephyr, wily, tirade, grommet, sultry, accursed, accuser, acerbic, acerous, acetic, acme, acrid, adage, adduce, aegis, airfoil, alcove, alibi, allay, allude, alms, ambit, amble, amity, ancon, anent, animus, aphid, aptly, atone, awry, bask, bemoan, berate, bide, biome, boor, brash, brawl, butte, bygone, captor, cask, chaff, chard, chasm, chock, chroma, coax, cower, coy, crag, curt, dais, deft, delude, drab, droll, dupe, ebb, eke, elude, feign, fickle, flak, forgo, furl, garish, gauche, ghee, goad, gouge, heft, hew, hoax, hubris, illude, ingot, iota, jive, jolt, jot, knoll, laud, lave, lax, leery, lithe, loath, lob, lop, lunge, lurk, maim, meek, morsel, mull, naught, nook, obtuse, opus, placid, pleat, pout, privy, profuse, pry, puny, putrid, quell, rapt, reek, reform, relent, rend, repute, ruse, sate, shard, sigil, slat, sleuth, smirk, snafu, sneer, snide, snub, soiree, somber, sop, spew, spire, splay, stasis, stifle, stoke, suave, supine, surly, swath, tepid, topple, trawl, trice, trifle, trill, trope, unapt, usury, valor, verve, vouch, welt, whirl, wield, wile, writhe"
college_level = "abase, abashed, abasia, abatis, abduct, abele, abhor, abiosis, abient, ablate, ablaze, abrade, abscind, absolve, accost, accosted, arrogate, bashful, carouse, connive, contusion, debunk, duplicity, elegy, emend, enervate, exhort, exigent, expunge, extol, fetter, garrulous, gourmand, iconoclast, inchoate, licentious, modicum, neophyte, noisome, ostracism, palliate, pariah, pejorative, penurious, pithy, platitude, plaudit, plenitude, potentate, presage, probity, proclivity, protean, quandary, reprobate, scurrilous, stolid, subjugate, surfeit, torpid, truculent, nauseant, addend, acerbate, acerose, acetous, achene, aciniform, acquiesce, acrostic, adit, adulate, aerate, affix, antic, arete, argot, balk, bequeath, bungle, callow, careen, chortle, cohere, collude, comity, contort, contuse, couth, cowl, cyborg, dally, decry, dote, drawl, dyad, effigy, elate, exult, frill, gnash, gnaw, gruff, gyro, husk, imbue, inapt, knave, lank, malady, maraud, maw, ombre, plait, preen, pylon, pyre, quash, rancor, rasp, recant, redact, refract, resile, revile, rile, scoff, scowl, seethe, shirk, sidle, sinew, singe, slosh, souse, squall, sunder, swathe, tatter, teem, tousle, tuft, vapid, wiry"
post_college_level = "abaft, abamp, abampere, abash, abaxial, abbacy, abbess, abduce, abducent, abductee, abductor, abelmosk, abeyant, abjure, abnegate, abscond, acclivity, accroach, adumbrate, aggrandize, beguile, bilk, cajole, calumny, cavort, congruity, cupidity, debauch, discomfit, enfranchise, equivocal, execrable, expiate, expurgate, fatuous, mawkish, obdurate, officious, pellucid, phlegmatic, proscribe, quixotic, solipsism, toady, umbrage, upbraid, vilify, fipple, accustom, acedia, acerbity, acicular, aciculate, acolyte, acquit, addle, adjoin, adjure, agog, amuck, apiary, appall, avow, bawl, chide, conjoin, conk, dactyl, decoct, deify, divvy, doff, doily, edify, embroil, espy, exude, flay, fleck, flub, guck, gyre, hovel, irk, jeer, jibe, jinx, jowl, laze, liquefy, mewl, miff, mope, perplex, pilfer, posy, primp, qualm, quip, quirk, rebuff, reify, schlep, skulk, spry, vex, waft, yowl"
# + [markdown] id="QWHa_1VFZ21H"
# Splitting each string to make lists.
# + id="UML_y-Isu8qJ"
# Turn each comma-separated level string into a list of individual words.
# The right-hand side is fully evaluated before rebinding the four names.
middle_school_level, high_school_level, college_level, post_college_level = (
    level_string.split(', ')
    for level_string in (middle_school_level, high_school_level,
                         college_level, post_college_level)
)
# + [markdown] id="KflbHgI-Z8ef"
# Making each list into it's own dataframe.
# + id="7jVWY9ZQFpUM"
# One single-column ("word") dataframe per school level.
df = pd.DataFrame({"word": middle_school_level})
df_high = pd.DataFrame({"word": high_school_level})
df_college = pd.DataFrame({"word": college_level})
df_post = pd.DataFrame({"word": post_college_level})
# + [markdown] id="Vt06GJPqaEq9"
# Assigning the school level tag for each dataframe.
# + id="BTyLHx5zWKGP"
# Label each frame with the school level its words belong to.
for level_frame, level_name in ((df, "middle school"),
                                (df_high, "high school"),
                                (df_college, "college"),
                                (df_post, "post college")):
    level_frame['school level'] = level_name
# + [markdown] id="6H_Pb5TuagjR"
# Assigning the complexity score based on the school level.
# + id="2T6Lu5vaXxKj"
# Numeric complexity score, increasing with school level.
for level_frame, score in ((df, 14), (df_high, 16),
                           (df_college, 18), (df_post, 20)):
    level_frame['complexity'] = score
# + [markdown] id="GauEAQXBalwt"
# Concatenating the dataframes into one.
# + id="B_BcaNmNWcIp"
# Stack the four per-level frames into a single table (row-wise).
frames = [df, df_high, df_college, df_post]
result = pd.concat(frames, axis=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="pVWIauoOXoV8" outputId="940c212e-875b-4b10-c97b-359287c9010f"
result
# + [markdown] id="aG0R4JlYa6gc"
# Making sure that there aren't any duplicates.
# + id="of0wBLa7Z-4J"
# Keep only the first occurrence of each word.
result = result.drop_duplicates(subset="word")
# + [markdown] id="sxg6ArKbbAWr"
# Same amount of rows shows that there were no duplicates.
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="AJH15eQDjRhP" outputId="7dca1c61-d43e-417a-a61c-eb1ef0c6db4a"
result
# + [markdown] id="_aPpQk6WbFV1"
# I tried to sort the whole dataframe alphabetically, but that didn't work correctly either. I compiled all the words into their respective catagory based on the first letter of the word.
# + id="kqgvW5tJlBVc"
# All words regrouped by first letter (no 'x' words exist in the source
# lists); used below to build a reliable alphabetical ordering after
# whole-frame sorting did not behave as expected.
a_words = ["aback", "abaft", "abalone", "abamp", "abampere", "abase", "abased", "abash", "abashed", "abasia", "abate", "abatis", "abaxial", "abbacy", "abbess", "abbot", "abduce", "abduct", "abducent", "abductee", "abductor", "abele", "abelmosk", "abet", "abettor", "abeyance", "abeyant", "abhor", "abject", "abhorrent", "abjure", "abide", "abiosis", "abient", "ablate", "ablaze", "abnegate", "abode", "abrade", "abridge", "abrogate", "abrupt", "abscind", "absolve", "absorb", "abscond", "abstain", "abstruse", "absurd", "abyss", "acarid", "accede", "acclivity", "accolade", "accord", "accost", "accosted", "accredit", "accroach", "accrue", "acumen", "admonish", "adumbrate", "aggrandize", "alacrity", "anathema", "annex", "arboreal", "arrogate", "ascetic", "accursed", "accuser", "accustom", "acedia", "acerbic", "acerbate", "acerbity", "acerose", "acerous", "acetic", "acetous", "achene", "acicular", "aciculate", "aciniform", "acme", "acolyte", "acquiesce", "acquit", "acrid", "acrostic", "acta", "adage", "addle", "adduce", "adit", "adjoin", "adjure", "adulate", "aegis", "aerate", "affix", "aft", "agog", "airfoil", "akin", "alcove", "alibi", "allay", "allude", "alms", "ambit", "amble", "amid", "amity", "amuck", "ana", "ancon", "anent", "animus", "antic", "aphid", "apiary", "appall", "aptly", "arete", "argot", "asp", "atone", "aura", "avid", "avow", "awe", "awry", "addend"]
b_words = ["bane", "bashful", "beguile", "bilk", "balk", "bask", "bawl", "bemoan", "bequeath", "berate", "bide", "biome", "boor", "brash", "brawl", "brute", "bungle", "butte", "bygone"]
c_words = ["cajole", "calumny", "carouse", "cavort", "clamor", "cleave", "cobbler", "cogent", "conduit", "congruity", "connive", "consign", "contusion", "contrite", "covet", "cupidity", "crass", "clout", "callow", "captor", "careen", "cask", "chaff", "chard", "chasm", "chic", "chide", "chock", "chortle", "chroma", "clad", "coax", "cohere", "collude", "comity", "conjoin", "conk", "contort", "contuse", "coup", "cower", "cowl", "coy", "crag", "curt", "couth", "cyborg"]
d_words = ["dap", "deify", "dactyl", "duly", "dupe", "deft", "decry", "divvy", "dire", "delude", "doily", "dote", "drab", "drawl", "dyad", "droll", "daft", "dally", "dais", "dearth", "decoct", "debacle", "debauch", "debunk", "defunct", "despot", "didactic", "dirge", "discomfit", "dispel", "disrepute", "dogmatic", "dour", "duplicity", "doff"]
e_words = ["edict", "elegy", "elicit", "emend", "emollient", "enervate", "enfranchise", "ephemeral", "equivocal", "espouse", "evince", "exhort", "execrable", "exigent", "expiate", "expunge", "extol", "extant", "expurgate", "ebb", "edify", "effigy", "eke", "elate", "elude", "embroil", "eon", "ergo", "espy", "eve", "exude", "exult"]
f_words = ["fallacious", "fatuous", "fetter", "flagrant", "foiled", "foil", "fortuitous", "fractious", "fipple", "feign", "fickle", "flak", "flay", "fleck", "flub", "flux", "forgo", "frill", "furl"]
g_words = ["garrulous", "gourmand", "gratuitous", "grommet", "garish", "gauche", "ghee", "gnash", "gnaw", "goad", "gouge", "gruff", "guck", "gyre", "gyro"]
h_words = ["hapless", "heed", "heft", "helm", "hew", "hex", "hind", "hoax", "hovel", "hubris", "hue", "husk"]
i_words = ["iconoclast", "impinge", "impute", "inane", "inchoate", "inimical", "inoculate", "instigate", "inure", "invective", "illude", "imbue", "inapt", "ingot", "iota", "ire", "irk"]
j_words = ["jeer", "jibe", "jinx", "jive", "jolt", "jot", "jowl"]
k_words = ["knell", "keen", "kiln", "knave", "knoll"]
l_words = ["licentious", "linchpin", "litigant", "lank", "laud", "lave", "lax", "laze", "leer", "leery", "lest", "liquefy", "lithe", "loath", "lob", "lop", "lug", "lunge", "lurk", "lush"]
m_words = ["maelstrom", "maudlin", "maverick", "mawkish", "modicum", "morass", "mores", "maim", "malady", "maraud", "maw", "meek", "mewl", "miff", "moot", "mope", "morsel", "mull"]
n_words = ["nadir", "neophyte", "noisome", "nauseant", "naught", "nook"]
o_words = ["obdurate", "officious", "ostracism", "obtuse", "ode", "ombre", "omit", "opus"]
p_words = ["palliate", "panacea", "pariah", "paucity", "pejorative", "pellucid", "penurious", "pert", "pernicious", "phlegmatic", "pithy", "platitude", "plaudit", "plenitude", "portent", "potentate", "presage", "probity", "proclivity", "profligate", "proscribe", "protean", "prurient", "puerile", "perplex", "pilfer", "placid", "plait", "pleat", "poise", "posy", "pout", "preen", "primp", "privy", "prod", "profuse", "prone", "pry", "puny", "putrid", "pylon", "pyre"]
q_words = ["quaint", "quixotic", "quandary", "qualm", "quash", "quell", "quip", "quirk"]
r_words = ["relegate", "remiss", "reprieve", "reprobate", "rife", "rancor", "rapt", "rasp", "rebuff", "recant", "redact", "reek", "reel", "reform", "refract", "reify", "relent", "rend", "repute", "resile", "revile", "riff", "rile", "romp", "rout", "ruse"]
s_words = ["sanguine", "scurrilous", "sobriety", "solicitous", "solipsism", "staid", "stolid", "subjugate", "surfeit", "swarthy", "sultry", "sate", "scant", "schlep", "scoff", "scowl", "sect", "seep", "seethe", "shard", "sheen", "shirk", "shunt", "sidle", "sigil", "silt", "sinew", "singe", "skew", "skirr", "skulk", "slat", "sleuth", "slosh", "sly", "smelt", "smirk", "snafu", "snare", "sneer", "snide", "snub", "sod", "soiree", "somber", "sop", "souse", "spew", "spiel", "spire", "splay", "spry", "squall", "stasis", "stern", "stifle", "stint", "stoke", "stout", "stow", "strut", "suave", "subtle", "sunder", "supine", "surly", "swath", "swathe", "sync"]
t_words = ["tome", "toady", "torpid", "travesty", "trenchant", "trite", "truculent", "turpitude", "tirade", "tutor", "tacit", "tact", "tatter", "taut", "teem", "tepid", "tether", "thwart", "toil", "topple", "tousle", "trawl", "trice", "trifle", "trill", "trope", "tuft"]
u_words = ["umbrage", "upbraid", "unapt", "usury"]
v_words = ["veracity", "vestige", "vilify", "vitriolic", "valor", "vapid", "vat", "verve", "vex", "vie", "vigor", "vivid", "vouch", "vying"]
w_words = ["winsome", "wily", "waft", "wane", "wary", "weary", "welt", "whim", "whirl", "wield", "wile", "wiry", "woe", "writhe"]
y_words = ["yoke", "yowl"]
z_words = ["zephyr", "zeal"]
# + [markdown] id="OB19SI_Ebc7Y"
# Then sorted each list individually.
# + id="PQ8zE9LlpMDB"
# Sort every per-letter bucket in place (there is no x_words bucket).
for letter_bucket in (a_words, b_words, c_words, d_words, e_words, f_words,
                      g_words, h_words, i_words, j_words, k_words, l_words,
                      m_words, n_words, o_words, p_words, q_words, r_words,
                      s_words, t_words, u_words, v_words, w_words, y_words,
                      z_words):
    letter_bucket.sort()
# + [markdown] id="7WTAVuvUbisj"
# Combined all of the lists into one.
# + id="BJp90RcTotYd"
# Flatten the sorted per-letter buckets, in letter order, into one list.
alphabetically_sorted_words = [
    word
    for letter_bucket in (a_words, b_words, c_words, d_words, e_words,
                          f_words, g_words, h_words, i_words, j_words,
                          k_words, l_words, m_words, n_words, o_words,
                          p_words, q_words, r_words, s_words, t_words,
                          u_words, v_words, w_words, y_words, z_words)
    for word in letter_bucket
]
# + [markdown] id="odWxPXTNbli0"
# Made the combined list into a new dataframe and checked to make sure there were no duplicates.
# + colab={"base_uri": "https://localhost:8080/", "height": 640} id="pjWx8HV4p8p8" outputId="ac8d3a90-d39e-4354-9e7e-51a4b453bfe4"
# Single-column dataframe in alphabetical order; value_counts() of 1s
# confirms there are no duplicate words.
alpha_df = pd.DataFrame({'word': alphabetically_sorted_words})
print(alpha_df.value_counts())
alpha_df
# + [markdown] id="2PWfMCA1b6x-"
# Combined the lists of words by school level.
# + id="ZTz4lmZ3Zw6G"
# All words again, this time grouped by school level rather than by letter.
levels_added_together = [*middle_school_level, *high_school_level,
                         *college_level, *post_college_level]
# + [markdown] id="B3F4Egz2cD3R"
# Combined all of the lists together to insure that there were exactly two of each word. Meaning that I had the same words in both.
# + id="d-JZvMpSFpQY" colab={"base_uri": "https://localhost:8080/"} outputId="50021d70-31b5-45f2-f66e-5d130aa51315"
# Concatenate both groupings; every word must then occur exactly twice,
# proving the two groupings contain the same words.
checking_for_missing_words = [*alphabetically_sorted_words, *levels_added_together]
checking_for_missing_words_df = pd.DataFrame({'word': checking_for_missing_words})
print(checking_for_missing_words_df.value_counts())
# + [markdown] id="AAeJXe0kcQrU"
# I then reindexed the result dataframe to match the order of the alphabetically sorted dataframe.
# + id="kbi7NdInqGpE"
# Reorder `result` to match alpha_df's alphabetical word order.
result = (
    result.set_index('word')
          .reindex(index=alpha_df['word'])
          .reset_index()
)
# + [markdown] id="wrgf40-mccfi"
# Again insuring that there are only one of each word in my result dataframe.
# + colab={"base_uri": "https://localhost:8080/"} id="o9ZtJQZZq5wl" outputId="a75a33e8-3bd4-4c91-aec0-b6890c80c92f"
# Every word should appear exactly once after the reindex.
result['word'].value_counts()
# + [markdown] id="1678kBgtcyva"
# Just checking that everything looks correct.
# + colab={"base_uri": "https://localhost:8080/"} id="ZDlF7xVcRV9u" outputId="6af76fe5-884a-4c23-83f0-f80991afbb8d"
# Row counts per complexity score should match the four source list sizes.
result['complexity'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="GjVuO4vtUI2x" outputId="7d18dd18-887c-465a-cae0-2cb5628c38a8"
result['complexity']
# + [markdown] id="5lu5iWC0c3Wi"
# Converting my result dataframe to the complex_words.csv.
# + id="ziCIa_kCfTiA"
# Persist the final word/school-level/complexity table; index=False keeps
# the integer row index out of the CSV.
result.to_csv("complex_words.csv", index=False)
# + [markdown] id="IF8yShtxc_m-"
# Checking that the csv reads correctly.
# + id="6un5S5crnT73"
# Read the table back to verify the export round-trips cleanly.
# Fix: read the same relative path the cell above wrote instead of the
# Colab-specific absolute path '/content/complex_words.csv', so the
# notebook also runs outside Google Colab (in Colab the working directory
# is /content, so the relative path still resolves to the same file).
complex_words = pd.read_csv('complex_words.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="6B0bAClIuYu6" outputId="d397c543-44f1-499a-81b4-637853bb8e07"
complex_words
| notebooks/Creating_complex_words_csv.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Cvičení 12. Vícevýběrové testy
# ## <NAME>, <NAME>, <NAME>
# ## Testovací data pro ukázku volání funkcí
# +
# Build one-column data frames from random normal draws (four groups).
a = as.data.frame(rnorm(n = 35, mean = 107, sd = 10))
b = as.data.frame(rnorm(n = 30, mean = 105, sd = 10))
c = as.data.frame(rnorm(n = 40, mean = 102, sd = 10))
d = as.data.frame(rnorm(n = 32, mean = 101, sd = 10))
# rename the single column ("hodnota" = value)
colnames(a) = c("hodnota")
colnames(b) = c("hodnota")
colnames(c) = c("hodnota")
colnames(d) = c("hodnota")
# add a group label ("typ" = type) to every data frame
a$typ = "a"
b$typ = "b"
c$typ = "c"
d$typ = "d"
# row-bind the four groups into one long data frame
data = rbind(a,b,c,d)
# convert the group label to a factor
data$typ = as.factor(data$typ)
head(data)
# -
boxplot(data$hodnota ~ data$typ)
# If there were any outliers we would ignore them here
# (we KNOW the data were drawn from a normal distribution!)
# (and we know the groups share the same variance)
# # Přehled funkcí
# ## Míry variability
# ### Bartlettův test
# - ověřuje shodu rozptylů
# - $H_0: \sigma^2_1 = \sigma^2_2 = \sigma^2_3 = \ldots$
# - $H_A: \neg H_0$
# - předpokladem je normalita dat (a samozřejmě nezávislost a spojitost)
# Bartlett's test: H0 all group variances equal (requires normality).
bartlett.test(data$hodnota ~ data$typ)
# ### Leveneův test
# - ověřuje shodu rozptylů
# - $H_0: \sigma^2_1 = \sigma^2_2 = \sigma^2_3 = \ldots$
# - $H_A: \neg H_0$
# - předpokladem je pouze nezávislost a spojitost
# Levene's test (car package): H0 all group variances equal; no normality
# assumption, only independence and continuity.
car::leveneTest(data$hodnota ~ data$typ)
# ### Cochranův a Hartleyův test
# - také ověřují shodu rozptylů
# - požadují normalitu dat a tzv. vyvážené třízení
# - vyvážené třízení znamená, že máme přibližně stejné množství dat v každé skupině
# - nebudeme je používat
#
# ## Míry polohy
# ### ANOVA (analýza rozptylu)
# - ověřuje shodu polohy (středních hodnot)
# - $H_0: \mu_1 = \mu_2 = \mu_3 = \ldots$
# - $H_A: \neg H_0$
# - předpoklady:
# - normalita dat
# - homoskedasticita (shodné rozptyly)
# - (a samozřejmě nezávislost a spojitost)
# - pokud zamítáme $H_0$ je vyžadována Post-Hoc analýza
# - pomocí TukeyHSD testu
# +
# Basic one-way ANOVA
# H0: mu1 = mu2 = mu3 = mu4
# HA: ~H0 (negation of H0)
res = aov(data$hodnota ~ data$typ)
summary(res)
# +
# Post-hoc analysis (Tukey honest significant differences)
TukeyHSD(res)
# +
# computing group effects
library(dplyr)
# overall mean
prumer_vsech = mean(data$hodnota)
prumer_vsech
# group means
efekty = data %>% group_by(typ) %>% 
    summarize(mean_skup = mean(hodnota))
# effect = group mean minus overall mean
efekty$efekt = efekty$mean_skup - prumer_vsech
# print sorted by effect, descending
efekty %>% arrange(desc(efekt))
# -
# ### Kruskalův - Wallisův test
# - ověřuje shodu polohy (mediánů)
# - $H_0: X_{0.5,1} = X_{0.5,2} = X_{0.5,3} = \ldots$
# - $H_A: \neg H_0$
# - předpoklady:
# - symetrie dat
# - (a samozřejmě nezávislost a spojitost)
# - pokud zamítáme $H_0$ je vyžadována Post-Hoc analýza
# - pomocí Dunnové testu/metody
# +
# Basic Kruskal-Wallis test
# H0: X0.5,1 = X0.5,2 = X0.5,3 = X0.5,4
# HA: ~H0 (negation of H0)
kruskal.test(data$hodnota ~ data$typ)
# +
# Post-hoc analysis (Dunn's method)
# altp = TRUE reports p-values meant to be compared directly with alpha
# (default altp = FALSE: compare against alpha/2 instead)
# install.packages("dunn.test")
dunn.test::dunn.test(x = data$hodnota, g = data$typ,
                     method = "bonferroni", altp = TRUE)
# +
# computing group effects
# overall median
median_vsech = median(data$hodnota)
median_vsech
# group medians
efekty = data %>% group_by(typ) %>% 
    summarize(median_skup = median(hodnota))
# effect = group median minus overall median
efekty$efekt = efekty$median_skup - median_vsech
# print sorted by effect, descending
efekty %>% arrange(desc(efekt))
# -
# # Příklady
# ## Příklad 1.
# Testujeme nulovou hypotézu µ1 = µ2 = µ3. Bylo zjištěno, že data, která máme k dispozici jsou výběry z normálního rozdělení splňující předpoklad homoskedasticity (shody rozptylů). Na základě údajů získaných explorační analýzou doplňte tabulku ANOVA a vyplývající závěry.<br>
# 
n = c(40,40,42) # sample sizes
prum = c(300,290,310) # group means
s = c(33,34,31) # group standard deviations
n.total = sum(n) # total sample size
k = 3 # number of groups
df.b = k-1 # degrees of freedom - between groups
df.e = n.total-k # degrees of freedom - residual (within groups)
# overall mean (weighted mean of the group means)
prum.total = weighted.mean(x = prum, w = n)
prum.total
# between-group sum of squares
ss.b = sum(n*(prum - prum.total)^2)
ss.b
# residual (within-group) sum of squares
ss.e = sum((n - 1)*s^2)
ss.e
# total sum of squares
ss.b + ss.e
# between-group mean square
ms.b = ss.b/df.b
ms.b
# within-group (residual) mean square
ms.e = ss.e/df.e
ms.e
# F-ratio
F = ms.b/ms.e
F
# +
# p-value (upper tail of the F distribution)
p = 1 - pf(F,df.b,df.e)
p
# At the 5% significance level we reject the hypothesis of equal means,
# i.e. the means of at least one pair of groups differ significantly.
# +
# estimated group effects
efekt = prum - prum.total
efekt
# Compared with the overall mean, group 2 performs below average (about
# 10 units below the overall mean), while group 3 averages about 10 units
# above it. Group 1's average matches the overall mean.
# -
# ## Příklad 2.
# 122 pacientů, kteří podstoupili operaci srdce, bylo náhodně rozděleno do tří skupin.<br>
# **Skupina 1:** Pacienti dostali 50 % oxidu dusného a 50 % kyslíkové směsi nepřetržitě po dobu 24 hodin.<br>
# **Skupina 2:** Pacienti dostali 50 % oxidu dusného a 50 % kyslíkové směsi pouze během operace.<br>
# **Skupina 3:** Pacienti nedostali žádný oxid dusný, ale dostali 35-50 % kyslíku po dobu 24 hodin.<br>
# Data v souboru kyselina listova.xls odpovídají koncentracím soli kyseliny listové v červených krvinkách ve všech třech skupinách po uplynutí 24 hodin ventilace. Ověřte, zda pozorované rozdíly mezi koncentracemi soli kyseliny listové jsou statisticky významné, tj. zda existuje vliv složení směsi na sledovaný parametr.
kysel = readxl::read_excel("data/testy_vicevyberove.xlsx", sheet=1)
colnames(kysel) = c("sk1","sk2","sk3") # rename columns
kysel
# convert to the standard (long) data format
kysel.s = stack(kysel)
colnames(kysel.s) = c("hodnoty","skupina")
kysel.s = na.omit(kysel.s)
head(kysel.s)
boxplot(kysel.s$hodnoty ~ kysel.s$skupina)
# The data contain no outliers.
# +
# test normality per group with the Shapiro-Wilk test
library(dplyr)
kysel.s %>% group_by(skupina) %>% 
    summarise(norm.pval = shapiro.test(hodnoty)$p.value)
# +
# Information needed to set the rounding precision
kysel.s %>% group_by(skupina) %>% 
    summarise(len = length(hodnoty), st.dev = sd(hodnoty))
# round sd to 3 significant digits
# round sd and location measures to one decimal place
# +
# Checking equality of variances
s2 = kysel.s %>% group_by(skupina) %>% 
    summarise(var = sd(hodnoty)^2)
s2 # sample variances
max(s2$var)/min(s2$var)
# Based on the box plot and the ratio of the largest to the smallest
# variance (<2) we do not expect the variances to differ significantly
# +
# Normality was not rejected -> Bartlett test
bartlett.test(kysel.s$hodnoty ~ kysel.s$skupina)
# At the 5% significance level we cannot reject equality of variances
# (Bartlett test, x_OBS = 0.878, df = 2, p-value = 0.645).
# +
# We compare means of independent samples from normal distributions
# with equal variances -> ANOVA
# aov() requires data in the standard (long) format
vysledky = aov(kysel.s$hodnoty ~ kysel.s$skupina)
summary(vysledky)
# At the 5% significance level we reject equality of means
# (ANOVA, p-value<<0.001) -> multiple comparisons
# -
# post-hoc analysis
TukeyHSD(vysledky)
# +
# computing group effects
library(dplyr)
# overall mean
prumer_vsech = mean(kysel.s$hodnoty)
prumer_vsech
# group means
efekty = kysel.s %>% group_by(skupina) %>% 
    summarize(mean_skup = mean(hodnoty))
# effects
efekty$efekt = efekty$mean_skup - prumer_vsech
# print sorted descending
efekty %>% arrange(desc(efekt))
# Treating a high folic acid level as positive, group 1 did significantly
# best (mean folate roughly 27 units above the overall mean of all tested
# patients) and group 2 did significantly worst (mean folate roughly 26
# units below the overall mean). Group 3's folate level matches the
# overall mean. All three patient groups differ significantly from one
# another in blood folate level.
# -
# ## Příklad 3.
# Na farmě jsou chována tři plemena králíků. Byl proveden pokus kralici.xls, jehož cílem bylo zjistit, zda i když chováme a vykrmujeme všechny králíky po stejnou dobu a za stejných podmínek, existuje statisticky významný (průkazný) rozdíl mezi plemeny v hmotnostech králíků. Ověřte.
kralici = readxl::read_excel("data/testy_vicevyberove.xlsx", sheet=2)
colnames(kralici) = c("viden","cesky","kalif") # rename columns
kralici
# convert to the standard (long) data format
kralici.s = stack(kralici)
colnames(kralici.s) = c("hodnoty","skupina")
kralici.s = na.omit(kralici.s)
head(kralici.s)
boxplot(kralici.s$hodnoty ~ kralici.s$skupina)
# the data contain outliers
# +
# Remove the outliers (replace them with NA)
pom = boxplot(kralici.s$hodnoty ~ kralici.s$skupina, plot = FALSE)
pom$out
kralici.s$hodnoty.bez = kralici.s$hodnoty
kralici.s$hodnoty.bez[kralici.s$hodnoty.bez %in% pom$out]=NA
# Box plot of the cleaned data
boxplot(kralici.s$hodnoty.bez ~ kralici.s$skupina)
# +
library(dplyr)
kralici.s %>% group_by(skupina) %>% 
    summarise(norm.pval = shapiro.test(hodnoty.bez)$p.value)
# At the 5% significance level we do not reject normality.
# +
# Information needed to set the rounding precision
# FIX: use is.na(), not is.nan() — the removed outliers were set to NA and
# is.nan(NA) is FALSE in R, so !is.nan() counted the missing values too
# and over-reported the group sizes.
kralici.s %>% group_by(skupina) %>% 
    summarize(len = sum(!is.na(hodnoty.bez)),
              sd = sd(hodnoty.bez, na.rm = TRUE))
# round sd to 2 significant digits
# round sd and location measures to hundredths (unified across breeds)
# +
# Checking equality of variances
s2 = kralici.s %>% group_by(skupina) %>% 
    summarize(var = sd(hodnoty.bez, na.rm = TRUE)^2)
s2
max(s2$var)/min(s2$var)
# From the box plot and the largest/smallest variance ratio (close to 2,
# but sample sizes < 30) it is hard to judge whether equal variances can
# be assumed. A formal test decides.
# +
# Normality was not rejected -> Bartlett test
bartlett.test(kralici.s$hodnoty.bez ~ kralici.s$skupina)
# At the 5% significance level we cannot reject equality of variances
# (Bartlett test, x_OBS = 3.1, df = 2, p-value = 0.217).
# +
# We compare means of independent samples from normal distributions
# with equal variances -> ANOVA
# aov() requires data in the standard (long) format
vysledky = aov(kralici.s$hodnoty.bez ~ kralici.s$skupina)
summary(vysledky)
# At the 5% significance level we reject equality of means
# (p-value<<0.001, ANOVA) -> multiple comparisons
# -
# post-hoc analysis
TukeyHSD(vysledky)
# +
# computing group effects
library(dplyr)
# overall mean
prumer_vsech = mean(kralici.s$hodnoty.bez, na.rm = TRUE)
prumer_vsech
# group means
efekty = kralici.s %>% group_by(skupina) %>% 
    summarize(mean_skup = mean(hodnoty.bez, na.rm = TRUE))
# effects
efekty$efekt = efekty$mean_skup - prumer_vsech
# print sorted descending
efekty %>% arrange(desc(efekt))
# -
# ## Příklad 4.
# Soutěž o nejlepší jakost výrobků obeslali čtyři výrobci A, B, C, D celkem 66 výrobky. Porota sestavila pořadí (uvedeno pouze pořadí výrobku od nejlepšího k nejhoršímu), jež je uvedené v souboru jakost.xls. Na základě uvedených údajů posuďte, zda původ výrobků má vliv na jeho jakost.
jakost.s = readxl::read_excel("data/testy_vicevyberove.xlsx", sheet = 3)
colnames(jakost.s) = c("poradi", "skupina") # rename columns
head(jakost.s)
# data are already in the standard (long) format
# +
boxplot(jakost.s$poradi ~ jakost.s$skupina)
# Checking normality is pointless here - these are discrete rank data
# +
# Information needed to set the rounding precision
jakost.s %>% group_by(skupina) %>% 
    summarize(len = length(poradi),
              sd = sd(poradi))
# round sd to 2 significant digits
# round sd and location measures to whole numbers
# +
# Checking equality of variances
s2 = jakost.s %>% group_by(skupina) %>% summarize(var = sd(poradi)^2)
s2
max(s2$var)/min(s2$var)
# From the box plot and the largest/smallest variance ratio (<2) equal
# variances can be assumed.
# (the Kruskal-Wallis test has more power when the data are homoskedastic)
# -
# These are rank data, normality is not meaningful -> Levene test
car::leveneTest(jakost.s$poradi ~ jakost.s$skupina)
# At the 5% significance level we cannot reject equality of variances
# (Levene test, x_OBS = 0.4, df_num = 3, df_denom = 62, p-value = 0.750)
# +
# Checking symmetry
jakost.s %>% group_by(skupina) %>% 
    summarize(sikmost = moments::skewness(poradi),
              test.pval = lawstat::symmetry.test(poradi,boot=FALSE)$p.value)
# +
# We compare medians of independent samples -> Kruskal-Wallis test
kruskal.test(jakost.s$poradi ~ jakost.s$skupina)
# At the 5% significance level we cannot reject equality of medians
# (Kruskal-Wallis test, x_OBS = 3.7, df = 3, p-value=0.295).
# I.e. there are no statistically significant differences between the
# producers (in terms of the products' ranking in the contest).
# -
# ## Příklad 5.
# Byl sledován vliv tří preparátů na srážlivost krve. Kromě jiných ukazatelů byl zjišťován tzv. trombinový čas. Údaje o 42 sledovaných osobách jsou zaznamenány v souboru trombin.xls. Závisí velikost trombinového času na tom, jaký byl použit preparát?
# +
trombin.s = readxl::read_excel("data/testy_vicevyberove.xlsx",
                               sheet=4, skip = 1)
colnames(trombin.s) = c("hodnoty","skupina") # rename columns
head(trombin.s)
# data are already in the standard (long) format
# -
# exploratory analysis - check for outliers
boxplot(trombin.s$hodnoty ~ trombin.s$skupina)
# no outliers present
# +
# checking normality
library(dplyr)
trombin.s %>% group_by(skupina) %>% 
    summarize(norm.pval = shapiro.test(hodnoty)$p.value)
# At the 5% significance level we reject normality (for group A)
# +
# Information needed to set the rounding precision
trombin.s %>% group_by(skupina) %>% 
    summarize(len = length(hodnoty), stdev = sd(hodnoty))
# round sd to 2 significant digits
# round sd and location measures to hundredths (unified across groups)
# +
# Checking equality of variances (not strictly needed - KW is used anyway)
s2 = trombin.s %>% group_by(skupina) %>% 
    summarize(var = sd(hodnoty)^2)
s2
max(s2$var)/min(s2$var)
# From the box plot and the largest/smallest variance ratio (>>2) equal
# variances cannot be assumed.
# +
# Normality was rejected -> Levene test
#trombin.s$skupina = as.factor(trombin.s$skupina)
car::leveneTest(trombin.s$hodnoty ~ trombin.s$skupina)
# homoskedasticity was rejected
# -
# Checking symmetry
trombin.s %>% group_by(skupina) %>% 
    summarize(sikmost = moments::skewness(hodnoty),
              test.pval = lawstat::symmetry.test(hodnoty, boot=FALSE)$p.value)
# we do not reject symmetry of the data
# +
# We compare medians of independent non-normal samples
# -> Kruskal-Wallis test
kruskal.test(trombin.s$hodnoty,trombin.s$skupina)
# At the 5% significance level we reject equality of medians.
# I.e. thrombin time is significantly affected by the preparation.
# -> multiple comparisons
# +
# altp = TRUE reports p-values meant to be compared directly with alpha
# (default altp = FALSE: compare against alpha/2 instead)
dunn.test::dunn.test(trombin.s$hodnoty,trombin.s$skupina,
                     method = "bonferroni", altp = TRUE)
# +
# computing group effects
library(dplyr)
# overall median
median_vsech = median(trombin.s$hodnoty)
median_vsech
# group medians
efekty = trombin.s %>% group_by(skupina) %>% 
    summarize(median_skup = median(hodnoty))
# effects
efekty$efekt = efekty$median_skup - median_vsech
# print sorted descending
efekty %>% arrange(desc(efekt))
# -
# ## Příklad 6. (více skupin)
# Co se Sněhurka dostala k sedmi trpaslíkům vycítila příležitost nemalého výdělku. Trpaslíci Sněhurce v podstatě zobou z ruky a veškeré vydolované zlato jí ihned předávají. Sněhurce však ani toto úplně nestačí a má pocit, že by mohla z trpaslíků benefitovat více. Proto si začla zaznamenávat kolik kilogramů zlata denně od každého z trpaslíků obdrží (snehurka.xlsx). Ověřte, zda se trpaslíci liší v množství vytěženého zlata, pokud ano sestave homogenní skupiny z hlediska vytěženého zlata.
zlato = readxl::read_excel("data/snehurka.xlsx")
head(zlato)
# data jsou ve standardním dtovém formátu
boxplot(zlato$hodnota ~ zlato$typ)
# data neobsahují OP
# +
# ověření normality
library(dplyr)
zlato %>% group_by(typ) %>%
summarize(p.hodnota = shapiro.test(hodnota)$p.value)
# Na hladině významnosti 0,05 ne zamítáme předpoklad normality
# +
# Předpoklad normality nebyl zamítnut -> Bartlettův test
bartlett.test(zlato$hodnota ~ zlato$typ)
# Na hladině významnosti 0,05 nelze zamítnout předpoklad o shodě rozptylů
# -
# ANOVA
vysledky = aov(zlato$hodnota ~ zlato$typ)
summary(vysledky)
# Zamítáme předpoklad o shodě
# -> existují stat. významné rozdíly ve středních hodnotách
# POST-HOC
res = TukeyHSD(vysledky)[[1]]
res
# +
# počítání efektů
library(dplyr)
# celkový průměr
prumer_vsech = mean(zlato$hodnota)
prumer_vsech
# průměry ve skupinách
efekty = zlato %>% group_by(typ) %>%
summarize(mean_skup = mean(hodnota))
# efekty
efekty$efekt = efekty$mean_skup - prumer_vsech
# vypsat setřízené
efekty.s = efekty %>% arrange(desc(efekt))
efekty.s
# -
| CV12/cv12.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .groovy
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Groovy
// language: groovy
// name: groovy
// ---
// + deletable=true editable=true
import com.twosigma.beaker.jvm.object.OutputContainer;
// An OutputContainer groups several items into a single notebook output
def o = new OutputContainer()
o.addItem("simplest example")
o << [2, 3, 5, 7] // shorter syntax for addItem
// + deletable=true editable=true
import com.twosigma.beaker.fileloader.CsvPlotReader
// read the interest-rate CSV and build three plots plus a table from it
def rates = new CsvPlotReader().readAsList("tableRows.csv")
def c = new Color(120, 120, 120, 100)  // translucent grey for the history lines
plot1 = new Plot(initWidth: 300, initHeight: 400) \
    << new Points(x:rates.y1, y:rates.y30, size: 3, displayName:"y1 vs y30") \
    << new Points(x:rates.m3, y:rates.y5, size: 3, displayName:"m3 vs y5") \
    << new Line(x:rates.m3, y:rates.y5, color: c) \
    << new Line(x:rates.y1, y:rates.y30, color: c)
plot2 = new SimpleTimePlot(rates, ["m3", "y1"], showLegend:false, initWidth: 300, initHeight: 400)
plot3 = new SimpleTimePlot(rates, ["y5", "y10"], showLegend:false, initWidth: 300, initHeight: 400)
table = rates[0]  // first CSV row, rendered as a table
"ok"
// + deletable=true editable=true
import com.twosigma.beaker.jvm.object.OutputContainer;
// default layout manager: items are simply stacked vertically
new OutputContainer() << plot1 << plot2 << table
// + deletable=true editable=true
import com.twosigma.beaker.jvm.object.TabbedOutputContainerLayoutManager;
import com.twosigma.beaker.jvm.object.OutputContainer;
// tabbed layout: each added item is shown in its own named tab
def l = new TabbedOutputContainerLayoutManager()
l.setBorderDisplayed(false)
def o = new OutputContainer()
o.setLayoutManager(l)
o.addItem(plot1, "Scatter with History")
o.addItem(plot2, "Short Term")
o.addItem(plot3, "Long Term")
o.addItem(table, "1990/01")
o.addItem([2, 3, 5, 7],"array")
o
// + deletable=true editable=true
| demoFiles/OutputContainersAndLayoutManagers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt # this is for visualization
import seaborn as sns # for visualization
# %matplotlib inline
import statsmodels.formula.api as sm
import scipy, scipy.stats
import math # log function is inside this library
from pandas import Series, DataFrame
from collections import Counter
# load the suicide-statistics dataset and take a first look at it
df = pd.read_csv("Suicide data set.csv")
df.head()
df.columns
df.info()
df.isnull().sum()
# # Section-A
# #### Ques1. How many categorical variables does the data contain? Please state the number of categories for each such variable.
# dtype of every column, shown as a one-column DataFrame
df1 = DataFrame(df.dtypes)
df1
# Country, sex, age, country-year, gdp_for_year($), generation are the categorical variables, as the dtypes show.
len(df['country'].value_counts()) #101 countries are present in the data
df['sex'].value_counts() #2 categories: Male, Female
df['age'].value_counts() #6 age categories
df['country-year'].value_counts() #2321 categories for country-year
df['generation'].value_counts() #6 categories for generation
# #### Ques2. How strong is the correlation between HDI and suicides_no?
import seaborn as sns
# NOTE(review): df.corr() on a frame with non-numeric columns relies on older
# pandas behaviour (silently dropping them); newer pandas needs numeric_only=True
sns.heatmap(df.corr(),annot=True)
plt.show()
# There is a weak positive correlation between HDI for year and suicides_no, equal to 0.15
# #### Ques4. Which generation has the highest number of suicides/100k pop?
# sum the rate per generation; .count() would only tally the number of records
df.groupby(['generation'])['suicides/100k pop'].sum()
# #### Ques5. Which country has the least number of suicides between 1990-1995?
# boolean mask for the requested period (inclusive of both endpoints)
year_req = (df['year'] >= 1990) & (df['year'] <= 1995)
# filter with the mask first; grouping by the name 'year_req' (a local
# variable, not a column of df) raised a KeyError in the original code
df[year_req].groupby('country')['suicides_no'].sum().sort_values()
# # Section B
# #### Ques1. Are there any countries with no suicides recorded? Create a new data frame which ranks these countries by HDI.
# boolean mask marking records with zero recorded suicides
No_suicides = df['suicides_no']==0
# filter with the mask, then aggregate per country; the original expression
# df.groupby(['country'])(...) called the GroupBy object itself, which
# raises a TypeError (and indexing a GroupBy with a mask raises a KeyError)
df[No_suicides].groupby('country')['suicides_no'].count()
# #### Ques2. Generate suitable graphs for comparing suicides between men and women for the top 5 countries with the highest suicide rate per 100,000.
# #### Ques3. Are there any redundant columns in the dataset? Which column is it? Can that column be dropped? State your reasons.
# number of missing values per column
df.isnull().sum()
# 'HDI for year' could be dropped because it is float-typed and has 19456 missing values.
# #### Ques4. Please obtain the distribution of suicides for each age group for Argentina. Plot these as graphs
def f(x):
    """Summarize a suicide-data frame grouped (or not) by any key.

    Parameters
    ----------
    x : pandas.DataFrame
        Must contain a 'suicides_no' column.

    Returns
    -------
    pandas.Series
        Number of records and the total number of suicides.

    Note: the original body referenced 'ID' and 'Magnitude' columns copied
    from an earthquake exercise; those columns do not exist in this dataset,
    so f(df) raised a KeyError.
    """
    return Series(dict(Number_of_records = x['suicides_no'].count(),
                       Total_suicides = x['suicides_no'].sum()
                       ))
# apply the summary helper to the whole frame
# NOTE(review): with the original body of f (which references 'ID' and
# 'Magnitude') this call raises a KeyError - those columns are not in df
f(df)
# number of records per (country, suicides_no, age) combination
suicide_dist = df.groupby(['country','suicides_no','age'])['age'].count()
suicide_dist
# distribution for Argentina only, as a bar chart
suicide_dist['Argentina'].plot(kind = 'bar')
# #### Ques9. Generate a correlation heatmap for the dataset. Which pairs of variables are highly correlated.
import seaborn as sns
# heatmap of the pairwise correlations between the numeric columns
sns.heatmap(df.corr(),annot=True)
plt.show()
# Population and suicide number = 0.62, HDI for year and gdp_per_capita = 0.77. These are the two most highly correlated pairs.
# # Section-C
# #### Ques10. Generate the following tables:
# a) A table containing the columns ‘Country’, ‘Year’, ‘Total suicides’. Total Suicides has to be calculated from the existing table.
# aggregate suicides_no with sum: a plain crosstab would only count records
table=pd.crosstab(df.country, df.year, values=df['suicides_no'], aggfunc='sum')
table
# b) A Table containing the columns ‘Country’, ‘Year’, ‘per capita gdp’.
# +
# The column name contains spaces and '$', so attribute access
# (df.gdp_per_capita($)) was a SyntaxError - use bracket indexing.
# crosstab with values= also requires an aggfunc.
# NOTE(review): column name assumed to be 'gdp_per_capita ($)' - confirm
# against df.columns.
table=pd.crosstab(df.country, df.year, values=df['gdp_per_capita ($)'], aggfunc='mean')
table
# -
df['gdp_per_capita ($)']
| EDA_EXAM/EDA_FE.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// ## Table of contents
//
// * [Find out the version info of the underlying JDK/JVM on which this notebook is running](#Find-out-the-version-info-of-the-underlying-JDK/JVM-on-which-this-notebook-is-running)
// * [Valohai command-line client](#Valohai-command-line-client)
// * [Set up project using the vh client](#Set-up-project-using-the-vh-client)
// * Java bindings (Java API) via Valohai client
// * [Language Detector API](#Language-Detector-API)
// * [Sentence Detection API](#Sentence-Detection-API)
// * [Tokenizer API](#Tokenizer-API)
// * [Name Finder API](#Name-Finder-API)
// * [More Name Finder API examples](#More-Name-Finder-API-examples)
// * [Parts of speech (POS) Tagger API](#Parts-of-speech-(POS)-Tagger-API)
// * [Chunking API](#Chunking-API)
// * [Parsing API](#Parsing-API)
// ### Find out the version info of the underlying JDK/JVM on which this notebook is running
// Print version properties of the JDK the notebook kernel is running on
System.out.println("java.version: " + System.getProperty("java.version"));
System.out.println("java.specification.version: " + System.getProperty("java.specification.version"));
System.out.println("java.runtime.version: " + System.getProperty("java.runtime.version"));
// +
import java.lang.management.ManagementFactory;
// the runtime MX bean exposes the VM version of the actual running JVM
System.out.println("java runtime VM version: " + ManagementFactory.getRuntimeMXBean().getVmVersion());
// -
// Return to [Table of contents](#Table-of-contents)
// ### Valohai command-line client
//
// The container comes with the VH client installed, so you won't need to do anything. Above all the shell scripts in the container encapsulates a few of the VH client functionalities for ease of use.
//
// If you still would like to use the VH client, make sure you use the VALOHAI_TOKEN variable each time wherever it involves authentication, for e.g.
//
// ```
// $ vh --valohai-token ${VALOHAI_TOKEN} [...your commands and options...]
// ```
//
// For more details, see [CLI usage docs](https://docs.valohai.com/valohai-cli/index.html) on [Valohai.com]().
// %system vh --help
// ### Set up project using a shell script (internally uses the vh client)
// _Your Valohai token must have been provided (and set) during startup of the container. Without it the rest of the commands in the notebook may not work. The commands below expect it and will not run successfully when it is not set in the environment._
// +
// Please execute this cell only once, check your Valohai dashoard for presence of the project
// %system ./create-project.sh nlp-java-jvm-example
// -
// ### Language Detector API
// ##### Show a simple example detecting a language of a sentence using a Language detecting model called langdetect-183.bin on a remote instance (powered by Valohai), from within the notebook cell using cell magic!
// %system ./exec-step.sh "detect-language" "Another sentence"
// %system ./watch-execution.sh 54
// ^^^ this number is returned by the previous action, see above cell
// %system ./show-final-result.sh 54
// ^^^ this is returned by the action, see two cells above
// Check out https://www.apache.org/dist/opennlp/models/langdetect/1.8.3/README.txt to find out what each of the two-letter language indicators mean.
// **Apparently it detects this to be Latin, instead of English
// maybe the language detecting model needs more training.
// See https://opennlp.apache.org/docs/1.9.1/manual/opennlp.html#tools.langdetect.training on how this can be achieved**
// Return to [Table of contents](#Table-of-contents)
// ### Sentence Detection API
//
// ##### Show a simple example detecting sentences using a Sentence detecting model called en-sent.bin on a remote instance (powered by Valohai), from within the notebook cell using cell magic!
// %system ./exec-step.sh "detect-sentence" "Yet another sentence. And some other sentence."
// %system ./watch-execution.sh 55
// ^^^ this number is returned by the previous action, see above cell
// %system ./show-final-result.sh 55
// ^^^ this is returned by the action, see two cells above
// **As you can see the two ways to use the SentenceDetect API to detect sentences in a piece of text.**
// Return to [Table of contents](#Table-of-contents)
// ### Tokenizer API
// ##### Show a simple example of tokenization of a sentence using a Tokenizer model called en-token.bin on a remote instance (powered by Valohai), from within the notebook cell using cell magic!
// %system ./exec-step.sh "tokenize" "Yes please tokenize this sentence."
// %system ./watch-execution.sh 56
// ^^^ this number is returned by the previous action, see above cell
// %system ./show-final-result.sh 56
// ^^^ this is returned by the action, see two cells above
// Return to [Table of contents](#Table-of-contents)
// ### Name Finder API
// ##### Show a simple example of tokenization of a sentence using a Tokenizer model called en-token.bin on a remote instance (powered by Valohai), from within the notebook cell using cell magic!
// %system ./exec-step.sh "name-finder-person" "My name is John. And his name is Pierre."
// %system ./watch-execution.sh 58
// ^^^ this number is returned by the previous action, see above cell
// %system ./show-final-result.sh 58
// ^^^ this is returned by the action, see two cells above
// **As you can see above, it has detected the name of the person in both sentences**
// Return to [Table of contents](#Table-of-contents)
// ### More Name Finder API examples
//
// There are a handful more Name Finder related models i.e.
//
// - Name Finder Date
// - Name Finder Location
// - Name Finder Money
// - Name Finder Organization
// - Name Finder Percentage
// - Name Finder Time
//
// Their model names go by these names respectively:
//
// - en-ner-date.bin
// - en-ner-location.bin
// - en-ner-money.bin
// - en-ner-organization.bin
// - en-ner-percentage.bin
// - en-ner-time.bin
//
// and can be found at the same location all other models are found at, i.e. http://opennlp.sourceforge.net/models-1.5/
// Return to [Table of contents](#Table-of-contents)
// ### Parts of speech (POS) Tagger API
// ##### Show a simple example of Parts of speech tagger on a sentence using a PoS Tagger model called en-pos-maxent.bin on a remote instance (powered by Valohai), from within the notebook cell using cell magic!
// %system ./exec-step.sh "pos-tagger" "Tag this sentence word by word."
// %system ./watch-execution.sh 59
// ^^^ this number is returned by the previous action, see above cell
// %system ./show-final-result.sh 59
// ^^^ this is returned by the action, see two cells above
// Check out https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
// to find out what each of the tags mean
// Return to [Table of contents](#Table-of-contents)
// ### Chunking API
// ##### Show a simple example of chunking on a sentence using a Chunker model called en-chunker.bin on a remote instance (powered by Valohai), from within the notebook cell using cell magic!
// %system ./exec-step.sh "chunker"
// %system ./watch-execution.sh 60
// ^^^ this number is returned by the previous action, see above cell
// %system ./show-final-result.sh 60
// ^^^ this is returned by the action, see two cells above
// Check out https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
// to find out what each of the tags mean
// Return to [Table of contents](#Table-of-contents)
// ### Parsing API
// ##### Show a simple example of parsing chunked sentences using a Parser Chunker model called en-parser-chunking.bin on a remote instance (powered by Valohai), from within the notebook cell using cell magic!
// %system ./exec-step.sh "parser" "Another not so quick brown fox jumps over the lazy dog."
// %system ./watch-execution.sh 62
// ^^^ this number is returned by the previous action, see above cell
// %system ./show-final-result.sh 62
// ^^^ this is returned by the action, see two cells above
// Check out https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html
// to find out what each of the tags mean
// Return to [Table of contents](#Table-of-contents)
// ### For more resources please refer to [Apache OpenNLP README](https://github.com/neomatrix369/nlp-java-jvm-example/blob/master/images/java/opennlp/README.md) and [Apache OpenNLP Resources](https://github.com/neomatrix369/nlp-java-jvm-example/blob/master/images/java/opennlp/README.md#resources).
| shared/notebooks/MyNextJupyterNLPJavaNotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="7JMp8K0iJPr3"
# # Technical task - BBC data set
#
# We are going to build a classifier that categorize BBC articles into 5 different topics/classes: Politics, business, sport, entertainment and techology. We will use 2225 documents from the BBC news website to train and test an algorithm.
#
# The data can be downloaded from this website: http://mlg.ucd.ie/datasets/bbc.html We are going to use the Dataset: BBC in raw format.
#
# Firstly we will prepare the data to be ingested by the ML algorithm, afterwords we will train the models and finally we will evaluate the performance.
#
# We have also explored the use of LDA for the extraction of natural topics in this dataset.
# + [markdown] id="TxXfdWMAJPr4"
# # 1. Data preparation
# + id="-9-5kSmAJkkR"
#If not installed uncomment
# #!pip install spacy
# + colab={"base_uri": "https://localhost:8080/"} id="u6JKcaCFLwe0" outputId="b2670955-26eb-401d-b68c-318d761ee1b1"
# !python -m spacy download en_core_web_sm
# + colab={"base_uri": "https://localhost:8080/"} id="kRjhVObcO5cR" outputId="c25b9fe1-caff-4eeb-89e2-d72c2aab64c1"
# mount Google Drive so the notebook can read files stored there
from google.colab import drive
drive.mount('/content/drive')
# -
# extract the raw BBC dataset into the working directory
from zipfile import ZipFile
with ZipFile('bbc-fulltext.zip', 'r') as zipObj:
    zipObj.extractall()
# + id="oxAYZiQwJPr5"
import spacy
import os
import pandas as pd
# NOTE(review): a __future__ import after other imports in the same cell is
# a SyntaxError (and is a no-op on Python 3 anyway) - confirm this cell runs
from __future__ import division
# + colab={"base_uri": "https://localhost:8080/"} id="NR_9Ionoez2T" outputId="a83f39a8-5ac1-4c9b-d3e3-831a14474e2e"
# smoke test: sentence segmentation with the small English model
nlp = spacy.load('en_core_web_sm')
text = u"This is first sentence. Second sentence. Third sentence."
text_sentences = nlp(text)
for sentence in text_sentences.sents:
    print(sentence.text)
# + colab={"base_uri": "https://localhost:8080/"} id="wfH-MtFtjb-O" outputId="01acea7a-0ed3-4e17-b723-1ce22116c09d"
# token-level attributes: text, lemma, stop-word flag, part of speech
doc = nlp(u'I am Clara and this is the NLP class')
for token in doc:
    print(token.text, token.lemma_, token.is_stop, token.pos_)
# + id="D-yB0bw_JPr7"
# function that walks the directory tree containing the article files, reads
# each .txt file, derives the topic/class from the name of its folder
# and returns everything as a DataFrame
def extract_articles(path):
    """Read every .txt article under *path* into a DataFrame.

    Parameters
    ----------
    path : str
        Root directory; each article's category is the name of the folder
        that directly contains it (e.g. bbc/sport/001.txt -> 'sport').

    Returns
    -------
    pandas.DataFrame
        Columns 'category' and 'article' (file content with newlines
        removed and lines joined by single spaces).
    """
    cat_article = []
    for subdir, dirs, files in os.walk(path):
        print(subdir)  # progress feedback while walking the tree
        for file in files:
            if file.endswith('.txt'):
                # os.path.basename is portable, unlike splitting on '/'
                category = os.path.basename(subdir)
                # 'with' guarantees the file is closed even if reading fails
                with open(os.path.join(subdir, file), 'r',
                          encoding='utf-8', errors='ignore') as f:
                    lines = f.readlines()
                lines = ' '.join(lines).replace('\n', '')
                # list of lists: [category, article]
                cat_article.append([category, lines])
    # convert the list of [category, article] pairs into a pandas dataframe
    data = pd.DataFrame(cat_article)
    data.columns = ['category', 'article']
    return data
# + colab={"base_uri": "https://localhost:8080/", "height": 461} id="NiXpmkt_JPr8" outputId="906563cc-3937-4dcb-df6e-5a51a887473d"
# load the full BBC corpus (2225 articles across 5 topic folders)
#data=extract_articles('/content/drive/MyDrive/BBC_articles/articles')
data = extract_articles('./bbc')
data.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="xiT3EQLJJPr-" outputId="70e04bdb-c701-424c-82bf-65cd0afbbd09"
# number of articles per class (the classes are fairly balanced)
data.groupby('category').size()
# + [markdown] id="6Esv88UAJPr9"
# Number of articles per class. The number of samples per class is quite balanced
# + [markdown] id="9XFqnJriJPr-"
# We are going to use a library for NLP called spacy.
# We need to convert the text into unicode format to be used by the library.
# + id="NiSB_9mvJPr_"
# Files were already read as str (Python 3), so only decode raw bytes;
# calling .decode() on str raised AttributeError under the Python 3 kernel.
data['article'] = data['article'].apply(lambda x: x.decode('utf-8', 'ignore') if isinstance(x, bytes) else x)
# + id="pyvNyTmZjBcy"
# show full (untruncated) cell contents; -1 is deprecated, None is the
# supported spelling of "no limit"
pd.set_option('display.max_colwidth', None)
# + id="_VBKiJ2cJPr_"
# we load the English language model for the spacy library and define
# functions below that tokenize/lemmatize the articles and remove stop words.
# spacy.load('en') relied on a shortcut link removed in spaCy v3; load the
# model that was downloaded at the top of the notebook instead.
nlp = spacy.load('en_core_web_sm')
def punct_space(token):
    """Return a truthy value when *token* is pure punctuation or whitespace."""
    if token.is_punct:
        return True
    return token.is_space
def lemmatize(doc):
    """Tokenize *doc*, lemmatize it and drop punctuation, whitespace,
    pronoun lemmas and stop words; return one space-joined string.
    """
    kept = []
    for token in nlp(doc):
        # skip pure punctuation / whitespace tokens
        if punct_space(token):
            continue
        # spaCy lemmatizes pronouns to the placeholder '-PRON-'
        if token.lemma_ == '-PRON-':
            continue
        # skip English stop words
        if nlp.vocab[token.text].is_stop:
            continue
        kept.append(token.lemma_)
    # write the transformed text back as a single string
    return u' '.join(kept)
# + id="3bJs1sBXJPsA"
# we apply the lemmatization to all articles (slow: runs the full spaCy
# pipeline once per document)
data['article_lemmatized']=data.article.map(lemmatize)
# + id="asNQ9IoVJPsA" outputId="b672d948-f77e-4b2c-897f-6a9a667fc3be"
# Types of categories
# print was a Python 2 statement; the notebook kernel is Python 3
print("categories: \n", set(data.category))
# + id="LjctVZa0JPsA" outputId="f84b6c25-a301-45a5-ca29-5ee31b7ac6c5"
# visual check of the cleaned text next to its label
data[['category','article_lemmatized']].head(10)
# + [markdown] id="ZhoQuK32JPsB"
# Split the data into train and test sets for the machine learning algorithm
# + id="2VqnwTH7JPsB"
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import train_test_split
# + colab={"base_uri": "https://localhost:8080/"} id="MDcZ2x4-lwnV" outputId="432867fb-3a05-4e62-de4e-63c65f06737f"
data['article_lemmatized'].shape
# + colab={"base_uri": "https://localhost:8080/"} id="I0xmVHfVJPsB" outputId="9b817fc7-30b5-452d-d0b9-1db11aef997a"
# hold out 40% of the articles for testing; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(data['article_lemmatized'], data['category'], test_size=0.4, random_state=42)
# print was a Python 2 statement; the notebook kernel is Python 3
print("size of training", len(X_train))
print("size of test", len(X_test))
# + [markdown] id="JQ3pPuDfJPsB"
# We need to convert the text into a format that can be ingested by the algorithm. We use CountVectorizer to convert the collection of articles to a matrix of token counts. It will produce a matrix in compressed sparse row format.
# + id="TKqKLPVIJPsC"
# drop English stop words and words seen in fewer than 3 training documents
cvec = CountVectorizer(stop_words='english', min_df=3)
# learn the vocabulary on the training set and build its count matrix
cvec.fit(X_train)
cvec_counts_train = cvec.transform(X_train)
# reuse the same (training) vocabulary to build the test matrix
cvec_counts_test=cvec.transform(X_test)
# + [markdown] id="k7L4VPsDJPsC"
# Some info about the matrix of training set in compressed sparse row format
# + colab={"base_uri": "https://localhost:8080/"} id="zJ6VlYB8JPsC" outputId="56e293c1-0563-4667-bdc5-ecf86f240ad6"
# print was Python 2 syntax; the notebook kernel is Python 3
print('sparse matrix shape:', cvec_counts_train.shape)
print('nonzero count:', cvec_counts_train.nnz)
print('sparsity: %.2f%%' % (100.0 * cvec_counts_train.nnz / (cvec_counts_train.shape[0] * cvec_counts_train.shape[1])))
# + [markdown] id="ad7IbzukJPsD"
# Instead of using word frequency alone we use tf/idf metric, which represents how important a word is to a document in a collection of corpus. It penalises words that tend to appear in all documents.
# + id="EQcXp0mLJPsD"
tfidf_transformer = TfidfTransformer()
# fit the idf weights on the training counts only
X_train_tfidf = tfidf_transformer.fit_transform(cvec_counts_train)
# transform (not fit_transform) the test set: refitting on the test counts
# leaks test statistics and yields idf weights inconsistent with training
X_test_tfidf = tfidf_transformer.transform(cvec_counts_test)
# + [markdown] id="lyZJfWp5JPsD"
# Now the data is ready to be ingested by an ML algorithm
# + [markdown] id="9sFur9_sJPsE"
# # 2. Machine learning algorithm
# + [markdown] id="t_hmy-8OJPsE"
# The problem we are facing is a multi-class classification problem. We are going to try 2 ml algorithms: random forest and multinomial naive bayes.
# + [markdown] id="hP_nhSTxJPsE"
# # 2.1 Random forest:
# + [markdown] id="p-rzZGdLJPsF"
# Random forest classifier is an ensemble classification method that consists of training several decission trees
# on different subsamples of the data taking different subsets of features. The output class is selected by majority vote of the classes provided by all the trees.
# + id="65eWktkdJPsF"
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn import metrics
# training: fit a random forest on the tf-idf features
clf = RandomForestClassifier().fit(X_train_tfidf, y_train)
# testing: predict the category of each held-out article
predicted=clf.predict(X_test_tfidf)
# + [markdown] id="hCNBsbUBJPsG"
# How accurate is the model? We can calculate the global accuracy
# + colab={"base_uri": "https://localhost:8080/"} id="RPqQ-YHRJPsG" outputId="db8d08ba-d21b-42ce-ecd2-f9928602a508"
metrics.accuracy_score(y_test, predicted)
# + [markdown] id="9YY5d5NUJPsG"
# We compute also other metrics for each class to evaluate the model like precision, recall or f1-score. We see that the precision is high: 0.91 in average and we can observe in the table below the precision, recall and f1-score for each class, to evaluate how the model performs in classifying samples from each class.
# + colab={"base_uri": "https://localhost:8080/"} id="46DpzqxfJPsG" outputId="ee6a1f86-abc3-4f75-b968-ecac481903b5"
print(metrics.classification_report(y_test, predicted))
# + [markdown] id="MIX845iFJPsH"
# But this evaluation has been performed over one training and test data set chosen randomly. In order to be able to evaluate the performance of the model in a general basis we need to perform k-fold cross validation, which consists of partitioning the dataset in k subsets and using k-1 subsets for training and the one remaining for test and repeat this process until all the subsets has been used for testing.
# + [markdown] id="rFOfCj2IJPsH"
# In order to make the count_vectorizer, tfidf-transformer, classifier easier to work with,
# scikit-learn provides a Pipeline class that helps run all the process together:
# + id="1AmqDZIEJPsH"
from sklearn.pipeline import Pipeline
# one pipeline object: counts -> tf-idf -> random forest
text_clf = Pipeline([('vect', CountVectorizer(stop_words='english', min_df=3)),\
                     ('tfidf', TfidfTransformer()),\
                     ('clf', RandomForestClassifier())])
# + colab={"base_uri": "https://localhost:8080/"} id="5yiBuq0qJPsH" outputId="3b71790a-8d9b-448a-eb52-cf18c49482d7"
#Description of the steps of the pipeline
text_clf.steps
# + colab={"base_uri": "https://localhost:8080/"} id="v5mNP_KbJPsI" outputId="0ad176fe-7892-4c25-9dd6-23d62606ddc5"
text_clf.fit(X_train, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="eGa3o8QRJPsI" outputId="73291064-1f74-4789-cd00-dd41b2f77539"
from sklearn.model_selection import cross_val_score
# cross_val_score evaluates a score by cross-validation; k-folds = parameter cv
kfold_acc = cross_val_score(text_clf, data['article_lemmatized'], data['category'], cv=10,
                            scoring='accuracy')
# print was Python 2 syntax; the notebook kernel is Python 3
print("mean accuracy with 10-fold cross validation:", kfold_acc.mean())
# + [markdown] id="3p6c00wKJPsI"
# We can also extract what are the words that are more critical in differentiating the classes:
# + colab={"base_uri": "https://localhost:8080/", "height": 331} id="uvNNAhvfJPsI" outputId="e3df247b-423f-4b1f-dcc2-757bebd98b36"
# feature importances from the forest (step 2) paired with the vocabulary
# from the vectorizer (step 0)
feat_imp=list(text_clf.steps[2][1].feature_importances_)
# NOTE(review): get_feature_names was removed in scikit-learn 1.2 in favour
# of get_feature_names_out - confirm against the installed version
features=text_clf.steps[0][1].get_feature_names()
result=pd.DataFrame(feat_imp,features)
result.columns=(['feature importance'])
# %matplotlib inline
# bar chart of the 30 most discriminative words
result.sort_values(['feature importance'],ascending=False).head(30).plot(kind='bar')
# + [markdown] id="oGs6MkGFJPsJ"
# There are some parameters that affect the performance of random forest like the number of trees used, the number of predictors/features that random forest is allowed to try in each split (max_features), also the min_sample_leaf is the minimum number of samples required in one leaf. Leaf is the end node of a decision tree, a smaller leaf can make the model capture noise in train data.
# + [markdown] id="5-7dijl8JPsJ"
# Let's change the number of trees (n_estimators parameter) and see if the results improve:
# + colab={"base_uri": "https://localhost:8080/"} id="TeTu8dnCJPsJ" outputId="8d29c151-9c99-4349-938b-68b2ca59cdd2"
# same pipeline but with 100 trees instead of the default
text_clf2 = Pipeline([('vect', CountVectorizer(stop_words='english', min_df=3)),\
                      ('tfidf', TfidfTransformer()),\
                      ('clf', RandomForestClassifier(n_estimators=100))])
text_clf2.fit(X_train, y_train)
# cross_val_score evaluates a score by cross-validation; k-folds = parameter cv
kfold_acc2 = cross_val_score(text_clf2, data['article_lemmatized'], data['category'], cv=10,
                             scoring='accuracy')
# print was Python 2 syntax; the notebook kernel is Python 3
print("mean accuracy with 10-fold cross validation:", kfold_acc2.mean())
# + [markdown] id="zR8LEFvxJPsK"
# We observe that the performance has improved after changing the number of trees.
#
# + [markdown] id="4Rugrmm5JPsK"
# We can also optimize the performance of the model by using grid search. With grid search we can select different values for the parameters and the function will return the set of parameters that make the model perform better in the metric chosen. In the example above we choose accuracy.
# + id="DWywkLj1JPsK" outputId="b68550de-619c-4fd5-ef92-a9ca983cad34"
from sklearn.model_selection import GridSearchCV
import time  # 'time()' was called below without ever being imported

# use a full grid over all parameters
# NOTE(review): max_features='auto' was removed in scikit-learn 1.3 -
# confirm against the installed version
param_grid = {"clf__n_estimators": [100, 150],
              "clf__max_features": ['auto', 'sqrt', 0.2],
              "clf__min_samples_leaf": [2, 3, 50],
              "clf__criterion": ["gini", "entropy"]}
# run grid search over the pipeline defined above
grid_search = GridSearchCV(text_clf2, param_grid=param_grid, scoring='accuracy')
start = time.time()  # bare time() raised a NameError in the original
grid_search.fit(X_train, y_train)
# + id="WKhaXB5bJPsK" outputId="692c4fac-47c1-4b6d-e75b-c6b6544b8371"
# print was Python 2 syntax; the notebook kernel is Python 3
print('best parameters values:\n', grid_search.best_params_)
print()
# Mean cross-validated score of the best_estimator
print('best score:\n', grid_search.best_score_)
# + [markdown] id="mrwaH10uJPsL"
#
# What we can infere from the results obtained using random forest is that the features selected for the classification, in this case the word counts or their equivalent tf/idf values are clearly discriminant between the 5 classes.
#
#
# Although Random Forest is performing very well in this case, usually Random Forest does not necessarily work well with sparse matrix such as the ones used in text classification.
#
#
# + [markdown] id="498jKFgoJPsL"
# Another model that is frequently used in text classification is multinomial naive bayes. We will try it.
#
# + [markdown] id="AqVuNwSEJPsM"
# # 2.2 Multinomial naive bayes
# + [markdown] id="fskqvEQiJPsM"
# Naive Bayes is a probabilistic method that assumes independence between the features in our case the words in the vocabulary of the corpus of articles.
# + id="Xyvqg7NxJPsM"
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
#In order to make the vectorizer => transformer => classifier easier to work with,
#scikit-learn provides a Pipeline class that behaves like a compound classifier:
text_clf_nb = Pipeline([('vect', CountVectorizer(stop_words='english', min_df=3)),\
                        ('tfidf', TfidfTransformer()),\
                        ('clf', MultinomialNB())])
# + id="0PDgtP74JPsN"
# train the Naive Bayes pipeline and predict the held-out articles
text_clf_nb.fit(X_train, y_train)
predicted = text_clf_nb.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="rfqUorDgJPsN" outputId="535862f4-76bc-43f2-da1d-564910dc9c20"
print(metrics.classification_report(y_test, predicted))
# + id="ntyv1CivJPsN"
# Saving the model
# sklearn.externals.joblib was removed in scikit-learn 0.23; import joblib directly
import joblib
joblib.dump(text_clf_nb, 'text_clf_nb.pkl')
# + [markdown] id="d20ZtwXUJPsO"
# k-fold cross validation
# + id="zjkq3Ps0JPsO" outputId="3748d20e-3f1b-4945-f575-16f410dd8c7a"
#cross_val_score evaluates a score by cross-validation. k-folds= parameter cv
# 10-fold cross-validated accuracy of the full NB pipeline on the training data.
kfold_acc_nb = cross_val_score(text_clf_nb,X_train,y_train,cv=10,
scoring='accuracy')
# NOTE(review): Python 2 print-statement syntax.
print "mean accuracy with 10-fold cross validation:", kfold_acc_nb.mean()
# + [markdown] id="b7WQ4Y-GJPsO"
# We observe that the accuracy is similar to random forest, slightly higher with the multinomial naive bayes classifier (NBC). We can then assert that for this particular problem both methods perform really well.
#
# Something worth mentioning is that the speed of NBC is greater than RF, which can be of importance in case there is a need of escalating the problem to a larger number of articles.
#
# One of the things that we could have used is ngrams of 2 or 3 words as features for the model. To test this, tests have been performed adding the parameter ngram_range=(1,3) and ngram_range=(1,2) to the CountVectorizer function, but the performance did not improve.
#
# + [markdown] id="YZGZNCaYJPsO"
# # One step more of validation
# + [markdown] id="ghuJgohJJPsP"
# It is always good to validate the performance of a model with a validation dataset. We can obtain a validation dataset using the second dataset in http://mlg.ucd.ie/datasets/bbc.html that contains articles from BBC Sports labeled with sport categories.
#
# We can use this dataset to validate the model.
# + id="XfBQFqPbJPsP"
# Load the BBC Sports validation corpus (same folder layout as the training set).
sports=extract_articles('/content/drive/MyDrive/BBC_articles/bbcsport')
# + id="oB8-EB-IJPsP" outputId="6378ce12-57b0-4281-ad00-9d226496a084"
sports.head()
# + id="qhqV3PRDJPsP" outputId="510f74ef-68eb-4627-91e9-0ba13fb9d9ca"
# The labels here are sport sub-categories (athletics, cricket, ...), not 'sport'.
print 'sports categories:',set(sports.category)
# + [markdown] id="YVS8AUc4JPsP"
# Text cleaning
#
# + id="jtOoHxZfJPsQ"
# converting to unicode
# NOTE(review): .decode on the column values works because Python 2 str is
# bytes; on Python 3 the articles would already be str and this would fail.
sports['article'] = sports['article'].apply(lambda x: x.decode('utf-8','ignore'))
# lemmatization
sports['article_lemmatized']=sports.article.map(lemmatize)
# + [markdown] id="ftASabnEJPsQ"
# It could be that some sport articles in the training dataset are contained in the sports dataset. In order to avoid having articles in the validation dataset that have been used for training in the previous steps we exclude those articles that appear in both.
# + id="r6BpZe26JPsQ" outputId="84e25ceb-52b9-4516-b56d-a4d788b55f59"
print 'number of sports articles in the whole dataset:',data[data['category']=='sport'].shape[0]
print 'number of articles in the sports dataset',sports.shape[0]
# + id="NAAqT688JPsQ" outputId="ab6eff35-1deb-452e-a695-d07715ea18a7"
#concatenate the two dataframes and remove the duplicates
concat=pd.concat([data[data.category=='sport'][['category','article','article_lemmatized']],sports[['category','article','article_lemmatized']]])
# Keep rows that (a) are not duplicates of a training article and (b) come from
# the bbcsport dataset — its labels are sub-categories, never the literal 'sport',
# so category!='sport' selects only the validation-side rows.
unique_sport=concat[(concat.duplicated(subset=['article'])==False)&(concat.category!='sport')]
print 'number of articles not used in the training:',unique_sport.shape[0]
# + id="rEsqqCafJPsR" outputId="68511a53-180d-4d63-e1d2-175e8d2b6733"
#predict the class with the multinomial naive Bayes trained classifier
predicted=text_clf_nb.predict(unique_sport['article_lemmatized'])
#accuracy: number of articles labeled with sport
print 'accuracy of the model:',list(predicted).count('sport')/len(predicted)
# + [markdown] id="ulRCBnisJPsR"
# We can confirm that the model works well also for this validation dataset
# + [markdown] id="b7RP8m1eJPsR"
# # Example of prediction
# + id="0vd_eA0yJPsR" outputId="ecce18d9-23c5-4aab-f096-af87f55e313a"
#we select one article of the test set
ind1=X_test.index[400]
data.article[ind1]
# + id="zm9fHiY5JPsR" outputId="f0f46f54-e21e-466f-d540-5beb7cf6f152"
# NOTE(review): the raw article text is classified here, while the validation
# cell above fed the lemmatized text — confirm which form the pipeline was
# actually trained on.
class_=text_clf_nb.predict([data.article[ind1]])[0]
print "the predicted class is:",class_ , "\nand the true one was:",data['category'][ind1]
# + [markdown] id="t6BG5N-dJPsS"
# Another example:
# + id="Qmas0pZ7JPsS" outputId="fec3e437-d821-4186-f386-8ad1fa9aa78b"
#we select one article of the test set
ind1=X_test.index[800]
data.article[ind1]
# + id="8BygyHu7JPsS" outputId="da1079ef-829a-4333-d31b-f8892bc8de82"
class_=text_clf_nb.predict([data.article[ind1]])[0]
print "the predicted class is:",class_ , "\nand the true one was:",data['category'][ind1]
# + [markdown] id="cMiDUocBJPsS"
# # What else can we extract from this dataset?
# + [markdown] id="TEKdgU1PJPsT"
# We have built supervised learning models that learn to classify articles into predefined categories. Now we can try to extract natural topics within the articles. We explore now how the unsupervised learning algorithm LDA can help us. LDA is a generative probabilistic model that needs to know the number of topics a priori. It assigns a probability to each word in the corpus of documents to belong to a topic. This way we can find groups of words that tend to appear in certain groups of articles.
# + [markdown] id="PpB3qBX3JPsT"
# # LDA - topic modelling
# + id="ptF4Tni_JPsT"
import gensim
from gensim.corpora import Dictionary, MmCorpus
#from gensim.models.ldamulticore import LdaMulticore
from gensim.models.phrases import Phraser
from gensim.models import Phrases, LdaModel
#from gensim.models.word2vec import LineSentence
import pyLDAvis
# NOTE(review): newer pyLDAvis versions renamed this module to
# pyLDAvis.gensim_models.
import pyLDAvis.gensim
import warnings
# NOTE(review): cPickle is Python-2-only; on Python 3 use ``import pickle``.
import cPickle as pickle
# + id="D3PpunyDJPsT"
#The function Dictionary creates a dictionary of terms from the docs. It needs as input a list of list of tokens.
#We need to split the documents into lists of words
splitter = lambda x: x.split()
doc_list=data.article_lemmatized.apply(splitter)
# Phrases identifies multi word expressions co-ocurring in multiple docs (n-grams)
phrases = Phrases(doc_list)
# Phraser replace the n-grams found in the documents
# NOTE(review): ``bigrams`` is built but never applied to doc_list below, so
# the dictionary/corpus are built from unigrams only — confirm this is intended.
bigrams = Phraser(phrases)
# creation of the dictionary for LDA input
dic= Dictionary(doc_list)
# + id="dCGYABXLJPsT"
# Drop terms that appear in fewer than 1 document (i.e. keep everything;
# the no_above cap is commented out).
dic.filter_extremes(no_below=1)#, no_above=0.8)
# + [markdown] id="PnZIL62uJPsT"
# This is an exploratory phase so we select 20 topics.
# + id="ZwBdV_yGJPsU"
#use the dictionary to create a document-term matrix
corpus = [dic.doc2bow(doc) for doc in doc_list]
# Train a 20-topic LDA model over the whole corpus and persist it to disk.
lda_model = LdaModel(corpus, num_topics=20, id2word=dic, update_every=1, chunksize=1000, passes=50)
lda_model.save('lda_model')
# + id="ThH2Xwp5JPsU"
# load model in order not to retrain
lda_model = gensim.models.ldamodel.LdaModel.load('lda_model')
# + id="kTzkc7-oJPsU"
# Inspect the top words of one topic.
lda_model.show_topic(topicid=10)
# + id="iDS2z98hJPsU" outputId="cf1db271-2fa2-457a-9daf-4d76481c5251"
# we retrieve the 15 words with the highest probability to belong to each topic
topic_words=lda_model.print_topics(num_words=15)
topics=[]
for topic in topic_words:
    topics.append( topic[1].split('+'))
# One column per topic for side-by-side inspection.
pd.DataFrame(topics).transpose()
# + id="omP2koqEJPsU"
#We use the model to classify the articles into the topic with highest probability
def return_topic(article):
    """Return the id of the most probable LDA topic for a lemmatized article.

    Relies on the module-level ``dic`` (gensim Dictionary) and ``lda_model``
    (trained LdaModel) defined in the cells above.
    """
    article_bow = dic.doc2bow(article.split())
    # indexing the model with a bag-of-words yields (topic_id, probability) pairs
    article_lda = lda_model[article_bow]
    # NOTE: the original sorted with a Python-2-only tuple-unpacking lambda
    # (``lambda (topic_number, freq): -freq``), a SyntaxError on Python 3,
    # and contained a stray no-op ``article_lda`` expression statement.
    # max() by probability is equivalent to sort-descending-then-take-first.
    best_topic, _ = max(article_lda, key=lambda pair: pair[1])
    return best_topic
# + id="tqJB5vtvJPsV"
#new column with the topic
# Assign every article its most probable LDA topic id.
data['topic']=data.article_lemmatized.map(return_topic)
# + id="h2u2I9LsJPsV" outputId="a2600a7e-da71-417e-b171-77637a9d0226"
# Number of articles per topic
data.topic.value_counts()
# + id="vXkM1hTLJPsV"
# examples of topics that can be found within the general categories
# + [markdown] id="f8qzhLhYJPsV"
# 1 -> dollar, growth, deficit, economy, rise (business)
# 9 -> $,£, company, bank, firm (business)
# 3 -> mobile, phone (technology)
# 4 -> game, gaming (technology)
# 5 -> government, party, labour, election (politics)
# 18 -> mail, virus, software, security (technology)
# 8 -> film, music, star, award (entertainment)
# + id="N0jZGtucJPsW"
# number of articles per topic that belong to each of the general categories
# + id="fT6m6cUoJPsW" outputId="8e0ee01b-47ec-4f84-e45a-dcbca8785400"
data.groupby('topic').category.value_counts()
# + [markdown] id="dgqdfip3JPsW"
# Examples of interesting topics broken down by general category.
# + id="44cUlIiUJPsW" outputId="2697bab9-5a5e-441b-b9f7-e2a230ab7fe4"
# cybersecurity (mail, virus, user, site, security)
data.groupby('topic').category.value_counts()[18]
# + id="fPORJtu_JPsW" outputId="0103c4ea-a3f1-4909-e72a-d30be80c9435"
#telephony (mobile,phone, camera)
data.groupby('topic').category.value_counts()[3]
# + id="ZbOkastCJPsX" outputId="0f2432f5-fd1a-42e9-9166-23d686f4cbdf"
#gaming
data.groupby('topic').category.value_counts()[4]
# + id="888_pd5cJPsX" outputId="b5e3c8f1-de42-4608-a6a3-2eb3cf2d4835"
#market ($,£, company, bank, firm)
data.groupby('topic').category.value_counts()[9]
# + id="BPnfiWAeJPsX" outputId="c584065e-82d2-426b-9699-6ee7dfe6a-a81"
# business (dollar, growth, deficit, economy, rise)
data.groupby('topic').category.value_counts()[1]
# + id="uLJes1WZJPsY" outputId="3c6aea29-b58b-44b0-cac8-d7d3b55833f7"
#politics (government, party)
data.groupby('topic').category.value_counts()[5]
# + id="o1d0Eg72JPsY" outputId="da22aa61-e7c2-415c-c7d6-9310f3e2bdb5"
# sport1 (play, game, team)
data.groupby('topic').category.value_counts()[2]
# + id="A1EEyuEAJPsY" outputId="2e670742-fd5c-4ae9-d395-f99935ac7eeb"
#sport2 (win, final, champion)
data.groupby('topic').category.value_counts()[10]
# + id="Cmre7OxTJPsY" outputId="6c85df23-5371-4340-a1b9-cf12b7f75e93"
#entertainment (film, music, star, award)
data.groupby('topic').category.value_counts()[8]
# + [markdown] id="PiSeW9cKJPsY"
# We have performed an initial exploration of what could be achieved with LDA. We have found topics within the general categories that can be useful to classify the text at a more granular level. One of the drawbacks of this algorithm is the fact that it needs post-training human interpretation of the topics. Also the number of topics needs to be defined a priori, which is a common problem within the family of clustering algorithms.
#
#
# + id="54BPyq7EJPsZ" outputId="7b500735-90c7-4ab4-f49a-756ee5c35ffc"
import os
os.system('jupyter nbconvert --to html technical_task_clara_higuera-submission.ipynb')
# + id="wYzoa9_eJPsZ"
| AdvancedDataAnalysis/Session5 NLP/technical_task_BTS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
##_____________________________
def get_ds_infos():
    """Read data_subjects_info.csv and return it as a 2-D float array.

    Columns: 0:Code, 1:Weight, 2:Height, 3:Age, 4:Gender.
    The textual header row (which genfromtxt parses as NaNs) is dropped.
    """
    subject_info = np.genfromtxt("data_subjects_info.csv", delimiter=',')[1:]
    print("----> Data subjects information is imported.")
    return subject_info
##____________
def creat_time_series(num_features, num_act_labels, num_gen_labels, label_codes, trial_codes):
    """Build labeled train/test time-series matrices from the MotionSense CSVs.

    Each output row is one motion sample laid out as:
    [num_features sensor values | num_act_labels one-hot activity | gender].
    Trials with id > 10 (the short trials) go to the test set, the rest to
    the training set.

    Returns:
        (train_data, test_data): two 2-D numpy arrays with
        num_features + num_act_labels + num_gen_labels columns each.
    """
    dataset_columns = num_features+num_act_labels+num_gen_labels
    ds_list = get_ds_infos()
    # Start from empty (0, columns) arrays and grow them trial by trial.
    # NOTE(review): np.append copies the whole array on every call, so this
    # loop is quadratic; collecting blocks in a list and concatenating once
    # at the end would be much faster.
    train_data = np.zeros((0,dataset_columns))
    test_data = np.zeros((0,dataset_columns))
    for i, sub_id in enumerate(ds_list[:,0]):
        for j, act in enumerate(label_codes):  # NOTE(review): j is unused
            for trial in trial_codes[act]:
                fname = 'A_DeviceMotion_data/'+act+'_'+str(trial)+'/sub_'+str(int(sub_id))+'.csv'
                raw_data = pd.read_csv(fname)
                # drop the pandas index column that was saved into the CSV
                raw_data = raw_data.drop(['Unnamed: 0'], axis=1)
                unlabel_data = raw_data.values
                label_data = np.zeros((len(unlabel_data), dataset_columns))
                # first num_features columns: the raw sensor readings
                label_data[:,:-(num_act_labels + num_gen_labels)] = unlabel_data
                # one-hot activity flag at the column index given by label_codes
                label_data[:,label_codes[act]] = 1
                # last column(s): subject gender (0/1) from column 4 of the info table
                label_data[:,-(num_gen_labels)] = int(ds_list[i,4])
                ## We consider long trials as training dataset and short trials as test dataset
                if trial > 10:
                    test_data = np.append(test_data, label_data, axis = 0)
                else:
                    train_data = np.append(train_data, label_data, axis = 0)
    return train_data , test_data
#________________________________
print("--> Start...")
## Here we set parameter to build labeld time-series from dataset of "(A)DeviceMotion_data"
num_features = 12 # attitude(roll, pitch, yaw); gravity(x, y, z); rotationRate(x, y, z); userAcceleration(x,y,z)
num_act_labels = 6 # dws, ups, wlk, jog, sit, std
num_gen_labels = 1 # 0/1(female/male)
# Map each activity to its one-hot column index (columns 12..17 of the output).
label_codes = {"dws":num_features, "ups":num_features+1, "wlk":num_features+2, "jog":num_features+3, "sit":num_features+4, "std":num_features+5}
# Trial ids per activity; ids > 10 are the short trials routed to the test set.
trial_codes = {"dws":[1,2,11], "ups":[3,4,12], "wlk":[7,8,15], "jog":[9,16], "sit":[5,13], "std":[6,14]}
## Calling 'creat_time_series()' to build time-series
print("--> Building Training and Test Datasets...")
train_ts, test_ts = creat_time_series(num_features, num_act_labels, num_gen_labels, label_codes, trial_codes)
print("--> Shape of Training Time-Seires:", train_ts.shape)
print("--> Shape of Test Time-Series:", test_ts.shape)
# -
from pandas import Series
import matplotlib.pylab as plt
import matplotlib.pyplot as pyplt
##________________________________
# Column layout (from label_codes above): [:,-1] is gender (0 female / 1 male),
# [:,-7..-2] are the one-hot activity flags dws/ups/wlk/jog/sit/std, so
# [:,-4] selects jog and [:,-5] selects wlk. Columns 0:3 are attitude
# (roll, pitch, yaw) and 9:12 user acceleration (x, y, z).
# NOTE(review): ``Series`` is imported but unused in this section.
## For Example: Attitude data
## female
data = train_ts[train_ts[:,-1]==0]
## jogging
data = data[data[:,-4]==1]
## 10 seconds  (500 samples; assumes 50 Hz sampling — TODO confirm)
data = pd.DataFrame(data[10000:10500,0:3])
data.plot()
plt.xlabel('Second', fontsize=18)
plt.ylabel('Value', fontsize=16)
lgnd=plt.legend()
lgnd.get_texts()[0].set_text('roll')
lgnd.get_texts()[1].set_text('pitch')
lgnd.get_texts()[2].set_text('yaw')
fig = pyplt.gcf()
fig.set_size_inches(18, 8)
plt.show()
## For Example: Attitude data
## male
data = train_ts[train_ts[:,-1]==1]
## jogging
data = data[data[:,-4]==1]
## 10 seconds
data = pd.DataFrame(data[10000:10500,0:3])
data.plot()
plt.xlabel('Second', fontsize=18)
plt.ylabel('Value', fontsize=16)
lgnd=plt.legend()
lgnd.get_texts()[0].set_text('roll')
lgnd.get_texts()[1].set_text('pitch')
lgnd.get_texts()[2].set_text('yaw')
fig = pyplt.gcf()
fig.set_size_inches(18, 8)
plt.show()
## For Example: Acceleration data
## female
data = train_ts[train_ts[:,-1]==0]
## walking
data = data[data[:,-5]==1]
data = pd.DataFrame(data[10000:10500,9:12])
data.plot()
plt.xlabel('Second', fontsize=18)
plt.ylabel('Value', fontsize=16)
lgnd=plt.legend()
lgnd.get_texts()[0].set_text('x')
lgnd.get_texts()[1].set_text('y')
lgnd.get_texts()[2].set_text('z')
fig = pyplt.gcf()
fig.set_size_inches(18, 8)
plt.show()
## For Example: Acceleration data
## male
data = train_ts[train_ts[:,-1]==1]
## walking
data = data[data[:,-5]==1]
data = pd.DataFrame(data[10000:10500,9:12])
data.plot()
plt.xlabel('Second', fontsize=18)
plt.ylabel('Value', fontsize=16)
lgnd=plt.legend()
lgnd.get_texts()[0].set_text('x')
lgnd.get_texts()[1].set_text('y')
lgnd.get_texts()[2].set_text('z')
fig = pyplt.gcf()
fig.set_size_inches(18, 8)
plt.show()
# etc.
| codes/.ipynb_checkpoints/0_import_dataset_and_creat_labeld_time_series-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# In order to get the best use out of the Panel user guide, it is important to have a grasp of some core concepts, ideas, and terminology.
#
# ### Components
#
# Panel provides three main types of component: ``Pane``, ``Widget``, and ``Panel``. These components are introduced and explained in the [Components user guide](./Components.ipynb), but briefly:
#
# * **``Pane``**: A ``Pane`` wraps a user supplied object of almost any type and turns it into a renderable view. When the wrapped ``object`` or any parameter changes, a pane will update the view accordingly.
#
# * **``Widget``**: A ``Widget`` is a control component that allows users to provide input to your app or dashboard, typically by clicking or editing objects in a browser, but also controllable from within Python.
#
# * **``Panel``**: A ``Panel`` is a hierarchical container to lay out multiple components (panes, widgets, or other ``Panel``s) into an arrangement that forms an app or dashboard.
#
# ---
# ### APIs
#
# Panel is a very flexible system that supports many different usage patterns, via multiple application programming interfaces (APIs). Each API has its own advantages and disadvantages, and is suitable for different tasks and ways of working. The [API user guide](APIs.ipynb) goes through each of the APIs in detail, comparing their pros and cons and providing recommendations on when to use each.
#
# #### [Reactive functions](./APIs.ipynb#Reactive-Functions)
#
# Defining a reactive function using the ``pn.bind`` function or ``pn.depends`` decorator provides an explicit way to link specific inputs (such as the value of a widget) to some computation in a function, reactively updating the output of the function whenever the parameter changes. This approach is a highly convenient, intuitive, and flexible way of building interactive UIs.
#
# #### [``interact``](./Interact.ipynb)
#
# The ``interact`` API will be familiar to ipywidgets users; it provides a very simple API to define an interactive view of the results of a Python function. This approach works by declaring functions whose arguments will be inspected to infer a set of widgets. Changing any of the resulting widgets causes the function to be re-run, updating the displayed output. This approach makes it extremely easy to get started and also easy to rearrange and reconfigure the resulting plots and widgets, but it may not be suited to more complex scenarios. See the [Interact user guide](./Interact.ipynb) for more detail.
#
# #### [``Param``](./Param.ipynb)
#
# ``Panel`` itself is built on the [param](https://param.pyviz.org) library, which allows capturing parameters and their allowable values entirely independently of any GUI code. By using Param to declare the parameters along with methods that depend on those parameters, even very complex GUIs can be encapsulated in a tidy, well-organized, maintainable, and declarative way. Panel will automatically convert parameter definition to corresponding widgets, allowing the same codebase to support command-line, batch, server, and GUI usage. This API requires the use of the param library to express the inputs and encapsulate the computations to be performed, but once implemented this approach leads to flexible, robust, and well encapsulated code. See the Panel [Param user guide](./Param.ipynb) for more detail.
#
# #### [Callback API](./Widgets.ipynb)
#
# At the lowest level, you can build interactive applications using ``Pane``, ``Widget``, and ``Panel`` components and connect them using explicit callbacks. Registering callbacks on components to modify other components provides full flexibility in building interactive features, but once you have defined numerous callbacks it can be very difficult to track how they all interact. This approach affords the most amount of flexibility but can easily grow in complexity, and is not recommended as a starting point for most users. That said, it is the interface that all the other APIs are built on, so it is powerful and is a good approach for building entirely new ways of working with Panel, or when you need some specific behavior not covered by the other APIs. See the [Widgets user guide](./Widgets.ipynb) and [Links user guide](./Links.ipynb) for more detail.
#
# ---
# ### Display and rendering
#
# Throughout this user guide we will cover a number of ways to display Panel objects, including display in a Jupyter notebook, in a standalone server, by saving and embedding, and more. For a detailed description see the [Deploy and Export user guide](./Deploy_and_Export.ipynb).
#
# #### Notebook
#
# All of Panel's documentation is built from Jupyter notebooks that you can explore at your own pace. Panel does not require Jupyter in any way, but it has extensive Jupyter support:
#
# ##### ``pn.extension()``
#
# > The Panel extension loads BokehJS, any custom models required, and optionally additional custom JS and CSS in Jupyter notebook environments. It also allows passing any [`pn.config`](#pn.config) variables
#
# ##### ``pn.ipywidget()``
#
# > Given a Panel model, `pn.ipywidget` will return an ipywidget model that renders the object in the notebook. This can be useful for including a Panel widget in an ipywidget layout and deploying Panel objects using [Voilà](https://github.com/voila-dashboards/voila/).
#
# ##### ``pn.io.push_notebook``
#
# > When working with Bokeh models directly in a Jupyter Notebook any changes to the model are not automatically sent to the frontend. Instead we have to explicitly call `pn.io.push_notebook` on the Panel component(s) wrapping the Bokeh component being updated.
#
# ##### Rich display
#
# Jupyter notebooks allow the final value of a notebook cell to display itself, using a mechanism called [rich display](https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display). As long as `pn.extension()` has been called in a notebook, all Panel components (widgets, panes, and panels) will display themselves when placed on the last line of a notebook cell.
#
# ##### ``.app()``
#
# > The ``.app()`` method present on all viewable Panel objects allows displaying a Panel server process inline in a notebook, which can be useful for debugging a standalone server interactively.
#
# #### Python REPL and embedding a server
#
# When working in a Python REPL that does not support rich-media output (e.g. in a text-based terminal) or when embedding a Panel application in another tool, a panel can be launched in a browser tab using:
#
# ##### ``.show()``
#
# > The ``.show()`` method is present on all viewable Panel objects and starts a server instance then opens a browser tab to point to it. To support working remotely, a specific port on which to launch the app can be supplied.
#
# ##### ``pn.serve()``
#
# >Similar to .show() on a Panel object but allows serving one or more Panel apps on a single server. Supplying a dictionary mapping from the URL slugs to the individual Panel objects being served allows launching multiple apps at once. Note that to ensure that each user gets separate session state you should wrap your app in a function which returns the Panel component to render. This ensures that whenever a new user visits the application a new instance of the application can be created.
#
# #### Command line
#
# Panel mirrors Bokeh's command-line interface for launching and exporting apps and dashboards:
#
# ##### ``panel serve app.py``
#
# > The ``panel serve`` command allows interactively displaying and deploying Panel web-server apps from the command line.
#
# ##### ``panel serve app.ipynb``
#
# > ``panel serve`` also supports using Jupyter notebook files, where it will serve any Panel objects that were marked `.servable()` in a notebook cell. This feature allows you to maintain a notebook for exploring and analysis that provides certain elements meant for broader consumption as a standalone app.
#
# #### Export
#
# When not working interactively, a Panel object can be exported to a static file.
#
# ##### ``.save()`` to PNG
#
# > The ``.save`` method present on all viewable Panel objects allows saving the visual representation of a Panel object to a PNG file.
#
# ##### ``.save()`` to HTML
#
# > ``.save`` to HTML allows sharing the full Panel object, including any static links ("jslink"s) between widgets and other components, but other features that depend on having a live running Python process will not work (as for many of the Panel webpages).
#
# #### Embedding
#
# Panel objects can be serialized into a static JSON format that captures the widget state space and the corresponding plots or other viewable items for each combination of widget values, allowing fully usable Panel objects to be embedded into external HTML files or emails. For simple cases, this approach allows distributing or publishing Panel apps that no longer require a Python server in any way. Embedding can be enabled when using ``.save()``, using the ``.embed()`` method or globally using [Python and Environment variables](#Python and Environment variables) on ``pn.config``.
#
# ##### ``.embed()``
#
# > The ``.embed()`` method embeds the contents of the object it is being called on in the notebook.
#
# ___
# ### Linking and callbacks
#
# One of the most important aspects of a general app and dashboarding framework is the ability to link different components in flexible ways, scheduling callbacks in response to internal and external events. Panel provides convenient lower and higher-level APIs to achieve both. For more details, see the [Links](./Links.ipynb) user guide.
#
# ##### ``.param.watch``
#
# > The ``.param.watch`` method allows listening to parameter changes on an object using Python callbacks. It is the lowest level API and provides the most amount of control, but higher-level APIs are more appropriate for most users and most use cases.
#
# ##### ``.link()``
#
# > The Python-based ``.link()`` method present on all viewable Panel objects is a convenient API to link the parameters of two objects together, uni- or bi-directionally.
#
# ##### ``.jscallback``
#
# > The Javascript-based ``.jscallback()`` method allows defining arbitrary Javascript code to be executed when some property changes or event is triggered.
#
# ##### ``.jslink()``
#
# > The JavaScript-based ``.jslink()`` method directly links properties of the underlying Bokeh models, making it possible to define interactivity that works even without a running Python server.
#
# ___
# ### State and configuration
#
# Panel provides top-level objects to hold current state and control high-level configuration variables.
#
# ##### `pn.config`
#
# The `pn.config` object allows setting various configuration variables, the config variables can also be set as environment variables or passed through the [`pn.extension`](#pn-extension):
#
# ##### Python only
#
# > - `css_files`: External CSS files to load.
# > - `js_files`: External JS files to load. Dictionary should map from exported name to the URL of the JS file.
# > - `loading_spinner`: The style of the global loading indicator, e.g. 'arcs', 'bars', 'dots', 'petals'.
# > - `loading_color`: The color of the global loading indicator as a hex color, e.g. #6a6a6a
# > - `raw_css`: List of raw CSS strings to add to load.
# > - `safe_embed`: Whether to record all set events when embedding rather than just those that are changed
# > - `session_history`: If set to a non-zero value this determines the maximum length of the pn.state.session_info dictionary, which tracks information about user sessions. A value of -1 indicates an unlimited history.
# > - `sizing_mode`: Specify the default sizing mode behavior of panels.
# > - `template`: The template to render the served application into, e.g. `'bootstrap'` or `'material'`.
# > - `theme`: The theme to apply to the selected template (no effect unless `template` is set)
# > - `throttled`: Whether sliders and inputs should be throttled until release of mouse.
#
# #### Python and Environment variables
# > - `comms` (`PANEL_COMMS`): Whether to render output in Jupyter with the default Jupyter extension or use the `jupyter_bokeh` ipywidget model.
# > - `console_output` (`PANEL_CONSOLE_OUTPUT`): How to log errors and stdout output triggered by callbacks from Javascript in the notebook. Options include `'accumulate'`, `'replace'` and `'disable'`.
# > - `embed` (`PANEL_EMBED`): Whether plot data will be [embedded](./Deploy_and_Export.ipynb#Embedding).
# > - `embed_json` (`PANEL_EMBED_JSON`): Whether to save embedded state to json files.
# > - `embed_json_prefix` (`PANEL_EMBED_JSON_PREFIX`): Prefix for randomly generated json directories.
# > - `embed_load_path` (`PANEL_EMBED_LOAD_PATH`): Where to load json files for embedded state.
# > - `embed_save_path` (`PANEL_EMBED_SAVE_PATH`): Where to save json files for embedded state.
# > - `inline` (`PANEL_INLINE`): Whether to inline JS and CSS resources. If disabled, resources are loaded from CDN if one is available.
#
# ##### `pn.state`
#
# The `pn.state` object makes various global state available and provides methods to manage that state:
#
# > - `busy`: A boolean value to indicate whether a callback is being actively processed.
# > - `cache`: A global cache which can be used to share data between different processes.
# > - `cookies`: HTTP request cookies for the current session.
# > - `curdoc`: When running a server session this property holds the current bokeh Document.
# > - `location`: In a server context this provides read and write access to the URL:
# > * `hash`: hash in window.location e.g. '#interact'
# > * `pathname`: pathname in window.location e.g. '/user_guide/Interact.html'
# > * `search`: search in window.location e.g. '?color=blue'
# > * `reload`: Reloads the page when the location is updated.
# > * `href` (readonly): The full url, e.g. 'https://localhost:80?color=blue#interact'
# > * `hostname` (readonly): hostname in window.location e.g. 'panel.holoviz.org'
# > * `protocol` (readonly): protocol in window.location e.g. 'http:' or 'https:'
# > * `port` (readonly): port in window.location e.g. '80'
# > - `headers`: HTTP request headers for the current session.
# > - `session_args`: When running a server session this return the request arguments.
# > - `session_info`: A dictionary tracking information about server sessions:
# > * `total` (int): The total number of sessions that have been opened
# > * `live` (int): The current number of live sessions
# > * `sessions` (dict(str, dict)): A dictionary of session information:
# > * `started`: Timestamp when the session was started
# > * `rendered`: Timestamp when the session was rendered
# > * `ended`: Timestamp when the session was ended
# > * `user_agent`: User-Agent header of client that opened the session
# > - `webdriver`: Caches the current webdriver to speed up export of bokeh models to PNGs.
# >
# > #### Methods
# >
# > - `as_cached`: Allows caching data across sessions by memoizing on the provided key and keyword arguments to the provided function.
# > - `add_periodic_callback`: Schedules a periodic callback to be run at an interval set by the period
# > - `kill_all_servers`: Stops all running server sessions.
# > - `onload`: Allows defining a callback which is run when a server is fully loaded
# > - `sync_busy`: Sync an indicator with a boolean value parameter to the busy property on state
| examples/user_guide/Overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + papermill={"duration": 7.980091, "end_time": "2021-10-06T17:02:06.218239", "exception": false, "start_time": "2021-10-06T17:01:58.238148", "status": "completed"} tags=[]
# !pip install classification-models-3D
# + papermill={"duration": 0.038253, "end_time": "2021-10-06T17:02:06.286092", "exception": false, "start_time": "2021-10-06T17:02:06.247839", "status": "completed"} tags=[]
# # !pip install efficientnet-3D keras_applications
# + papermill={"duration": 6.599632, "end_time": "2021-10-06T17:02:12.912182", "exception": false, "start_time": "2021-10-06T17:02:06.312550", "status": "completed"} tags=[]
# !pip install keras_applications
# + papermill={"duration": 4.586736, "end_time": "2021-10-06T17:02:17.516731", "exception": false, "start_time": "2021-10-06T17:02:12.929995", "status": "completed"} tags=[]
import tensorflow as tf
import numpy as np
from tensorflow import keras
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
import cv2
import time
import glob
import os
# NOTE(review): pandas is imported twice (as ``pd`` above and plain here);
# several imports (keras, image, cv2, time, layers) are unused in the visible
# cells — presumably needed further down, confirm before pruning.
import pandas
from tensorflow.keras import layers
from classification_models_3D.keras import Classifiers
# Fix the TensorFlow and NumPy RNG seeds for reproducibility.
tf.random.set_seed(1)
np.random.seed(1)
#random.seed(1)
# + papermill={"duration": 0.027064, "end_time": "2021-10-06T17:02:17.561972", "exception": false, "start_time": "2021-10-06T17:02:17.534908", "status": "completed"} tags=[]
def get_all_slices(df, base_dir):
    """Return, for each row of ``df``, the number of FLAIR slice files found
    under ``base_dir/<folder_id>/flair``.

    Folders that do not exist contribute a count of 0.
    """
    return [
        len(glob.glob(os.path.join(base_dir, folder_id) + '/flair/*'))
        for folder_id in df['folder_id']
    ]
def split_train_test(slices_list,folders_list,label_list,split_ratio=0.1):
    """Split three parallel sequences into train/test partitions.

    The first ``int(len * split_ratio)`` entries become the test set, the
    remainder the training set.  Returns
    (train_slices, train_folders, train_labels,
     test_slices,  test_folders,  test_labels).
    """
    n_test = int(len(slices_list) * split_ratio)
    test_part = (slices_list[:n_test], folders_list[:n_test], label_list[:n_test])
    train_part = (slices_list[n_test:], folders_list[n_test:], label_list[n_test:])
    return (*train_part, *test_part)
# + papermill={"duration": 0.0381, "end_time": "2021-10-06T17:02:17.617843", "exception": false, "start_time": "2021-10-06T17:02:17.579743", "status": "completed"} tags=[]
df = pd.read_csv('../input/rsnasubmissionresult/result.csv',dtype='str')
base_dir = '../input/classify-tumor-best/DATATUMORONLY_TRAIN/train'
#slices_list = np.array(get_all_slices(df,base_dir))
# + papermill={"duration": 0.024583, "end_time": "2021-10-06T17:02:17.659663", "exception": false, "start_time": "2021-10-06T17:02:17.635080", "status": "completed"} tags=[]
train_df = df.iloc[:525,:]
test_df = df.iloc[526:,:]
# + papermill={"duration": 2.985552, "end_time": "2021-10-06T17:02:20.662770", "exception": false, "start_time": "2021-10-06T17:02:17.677218", "status": "completed"} tags=[]
# Count the flair slices available for each train/test patient.
train_slices_list = np.array(get_all_slices(train_df,base_dir))
test_slices_list = np.array(get_all_slices(test_df,base_dir))
#slices_list = np.array(list(df['flair']))
# Parallel arrays: patient folder ids and their MGMT labels (kept as strings
# here; cast to int at batch time by the generator).
train_folders_list = np.array(list(train_df['folder_id']))
test_folders_list = np.array(list(test_df['folder_id']))
train_label_list = np.array(list(train_df['MGMT_value']))
test_label_list = np.array(list(test_df['MGMT_value']))
# Keep only patients with 1-49 slices: drops empty folders and caps volume
# depth so batches built by slice-count similarity stay below 50 slices.
indexes = np.where((train_slices_list > 0 )&(train_slices_list < 50))
train_slices_list = np.take(train_slices_list,indexes)[0]
train_folders_list = np.take(train_folders_list,indexes)[0]
train_label_list = np.take(train_label_list,indexes)[0]
# Apply the same 0 < n < 50 filter to the test split.
indexes = np.where((test_slices_list > 0 )&(test_slices_list < 50))
test_slices_list = np.take(test_slices_list,indexes)[0]
test_folders_list = np.take(test_folders_list,indexes)[0]
test_label_list = np.take(test_label_list,indexes)[0]
# + papermill={"duration": 0.023545, "end_time": "2021-10-06T17:02:20.704339", "exception": false, "start_time": "2021-10-06T17:02:20.680794", "status": "completed"} tags=[]
# df = pd.read_csv('../input/rsnasubmissionresult/result.csv',dtype='str')
# base_dir = '../input/classify-tumor-best/DATATUMORONLY_TRAIN/train'
# slices_list = np.array(get_all_slices(df,base_dir))
# #slices_list = np.array(list(df['flair']))
# folders_list = np.array(list(df['folder_id']))
# label_list = np.array(list(df['MGMT_value']))
# indexes = np.where((slices_list > 0 )&(slices_list < 50))
# slices_list = np.take(slices_list,indexes)[0]
# folders_list = np.take(folders_list,indexes)[0]
# label_list = np.take(label_list,indexes)[0]
# shuffler = np.random.permutation(len(slices_list))
# slices_list = slices_list[shuffler]
# folders_list = folders_list[shuffler]
# label_list = label_list[shuffler]
# train_slices_list,train_folders_list,train_label_list,\
# test_slices_list,test_folders_list,test_label_list = split_train_test(slices_list,folders_list,label_list,split_ratio=0.1)
# + papermill={"duration": 0.04173, "end_time": "2021-10-06T17:02:20.763799", "exception": false, "start_time": "2021-10-06T17:02:20.722069", "status": "completed"} tags=[]
class DataGenerator(keras.utils.Sequence):
    """Keras Sequence that yields batches of 3D MRI volumes grouped by slice count.

    Unlike a conventional Sequence, ``__getitem__`` IGNORES the requested index:
    it always takes the current head of ``slices_list``, batches together
    patients whose slice count is within ``tolerance`` of it, and REMOVES those
    patients from the working lists.  ``on_epoch_end`` restores (and optionally
    reshuffles) the working lists from the ``intial_*`` copies.
    """
    def __init__(self,slices_list,folders_list,label_list,width=256,height=256,batch_size=16,shuffle=True):
        self.batch_size = batch_size
        self.base_dir = '../input/classify-tumor-best/DATATUMORONLY_TRAIN/train'
        self.width = width
        self.crop_length = 224
        self.height = height
        # Max slice-count difference allowed within one batch.
        self.tolerance = 5
        self.shuffle = shuffle
        # Pristine copies (note the 'intial' [sic] spelling is used throughout);
        # the working lists are consumed by __getitem__ and rebuilt per epoch.
        self.intial_slices_list = slices_list
        self.intial_folders_list = folders_list
        self.intial_label_list = label_list
        #print(len(self.slices_list))
        self.on_epoch_end()
    def on_epoch_end(self):
        """Reset the working lists from the pristine copies; shuffle if requested."""
        print('epoch ended')
        self.slices_list = self.intial_slices_list.copy()
        self.folders_list = self.intial_folders_list.copy()
        self.label_list = self.intial_label_list.copy()
        if self.shuffle:
            # One shared permutation keeps the three parallel arrays aligned.
            shuffler = np.random.permutation(len(self.slices_list))
            self.slices_list = self.slices_list[shuffler]
            self.folders_list = self.folders_list[shuffler]
            self.label_list = self.label_list[shuffler]
    def __len__(self):
        # NOTE: this is the number of SAMPLES, not batches; callers compensate
        # with steps_per_epoch=len(gen)//batch_size in model.fit().
        return len(self.intial_slices_list)
    def __getitem__(self,user_index):
        """Build one batch of patients with similar slice counts.

        ``user_index`` is deliberately unused — batches are popped from the
        head of the (shuffled) working lists, so iteration order is stateful.
        Returns (preprocessed volumes, one-hot labels).
        """
        start =time.time()
        # Anchor the batch on the slice count of the current head patient.
        index = self.slices_list[0]
        #print(len(self.slices_list))
        labels = []
        # All remaining patients whose slice count is within +/- tolerance.
        indexes = np.where((self.slices_list >= index-self.tolerance) &(self.slices_list <= index+self.tolerance))
        tol_slice= np.take(self.slices_list, indexes)[0]
        tol_folder= np.take(self.folders_list, indexes)[0]
        # Sample up to batch_size of them without replacement.
        random_indexes = np.random.choice(indexes[0], size=min(self.batch_size,len(tol_folder)),replace=False)
        random_folder = np.take(self.folders_list,random_indexes)
        random_slices = np.take(self.slices_list,random_indexes)
        random_labels = np.take(self.label_list,random_indexes)
        # Consume the sampled patients so they are not served again this epoch.
        self.folders_list = np.delete(self.folders_list,random_indexes)
        self.slices_list = np.delete(self.slices_list,random_indexes)
        self.label_list = np.delete(self.label_list,random_indexes)
        #print(len(self.slices_list))
        # Depth of this batch's volumes = deepest patient; shallower volumes
        # are zero-padded by __data_gen_image.
        self.max_depth = random_slices.max()
        #print(random_folder)
        batch_x = self.__data_gen_batch(random_folder)
        #for i in random_folder:
        #    labels.append(int(self.label_list[np.where(self.folders_list == i)[0]][0]))
        #print(labels)
        return preprocess_input(batch_x),self.one_hot_encoder(random_labels.astype(np.int8))
    def one_hot_encoder(self,y):
        """One-hot encode integer labels in {0, 1} to shape (len(y), 2)."""
        b = np.zeros((len(y), 2))
        b[np.arange(len(y)),y] = 1
        return b
    def get_max_len(self,batch,min_depth=50):
        """Return the largest flair-file count over batch['folder_id'], floored
        at ``min_depth``.  (Not called by the batch path above.)"""
        max_len = 0
        for patient_id in batch['folder_id']:
            #print(os.path.join(self.base_dir,patient_id,'flair/*'))
            length = len(glob.glob(os.path.join(self.base_dir,patient_id,'flair/*')))
            if length > max_len:
                max_len = length
        if max_len < min_depth:
            max_len = min_depth
        return max_len
    def __data_gen_image(self,folder_name):
        """Load one patient's flair slices into a (H, W, max_depth, 1) volume."""
        flair_path = glob.glob(os.path.join(self.base_dir,folder_name,'flair/*'))
        # Sort by the numeric suffix of the filename; zfill(3) pads so that
        # e.g. '2' orders before '10' — assumes slice numbers stay below 1000.
        flair_path = sorted(flair_path,key=lambda x:x.split('-')[-1].split('.')[-2].zfill(3))
        all_images = []
        # NOTE(review): shape uses self.height twice (not width); harmless while
        # height == width (224 everywhere in this notebook) — confirm if changed.
        all_images = np.zeros(shape=(self.max_depth,self.height,self.height,1),dtype=np.float64)
        for i,img_path in enumerate(flair_path):
            img = image.load_img(img_path,target_size=(self.height,self.width),color_mode='grayscale')
            img = image.img_to_array(img)
            all_images[i,] = img
        # (depth, H, W, 1) -> (H, W, depth, 1); trailing slots stay zero-padded.
        return np.transpose(all_images,(1,2,0,3))
    def __data_gen_batch(self,folder_names):
        """Stack per-patient volumes into a (batch, H, W, max_depth, 1) array."""
        batch_data = np.empty(shape=(len(folder_names),self.height,self.width,self.max_depth,1))
        for i,patient_id in enumerate(folder_names):
            batch_data[i,] = self.__data_gen_image(patient_id)
        return batch_data
    def crop(self,image,crop_length=224):
        """Centre-crop a (H, W, C) image to roughly crop_length x crop_length.
        Note: uses self.crop_length, not the ``crop_length`` parameter.
        (Unused by the batch path above.)"""
        img_height ,img_width = image.shape[:2]
        start_y = (img_height - self.crop_length) // 2
        start_x = (img_width - self.crop_length) // 2
        cropped_image=image[start_y:(img_height - start_y), start_x:(img_width - start_x), :]
        return cropped_image
# + papermill={"duration": 0.025456, "end_time": "2021-10-06T17:02:20.806717", "exception": false, "start_time": "2021-10-06T17:02:20.781261", "status": "completed"} tags=[]
train_datagen = DataGenerator(train_slices_list,train_folders_list,train_label_list,batch_size=5,height=224,width=224,shuffle=True)
test_datagen = DataGenerator(test_slices_list,test_folders_list,test_label_list,batch_size=1,height=224,width=224,shuffle=True)
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.024142, "end_time": "2021-10-06T17:02:20.850268", "exception": false, "start_time": "2021-10-06T17:02:20.826126", "status": "completed"} tags=[]
# for _ in range(3):
# print('new epoch')
# for i in range(len(test_datagen)-1):
# x,y = test_datagen[i]
# print(i,x.shape,len(test_datagen.slices_list))
# test_datagen.on_epoch_end()
# + papermill={"duration": 0.024282, "end_time": "2021-10-06T17:02:20.894086", "exception": false, "start_time": "2021-10-06T17:02:20.869804", "status": "completed"} tags=[]
# basemodel = efn.EfficientNetB0(input_shape=(256, 256, None, 1), weights=None)
# x = layers.GlobalAveragePooling3D()(basemodel.output)
# x = layers.Dense(units=128, activation="relu")(x)
# x = layers.Dropout(0.1)(x)
# outputs = layers.Dense(units=2, activation="softmax")(x)
# # Define the model.
# model = keras.Model(basemodel.input, outputs, name="eff3dcnn")
# model.summary()
# + papermill={"duration": 2.376689, "end_time": "2021-10-06T17:02:23.288485", "exception": false, "start_time": "2021-10-06T17:02:20.911796", "status": "completed"} tags=[]
ResNet18, preprocess_input = Classifiers.get('resnet18')
model = ResNet18(input_shape=(224, 224, None, 1), weights=None,include_top=True)
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.088549, "end_time": "2021-10-06T17:02:23.395124", "exception": false, "start_time": "2021-10-06T17:02:23.306575", "status": "completed"} tags=[]
#x = layers.Dense(units=128, activation="relu")(model.layers[-3].output)
#x = layers.Dropout(0.1)(x)
outputs = layers.Dense(units=2, activation="softmax")(model.layers[-3].output)
# Define the model.
new_model = keras.Model(model.input, outputs, name="resnet18_3d")
new_model.summary()
# + papermill={"duration": 0.048789, "end_time": "2021-10-06T17:02:23.483038", "exception": false, "start_time": "2021-10-06T17:02:23.434249", "status": "completed"} tags=[]
# Create output dirs for checkpoints and TensorBoard logs.
# exist_ok=True so a notebook re-run does not crash with FileExistsError.
os.makedirs('models', exist_ok=True)
os.makedirs('logs', exist_ok=True)
# + papermill={"duration": 0.473796, "end_time": "2021-10-06T17:02:23.992515", "exception": false, "start_time": "2021-10-06T17:02:23.518719", "status": "completed"} tags=[]
# Two-class softmax output -> categorical cross-entropy with default Adam LR.
new_model.compile(
    loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    metrics=["accuracy"]
)
# Keep only the checkpoint with the best validation accuracy seen so far.
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(
    filepath="models/3d_image_classification.hdf5", save_best_only=True,monitor="val_accuracy",mode="max",verbose=0)
summary = tf.keras.callbacks.TensorBoard(log_dir="./logs",update_freq=1,histogram_freq=2)
# + papermill={"duration": 9706.334341, "end_time": "2021-10-06T19:44:10.345451", "exception": false, "start_time": "2021-10-06T17:02:24.011110", "status": "completed"} tags=[]
# len(train_datagen) is the SAMPLE count (see DataGenerator.__len__), so
# dividing by the batch size (5) yields the number of batches per epoch;
# validation_steps is trimmed by 2 to avoid exhausting the test generator.
new_model.fit(
    train_datagen,
    steps_per_epoch=len(train_datagen)//5,
    validation_data=test_datagen,\
    validation_steps=len(test_datagen)-2,
    epochs=300,
    verbose=1,
    callbacks = [checkpoint_cb,summary]
)
# + papermill={"duration": 7.628899, "end_time": "2021-10-06T19:44:25.783107", "exception": false, "start_time": "2021-10-06T19:44:18.154208", "status": "completed"} tags=[]
new_model.save('best_50.hdf5')
# + papermill={"duration": 7.4734, "end_time": "2021-10-06T19:44:40.705022", "exception": false, "start_time": "2021-10-06T19:44:33.231622", "status": "completed"} tags=[]
train_datagen = DataGenerator(train_slices_list,train_folders_list,train_label_list,batch_size=5,height=224,width=224,shuffle=True)
# + papermill={"duration": 32.148581, "end_time": "2021-10-06T19:45:20.727576", "exception": false, "start_time": "2021-10-06T19:44:48.578995", "status": "completed"} tags=[]
# Manually measure training accuracy: iterate one epoch's worth of batches
# and count argmax matches between predictions and one-hot labels.
true_cnt = 0
all_cnt = 0
for i in range(len(train_datagen)//5):  # len()//5 = number of batches (batch_size=5)
    x,y = train_datagen[i]              # note: the generator ignores i and pops its head batch
    y_pred = new_model.predict(x)
    # Round probabilities to 1 decimal before argmax, then compare classes.
    output = np.argmax(np.round_(y_pred,1),axis=1)==np.argmax(y,1)
    true_cnt += sum(output)
    all_cnt += len(output)
# + papermill={"duration": 6.977126, "end_time": "2021-10-06T19:45:34.880292", "exception": false, "start_time": "2021-10-06T19:45:27.903166", "status": "completed"} tags=[] active=""
# true_cnt/all_cnt
| fork-of-eff-3d-train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import serial
ser = serial.Serial('/dev/cu.usbserial-AI03Y56G', 9600)
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import time
now = lambda: int(round(time.time() * 1000))
# -
# Collect accelerometer samples from the serial stream indefinitely
# (interrupt the kernel to stop); each sample is an [x, y, z] triple.
t = []     # sample timestamps in ms (from now())
vals = []  # [x, y, z] float triples
while True:
    line = ser.readline()
    # NOTE(review): 'accleration' (sic) must match the device's banner line
    # exactly — presumably the firmware prints the same misspelling; verify
    # against the device output before "fixing" this string.
    if 'accleration' in str(line):
        val = []
        # The three lines following the banner each carry one axis value.
        for _ in range(3):
            line = ser.readline()
            # str(line) looks like "b'1.23\r\n'": chars [2:6] hold the number,
            # one extra char ([2:7]) when a leading minus sign is present.
            end = 7 if '-' in str(line) else 6
            num = float(str(line)[2:end])
            val.append(num)
        t.append(now())
        vals.append(val)
x = [val[0] for val in vals]
plt.plot(t, x)
y = [val[1] for val in vals]
plt.plot(t, y)
z = [val[2] for val in vals]
plt.plot(t, z)
| accelerometer_testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# orphan: true
# ---
# + tags=["remove-input", "remove-output", "active-ipynb"]
# try:
# from openmdao.utils.notebook_utils import notebook_mode
# except ImportError:
# !python -m pip install openmdao[notebooks]
# -
# # Sparse Partial Derivatives
#
# When a partial derivative is sparse (few nonzero entries compared to the total size of the matrix), it may be advantageous to utilize a format that stores only the nonzero entries. To use sparse partial derivatives, they must first be declared with the sparsity pattern in setup_partials using the declare_partials method.
#
# ## Usage
#
# To specify the sparsity pattern in the AIJ format (alternatively known as COO format), use the `rows` and `cols` arguments to `declare_partials`. For example, to declare a sparsity pattern of nonzero entries in the (0, 0), (1, 1), (1, 2), and (1,3) positions, one would use rows=[0, 1, 1, 1], cols=[0, 1, 2, 3]. When using `compute_partials`, you do not need to pass the sparsity pattern again. Instead, you simply give the values for the entries in the same order as given in `declare_partials`.
# +
import numpy as np
import openmdao.api as om
class SparsePartialComp(om.ExplicitComponent):
    """Map a length-4 input 'x' to a length-2 output 'f' with a sparse Jacobian
    declared in COO (rows/cols) format and filled as a flat list of values."""
    def setup(self):
        self.add_input('x', shape=(4,))
        self.add_output('f', shape=(2,))
    def setup_partials(self):
        # Sparsity pattern: nonzeros at (0,0), (1,1), (1,2), (1,3).
        self.declare_partials(of='f', wrt='x',
                              rows=[0, 1, 1, 1],
                              cols=[0, 1, 2, 3])
    def compute_partials(self, inputs, partials):
        # Values are given in the same order as the rows/cols declared above.
        # Corresponds to the [(0,0), (1,1), (1,2), (1,3)] entries.
        partials['f', 'x'] = [1., 2., 3., 4.]
model = om.Group()
model.add_subsystem('example', SparsePartialComp())
problem = om.Problem(model=model)
problem.setup()
problem.run_model()
totals = problem.compute_totals(['example.f'], ['example.x'])
# -
print(totals['example.f', 'example.x'])
# + tags=["remove-input", "remove-output"]
from openmdao.utils.assert_utils import assert_near_equal
assert_near_equal(totals['example.f', 'example.x'],
np.array([[1., -0., -0., -0.], [-0., 2., 3., 4.]]))
# -
# If only some of your Jacobian entries change across iterations, or if you wish to avoid creating intermediate arrays, you may update the entries in-place.
# +
import numpy as np
import openmdao.api as om
class SparsePartialComp(om.ExplicitComponent):
    """Same sparse component as above, but the Jacobian entries are updated
    in-place — useful when only some entries change between iterations or to
    avoid allocating an intermediate list on every call."""
    def setup(self):
        self.add_input('x', shape=(4,))
        self.add_output('f', shape=(2,))
    def setup_partials(self):
        # Sparsity pattern: nonzeros at (0,0), (1,1), (1,2), (1,3).
        self.declare_partials(of='f', wrt='x',
                              rows=[0, 1, 1, 1],
                              cols=[0, 1, 2, 3])
    def compute_partials(self, inputs, partials):
        # pd is a flat view over the declared nonzeros, in declaration order.
        pd = partials['f', 'x']
        # Corresponds to the (0, 0) entry
        pd[0] = 1.
        # (1,1) entry
        pd[1] = 2.
        # (1, 2) entry
        pd[2] = 3.
        # (1, 3) entry
        pd[3] = 4
model = om.Group()
model.add_subsystem('example', SparsePartialComp())
problem = om.Problem(model=model)
problem.setup()
problem.run_model()
totals = problem.compute_totals(['example.f'], ['example.x'])
# -
print(totals['example.f', 'example.x'])
# + tags=["remove-input", "remove-output"]
assert_near_equal(totals['example.f', 'example.x'],
np.array([[1., -0., -0., -0.], [-0., 2., 3., 4.]]))
# -
# If your partial derivative is constant and sparse, or if you simply wish to provide an initial value for the derivative, you can pass in the values using the `val` argument. If you are using the AIJ format, `val` should receive the nonzero entries in the same order as given for `rows` and `cols`. Alternatively, you may provide a Scipy sparse matrix, from which the sparsity pattern is deduced.
# +
import numpy as np
import scipy as sp
import openmdao.api as om
class SparsePartialComp(om.ExplicitComponent):
    """Sparse component whose partials are constant: values are supplied at
    declaration time (via ``val``), either alongside a COO rows/cols pattern
    or as a Scipy sparse matrix from which the pattern is deduced."""
    def setup(self):
        self.add_input('x', shape=(4,))
        self.add_input('y', shape=(2,))
        self.add_output('f', shape=(2,))
    def setup_partials(self):
        # Constant COO-format partials: values match the rows/cols order.
        self.declare_partials(of='f', wrt='x',
                              rows=[0, 1, 1, 1],
                              cols=[0, 1, 2, 3],
                              val=[1., 2., 3., 4.])
        # Constant identity partial given directly as a CSC sparse matrix.
        self.declare_partials(of='f', wrt='y', val=sp.sparse.eye(2, format='csc'))
    def compute_partials(self, inputs, partials):
        # Intentionally empty: all partials are constant and already declared.
        pass
model = om.Group()
model.add_subsystem('example', SparsePartialComp())
problem = om.Problem(model=model)
problem.setup()
problem.run_model()
totals = problem.compute_totals(['example.f'], ['example.x', 'example.y'])
# -
print(totals['example.f', 'example.x'])
print(totals['example.f', 'example.y'])
# + tags=["remove-input", "remove-output"]
assert_near_equal(totals['example.f', 'example.x'],
np.array([[1., -0., -0., -0.], [-0., 2., 3., 4.]]))
assert_near_equal(totals['example.f', 'example.y'],
np.array([[1., -0.], [-0., 1.]]))
| openmdao/docs/openmdao_book/features/core_features/working_with_derivatives/sparse_partials.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
## Exercices 1 sur github
##refaire à partir de How many orders were made per bike sub-categories?
## très pertinent
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
sales = pd.read_csv('https://raw.githubusercontent.com/ine-rmotr-curriculum/FreeCodeCamp-Pandas-Real-Life-Example/master/data/sales_data.csv',
parse_dates=['Date'])
sales.head()
sales['Customer_Age'].mean()
sales.shape
sales1 = sales.head(n=300)
sales1.shape
# +
sales1['Customer_Age'].plot(kind = 'kde', figsize = (14,6))
# -
print(sales1['Customer_Age'] > 5)
# +
sales1['Customer_Age'].plot(kind = 'box', figsize = (14,6))
# -
sales1['Order_Quantity'].mean()
sales1['Order_Quantity'].describe()
sales1['Order_Quantity'].plot(kind='box',vert = False, figsize = (14,5))
sales1['Order_Quantity'].plot(kind='box', figsize = (14,5))
##sales per year
# FIX: DataFrame has no '.names' attribute (the original raised AttributeError);
# '.columns' lists the column labels, which is what the exercise inspects next.
sales1.columns
sales1[0:5]
col = sales1.columns.tolist()
sales['Year'].value_counts()
sales['Year'].value_counts().plot(kind='pie')
sales['Month'].value_counts()
sales['Month'].value_counts().plot(kind= 'pie')
sales['Country'].value_counts().plot(kind='bar')
sales['Product'].value_counts().head(1)
sales['Product'].unique()
sales['Product'].value_counts().head(10).plot(kind='bar')
sales.plot(kind = 'scatter', x='Unit_Cost', y = 'Unit_Price')
sales.plot(kind = 'scatter', x='Order_Quantity', y = 'Profit')
sales[['Profit', 'Country']].boxplot(by='Country', figsize=(10,6))
sales[['Profit', 'Country']].boxplot(by='Country')
sales[['Customer_Age', 'Country']].boxplot(by='Country', figsize = (10,6))
sales['date_calculée'] = sales[['Day', 'Month', 'Year']].apply(lambda x: '{}-{}-{}'.format(x[0], x[1], x[2]), axis=1)
sales['date_calculée'].head()
# +
sales['date_calculée'] = pd.to_datetime(sales['date_calculée'])
sales['date_calculée'].head()
# -
sales['date_calculée'].value_counts().plot(kind='line')
sales['Revenue'] +=50
col
print( sales.loc[sales['Country'] == 'Canada'].shape[0])
print("vente au Canada sur %s " % len(sales))
sales['Country']
sales.loc[(sales['Country'] == 'Canada') |
(sales['Country'] == 'France')].shape[0]
sales.loc[(sales['Country'] == 'Canada') &
(sales['Sub_Category'] == 'Bike Racks')].shape[0]
# FIX: the original cell was a syntax error — it left two parentheses unclosed
# and tried to '&' a boolean row mask with a Series of value counts.
# Intended result (matching the identical exercise at the 'france_sales' cell
# further down): count of sales per French state.
sales.loc[sales['Country'] == 'France', 'State'].value_counts()
sales.loc[sales['Country'] == 'France'].shape[0]
sales.loc[sales['Product_Category'] == 'Bike', 'Sub_Category'].shape[0]
col
sales['Customer_Gender'].value_counts()
sales['Revenue'].value_counts().head(5)
sales.sort_values(['Revenue'], ascending=False).head(1)
sales.sort_values(['Revenue'], ascending=False).max()
# +
cond = sales['Revenue'] == sales['Revenue'].max()
sales.loc[cond]
# +
##entraînement 2, suite et reprise des exercices de manip dans le df pandas
sales.columns
# -
sales.loc[(sales['Country'] == 'Canada') | (sales['Country'] == 'France')].shape[0]
sales['Product'].value_counts().head(10)
sales.loc[(sales['Sub_Category'] == 'Bike Racks') |
(sales['Country'] == 'Canada')].shape[0]
# +
france_sales = sales.loc[sales['Country'] == 'France'
, 'State'].value_counts()
france_sales
# -
sales.columns
sales['Product_Category'].value_counts().plot(kind = 'pie')
sales.Sub_Category.value_counts()
# FIX: the original line had a stray 'true' token after the comparison, which is
# a syntax error in Python. Count how many rows are/aren't 'Accessories' instead.
(sales['Product_Category'] == 'Accessories').value_counts()
# +
bikes = sales.loc[sales['Product_Category'] == 'Bikes', 'Sub_Category'].value_counts()
bikes
# -
sales['Revenue'].max()
# +
cond = sales['Revenue'] == sales['Revenue'].max()
sales.loc[cond]
# +
cond = sales['Revenue'] < 10_000
sales.loc[cond, 'Order_Quantity'].mean()
# -
print(sales.loc[1, 'Revenue'])
##------------------------------##
##Entrainement en ligne
for i in range(0,5):
print(i)
# +
# Fournir les données d'entrée
n = int(input("Saisir un nombre : "))
print("les nombres naturels de {0} à 1".format(n))
for i in range(n, 0, -1):
print(i, end=' ')
# +
# Fournir les données d'entrée
n = int(input("Saisir un nombre : "))
print("les nombres naturels de {0} à 1".format(n))
for i in range(1, n+1):
print(i, end=' ')
# +
# Fournir les données d'entrée
n = int(input("Saisir un nombre : "))
print("les nombres naturels de {0} à 1".format(n))
for i in range(1, n+1):
if i % 2 != 0:
print(i, end=' ')
# +
# Fournir les données d'entrée
num = int(input("Saisir un nombre : "))
compteur = 0
while (num != 0):
# incrémenter le compteur
compteur += 1
# Supprimer le dernier chiffre de 'num'
num = num//10
print("Nombre de chiffres : ", compteur)
# -
num = 10
num //= 10
print(num)
# cadre de données cerveau: cerveau_df = pandas.read_csv('https://scipy-lectures.org/_downloads/brain_size.csv', sep=';', na_values='.')
# Testez la différence entre les poids des hommes et des femmes.
# Utilisez des statistiques non paramétriques pour tester la différence entre le VIQ chez les hommes et les femmes.
# créer le modèle à l'aide de statsmodels
# Récupérez les paramètres estimés du modèle.
# Astuce: utilisez la saisie TAB, semi-automatique pour trouver l'attribut pertinent.
#
# +
import pandas as pandas
cerveau_df = pandas.read_csv('https://scipy-lectures.org/_downloads/brain_size.csv', sep=';', na_values='.')
cerveau_genre = cerveau_df.groupby('Gender')
# -
com1 = cerveau_genre.Weight.describe().T
com1.head()
scipy.stats.ttest_ind(com1['Female'], com1['Male'])
cerveau_df['Gender']
cerveau_df.loc[(cerveau_df['Gender'] == 'Female')]
import scipy
from scipy import stats
# FIX: the original had an unterminated string literal ('Male) — a syntax error.
# NOTE(review): this still t-tests two boolean gender-indicator series, which is
# statistically meaningless; the femme_viq/homme_viq VIQ comparison built just
# below is presumably what was intended.
scipy.stats.ttest_ind((cerveau_df['Gender'] == 'Female'), (cerveau_df['Gender'] == 'Male'))
femme_viq = cerveau_df[cerveau_df['Gender'] == 'Female']['VIQ']
homme_viq = cerveau_df[cerveau_df['Gender'] == 'Male']['VIQ']
| ch_cours/entrainement1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # PyNEST - First Steps
# + [markdown] slideshow={"slide_type": "-"}
# **Modeling networks of spiking neurons using NEST**
#
# **Python Module of the Week, 03.05.2019**
#
# **[<NAME>](mailto:<EMAIL>)**
# + [markdown] slideshow={"slide_type": "subslide"}
# This notebook guides through your first steps using NEST. It shows
# * how to get help
# * how to create and simulate a single neuron
# * how to visualize the output
#
# Essentially, this is a reproduction of the 'Hello World!' notebook with added explanations.
#
# For more details see [part 1 of the official PyNEST tutorial](https://nest-simulator.readthedocs.io/en/latest/tutorials/pynest_tutorial/part_1_neurons_and_simple_neural_networks.html).
# + slideshow={"slide_type": "subslide"}
# populate namespace with pylab functions and stuff
# %pylab inline
# + slideshow={"slide_type": "-"}
import nest # import NEST module
# + [markdown] slideshow={"slide_type": "slide"}
# ## Getting help
# + slideshow={"slide_type": "subslide"}
# information about functions with Python's help() ...
help(nest.Models)
# + slideshow={"slide_type": "subslide"}
# ... or IPython's question mark
# nest.Models?
# + slideshow={"slide_type": "subslide"}
# list neuron models
nest.Models()
# + slideshow={"slide_type": "subslide"}
# choose LIF neuron with exponential synaptic currents: 'iaf_psc_exp'
# look in documentation for model description
# or (if not compiled with MPI)
nest.help('iaf_psc_exp')
# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating a neuron
# + slideshow={"slide_type": "subslide"}
# before creating a new network,
# reset the simulation kernel / remove all nodes
nest.ResetKernel()
# + slideshow={"slide_type": "-"}
# create the neuron
neuron = nest.Create('iaf_psc_exp')
# + slideshow={"slide_type": "-"}
# investigate the neuron
# Create() just returns a list (tuple) with handles to the new nodes
# (handles = integer numbers called ids)
neuron
# + slideshow={"slide_type": "subslide"}
# current dynamical state/parameters of the neuron
# note that the membrane voltage is at -70 mV
nest.GetStatus(neuron)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating a spikegenerator
# + slideshow={"slide_type": "subslide"}
# create a spike generator
spikegenerator = nest.Create('spike_generator')
# + slideshow={"slide_type": "-"}
# check out 'spike_times' in its parameters
nest.GetStatus(spikegenerator)
# + slideshow={"slide_type": "subslide"}
# set the spike times at 10 and 50 ms
nest.SetStatus(spikegenerator, {'spike_times': [10., 50.]})
# + [markdown] slideshow={"slide_type": "slide"}
# ## Creating a voltmeter
# + slideshow={"slide_type": "subslide"}
# create a voltmeter for recording
voltmeter = nest.Create('voltmeter')
# + slideshow={"slide_type": "-"}
# investigate the voltmeter
voltmeter
# + slideshow={"slide_type": "-"}
# see that it records membrane voltage, senders, times
nest.GetStatus(voltmeter)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Connecting
# + slideshow={"slide_type": "subslide"}
# investigate Connect() function
# nest.Connect?
# + slideshow={"slide_type": "subslide"}
# connect spike generator and voltmeter to the neuron
nest.Connect(spikegenerator, neuron, syn_spec={'weight': 1e3})
# + slideshow={"slide_type": "-"}
nest.Connect(voltmeter, neuron)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Simulating
# + slideshow={"slide_type": "subslide"}
# run simulation for 100 ms
nest.Simulate(100.)
# + slideshow={"slide_type": "subslide"}
# look at nest's KernelStatus:
# network_size (root node, neuron, spike generator, voltmeter)
# num_connections
# time (simulation duration)
nest.GetKernelStatus()
# + slideshow={"slide_type": "subslide"}
# note that voltmeter has recorded 99 events
nest.GetStatus(voltmeter)
# + slideshow={"slide_type": "subslide"}
# read out recording time and voltage from voltmeter
times = nest.GetStatus(voltmeter)[0]['events']['times']
voltages = nest.GetStatus(voltmeter)[0]['events']['V_m']
# + [markdown] slideshow={"slide_type": "slide"}
# ## Plotting
# + slideshow={"slide_type": "subslide"}
# plot results
# units can be found in documentation
pylab.plot(times, voltages, label='Neuron 1')
pylab.xlabel('Time (ms)')
pylab.ylabel('Membrane potential (mV)')
pylab.title('Membrane potential')
pylab.legend()
# + slideshow={"slide_type": "subslide"}
# create the same plot with NEST's build-in plotting function
import nest.voltage_trace
# + slideshow={"slide_type": "-"}
nest.voltage_trace.from_device(voltmeter)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Bored?
#
# * Try to make the neuron spike (maybe use `0_hello_world.ipynb`)
# * Connect another neuron to the first neuron recieving that spike
# * Check out the [official PyNEST tutorials](https://nest-simulator.readthedocs.io/en/latest/tutorials/index.html), in particular
# * part 1: Neurons and simple neural networks
# * part 2: Populations of neurons
| session20_NEST/jupyter_notebooks/1_first_steps.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: julia-1.2
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# # Numerical Optimization
#
# This notebook uses the [Optim](https://github.com/JuliaNLSolvers/Optim.jl) package which has routines for unconstrained optimization and for the case with simple bounds on the solution.
#
# As alternatives, consider the [NLopt](https://github.com/JuliaOpt/NLopt.jl) or the [JuMP](https://github.com/JuliaOpt/JuMP.jl) packages. They can easily handle various types of constrained optimization problems.
# ## Load Packages and Extra Functions
# +
using Printf, Optim
include("jlFiles/printmat.jl")
# +
using Plots
#pyplot(size=(600,400))
gr(size=(480,320))
default(fmt = :svg)
# -
# # Optimization with One Choice Variable
#
# Running
# ```
# Sol = optimize(x->fn1(x,0.5),x₀,x₁)
# ```
# finds the `x` value (in the interval `[x₀,x₁]`) that *minimizes* `fn1(x,0.5)`. The solution for the `fn1` function below should be $x=1.1$. The `x->fn1(x,0.5)` syntax makes this a function of `x` only.
#
# The output (`Sol`) contains a lot of information.
function fn1(x,c)
    # Quadratic in x with its minimum at x = 1.1, shifted down by the constant c.
    return 2*(x - 1.1)^2 - c
end
# +
x = -1:0.1:3
p1 = plot( x,fn1.(x,0.5),
linecolor = :red,
linewidth = 2,
legend = nothing,
title = "the fn1(x,0.5) function",
xlabel = "x",
ylabel = "y" )
display(p1)
# +
Sol = optimize(x->fn1(x,0.5),-2.0,3.0)
println(Sol)
printlnPs("\nThe minimum is at: ", Optim.minimizer(Sol)) #the optimal x value
println("Compare with the plot above\n")
# -
# ## One Choice Variable: Supplying a Starting Guess Instead (extra)
#
# If you prefer to give a starting guess `x₀` instead of an interval, then supply it as as a vector `[x₀]`:
# ```
# Sol = optimize(x->fn1(x[],0.5),[x₀],LBFGS())
# ```
# Notice: *(a)* `x[]` to make it a function of the first (and only) element in the vector `x`; *(b)* choose the `LBFGS()` method since the default method does not work in the case of only one choice variable.
# +
Solb = optimize(x->fn1(x[],0.5),[0.1],LBFGS())
printlnPs("The minimum is at: ", Optim.minimizer(Solb))
# -
# # Several Choice Variables: Unconstrained Optimization
#
# In the example below, we choose $(x,y)$ so as to minimize the fairly simple objective function
#
# $
# (x-2)^2 + (4y+3)^2,
# $
#
# without any constraints. The solution should be $(x,y)=(2,-3/4)$.
function fn2(p)
    # Loss in the two choice variables p = [x, y]; minimised at (2, -3/4).
    return (p[1] - 2)^2 + (4*p[2] + 3)^2
end
# +
nx = 2*41
ny = 2*61
x = range(1,stop=5,length=nx)
y = range(-1,stop=0,length=ny)
loss2d = fill(NaN,(nx,ny)) #matrix with loss fn values
for i = 1:nx, j = 1:ny
loss2d[i,j] = fn2([x[i];y[j]])
end
# -
p1 = contour( x,y,loss2d', #notice: loss2d'
xlims = (1,5),
ylims = (-1,0),
legend = false,
levels = 21,
title = "Contour plot of loss function",
xlabel = "x",
ylabel = "y" )
scatter!([2],[-0.75],label="optimum",legend=true)
display(p1)
Sol = optimize(fn2,[0.0;0.0]) #use p->lossfn(p,other arguments) if
#there are additional (non-choice) arguments
printlnPs("minimum at (x,y)= ",Optim.minimizer(Sol))
# # Several Choice Variables: Bounds on the Solutions
#
# The next few cells discuss how to impose bounds on the solution.
#
# In the example below, we impose $2.75 \leq x$ (a lower bound) and $y \leq -0.3$ (an upper bound). We will see that only one of these restrictions binds.
p1 = contour( x,y,loss2d',
xlims = (1,5),
ylims = (-1,0),
legend = false,
levels = 21,
title = "Contour plot of loss function",
xlabel = "x",
ylabel = "y",
annotation = (3.5,-0.7,text("somewhere here",8)) )
plot!([2.75,2.75],[-1,0.5],color=:red,linewidth=2,label="2.75 < x",legend = true)
plot!([1,5],[-0.3,-0.3],color=:black,linewidth=2,label="y < -0.3")
scatter!([2.75],[-0.75],color=:red,label="optimum")
display(p1)
# +
lower = [2.75, -Inf]
upper = [Inf, -0.3]
Sol = optimize(fn2,lower,upper,[3.0,-0.5])
printlnPs("The optimum is at (x,y) = ",Optim.minimizer(Sol))
# -
# # Several Choice Variables: Supplying the Gradient (extra)
#
# Supplying a function for calculating the derivatives improves speed and accuracy. See below for an example. (The `inplace=false` means that the function for the derivatives creates a new matrix at each call.)
# +
function g2(x)
    # Analytic gradient of fn2: [dL/dx1, dL/dx2] = [2(x1-2), 8(4*x2+3)].
    # Allocates a fresh vector each call, hence inplace=false in optimize().
    return [2*(x[1] - 2), 8*(4*x[2] + 3)]
end
Sol3 = optimize(fn2,g2,[1.0,-0.5],inplace=false)
println(Sol3)
# -
| Tutorial_22a_Optimization_Optim.ipynb |