<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='half_cell'></a>
Step2: For second half cell $f_h \to -f_h$
Step3: Phase advance and stability of the FODO cell
Step4: Phase advance
Step5: SASE1 undulator. Numerical simulation
Step6: Limitations
Step7: FODO correction due to undulator focusing
Step8: Current lattice $\overline\beta \approx 32$ m
Step9: $\overline \beta = 60$ m
Step10: Checking quads limits
Step11: $\overline \beta = 11$ m
Step12: Checking quads limits
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import sympy as sp
sp.init_printing()
fh, d = sp.symbols("fh, d")
Mqf = sp.Matrix([[1, 0],[-1/fh, 1]])
Md = sp.Matrix([[1, d],[0, 1]])
Mqd = sp.Matrix([[1, 0],[1/fh, 1]])
M1 = Mqf*Md*Mqd
sp.simplify(M1)
M2 = sp.Matrix([
[(d - fh)/-fh, d],
[ -d/fh**2, (-d -fh)/-fh]])
M_cell = sp.simplify(M1*M2)
M_cell
trace_M = sp.simplify(M_cell[0,0] + M_cell[1,1])
trace_M
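# Quick numerical stability check (illustrative sketch with assumed values
# d = 1.0 m and fh = 1.5 m, not from the original notebook): the FODO cell
# is stable only if |Tr(M_cell)| < 2.
print(sp.Abs(trace_M.subs({d: 1.0, fh: 1.5})) < 2)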
phi = sp.Symbol("phi")
sp.diff((1 + sp.sin(phi/2))/sp.sin(phi), phi)
import numpy as np
from scipy.optimize import root
from matplotlib import pyplot as plt
fun = lambda phi: -(np.sin(phi/2) + 1)*np.cos(phi)/np.sin(phi)**2 + np.cos(phi/2)/(2*np.sin(phi))
res = root(fun, 0.1)
print("Cell phase advance for minimum possible bmax: phi = ", res.x*180/np.pi)
%matplotlib inline
from ocelot import *
from ocelot.gui.accelerator import *
from scipy.integrate import simps
from ocelot.cpbd import optics
d1 = Drift(l=0.43065, eid='d1')
d2 = Drift(l=0.55565, eid='d2')
l = 0.1137
k = 2*0.1137/l
qd = Quadrupole(l=l/2, k1=-k, tilt=0.0)
qf = Quadrupole(l=l, k1=k, tilt=0.0)
K = 3
u40 = Undulator(lperiod=0.04, nperiods=125, Kx=K, Ky=0.0)
m1 = Marker()
m2 = Marker()
fodo_cell = [m1, qd, d1, u40, d2, qf, d1, u40, d2, qd, m2]
fodo_lat = MagneticLattice(fodo_cell)
tws = twiss(fodo_lat, nPoints=1000)
plot_opt_func(fodo_lat, tws, top_plot=["mux", "muy"], legend=False)
plt.show()
def beta(k):
qd.k1 = -k
qf.k1 = k
fodo_lat.update_transfer_maps()
tws = twiss(fodo_lat, nPoints=1000)
bx = np.array([tw.beta_x for tw in tws])
s = np.array([tw.s for tw in tws])
bx_av = simps(bx, s)/fodo_lat.totalLen
phi = tws[-1].mux
L = fodo_lat.totalLen
bx_av_th = L/6 * (5 + np.cos(phi))/np.sin(phi)
return bx_av, bx.max(), bx.min(), tws[-1].mux*180/np.pi, bx_av_th
k = np.arange(0.3, 2.9, 0.05)
Bx_min = []
Bx_max = []
Bx_av = []
Bx_av_theory = []
Phi = []
for k1 in k:
bx_av, bx_max, bx_min, phi, bx_av_th = beta(k1)
Bx_min.append(bx_min)
Bx_max.append(bx_max)
Bx_av.append(bx_av)
Phi.append(phi)
Bx_av_theory.append(bx_av_th)
fig, ax1 = plt.subplots()
plt.title(r"$\beta$ - average and max against quad strength")
ax1.plot(k, Bx_av, label=r"$\beta_{av}$")
ax1.plot(k, Bx_av_theory,"g--", label=r"$\beta_{av}^{theory}$")
ax1.plot(k, Bx_max, label=r"$\beta_{max}$")
ax1.set_ylabel(r"$\beta$, m")
ax1.set_xlabel("k, 1/m2")
plt.legend()
print("min(beta_av) = ", np.min(Bx_av), "m" )
ax2 = ax1.twinx()
ax2.plot(k, Phi, "r", label=r"$\phi_{cell}$")
ax2.set_ylabel(r"$\phi_{cell}$", color='r')
ax2.tick_params('y', colors='r')
plt.legend()
plt.grid(False)
plt.show()
from scipy.optimize import root
def fodo_estimator(beta_av, Lcell, lq=0.1137):
fun = lambda phi: Lcell/6 * (5 + np.cos(phi))/np.sin(phi) - beta_av
res = root(fun, 0.1)
phi = res.x[0]
b_av = Lcell/6 * (5 + np.cos(phi))/np.sin(phi)
f = Lcell/(4*np.sin(phi/2))
kq = 1/f/lq
bmax = (1 + np.sin(phi/2))/np.sin(phi)*Lcell
bmin = (1 - np.sin(phi/2))/np.sin(phi)*Lcell
return kq, phi, bmin, bmax, b_av
kq, phi, bmin, bmax, b_av = fodo_estimator(beta_av=11, Lcell=12.2)
print("k1 = ", kq, "1/m^2")
print("bmin / bmax = ", bmin, "/", bmax, "m")
print("calculated beta_av = ", b_av, "m")
print("phi = ", phi*180/np.pi, "deg")
def fodo_correction_SASE1(beta_av=60, K=3):
kq, phi, bmin, bmax, b_av = fodo_estimator(beta_av=beta_av, Lcell=12.2)
print("Estimation: beta_x = ", np.round(bmin,3), "; beta_y = ", np.round(bmax, 3),
"; phi = ", np.round(phi*180/np.pi), "grad"
"; kf/kd = ", np.round(kq, 4),"/", np.round(-kq, 4))
# SASE1 FODO cell
d1 = Drift(l=0.43065, eid='d1')
d2 = Drift(l=0.55565, eid='d2')
qd = Quadrupole(l=0.1137/2, k1=-kq, tilt=0.0)
qf = Quadrupole(l=0.1137, k1=kq, tilt=0.0)
u40 = Undulator(lperiod=0.04, nperiods=125, Kx=K, Ky=0.0)
m1 = Marker()
m2 = Marker()
fodo_cell = [m1, qd, d1, u40, d2, qf, d1, u40, d2, qd, m2]
# constraints
constr = {fodo_cell[-1]: {'mux':phi, "muy":phi}, "periodic": True}
# variables
vars = [qf, qd]
tws = Twiss()
tws.beta_x = bmin
tws.beta_y = bmax
tws.E = 14
res = match(MagneticLattice(fodo_cell), constr=constr,tw=tws,
vars=vars, max_iter=2000, verbose=False)
kf, kd = res
qf.k1 = kf
qd.k1 = kd
fodo_lat = MagneticLattice(fodo_cell)
tws = twiss(fodo_lat, tws0=tws, nPoints=1000)
print("Correction: beta_x = ", np.round(tws[0].beta_x, 3),
"beta_y = ", np.round(tws[0].beta_y, 3),
" kf/kd = ", np.round(kf,4), "/", np.round(kd,4))
#plot_opt_func(fodo_lat, tws, top_plot=["mux", "muy"], legend=False)
#plt.show()
return kf, kd, bmin, bmax
import sase1_lattice as sase1
lat = MagneticLattice(sase1.cell)#, stop=sase1.fodo_match)
print(sase1.tws)
#sase1.tws.beta_x = 15
#sase1.tws.beta_y = 50
tws = twiss(lat, tws0=sase1.tws)
plot_opt_func(lat, tws, legend=False)
plt.show()
kf, kd, bmin, bmax = fodo_correction_SASE1(beta_av=60, K=3)
# constraints
constr = {sase1.fodo_match: {'beta_x':bmax, "beta_y":bmin,
"alpha_x": 0, "alpha_y": 0}}
quad_limits = {sase1.qf_2177_t2: [-0.654, 0],
sase1.qf_2192_t2: [0 ,0.654],
sase1.qf_2207_t2: [-0.654, 0],
sase1.qf_2218_t2: [0 ,0.654],
sase1.qa_2229_t2: [-1.94, 0],
sase1.qa_2235_t2: [0, 1.94]}
# variables
vars = [sase1.qf_2177_t2, sase1.qf_2192_t2,
sase1.qf_2207_t2,
sase1.qf_2218_t2, sase1.qa_2229_t2, sase1.qa_2235_t2]
# because of variables redundancy, we can help a bit to find a more elegant solution
# *** comment lines or change inital conditions if you want to play with matching
sase1.qf_2177_t2.k1 = -0.2244
sase1.qf_2192_t2.k1 = 0.2309
sase1.qf_2207_t2.k1 = -0.1911
sase1.qf_2218_t2.k1 = 0.1653
sase1.qa_2229_t2.k1 = -0.0881
sase1.qa_2235_t2.k1 = 0.0953
# *** comment lines or change inital conditions if you want to play with matching
lat.update_transfer_maps()
res = match(lat, constr=constr, vars=vars, tw=sase1.tws, max_iter=2000, verbose=False)
sase1.qa_2241_sa1.k1 = kd
sase1.qa_2247_sa1.k1 = kf
sase1.qa_2247_sa1_h.k1 = kf
lat.update_transfer_maps()
tws = twiss(lat, tws0=sase1.tws)
plot_opt_func(lat, tws, legend=False)
plt.show()
# check quads limits
for q, k in zip(vars, res):
print(q.id + ".k1 = "+ str(np.round(k,4)) + "; strength is OK :",
quad_limits[q][0]<k<quad_limits[q][1])
kf, kd, bmin, bmax = fodo_correction_SASE1(beta_av=11, K=3)
# constraints
constr = {sase1.fodo_match: {'beta_x':bmax, "beta_y":bmin,
"alpha_x": 0, "alpha_y": 0}}
# variables
vars = [sase1.qf_2177_t2, sase1.qf_2192_t2, sase1.qf_2207_t2,
sase1.qf_2218_t2, sase1.qa_2229_t2, sase1.qa_2235_t2]
# because of variables redundancy, we can help a bit to find a more elegant solution
# *** comment lines or change inital conditions if you want to play with matching
sase1.qf_2177_t2.k1 = -0.2227
sase1.qf_2192_t2.k1 = 0.211
sase1.qf_2207_t2.k1 = -0.2176
sase1.qf_2218_t2.k1 = 0.2392
sase1.qa_2229_t2.k1 = -0.9728
sase1.qa_2235_t2.k1 = 1.3215
# *** comment lines or change inital conditions if you want to play with matching
lat.update_transfer_maps()
res = match(lat, constr=constr, vars=vars, tw=sase1.tws, max_iter=2000, verbose=False)
sase1.qa_2241_sa1.k1 = kd
sase1.qa_2247_sa1.k1 = kf
sase1.qa_2247_sa1_h.k1 = kf
lat.update_transfer_maps()
tws = twiss(lat, tws0=sase1.tws)
plot_opt_func(lat, tws, legend=False)
plt.show()
# check quads limits
for q, k in zip(vars, res):
print(q.id + ".k1 = "+ str(np.round(k,4)) + "; strength is OK :",
quad_limits[q][0]<k<quad_limits[q][1])
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create count vectorizer
Step2: Create y
Step3: Train multi-class multi-label model
Step4: Predict the testing dataset
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import os
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import r2_score, roc_auc_score
from sklearn.model_selection import train_test_split
dataTraining = pd.read_csv('https://github.com/albahnsen/PracticalMachineLearningClass/raw/master/datasets/dataTraining.zip', encoding='UTF-8', index_col=0)
dataTesting = pd.read_csv('https://github.com/albahnsen/PracticalMachineLearningClass/raw/master/datasets/dataTesting.zip', encoding='UTF-8', index_col=0)
dataTraining.head()
dataTesting.head()
vect = CountVectorizer(max_features=1000)
X_dtm = vect.fit_transform(dataTraining['plot'])
X_dtm.shape
print(vect.get_feature_names()[:50])
dataTraining['genres'] = dataTraining['genres'].map(lambda x: eval(x))
le = MultiLabelBinarizer()
y_genres = le.fit_transform(dataTraining['genres'])
y_genres
X_train, X_test, y_train_genres, y_test_genres = train_test_split(X_dtm, y_genres, test_size=0.33, random_state=42)
clf = OneVsRestClassifier(RandomForestClassifier(n_jobs=-1, n_estimators=100, max_depth=10, random_state=42))
clf.fit(X_train, y_train_genres)
y_pred_genres = clf.predict_proba(X_test)
roc_auc_score(y_test_genres, y_pred_genres, average='macro')
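# Sanity-check sketch: per-genre AUC on the held-out split, using the arrays
# defined above (column order follows the MultiLabelBinarizer classes; this
# may fail for a genre that happens to be absent from the test split).
per_genre_auc = [roc_auc_score(y_test_genres[:, j], y_pred_genres[:, j])
                 for j in range(y_test_genres.shape[1])]
print(np.round(per_genre_auc, 3))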
X_test_dtm = vect.transform(dataTesting['plot'])
cols = ['p_Action', 'p_Adventure', 'p_Animation', 'p_Biography', 'p_Comedy', 'p_Crime', 'p_Documentary', 'p_Drama', 'p_Family',
'p_Fantasy', 'p_Film-Noir', 'p_History', 'p_Horror', 'p_Music', 'p_Musical', 'p_Mystery', 'p_News', 'p_Romance',
'p_Sci-Fi', 'p_Short', 'p_Sport', 'p_Thriller', 'p_War', 'p_Western']
y_pred_test_genres = clf.predict_proba(X_test_dtm)
res = pd.DataFrame(y_pred_test_genres, index=dataTesting.index, columns=cols)
res.head()
res.to_csv('pred_genres_text_RF.csv', index_label='ID')
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following figure shows the behaviour of the chosen function and demonstrates how the $npf$ schedule changes depending on the supplied parameters.
Step2: Say we need to give the parametric model more time to adjust in order to produce an overall better outcome at the expense of a longer fitting time. Then, we want the update of the non-parametric function to be delayed and over a longer period. We might do the following, perhaps also increasing $n$ to 4 or 5.
<ASSISTANT_TASK:>
Python Code:
# set up plotting
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = (8.0, 5.0)
plt.rcParams['font.family'] = 'serif'
plt.rcParams['mathtext.fontset'] = 'dejavuserif'
plt.rcParams['font.size'] = 16
def npf(i, a, b, c, d):
return a * b**c / (b**c + np.abs(i-d)**c)
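# Sanity checks (illustrative, with the default parameters used below): the
# schedule peaks at i == d with value a, and falls to a/2 when |i - d| == b,
# since b**c / (b**c + b**c) == 1/2.
assert abs(npf(40, 0.75, 25.0, 4.0, 40.0) - 0.75) < 1e-12
assert abs(npf(65, 0.75, 25.0, 4.0, 40.0) - 0.375) < 1e-12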
i = np.arange(200)
fig, axarr = plt.subplots(2, 2, sharex=True, sharey=True)
axarr[0,0].plot(i, npf(i, 0.70, 25.0, 4.0, 40.0), label='a')
axarr[0,1].plot(i, npf(i, 0.75, 35.0, 4.0, 40.0), label='b')
axarr[1,0].plot(i, npf(i, 0.75, 25.0, 8.0, 40.0), label='c')
axarr[1,1].plot(i, npf(i, 0.75, 25.0, 4.0, 60.0), label='d')
for ax in axarr.flat:
ax.plot(i, npf(i, 0.75, 25.0, 4.0, 40.0), label='0')
ax.legend()
for ax in axarr[:,0]:
ax.set_ylabel('$npf$')
for ax in axarr[1]:
ax.set_xlabel('$i$')
plt.tight_layout()
plt.plot(i, npf(i, 0.75, 40.0, 6.0, 80.0), label='new')
plt.plot(i, npf(i, 0.75, 25.0, 4.0, 40.0), label='default')
plt.legend()
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To manipulate the data we will use the DataFrame object from the pandas library. A DataFrame represents a tabular, spreadsheet-like data structure containing an ordered collection of columns, each of which can be a different value type (numeric, string, boolean, etc.).
Step2: Starting the exploration.
Step3: As we can see, the response percent is only 5.08. Sometimes, data are easier to understand when presented graphically. For example, we can create a pie chart to visualize this data with the following code.
Step4: Here the small portion of donors is clearly visible.
Step6: This graphic is not clear because it has too many amounts; so we could make a segmentation; for this we will create a custom function in order to segment the amounts into categories.
Step7: Now the plot is clearer. We can see that most donations are for a small amount, less than $30.
Step8: One of the most useful graphics in descriptive statistics is the boxplot. The boxplot is a convenient way of graphically depicting groups of numerical data through their quartiles. Box plots may also have lines extending vertically from the boxes indicating variability outside the upper and lower quartiles. Outliers are plotted as individual points in these graphs.
Step9: Here we can see that donation amounts of 200 and 150 are outliers; the main distribution of donation amounts is between 0 and 50, with an average of 15.
Step10: After our analysis we can see that the profit after mailing every donor in the dataset will be 2004.53, with an average donation amount of 0.79 and an average profit of 0.04; not quite good numbers. We will try to improve these profits with our analysis.
Step11: Here we see that the describe method gives us useful information that we can use to get some insights and that could help us to filter the dataset.
Step12: After taking a look at the dataset, we are ready to select only some columns. This way the amount of data we have to manage is reduced and our exploration functions and algorithms run faster.
Step13: Here we can see that there are some values of the HIT variable that are separate from the majority of HIT distribution.
Step14: This plot shows that people aged from 30 to 60 donate a higher median amount than others.
Step15: Here we confirmed the same observations.
Step16: Here we can see that the joint and the male are the gender categories with the highest median donation amounts.
Step17: In this plot we can see the proportion between males and females. Females are the larger group of donors.
Step18: Here we see that most donations came from the CA and FL states, and the median donation amount of these states is greater than that of the other states.
Step19: Here we see the fields with the best correlation to the donor amount.
Step20: Calculating donation probability
Step21: Here we can see that the donation probability is higher when the previous donation amount decreases. We can conclude that there is an inverse correlation between the donation amounts and the probability of donation.
Step22: Here we can see that only 64 donors account for 8% of the total donation amount.
Step23: Here we can see that only 136 donors account for 13% of the total donation amount.
Step24: Only 54 donors are in common between the two segments.
Step25: People who have donated over $3.50 in the 96NK campaign have a higher probability of donating than the average.
Step26: 6% of total donation came from families with an average income greater than 80.000 a year.
Step27: 26% of total donation came from families with an average home value greater than 160.000.
Step29: 24% of total donations came from families where the percentage of adults 25+ with a graduate degree is greater than 12.
Step31: Then we build another function to test our single model.
Step32: Applying this single model to the LEARNING dataset we can see a profit improvement of 51.11%; in our model we only need to mail 17,561 customers from the dataset to obtain a mean donation of 16.43 and a total profit of 3,020.03
Step34: In order to test our model on the validation dataset, we need to build a custom function that predicts the donation amounts for the validation dataset from the learning dataset.
Step35: Applying our single model to the VALIDATION dataset we can see a profit improvement of 52.39%; mailing 17,700 customers with a mean donation of 16.43 and a total profit of 3,053.01.
Step36: Here we can see really impressive results: our model can predict the donation amounts very well.
Step37: Now, we could apply our new prediction model to the validation dataset.
<ASSISTANT_TASK:>
Python Code:
import pandas as pd # importing pandas
import numpy as np # importing numpy
from pandas import DataFrame, Series # importing DataFrame and Series objects from pandas
import matplotlib.pyplot as plt # importing matplotlib for plotting.
from sklearn.ensemble import RandomForestRegressor # importing RandomForest; maching learning algorithm for classification.
from IPython.display import Image, HTML, display # IPython rich display Image.
# Ignoring deprecation warning messages.
import warnings
warnings.filterwarnings('ignore')
# importing the R language iPython integration. Rmagic.
%load_ext rmagic
# Creating the NGOData DataFrame from the LEARNING dataset.
NGOData = pd.read_csv('/home/raul/Ga_Tech/gA Tech Contest 2013 - Challenge 02 - Datasets/LEARNING.csv',
header=0)
# Creating the donors subset from the NGOData.
NGOData_donors = NGOData[NGOData.DONOR_AMOUNT > 0]
round((NGOData[NGOData.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].count() * 1.0 / NGOData['DONOR_AMOUNT'].count()) * 100.0, 2)
# percent of donors from the dataset.
donors = NGOData.groupby('DONOR_FLAG').IDX.count() # Grouping by DONOR_FLAG
# Creating the chart labels.
labels = [ 'Donors\n' + str(round(x * 1.0 / donors.sum() * 100.0, 2)) + '%' for x in donors ]
labels[0] = 'No ' + labels[0]
# Plotting the results using matplotlib.
fig = plt.figure()
p1 = fig.add_subplot(1,1,1)
p1.pie(donors, labels=labels)
p1.set_title('Portion of Donors')
plot = fig.show()
donors_amounts = NGOData_donors.groupby('DONOR_AMOUNT').size() # Grouping by DONOR_FLAG
# Plotting the grouped amounts.
plot = donors_amounts.plot(kind='bar', title='Donation amounts')
def segment_amounts(serie):
"""This function returns a pandas Series object with the values segmented into categories."""
# Create a Serie, with our segments as index.
result = Series(index=['0-10', '10-20', '20-30', '30-40', '40-50', '50-60', '60-100', '100-200']).fillna(0)
# Segmenting the amounts into the new category indexes.
for index, amount in serie.iteritems():
if index < 10.1:
result['0-10'] += amount
elif index < 20.1:
result['10-20'] += amount
elif index < 30.1:
result['20-30'] += amount
elif index < 40.1:
result['30-40'] += amount
elif index < 50.1:
result['40-50'] += amount
elif index < 60.1:
result['50-60'] += amount
elif index < 100.1:
result['60-100'] += amount
else:
result['100-200'] += amount
return result
# Calling our segmentation function.
donors_amounts1 = segment_amounts(donors_amounts)
donors_amounts1.index.name='Donation amount' # Naming the index.
# Plotting semented results.
plot = donors_amounts1.plot(kind='bar', title='Donors amounts')
# using pandas cut function to segment the Serie.
bb = pd.cut(NGOData_donors['DONOR_AMOUNT'], [0, 10, 20, 30, 40, 50, 60, 100, 200])
# Plotting the results using pandas value_counts function.
plot = pd.value_counts(bb).plot(kind='bar', title='Donation amounts')
# R programming language is better for boxplot graph, so we will use Rmagic to made a donation amount boxplot using R.
# Passing python DataFrame to R.
%R -i NGOData_donors
# R boxplot of donation amounts.
%R donation <- NGOData_donors$DONOR_AMOUNT
plot = %R boxplot(donation)
cost = 0.75 # the cost by donor mailed.
# Calculating the profit of mailing every donor in the data set.
total_cost_all = cost * NGOData['DONOR_AMOUNT'].count()
total_donations_all = NGOData['DONOR_AMOUNT'].sum()
total_profits_all = round(total_donations_all - total_cost_all, 2)
total_profits_all
# Average donation all dataset.
round(NGOData['DONOR_AMOUNT'].mean(), 2)
# Average donation only donators.
round(NGOData_donors['DONOR_AMOUNT'].mean(), 2)
# Average Profit
round((NGOData_donors['DONOR_AMOUNT'].sum() - \
cost * NGOData['DONOR_AMOUNT'].count()) / NGOData['DONOR_AMOUNT'].count(), 2)
# useful describe statistics on the data.
describe = NGOData.describe()
# Collection of numeric columns.
numeric_columns = list(describe.columns)
# Content of describe DataFrame for DONOR_AMOUNT column.
describe['DONOR_AMOUNT']
describe.to_csv('/home/raul/Ga_Tech/gA Tech Contest 2013 - Challenge 02 - Datasets/NGODescribe.csv')
# Correlation DataFrame on Excel.
Image(filename='/home/raul/Ga_Tech/gA Tech Contest 2013 - Challenge 02 - Datasets/descr_excel.png')
columns = [
# demographics
"ODATEDW", "OSOURCE", "STATE", "EC8", "PVASTATE", "DOB", "RECINHSE",
"MDMAUD", "DOMAIN", "CLUSTER", "AGE", "HV2", "CHILD03", "CHILD07","IC4",
"CHILD12", "CHILD18", "NUMCHLD", "INCOME", "GENDER", "WEALTH1", "HIT",
# donor interests
"COLLECT1", "VETERANS", "BIBLE", "CATLG", "HOMEE", "PETS", "CDPLAY",
"STEREO", "PCOWNERS", "PHOTO", "CRAFTS", "FISHER", "GARDENIN", "BOATS",
"WALKER", "KIDSTUFF", "CARDS", "PLATES",
# PEP star RFA status
"PEPSTRFL",
# summary variables of promotion history
"CARDPROM", "MAXADATE", "NUMPROM", "CARDPM12", "NUMPRM12",
# summary variables of donation history
"RAMNTALL", "NGIFTALL", "CARDGIFT", "MINRAMNT", "MAXRAMNT", "LASTGIFT",
"LASTDATE", "FISTDATE", "TIMELAG", "AVGGIFT","RAMNT_3",
# ID & donor variables.
"IDX", "DONOR_FLAG", "DONOR_AMOUNT",
# RFA (Recency/Frequency/Donation Amount)
"RFA_2F", "RFA_2A", "MDMAUD_R", "MDMAUD_F", "MDMAUD_A",
#others
"CLUSTER2", "GEOCODE2"]
# Creating a new DataFrame with the columns subset.
new_NGOData = NGOData[columns]
# Analysis of Age distribution.
plot = new_NGOData['AGE'].hist().set_title('Age distribution')
# Analysis of Number of childs.
plot = new_NGOData['NUMCHLD'].hist().set_title('number of childs distribution')
# exploring the HIT value. The number of responses of a donor.
plot = new_NGOData[['HIT']].boxplot()
plot = new_NGOData[new_NGOData.HIT < 200][['HIT']].boxplot()
# Creating a new DataFrame of NGOData_donors with the columns subset.
new_NGOData_donors = NGOData_donors[columns]
AGE2 = pd.cut(new_NGOData_donors['AGE'], range(0, 100, 5))
plot = pd.value_counts(AGE2).plot(kind='bar', title='Donations amounts by age')
# Adding the AGE2 segment column to our DataFrame.
new_NGOData_donors['AGE2'] = AGE2
# Exploring the donors amounts by age.
plot = new_NGOData_donors[['DONOR_AMOUNT', 'AGE2']].boxplot(by='AGE2')
plot = new_NGOData_donors[new_NGOData_donors.DONOR_AMOUNT < 41][['DONOR_AMOUNT', 'AGE2']].boxplot(by='AGE2')
# Exploring the donors amounts by gender.
plot = new_NGOData_donors[new_NGOData_donors.DONOR_AMOUNT <= 80][['DONOR_AMOUNT', 'GENDER']].boxplot(by='GENDER')
plot = new_NGOData_donors.groupby('GENDER').size().plot(kind='bar').set_title('Gender distribution')
# Listing the state ranking.
states = new_NGOData_donors.groupby('STATE').size()
states.sort(ascending=False)
states[:5] # top 5 states.
# Exploring the donors amounts by States.
plot = new_NGOData_donors[new_NGOData_donors.STATE.isin(['CA', 'FL', 'TX', 'MI', 'IL', 'NC', 'WA'])] \
[['DONOR_AMOUNT', 'STATE']].boxplot(by='STATE')
# numeric columns.
ix_numeric = list(NGOData.describe().columns)
# creating a correlation Serie.
correlation = NGOData[ix_numeric].corrwith(new_NGOData['DONOR_AMOUNT'])
# Sorting the correlation Serie.
correlation = abs(correlation)
correlation.sort(ascending=False)
correlation[:30]
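# Optional visual check (sketch): bar chart of the 15 variables most
# correlated (in absolute value) with DONOR_AMOUNT, using the sorted Series above.
plot = correlation[:15].plot(kind='barh', title='Top correlations with DONOR_AMOUNT')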
# Correlation between all columns.
corr_all = NGOData[ix_numeric].corr()
corr_all[ix_numeric[:5]][:5]
# Export the correlation DataFrame to csv.
corr_all.to_csv('/home/raul/Ga_Tech/gA Tech Contest 2013 - Challenge 02 - Datasets/corr_all.csv')
correlation.to_csv('/home/raul/Ga_Tech/gA Tech Contest 2013 - Challenge 02 - Datasets/corr_amounts.csv')
# Correlation DataFrame on Excel.
Image(filename='/home/raul/Ga_Tech/gA Tech Contest 2013 - Challenge 02 - Datasets/corr_excel.png')
#Calculating overall donation probability.
average_prob = round((NGOData[NGOData.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].count() * 1.0 \
/ NGOData['DONOR_AMOUNT'].count()) * 100.0, 2)
average_prob
#Calculating donation probability for donors with a lastgift less or equal to 10.
a = round((NGOData[(NGOData.DONOR_AMOUNT > 0) & (NGOData.LASTGIFT <= 10)]['DONOR_AMOUNT'].count() * 1.0 \
/ NGOData[NGOData.LASTGIFT <= 10]['DONOR_AMOUNT'].count()) * 100.0, 2)
a
# Plotting the comparison.
lastgift = Series({'average': average_prob, 'lastgift<=10': a})
plot=lastgift.plot(kind='barh', color=['blue', 'green']).set_title('Donation probabiliy')
# Average donation.
average_donation = round(NGOData_donors['DONOR_AMOUNT'].mean(), 2)
average_donation
# Average donation lastgift <= 10
a = round(NGOData_donors[NGOData_donors.LASTGIFT <= 10]['DONOR_AMOUNT'].mean(), 2)
a
# Plotting the comparison.
lastgift = Series({'average': average_donation, 'lastgift<=10': a})
plot = lastgift.plot(kind='barh', color=['blue', 'green']).set_title('Average gross donations')
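# Hypothetical helper (sketch, not used in the cells below): donation probability
# and mean gift for an arbitrary boolean filter, to avoid repeating the same
# expression for every segment explored in the following cells.
def prob_and_mean(mask):
    subset = NGOData[mask]
    prob = round(subset[subset.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].count() * 100.0
                 / subset['DONOR_AMOUNT'].count(), 2)
    mean_don = round(subset[subset.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].mean(), 2)
    return prob, mean_don
print(prob_and_mean(NGOData.LASTGIFT <= 10))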
#Calculating donation probability for donors with a lastgift greater than 35.
a = round((NGOData[(NGOData.DONOR_AMOUNT > 0) & (NGOData.LASTGIFT >35)]['DONOR_AMOUNT'].count() * 1.0 \
/ NGOData[NGOData.LASTGIFT > 35]['DONOR_AMOUNT'].count()) * 100.0, 2)
a
# Plotting the comparison.
lastgift = Series({'average': average_prob, 'lastgift>35': a})
plot=lastgift.plot(kind='barh', color=['blue', 'green']).set_title('Donation probabiliy lastgift >35')
# Average donation lastgift > 35
a = round(NGOData_donors[NGOData_donors.LASTGIFT > 35]['DONOR_AMOUNT'].mean(), 2)
a
# Plotting the comparison.
lastgift = Series({'average': average_donation, 'lastgift>35': a})
plot = lastgift.plot(kind='barh', color=['blue', 'green']).set_title('Average gross donations lastgift > 35')
# Total donation learning data set.
total_donation = round(NGOData_donors['DONOR_AMOUNT'].sum(), 2)
total_donation
# donation amount for donors with lastgift > 35
a = round(NGOData_donors[NGOData_donors.LASTGIFT > 35]['DONOR_AMOUNT'].sum(), 2)
a
# Donors with higher average donation.
b = round(NGOData_donors[NGOData_donors.LASTGIFT > 35]['DONOR_AMOUNT'].count(), 2)
b
# percentage of total donation.
round(a / total_donation * 100, 4)
# donation amount for donors with max donation over $30
a = round(NGOData_donors[NGOData_donors.MAXRAMNT > 30]['DONOR_AMOUNT'].sum(), 2)
a
# Donors with max donation over $30
b = round(NGOData_donors[NGOData_donors.MAXRAMNT > 30]['DONOR_AMOUNT'].count(), 2)
b
# percentage of total donation.
round(a / total_donation * 100, 4)
# donation amount for donors with total past donations greater than $250
a = round(NGOData_donors[NGOData_donors.RAMNTALL > 250]['DONOR_AMOUNT'].sum(), 2)
a
# Donors with total past donations greater than $250
b = round(NGOData_donors[NGOData_donors.RAMNTALL > 250]['DONOR_AMOUNT'].count(), 2)
b
# percentage of total donation.
round(a / total_donation * 100, 4)
# overlap between the two previous segments
b = round(NGOData_donors[(NGOData_donors.RAMNTALL > 250) & (NGOData_donors.MAXRAMNT >30) ]\
['DONOR_AMOUNT'].count(), 2)
b
#Calculating donation probability for donors who have donated in the 96NK campaign.
a = round((NGOData[(NGOData.DONOR_AMOUNT > 0) & (NGOData.RAMNT_3 > 3.5)]['DONOR_AMOUNT'].count() * 1.0 \
/ NGOData[NGOData.RAMNT_3 > 3.5]['DONOR_AMOUNT'].count()) * 100.0, 2)
a
# Average donation for donors who have donated in the 96NK campaign.
b = round(NGOData_donors[NGOData_donors.RAMNT_3 > 3.5]['DONOR_AMOUNT'].mean(), 2)
b
# Plotting the comparison.
comp = Series({'average': average_prob, '96NK campaign': a})
plot=comp.plot(kind='barh', color=['blue', 'green']).set_title('Donation probabiliy 96NK campaign')
# Plotting the comparison.
comp = Series({'average': average_donation, '96NK campaign': b})
plot = comp.plot(kind='barh', color=['blue', 'green']).set_title('Average gross donations 96NK campaign')
# IC4 Average family income in hundreds
IC4 = round(NGOData_donors['IC4'].mean(), 2)
IC4
#Calculating donation probability for IC4
a = round((NGOData[(NGOData.DONOR_AMOUNT > 0) & (NGOData.IC4 > 800)]['IC4'].count() * 1.0 \
/ NGOData[NGOData.IC4 > 800]['DONOR_AMOUNT'].count()) * 100.0, 2)
a
# Average donation for IC4
b = round(NGOData_donors[NGOData_donors.IC4 > 800]['DONOR_AMOUNT'].mean(), 2)
b
# Plotting the comparison.
comp = Series({'average': average_prob, 'family income': a})
plot=comp.plot(kind='barh', color=['blue', 'green']).set_title('Donation probabiliy by family income')
# Plotting the comparison.
comp = Series({'average': average_donation, 'family income': b})
plot = comp.plot(kind='barh', color=['blue', 'green']).set_title('average donation by family income')
a = round(NGOData_donors[NGOData_donors.IC4 > 800]['DONOR_AMOUNT'].count())
a
b = round(NGOData_donors[NGOData_donors.IC4 > 800]['DONOR_AMOUNT'].sum())
b
# percentage of total donation.
round(b / total_donation * 100, 4)
#Calculating donation probability for HV2
a = round((NGOData[(NGOData.DONOR_AMOUNT > 0) & (NGOData.HV2 > 1600)]['HV2'].count() * 1.0 \
/ NGOData[NGOData.HV2 > 1600]['DONOR_AMOUNT'].count()) * 100.0, 2)
a
# Average donation for HV2
b = round(NGOData_donors[NGOData_donors.HV2 > 1600]['DONOR_AMOUNT'].mean(), 2)
b
# Plotting the comparison.
comp = Series({'average': average_prob, 'average home value': a})
plot=comp.plot(kind='barh', color=['blue', 'green']).set_title('Donation probabiliy by average home value')
# Plotting the comparison.
comp = Series({'average': average_donation, 'average home value': b})
plot = comp.plot(kind='barh', color=['blue', 'green']).set_title('average donation by average home value')
b = round(NGOData_donors[NGOData_donors.HV2 > 1600]['DONOR_AMOUNT'].sum())
b
a = round(NGOData_donors[NGOData_donors.HV2 > 1600]['DONOR_AMOUNT'].count())
a
# percentage of total donation.
round(b / total_donation * 100, 4)
#Calculating donation probability for EC8
a = round((NGOData[(NGOData.DONOR_AMOUNT > 0) & (NGOData.EC8 > 12)]['EC8'].count() * 1.0 \
/ NGOData[NGOData.EC8 > 12]['DONOR_AMOUNT'].count()) * 100.0, 2)
a
# Average donation for EC8
b = round(NGOData_donors[NGOData_donors.EC8 > 12]['DONOR_AMOUNT'].mean(), 2)
b
# Plotting the comparison.
comp = Series({'average': average_prob, '% adults + 25 with a graduate degree ': a})
plot=comp.plot(kind='barh', color=['blue', 'green']).set_title('Donation probabiliy \
by % adults + 25 with a graduate degree')
# Plotting the comparison.
comp = Series({'average': average_donation, '% adults + 25 with a graduate degree ': b})
plot=comp.plot(kind='barh', color=['blue', 'green']).set_title('Average Donation by \
% adults + 25 with a graduate degree')
# Number of donors with a EC8 greater than 12.
a = round(NGOData_donors[NGOData_donors.EC8 > 12]['DONOR_AMOUNT'].count())
a
# total donation of donors with a EC8 greater than 12.
b = round(NGOData_donors[NGOData_donors.EC8 > 12]['DONOR_AMOUNT'].sum())
b
# percentage of total donation.
round(b / total_donation * 100, 2)
def apply_model(df):
"""This function applies our model sampling to a dataset.
Criteria:
1. MAXRAMNT > 30
2. RAMNTALL > 250
3. HV2 > 1600 and AGE between 30 and 60.
4. EC8 > 12
5. IC4 > 800
6. RAMNT_3 > 3.5
7. STATE in ('CA', 'FL', 'MI')
This function will return a python set object with the
list of IDXs that are selected by our model criteria selection.
"""
#Building the model sample.
# Segments samples.
sample7 = df[df.STATE.isin(['CA', 'FL', 'MI'])]['IDX']
sample3 = df[(df.HV2 > 1600) & (df.AGE >=30)& (df.AGE >=60)]['IDX']
sample4 = df[df.EC8 > 12]['IDX']
sample1 = df[df.MAXRAMNT > 30]['IDX']
sample2 = df[df.RAMNTALL > 250]['IDX']
sample5 = df[df.IC4 > 800]['IDX']
sample6 = df[df.RAMNT_3 > 3.5]['IDX']
# depurating the model sample.
sample = set(sample7.values)
# using sets difference propierty to depurate the sample.
sample = sample ^ set(sample3.values)
sample = sample ^ set(sample4.values)
sample = sample ^ set(sample1.values)
sample = sample ^ set(sample2.values)
sample = sample ^ set(sample5.values)
sample = sample ^ set(sample6.values)
return sample
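# Quick usage check (sketch): size of the segment selected on the learning data.
model_sample = apply_model(NGOData)
print("Donors selected by the segment rules: " + str(len(model_sample)))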
# Building our simple model.
def single_model(df, cost):
"""This function applies the simple model to a DataFrame.
The model is built from the following segments:
1. MAXRAMNT > 30
2. RAMNTALL > 250
3. HV2 > 1600 and AGE between 30 and 60.
4. EC8 > 12
5. IC4 > 800
6. RAMNT_3 > 3.5
7. STATE in ('CA', 'FL', 'MI')
Parameters:
* df : DataFrame to apply the model
* cost: Cost per piece mailed.
Prints the dataset and model information,
plots the comparison between the given dataset and the model.
Returns the DataFrame with the model subselection.
"""
# copy the Dataframe to a new object.
df1 = df
#Calculating profits for all DataFrame.
total_donations_all = round(df['DONOR_AMOUNT'].sum(), 2)
total_cost_all = round(cost * df['DONOR_AMOUNT'].count(), 2)
total_profits_all = total_donations_all - total_cost_all
mean_donation_all = df[df.DONOR_FLAG == 1]['DONOR_AMOUNT'].mean()
donation_prob_all = round((df[df.DONOR_FLAG == 1]['DONOR_AMOUNT'].count() * 1.0 \
/ df['DONOR_AMOUNT'].count()) * 100.0, 2)
#Building the model sample with our apply_sample function.
sample = apply_model(df)
sample_all = list(sample) # sample size.
# Applying our sample to the new dataframe.
df1 = df1[df1.IDX.isin(sample_all)]
# Calculating contribution profits of model
total_donations = round(df1['DONOR_AMOUNT'].sum(), 2)
total_cost = round(cost * len(sample_all), 2)
model_profits = total_donations - total_cost
profit_improvement = round(((model_profits - total_profits_all) / total_profits_all) * 100, 2)
mean_donation = df1[NGOData.DONOR_FLAG == 1]['DONOR_AMOUNT'].mean()
donation_prob = (float(df1[NGOData.DONOR_FLAG == 1]['DONOR_AMOUNT'].count()) \
/ float(len(sample))) * 100
donors_percent = (len(sample) * 1.0 /df['IDX'].count()) * 100.0
# Printing the results
# Printing all df values.
print 'Original dataset values:\n'
print 'All dataset size: %d' % df['IDX'].count()
print 'All dataset donation prob.: %.2f%%' % donation_prob_all
print 'All dataset donations: $%.2f' % total_donations_all
print 'All dataset cost: $%.2f' % total_cost_all
print 'All dataset profits: $%.2f' % total_profits_all
print 'All dataset mean donation: $%.2f' % mean_donation_all
print '\n'
# Printing model values.
print 'Model values:\n'
print 'Model sample size: %d' % len(sample)
print 'Model sample donation prob.: %.2f%%' % donation_prob
print 'Model total donations: $%.2f' % total_donations
print 'Model total cost: $%.2f' % total_cost
print 'Model total profits: $%.2f' % model_profits
print 'Model mean donation: $%.2f' % mean_donation
print 'Model profit improvement: %.2f %%' % profit_improvement
print 'Model donors mailed percent: %.2f %%' % donors_percent
# Plotting the comparison.
# Average donation
comp = Series({'All dataset average donation': mean_donation_all, 'Model average donation': mean_donation})
comp2 = Series({'All dataset donation prob.': donation_prob_all, 'Model donation porb.': donation_prob})
plt.figure()
comp.plot(kind='barh', color=['blue', 'green']).set_title('Average Donation all dataset vs model')
plt.figure()
comp2.plot(kind='barh', color=['blue', 'green']).set_title('Donation probability all dataset vs model')
return df1
# Applying the simple model to the NGO dataset.
x = single_model(NGOData, cost)
# Creating the NGOvalidation DataFrame from the VALIDATION dataset.
NGOvalidation = pd.read_csv('/home/raul/Ga_Tech/gA Tech Contest 2013 - Challenge 02 - Datasets/VALIDATION.txt',
header=0)
def single_model_val(dfl, dfv, cost):
"""This function applies the simple model to a DataFrame.
The model is built from the following segments:
1. MAXRAMNT > 30
2. RAMNTALL > 250
3. HV2 > 1600 and AGE between 30 and 60.
4. EC8 > 12
5. IC4 > 800
6. RAMNT_3 > 3.5
7. STATE in ('CA', 'FL', 'MI')
Parameters:
* dfl : the learning dataset.
* dfv : the validation dataset.
* cost: Cost per piece mailed.
Prints the original dataset, the learning dataset and the validation dataset information.
Plots the comparison between the given dataset and the model.
Returns the DataFrame with the model subselection.
"""
learn = dfl # copy the learning dataset
valid = dfv # copy the validation dataset
learn_values = apply_model(learn) # applying our model to the learning dataset
valid_values = apply_model(valid) # applying our model to the validation dataset
learn = learn[learn.IDX.isin(learn_values)] # selecting the customers
valid = valid[valid.IDX.isin(valid_values)] # selecting the customers
# Calculating variables for learning dataset
total_donations_learn = round(learn['DONOR_AMOUNT'].sum(), 2)
total_cost_learn = round(cost * len(learn_values), 2)
model_profits_learn = total_donations_learn - total_cost_learn
mean_donation_learn = learn[learn.DONOR_FLAG == 1]['DONOR_AMOUNT'].mean()
donation_prob_learn = (float(learn[learn.DONOR_FLAG == 1]['DONOR_AMOUNT'].count()) \
/ float(len(learn_values)))
#Calculating variables for all DataFrame.
mean_donation_all = dfl[dfl.DONOR_FLAG == 1]['DONOR_AMOUNT'].mean()
donation_prob_all = dfl[dfl.DONOR_FLAG == 1]['DONOR_AMOUNT'].count() * 1.0 \
/ dfl['DONOR_AMOUNT'].count()
total_donations_all = mean_donation_all * donation_prob_all * len(dfv)
total_cost_all = cost * len(dfv)
total_profits_all = total_donations_all - total_cost_all
# Calculation varaibles for validation dataset.
total_donations_valid = mean_donation_learn * donation_prob_learn * len(valid_values)
total_cost_valid = round(cost * len(valid_values), 2)
model_profits_valid = total_donations_valid - total_cost_valid
donors_percent_valid = (len(valid_values) * 1.0 /dfv['IDX'].count()) * 100.0
profit_improvement_valid = (model_profits_valid - total_profits_all) / total_profits_all
# Printing the results
# Printing all df values.
print 'Original validation dataset values:\n'
print 'All dataset size: %d' % len(dfv)
print 'All dataset donation prob.: %.2f%% (infer from learning)' % (donation_prob_all * 100)
print 'All dataset donations: $%.2f (infer from learning)' % total_donations_all
print 'All dataset cost: $%.2f' % total_cost_all
print 'All dataset profits: $%.2f' % total_profits_all
print 'All dataset mean donation: $%.2f (infer from learning)' % mean_donation_all
print '\n'
# Printing learning df values.
print 'Learning dataset values:\n'
print 'Learning dataset size: %d' % len(learn_values)
print 'Learning dataset donation prob.: %.2f%%' % (donation_prob_learn * 100)
print 'Learning dataset donations: $%.2f' % total_donations_learn
print 'Learning dataset cost: $%.2f' % total_cost_learn
print 'Learning dataset profits: $%.2f' % model_profits_learn
print 'Learning dataset mean donation: $%.2f' % mean_donation_learn
print '\n'
# Printing validation values.
print 'Validation dataset values:\n'
print 'Validation sample size: %d' % len(valid_values)
print 'Validation sample donation prob.: %.2f%%' % (donation_prob_learn * 100)
print 'Validation total donations: $%.2f' % total_donations_valid
print 'Validation total cost: $%.2f' % total_cost_valid
print 'Validation total profits: $%.2f' % model_profits_valid
print 'Validation mean donation: $%.2f' % mean_donation_learn
print '\n'
print 'Model profit improvement: %.2f %%' % (profit_improvement_valid * 100)
print 'Model donors mailed percent: %.2f %%' % donors_percent_valid
# Plotting the comparison.
# Average donation
comp = Series({'All dataset average donation': mean_donation_all, \
'Model average donation': mean_donation_learn})
comp2 = Series({'All dataset donation prob.': donation_prob_all * 100, \
'Model donation porb.': donation_prob_learn * 100.0})
plt.figure()
comp.plot(kind='barh', color=['blue', 'green']).set_title('Average Donation all dataset vs model')
plt.figure()
comp2.plot(kind='barh', color=['blue', 'green']).set_title('Donation probability all dataset vs model')
return valid
aa = single_model_val(NGOData, NGOvalidation, cost)
# Selecting the more statistically significant variables to predict the donor_amount.
columns = ['DONOR_AMOUNT', 'IDX', 'HV2', 'SOLP3', 'MAXRAMNT', 'IC4', 'EC8', 'RAMNT_3', \
'RDATE_3', 'RAMNT_21', 'RAMNTALL', 'LASTGIFT', 'RAMNT_14', 'RAMNT_22' ]
# Feature selection
features = columns[2:]
# Preparing the train dataset
train = NGOData[columns]
# Cleansing the dataset.
train = train.fillna(0)
# building our Random forest model.
clf = RandomForestRegressor(n_estimators=50, n_jobs=2)
clf.fit(train[features], train.DONOR_AMOUNT)
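# Optional sketch: inspect which features the fitted forest relies on most.
importances = sorted(zip(features, clf.feature_importances_), key=lambda t: -t[1])
print(importances[:5])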
# Predicting the results.
preds = clf.predict(train[features])
# Testing the results of our prediction model.
# Adding the predicted column to the dataset.
train['DONOR_PRED'] = preds
# previewing the results.
aa = train [['DONOR_AMOUNT', 'DONOR_PRED']]
aa[aa.DONOR_AMOUNT > 0][:10]
# Total donations dataset.
aa['DONOR_AMOUNT'].sum()
# Total donations predicted by model
aa['DONOR_PRED'].sum()
# Value predicted but no actual donation
aa[ (aa.DONOR_AMOUNT == 0) & (aa.DONOR_PRED >0.75 )].count()
# Total Donation amount wrongly predicted
aa[ (aa.DONOR_AMOUNT == 0) & (aa.DONOR_PRED >0.75 )]['DONOR_PRED'].sum()
# mean donation wrongly predicted.
aa[ (aa.DONOR_AMOUNT == 0) & (aa.DONOR_PRED >0.75 )]['DONOR_PRED'].mean()
# Error rate.
error_rate = aa[ (aa.DONOR_AMOUNT == 0) & (aa.DONOR_PRED >0.75 )]['DONOR_PRED'].sum() \
/aa['DONOR_PRED'].sum()
# Model corrected rate.
corrected_rate = round(1.0 - error_rate, 2)
corrected_rate
# Actual donations not predicted.
aa[ (aa.DONOR_AMOUNT > 0) & (aa.DONOR_PRED ==0 )].count()
# Actual equals predicted.
aa[ (aa.DONOR_AMOUNT == aa.DONOR_PRED)].count()
columns = ['IDX', 'HV2', 'SOLP3', 'MAXRAMNT', 'IC4', 'EC8', 'RAMNT_3', \
'RDATE_3', 'RAMNT_21', 'RAMNTALL', 'LASTGIFT', 'RAMNT_14', 'RAMNT_22' ]
# subset of validation
validation = NGOvalidation[columns]
# Cleansing the dataset.
validation = validation.fillna(0)
# predicting the donation amounts.
DONOR_AMOUNT = clf.predict(validation[features])
# Adding predicted donation amounts to validations subset.
validation['DONOR_AMOUNT'] = DONOR_AMOUNT
# Selecting only the customers with a donation greater than cost.
validation_mail = validation[validation.DONOR_AMOUNT > 0.75]
# Calculating customer mailed
mailed = len(validation_mail)
print 'Customer mailed: %d' % mailed
# Calculating total cost.
total_cost = round(len(validation_mail) * 0.75, 2)
print 'Total Cost: %2.f' % total_cost
# Calculating total donation amounts.
total_donations = round(validation_mail['DONOR_AMOUNT'].sum() * corrected_rate, 2)
total_donations
# Calculating net profits
profits = total_donations - total_cost
profits
# Model profits improvement.
model_improvement = round(((profits - total_profits_all)/ total_profits_all) * 100.0, 2)
print 'Model profits improvements of %.2f%%' % model_improvement
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Segment source text<a name="SegmentSourceText"></a>
Step2: Read segments into a variable <a name="ReadSegmentsIntoVariable"></a>
Step3: Now we should have 45 strings in the variable corpus to play around with
Step4: For a quick impression, let's see the opening 500 characters of an arbitrary one of them
Step5: Tokenising <a name="Tokenising"></a>
Step6: For our examples, let's have a look at (the first 50 words of) an arbitrary one of those segments
Step7: Already, we can have a first go at finding the most frequent words for a segment. (For this we use a simple library of functions that we import by the name of 'collections'.)
Step8: Perhaps now is a good opportunity for a small excursus. What we have printed in the last code is a series of pairs
Step9: Looks better now, doesn't it?
Step10: So, we can again build a dictionary of key-value pairs associating all the lemmata ("values") with their wordforms ("keys")
Step11: Again, a quick test
Step12: Now we can use this dictionary to build a new list of words, where only lemmatised forms occur
Step13: As you can see, the original text is lost now from the data that we are currently working with (unless we add another dimension to our lemmatised variable which can keep the original word form). But let us see if something in the 10 most frequent words has changed
Step14: Yes, things have changed
Step15: Now let's try and suppress the stopwords in the segments...
Step16: With this, we can already create a first "profile" of our first 4 segments
<ASSISTANT_TASK:>
Python Code:
bigsourcefile = 'TextProcessing_2017/W0013.orig.txt' # This is the path to our file
input = open(bigsourcefile, encoding='utf-8').readlines() # We use a variable 'input' for
# keeping its contents.
input[:10] # Just for information,
# let's see the first 10 lines of the file.
splitLen = 80 # 80 lines per file
outputBase = 'TextProcessing_2017/segment' # source/segment.1.txt, source/segment.2.txt, etc.
count = 0 # initialise some variables.
at = 0
dest = None # this later takes our destination files
for line in input:
if count % splitLen == 0:
if dest: dest.close()
dest = open(outputBase + '.' + str(at) + '.txt', encoding='utf-8', mode='w') # 'w' is for writing: here we open the file the current segment is being written to
at += 1
dest.write(line.strip())
count += 1
print(str(at - 1) + ' files written.')
import sys
import glob
import errno
path = 'TextProcessing_2017'
filename = 'segment.'
suffix = '.txt'
corpus = []
for i in range(0, at - 1):
try:
with open(path + '/' + filename + str(i) + suffix, encoding='utf-8') as f:
corpus.append(f.read())
f.close()
except IOError as exc:
if exc.errno != errno.EISDIR: # Do not fail if a directory is found, just ignore it.
raise # Propagate other kinds of IOError.
len(corpus)
corpus[5][:500]
import re
tokenised = []
for segment in corpus:
tokenised.append(list(filter(None, (word.lower() for word in re.split('\W+', segment)))))
print(tokenised[5][:50])
import collections
counter = collections.Counter(tokenised[5])
print(counter.most_common(10))
import pandas as pd
df1 = pd.DataFrame.from_dict(counter, orient='index').reset_index()
df2 = df1.rename(columns={'index':'lemma',0:'count'})
df2.sort_values('count',0,False)[:10]
wordfile_path = 'TextProcessing_2017/wordforms-lat.txt'
wordfile = open(wordfile_path, encoding='utf-8')
print (wordfile.read()[:59])
wordfile.close();  # (The semicolon suppresses the returned object in cell output)
lemma = {} # we build a so-called dictionary for the lookups
tempdict = []
wordfile = open(wordfile_path, encoding='utf-8')
for line in wordfile.readlines():
tempdict.append(tuple(line.split('>')))
lemma = {k.strip(): v.strip() for k, v in tempdict}
wordfile.close();
print(str(len(lemma)) + ' wordforms registered.')
lemma['ciuicior']
lemmatised = [[lemma[word] if word in lemma else word for word in segment] \
for segment in tokenised]
print(lemmatised[5][:50])
counter2 = collections.Counter(lemmatised[5])
df1 = pd.DataFrame.from_dict(counter2, orient='index').reset_index()
df2 = df1.rename(columns={'index':'lemma',0:'count'})
df2.sort_values('count',0,False)[:10]
stopwords_path = 'TextProcessing_2017/stopwords-lat.txt'
stopwords = open(stopwords_path, encoding='utf-8').read().splitlines()
print(str(len(stopwords)) + ' stopwords, e.g.: ' + str(stopwords[24:54]))
stopped = [[item for item in lemmatised_segment if item not in stopwords] \
for lemmatised_segment in lemmatised]
print(stopped[5][:20])
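# A quick measure (sketch) of how much stopword removal shrank the segments,
# comparing the lemmatised and stopped lists built above.
removed = sum(len(seg) for seg in lemmatised) - sum(len(seg) for seg in stopped)
print(str(removed) + ' stopword tokens removed across all segments.')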
counter3 = collections.Counter(stopped[0])
counter4 = collections.Counter(stopped[1])
counter5 = collections.Counter(stopped[2])
counter6 = collections.Counter(stopped[3])
df0_1 = pd.DataFrame.from_dict(counter3, orient='index').reset_index()
df0_2 = df0_1.rename(columns={'index':'lemma',0:'count'})
df1_1 = pd.DataFrame.from_dict(counter4, orient='index').reset_index()
df1_2 = df1_1.rename(columns={'index':'lemma',0:'count'})
df2_1 = pd.DataFrame.from_dict(counter5, orient='index').reset_index()
df2_2 = df2_1.rename(columns={'index':'lemma',0:'count'})
df3_1 = pd.DataFrame.from_dict(counter6, orient='index').reset_index()
df3_2 = df3_1.rename(columns={'index':'lemma',0:'count'})
print(' ')
print(' Most frequent lemmata in the first text segment')
print(df0_2.sort_values(by='count',axis=0,ascending=False)[:10])
print(' ')
print(' ')
print(' Most frequent lemmata in the second text segment')
print(df1_2.sort_values(by='count',axis=0,ascending=False)[:10])
print(' ')
print(' ')
print(' Most frequent lemmata in the third text segment')
print(df2_2.sort_values(by='count',axis=0,ascending=False)[:10])
print(' ')
print(' ')
print(' Most frequent lemmata in the fourth text segment')
print(df3_2.sort_values(by='count',axis=0,ascending=False)[:10])
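# The same per-segment profiles, generated with a loop instead of one counter
# per segment (illustrative refactoring sketch).
for idx in range(4):
    seg_counts = collections.Counter(stopped[idx])
    seg_df = pd.DataFrame.from_dict(seg_counts, orient='index').reset_index()
    seg_df = seg_df.rename(columns={'index': 'lemma', 0: 'count'})
    print('Most frequent lemmata in segment ' + str(idx))
    print(seg_df.sort_values(by='count', axis=0, ascending=False)[:10])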
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For a reason that will become clearer under the exec visualization, let's add an iternode at the beginning of the spmflow and connect them together under a new workflow, called metaflow. The iternode will cause the workflow to be executed three times, once with the fwhm value set to 4, once set to 6 and once set to 8. For more about this see the Iteration tutorial.
Step2: orig graph
Step3: flat graph
Step4: hierarchical graph
Step5: colored graph
Step6: exec graph
Step7: Detailed graphs
Step8: Such a visualization might be more complicated to read, but it gives you a complete overview of a workflow and all its components.
Step9: In the middle left of the figure, we have three preproc.smooth nodes of the spm interface with the names "a0", "a1" and "a2". Those represent the three smoothing nodes with the fwhm parameter set to 4, 6 and 8. Now if those nodes would be connected to another workflow, this would mean that the workflow that follows would be depicted three times, each time for another input coming from the preproc.smooth node.
|
<ASSISTANT_TASK:>
Python Code:
# Import the function to create an spm fmri preprocessing workflow
from niflow.nipype1.workflows.fmri.spm import create_spm_preproc
# Create the workflow object
spmflow = create_spm_preproc()
# Import relevant modules
from nipype import IdentityInterface, Node, Workflow
# Create an iternode that iterates over three different fwhm values
inputNode = Node(IdentityInterface(fields=['fwhm']), name='iternode')
inputNode.iterables = ('fwhm', [4, 6, 8])
# Connect inputNode and spmflow in a workflow
metaflow = Workflow(name='metaflow')
metaflow.connect(inputNode, "fwhm", spmflow, "inputspec.fwhm")
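# Optional check (sketch): list the nodes now contained in the meta-workflow.
print(metaflow.list_node_names())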
# Write graph of type orig
spmflow.write_graph(graph2use='orig', dotfilename='./graph_orig.dot')
# Visualize graph
from IPython.display import Image
Image(filename="graph_orig.png")
# Write graph of type flat
spmflow.write_graph(graph2use='flat', dotfilename='./graph_flat.dot')
# Visualize graph
from IPython.display import Image
Image(filename="graph_flat.png")
# Write graph of type hierarchical
metaflow.write_graph(graph2use='hierarchical', dotfilename='./graph_hierarchical.dot')
# Visualize graph
from IPython.display import Image
Image(filename="graph_hierarchical.png")
# Write graph of type colored
metaflow.write_graph(graph2use='colored', dotfilename='./graph_colored.dot')
# Visualize graph
from IPython.display import Image
Image(filename="graph_colored.png")
# Write graph of type exec
metaflow.write_graph(graph2use='exec', dotfilename='./graph_exec.dot')
# Visualize graph
from IPython.display import Image
Image(filename="graph_exec.png")
from IPython.display import Image
Image(filename="graph_flat_detailed.png")
from IPython.display import Image
Image(filename="graph_exec_detailed.png")
# Write graph of type orig
spmflow.write_graph(graph2use='orig', dotfilename='./graph_orig_notSimple.dot', simple_form=False)
# Visualize graph
from IPython.display import Image
Image(filename="graph_orig_notSimple.png")
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tensor2Tensor Reinforcement Learning
Step2: Play using a pre-trained policy
Step3: To evaluate and generate videos for a pretrained policy on Pong
Step4: The above command will run a single evaluation setting to get the results fast. We usually run a grid of different settings (sampling temperatures and whether to do initial no-ops). To do that, remove eval_max_num_noops=8,eval_sampling_temps=[0.5] from the command. You can override the evaluation settings
Step5: Train your policy (model-free training)
Step6: Hyperparameter sets are defined in tensor2tensor/models/research/rl.py. You can override them using the hparams flag, e.g.
|
<ASSISTANT_TASK:>
Python Code:
#@title
# Copyright 2018 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install -q tensorflow==1.13.1
!pip install -q tensorflow_probability==0.6.0
!pip install -q tensor2tensor==1.13.1
!pip install -q gym[atari]
# Helper function for playing videos in the colab.
def play_video(path):
from IPython.core.magics.display import HTML
display_path = "/nbextensions/vid.mp4"
display_abs_path = "/usr/local/share/jupyter" + display_path
!rm -f $display_abs_path
!ffmpeg -loglevel error -i $path $display_abs_path
return HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format(display_path))
# experiment_id is an integer from [0, 4].
def get_run_dir(game, experiment_id):
from tensor2tensor.data_generators.gym_env import ATARI_GAMES_WITH_HUMAN_SCORE_NICE
EXPERIMENTS_PER_GAME = 5
run_id = ATARI_GAMES_WITH_HUMAN_SCORE_NICE.index(game) * EXPERIMENTS_PER_GAME + experiment_id + 1
return "gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/{}".format(run_id)
get_run_dir('pong', 2)
game = 'pong'
run_dir = get_run_dir(game, 1)
!python -m tensor2tensor.rl.evaluator \
--loop_hparams_set=rlmb_long_stochastic_discrete \
--loop_hparams=game=$game,eval_max_num_noops=8,eval_sampling_temps=[0.5] \
--policy_dir=$run_dir/policy \
--eval_metrics_dir=pong_pretrained \
--debug_video_path=pong_pretrained \
--num_debug_videos=4
play_video('pong_pretrained/0.avi')
!python -m tensor2tensor.rl.trainer_model_free \
--hparams_set=rlmf_base \
--hparams=game=pong \
--output_dir=mf_pong
!python -m tensor2tensor.rl.evaluator \
--loop_hparams_set=rlmf_tiny \
--hparams=game=pong \
--policy_dir=mf_pong \
--debug_video_path=mf_pong \
--num_debug_videos=4 \
--eval_metrics_dir=mf_pong/full_eval_metrics
play_video('mf_pong/0.avi')
<END_TASK>
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functions are first class objects
Step2: Q. Can you write this function in fewer lines?
Step3: The predicate parameter
Step4: 2. instance of a class that implements __call__ (functor)
Step5: 3. lambda expression
Step6: Functions can be nested
Step7: the nested function is only accessible from the parent
Step8: Functions can be return values
Step9: Nested functions have access to the parent's scope
Step10: Function factory
Step11: Wrapper function factory
Step12: Wrapping a function
Step13: now add some noise
Step14: Bound the original reference to the wrapped function
Step15: this turns out to be a frequent operation
Step16: Decorator syntax
Step17: Pie syntax
Step19: Solution 1. Copy manually
Step20: What about other metadata such as the docstring?
Step22: Solution 2. functools.wraps
Step23: Problem 2. Function arguments
Step24: the same mechanism can be used in decorators
Step25: the decorator has only one parameter
Step26: Decorators can take parameters too
Step27: Decorators can be implemented as classes
Step28: See also
Step29: Filter
Step30: Most comprehensions can be rewritten using map and filter
Step31: Reduce
Step32: an initial value for the accumulator may be supplied
Step33: Modules and imports
Step34: importing submodules
Step35: the as keyword binds the module to a different name
Step36: importing more than one module/submodule
Step37: importing functions or classes
Step38: importing everything from a module
<ASSISTANT_TASK:>
Python Code:
def greeter(func):
print("Hello")
func()
def say_something():
print("Let's learn some Python.")
greeter(say_something)
# greeter(12)
def count_predicate(predicate, iterable):
true_count = 0
for element in iterable:
if predicate(element) is True:
true_count += 1
return true_count
def count_predicate(predicate, iterable):
return sum(predicate(e) for e in iterable)
def is_even(number):
return number % 2 == 0
numbers = [1, 3, 2, -5, 0, 0]
count_predicate(is_even, numbers)
class IsEven(object):
def __call__(self, number):
return number % 2 == 0
print(count_predicate(IsEven(), numbers))
IsEven()(123)
i = IsEven()
i(11)
count_predicate(lambda x: x % 2 == 0, numbers)
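# Illustrative sketch: a predicate can also be built with functools.partial,
# which freezes some arguments of an existing function (is_divisible_by and
# is_even_partial are names chosen here just for the example).
from functools import partial

def is_divisible_by(divisor, number):
    return number % divisor == 0

is_even_partial = partial(is_divisible_by, 2)
count_predicate(is_even_partial, numbers)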
def parent():
print("I'm the parent function")
def child():
print("I'm the child function")
parent()
def parent():
print("I'm the parent function")
def child():
print("I'm the child function")
print("Calling the nested function")
child()
parent()
# parent.child # raises AttributeError
def parent():
print("I'm the parent function")
def child():
print("I'm the child function")
return child
child_func = parent()
print("Calling child")
child_func()
print("\nUsing parent's return value right away")
parent()()
def parent(value):
def child():
print("I'm the nested function. "
"The parent's value is {}".format(value))
return child
child_func = parent(42)
print("Calling child_func")
child_func()
f1 = parent("abc")
f2 = parent(123)
f1()
f2()
f1 is f2
def make_func(param):
value = param
def func():
print("I'm the nested function. "
"The parent's value is {}".format(value))
return func
func_11 = make_func(11)
func_abc = make_func("abc")
func_11()
func_abc()
def add_noise(func):
def wrapped_with_noise():
print("Calling function {}".format(func.__name__))
func()
print("{} finished.".format(func.__name__))
return wrapped_with_noise
def noiseless_function():
print("This is not noise")
noiseless_function()
noisy_function = add_noise(noiseless_function)
noisy_function()
def greeter():
print("Hello")
print(id(greeter))
greeter = add_noise(greeter)
greeter()
print(id(greeter))
def friendly_greeter():
print("Hello friend")
def rude_greeter():
print("Hey you")
friendly_greeter = add_noise(friendly_greeter)
rude_greeter = add_noise(rude_greeter)
friendly_greeter()
rude_greeter()
@add_noise
def informal_greeter():
print("Yo")
# informal_greeter = add_noise(informal_greeter)
informal_greeter()
informal_greeter.__name__
def add_noise(func):
def wrapped_with_noise():
print("Calling {}...".format(func.__name__))
func()
print("{} finished.".format(func.__name__))
wrapped_with_noise.__name__ = func.__name__
return wrapped_with_noise
@add_noise
def greeter():
meaningful documentation
print("Hello")
print(greeter.__name__)
print(greeter.__doc__)
from functools import wraps
def add_noise(func):
@wraps(func)
def wrapped_with_noise():
print("Calling {}...".format(func.__name__))
func()
print("{} finished.".format(func.__name__))
# note: functools.wraps has already copied __name__ (and __doc__), so the manual copy from Solution 1 is no longer needed here
return wrapped_with_noise
@add_noise
def greeter():
function that says hello
print("Hello")
print(greeter.__name__)
print(greeter.__doc__)
def function_with_variable_arguments(*args, **kwargs):
print(args)
print(kwargs)
function_with_variable_arguments(1, "apple", tree="peach")
def add_noise(func):
@wraps(func)
def wrapped_with_noise(*args, **kwargs):
print("Calling {}...".format(func.__name__))
func(*args, **kwargs)
print("{} finished.".format(func.__name__))
return wrapped_with_noise
@add_noise
def personal_greeter(name):
print("Hello {}".format(name))
personal_greeter("John")
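# Illustrative sketch: add_noise above calls func(*args, **kwargs) but drops its
# return value; a wrapper that forwards the result as well could look like this
# (add_noise_keep_result is a hypothetical name, not an existing helper).
def add_noise_keep_result(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        print("Calling {}...".format(func.__name__))
        result = func(*args, **kwargs)
        print("{} finished.".format(func.__name__))
        return result
    return wrapper

@add_noise_keep_result
def add(a, b):
    return a + b

add(2, 3)  # prints the noise and still returns 5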
def decorator_with_param(param1, param2=None):
print("Creating a new decorator: {0}, {1}".format(
param1, param2))
def actual_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
print("Wrapper function {}".format(
func.__name__))
print("Params: {0}, {1}".format(param1, param2))
return func(*args, **kwargs)
return wrapper
return actual_decorator
@decorator_with_param(42, "abc")
def personal_greeter(name):
print("Hello {}".format(name))
@decorator_with_param(4)
def personal_greeter2(name):
print("Hello {}".format(name))
print("\nCalling personal_greeter")
personal_greeter("Mary")
def hello(name):
print("Hello {}".format(name))
hello = decorator_with_param(1, 2)(hello)
hello("john")
class MyDecorator(object):
def __init__(self, func):
self.func_to_wrap = func
wraps(func)(self)
def __call__(self, *args, **kwargs):
print("before func {}".format(self.func_to_wrap.__name__))
res = self.func_to_wrap(*args, **kwargs)
print("after func {}".format(self.func_to_wrap.__name__))
return res
@MyDecorator
def foo():
print("bar")
foo()
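# Illustrative sketch: a class-based decorator can also take parameters --
# __init__ stores them and __call__ receives the function to wrap
# (Repeat is a hypothetical example class, mirroring decorator_with_param above).
class Repeat(object):
    def __init__(self, times):
        self.times = times
    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for _ in range(self.times):
                func(*args, **kwargs)
        return wrapper

@Repeat(times=3)
def cheer():
    print("hip hip hooray")

cheer()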
def double(e):
return e * 2
l = [2, 3, "abc"]
list(map(double, l))
map(double, l)
list(map(lambda x: x * 2, [2, 3, "abc"]))
def is_even(n):
return n % 2 == 0
l = [2, 3, -1, 0, 2]
list(filter(is_even, l))
list(filter(lambda x: x % 2 == 0, range(8)))
l = [2, 3, 0, -1, 2, 0, 1]
signum = [x / abs(x) if x != 0 else x for x in l]
print(signum)
list(map(lambda x: x / abs(x) if x != 0 else 0, l))
even = [x for x in l if x % 2 == 0]
print(even)
print(list(filter(lambda x: x % 2 == 0, l)))
from functools import reduce
l = [1, 2, -1, 4]
reduce(lambda x, y: x*y, l)
reduce(lambda x, y: x*y, l, 10)
reduce(lambda x, y: max(x, y), l)
reduce(max, l)
reduce(lambda x, y: x + int(y % 2 == 0) * y, l, 0)
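# Illustrative sketch: any reduce call is equivalent to an explicit accumulator
# loop; sum_of_evens is just a hypothetical spelled-out version of the lambda above.
def sum_of_evens(iterable):
    acc = 0
    for y in iterable:
        acc = acc + int(y % 2 == 0) * y
    return acc

sum_of_evens(l) == reduce(lambda x, y: x + int(y % 2 == 0) * y, l, 0)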
import sys
", ".join(dir(sys))
from os import path
try:
os
except NameError:
print("os does not seem to be defined")
try:
path
print("path found")
except NameError:
print("path does not seem to be defined")
import os as os_module
try:
os
except NameError:
print("os does not seem to be defined")
try:
os_module
print("os_module found")
except NameError:
print("os_module does not seem to be defined")
# import os, sys
from sys import stdin, stderr, stdout
from argparse import ArgumentParser
import inspect
inspect.isclass(ArgumentParser)
from os import *
try:
makedirs
stat
print("everything found")
except NameError:
print("Something not found")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Open file with Nansat
Step2: Read information ABOUT the data (METADATA)
Step3: Read the actual DATA
Step4: Check what kind of data we have
|
<ASSISTANT_TASK:>
Python Code:
import os
import shutil
import nansat
idir = os.path.join(os.path.dirname(nansat.__file__), 'tests', 'data/')
import matplotlib.pyplot as plt
%matplotlib inline
from nansat import Nansat
n = Nansat(idir+'gcps.tif')
print (n)
b1 = n[1]
%whos
plt.imshow(b1);plt.colorbar()
plt.show()
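# Quick check (illustrative): n[1] returns a plain numpy array, so the usual
# numpy attributes describe what kind of data we are looking at.
print(type(b1), b1.dtype, b1.shape)
print('min = {}, max = {}'.format(b1.min(), b1.max()))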
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
X, y = load_data()
assert type(X) == np.ndarray
assert type(y) == np.ndarray
pipe = Pipeline([
("scale", StandardScaler()),
("model", RidgeClassifier(random_state=24))
])
grid = GridSearchCV(pipe, param_grid={"model__alpha": [2e-4, 3e-3, 4e-2, 5e-1]}, cv=7)
grid.fit(X, y)
coef = grid.best_estimator_.named_steps['model'].coef_
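# Illustrative sketch: GridSearchCV also exposes the winning hyper-parameters and
# the cross-validated scores, which are often reported alongside the coefficients.
print(grid.best_params_)   # e.g. {'model__alpha': ...}
print(grid.best_score_)    # mean CV accuracy of the best alpha
cv_table = pd.DataFrame(grid.cv_results_)[["param_model__alpha", "mean_test_score"]]
cv_table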
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Introduction
Step4: This tutorial walks you through the process of creating a new neuroevolution algorithm.
Step8: Notice that our implementation above is extremely simple; we haven't used many options or functions provided by CMA-ES.
Step10: The simple CMA-ES wrapper worked! However, we also notice that the training time increased significantly.
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import clear_output, Image
!pip install evojax
clear_output()
import os
import numpy as np
import jax
import jax.numpy as jnp
from evojax.task.cartpole import CartPoleSwingUp
from evojax.policy.mlp import MLPPolicy
from evojax.algo import PGPE
from evojax import Trainer
from evojax.util import create_logger
# Let's create a directory to save logs and models.
log_dir = './log'
logger = create_logger(name='EvoJAX', log_dir=log_dir)
logger.info('Welcome to the tutorial on Neuroevolution algorithm creation!')
logger.info('Jax backend: {}'.format(jax.local_devices()))
!nvidia-smi --query-gpu=name --format=csv,noheader
seed = 42 # Wish me luck!
# We use the classic cart-pole swing up as our tasks, see
# https://github.com/google/evojax/tree/main/evojax/task for more example tasks.
# The test flag provides the opportunity for a user to
# 1. Return different signals as rewards. For example, in our MNIST example,
# we use negative cross-entropy loss as the reward in training tasks, and the
# classification accuracy as the reward in test tasks.
# 2. Perform reward shaping. It is common for RL practitioners to modify the
# rewards during training so that the agent learns more efficiently. But this
# modification should not be allowed in tests for fair evaluations.
hard = False
train_task = CartPoleSwingUp(harder=hard, test=False)
test_task = CartPoleSwingUp(harder=hard, test=True)
# We use a feedforward network as our policy.
# By default, MLPPolicy uses "tanh" as its activation function for the output.
policy = MLPPolicy(
input_dim=train_task.obs_shape[0],
hidden_dims=[64, 64],
output_dim=train_task.act_shape[0],
logger=logger,
)
# We use PGPE as our evolution algorithm.
# If you want to know more about the algorithm, please take a look at the paper:
# https://people.idsia.ch/~juergen/nn2010.pdf
solver = PGPE(
pop_size=64,
param_size=policy.num_params,
optimizer='adam',
center_learning_rate=0.05,
seed=seed,
)
# Now that we have all the three components instantiated, we can create a
# trainer and start the training process.
trainer = Trainer(
policy=policy,
solver=solver,
train_task=train_task,
test_task=test_task,
max_iter=600,
log_interval=100,
test_interval=200,
n_repeats=5,
n_evaluations=128,
seed=seed,
log_dir=log_dir,
logger=logger,
)
_ = trainer.run()
# Let's visualize the learned policy.
def render(task, algo, policy):
Render the learned policy.
task_reset_fn = jax.jit(test_task.reset)
policy_reset_fn = jax.jit(policy.reset)
step_fn = jax.jit(test_task.step)
act_fn = jax.jit(policy.get_actions)
params = algo.best_params[None, :]
task_s = task_reset_fn(jax.random.PRNGKey(seed=seed)[None, :])
policy_s = policy_reset_fn(task_s)
images = [CartPoleSwingUp.render(task_s, 0)]
done = False
step = 0
reward = 0
while not done:
act, policy_s = act_fn(task_s, params, policy_s)
task_s, r, d = step_fn(task_s, act)
step += 1
reward = reward + r
done = bool(d[0])
if step % 3 == 0:
images.append(CartPoleSwingUp.render(task_s, 0))
print('reward={}'.format(reward))
return images
imgs = render(test_task, solver, policy)
gif_file = os.path.join(log_dir, 'cartpole.gif')
imgs[0].save(
gif_file, save_all=True, append_images=imgs[1:], duration=40, loop=0)
Image(open(os.path.join(log_dir, 'cartpole.gif'),'rb').read())
import cma
from evojax.algo.base import NEAlgorithm
class CMAWrapper(NEAlgorithm):
This is a wrapper of CMA-ES.
def __init__(self, param_size, pop_size, init_stdev=0.1, seed=0):
self.pop_size = pop_size
self.params = None
self._best_params = None
# We create CMA-ES in a simplest form.
self.cma = cma.CMAEvolutionStrategy(
x0=np.zeros(param_size),
sigma0=init_stdev,
inopts={
'popsize': pop_size,
'seed': seed if seed > 0 else 42,
'randn': np.random.randn,
},
)
# We jit-compile some utility functions.
self.jnp_array = jax.jit(jnp.array)
self.jnp_stack = jax.jit(jnp.stack)
def ask(self):
self.params = self.cma.ask()
return self.jnp_stack(self.params)
def tell(self, fitness):
# CMA-ES minimizes, so we negate the fitness.
self.cma.tell(self.params, -np.array(fitness))
self._best_params = np.array(self.cma.result.xfavorite)
@property
def best_params(self):
return self.jnp_array(self._best_params)
@best_params.setter
def best_params(self, params):
self._best_params = np.array(params)
# Instead of PGPE, we use our CMAWrapper now.
solver = CMAWrapper(
pop_size=64,
param_size=policy.num_params,
seed=seed,
)
trainer = Trainer(
policy=policy,
solver=solver,
train_task=train_task,
test_task=test_task,
max_iter=600,
log_interval=100,
test_interval=200,
n_repeats=5,
n_evaluations=128,
seed=seed,
log_dir=log_dir,
logger=logger,
)
_ = trainer.run()
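# Illustrative sketch: render() only needs an object exposing .best_params, so it
# can be reused as-is to inspect the CMA-trained policy (the gif file name is arbitrary).
imgs_cma = render(test_task, solver, policy)
gif_file = os.path.join(log_dir, 'cartpole_cma.gif')
imgs_cma[0].save(gif_file, save_all=True, append_images=imgs_cma[1:], duration=40, loop=0)
Image(open(gif_file, 'rb').read())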
from evojax.algo.base import NEAlgorithm
class SimplePGPE(NEAlgorithm):
A simplified version of PGPE.
def __init__(self, param_size, pop_size,
lr_mu=0.05, lr_sigma=0.1, init_stdev=0.1, seed=0):
self.pop_size = pop_size
assert pop_size % 2 == 0, "pop_size must be a multiple of 2."
n_directs = pop_size // 2
self.noises = jnp.zeros(param_size)
self.params = jnp.zeros(param_size)
self.mu = jnp.zeros(param_size)
self.sigma = jnp.ones(param_size) * init_stdev
self.rand_key = jax.random.PRNGKey(seed=seed)
def ask_fn(key, mu, sigma):
next_key, sample_key = jax.random.split(key=key, num=2)
perturbations = jax.random.normal(
key=sample_key, shape=(n_directs, param_size)) * sigma[None, :]
params = jnp.vstack([perturbations, -perturbations]) + mu[None, :]
return params, perturbations, next_key
self.ask_fn = jax.jit(ask_fn)
def tell_fn(rewards, mu, sigma, perturbations):
fitness = jnp.array(rewards).reshape([2, n_directs])
# To map to the formulae above:
# (r - b) = (avg_fitness - b) and (theta - mu) = perturbations
avg_fitness = fitness.mean(axis=0)
b = jnp.mean(fitness)
# Update the means.
grad_mu = (
(avg_fitness - b)[:, None] * perturbations
).mean(axis=0)
new_mu = mu + lr_mu * grad_mu
# Update the sigmas.
# We constrain the change of sigma to prevent numerical errors.
grad_sigma = (
(avg_fitness - b)[:, None] *
(perturbations ** 2 - (sigma ** 2)[None, :]) / sigma[None, :]
).mean(axis=0)
new_sigma = jnp.clip(
sigma + lr_sigma * grad_sigma, 0.8 * sigma, 1.2 * sigma)
return new_mu, new_sigma
self.tell_fn = jax.jit(tell_fn)
def ask(self):
self.params, self.noises, self.rand_key = self.ask_fn(
self.rand_key, self.mu, self.sigma)
return self.params
def tell(self, fitness):
self.mu, self.sigma = self.tell_fn(
fitness, self.mu, self.sigma, self.noises)
@property
def best_params(self):
return self.mu
@best_params.setter
def best_params(self, params):
self.mu = jnp.array(params)
# Let's test our simple PGPE.
solver = SimplePGPE(
pop_size=64,
param_size=policy.num_params,
seed=seed,
)
trainer = Trainer(
policy=policy,
solver=solver,
train_task=train_task,
test_task=test_task,
max_iter=1000,
log_interval=100,
test_interval=200,
n_repeats=5,
n_evaluations=128,
seed=seed,
log_dir=log_dir,
logger=logger,
)
_ = trainer.run()
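# Illustrative sanity check (with hypothetical small sizes): ask() should return
# pop_size candidate parameter vectors and tell() should accept one fitness per candidate.
probe = SimplePGPE(pop_size=8, param_size=5, seed=1)
cand = probe.ask()
assert cand.shape == (8, 5)
probe.tell(jnp.zeros(8))
print(probe.best_params.shape)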
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Noiseless mixture of 2 Gaussians in 1D
Step2: Noisy mixture of Gaussian
Step3: PSD max-cut
|
<ASSISTANT_TASK:>
Python Code:
# we are dependent on numpy, sympy and cvxopt.
import numpy as np
import sympy as sp
import cvxopt
import mompy as mp
# just some basic settings and setup
mp.cvxsolvers.options['show_progress'] = False
from IPython.display import display, Markdown, Math, display_markdown
sp.init_printing()
def print_problem(obj, constraints = None, moment_constraints = None):
display_markdown(mp.problem_to_str(obj,constraints,moment_constraints, False), raw=True)
xi,c = sp.symbols('xi,c')
K = 2 # number of clusters
xi0 = [1, -0.9] # true parameters
c0 = [0.4, 0.6]
pi0 = [0.4, 0.6]
moment_exprs = [xi, xi**2 + c, xi**3 + 3*xi*c, xi**4 + 6*xi**2 * c + 3*c**2,\
xi**5 + 10*xi**3*c + 15*xi*c**2,\
xi**6 + 15*xi**4*c**1 + 45*xi**2*c**2 + 15*c**3 ,\
xi**7 + 21*xi**5*c**1 + 105*xi**3*c**2 + 105*xi*c**3]
moment_exprs = moment_exprs[0:6]
#print 'Gaussian moments are '
display(moment_exprs)
# construct the true constraints
hs = []
for expr in moment_exprs:
val = 0
for k in range(K):
val += pi0[k]*expr.subs({xi:xi0[k], c:c0[k]})
hs += [expr - val]
hs_true = hs
# we will minimize some kind of a trace..
f = 1 + xi**2 + c + c**2 + xi**4 + c*xi**2
gs = [c>=0]
print_problem(f, gs, hs)
sol = mp.solvers.solve_GMP(f, gs, hs, rounds = 2, slack=1e-3)
display(mp.extractors.extract_solutions_lasserre(sol['MM'], sol['x'], 2, tol = 1e-5, maxdeg=2))
print 'the truth: ' + str({c:c0, xi:xi0})
sol['MM'].numeric_instance(sol['x'],1)
# draw some samples
numsample = int(1e5)  # np.random.rand/randn need an integer sample count
np.random.seed(1)
z = (np.random.rand(numsample) < pi0[0]).astype('int8')
means = xi0[0]*z + xi0[1]*(1-z)
stds = np.sqrt(c0[0]*z + c0[1]*(1-z))
Xs = means + stds * np.random.randn(numsample)
import matplotlib.pyplot as plt
%matplotlib inline
plt.hist(Xs, 50);
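# Illustrative check: the empirical moments of the drawn sample should be close to
# the analytic mixture moments used in the noiseless section above.
true_moments = [sum(pi0[k]*expr.subs({xi: xi0[k], c: c0[k]}) for k in range(K)) for expr in moment_exprs]
emp_moments = [np.mean(np.power(Xs, d+1)) for d in range(len(moment_exprs))]
list(zip(true_moments, emp_moments))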
# construct the empirical constraints
hs = []
for d,expr in enumerate(moment_exprs):
val = np.mean(np.power(Xs,d+1))
hs += [expr - val]
# we will minimize some kind of a trace..
f = 1 + xi**2 + c + c**2 + xi**4 + c*xi**2
gs = [c>=0.1]
print_problem(f, gs, hs)
sol = mp.solvers.solve_GMP(f, gs, hs, rounds = 4, slack = 1e-5)
display(mp.extractors.extract_solutions_lasserre(sol['MM'], sol['x'], 2, tol = 1e-5, maxdeg=2))
print 'the truth: ' + str({c:c0, xi:xi0})
size = 5
np.random.seed(1)
xs = sp.symbols('x1:'+str(size+1))
Wh = np.random.randn(size,size)
W = -Wh*Wh.T;
gs = [x**2 >=1 for x in xs] + [x**2 <=1 for x in xs]
fs = [ w * xs[ij[0]] * xs[ij[1]] for ij,w in np.ndenumerate(W) ]
f = sum(fs)
print_problem(f, gs)
sol = mp.solvers.solve_GMP(f, gs, rounds = 3)
mp.extractors.extract_solutions_lasserre(sol['MM'], sol['x'], 2, maxdeg = 2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we join the two trajectories together. The approach we use is a little complicated, but the basic idea is this
Step2: Note, importantly, that the trajectory we'll use as input for the piggybacker is the whole trajectory, not just the segment generated by one-way shooting.
Step3: OPS objects that must be created
Step4: Create the MoveStub and PseudoSimulator
Step5: Feed the fake data to the PseudoSimulator
Step6: Use OPS analysis tools on the faked data
|
<ASSISTANT_TASK:>
Python Code:
from openpathsampling.tests.test_helpers import make_1d_traj
left_state_edge = 0.0
right_state_edge = 10.0
def make_traj(suffix, stride=1):
frame = left_state_edge -1.0 + suffix
coords = [frame]
while frame < right_state_edge:
frame += 1.0*stride
coords.append(frame)
return make_1d_traj(coords)
traj1 = make_traj(suffix=0.1)
traj2 = make_traj(suffix=0.2, stride=2)
traj3 = make_traj(suffix=0.3, stride=3)
traj4 = make_traj(suffix=0.4, stride=2)
traj5 = make_traj(suffix=0.5, stride=1)
mytrajs = [traj1, traj2, traj3, traj4, traj5]
for traj in mytrajs:
plt.plot([s.xyz[0][0] for s in traj], 'o')
plt.plot([0.0]*13, 'r')
plt.plot([10.0]*13, 'r')
def join_as_shooting(prev_traj, new_traj, shooting_point, direction):
sp_index = prev_traj.index(shooting_point)
if direction == 1:
prev_seg = prev_traj[:sp_index+1]
new_seg = [snap for snap in new_traj if snap.xyz[0][0] > shooting_point.xyz[0][0]]
result = prev_seg + new_seg
elif direction == -1:
prev_seg = prev_traj[sp_index:]
new_seg = [snap for snap in new_traj if snap.xyz[0][0] < shooting_point.xyz[0][0]]
result = new_seg + prev_seg
else:
raise RuntimeError("Bad direction")
return paths.Trajectory(result)
# simple function to plot the traj and show where the snapshots originated
def simple_plot(traj, origin_trajs):
plt.plot([s.xyz[0][0] for s in traj], 'k')
for (otraj, style) in zip(origin_trajs, ['bo', 'go', 'ro', 'co', 'mo']):
in_style = zip(*[(traj.index(s), s.xyz[0][0]) for s in traj if s in otraj])
if len(in_style) == 2:
plt.plot(in_style[0], in_style[1], style)
# tuples of shooting point (frame number), direction, and acceptance
sp_index = [4, 4, 6, 5]
directions = [-1, +1, -1, -1]
acceptance = [True, True, False, True]
long_trials = mytrajs[1:]
sp_dir_acc_traj = zip(sp_index, directions, acceptance, long_trials)
#sp_dir_acc_traj = [(4, -1, True, traj2), (4, +1, True, traj3), (6, -1, False, traj4), (5, -1, True, traj5)]
inp_trajs = [traj1]
last_accepted = 0
for i in range(len(sp_dir_acc_traj)):
move = sp_dir_acc_traj[i]
prev_traj = inp_trajs[last_accepted]
sp = move[0]
direction = move[1]
accept = move[2]
long_trial = move[3]
trial = join_as_shooting(prev_traj, long_trial, prev_traj[sp], direction)
inp_trajs.append(trial)
if accept:
last_accepted = i+1
simple_plot(inp_trajs[0], mytrajs)
simple_plot(inp_trajs[1], mytrajs)
simple_plot(inp_trajs[2], mytrajs)
simple_plot(inp_trajs[3], mytrajs) # REJECTED PATH!
simple_plot(inp_trajs[4], mytrajs)
# volumes
cv = paths.FunctionCV("x", lambda snap: snap.xyz[0][0])
left_state = paths.CVDefinedVolume(cv, float("-inf"), 0.0)
right_state = paths.CVDefinedVolume(cv, 10.0, float("inf"))
# network
network = paths.TPSNetwork(left_state, right_state)
ensemble = network.sampling_ensembles[0] # the only one
initial_conditions = paths.SampleSet([
paths.Sample(replica=0,
trajectory=inp_trajs[0],
ensemble=ensemble)
])
template = initial_conditions[0].trajectory[0]
shoot = oink.ShootingStub(ensemble)
sim = oink.ShootingPseudoSimulator(storage=paths.Storage('pseudoshoot.nc', 'w', template=template),
initial_conditions=initial_conditions,
mover=shoot,
network=network)
trial_trajectories = inp_trajs[1:]
replicas = [0]*len(sp_index)
move_inputs = zip(replicas, trial_trajectories, sp_index, acceptance)
for move in move_inputs:
print move
sim.run(move_inputs)
sim.storage.close()
analysis_file = paths.AnalysisStorage("pseudoshoot.nc")
scheme = analysis_file.schemes[0]
scheme.move_summary(analysis_file.steps)
import openpathsampling.visualize as ops_vis
from IPython.display import SVG
history = ops_vis.PathTree(
analysis_file.steps,
ops_vis.ReplicaEvolution(replica=0)
)
# switch to the "boxcar" look for the trajectories
history.options.movers['default']['new'] = 'single'
history.options.css['horizontal_gap'] = True
SVG(history.svg())
path_lengths = [len(step.active[0].trajectory) for step in analysis_file.steps]
plt.hist(path_lengths, alpha=0.5);
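# Quick numeric summary (illustrative) of the accepted path lengths shown above.
avg_len = sum(path_lengths) / float(len(path_lengths))
print("mean length = %.1f, min = %d, max = %d" % (avg_len, min(path_lengths), max(path_lengths)))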
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TV Script Generation
Step3: Explore the Data
Step6: Implement Preprocessing Functions
Step9: Tokenize Punctuation
Step11: Preprocess all the data and save it
Step13: Check Point
Step15: Build the Neural Network
Step18: Input
Step21: Build RNN Cell and Initialize
Step24: Word Embedding
Step27: Build RNN
Step30: Build the Neural Network
Step33: Batches
Step35: Neural Network Training
Step37: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Implement Generate Functions
Step49: Choose Word
Step51: Generate TV Script
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
data_dir = './data/simpsons/moes_tavern_lines.txt'
text = helper.load_data(data_dir)
# Ignore notice, since we don't use it for analysing the data
text = text[81:]
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
scenes = text.split('\n\n')
print('Number of scenes: {}'.format(len(scenes)))
sentence_count_scene = [scene.count('\n') for scene in scenes]
print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene)))
sentences = [sentence for scene in scenes for sentence in scene.split('\n')]
print('Number of lines: {}'.format(len(sentences)))
word_count_sentence = [len(sentence.split()) for sentence in sentences]
print('Average number of words in each line: {}'.format(np.average(word_count_sentence)))
print()
print('The sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
import numpy as np
import problem_unittests as tests
from collections import Counter
def create_lookup_tables(text):
Create lookup tables for vocabulary
:param text: The text of tv scripts split into words
:return: A tuple of dicts (vocab_to_int, int_to_vocab)
# Build vocab_to_int
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
# TODO: Implement Function
return vocab_to_int, int_to_vocab
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_create_lookup_tables(create_lookup_tables)
def token_lookup():
Generate a dict to turn punctuation into a token.
:return: Tokenize dictionary where the key is the punctuation and the value is the token
lookup_table = {
'.': '||Period||',
',': '||Comma||',
'"': '||Quotation_Mark||',
';': '||Semicolon||',
'?': '||Question_mark||',
'!': '||Exclamation_mark||',
'(': '||Left_parentheses||',
')': '||Right_parentheses||',
'--': '||Dash||',
'\n': '||Return||'
}
return lookup_table
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_tokenize(token_lookup)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import numpy as np
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def get_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate)
input_ = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None], name='targets')
lr = tf.placeholder(tf.float32, name='learning_rate')
return input_, targets, lr
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_inputs(get_inputs)
lstm_layers = 1
def get_init_cell(batch_size, rnn_size):
Create an RNN Cell and initialize it.
:param batch_size: Size of batches
:param rnn_size: Size of RNNs
:return: Tuple (cell, initialize state)
# Basic LSTM
lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([lstm] * lstm_layers)
# Get the initial state
initial_state = cell.zero_state(batch_size, tf.float32)  # LSTM state must be floating point
initial_state = tf.identity(initial_state, name='initial_state')
# TODO: Implement Function
return cell, initial_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_init_cell(get_init_cell)
def get_embed(input_data, vocab_size, embed_dim):
Create embedding for <input_data>.
:param input_data: TF placeholder for text input.
:param vocab_size: Number of words in vocabulary.
:param embed_dim: Number of embedding dimensions
:return: Embedded input.
embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1.0, 1.0))
embed = tf.nn.embedding_lookup(embedding, input_data)
return embed
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_embed(get_embed)
def build_rnn(cell, inputs):
Create a RNN using a RNN Cell
:param cell: RNN Cell
:param inputs: Input text data
:return: Tuple (Outputs, Final State)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = tf.identity(final_state, name='final_state')
return outputs, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_rnn(build_rnn)
def build_nn(cell, rnn_size, input_data, vocab_size, embed_dim):
Build part of the neural network
:param cell: RNN cell
:param rnn_size: Size of rnns
:param input_data: Input data
:param vocab_size: Vocabulary size
:param embed_dim: Number of embedding dimensions
:return: Tuple (Logits, FinalState)
embed = get_embed(input_data, vocab_size, rnn_size)
outputs, final_state = build_rnn(cell, embed)
logits = tf.contrib.layers.fully_connected(outputs,
vocab_size,
weights_initializer=tf.truncated_normal_initializer(
stddev=0.05),
biases_initializer=tf.zeros_initializer(), activation_fn=None)
return logits, final_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_build_nn(build_nn)
def get_batches(int_text, batch_size, seq_length):
Return batches of input and target
:param int_text: Text with the words replaced by their ids
:param batch_size: The size of batch
:param seq_length: The length of sequence
:return: Batches as a Numpy array
int_text = np.array(int_text)
slice_size = batch_size * seq_length
num_batches = int(len(int_text) / slice_size)
x = int_text[:num_batches * slice_size]
y = int_text[1: num_batches * slice_size + 1]
x_data = np.split(x.reshape(batch_size, -1), num_batches, axis=1)
y_data = np.split(y.reshape(batch_size, -1), num_batches, axis=1)
batches = np.array(list(zip(x_data, y_data)))
return batches
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_batches(get_batches)
get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3)
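# Illustrative check: with 15 ids, batch_size=2 and seq_length=3 there are 2 full
# batches, each an (input, target) pair of shape (batch_size, seq_length).
test_batches = get_batches(list(range(1, 16)), 2, 3)
assert test_batches.shape == (2, 2, 2, 3)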
# Number of Epochs
num_epochs = 100
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 512
# Sequence Length
seq_length = 20
# Learning Rate
learning_rate = 0.01
# Show stats for every n number of batches
show_every_n_batches = 20
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
save_dir = './save'
DON'T MODIFY ANYTHING IN THIS CELL
from tensorflow.contrib import seq2seq
tf.reset_default_graph()
train_graph = tf.Graph()
with train_graph.as_default():
vocab_size = len(int_to_vocab)
input_text, targets, lr = get_inputs()
input_data_shape = tf.shape(input_text)
cell, initial_state = get_init_cell(input_data_shape[0], rnn_size)
logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size, rnn_size)  # embed_dim (5th argument) set to rnn_size
# Probabilities for generating words
probs = tf.nn.softmax(logits, name='probs')
# Loss function
cost = seq2seq.sequence_loss(
logits,
targets,
tf.ones([input_data_shape[0], input_data_shape[1]]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
batches = get_batches(int_text, batch_size, seq_length)
restore_model = False
with tf.Session(graph=train_graph) as sess:
saver = tf.train.Saver()
if not restore_model:
sess.run(tf.global_variables_initializer())
else:
saver.restore(sess, save_dir)
for epoch_i in range(num_epochs):
state = sess.run(initial_state, {input_text: batches[0][0]})
for batch_i, (x, y) in enumerate(batches):
feed = {
input_text: x,
targets: y,
initial_state: state,
lr: learning_rate}
train_loss, state, _ = sess.run([cost, final_state, train_op], feed)
# Show every <show_every_n_batches> batches
if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0:
print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
epoch_i,
batch_i,
len(batches),
train_loss))
# Save Model
saver.save(sess, save_dir)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params((seq_length, save_dir))
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
seq_length, load_dir = helper.load_params()
def get_tensors(loaded_graph):
Get input, initial state, final state, and probabilities tensor from <loaded_graph>
:param loaded_graph: TensorFlow graph loaded from file
:return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor)
# TODO: Implement Function
InputTensor = loaded_graph.get_tensor_by_name("input:0")
InitialStateTensor = loaded_graph.get_tensor_by_name("initial_state:0")
FinalStateTensor = loaded_graph.get_tensor_by_name("final_state:0")
ProbsTensor = loaded_graph.get_tensor_by_name("probs:0")
return InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_get_tensors(get_tensors)
def pick_word(probabilities, int_to_vocab):
Pick the next word in the generated text
:param probabilities: Probabilites of the next word
:param int_to_vocab: Dictionary of word ids as the keys and words as the values
:return: String of the predicted word
# TODO: Implement Function
ix = np.random.choice(np.arange(len(probabilities)), p=probabilities)
word = int_to_vocab[ix]
return word
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_pick_word(pick_word)
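# Illustrative variant (not used in the generation loop below): rescaling the
# distribution with a "temperature" before sampling is a common way to trade off
# diversity against conservativeness of the generated script.
def pick_word_with_temperature(probabilities, int_to_vocab, temperature=0.7):
    logits = np.log(probabilities + 1e-12) / temperature
    probs = np.exp(logits) / np.sum(np.exp(logits))
    return int_to_vocab[np.random.choice(np.arange(len(probs)), p=probs)]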
gen_length = 200
# homer_simpson, moe_szyslak, or Barney_Gumble
prime_word = 'moe_szyslak'
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_dir + '.meta')
loader.restore(sess, load_dir)
# Get Tensors from loaded model
input_text, initial_state, final_state, probs = get_tensors(loaded_graph)
# Sentences generation setup
gen_sentences = [prime_word + ':']
prev_state = sess.run(initial_state, {input_text: np.array([[1]])})
# Generate sentences
for n in range(gen_length):
# Dynamic Input
dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]
dyn_seq_length = len(dyn_input[0])
# Get Prediction
probabilities, prev_state = sess.run(
[probs, final_state],
{input_text: dyn_input, initial_state: prev_state})
pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab)
gen_sentences.append(pred_word)
# Remove tokens
tv_script = ' '.join(gen_sentences)
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
tv_script = tv_script.replace(' ' + token.lower(), key)
tv_script = tv_script.replace('\n ', '\n')
tv_script = tv_script.replace('( ', '(')
print(tv_script)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Numerical Integration
Step3: Below is, mathematically, $f_{-h}
Step4: Then, we can use sympy to calculate, symbolically, $f_{h}
Step5: Success! Trapezoid rule was rederived (stop using pen/pencil and paper or chalkboard; computers can do computations faster and without mistakes)
Step6: Legendre Polynomials
|
<ASSISTANT_TASK:>
Python Code:
from itertools import combinations
import sympy
from sympy import Function, integrate, Product, Sum, Symbol, symbols
from sympy.abc import a,b,h,i,k,m,n,x
from sympy import Rational as Rat
def lagrange_basis_polys(N,x,xpts=None):
lagrange_basis_polynomials(N,x,xpts)
returns the Lagrange basis polynomials as a list
INPUTS/PARAMETERS
-----------------
<int> N - N > 0. Note that there are N+1 points total
<sympy.Symbol> x
<list> xpts
assert N > 0
if xpts != None:
assert len(xpts) == N + 1
if xpts == None:
print "I'll generate symbolic sympy symbols for you for xpts"
xpts = symbols('x0:'+str(N+1))
basis_polys = []
for i in range(N+1):
tmpprod = Rat(1)
for k in [k for k in range(N+1) if k != i]:
tmpprod = tmpprod * (x - xpts[k])/(xpts[i]-xpts[k])
basis_polys.append(tmpprod)
return basis_polys
def lagrange_interp(N,x,xpts=None,ypts=None):
lagrange_interp(N,x,xpts,ypts)
Lagrange interpolation formula
if xpts != None and ypts != None:
assert len(xpts) == len(ypts)
if xpts == None:
print "I'll generate symbolic sympy symbols for you for xpts"
xpts = symbols('x0:'+str(N+1))
if ypts == None:
print "I'll generate symbolic sympy symbols for you for xpts"
ypts = symbols('y0:'+str(N+1))
basis = lagrange_basis_polys(N,x,xpts)
p_N = sum( [ypts[i]*basis[i] for i in range(N+1)] )
return p_N
xpts = symbols('x0:'+str(1+1))
ypts = symbols('y0:'+str(1+1))
p_1x = lagrange_interp(1,x,xpts,ypts)
x_0 = Symbol('x_0',real=True)
f = Function('f')
f_minush = p_1x.subs({xpts[0]:x_0-h,xpts[1]:x_0, ypts[0]:f(x_0-h), ypts[1]:f(x_0) })
integrate( f_minush, (x,x_0-h,x_0 ) )
f_h = p_1x.subs({xpts[0]:x_0,xpts[1]:x_0+h, ypts[0]:f(x_0), ypts[1]:f(x_0+h) })
integrate( f_h, (x,x_0,x_0+h ) )
( integrate( f_minush, (x,x_0-h,x_0 ) ) + integrate( f_h, (x,x_0,x_0+h ) ) ).simplify()
xpts = symbols('x0:'+str(2+1))
ypts = symbols('y0:'+str(2+1))
p_2x = lagrange_interp(2,x,xpts,ypts)
f2_h = p_2x.subs({xpts[0]:x_0-h,xpts[1]:x_0,xpts[2]:x_0+h,ypts[0]:f(x_0-h), ypts[1]:f(x_0),ypts[2]:f(x_0+h) })
integrate( f2_h,(x,x_0-h,x_0+h)).simplify()
from sympy.polys.orthopolys import legendre_poly
print "n \t \t \t \t P_n(x) \n"
for i in range(11):
print str(i) + "\t \t \t \t " , legendre_poly(i,x)
sympy.latex(legendre_poly(2,x))
sympy.N( sympy.integrate(1/(2+x**2),(x,0,3)) )
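# Illustrative sketch: the Legendre polynomials above are the basis of Gauss-Legendre
# quadrature; numpy's leggauss nodes/weights on [-1, 1], rescaled to [0, 3],
# approximate the same integral as the sympy.N(...) value above.
import numpy as np
nodes, weights = np.polynomial.legendre.leggauss(5)
a_lim, b_lim = 0.0, 3.0
t = 0.5 * (b_lim - a_lim) * nodes + 0.5 * (b_lim + a_lim)
approx = 0.5 * (b_lim - a_lim) * np.sum(weights / (2 + t**2))
approx  # should agree with the sympy result to several digits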
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a giant comparison plot
|
<ASSISTANT_TASK:>
Python Code:
ABIG = 1.0
big_sil = SingleGrainPop('Grain', 'Silicate', 'Mie', amax=ABIG, md=MD)
big_gra = SingleGrainPop('Grain', 'Graphite', 'Mie', amax=ABIG, md=MD)
%%time
big_sil.calculate_ext(EVALS, unit='kev', theta=THVALS)
%%time
big_gra.calculate_ext(EVALS, unit='kev', theta=THVALS)
ax = plt.subplot(111)
big_sil.plot_ext(ax, 'all')
plt.loglog()
ax.set_ylim(0.01, 2)
plt.title('Silicate')
ax = plt.subplot(111)
big_gra.plot_ext(ax, 'all')
plt.loglog()
ax.set_ylim(0.01, 2)
plt.title('Graphite')
inds = [0, 50, -10]
ms = dict(zip(inds,['d','o','s']))
for i in inds:
plt.plot(THVALS, big_sil.int_diff[i], 'g', ls='',
marker=ms[i], markersize=10, label='%.2f keV' % EVALS[i])
plt.plot(THVALS, big_gra.int_diff[i], 'b', ls='', marker=ms[i], markersize=10)
plt.loglog()
plt.legend(loc='lower left', frameon=False)
giant_sil = SingleGrainPop('Grain', 'Silicate', 'Mie', amax=A0, md=MD)
giant_gra = SingleGrainPop('Grain', 'Graphite', 'Mie', amax=A0, md=MD)
%%time
giant_sil.calculate_ext(EVALS, unit='kev', theta=THVALS)
%%time
giant_gra.calculate_ext(EVALS, unit='kev', theta=THVALS)
ax = plt.subplot(111)
giant_sil.plot_ext(ax, 'all')
plt.loglog()
ax.set_ylim(0.01, 2)
plt.title('Silicate')
ax = plt.subplot(111)
giant_gra.plot_ext(ax, 'all')
plt.loglog()
ax.set_ylim(0.01, 2)
plt.title('Graphite')
inds = [0, 50, -10]
ms = dict(zip(inds,['d','o','s']))
for i in inds:
plt.plot(THVALS, giant_sil.int_diff[i], 'g', ls='',
marker=ms[i], markersize=10, label='%.2f keV' % EVALS[i])
plt.plot(THVALS, giant_gra.int_diff[i], 'b', ls='', marker=ms[i], markersize=10)
plt.loglog()
plt.legend(loc='lower left', frameon=False)
ax = plt.subplot(111)
big_gra.plot_ext(ax, 'abs', color='b', lw=1, label='1 um gra')
big_sil.plot_ext(ax, 'abs', color='g', lw=1, label='1 um sil')
giant_gra.plot_ext(ax, 'abs', color='b', lw=2, label='10 um gra')
giant_sil.plot_ext(ax, 'abs', color='g', lw=2, label='10 um sil')
plt.loglog()
plt.xlim(0.1, 20)
plt.ylim(0.001, 2)
plt.title("Absorption")
plt.legend(loc='lower left', frameon=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we import the relevant modules from BurnMan. The burnman
Step2: 2. Import seismic model
Step3: We create an array of 20 depths at which we want to evaluate PREM, and then
Step4: 3. Input composition
Step5: At this point we want to tell the rock which equation of state to use for
Step6: 4. Input temperature
Step7: 4. Calculate velocities
Step8: 5. Plot results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os, sys, numpy as np, matplotlib.pyplot as plt
sys.path.insert(1,os.path.abspath('../..'))
import burnman
from burnman import minerals
seismic_model = burnman.seismic.PREM()
depths = np.linspace(750e3, 2800e3, 20)
pressure, seis_rho, seis_vp, seis_vs, seis_vphi = \
seismic_model.evaluate(['pressure','density','v_p','v_s','v_phi'],depths)
plt.plot(pressure/1.e9,seis_vs/1.e3,'k',label='Vs')
plt.plot(pressure/1.e9,seis_vp/1.e3,'b',label='Vp')
plt.plot(pressure/1.e9,seis_rho/1.e3,'r',label='rho')
plt.plot(pressure/1.e9,seis_vphi/1.e3,'g',label='Vphi')
plt.xlabel('pressure (GPa)')
plt.ylabel('velocity (km/s) density (kg/m^3)')
plt.xlim(min(pressure)/1.e9,max(pressure)/1.e9)
plt.title('PREM')
plt.legend();
rock = burnman.Composite([minerals.SLB_2011.mg_perovskite(), minerals.SLB_2011.periclase()], \
[0.8, 0.2])
rock.set_method('slb3')
temperature = burnman.geotherm.brown_shankland(depths)
plt.plot(pressure/1.e9,temperature,'r')
plt.xlim(min(pressure)/1.e9,max(pressure)/1.e9)
plt.xlabel('pressure (GPa)')
plt.ylabel('temperature (K)');
density, vp, vs, vphi, K, G = rock.evaluate(['density','v_p','v_s','v_phi','K_S','G'], pressure, temperature)
# First, we plot the s-wave speed verses the PREM s-wave speed
plt.figure(figsize=(15,5))
plt.subplot(1,3,1)
plt.plot(pressure/1.e9,vs/1.e3,color='b',linestyle='-',marker='o', markerfacecolor='b',markersize=4,label='computation')
plt.plot(pressure/1.e9,seis_vs/1.e3,color='k',linestyle='-',marker='o', markerfacecolor='k',markersize=4,label='reference')
plt.title("S wave speed (km/s)")
plt.xlim(min(pressure)/1.e9,max(pressure)/1.e9)
plt.xlabel('pressure (GPa)')
plt.legend(loc='lower right')
plt.ylim(5,8.0)
# Next, we plot the p-wave speed verses the PREM p-wave speed
plt.subplot(1,3,2)
plt.plot(pressure/1.e9,vp/1.e3,color='b',linestyle='-',marker='o',markerfacecolor='b',markersize=4)
plt.plot(pressure/1.e9,seis_vp/1.e3,color='k',linestyle='-',marker='o',markerfacecolor='k',markersize=4)
plt.title("P wave speed (km/s)")
plt.xlabel('pressure (GPa)')
plt.xlim(min(pressure)/1.e9,max(pressure)/1.e9)
plt.ylim(10,16)
# Next, we plot the density verses the PREM density
plt.subplot(1,3,3)
plt.plot(pressure/1.e9,density/1.e3,color='b',linestyle='-',marker='o', markerfacecolor='b',markersize=4)
plt.plot(pressure/1.e9,seis_rho/1.e3,color='k',linestyle='-',marker='o', markerfacecolor='k',markersize=4)
plt.xlim(min(pressure)/1.e9,max(pressure)/1.e9)
plt.xlabel("Pressure (GPa)")
plt.title("Density ($10^3$ kg/m$^3$)");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dataset
Step2: Inputs
Step3: Model
Step4: Loss
Step5: Putting it all together
Step6: 3. Training the model
Step7: Validation
Step8: Tensorboard
Step9: Day 4
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import time
from os.path import join
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import utils
from data import Dataset
tf.set_random_seed(31415)
tf.logging.set_verbosity(tf.logging.ERROR)
plt.rcParams["figure.figsize"] = (15, 5)
batch_size = 10
num_classes = Dataset.num_classes
# create the Dataset for training and validation
train_data = Dataset('train', batch_size)
val_data = Dataset('val', batch_size, shuffle=False)
# downsample = 2
# train_data = Dataset('train', batch_size, downsample)
# val_data = Dataset('val', batch_size, downsample, shuffle=False)
print('Train shape:', train_data.x.shape)
print('Validation shape:', val_data.x.shape)
#print('mean = ', train_data.x.mean((0,1,2)))
#print('std = ', train_data.x.std((0,1,2)))
# store the input image dimensions
height = train_data.height
width = train_data.width
channels = train_data.channels
# create placeholders for inputs
def build_inputs():
with tf.name_scope('data'):
x = tf.placeholder(tf.float32, shape=(None, height, width, channels), name='rgb_images')
y = tf.placeholder(tf.int32, shape=(None, height, width), name='labels')
return x, y
# helper function which applies conv2d + ReLU with filter size k
def conv(x, num_maps, k=3):
x = tf.layers.conv2d(x, num_maps, k, padding='same')
x = tf.nn.relu(x)
return x
# helper function for 2x2 max pooling with stride=2
def pool(x):
return tf.layers.max_pooling2d(x, pool_size=2, strides=2, padding='same')
# this functions takes the input placeholder and the number of classes, builds the model and returns the logits
def build_model(x, num_classes):
input_size = x.get_shape().as_list()[1:3]
block_sizes = [64, 64, 64, 64]
x = conv(x, 32, k=3)
for i, size in enumerate(block_sizes):
with tf.name_scope('block'+str(i)):
x = pool(x)
x = conv(x, size)
x = conv(x, size)
print(x)
with tf.name_scope('logits'):
x = tf.layers.conv2d(x, num_classes, 1, padding='same')
# ask why no relu
x = tf.image.resize_bilinear(x, input_size, name='upsample_logits')
return x
# this funcions takes logits and targets (y) and builds the loss subgraph
def build_loss(logits, y):
with tf.name_scope('loss'):
# vectorize the image
y = tf.reshape(y, shape=[-1])
logits = tf.reshape(logits, [-1, num_classes])
# gather all labels with valid ID
mask = y < num_classes
y = tf.boolean_mask(y, mask)
logits = tf.boolean_mask(logits, mask)
# define softmax and cross entropy loss
y_one_hot = tf.one_hot(y, num_classes)
xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_one_hot)
# take the mean because we don't want the loss to depend on the number of pixels in batch
xent = tf.reduce_mean(xent)
tf.summary.scalar('cross_entropy', xent)
return xent
# create inputs
x, y = build_inputs()
# create model
logits = build_model(x, num_classes)
# create loss
loss = build_loss(logits, y)
# we are going to need argmax predictions for IoU
y_pred = tf.argmax(logits, axis=3, output_type=tf.int32)
# this functions trains the model
def train(sess, x, y, y_pred, loss, checkpoint_dir):
num_epochs = 30
batch_size = 10
log_dir = 'local/logs'
utils.clear_dir(log_dir)
utils.clear_dir(checkpoint_dir)
learning_rate = 1e-3
decay_power = 1.0
global_step = tf.Variable(0, trainable=False)
decay_steps = num_epochs * train_data.num_batches
# usually SGD learning rate is decreased over time which enables us
# to better fine-tune the parameters when close to solution
lr = tf.train.polynomial_decay(learning_rate, global_step, decay_steps,
end_learning_rate=0, power=decay_power)
train_step = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)
saver = tf.train.Saver()
summary_all = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(join(log_dir, 'train'), sess.graph)
tf.global_variables_initializer().run(session=sess)
step = 0
best_iou = 0
best_epoch = 0
exp_start_time = time.time()
for epoch in range(1, num_epochs+1):
# confusion_mat = np.zeros((num_classes, num_classes), dtype=np.uint64)
print('\nTraining phase:')
for x_np, y_np, names in train_data:
start_time = time.time()
loss_np, summary, _ = sess.run([loss, summary_all, train_step],
feed_dict={x: x_np, y: y_np})
train_writer.add_summary(summary, step)
duration = time.time() - start_time
# confusion_mat += batch_conf_mat.astype(np.uint64)
if step % 20 == 0:
# if step % 2 == 0:
string = '%s: epoch %d / %d, iter %05d, loss = %.2f (%.1f images/sec)' % \
(utils.get_expired_time(exp_start_time), epoch, num_epochs, step, loss_np, batch_size / duration)
print(string)
step += 1
# utils.print_metrics(confusion_mat, 'Train')
# add this later
iou = validate(sess, val_data, x, y, y_pred, loss, draw_steps=5)
if iou > best_iou:
best_iou, best_epoch = iou, epoch
save_path = saver.save(sess, join(checkpoint_dir, 'model.ckpt'))
print('Model saved in file: ', save_path)
print('\nBest IoU = %.2f (epoch %d)' % (best_iou, best_epoch))
sess = tf.Session()
train(sess, x, y, y_pred, loss, 'local/checkpoint1')
def validate(sess, data, x, y, y_pred, loss, draw_steps=0):
print('\nValidation phase:')
conf_mat = np.zeros((num_classes, num_classes), dtype=np.uint64)
for i, (x_np, y_np, names) in enumerate(data):
start_time = time.time()
loss_np, y_pred_np = sess.run([loss, y_pred],
feed_dict={x: x_np, y: y_np})
duration = time.time() - start_time
batch_conf_mat = confusion_matrix(y_np.reshape(-1), y_pred_np.reshape(-1))
batch_conf_mat = batch_conf_mat[:-1,:-1].astype(np.uint64)
conf_mat += batch_conf_mat
for j in range(min(draw_steps, batch_size)):
img_pred = utils.colorize_labels(y_pred_np[j], Dataset.class_info)
img_true = utils.colorize_labels(y_np[j], Dataset.class_info)
img_raw = data.get_img(names[j])
img = np.concatenate((img_raw, img_true, img_pred), axis=1)
plt.imshow(img)
plt.show()
draw_steps -= 1
if i % 5 == 0:
string = 'batch %03d loss = %.2f (%.1f images/sec)' % \
(i, loss_np, x_np.shape[0] / duration)
print(string)
print(conf_mat)
return utils.print_stats(conf_mat, 'Validation', Dataset.class_info)
sess = tf.Session()
# ask why forward is faster
train(sess, x, y, y_pred, loss, 'local/checkpoint1')
# restore the best checkpoint
checkpoint_path = 'local/pretrained1/model.ckpt'
saver = tf.train.Saver()
saver.restore(sess, checkpoint_path)
validate(sess, val_data, x, y, y_pred, loss, draw_steps=10)
def upsample(x, skip, num_maps):
skip_size = skip.get_shape().as_list()[1:3]
x = tf.image.resize_bilinear(x, skip_size)
x = tf.concat([x, skip], 3)
return conv(x, num_maps)
# this functions takes the input placeholder and the number of classes, builds the model and returns the logits
def build_model(x, num_classes):
input_size = x.get_shape().as_list()[1:3]
block_sizes = [64, 64, 64, 64]
skip_layers = []
x = conv(x, 32, k=3)
for i, size in enumerate(block_sizes):
with tf.name_scope('block'+str(i)):
x = pool(x)
x = conv(x, size)
x = conv(x, size)
# if i < len(block_sizes) - 1:
skip_layers.append(x)
for i, skip in reversed(list(enumerate(skip_layers))):
with tf.name_scope('upsample'+str(i)):
print(i, x, '\n', skip)
x = upsample(x, skip, block_sizes[i])
with tf.name_scope('logits'):
x = tf.layers.conv2d(x, num_classes, 1, padding='same')
x = tf.image.resize_bilinear(x, input_size, name='upsample_logits')
return x
sess.close()
tf.reset_default_graph()
# create inputs
x, y = build_inputs()
# create model
logits = build_model(x, num_classes)
# create loss
loss = build_loss(logits, y)
# we are going to need argmax predictions for IoU
y_pred = tf.argmax(logits, axis=3, output_type=tf.int32)
sess = tf.Session()
train(sess, x, y, y_pred, loss, 'local/checkpoint2')
# restore the best checkpoint
checkpoint_path = 'local/pretrained2/model.ckpt'
saver = tf.train.Saver()
saver.restore(sess, checkpoint_path)
validate(sess, val_data, x, y, y_pred, loss, draw_steps=10)
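# Illustrative sketch: a rough model-size figure, counting the trainable
# parameters of the current graph with standard TF1 introspection.
num_params = sum(np.prod(v.get_shape().as_list()) for v in tf.trainable_variables())
print('trainable parameters: %d' % num_params)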
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And here is how our random sample looks. Without knowing the true relation between feature and response, one could easily argue that the dependent variable follows some linear function (or maybe a low-order polynomial).
Step2: Bias
Step3: Knowing the true relationship between feature and response, we can see that a certain bias is present in that we approximate a logarithmic function with a much more static linear function. Especially towards the edges of the spectrum we tend to misclassify the sample.
Step4: Here we see that a Kernel regression is much better at approximating the true function than a simple linear model. Therefore, the bias for the more complex function is lower and should be preferred.
Step5: Here it becomes pretty clear that for the kernel regression the variance is high as this regression is simply too dependent on the actual sample. The linear regression however, is much more stable and produces +/- the same results independent of the sample. Therefore its model variance is low and linear model should be preferred over a kernel model.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
np.random.seed(123)
n = 100
x = np.linspace(0.01, 2, n)
y = 2 * np.log(x)
y_noise = y + np.random.normal(size=(n))
plt.figure(figsize=(12, 8))
plt.scatter(x, y_noise);
# Create OLS object
reg = sm.OLS(y_noise, exog=sm.add_constant(x)).fit()
print('R2 = ', reg.rsquared)
reg_ln = sm.OLS(y_noise, exog=sm.add_constant(np.log(x))).fit()
print('R2 = ', reg_ln.rsquared)
# Plot data
plt.figure(figsize=(12, 8))
plt.scatter(x, y_noise, label="Sample")
plt.plot(x, y, color='g', label="True function")
plt.plot(x, reg.fittedvalues, c='k', label='Linear Fit') # Linear fit
#plt.plot(x, reg_ln.fittedvalues, c='r', label='Ln Fit')
plt.legend();
# Kreate kernel regression object
kreg = sm.nonparametric.KernelReg(y_noise, exog=x, var_type='o', bw=[0.20]).fit()
# Plot results
plt.figure(figsize=(12, 8))
plt.plot(x, y, color='g', label="True")
plt.scatter(x, y_noise, label="Sample")
plt.plot(x, reg.fittedvalues, c='k', label='Linear Fit') # Linear fit
plt.plot(x, kreg[0], c='cyan', label='Kernel Fit') # Kernel fit
plt.legend();
plt.figure(figsize=(16, 8))
for i in range(1, 7):
indices = np.sort(np.random.randint(0, x.shape[0], 20))
reg = sm.OLS(y_noise[indices], exog=sm.add_constant(x[indices])).fit()
kreg = sm.nonparametric.KernelReg(y_noise[indices], exog=x[indices], var_type='o', bw=[0.05]).fit()
plt.subplot(2, 3, i)
plt.plot(x[indices], reg.fittedvalues, c='k', label='Fit')
plt.plot(x[indices], kreg[0], c='cyan', label='Kernel Fit')
plt.plot(x, y, color='g', label="True")
plt.ylim(-10, 4)
plt.scatter(x[indices], y_noise[indices], label="Sample")
plt.text(0.1, -8, 'Lin.fit: f(x) = {0: .2f} + {1: .2f} $x$'.format(reg.params[0], reg.params[1]))
plt.legend();
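# Illustrative sketch: a crude numeric view of the tradeoff -- refit both models
# on many random subsamples and compare the spread (variance) of their predictions
# at one fixed point x_star (a value chosen here only for illustration).
x_star = 1.5
lin_preds, ker_preds = [], []
for _ in range(50):
    idx = np.sort(np.random.choice(x.shape[0], 30, replace=False))
    lin = sm.OLS(y_noise[idx], exog=sm.add_constant(x[idx])).fit()
    lin_preds.append(lin.params[0] + lin.params[1] * x_star)
    ker = sm.nonparametric.KernelReg(y_noise[idx], exog=x[idx], var_type='o', bw=[0.05])
    ker_preds.append(ker.fit([x_star])[0][0])
print('linear: mean %.2f, std %.2f' % (np.mean(lin_preds), np.std(lin_preds)))
print('kernel: mean %.2f, std %.2f' % (np.mean(ker_preds), np.std(ker_preds)))
print('true f(x_star) = %.2f' % (2 * np.log(x_star)))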
from IPython.display import YouTubeVideo
from datetime import timedelta
YouTubeVideo('EuBBz3bI-aA', start=int(timedelta(hours=0, minutes=0, seconds=17).total_seconds()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Operators on Strings
Step2: Operations on Strings
|
<ASSISTANT_TASK:>
Python Code:
s = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua"
s
s[10]
s[20:] # start from index 20 to the end of the string
s[:20] # start from 0 to index 19
s[10:30:2] # start from 10, end at 29 with steps of 2
s[30:10:-2] # in reverse order
'Lorem' in s
'Some Random text : ' + s
for i,c in enumerate(s): # string is iterable
if i == 11:
break
else:
print(c,end='')
s
s.count('it')
s.find('it')
s.split(',')
part = s.split(',')[2]
part
part = part.strip()
part
part = part.upper()
part
'-'.join('defg')
s = 'abcd'
s += 'defg' # Appending
s
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function toDot takes four arguments
Step2: HeapSort
Step3: The function ascend takes two arguments
Step4: The function descend (the classic sink operation) takes three arguments.
Step5: The function $\texttt{heapSort}$ has the task to sort the array A and proceeds in two phases.
Step6: Testing
|
<ASSISTANT_TASK:>
Python Code:
import graphviz as gv
def toDot(A, f, g, u=None):
n = len(A)
dot = gv.Digraph(node_attr={'shape': 'record'})
for k, p in enumerate(A):
if k == u:
dot.node(str(k), label='{' + str(p) + '|' + str(k) + '}', style='filled', fillcolor='orange')
elif k < f:
dot.node(str(k), label='{' + str(p) + '|' + str(k) + '}', style='filled', fillcolor='red')
elif k < g:
dot.node(str(k), label='{' + str(p) + '|' + str(k) + '}', style='rounded')
else:
dot.node(str(k), label='{' + str(p) + '|' + str(k) + '}', style='filled', fillcolor='green')
for k in range(0, n // 2):
if 2 * k + 1 < g:
dot.edge(str(k), str(2 * k + 1))
if 2 * k + 2 < g:
dot.edge(str(k), str(2 * k + 2))
return dot
def swap(A, i, j):
A[i], A[j] = A[j], A[i]
def ascend(A, k):
while k > 0:
p = (k - 1) // 2
if A[k] < A[p]:
swap(A, p, k)
k = p
else:
return
def descend(A, k, n):
while 2 * k + 1 <= n:
j = 2 * k + 1
if j + 1 <= n and A[j] > A[j + 1]:
j += 1
if A[k] < A[j]:
return
swap(A, k, j)
k = j
def heap_sort(A):
n = len(A)
for k in range(1, n):
display(toDot(A, 0, k+1, k))
ascend(A, k)
display(toDot(A, 0, k+1))
n = n - 1
while n >= 1:
swap(A, 0, n)
display(toDot(A[:], 1, n + 1))
n -= 1
descend(A, 0, n)
display(toDot(A[:], 0, n + 1))
import random as rnd
def demo():
L = [ rnd.randrange(1, 200) for n in range(12) ]
print("L = ", L)
heap_sort(L)
print("L = ", L)
demo()
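# Illustrative check, independent of the drawings: repeated ascend calls should
# establish the min-heap property (every parent <= both of its children).
def is_min_heap(A):
    n = len(A)
    return all(A[(k - 1) // 2] <= A[k] for k in range(1, n))

A = [rnd.randrange(1, 200) for _ in range(31)]
for k in range(1, len(A)):
    ascend(A, k)
print(is_min_heap(A))  # expected: True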
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now read the train and test questions into lists of questions.
Step2: Use the Keras tokenizer to tokenize the text and then pad the sentences to 30 words
Step3: Now let us create the embedding matrix where each row corresponds to a word.
Step4: Now it's time to build the model. Let us specify the model architecture. The first layer is the embedding layer.
Step5: In the embedding layer, 'trainable' is set to False so as not to train the word embeddings during back propagation.
Step6: Model training and predictions
|
<ASSISTANT_TASK:>
Python Code:
import os
import csv
import codecs
import numpy as np
import pandas as pd
np.random.seed(1337)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Input, Flatten, merge, LSTM, Lambda, Dropout
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
from keras.layers.wrappers import TimeDistributed, Bidirectional
from keras.layers.normalization import BatchNormalization
from keras import backend as K
import sys
BASE_DIR = 'data/'
GLOVE_DIR = '/home/mageswarand/dataset/glove/'
TRAIN_DATA_FILE = BASE_DIR + 'train.csv'
TEST_DATA_FILE = BASE_DIR + 'test.csv'
MAX_SEQUENCE_LENGTH = 30
MAX_NB_WORDS = 200000
EMBEDDING_DIM = 300
VALIDATION_SPLIT = 0.01
print('Indexing word vectors.')
embeddings_index = {}
f = codecs.open(os.path.join(GLOVE_DIR, 'glove.840B.300d.txt'), encoding='utf-8')
for line in f:
values = line.split(' ')
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
print('Processing text dataset')
texts_1 = []
texts_2 = []
labels = [] # list of label ids
with codecs.open(TRAIN_DATA_FILE, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
for values in reader:
texts_1.append(values[3])
texts_2.append(values[4])
labels.append(int(values[5]))
print('Found %s texts.' % len(texts_1))
test_texts_1 = []
test_texts_2 = []
test_labels = [] # list of label ids
with codecs.open(TEST_DATA_FILE, encoding='utf-8') as f:
reader = csv.reader(f, delimiter=',')
header = next(reader)
for values in reader:
test_texts_1.append(values[1])
test_texts_2.append(values[2])
test_labels.append(values[0])
print('Found %s texts.' % len(test_texts_1))
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts_1 + texts_2 + test_texts_1 + test_texts_2)
sequences_1 = tokenizer.texts_to_sequences(texts_1)
sequences_2 = tokenizer.texts_to_sequences(texts_2)
word_index = tokenizer.word_index
print('Found %s unique tokens.' % len(word_index))
test_sequences_1 = tokenizer.texts_to_sequences(test_texts_1)
test_sequences_2 = tokenizer.texts_to_sequences(test_texts_2)
data_1 = pad_sequences(sequences_1, maxlen=MAX_SEQUENCE_LENGTH)
data_2 = pad_sequences(sequences_2, maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(labels)
print('Shape of data tensor:', data_1.shape)
print('Shape of label tensor:', labels.shape)
test_data_1 = pad_sequences(test_sequences_1, maxlen=MAX_SEQUENCE_LENGTH)
test_data_2 = pad_sequences(test_sequences_2, maxlen=MAX_SEQUENCE_LENGTH)
test_labels = np.array(test_labels)
del test_sequences_1
del test_sequences_2
del sequences_1
del sequences_2
import gc
gc.collect()
print('Preparing embedding matrix.')
# prepare embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index))
embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))
for word, i in word_index.items():
if i >= nb_words:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0))
embedding_layer = Embedding(nb_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
# Model Architecture #
sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences_1 = embedding_layer(sequence_1_input)
x1 = Conv1D(128, 3, activation='relu')(embedded_sequences_1)
x1 = MaxPooling1D(10)(x1)
x1 = Flatten()(x1)
x1 = Dense(64, activation='relu')(x1)
x1 = Dropout(0.2)(x1)
sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences_2 = embedding_layer(sequence_2_input)
y1 = Conv1D(128, 3, activation='relu')(embedded_sequences_2)
y1 = MaxPooling1D(10)(y1)
y1 = Flatten()(y1)
y1 = Dense(64, activation='relu')(y1)
y1 = Dropout(0.2)(y1)
merged = merge([x1,y1], mode='concat')
merged = BatchNormalization()(merged)
merged = Dense(64, activation='relu')(merged)
merged = Dropout(0.2)(merged)
merged = BatchNormalization()(merged)
preds = Dense(1, activation='sigmoid')(merged)
model = Model(input=[sequence_1_input,sequence_2_input], output=preds)
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['acc'])
# pass
model.fit([data_1,data_2], labels, validation_split=VALIDATION_SPLIT, nb_epoch=1, batch_size=1024, shuffle=True)
preds = model.predict([test_data_1, test_data_2])
print(preds.shape)
out_df = pd.DataFrame({"test_id":test_labels, "is_duplicate":preds.ravel()})
out_df.to_csv("test_predictions.csv", index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the MNIST dataset, flatten the images, convert the class labels, and scale the data.
Step2: I. Basic CNN Example
Step3: Fit the model over 10 epochs. The predictiveness is impressive for such a small model!
Step4: For reference, here are the dimensions of the weights in each layer
Step5: Evaluate model on the test set
Step6: Predict classes on the test set.
Step7: II. Double convolution
Step8: III. Visualizing convolution weights
Step9: The first set of weights will be given as weights the same size as the input space.
Step10: The second layer of weights will be given as a set of 32 weights with dimensions of 3x3x32.
Step11: IV. Activations of the neural network
Step12: Activations after the second layer
Step13: Finally, activations after max pooling
Step14: V. Further tweaks
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.datasets import mnist, cifar10
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.optimizers import SGD, RMSprop
from keras.utils import np_utils
from keras.regularizers import l2
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.callbacks import EarlyStopping
from keras.preprocessing.image import ImageDataGenerator
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32') / 255
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape = (1, 28, 28)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train, Y_train, batch_size=32, nb_epoch=10,
verbose=1, show_accuracy=True, validation_split=0.1)
print(model.layers[0].get_weights()[0].shape) # Convolution2D
print(model.layers[5].get_weights()[0].shape) # Dense
print("Test classification rate %0.05f" % model.evaluate(X_test, Y_test, show_accuracy=True)[1])
y_hat = model.predict_classes(X_test)
pd.crosstab(y_hat, y_test)
test_wrong = [im for im in zip(X_test,y_hat,y_test) if im[1] != im[2]]
print(len(test_wrong))
plt.figure(figsize=(15, 15))
for ind, val in enumerate(test_wrong):
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
plt.subplot(15, 15, ind + 1)
im = 1 - val[0].reshape((28,28))
axis("off")
plt.imshow(im, cmap='gray')
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape = (1, 28, 28)))
model.add(Activation("relu"))
model.add(Convolution2D(32, 3, 3, border_mode='same'))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train[:1000], Y_train[:1000], batch_size=32, nb_epoch=10,
verbose=1, show_accuracy=True, validation_split=0.1)
print("Test classification rate %0.05f" % model.evaluate(X_test, Y_test, show_accuracy=True)[1])
y_hat = model.predict_classes(X_test)
pd.crosstab(y_hat, y_test)
test_wrong = [im for im in zip(X_test,y_hat,y_test) if im[1] != im[2]]
plt.figure(figsize=(10, 10))
for ind, val in enumerate(test_wrong[:100]):
plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
plt.subplot(10, 10, ind + 1)
im = 1 - val[0].reshape((28,28))
plt.axis("off")
plt.text(0, 0, val[2], fontsize=14, color='blue')
plt.text(8, 0, val[1], fontsize=14, color='red')
plt.imshow(im, cmap='gray')
print(model.layers) # list of the layers
print(model.layers[0].get_weights()[0].shape) # the weights
W1 = model.layers[0].get_weights()[0]
plt.figure(figsize=(10, 10), frameon=False)
for ind, val in enumerate(W1):
plt.subplot(6, 6, ind + 1)
im = val.reshape((3,3))
plt.axis("off")
plt.imshow(im, cmap='gray',interpolation='nearest')
W2 = model.layers[2].get_weights()[0]
plt.figure(figsize=(10, 10), frameon=False)
for ind, val in enumerate(W2):
plt.subplot(6, 6, ind + 1)
im = val.reshape((32,9))
plt.axis("off")
plt.imshow(im, cmap='gray',interpolation='nearest')
model2 = copy.copy(model)
model2.layers = model2.layers[:2]
model2.compile(loss='categorical_crossentropy', optimizer=rms) # don't forget this step!
these = random.choice(range(1000),3,replace=False)
x_rep = model2.predict(X_test[these])
for this_x_rep in x_rep:
plt.figure(figsize=(10, 10), frameon=False)
for ind, val in enumerate(this_x_rep):
plt.subplot(6, 6, ind + 1)
plt.axis("off")
plt.imshow(val, cmap='gray',interpolation='nearest')
model2 = copy.copy(model)
model2.layers = model2.layers[:4]
model2.compile(loss='categorical_crossentropy', optimizer=rms) # don't forget this step!
x_rep = model2.predict(X_test[these])
for this_x_rep in x_rep:
plt.figure(figsize=(10, 10), frameon=False)
for ind, val in enumerate(this_x_rep):
plt.subplot(6, 6, ind + 1)
plt.axis("off")
plt.imshow(val, cmap='gray',interpolation='nearest')
model2 = copy.copy(model)
model2.layers = model2.layers[:5]
model2.compile(loss='categorical_crossentropy', optimizer=rms) # don't forget this step!
x_rep = model2.predict(X_test[these])
for this_x_rep in x_rep:
plt.figure(figsize=(10, 10), frameon=False)
for ind, val in enumerate(this_x_rep):
plt.subplot(6, 6, ind + 1)
plt.axis("off")
plt.imshow(val, cmap='gray',interpolation='nearest')
model = Sequential()
model.add(Flatten(input_shape = (1, 28, 28)))
model.add(Dense(128, init="glorot_normal"))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)
model.fit(X_train, Y_train, batch_size=32, nb_epoch=25,
verbose=1, show_accuracy=True, validation_split=0.1,
callbacks=[EarlyStopping(monitor='val_loss', patience=0)])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def equilibrium(arr):
    # Build prefix sums from the left and from the right
    left_sum = []
    right_sum = []
    for i in range(len(arr)):
        if i:
            left_sum.append(left_sum[i - 1] + arr[i])
            right_sum.append(right_sum[i - 1] + arr[len(arr) - 1 - i])
        else:
            left_sum.append(arr[i])
            right_sum.append(arr[len(arr) - 1])
    # The equilibrium index is where the left and right sums agree
    for i in range(len(arr)):
        if left_sum[i] == right_sum[len(arr) - 1 - i]:
            return i
    return -1
arr = [-7, 1, 5, 2, -4, 3, 0]
print('First equilibrium index is', equilibrium(arr))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If you do not have pydot library installed, open your terminal and type either conda install pydot or pip install pydot
Step2: The plan
Step3: When you have time, try it with other bases for the log
Step4: Very interesting. The distribution of labels is almost indistinguishable from uniform.
Step5: According to the information gain metric, petal length is the most useful feature, followed by petal width. Let's confirm that this agrees with the sklearn decision tree implementation.
Step6: We've been using the binarized version of the iris features. Recall that we simply chose thresholds for each feature by inspecting feature histograms. Let's use information gain as a metric to choose a best feature and a best threshold.
Step7: It looks like when we binarized our data, we didn't choose the thresholds that maximized information gain for 3 out of 4 features. Let's try training actual decision trees (as opposed to stumps) with the original (non-binarized) data. You may need to install GraphViz before exporting the tree.
|
<ASSISTANT_TASK:>
Python Code:
# This tells matplotlib not to try opening a new window for each plot.
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
# For producing decision tree diagrams.
from IPython.core.display import Image, display
from sklearn.externals.six import StringIO
import pydot
# Load the data, which is included in sklearn.
iris = load_iris()
print 'Iris target names:', iris.target_names
print 'Iris feature names:', iris.feature_names
X, Y = iris.data, iris.target
# Shuffle the data, but make sure that the features and accompanying labels stay in sync.
np.random.seed(0)
shuffle = np.random.permutation(np.arange(X.shape[0]))
X, Y = X[shuffle], Y[shuffle]
# Split into train and test.
train_data, train_labels = X[:100], Y[:100]
test_data, test_labels = X[100:], Y[100:]
# Define a function that applies a threshold to turn real valued iris features into 0/1 features.
# 0 will mean "short" and 1 will mean "long".
def binarize_iris(data, thresholds=[6.0, 3.0, 2.5, 1.0]):
# Initialize a new feature array with the same shape as the original data.
binarized_data = np.zeros(data.shape)
# Apply a threshold to each feature.
for feature in range(data.shape[1]):
binarized_data[:,feature] = data[:,feature] > thresholds[feature]
return binarized_data
# Create new binarized training and test data
binarized_train_data = binarize_iris(train_data)
binarized_test_data = binarize_iris(test_data)
def entropy(distribution):
h = 0.0
for probability in distribution:
logprob = -100.0 # log(0) = -inf so let's approximate it with -100 to avoid an error
if probability > 0.0: logprob = np.log2(probability)
h -= probability * logprob
return h
# Show a plot of the entropy, H(X), of a Bernoulli random variable X.
p_values = np.linspace(0, 1, 50)
entropies = [entropy([p, 1-p]) for p in p_values]
plt.figure(figsize=(4,4))
plt.plot(p_values, entropies, 'o')
plt.xlabel('P(X=1)')
plt.ylabel('H(X)')
print
def get_label_distribution(labels):
# Initialize counters for all labels to zero.
label_probs = np.array([0.0 for i in range(len(iris.target_names))])
# Iterate over labels in the training data and update counts.
for label in labels:
label_probs[label] += 1.0
# Normalize to get a distribution.
label_probs /= label_probs.sum()
return label_probs
label_probs = get_label_distribution(train_labels)
print 'Label distribution', label_probs
# Compare the label entropy to a uniform distribution.
print 'Label entropy:', entropy(label_probs)
print 'Uniform entropy:', entropy([1./3, 1./3, 1./3])
# A function that computes information gain given these inputs:
# data: an array of featurized examples
# labels: an array of labels corresponding to the the data
# feature: the feature to use to split the data
# threshold: the feature value to use to split the data (the default threshold is good for binary features)
def information_gain(data, labels, feature, threshold=0):
# Get the initial entropy of the label distribution.
initial_entropy = entropy(get_label_distribution(labels))
# subset0 will contain the labels for which the feature is 0 and
# subset1 will contain the labels for which the feature is 1.
subset0, subset1 = [], []
for datum, label in zip(data, labels):
if datum[feature] > threshold: subset1.append(label)
else: subset0.append(label)
# Compute the entropy of each subset.
subset0_entropy = entropy(get_label_distribution(subset0))
subset1_entropy = entropy(get_label_distribution(subset1))
# Make it a fair comparison:
# Compute the final entropy by weighting each subset's entropy according to its size.
subset0_weight = 1.0 * len(subset0) / len(labels)
subset1_weight = 1.0 * len(subset1) / len(labels)
final_entropy = subset0_weight * subset0_entropy + subset1_weight * subset1_entropy
# Finally, compute information gain as the difference between the initial and final entropy.
return initial_entropy - final_entropy
for feature in range(binarized_train_data.shape[1]):
## We are looking at binarized data; so the threshold = 0
ig = information_gain(binarized_train_data, train_labels, feature)
print '%d %.3f %s' %(feature, ig, iris.feature_names[feature])
dt = DecisionTreeClassifier(criterion='entropy', max_depth=1)
dt.fit(binarized_train_data, train_labels)
print 'Using a decision stump -- a tree with depth 1:'
print 'Feature importances:', dt.feature_importances_
print 'Accuracy:', dt.score(binarized_test_data, test_labels)
def try_features_and_thresholds(data, labels):
for feature in range(data.shape[1]):
# Choose a set of thresholds between the min- and max-valued feature, ignoring the min and max themselves.
thresholds = np.linspace(data[:,feature].min(), data[:,feature].max(), 20)[1:-1]
# Try each threshold and keep track of the best one for this feature.
best_threshold = 0
best_ig = 0
for threshold in thresholds:
ig = information_gain(data, labels, feature, threshold)
if ig > best_ig:
best_ig = ig
best_threshold = threshold
# Show the best threshold and information gain for this feature.
print '%d %.3f %.3f %s' %(feature, best_threshold, best_ig, iris.feature_names[feature])
try_features_and_thresholds(train_data, train_labels)
# Train a decision tree classifier.
dt = DecisionTreeClassifier(criterion='entropy', min_samples_split=2)
dt.fit(train_data, train_labels)
print 'Accuracy:', dt.score(test_data, test_labels)
# Export the trained tree so we can look at it.
output_name = 'iris-decisiontree.jpg'
print output_name
dot_data = StringIO()
tree.export_graphviz(dt, out_file=dot_data)
## print 'dot_data value:', dot_data.getvalue()
graph = pydot.graph_from_dot_data(dot_data.getvalue())
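# Note (a version-dependent caveat, not from the original notebook): with pydot >= 1.2,
# graph_from_dot_data() returns a *list* of graphs. If write_jpg() below fails with an
# AttributeError, one possible fix is:
# graph = pydot.graph_from_dot_data(dot_data.getvalue())[0]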
# If the export was successful, show the image.
if graph.write_jpg(output_name):
print 'Output:', output_name
display(Image(filename=output_name))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Train multiple classifiers
Step2: 2.2. Gradient tree boosting
Step3: 2.3. Random forests
Step4: 2.4. SVM with probabilities
Step5: 2.5. Linear regression
Step6: 2.6. Bayes
|
<ASSISTANT_TASK:>
Python Code:
import pickle
path = '../../rsc/obj/'
X_train_path = path + 'X_train.sav'
y_train_path = path + 'y_train.sav'
X_train = pickle.load(open(X_train_path, 'rb'))
y_train = pickle.load(open(y_train_path, 'rb'))
print(X_train.shape)
from sklearn.svm import LinearSVC
from sklearn.model_selection import GridSearchCV
svm_path = path + 'svm_clf.sav'
grid = GridSearchCV(LinearSVC(), {'C': [1.0, 2.0, 4.0, 8.0]})
grid.fit(X_train, y_train)
# Re-train starting from the best estimator found by the grid search
svm_clf = grid.best_estimator_
svm_clf.fit(X_train, y_train)
# Serialize the classifier
pickle.dump(svm_clf, open(svm_path, 'wb'))
from sklearn.ensemble import GradientBoostingClassifier
gtb_path = path + 'gtb_clf.sav'
gtb_clf = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0,
max_depth=1, random_state=0).fit(X_train, y_train)
# Serialize the classifier
pickle.dump(gtb_clf, open(gtb_path, 'wb'))
from sklearn.ensemble import RandomForestClassifier
rf_path = path + 'rf_clf.sav'
rf_clf = RandomForestClassifier(n_estimators=10, max_depth=None,
min_samples_split=2, random_state=0).fit(X_train, y_train)
# Serialize the classifier
pickle.dump(rf_clf, open(rf_path, 'wb'))
from sklearn import svm
from sklearn.model_selection import GridSearchCV
svm2_path = path + 'svm2_clf.sav'
svm2_clf = svm.SVC(gamma=2, C=1)
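# Note (a sketch of an alternative, not in the original code): the section title mentions
# probabilities; to call predict_proba on an SVC it must be constructed with
# probability=True, e.g. svm.SVC(gamma=2, C=1, probability=True)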
svm2_clf.fit(X_train, y_train)
# Serialize the classifier
pickle.dump(svm2_clf, open(svm2_path, 'wb'))
svm3_path = path + 'svm3_clf.sav'
grid2 = GridSearchCV(svm.SVC(), {'C': [1.0, 2.0, 4.0, 8.0]})
grid2.fit(X_train, y_train)
# Re-train starting from the best estimator found by the grid search
svm3_clf = grid2.best_estimator_
svm3_clf.fit(X_train, y_train)
# Serialize the classifier
pickle.dump(svm3_clf, open(svm3_path, 'wb'))
from sklearn import linear_model
regr_path = path + 'rgr_clf.sav'
# Create linear regression object
regr_clf = linear_model.LinearRegression()
# Train the model using the training sets
regr_clf.fit(X_train, y_train)
# Serialize the classifier
pickle.dump(regr_clf, open(regr_path, 'wb'))
from sklearn.naive_bayes import GaussianNB
gnb_path = path + 'gnb_clf.sav'
gnb_clf = GaussianNB()
gnb_clf.fit(X_train, y_train)
# Serialize the classifier
pickle.dump(gnb_clf, open(gnb_path, 'wb'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <font color='red'>Please put your datahub API key into a file called APIKEY and place it in the notebook folder, or assign your API key directly to the variable API_key!</font>
Step2: Now it's time to read the actual data as a Pandas DataFrame. We add separate columns for 'year' and 'month' for later use. For reading the data, we use the function get_data_from_point_API from the module named get_data_from_point_API, which is in the Notebook folder (the same folder where this script is located in GitHub). At the end of this box we also print out the Pandas data structure so that you can see what it looks like.
Step3: Now that we have fetched the time-series of precipitation data, we can easily compute the following statistics
Step4: The following plot will show the number of days by year with no observed precipitation. We can see that the difference between the years is not significant.
Step5: In this plot we look at how many days had more than 20 mm of precipitation in a year. 1998, one of the strongest El Niño years in recent history, is the clear winner. Daniel Swain also pointed out in his Weather West blog that California's wettest years on record were 1982-1983 and 1997-1998, and that they occurred during the strongest El Niño years. Those years clearly stand out in our demo as well.
Step6: The next plot shows annual total precipitation. Two of the three driest years in the whole period, 2013 and 2015, have been very recent. 1998 has been among the strongest and is exceeded only by the exceptionally large values of 1982 and 1983. Those, again, were the strongest El Niño years we talked about above. The El Niño of 2015-2016 still doesn't stand out.
Step7: Daily maximum precipitation occurred in 1982. Again, this plot confirms the results from the previous plots.
Step8: The average annual cycle of precipitation shows that it mostly rains during the winter months and the summer is usually dry.
Step9: Finally, let's look at a histogram. As we saw from the previous plots, Palo Alto has a great many completely dry days. From the histogram we can see that when it does rain, it rains a lot! On almost 350 days since 1981 there has been 8-16 mm/day, and on nearly 300 days there has been 16-32 mm/day. During 30 days of the entire period it has even rained 64-128 mm/day.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import pandas as pd
import numpy
from po_data_process import get_data_from_point_API, make_histogram, make_plot
import warnings
import matplotlib.cbook
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
API_key = open('APIKEY').read().strip()
dataset_key = 'chg_chirps_global_05'
#Palo Alto
latitude = 37.42
longitude = -122.17
data = get_data_from_point_API(dataset_key, longitude, latitude, API_key)
data['time'] = pd.to_datetime(data['time'])
data['year'] = data['time'].dt.year
data['month'] = data['time'].dt.month
data = data.loc[data['year'] < 2019]
print (data.keys())
print ('There have been ' + str(len(data.loc[data['precip'] == 0])) + ' completely dry days in Palo Alto of ' + str(len(data)) + ' total.')
print ('It means that ' + str(round((100. * len(data.loc[data['precip'] == 0]))/len(data),2)) + '% of the days since 1981 have been completely dry in Palo Alto.')
make_plot(data.loc[data['precip'] == 0].groupby('year').count()['precip'],dataset_key,'Completely dry days by year')
make_plot(data.loc[data['precip'] > 20].groupby('year').count()['precip'],dataset_key,'Number of days with more than 20 mm precipitation in a year')
make_plot(data.groupby('year').sum()['precip'],dataset_key,'Annual precipitation by year')
make_plot(data.groupby('year')['precip'].max(),dataset_key,'Daily maximum precipitation by year')
make_plot(data.groupby('month')['precip'].mean(),dataset_key, 'Average annual cycle of precipitation')
bins = [1,2,4,8,16,32,64,128]
make_histogram(data['precip'],bins)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Step 2
Step3: As we can see, the footprints (rectangles) do not exactly match the AOI. Indeed, none of them cover the AOI. We don't care about pixels outside of the AOI, so we are going to want to clip the imagery to the AOI (to remove pixels outside the AOI).
Step 3
Step4: Step 3.1
Step5: Step 3.2
Step6: Option 2
Step7: Step 4
Step8: Step 4.2
Step9: Step 4.3
Step10: Step 5
Step13: 5.2
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import json
import os
from pathlib import Path
from pprint import pprint
import time
from zipfile import ZipFile
import numpy as np
from planet import api
from planet.api import filters
import rasterio
from rasterio import plot
from shapely.geometry import MultiPolygon, shape
# if your Planet API Key is not set as an environment variable, you can paste it below
API_KEY = os.environ.get('PL_API_KEY', 'PASTE_YOUR_KEY_HERE')
client = api.ClientV1(api_key=API_KEY)
# define test data for the filter
test_start_date = datetime.datetime(year=2019,month=4,day=1)
test_stop_date = datetime.datetime(year=2019,month=5,day=1)
# iowa crops aoi
test_aoi_geom = {
"type": "Polygon",
"coordinates": [
[
[-93.299129, 42.699599],
[-93.299674, 42.812757],
[-93.288436, 42.861921],
[-93.265332, 42.924817],
[-92.993873, 42.925124],
[-92.993888, 42.773637],
[-92.998396, 42.754529],
[-93.019154, 42.699988],
[-93.299129, 42.699599]
]
]
}
# create an api request from the search specifications
def build_request(aoi_geom, start_date, stop_date):
'''build a data api search request for clear PSScene 4-Band imagery'''
item_type = 'PSScene'
query = filters.and_filter(
filters.geom_filter(aoi_geom),
filters.range_filter('clear_percent', gte=90),
filters.date_range('acquired', gt=start_date),
filters.date_range('acquired', lt=stop_date)
)
return filters.build_search_request(query, ['PSScene'])
request = build_request(test_aoi_geom, test_start_date, test_stop_date)
print(request)
# search the data api
def search_data_api(request, client, limit=500):
result = client.quick_search(request)
# this returns a generator
return result.items_iter(limit=limit)
items = list(search_data_api(request, client))
print(len(items))
# check out an item just for fun
# pprint(items[0])
# visualize a scene footprint
footprints = [shape(i['geometry']) for i in items]
footprints[0]
# visualize subset of footprints and aoi
MultiPolygon([shape(test_aoi_geom), *footprints[:5]])
# work with just a subset of the items in the interest of bandwidth
test_items = items[:2]
# filter to item ids
ids = [i['id'] for i in test_items]
ids
# specify the PSScene 4-Band surface reflectance product
# make sure to get the *_udm2 bundle so you get the udm2 product
# note: capitalization really matters in item_type when using planet client orders api
item_type = 'PSScene'
bundle = 'analytic_sr_udm2'
# specify tools
# clip to AOI
clip_tool = {'clip': {'aoi': test_aoi_geom}}
# convert to NDVI
bandmath_tool = {'bandmath': {
"pixel_type": "32R",
"b1": "(b4 - b3) / (b4+b3)"
}}
tools = [clip_tool, bandmath_tool]
pprint(tools)
# specify a name
name = 'tutorial_order'
orders_request = {
'name': name,
'products': [{
'item_ids': ids,
'item_type': item_type,
'product_bundle': bundle
}],
'tools': tools,
'delivery': {
'single_archive': True,
'archive_filename':'{{name}}_{{order_id}}.zip',
'archive_type':'zip'
},
'notifications': {
'email': False
},
}
# pprint(orders_request, indent=4)
order_info = client.create_order(orders_request).get()
order_id = order_info['id']
order_id
# zip up entire order into one file
ziptype = 'order'
# format the ids for use with the CLI
cli_ids = ','.join([i['id'] for i in test_items])
# save tools definition to file
tools_file = 'tools.json'
with open(tools_file, 'w') as dst:
dst.write(json.dumps(tools))
order_info_file = 'order.json'
# submit the order and save the response to a file so we can get the order id
!set -x;planet orders create \
--id $cli_ids \
--item-type $item_type \
--bundle $bundle \
--zip $ziptype \
--tools $tools_file \
--name $name | tee $order_info_file
# read the order id
with open(order_info_file, 'r') as src:
order_info = json.load(src)
order_id = order_info['id']
order_id
def poll_for_success(order_id, client, num_loops=50):
count = 0
while(count < num_loops):
count += 1
order_info = client.get_individual_order(order_id).get()
state = order_info['state']
print(state)
success_states = ['success', 'partial']
if state == 'failed':
            raise Exception(order_info)
elif state in success_states:
break
time.sleep(10)
poll_for_success(order_id, client)
demo_data_dir = os.path.join('data', 'demo')
# make the download directory if it doesn't exist
Path(demo_data_dir).mkdir(parents=True, exist_ok=True)
!set -x;planet orders download --dest $demo_data_dir $order_id
!ls data/demo
def get_download_locations(download_dir):
manifest_file = os.path.join(download_dir, 'manifest.json')
with open(manifest_file, 'r') as src:
manifest = json.load(src)
# uncomment to see the manifest
# pprint(manifest)
locations = [os.path.join(download_dir, f['path'])
for f in manifest['files']]
return locations
locations = get_download_locations(demo_data_dir)
pprint(locations)
# lets just double check to see if the zip file got downloaded
!ls data/demo
def unzip(filename):
location = Path(filename)
zipdir = location.parent / location.stem
with ZipFile(location) as myzip:
myzip.extractall(zipdir)
return zipdir
zipdir = unzip(locations[0])
zipdir
def get_unzipped_files(zipdir):
filedir = zipdir / 'files'
filenames = os.listdir(filedir)
return [filedir / f for f in filenames]
file_paths = get_unzipped_files(zipdir)
pprint(file_paths)
def get_image_and_udm_files(file_paths):
files = [str(p) for p in file_paths]
# the image files are tiffs and are identified with '_SR_' in the name
img_id = '_AnalyticMS_SR_'
imgfiles = [f for f in files
if f.endswith('.tif') and img_id in f]
# get associated udm files for image files
# each image has a unique id at the beginning of the name
imgroots = [str(f).split(img_id)[0] for f in imgfiles]
# the udm files are identified with '_udm2' in the name
udmfiles = [next(f for f in files if f.startswith(r + '_udm2'))
for r in imgroots]
return imgfiles, udmfiles
imgfiles, udmfiles = get_image_and_udm_files(file_paths)
pprint(imgfiles)
pprint(udmfiles)
# read UDM2 file
def read_notclear(udm2_filename):
with rasterio.open(udm2_filename) as img:
# the first band is the clear/not clear band
mask=img.read(1)
not_clear = mask == 0
return not_clear
udmfile = udmfiles[0]
not_clear = read_notclear(udmfile)
# there is an issue where some udms aren't the same size as the images
# to deal with this just cut off any trailing rows/columns
# this isn't ideal as it can result in up to one pixel shift in x or y direction
def crop(img, shape):
return img[:shape[0], :shape[1]]
def read_ndvi(img_filename, not_clear):
with rasterio.open(imgfile) as img:
# ndvi is a single-band image
band = img.read(1)
# crop image and mask to same size
img_shape = min(band.shape, not_clear.shape)
ndvi = np.ma.array(crop(band, img_shape), mask=crop(not_clear, img_shape))
return ndvi
imgfile = imgfiles[0]
ndvi = read_ndvi(imgfile, not_clear)
# set up NDVI visualization
# copied from: https://stackoverflow.com/a/48598564
import matplotlib.pyplot as plt
import matplotlib.colors as colors
"""
The NDVI values will range from -1 to 1. You want to use a diverging color scheme to visualize the data,
and you want to center the colorbar at a defined midpoint. The class below allows you to normalize the colorbar.
"""
class MidpointNormalize(colors.Normalize):
    """
    Normalise the colorbar so that diverging bars work their way either side from a prescribed midpoint value,
    e.g. im=ax1.imshow(array, norm=MidpointNormalize(midpoint=0.,vmin=-100, vmax=100))
    Credit: Joe Kington, http://chris35wills.github.io/matplotlib_diverging_colorbar/
    Credit: https://stackoverflow.com/a/48598564
    """
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
colors.Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# Note that I'm ignoring clipping and other edge cases here.
result, is_scalar = self.process_value(value)
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.array(np.interp(value, x, y), mask=result.mask, copy=False)
def show_ndvi(ndvi):
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111)
# diverging color scheme chosen from https://matplotlib.org/users/colormaps.html
cmap = plt.cm.RdYlGn
mmin = np.nanmin(ndvi)
mmax = np.nanmax(ndvi)
# print((mmin, mmax))
mid = 0
cax = ax.imshow(ndvi, cmap=cmap, clim=(mmin, mmax),
norm=MidpointNormalize(midpoint=mid,vmin=mmin, vmax=mmax))
ax.axis('off')
ax.set_title('Normalized Difference Vegetation Index', fontsize=18, fontweight='bold')
cbar = fig.colorbar(cax, orientation='horizontal', shrink=0.65)
plt.show()
# show_ndvi(ndvi)
for imgfile, udmfile in zip(imgfiles, udmfiles):
show_ndvi(read_ndvi(imgfile, read_notclear(udmfile)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Packaging up the code
Step2: Find absolute paths to your data
Step3: Running the Python module from the command-line
Step4: Clean model training dir/output dir
Step5: Running locally using gcloud
Step6: Submit training job using gcloud
Step7: Don't be concerned if the notebook appears stalled (with a blue progress bar) or returns with an error about being unable to refresh auth tokens. This is a long-lived Cloud job and work is going on in the cloud.
Step8: Train on larger dataset
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
import os
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# For Python Code
# Model Info
MODEL_NAME = 'taxifare'
# Model Version
MODEL_VERSION = 'v1'
# Training Directory name
TRAINING_DIR = 'taxi_trained'
# for bash
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
os.environ['MODEL_NAME'] = MODEL_NAME
os.environ['MODEL_VERSION'] = MODEL_VERSION
os.environ['TRAINING_DIR'] = TRAINING_DIR
os.environ['TFVERSION'] = '2.5' # Tensorflow version
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
find ${MODEL_NAME}
%%bash
cat ${MODEL_NAME}/trainer/model.py
%%bash
echo "Working Directory: ${PWD}"
echo "Head of taxi-train.csv"
head -1 $PWD/taxi-train.csv
echo "Head of taxi-valid.csv"
head -1 $PWD/taxi-valid.csv
%%bash
# This is so that the trained model is started fresh each time. However, this needs to be done before
rm -rf $PWD/${TRAINING_DIR}
%%bash
# Setup python so it sees the task module which controls the model.py
export PYTHONPATH=${PYTHONPATH}:${PWD}/${MODEL_NAME}
# Currently set for python 2. To run with python 3
# 1. Replace 'python' with 'python3' in the following command
# 2. Edit trainer/task.py to reflect proper module import method
python -m trainer.task \
--train_data_paths="${PWD}/taxi-train*" \
--eval_data_paths=${PWD}/taxi-valid.csv \
--output_dir=${PWD}/${TRAINING_DIR} \
--train_steps=1000 --job-dir=./tmp
%%bash
ls $PWD/${TRAINING_DIR}/export/exporter/
%%writefile ./test.json
{"pickuplon": -73.885262,"pickuplat": 40.773008,"dropofflon": -73.987232,"dropofflat": 40.732403,"passengers": 2}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
# This model dir is the model exported after training and is used for prediction
#
model_dir=$(ls ${PWD}/${TRAINING_DIR}/export/exporter | tail -1)
# predict using the trained model
gcloud ai-platform local predict \
--model-dir=${PWD}/${TRAINING_DIR}/export/exporter/${model_dir} \
--json-instances=./test.json
%%bash
# This is so that the trained model is started fresh each time. However, this needs to be done before
rm -rf $PWD/${TRAINING_DIR}
%%bash
# Use Cloud Machine Learning Engine to train the model in local file system
gcloud ai-platform local train \
--module-name=trainer.task \
--package-path=${PWD}/${MODEL_NAME}/trainer \
-- \
--train_data_paths=${PWD}/taxi-train.csv \
--eval_data_paths=${PWD}/taxi-valid.csv \
--train_steps=1000 \
--output_dir=${PWD}/${TRAINING_DIR}
%%bash
ls $PWD/${TRAINING_DIR}
%%bash
# Clear Cloud Storage bucket and copy the CSV files to Cloud Storage bucket
echo $BUCKET
gsutil -m rm -rf gs://${BUCKET}/${MODEL_NAME}/smallinput/
gsutil -m cp ${PWD}/*.csv gs://${BUCKET}/${MODEL_NAME}/smallinput/
%%bash
OUTDIR=gs://${BUCKET}/${MODEL_NAME}/smallinput/${TRAINING_DIR}
JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S)
echo $OUTDIR $REGION $JOBNAME
# Clear the Cloud Storage Bucket used for the training job
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/${MODEL_NAME}/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version 2.3 \
--python-version 3.5 \
-- \
--train_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-train*" \
--eval_data_paths="gs://${BUCKET}/${MODEL_NAME}/smallinput/taxi-valid*" \
--output_dir=$OUTDIR \
--train_steps=10000
%%bash
gsutil ls gs://${BUCKET}/${MODEL_NAME}/smallinput
%%bash
OUTDIR=gs://${BUCKET}/${MODEL_NAME}/${TRAINING_DIR}
JOBNAME=${MODEL_NAME}_$(date -u +%y%m%d_%H%M%S)
CRS_BUCKET=cloud-training-demos # use the already exported data
echo $OUTDIR $REGION $JOBNAME
gsutil -m rm -rf $OUTDIR
gcloud ai-platform jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${PWD}/${MODEL_NAME}/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=STANDARD_1 \
--runtime-version 2.3 \
--python-version 3.5 \
-- \
--train_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/train.csv" \
--eval_data_paths="gs://${CRS_BUCKET}/${MODEL_NAME}/ch3/valid.csv" \
--output_dir=$OUTDIR \
--train_steps=100000
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading data
Step2: Training variables
Step3: Folding strategy - stacking algorithm
Step4: Define folding model
Step5: Default prediction (predict i_th_ fold by i_th_ classifier)
Step6: Voting prediction (predict i-fold by all classifiers and take value, which is calculated by vote_function)
Step7: Comparison of folds
Step8: Signal distribution for each fold
Step9: Background distribution for each fold
Step10: ROCs (each fold used as test dataset)
Step11: Report for test dataset
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import numpy, pandas
from rep.utils import train_test_split
from sklearn.metrics import roc_auc_score
sig_data = pandas.read_csv('toy_datasets/toyMC_sig_mass.csv', sep='\t')
bck_data = pandas.read_csv('toy_datasets/toyMC_bck_mass.csv', sep='\t')
labels = numpy.array([1] * len(sig_data) + [0] * len(bck_data))
data = pandas.concat([sig_data, bck_data])
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, train_size=0.7)
variables = ["FlightDistance", "FlightDistanceError", "IP", "VertexChi2", "pt", "p0_pt", "p1_pt", "p2_pt", 'LifeTime', 'dira']
data = data[variables]
from rep.estimators import SklearnClassifier
from sklearn.ensemble import GradientBoostingClassifier
from rep.metaml import FoldingClassifier
n_folds = 4
folder = FoldingClassifier(GradientBoostingClassifier(), n_folds=n_folds, features=variables)
folder.fit(train_data, train_labels)
folder.predict_proba(train_data)
# definition of mean function, which combines all predictions
def mean_vote(x):
return numpy.mean(x, axis=0)
folder.predict_proba(test_data, vote_function=mean_vote)
from rep.data.storage import LabeledDataStorage
from rep.report import ClassificationReport
# add folds_column to dataset to use mask
train_data["FOLDS"] = folder._get_folds_column(len(train_data))
lds = LabeledDataStorage(train_data, train_labels)
report = ClassificationReport({'folding': folder}, lds)
for fold_num in range(n_folds):
report.prediction_pdf(mask="FOLDS == %d" % fold_num, labels_dict={1: 'sig fold %d' % fold_num}).plot()
for fold_num in range(n_folds):
report.prediction_pdf(mask="FOLDS == %d" % fold_num, labels_dict={0: 'bck fold %d' % fold_num}).plot()
for fold_num in range(n_folds):
report.roc(mask="FOLDS == %d" % fold_num).plot()
lds = LabeledDataStorage(test_data, test_labels)
report = ClassificationReport({'folding': folder}, lds)
report.prediction_pdf().plot(new_plot=True, figsize = (9, 4))
report.roc().plot(xlim=(0.5, 1))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Notation
Step2: Definition 3. The heaviside function maps strictly positive values to the value 1 and non-positive values to 0
Step3: Definition 4. The logistic function maps a real number continuously into the interval (0, 1)
Step4: Properties of the logistic function. The following is a handy list of properties of $G(y)$.
Step5: Demo
Step6: Heaviside vs. logistic functions
Step7: Determining $\theta$ via Maximum Likelihood Estimation
Step9: To optimize the log-likelihood with respect to the parameters, $\theta$, you'd like to do the moral equivalent of taking its derivative, setting it to zero, and then solving for $\theta$.
Step10: Exercise. Make a contour plot of the log-likelihood and draw the trajectory taken by the $\theta(t)$ values laid on top of it.
Step11: Numerical optimization via Newton's method
Step13: Exercise. Show that the Hessian of the log-likelihood for logistic regression is,
Step14: Exercise. Complete the code below, which implements Newton's method.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import seaborn as sns
import numpy as np
from IPython.display import display
%matplotlib inline
import plotly.plotly as py
from plotly.graph_objs import *
# @YOUSE: Fill in your credentials (user ID, API key) for Plotly here
py.sign_in ('USERNAME', 'APIKEY')
%reload_ext autoreload
%autoreload 2
import cse6040utils
from cse6040utils import lin_discr
from cse6040utils import heaviside
from cse6040utils import logistic
df = pd.read_csv ('http://vuduc.org/cse6040/logreg_points_train.csv')
points = np.insert (df.as_matrix (['x_1', 'x_2']), 0, 1.0, axis=1)
labels = df.as_matrix (['label'])
from cse6040utils import make_2d_scatter_traces
print "Number of points:", len (points)
traces = make_2d_scatter_traces (points, labels)
py.iplot (traces)
from cse6040utils import check_labels
from cse6040utils import np_col_vec
from cse6040utils import gen_lin_discr_trace
#theta = np_col_vec ([0., -1., 3.])
#theta = np_col_vec ([-0.55, -2., -0.5])
theta = np_col_vec ([-1.35, -6.5, -1.])
# Generate 0/1 labels for your discriminant:
is_correct = check_labels (points, labels,
fun=lambda X: heaviside (lin_discr (X, theta)))
print "Number of misclassified points:", (len (points) - sum (is_correct))[0]
print "\n(Run the code cell below to visualize the results.)"
# Visually inspect the above results
traces = make_2d_scatter_traces (points, is_correct)
traces.append (gen_lin_discr_trace (points, theta))
# Plot it!
layout = Layout (xaxis=dict (range=[-1.25, 2.25]),
yaxis=dict (range=[-3.25, 2.25]))
fig = Figure (data=traces, layout=layout)
py.iplot (fig)
# Use Numpy's handy meshgrid() to create a regularly-spaced grid of values.
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html
x1 = np.linspace (-2., +2., 100)
x2 = np.linspace (-2., +2., 100)
x1_grid, x2_grid = np.meshgrid (x1, x2)
h_grid = heaviside (theta[0] + theta[1]*x1_grid + theta[2]*x2_grid)
trace_grid = Contour (x=x1, y=x2, z=h_grid)
py.iplot ([trace_grid])
x_logit_1d = np.linspace (-6.0, +6.0, 101)
y_logit_1d = logistic (x_logit_1d)
trace_logit_1d = Scatter (x=x_logit_1d, y=y_logit_1d)
py.iplot ([trace_logit_1d])
g_grid = logistic (theta[0] + theta[1]*x1_grid + theta[2]*x2_grid)
trace_logit_grid = Contour (x=x1, y=x2, z=g_grid)
py.iplot ([trace_logit_grid])
def log_likelihood (theta, l, X):
# @YOUSE: Complete this function to evaluate the log-likelihood
pass
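# One possible completion of the @YOUSE exercise above -- a sketch, not the official
# course solution. It assumes l is an n-by-1 column vector of 0/1 labels, X is the
# n-by-3 points matrix, and logistic() is the function imported from cse6040utils.
def log_likelihood_sketch (theta, l, X):
    g = logistic (X.dot (theta))  # per-point probabilities G(x_i . theta), n-by-1
    return float (l.T.dot (np.log (g)) + (1.0 - l).T.dot (np.log (1.0 - g)))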
def gradient_log_likelihood (theta, l, X):
    """Returns the gradient of the log-likelihood."""
# @YOUSE: Implement the gradient for the logistic regression
# model's log-likelihood
pass
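# A possible sketch of the gradient (not the official solution): for this model,
# grad_theta l(theta; l, X) = X^T (l - G(X theta)).
def gradient_log_likelihood_sketch (theta, l, X):
    return X.T.dot (l - logistic (X.dot (theta)))  # 3-by-1 column vector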
MAX_STEP = 500
ALPHA = 0.5
# Get the data coordinate matrix, X, and labels vector, l
X = points
l = labels.astype (dtype=float)
# Store *all* guesses, for subsequent analysis
thetas = np.zeros ((3, MAX_STEP+1))
for t in range (MAX_STEP):
theta_t = thetas[:, t:t+1]
# @YOUSE: Fill in the code to compute thetas[:, t+1:t+2]
pass
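    # A possible way to fill in the @YOUSE step above (a sketch): ascend along the
    # gradient of the log-likelihood, i.e.
    # delta_t = gradient_log_likelihood (theta_t, l, X)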
thetas[:, t+1:t+2] = theta_t + ALPHA*delta_t
theta_gd = thetas[:, MAX_STEP:]
print "Your (hand) solution:", theta.T.flatten ()
print "Computed solution:", theta_gd
print "\n=== Comparisons ==="
print "\n\\theta_0/\\theta_2:", \
"manual =", theta[0]/theta[2], \
", vs. MLE (via gradient ascent) =", theta_gd[0]/theta_gd[2]
print "\n\\theta_1/\\theta_2:", \
"manual =", theta[1]/theta[2], \
", vs. MLE (via gradient ascent) =", theta_gd[1]/theta_gd[2]
# Generate 0/1 labels for computed discriminant using the logistic function
def gen_label_logreg (X, theta):
return heaviside (logistic (lin_discr (X, theta)) - 0.5)
def check_correct_logreg (l, X, theta):
return check_labels (X, l, fun=lambda X: gen_label_logreg (X, theta))
def count_correct_logreg (l, X, thetas):
num_steps = thetas.shape[1]
num_correct = np.zeros (num_steps, dtype=int)
for t in range (num_steps):
theta_t = thetas[:, t:t+1]
is_correct = check_correct_logreg (l, X, theta_t)
num_correct[t] = sum (is_correct)[0]
return num_correct
num_correct_gd = count_correct_logreg (l, X, thetas)
is_correct_gd = check_correct_logreg (l, X, thetas[:, -1:])
print "Number of misclassified points using MLE via gradient ascent:", \
(len (points) - num_correct_gd[-1])
print "\n(Run the code cell below to visualize the results.)"
# Visually inspect the above results
traces_gd = make_2d_scatter_traces (points, is_correct_gd)
traces_gd.append (gen_lin_discr_trace (points, theta_gd))
# Plot it!
layout_gd = Layout (xaxis=dict (range=[-1.25, 2.25]),
yaxis=dict (range=[-3.25, 2.25]))
fig_gd = Figure (data=traces_gd, layout=layout_gd)
py.iplot (fig_gd)
n1_ll = 100
x1_ll = np.linspace (-20., 0., n1_ll)
n2_ll = 100
x2_ll = np.linspace (-20., 0., n2_ll)
x1_ll_grid, x2_ll_grid = np.meshgrid (x1_ll, x2_ll)
ll_grid = np.zeros ((n1_ll, n2_ll))
for i1 in range (n1_ll):
for i2 in range (n2_ll):
theta_i1_i2 = np.array ([[thetas[0, MAX_STEP]],
[x1_ll_grid[i1][i2]],
[x2_ll_grid[i1][i2]]
])
ll_grid[i1][i2] = log_likelihood (theta_i1_i2, l, X)
trace_ll_grid = Contour (x=x1_ll, y=x2_ll, z=ll_grid)
trace_thetas = Scatter (x=thetas[1, :], y=thetas[2, :], mode='markers+lines')
py.iplot ([trace_ll_grid, trace_thetas])
A = np.array ([[1, 2, 3],
[4, 5, 6]])
B = np.array ([[-1, 2, -3],
[4, -5, 6]])
print np.multiply (A, B) # elementwise product
print np.multiply (A, B[:, 0:1]) # "auto-extend" version
def hessian_log_likelihood (theta, l, X):
    """Returns the Hessian of the log-likelihood."""
# @YOUSE: Implement the Hessian
pass
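# A possible sketch of the Hessian (not the official solution). With g = G(X theta),
# the Hessian is -X^T diag(g (1 - g)) X; using the "auto-extend" elementwise multiply
# demonstrated above:
def hessian_log_likelihood_sketch (theta, l, X):
    g = logistic (X.dot (theta))                       # n-by-1
    return -X.T.dot (np.multiply (g * (1.0 - g), X))   # 3-by-3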
MAX_STEP = 10
# Get the data coordinate matrix, X, and labels vector, l
X = points
l = labels.astype (dtype=float)
# Store *all* guesses, for subsequent analysis
thetas_newt = np.zeros ((3, MAX_STEP+1))
for t in range (MAX_STEP):
theta_t = thetas_newt[:, t:t+1]
# @YOUSE: Fill in this code
pass
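    # A possible way to fill in the @YOUSE step above (a sketch): take a Newton step
    # delta_t = -H^{-1} g, e.g.
    # delta_t = np.linalg.solve (-hessian_log_likelihood (theta_t, l, X),
    #                            gradient_log_likelihood (theta_t, l, X))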
thetas_newt[:, t+1:t+2] = theta_t + delta_t
theta_newt = thetas_newt[:, MAX_STEP:]
print "Your (hand) solution:", theta.T.flatten ()
print "Computed solution:", theta_newt
num_correct_newt = count_correct_logreg (l, X, thetas_newt)
is_correct_newt = check_correct_logreg (l, X, thetas_newt[:, -1:])
print "\nNumber of misclassified points using MLE:", (len (points) - num_correct_newt[-1])
print "\n(Run the code cell below to visualize the results.)"
print "\n=== Comparisons ==="
print "\n\\theta_0/\\theta_2:", \
"manual =", theta[0]/theta[2], \
", vs. Newton =", theta_newt[0]/theta_newt[2]
print "\n\\theta_1/\\theta_2:", \
"manual =", theta[1]/theta[2], \
", vs. Newton =", theta_newt[1]/theta_newt[2]
# Visually inspect the above results
traces_newt = make_2d_scatter_traces (points, is_correct_newt)
traces_newt.append (gen_lin_discr_trace (points, theta_newt))
# Plot it!
layout_newt = Layout (xaxis=dict (range=[-1.25, 2.25]),
yaxis=dict (range=[-3.25, 2.25]))
fig_newt = Figure (data=traces_newt, layout=layout_newt)
py.iplot (fig_newt)
trace_thetas_newt = Scatter (x=thetas_newt[1, :], y=thetas_newt[2, :], mode='markers+lines')
py.iplot ([trace_ll_grid, trace_thetas_newt])
I_gd = range (len (num_correct_gd))
trace_gd_mistakes = Scatter (x=I_gd, y=len (points) - num_correct_gd,
mode='markers+lines', name='Gradient descent'
)
I_newt = range (len (num_correct_newt))
trace_newt_mistakes = Scatter (x=I_newt, y=len (points) - num_correct_newt,
mode='markers+lines', name='Newton'
)
layout_mistakes = Layout (xaxis=dict (type='log'), yaxis=dict (type='log'))
fig_mistakes = Figure (data=[trace_gd_mistakes, trace_newt_mistakes],
layout=layout_mistakes)
py.iplot (fig_mistakes)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example. Rolling a fair n-sided die (with n=6).
Step2: Example. Flipping a fair coin twice and recording the results in sequence.
Step3: Example. Unequally likely outcomes on a colored "spinner".
Step4: DeckOfCards() is a special case of BoxModel for drawing from a standard deck of 52 cards. By default replace=False.
|
<ASSISTANT_TASK:>
Python Code:
from symbulate import *
%matplotlib inline
n = 6
die = list(range(1, n+1))
P = BoxModel(die)
RV(P).sim(10000).plot()
P = BoxModel(['H', 'T'], size=2, order_matters=True)
P.sim(10000).tabulate(normalize=True)
P = BoxModel(['orange', 'brown', 'yellow'], probs=[0.5, 0.25, 0.25])
P.sim(10000).tabulate(normalize = True)
DeckOfCards(size=5).sim(3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reminder on file parsing strategy
Step2: The file indicated below contains a representative sample of popular votes from the last US presidential election. Parse the file and return a count of the number of occurrences of each name
Step3: Exercise 2
Step4: OrderedDict
Step5: Generator
Step6: 2) Using this generator to generate a 100nt sequence (as a string)
Step7: Statistics and viewing
Step8: Which of the following code blocks will
Step9: Merging dataframes
Step10: Mixing Pandas and generators
Step11: .
Step12: Exercise 2
Step13: Exercise 3
Step14: Exercise 4
Step15: )2
Step16: Exercise 5
Step17: Exercise 6
Step18: Exercise 7
Step19: Exercise 8
|
<ASSISTANT_TASK:>
Python Code:
# Import the packages that will be useful for this part of the lesson
from collections import OrderedDict, Counter
import pandas as pd
from pprint import pprint
# Small trick to get a larger display
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
from random import choice
disaster_list = ["Global_pandemic", "Brexit", "US_presidential_elections", "Nuclear_war", "Asteroide_storm", "End of_antibiotics_era"]
choice(disaster_list)
file = "../data/US_election_vote_sample.txt"
file = "../data/gencode_sample.gff3"
! head -2 ../data/gencode_sample.gff3 # check format of GFF file
# rearrange the lines below to create a working solution
type_field = line.split('\t')[2]
type_counter = Counter()
print('%s:\t%d' % count_info)
with open(file, 'r') as fh:
for line in fh.readlines():
for count_info in type_counter.most_common():
from collections import Counter
type_counter[type_field] += 1
file = "../data/gencode_sample.gff3"
! head -2 ../data/gencode_sample.gff3
# fill in the blanks (---) in the code below to create a working solution
from collections import ---
sequences = OrderedDict()
with open(file, ---) as fh:
--- line in fh.readlines():
fields = line.split()
seqid = fields[---]
attributes = ---
ID = attributes.split(';')[0]
if --- in sequences:
sequences[seqid].append(ID)
else:
sequences[seqid] = [ID]
for seq, ids in sequences.items():
print('%s:\t%s' % (seq, ', '.join(ids)))
# Import the uniform method (which is also a generator by the way) to generate random floats
from random import uniform
def DNA_generator (): # Eventually you can have options for relative nucleotide frequencies
# Calculate cummulative frequencies to avoid having to do it each time in the loop
---
---
---
---
# Iterate indefinitly... until a nuclear apocalypse at least.
while True:
# Generate a random float frequency between 0 and max freq
freq = uniform (0, ---)
# Depending on the random frequency return the approriate base
if --- :
yield "A"
elif ---:
yield "T"
elif ---:
yield "C"
else:
yield "G"
d = DNA_generator()
---
file = "../data/sample_alignment.sam"
import pandas as pd
df = pd.read_table(file, comment='@', header=None)
file = "../data/abundance.tsv"
abundance_file = "../data/abundance.tsv"
gsym_to_tid_file = "../data/gene_symbol_to_transcript_id.tsv"
df1 =
df1.head()
df2 =
df2.head()
df3 =
file = "../data/codon_usage_bias_human.tsv"
c = Counter()
# Open the file
with open ("../data/US_election_vote_sample.txt", "r") as fp:
for candidate in fp:
# Increment the counter for the current element
c[candidate]+=1
# Order by most frequent element
c.most_common()
file = "../data/gencode_sample.gff3"
c = Counter()
# Open the file
with open (file, "r") as fp:
# Iterate over lines
for line in fp:
# Split the line and get the element 3
feature_type = line.split("\t")[2]
# Increment the counter
c[feature_type]+=1
# Order by most frequent element
c.most_common()
!head -n 1 "../data/gencode_sample.gff3"
file = "../data/gencode_sample.gff3"
d = OrderedDict()
# Open the file
with open (file, "r") as fp:
# Iterate over lines
for line in fp:
# Split the line and get the element 3
seqid = line.split("\t")[0]
# Parse the line to get the ID
ID = line.split("\t")[8].split(";")[0][3:]
#
if not seqid in d:
d[seqid] = []
d[seqid].append(ID)
d
from random import uniform
def DNA_generator (A_freq, T_freq, C_freq, G_freq): # Customizable frequency argument
# Calculate cummulative frequencies to avoid having to do it each time in the loop
cum_A_freq = A_freq
cum_T_freq = A_freq+T_freq
cum_C_freq = A_freq+T_freq+C_freq
cum_G_freq = A_freq+T_freq+C_freq+G_freq
# Iterate indefinitly
while True:
# Generate a random float frequency between 0 and max freq
freq = uniform (0, cum_G_freq)
# Depending on the random frequency return the approriate base
if freq <= cum_A_freq:
yield "A"
elif freq <= cum_T_freq:
yield "T"
elif freq <= cum_C_freq:
yield "C"
else:
yield "G"
# achieve the same using random.choices
# if using python v<3.6, import choices from numpy.random
from random import choices
def DNA_generator_choices(weights=[0.19, 0.31, 0.31, 0.19], letters=['A', 'C', 'G', 'T']):
while True:
yield choices(population=letters, weights=weights, k=1)[0]
# Create the generator with the required frequencies
d = DNA_generator(A_freq=0.19, T_freq=0.19, C_freq=0.31, G_freq=0.31)
# Test the generator
print(next(d), next(d), next(d), next(d), next(d), next(d), next(d), next(d))
# Create the generator with the required frequencies
d = DNA_generator_choices()
# Test the generator
print(next(d), next(d), next(d), next(d), next(d), next(d), next(d), next(d))
# iterative str construction with a loop
seq=""
for _ in range (100):
seq += next(d)
seq
# Same with a one liner list comprehension
seq = "".join([next(d) for _ in range (100)])
seq
file = "../data/sample_alignment.sam"
columns_names = ['QNAME', 'FLAG', 'RNAME', 'POS', 'MAPQ', 'CIGAR', 'RNEXT', 'PNEXT', 'TLEN', 'SEQ', 'QUAL']
df = pd.read_table(file, sep="\t", names = columns_names, skiprows=[0,1], index_col=0)
df.tail(10)
tlen_sample = df.sample(10).TLEN
print (tlen_sample)
print ("\nMean:", tlen_sample.mean())
print ("\nMedian:", tlen_sample.median())
df.describe(include="all")
file = "../data/abundance.tsv"
df = pd.read_table(file, index_col=0)
df.loc[['ENST00000487368.4', 'ENST00000623229.1', 'ENST00000444276.1', 'ENST00000612487.4', 'ENST00000556673.2', 'ENST00000623191.1']]
df[["est_counts", "tpm"]].head(10)
df[(df.tpm > 10000)]
df = df[(df.est_counts > 1000) & (df.tpm > 1000)]
df = df.sort_values("eff_length")
df.head(10)
df1 = pd.read_table("../data/abundance.tsv")
df2 = pd.read_table("../data/gene_symbol_to_transcript_id.tsv", names=["transcript_id", "gene_symbol"])
df3 = pd.merge(left=df1, right=df2, left_on="target_id", right_on="transcript_id", how="inner")
df3 = df3.sort_values("transcript_id")
df3 = df3.reset_index(drop=True)
df3.drop(["target_id"], axis=1)
df3.head()
print ("\x47\x6f\x6f\x64 \x4c\x75\x63\x6b")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'dwd', 'sandbox-3', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. NUMPY
Step2: Define a new 2x3 array named my_array2 with [1, 2, 3] in the first row and [4,5,6] in the second.
Step3: Until now, we have created arrays by defining their elements. But you can also create them by defining a range
Step4: Check the functions np.linspace, np.logspace and np.meshgrid which let you create more sophisticated ranges
Step5: 1.b Elementwise operations
Step6: Compare this with operations over python lists
Step7: 1.c Indexing numpy arrays
Step8: One important thing to consider when you do slicing is the dimensions of the output array. Run the following cell and check the shape of my_array3. Check also its dimension with the ndim function
Step9: If you have correctly computed it, you will see that my_array3 is one dimensional. Sometimes this can be a problem when you are working with 2D matrices (and vectors can be considered as 2D matrices with one of the sizes equal to 1). To solve this, numpy provides the newaxis constant.
Step10: Check again the shape and dimension of my_array3
Step11: When you try to index different rows and columns of a matrix you have to define it element by element. For example, consider that we want to select elements of rows [0, 3] and columns [0, 2], we have to define the row 0 index for each column to be selected....
Step12: To make this easier, we can use the ix_ function which automatically creates all the needed indexes
Step13: Another important array manipulation method is array concatenation or stacking. It is useful to always state explicitly in which direction we want to stack the arrays. In the following example we are concatenating the arrays column-wise (side by side) by passing axis=1.
Step14: EXERCISE
Step15: Numpy also includes the functions hstack() and vstack() to concatenate by columns or rows, respectively.
Step16: 1.e Matrix multiplications
Step17: EXERCISE
Step18: 1.f Other useful functions
Step19: Compute the maximum, minimum, or even the positions of the maximum or minimum
Step20: Sort a vector
Step21: Calculate some statistical parameters
Step22: Obtain random numbers
Step23: In addition to numpy we have a more advanced library for scientific computing, scipy. Scipy includes modules for linear algebra, signal processing, fourier transform, ...
Step24: 3. Classification example
Step25: In the previous code we have saved the features in matrix X and the class labels in the vector labels. Both are 2D numpy arrays.
Step26: According to this plot, which classes seem more difficult to distinguish?
Step27: Take the columns (5,6,17) of the data and save them in a matrix X_com. This will be our input data. Convert this array into a float array. The shape should be (1994,3)
Step28: EXERCISE
Step29: 4.3 Train/Test splitting
Step30: 4.4 Normalization
Step31: 4.5 Training
Step32: 4.6 Prediction and evaluation
Step33: 4.7 Saving the results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# The line above is needed to include the figures in this notebook, you can remove it if you work with a normal script
import numpy as np
import csv
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
my_array = np.array([[1, 2],[3, 4]])
print my_array
print np.shape(my_array)
my_array2 = np.array([[1, 2, 3],[4, 5, 6]])
print my_array2
print np.shape(my_array2)
my_new_array = np.arange(3,11,2)
print my_new_array
A1 = np.zeros((3,4))
print A1
A2 = np.ones((2,6))
print A2
A3 = np.eye(5)
print A3
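# Added illustration (not part of the original lab): the range-creation helpers
# mentioned above (np.linspace, np.logspace and np.meshgrid) are standard numpy functions.
print np.linspace(0, 1, 5)    # 5 evenly spaced points between 0 and 1
print np.logspace(0, 3, 4)    # [1, 10, 100, 1000], logarithmically spaced
xx, yy = np.meshgrid(np.arange(3), np.arange(2))
print xx                      # coordinate grids, useful to evaluate functions of two variables
print yy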
a = np.array([0,1,2,3,4,5])
print a*2
print a**2
[1,2,3,4,5]*2
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
print x[1:7:2] # start:stop:step
print x[-2:10] # confusing, avoid negative values...
print x[8:10] # equivalent
print x[-3:3:-1] # confusing, avoid negative values...
print x[7:3:-1] # equivalent
print x[:7] # when start value is not indicated, it takes the first
print x[5:] # when stop value is not indicated, it takes the last
print x[:] # select "from first to last" == "all"
my_array = np.array([[1, 2],[3, 4]])
my_array3 = my_array[:,1]
print my_array3
print my_array[1,0:2]
print my_array3.shape
print my_array3.ndim
my_array3 = my_array3[:,np.newaxis]
print my_array3.shape
print my_array3.ndim
x = np.array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]])
# We want to select elements of rows [0, 3] and columns [0, 2]
rows = np.array([[0, 0],[3, 3]], dtype=np.intp)
columns = np.array([[0, 2],[0, 2]], dtype=np.intp)
print x[rows, columns]
# With ix_
rows = np.array([0, 3], dtype=np.intp)
columns = np.array([0, 2], dtype=np.intp)
print np.ix_(rows, columns)
print x[np.ix_(rows, columns)]
my_array = np.array([[1, 2],[3, 4]])
my_array2 = np.array([[11, 12],[13, 14]])
print np.concatenate( (my_array, my_array2) , axis=1) # columnwise concatenation
print <COMPLETAR>
print <COMPLETAR>
x=np.array([1,2,3])
y=np.array([1,2,3])
print x*y #Element-wise
print np.multiply(x,y) #Element-wise
print sum(x*y) # dot product
print np.dot(x,y) #Fast matrix product
x=[1,2,3]
dot_product_x = <COMPLETAR>
print dot_product_x
x = np.array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]])
print x
print np.where(x>4)
print np.nonzero(x>4)
print a.argmax(axis=0)
print a.max(axis=0)
# a.min(axis=0), a.argmin(axis=0)
a = np.array([[1,4], [3,1]])
print a
a.sort(axis=1)
print a
a.sort(axis=0)
b = a
print b
x = np.array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]])
print x.mean(axis=0)
print x.var(axis=0)
print x.std(axis=0)
np.random.seed(0)
perm = np.random.permutation(100)
perm[:10]
t = np.arange(0.0, 1.0, 0.05)
a1 = np.sin(2*np.pi*t)
a2 = np.sin(4*np.pi*t)
plt.figure()
ax1 = plt.subplot(211)
ax1.plot(t,a1)
plt.xlabel('t')
plt.ylabel('a_1(t)')
ax2 = plt.subplot(212)
ax2.plot(t,a2, 'r.')
plt.xlabel('t')
plt.ylabel('a_2(t)')
plt.show()
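# Added illustration (not part of the original lab): scipy, mentioned above, bundles
# higher-level numerical routines such as linear algebra and Fourier transforms.
from scipy import linalg, fftpack
A = np.array([[3., 1.], [1., 2.]])
b = np.array([9., 8.])
print linalg.solve(A, b)                         # solves the linear system A x = b
print fftpack.fft(np.array([1., 0., -1., 0.]))   # discrete Fourier transform of a short signal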
# Open up the csv file in to a Python object
csv_file_object = csv.reader(open('data/iris_data.csv', 'rb'))
datalist = [] # Create a variable called 'data'.
for row in csv_file_object: # Run through each row in the csv file,
datalist.append(row) # adding each row to the data variable
data = np.array(datalist) # Then convert from a list to an array
# Be aware that each item is currently
# a string in this format
print np.shape(data)
X = data[:,0:-1]
label = data[:,-1,np.newaxis]
print X.shape
print label.shape
x = X[:,0:2]
#print len(set(list(label)))
list_label = [l[0] for l in label]
labels = list(set(list_label))
colors = ['bo', 'ro', 'go']
#print list_label
plt.figure()
for i, l in enumerate(labels):
pos = np.where(np.array(list_label) == l)
plt.plot(x[pos,0], x[pos,1], colors[i])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
csv_file_object = csv.reader(open('communities.csv', 'rb'))
datalist = []
for row in csv_file_object:
datalist.append(row)
data = np.array(datalist)
print np.shape(data)
X_com = <COMPLETAR>
Nrow = np.shape(data)[0]
Ncol = np.shape(data)[1]
print X_com.shape
y_com = <COMPLETAR>
print y_com.shape
plt.figure()
plt.plot(<COMPLETAR>, 'bo')
plt.xlabel('X_com[0]')
plt.ylabel('y_com')
plt.figure()
plt.plot(<COMPLETAR>, 'ro')
plt.xlabel('X_com[1]')
plt.ylabel('y_com')
plt.figure()
plt.plot(<COMPLETAR>, 'go')
plt.xlabel('X_com[2]')
plt.ylabel('y_com')
from sklearn.cross_validation import train_test_split
Random_state = 131
X_train, X_test, y_train, y_test = train_test_split(<COMPLETAR>, <COMPLETAR>, test_size=<COMPLETAR>, random_state=Random_state)
print X_train.shape
print X_test.shape
print y_train.shape
print y_test.shape
print "Values before normalizing:\n"
print <COMPLETAR>.mean(axis=0)
print X_test.<COMPLETAR>
print <COMPLETAR>.std(axis=0)
print X_test.<COMPLETAR>
# from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(<COMPLETAR>) # computes mean and std using the train dataset
X_train_normalized = scaler.transform(<COMPLETAR>) # applies the normalization to train
X_test_normalized = scaler.transform(<COMPLETAR>) # applies the normalization to test
print "\nValues after normalizing:\n"
print <COMPLETAR>
print <COMPLETAR>
print <COMPLETAR>
print <COMPLETAR>
from sklearn import neighbors
knn1_model = neighbors.KNeighborsRegressor(<COMPLETAR>)
knn1_model.fit(<COMPLETAR>.astype(np.float), <COMPLETAR>.astype(np.float))
knn7_model = neighbors.KNeighborsRegressor(<COMPLETAR>)
knn7_model.fit(<COMPLETAR>.astype(np.float), <COMPLETAR>.astype(np.float))
print knn1_model
print knn7_model
y_predict_1 = knn1_model.predict(<COMPLETAR>.astype(np.float))
mse1 = <COMPLETAR>
print " The MSE value for model1 is %f\n " % mse1
y_predict_7 = knn7_model.predict(<COMPLETAR>.astype(np.float))
mse7 = <COMPLETAR>
print " The MSE value for model7 is %f\n " % mse7
print "First 5 prediction values with model 1:\n"
print <COMPLETAR>
print "\nFirst 5 prediction values with model 7:\n"
print <COMPLETAR>
y_pred = y_predict_1.squeeze()
csv_file_object = csv.writer(open('output.csv', 'wb'))
for index, y_aux in enumerate(<COMPLETAR>): # Run through each row in the csv file,
csv_file_object.writerow([index,y_aux])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Settings
Step2: Dataset Preparation
Step3: Your Turn
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import datetime
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from numpy import nan
now = datetime.datetime.now
batch_size = 128
nb_classes = 5
nb_epoch = 5
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
pool_size = 2
# convolution kernel size
kernel_size = 3
if K.image_data_format() == 'channels_first':
input_shape = (1, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, 1)
def train_model(model, train, test, nb_classes):
X_train = train[0].reshape((train[0].shape[0],) + input_shape)
X_test = test[0].reshape((test[0].shape[0],) + input_shape)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(train[1], nb_classes)
Y_test = np_utils.to_categorical(test[1], nb_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
t = now()
model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1,
validation_data=(X_test, Y_test))
print('Training time: %s' % (now() - t))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# create two datasets one with digits below 5 and one with 5 and above
X_train_lt5 = X_train[y_train < 5]
y_train_lt5 = y_train[y_train < 5]
X_test_lt5 = X_test[y_test < 5]
y_test_lt5 = y_test[y_test < 5]
X_train_gte5 = X_train[y_train >= 5]
y_train_gte5 = y_train[y_train >= 5] - 5 # make classes start at 0 for
X_test_gte5 = X_test[y_test >= 5] # np_utils.to_categorical
y_test_gte5 = y_test[y_test >= 5] - 5
# define two groups of layers: feature (convolutions) and classification (dense)
feature_layers = [
Convolution2D(nb_filters, kernel_size, kernel_size,
border_mode='valid',
input_shape=input_shape),
Activation('relu'),
Convolution2D(nb_filters, kernel_size, kernel_size),
Activation('relu'),
MaxPooling2D(pool_size=(pool_size, pool_size)),
Dropout(0.25),
Flatten(),
]
classification_layers = [
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(nb_classes),
Activation('softmax')
]
# create complete model
model = Sequential(feature_layers + classification_layers)
# train model for 5-digit classification [0..4]
train_model(model,
(X_train_lt5, y_train_lt5),
(X_test_lt5, y_test_lt5), nb_classes)
# freeze feature layers and rebuild model
for l in feature_layers:
l.trainable = False
# transfer: train dense layers for new classification task [5..9]
train_model(model,
(X_train_gte5, y_train_gte5),
(X_test_gte5, y_test_gte5), nb_classes)
## your code here
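# A possible sketch for the "Your Turn" exercise (this is an assumption about the task:
# repeat the experiment in the opposite direction, i.e. pre-train the convolutional
# features on digits 5-9, then freeze them and retrain the dense layers on digits 0-4).
feature_layers_rev = [
    Convolution2D(nb_filters, kernel_size, kernel_size,
                  border_mode='valid',
                  input_shape=input_shape),
    Activation('relu'),
    Convolution2D(nb_filters, kernel_size, kernel_size),
    Activation('relu'),
    MaxPooling2D(pool_size=(pool_size, pool_size)),
    Dropout(0.25),
    Flatten(),
]
classification_layers_rev = [
    Dense(128),
    Activation('relu'),
    Dropout(0.5),
    Dense(nb_classes),
    Activation('softmax')
]
model_rev = Sequential(feature_layers_rev + classification_layers_rev)
# pre-train on digits 5..9
train_model(model_rev,
            (X_train_gte5, y_train_gte5),
            (X_test_gte5, y_test_gte5), nb_classes)
# freeze the feature layers and transfer to digits 0..4
for l in feature_layers_rev:
    l.trainable = False
train_model(model_rev,
            (X_train_lt5, y_train_lt5),
            (X_test_lt5, y_test_lt5), nb_classes)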
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's integrate this system for 100 orbital periods.
Step2: Rebound exits the integration routine normally. We can now explore the final particle orbits
Step3: We see that the orbits of both planets changed significantly and we can already speculate that there was a close encounter.
Step4: As you see, we got an exception! Let's redo the simulation once again and store the particle distance while we're integrating. This time we'll also catch the exception with a try/except construct so that our script doesn't break.
Step5: Let's plot the distance as a function of time.
Step6: We did indeed find the close encounter correctly. We could now do something with the two particles that collided.
|
<ASSISTANT_TASK:>
Python Code:
import rebound
import numpy as np
def setupSimulation():
rebound.reset()
rebound.integrator = "ias15" # IAS15 is the default integrator, so we don't need this line
rebound.add(m=1.)
rebound.add(m=1e-3,a=1.)
rebound.add(m=1e-3,a=1.25)
rebound.move_to_com()
setupSimulation()
rebound.integrate(100.*2.*np.pi)
for o in rebound.calculate_orbits():
print(o)
setupSimulation() # Resets everything
rebound.integrate(100.*2.*np.pi, minD=0.2)
setupSimulation() # Resets everything
Noutputs = 1000
times = np.linspace(0,100.*2.*np.pi,Noutputs)
distances = np.zeros(Noutputs)
ps = rebound.particles # ps is now an array of pointers. It will update as the simulation runs.
try:
for i,time in enumerate(times):
rebound.integrate(time,minD=0.2)
dx = ps[1].x - ps[2].x
dy = ps[1].y - ps[2].y
dz = ps[1].z - ps[2].z
distances[i] = np.sqrt(dx*dx+dy*dy+dz*dz)
except rebound.CloseEncounter as e:
print("Close encounter detected at t=%f, between particles %d and %d." % (rebound.t, e.id1, e.id2))
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10,5))
ax = plt.subplot(111)
ax.set_xlabel("time [orbits]")
ax.set_xlim([0,rebound.t/(2.*np.pi)])
ax.set_ylabel("distance")
plt.plot(times/(2.*np.pi), distances);
plt.plot([0.0,12],[0.2,0.2]) # Plot our close encounter criteria;
import copy
def mergeParticles(id1,id2):
old_ps = rebound.particles
new_ps = []
for i in range(rebound.N):
if i!=id1 and i!=id2:
new_ps.append(copy.deepcopy(old_ps[i]))
mergedPlanet = rebound.Particle()
mergedPlanet.m = old_ps[id1].m + old_ps[id2].m
mergedPlanet.x = (old_ps[id1].m*old_ps[id1].x + old_ps[id2].m*old_ps[id2].x) /mergedPlanet.m
mergedPlanet.y = (old_ps[id1].m*old_ps[id1].y + old_ps[id2].m*old_ps[id2].y) /mergedPlanet.m
mergedPlanet.z = (old_ps[id1].m*old_ps[id1].z + old_ps[id2].m*old_ps[id2].z) /mergedPlanet.m
mergedPlanet.vx = (old_ps[id1].m*old_ps[id1].vx + old_ps[id2].m*old_ps[id2].vx)/mergedPlanet.m
mergedPlanet.vy = (old_ps[id1].m*old_ps[id1].vy + old_ps[id2].m*old_ps[id2].vy)/mergedPlanet.m
mergedPlanet.vz = (old_ps[id1].m*old_ps[id1].vz + old_ps[id2].m*old_ps[id2].vz)/mergedPlanet.m
new_ps.append(mergedPlanet)
del(rebound.particles)
rebound.add(new_ps)
setupSimulation() # Resets everything
print("Number of particles at the beginning of the simulation: %d."%rebound.N)
for i,time in enumerate(times):
try:
rebound.integrate(time,minD=0.2)
except rebound.CloseEncounter as e:
print("Close encounter detected at t=%f, between particles %d and %d. Merging." % (rebound.t, e.id1, e.id2))
mergeParticles(e.id1,e.id2)
print("Number of particles at the end of the simulation: %d."%rebound.N)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we'll load our data set.
Step2: Examine the data
Step3: This data is at the city block level, so these features reflect the total number of rooms in that block, or the total number of people who live on that block, respectively. Let's create a different, more appropriate feature. Because we are predicting the price of a single house, we should try to make all our features correspond to a single house as well
Step4: Build a custom estimator linear regressor
Step5: Challenge Exercise
|
<ASSISTANT_TASK:>
Python Code:
import math
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
df = pd.read_csv("https://storage.googleapis.com/ml_universities/california_housing_train.csv", sep = ",")
df.head()
df.describe()
df['num_rooms'] = df['total_rooms'] / df['households']
df['num_bedrooms'] = df['total_bedrooms'] / df['households']
df['persons_per_house'] = df['population'] / df['households']
df.describe()
df.drop(['total_rooms', 'total_bedrooms', 'population', 'households'], axis = 1, inplace = True)
df.describe()
# Define feature columns
feature_columns = {
colname : tf.feature_column.numeric_column(colname) \
for colname in ['housing_median_age','median_income','num_rooms','num_bedrooms','persons_per_house']
}
# Bucketize lat, lon so it's not so high-res; California is mostly N-S, so more lats than lons
feature_columns['longitude'] = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('longitude'), np.linspace(-124.3, -114.3, 5).tolist())
feature_columns['latitude'] = tf.feature_column.bucketized_column(tf.feature_column.numeric_column('latitude'), np.linspace(32.5, 42, 10).tolist())
# Split into train and eval and create input functions
msk = np.random.rand(len(df)) < 0.8
traindf = df[msk]
evaldf = df[~msk]
SCALE = 100000
BATCH_SIZE=128
train_input_fn = tf.estimator.inputs.pandas_input_fn(x = traindf[list(feature_columns.keys())],
y = traindf["median_house_value"] / SCALE,
num_epochs = None,
batch_size = BATCH_SIZE,
shuffle = True)
eval_input_fn = tf.estimator.inputs.pandas_input_fn(x = evaldf[list(feature_columns.keys())],
y = evaldf["median_house_value"] / SCALE, # note the scaling
num_epochs = 1,
batch_size = len(evaldf),
shuffle=False)
# Create the custom estimator
def custom_estimator(features, labels, mode, params):
# 0. Extract data from feature columns
input_layer = tf.feature_column.input_layer(features, params['feature_columns'])
# 1. Define Model Architecture
predictions = tf.layers.dense(input_layer,1,activation=None)
# 2. Loss function, training/eval ops
if mode == tf.estimator.ModeKeys.TRAIN or mode == tf.estimator.ModeKeys.EVAL:
labels = tf.expand_dims(tf.cast(labels, dtype=tf.float32), -1)
loss = tf.losses.mean_squared_error(labels, predictions)
optimizer = tf.train.FtrlOptimizer(learning_rate=0.2)
train_op = optimizer.minimize(
loss = loss,
global_step = tf.train.get_global_step())
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(labels*SCALE, predictions*SCALE)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"predicted": predictions}
# 4. Create export outputs
export_outputs = {"regression_export_outputs": tf.estimator.export.RegressionOutput(value = predictions)}
# 5. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs)
# Create serving input function
def serving_input_fn():
feature_placeholders = {
colname : tf.placeholder(tf.float32, [None]) for colname in 'housing_median_age,median_income,num_rooms,num_bedrooms,persons_per_house'.split(',')
}
feature_placeholders['longitude'] = tf.placeholder(tf.float32, [None])
feature_placeholders['latitude'] = tf.placeholder(tf.float32, [None])
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Create custom estimator's train and evaluate function
def train_and_evaluate(output_dir):
estimator = tf.estimator.Estimator(
model_fn = custom_estimator,
model_dir = output_dir,
params={'feature_columns': list(feature_columns.values())})
train_spec = tf.estimator.TrainSpec(input_fn = train_input_fn,
max_steps = 1000)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(input_fn = eval_input_fn,
steps = None,
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
#Run Training
OUTDIR = 'custom_estimator_trained_model'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
train_and_evaluate(OUTDIR)
def custom_estimator(features, labels, mode, params):
# 0. Extract data from feature columns
input_layer = tf.feature_column.input_layer(features, params['feature_columns'])
# 1. Define Model Architecture
    hidden = tf.layers.dense(input_layer, 10, activation=tf.nn.relu)   # hidden layer feeds the output layer
    predictions = tf.layers.dense(hidden, 1, activation=None)
.....REST AS BEFORE
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I. Python Overview
Step2: (If you're typing this into an IPython notebook, or otherwise using notebook file, you hit shift-Enter to evaluate a cell.)
Step3: or you can simply import the math library itself
Step4: You can define variables using the equals (=) sign
Step5: You can name a variable almost anything you want. It needs to start with an alphabetical character or "_", and can contain alphanumeric characters plus underscores ("_"). Certain words, however, are reserved for the language
Step6: The Python Tutorial has more on using Python as an interactive shell. The IPython tutorial makes a nice complement to this, since IPython has a much more sophisticated interactive shell.
Step7: or double quotes
Step8: Just like the other two data objects we're familiar with (ints and floats), you can assign a string to a variable
Step9: The print statement is often used for printing character strings
Step10: But it can also print data types other than strings
Step11: In the above snippet, the number stored in the variable "area" is converted into a string before being printed out.
Step12: If you have a lot of words to concatenate together, there are other, more efficient ways to do this. But this is fine for linking a few strings together.
Step13: You can access members of the list using the index of that item
Step14: Python lists, like C, but unlike Fortran, use 0 as the index of the first element of a list. Thus, in this example, the 0 element is "Sunday", 1 is "Monday", and so on. If you need to access the nth element from the end of the list, you can use a negative index. For example, the -1 element of a list is the last element
Step15: You can add additional items to the list using the .append() command
Step16: The range() command is a convenient way to make sequential lists of numbers
Step17: Note that range(n) starts at 0 and gives the sequential list of integers less than n. If you want to start at a different number, use range(start,stop)
Step18: The lists created above with range have a step of 1 between elements. You can also give a fixed step size via a third command
Step19: Lists do not have to hold the same data type. For example,
Step20: However, it's good (but not essential) to use lists for similar objects that are somehow logically connected. If you want to group different data types together into a composite data object, it's best to use tuples, which we will learn about below.
Step21: Iteration, Indentation, and Blocks
Step22: This code snippet goes through each element of the list called days_of_the_week and assigns it to the variable day. It then executes everything in the indented block (in this case only one line of code, the print statement) using those variable assignments. When the program has gone through every element of the list, it exits the block.
Step23: The range() command is particularly useful with the for statement to execute loops of a specified length
Step24: Slicing
Step25: This is only occasionally useful. Slightly more useful is the slicing operation, which you can also use on any sequence. We already know that we can use indexing to get the first element of a list
Step26: If we want the list containing the first two elements of a list, we can do this via
Step27: or simply
Step28: If we want the last items of the list, we can do this with negative slicing
Step29: which is somewhat logically consistent with negative indices accessing the last elements of the list.
Step30: Since strings are sequences, you can also do this to them
Step31: If we really want to get fancy, we can pass a third element into the slice, which specifies a step length (just like a third argument to the range() function specifies the step)
Step32: Note that in this example I was even able to omit the second argument, so that the slice started at 2, went to the end of the list, and took every second element, to generate the list of even numbers less than 40.
Step33: (Quick quiz
Step34: If we evaluate it by itself, as we just did, we see that it returns a boolean value, False. The "==" operator performs equality testing. If the two items are equal, it returns True, otherwise it returns False. In this case, it is comparing two variables, the string "Sunday", and whatever is stored in the variable "day", which, in this case, is the other string "Saturday". Since the two strings are not equal to each other, the truth test has the false value.
Step35: We see a few other boolean operators here, all of which should be self-explanatory. Less than, equality, non-equality, and so on.
Step36: We can do boolean tests on lists as well
Step37: Finally, note that you can also string multiple comparisons together, which can result in very intuitive tests
Step38: If statements can have elif parts ("else if"), in addition to if/else parts. For example
Step39: Of course we can combine if statements with for loops, to make a snippet that is almost interesting
Step40: This is something of an advanced topic, but ordinary data types have boolean values associated with them, and, indeed, in early versions of Python there was not a separate boolean object. Essentially, anything that was a 0 value (the integer or floating point 0, an empty string "", or an empty list []) was False, and everything else was true. You can see the boolean value of any data object using the bool() function.
Step41: Code Example
Step42: Let's go through this line by line. First, we define the variable n, and set it to the integer 20. n is the length of the sequence we're going to form, and should probably have a better variable name. We then create a variable called sequence, and initialize it to the list with the integers 0 and 1 in it, the first two elements of the Fibonacci sequence. We have to create these elements "by hand", since the iterative part of the sequence requires two previous elements.
Step43: We can now call fibonacci() for different sequence_lengths
Step44: We've introduced several new features here. First, note that the function itself is defined as a code block (a colon followed by an indented block). This is the standard way that Python delimits things. Next, note that the first line of the function is a single string. This is called a docstring, and is a special kind of comment that is often available to people using the function through the python command line
Step45: If you define a docstring for all of your functions, it makes it easier for other people to use them, since they can get help on the arguments and return values of the function.
Step46: Tuples are like lists, in that you can access the elements using indices
Step47: However, tuples are immutable, you can't append to them or change the elements of them
Step48: Tuples are useful anytime you want to group different pieces of data together in an object, but don't want to create a full-fledged class (see below) for them. For example, let's say you want the Cartesian coordinates of some objects in your program. Tuples are a good way to do this
Step49: Again, it's not a necessary distinction, but one way to distinguish tuples and lists is that tuples are a collection of different things, here a name, and x and y coordinates, whereas a list is a collection of similar things, like if we wanted a list of those coordinates
Step50: Tuples can be used when functions return more than one value. Say we wanted to compute the smallest x- and y-coordinates of the above list of objects. We could write
Step51: Dictionaries are an object called "mappings" or "associative arrays" in other languages. Whereas a list associates an integer index with a set of objects
Step52: The index in a dictionary is called the key, and the corresponding dictionary entry is the value. A dictionary can use (almost) anything as the key. Whereas lists are formed with square brackets [], dictionaries use curly brackets {}
Step53: There's also a convenient way to create dictionaries without having to quote the keys.
Step54: The len() command works on both tuples and dictionaries
Step55: Conclusion of the Python Overview
Step56: No matter how experienced a programmer you are, these are words to meditate on.
Step57: size of the array
Step58: To build matrices, you can either use the array command with lists of lists
Step59: Add a column of ones to mat
Step60: size of a matrix
Step61: You can also form empty (zero) matrices of arbitrary shape (including vectors, which Numpy treats as vectors with one row), using the zeros command
Step62: There's also an identity command that behaves as you'd expect
Step63: as well as a ones command.
Step64: If you provide a third argument, it takes that as the number of points in the space. If you don't provide the argument, it gives a length 50 linear space.
Step65: linspace is an easy way to make coordinates for plotting. Functions in the numpy library (all of which are imported into IPython notebook) can act on an entire vector (or even a matrix) of points at once. Thus,
Step66: In conjunction with matplotlib, this is a nice way to plot things
Step67: Matrix operations
Step68: as well as when you add two matrices together. (However, the matrices have to be the same shape.)
Step69: Something that confuses Matlab users is that the times (*) operator gives element-wise multiplication rather than matrix multiplication
Step70: To get matrix multiplication, you need the dot command
Step71: dot can also do dot products (duh!)
Step72: as well as matrix-vector products.
Step73: There's also a diag() function that takes a list or a vector and puts it along the diagonal of a square matrix.
Step75: We'll find this useful later on.
Step76: There's a section below on parsing CSV data. We'll steal the parser from that. For an explanation, skip ahead to that section. Otherwise, just assume that this is a way to parse that text into a numpy array that we can plot and do other analyses with.
Step77: Since we expect the data to have an exponential decay, we can plot it using a semi-log plot.
Step78: For a pure exponential decay like this, we can fit the log of the data to a straight line. The above plot suggests this is a good approximation. Given a function
Step79: Let's see whether this curve fits the data.
Step81: If we have more complicated functions, we may not be able to get away with fitting to a simple polynomial. Consider the following data
Step82: This data looks more Gaussian than exponential. If we wanted to, we could use polyfit for this as well, but let's use the curve_fit function from Scipy, which can fit to arbitrary functions. You can learn more using help(curve_fit).
Step83: Now fit to it using curve_fit
Step84: The curve_fit routine we just used is built on top of a very good general minimization capability in Scipy. You can learn more at the scipy documentation pages.
Step85: random() uses the Mersenne Twister algorithm, which is a highly regarded pseudorandom number generator. There are also functions to generate random integers, to randomly shuffle a list, and functions to pick random numbers from a particular distribution, like the normal distribution
Step86: It is generally more efficient to generate a list of random numbers all at once, particularly if you're drawing from a non-uniform distribution. Numpy has functions to generate vectors and matrices of particular types of random distributions.
Step87: III. Introduction to Pandas
Step88: Series
Step89: Get the array representation of a Series
Step90: Index objects are immutable and hold the axis labels and metadata such as names and axis names.
Step91: Create a Series with a custom index
Step92: Get a value from a Series
Step93: Get a set of values from a Series by passing in a list
Step94: Get values greater than 0
Step95: Scalar multiply
Step96: Apply a numpy math function
Step97: A Series is like a fixed-length, ordered dict.
Step98: Re-order a Series by passing in an index (indices not found are NaN)
Step99: Check for NaN with the pandas method
Step100: Check for NaN with the Series method
Step101: Series automatically aligns differently indexed data in arithmetic operations
Step102: Name a Series
Step103: Name a Series index
Step104: Rename a Series' index in place
Step105: DataFrame
Step106: Like Series, columns that are not present in the data are NaN
Step107: Retrieve a column by key, returning a Series
Step108: Retrieve a column by attribute, returning a Series
Step109: Retrieve a row by position
Step110: Update a column by assignment
Step111: Assign a Series to a column (note if assigning a list or array, the length must match the DataFrame, unlike a Series)
Step112: Assign a new column that doesn't exist to create a new column
Step113: Delete a column
Step114: Transpose the DataFrame
Step115: Create a DataFrame from a nested dict of dicts (the keys in the inner dicts are unioned and sorted to form the index in the result, unless an explicit index is specified)
Step116: Create a DataFrame from a dict of Series
Step117: Set the DataFrame index name
Step118: Set the DataFrame columns name
Step119: Return the data contained in a DataFrame as a 2D ndarray
Step120: If the columns are different dtypes, the 2D ndarray's dtype will accommodate all of the columns
Step121: Reindexing
Step122: Reindexing rows returns a new frame with the specified index
Step123: Reindex columns
Step124: Dropping Entries
Step125: Indexing, Selecting, Filtering
Step126: Select specified columns from a DataFrame
Step127: Select a slice from a DataFrame
Step128: Select from a DataFrame based on a filter
Step129: Select a slice of rows from a specific column of a DataFrame
Step130: Arithmetic and Data Alignment
Step131: Set a fill value instead of NaN for indices that do not overlap
Step132: Like NumPy, pandas supports arithmetic operations between DataFrames and Series.
Step133: Match the index of the Series on the DataFrame's columns, broadcasting down the rows and union the indices that do not match
Step134: Function Application and Mapping
Step135: Apply a function on 1D arrays to each column
Step136: Apply a function on 1D arrays to each row
Step137: Apply an element-wise Python function to a DataFrame
Step138: Sorting
Step139: Sort a DataFrame by its index
Step140: Sort a DataFrame by columns in descending order
Step141: Sort a DataFrame's values by column
Step142: Summarizing and Computing Descriptive Statistics
Step143: Sum and Mean
Step144: Descriptive analysis
Step145: Pivot tables
|
<ASSISTANT_TASK:>
Python Code:
import sys
print('Python version:', sys.version)
import IPython
print('IPython:', IPython.__version__)
import numpy
print('numpy:', numpy.__version__)
import scipy
print('scipy:', scipy.__version__)
import matplotlib
print('matplotlib:', matplotlib.__version__)
import pandas
print('pandas:', pandas.__version__)
import sklearn
print('scikit-learn:', sklearn.__version__)
2+2
(50-5*6)/4
sqrt(81)
from math import sqrt
sqrt(81)
import math
math.sqrt(81)
radius = 20
pi = math.pi
area = pi * radius ** 2
area
return = 0
'Hello, World!'
"Hello, World!"
greeting = "Hello, World!"
print(greeting)
print("The area is " + area)
print("The area is " + str(area))
statement = "Hello, " + "World!"
print(statement)
days_of_the_week = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
days_of_the_week[2]
days_of_the_week[-1]
languages = ["Fortran","C","C++"]
languages.append("Python")
print(languages)
list(range(10))
list(range(2,8))
evens = list(range(0,20,2))
evens
evens[3]
["Today",7,99.3,""]
help(len)
len(evens)
for day in days_of_the_week:
print(day)
for day in days_of_the_week:
statement = "Today is " + day
print(statement)
for i in range(20):
print("The square of ",i," is ",i*i)
for letter in "Sunday":
print(letter)
days_of_the_week[0]
days_of_the_week[0:2]
days_of_the_week[:2]
days_of_the_week[-2:]
workdays = days_of_the_week[1:6]
print(workdays)
day = "Sunday"
abbreviation = day[:3]
print(abbreviation)
numbers = list(range(0,40))
evens = numbers[2::2]
evens
if day == "Sunday":
print("Sleep in")
else:
print("Go to work")
day == "Sunday"
1 == 2
50 == 2*25
3 < 3.14159
1 == 1.0
1 != 0
1 <= 2
1 >= 1
1 is 1.0
[1,2,3] == [1,2,4]
[1,2,3] < [1,2,4]
hours = 5
0 < hours < 24
if day == "Sunday":
print("Sleep in")
elif day == "Saturday":
print("Do chores")
else:
print("Go to work")
for day in days_of_the_week:
statement = "Today is " + day
print(statement)
if day == "Sunday":
print(" Sleep in")
elif day == "Saturday":
print(" Do chores")
else:
print(" Go to work")
bool(1)
bool(0)
bool(["This "," is "," a "," list"])
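# Added illustration: as described above, empty containers and the empty string are "falsy".
bool("")
bool([])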
n = 20
sequence = [0,1]
for i in range(2,n): # This is going to be a problem if we ever set n <= 2!
sequence.append(sequence[i-1]+sequence[i-2])
print(sequence)
def fibonacci(sequence_length):
"Return the Fibonacci sequence of length *sequence_length*"
sequence = [0,1]
if sequence_length < 1:
print("Fibonacci sequence only defined for length 1 or greater")
return
if 0 < sequence_length < 3:
return sequence[:sequence_length]
for i in range(2,sequence_length):
sequence.append(sequence[i-1]+sequence[i-2])
return sequence
fibonacci(2)
fibonacci(12)
help(fibonacci)
t = (1,2,'hi',9.0)
t
t[1]
t.append(7)
t[1]=77
('Bob',0.0,21.0)
positions = [
('Bob',0.0,21.0),
('Cat',2.5,13.1),
('Dog',33.0,1.2)
]
def minmax(objects):
minx = 1e20 # These are set to really big numbers
miny = 1e20
for obj in objects:
name,x,y = obj
if x < minx:
minx = x
if y < miny:
miny = y
return minx,miny
x,y = minmax(positions)
print(x,y)
mylist = [1,2,9,21]
ages = {"Rick": 46, "Bob": 86, "Fred": 21}
print("Rick's age is ",ages["Rick"])
dict(Rick=46,Bob=86,Fred=20)
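# Added illustration: adding a new entry and looping over the key/value pairs of a dictionary.
ages["Lisa"] = 19
for name, age in ages.items():
    print(name, age)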
len(t)
len(ages)
import this
import numpy as np
import scipy as sp
array = np.array([1,2,3,4,5,6])
array
array.shape
mat = np.array([[0,1],[1,0]])
mat
mat2 = np.c_[mat, np.ones(2)]
mat2
mat2.shape
np.zeros((3,3))
np.identity(4)
np.linspace(0,1)
np.linspace(0,1,11)
x = np.linspace(0,2*np.pi)
np.sin(x)
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('ggplot')
plt.plot(x,np.sin(x))
0.125*np.identity(3)
np.identity(2) + np.array([[1,1],[1,2]])
np.identity(2)*np.ones((2,2))
np.dot(np.identity(2),np.ones((2,2)))
v = np.array([3,4])
np.sqrt(np.dot(v,v))
m = np.array([[1,2],[3,4]])
m.T
np.linalg.inv(m)
np.diag([1,2,3,4,5])
raw_data = """\
3.1905781584582433,0.028208609537968457
4.346895074946466,0.007160804747670053
5.374732334047101,0.0046962988461934805
8.201284796573875,0.0004614473299618756
10.899357601713055,0.00005038370219939726
16.295503211991434,4.377451812785309e-7
21.82012847965739,3.0799922117601088e-9
32.48394004282656,1.524776208284536e-13
43.53319057815846,5.5012073588707224e-18
"""
data = []
for line in raw_data.splitlines():
words = line.split(',')
data.append(words)
data = np.array(data, dtype=np.float)
data
data[:, 0]
plt.title("Raw Data")
plt.xlabel("Distance")
plt.plot(data[:,0],data[:,1],'bo')
plt.title("Raw Data")
plt.xlabel("Distance")
plt.semilogy(data[:,0],data[:,1],'bo')
params = sp.polyfit(data[:,0],np.log(data[:,1]),1)
a = params[0]
A = np.exp(params[1])
x = np.linspace(1,45)
plt.title("Raw Data")
plt.xlabel("Distance")
plt.semilogy(data[:,0],data[:,1],'bo')
plt.semilogy(x,A*np.exp(a*x),'b-')
gauss_data = """\
-0.9902286902286903,1.4065274110372852e-19
-0.7566104566104566,2.2504438576596563e-18
-0.5117810117810118,1.9459459459459454
-0.31887271887271884,10.621621621621626
-0.250997150997151,15.891891891891893
-0.1463309463309464,23.756756756756754
-0.07267267267267263,28.135135135135133
-0.04426734426734419,29.02702702702703
-0.0015939015939017698,29.675675675675677
0.04689304689304685,29.10810810810811
0.0840994840994842,27.324324324324326
0.1700546700546699,22.216216216216214
0.370878570878571,7.540540540540545
0.5338338338338338,1.621621621621618
0.722014322014322,0.08108108108108068
0.9926849926849926,-0.08108108108108646
"""
data = []
for line in gauss_data.splitlines():
words = line.split(',')
data.append(words)
data = np.array(data, dtype=np.float)
plt.plot(data[:,0],data[:,1],'bo')
def gauss(x,A,a):
return A*np.exp(a*x**2)
from scipy.optimize import curve_fit
params,conv = curve_fit(gauss,data[:,0],data[:,1])
x = np.linspace(-1,1)
plt.plot(data[:,0],data[:,1],'bo')
A,a = params
plt.plot(x,gauss(x,A,a),'b-')
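# Added illustration (not in the original notes): curve_fit sits on top of the general
# minimizers in scipy.optimize; here `minimize` finds the minimum of a simple quadratic.
from scipy.optimize import minimize
def quadratic(p):
    x, y = p
    return (x - 1)**2 + (y + 2)**2
result = minimize(quadratic, x0=[0.0, 0.0])
print(result.x)   # should be close to [1, -2]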
from random import random
rands = []
for i in range(100):
rands.append(random())
plt.plot(rands)
from random import gauss
grands = []
for i in range(100):
grands.append(gauss(0,1))
plt.plot(grands)
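# Added illustration: the random-integer and shuffling helpers mentioned above.
from random import randint, shuffle
print(randint(1, 6))    # a random integer between 1 and 6, inclusive
deck = list(range(10))
shuffle(deck)           # shuffles the list in place
print(deck)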
plt.plot(np.random.rand(100))
import pandas as pd
import numpy as np
ser_1 = pd.Series([1, 1, 2, -3, -5, 8, 13])
ser_1
ser_1.values
ser_1.index
ser_2 = pd.Series([1, 1, 2, -3, -5], index=['a', 'b', 'c', 'd', 'e'])
ser_2
ser_2[4] == ser_2['e']
ser_2[['c', 'a', 'b']]
ser_2[ser_2 > 0]
ser_2 * 2
np.exp(ser_2)
dict_1 = {'foo' : 100, 'bar' : 200, 'baz' : 300}
ser_3 = pd.Series(dict_1)
ser_3
index = ['foo', 'bar', 'baz', 'qux']
ser_4 = pd.Series(dict_1, index=index)
ser_4
pd.isnull(ser_4)
ser_4.isnull()
ser_3 + ser_4
ser_4.name = 'foobarbazqux'
ser_4.index.name = 'label'
ser_4
ser_4.index = ['fo', 'br', 'bz', 'qx']
ser_4
data_1 = {'state' : ['VA', 'VA', 'VA', 'MD', 'MD'],
'year' : [2012, 2013, 2014, 2014, 2015],
'pop' : [5.0, 5.1, 5.2, 4.0, 4.1]}
df_1 = pd.DataFrame(data_1)
df_1
df_2 = pd.DataFrame(data_1, columns=['year', 'state', 'pop'])
df_2
df_3 = pd.DataFrame(data_1, columns=['year', 'state', 'pop', 'unempl'])
df_3
df_3['state']
df_3.year
df_3.iloc[0]
df_3['unempl'] = np.arange(5)
df_3
unempl = pd.Series([6.0, 6.0, 6.1], index=[2, 3, 4])
df_3['unempl'] = unempl
df_3
df_3['state_dup'] = df_3['state']
df_3
del df_3['state_dup']
df_3
df_3.T
pop = {'VA' : {2013 : 5.1, 2014 : 5.2},
'MD' : {2014 : 4.0, 2015 : 4.1}}
df_4 = pd.DataFrame(pop)
df_4
data_2 = {'VA' : df_4['VA'][1:],
'MD' : df_4['MD'][2:]}
df_5 = pd.DataFrame(data_2)
df_5
df_5.index.name = 'year'
df_5
df_5.columns.name = 'state'
df_5
df_5.values
df_3.values
df_3
df_3.reindex(list(reversed(range(0, 6))))
df_3.reindex(columns=['state', 'pop', 'unempl', 'year'])
df_7 = df_3.drop([0, 1])
df_7
df_7 = df_7.drop('unempl', axis=1)
df_7
df_3
df_3[['pop', 'unempl']]
df_3[:2]
df_3.iloc[1:3]
df_3[df_3['pop'] > 5]
df_3.loc[0:2, 'pop']
df_3
np.random.seed(0)
df_8 = pd.DataFrame(np.random.rand(9).reshape((3, 3)),
columns=['a', 'b', 'c'])
df_8
np.random.seed(1)
df_9 = pd.DataFrame(np.random.rand(9).reshape((3, 3)),
columns=['b', 'c', 'd'])
df_9
df_8 + df_9
df_10 = df_8.add(df_9, fill_value=0)
df_10
ser_8 = df_10.iloc[0]
df_11 = df_10 - ser_8
df_11
ser_9 = pd.Series(range(3), index=['a', 'd', 'e'])
ser_9
df_11 - ser_9
df_11 = np.abs(df_11)
df_11
df_11.apply(sum)
df_11.apply(sum, axis=1)
def func_3(x):
return '%.2f' %x
df_11.applymap(func_3)
df_12 = pd.DataFrame(np.arange(12).reshape((3, 4)),
index=['three', 'one', 'two'],
columns=['c', 'a', 'b', 'd'])
df_12
df_12.sort_index()
df_12.sort_index(axis=1, ascending=False)
df_12.sort_values(by=['d', 'c'])
df_15 = pd.DataFrame(np.random.randn(10, 3),
columns=['a', 'b', 'c'])
df_15['cat1'] = (np.random.rand(10) * 3).round(0)
df_15['cat2'] = (np.random.rand(10)).round(0)
df_15
df_15.sum()
df_15.sum(axis=1)
df_15.mean(axis=0)
df_15['a'].describe()
df_15['cat1'].value_counts()
pd.pivot_table(df_15, index='cat1', aggfunc=np.mean)
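# Added illustration: a pivot table can also aggregate one column, split over a second category.
pd.pivot_table(df_15, values='a', index='cat1', columns='cat2', aggfunc=np.mean)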
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using TensorBoard
Step2: Using the MNIST dataset as the example, normalize the data and write a function that creates a simple Keras model for classifying the images into 10 classes.
Step3: Using TensorBoard with Keras Model.fit()
Step4: Start TensorBoard, either from the command line or within a notebook experience. The two interfaces are generally the same. In notebooks, use the %tensorboard line magic; on the command line, run the same command without "%".
Step5: (inline screenshot from the original tutorial page; image omitted here)
Step6: The training code follows the advanced quickstart tutorial, but shows how to log metrics to TensorBoard. Choose a loss and an optimizer.
Step7: Create stateful metrics that can be used to accumulate values during training and logged at any point.
Step8: Define the training and test functions.
Step9: Set up summary writers to write the summaries to disk in the logs directory.
Step10: Start training. Use tf.summary.scalar() to log metrics (loss and accuracy) during training/testing within the scope of the summary writers so the summaries are written to disk. You control which metrics to log and how often. Other tf.summary functions enable logging other types of data.
Step11: Open TensorBoard again, this time pointing it at the new log directory. TensorBoard can also be started to monitor training while it is in progress.
Step12: (inline screenshot from the original tutorial page; image omitted here)
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Load the TensorBoard notebook extension
%load_ext tensorboard
import tensorflow as tf
import datetime
# Clear any logs from previous runs
!rm -rf ./logs/
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
def create_model():
return tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation='softmax')
])
model = create_model()
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model.fit(x=x_train,
y=y_train,
epochs=5,
validation_data=(x_test, y_test),
callbacks=[tensorboard_callback])
%tensorboard --logdir logs/fit
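# Added note: from a shell, the equivalent command simply drops the "%" magic prefix:
#   tensorboard --logdir logs/fit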
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test))
train_dataset = train_dataset.shuffle(60000).batch(64)
test_dataset = test_dataset.batch(64)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
# Define our metrics
train_loss = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')
test_loss = tf.keras.metrics.Mean('test_loss', dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy('test_accuracy')
def train_step(model, optimizer, x_train, y_train):
with tf.GradientTape() as tape:
predictions = model(x_train, training=True)
loss = loss_object(y_train, predictions)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
train_loss(loss)
train_accuracy(y_train, predictions)
def test_step(model, x_test, y_test):
predictions = model(x_test)
loss = loss_object(y_test, predictions)
test_loss(loss)
test_accuracy(y_test, predictions)
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
test_log_dir = 'logs/gradient_tape/' + current_time + '/test'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
model = create_model() # reset our model
EPOCHS = 5
for epoch in range(EPOCHS):
for (x_train, y_train) in train_dataset:
train_step(model, optimizer, x_train, y_train)
with train_summary_writer.as_default():
tf.summary.scalar('loss', train_loss.result(), step=epoch)
tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)
for (x_test, y_test) in test_dataset:
test_step(model, x_test, y_test)
with test_summary_writer.as_default():
tf.summary.scalar('loss', test_loss.result(), step=epoch)
tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print (template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
# Reset metrics every epoch
train_loss.reset_states()
test_loss.reset_states()
train_accuracy.reset_states()
test_accuracy.reset_states()
%tensorboard --logdir logs/gradient_tape
!tensorboard dev upload \
--logdir logs/fit \
--name "(optional) My latest experiment" \
--description "(optional) Simple comparison of several hyperparameters" \
--one_shot
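# Supplementary, hedged example (not part of the original tutorial): any other scalar can
# be logged with the same writer pattern used above, e.g. a decaying learning rate. The
# directory name and the 0.001 * 0.9**step schedule are illustrative assumptions.
lr_log_dir = "logs/misc/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
lr_writer = tf.summary.create_file_writer(lr_log_dir)
with lr_writer.as_default():
    for step in range(5):
        tf.summary.scalar("learning_rate", 0.001 * (0.9 ** step), step=step)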
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From the summary method we see that the Bounding Rectangle is reported along with the Area of the window for the point pattern. Two things to note here.
Step2: The bounding box is given in left, bottom, right, top ordering.
Step3: The parts attribute for the window is a list of polygons. In this case the window has only a single part and it is a rectangular polygon with vertices listed clockwise in closed cartographic form.
Step4: This also applies to sequences of points
Step5: Multi-part Windows
Step6: We will plot this using matplotlib to get a better understanding of the challenges that this type of window presents for statistical analysis of the associated point pattern.
Step7: Not quite what we wanted: the first part of our multi-part polygon is a ring, but it was not encoded in closed cartographic form.
Step8: We can fix this with a helper function from the window module
Step9: Now we can print all the rings composing our window
Step10: The red hole is associated with the first exterior ring.
Step11: Of the five points two are clearly outside of both of the exterior rings. The three remaining points are each contained in one of the bounding boxes for an exterior ring. However, one of these points is also contained in the hole ring, and thus is not contained in the exterior ring associated with that hole.
Step12: Here we have extended the figure to include the bounding box for the multi-part window (in cyan). Now we can call the filter_contained method of the window on the point sequence
Step13: This was a lot of code just to illustrate that the methods of a window can be used to identify topological relationships between points and the window's constituent parts. Let's turn to a less contrived example to see this in action.
Step14: The county shapefile vautm17n.shp has 136 shapes of the polygon type. Some of these are composed of multiple rings and holes to reflect the interesting history of political boundaries in that State.
Step15: This creates a PySAL Polygon
Step16: We can construct a Window from this polygon instance using the helper function as_window
Step17: The window has three parts consisting of the union of mainland counties and two "island" parts associated with Accomack and Northampton counties and has no holes.
Step18: So the centroid for our new window is contained by the window. Such a result is not guaranteed as the geometry of the window could be complex such that the centroid falls outside of the window.
Step19: What we did here was create a window for each of the individual counties in the state. With these in hand, we checked each one for containment of the window's centroid. The result is that the window (county) with index 67 is the only one that contains the centroid point.
Step20: Here the default is to form the minimum bounding rectangle and use that as the window for the point pattern and, in turn, to implement the intensity estimation.
Step21: Here, the window is redefined. Thus, window related attributes Area of window and Intensity estimate for window are changed. However, the Bounding rectangle remains unchanged since it is not relevant to the definition of window.
Step22: as are the intensity estimates
|
<ASSISTANT_TASK:>
Python Code:
import pysal.lib as ps
import numpy as np
from pysal.explore.pointpats import PointPattern
f = ps.examples.get_path('vautm17n_points.shp')
fo = ps.io.open(f)
pp_va = PointPattern(np.asarray([pnt for pnt in fo]))
fo.close()
pp_va.summary()
pp_va.window.area
pp_va.window.bbox
pp_va.window.centroid
pp_va.window.parts
pp_va.window.contains_point((623277.82697965798, 4204412.8815969583))
pnts = ((-623277.82697965798, 4204412.8815969583),
(623277.82697965798, 4204412.8815969583),
(1000.01, 200.9))
pnts_in = pp_va.window.filter_contained(pnts)
pnts_in
parts = [[(0.0, 0.0), (0.0, 10.0), (10.0, 10.0), (10.0, 0.0)],
[(11.,11.), (11.,20.), (20.,20.), (20.,11.)]]
holes = [[(3.0,3.0), (6.0, 3.0), (6.0, 6.0), (3.0, 6.0)]]
%matplotlib inline
import matplotlib.pyplot as plt
p0 = np.asarray(parts[0])
plt.plot(p0[:,0], p0[:,1])
plt.xlim(-10,20)
t = plt.ylim(-10,20) # silence the output of ylim
p0
from pysal.explore.pointpats.window import to_ccf
print(parts[0])
print(to_ccf(parts[0])) #get closed ring
from pysal.explore.pointpats.window import to_ccf
p0 = np.asarray(to_ccf(parts[0]))
plt.plot(p0[:,0], p0[:,1])
plt.xlim(-10,20)
t=plt.ylim(-10,20)
for part in parts:
part = np.asarray(to_ccf(part))
plt.plot(part[:,0], part[:,1], 'b')
for hole in holes:
hole = np.asarray(to_ccf(hole))
plt.plot(hole[:,0], hole[:,1], 'r')
plt.xlim(-10,30)
t = plt.ylim(-10,30)
pnts = [(12,12), (4,4), (2,2), (25,1), (5,20)]
for pnt in pnts:
plt.plot(pnt[0], pnt[1], 'g.')
for part in parts:
part = np.asarray(to_ccf(part))
plt.plot(part[:,0], part[:,1], 'b')
for hole in holes:
hole = np.asarray(to_ccf(hole))
plt.plot(hole[:,0], hole[:,1], 'r')
plt.xlim(-10,30)
t = plt.ylim(-10,30)
from pysal.explore.pointpats import Window
window = Window(parts, holes)
window.parts
window.holes
window.bbox
window.area
pnts = [(12,12), (4,4), (2,2), (25,1), (5,20)]
for pnt in pnts:
plt.plot(pnt[0], pnt[1], 'g.') #plot the five points in green
for part in parts:
part = np.asarray(to_ccf(part))
plt.plot(part[:,0], part[:,1], 'b') #plot "parts" in blue
for hole in holes:
hole = np.asarray(to_ccf(hole))
plt.plot(hole[:,0], hole[:,1], 'r') #plot "hole" in red
from pysal.explore.pointpats.window import poly_from_bbox
poly = np.asarray(poly_from_bbox(window.bbox).vertices)
plt.plot(poly[:,0], poly[:,1], 'm-.') #plot the minimum bounding box in magenta
plt.xlim(-10,30)
t = plt.ylim(-10,30)
pin = window.filter_contained(pnts)
pin
from pysal.lib.cg import shapely_ext
import numpy as np
from pysal.explore.pointpats.window import poly_from_bbox, as_window, Window
import pysal.lib as ps
%matplotlib inline
import matplotlib.pyplot as plt
va = ps.io.open(ps.examples.get_path("vautm17n.shp")) #open "vautm17n" polygon shapefile
polys = [shp for shp in va]
vapnts = ps.io.open(ps.examples.get_path("vautm17n_points.shp")) #open "vautm17n_points" point shapefile
points = [shp for shp in vapnts]
print(len(polys))
cu = shapely_ext.cascaded_union(polys)
type(cu)
w = as_window(cu)
w.holes
len(w.parts)
w.bbox
w.centroid
w.contains_point(w.centroid)
#create a window for each of the individual counties in the state
windows = [as_window(county) for county in polys]
#check each county for containment of the window's centroid
cent_poly = [ (i, county) for i,county in enumerate(windows) if county.contains_point(w.centroid)]
cent_poly
i, cent_poly = cent_poly[0]
cent_poly.bbox
f = ps.examples.get_path('vautm17n_points.shp') #open "vautm17n_points" point shapefile
fo = ps.io.open(f)
pnts = np.asarray([pnt for pnt in fo])
fo.close()
pp_va = PointPattern(pnts)
pp_va.summary()
pp_va_union = PointPattern(pnts, window=w)
pp_va_union.summary()
pp_va.window.area / pp_va_union.window.area
pp_va.lambda_window / pp_va_union.lambda_window
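# A hedged sanity check (not in the original notebook): the window intensity reported
# above should simply be the number of points divided by the window area. This assumes
# PointPattern exposes the point count as the attribute `n`.
print(pp_va_union.n / pp_va_union.window.area, pp_va_union.lambda_window)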
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: The Meetup API limits requests, however their documentation isn't exactly helpful. Using their headers, I saw that I was limited to 30 requests per 10 seconds. Therefore, I'll sleep 1 second in between each request to be safe.
Step5: Part 2
Step6: Part 3
Step7: Sanity check (I have a tree command installed via brew install tree)
Step8: Part 4
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from collections import defaultdict
import json
import os
import time
import requests
def save_output(data, output_file):
with open(output_file, "w") as f:
json.dump(data, f)
# Set some global variables
MEETUP_API_KEY = "yeah right"
MEETUP_GROUPS_URL = "https://api.meetup.com/2/groups"
PARAMS = {
"signed": True,
"key": MEETUP_API_KEY,
"topic": "python",
"category_id": 34, # 34 = Tech, there are only ~35 categories
"order": "members",
"page": 200, # max allowed
"omit": "group_photo" # no need for photos in response
}
TOTAL_PAGES = 6 # looked on the API console, 1117 meetup groups as of 7/17, 200 groups per page = 6 pages
def get_meetup_groups():
meetup_groups = []
for i in xrange(TOTAL_PAGES):
PARAMS["offset"] = i
print("GROUPS: Getting page {0} of {1}".format(i+1, TOTAL_PAGES+1))
response = requests.get(MEETUP_GROUPS_URL, params=PARAMS)
if response.ok:
meetup_groups.extend(response.json().get("results"))
time.sleep(1) # don't bombard the Meetup API
print("GROUPS: Collected {0} Meetup groups".format(len(meetup_groups)))
return meetup_groups
meetup_groups = get_meetup_groups()
# Create a directory to save everything
data_dir = "meetup_data"
if not os.path.exists(data_dir):
os.makedirs(data_dir)
# Save meetup groups data
output = os.path.join(data_dir, "meetup_groups.json")
save_output(meetup_groups, output)
# inspect one for funsies
meetup_groups[0]
search = ["python", "pydata", "pyramid", "py", "django", "flask", "plone"]
omit = ["happy"] # I realize that a group could be called "happy python user group" or something...
def is_pug(group):
    """Return `True` if in `search` keywords and not in `omit` keywords."""
group_name = group.get("name").lower()
for o in omit:
if o in group_name:
return False
for s in search:
if s in group_name:
return True
return False
def sort_groups(groups):
    """Sort groups by 'pyladies' and 'python user groups'."""
pyladies = []
user_groups = []
for g in groups:
if "pyladies" in g.get("name").lower():
pyladies.append(g)
else:
if is_pug(g):
user_groups.append(g)
return user_groups, pyladies
user_groups, pyladies = sort_groups(meetup_groups)
# Let's spot check the UGs to see if what we're left with makes sense
# Note: I took a peek at a few (not shown here) and for the most part,
# all seems okay
for g in user_groups:
print(g.get("name"))
from math import sin, cos, asin, degrees, radians, atan2, sqrt
RADIUS = 3958.75 # Earth's radius in miles
def is_within_50_miles(pyladies_coords, python_coords):
pyladies_lat, pyladies_lon = pyladies_coords[0], pyladies_coords[1]
python_lat, python_lon = python_coords[0], python_coords[1]
d_lat = radians(pyladies_lat - python_lat)
d_lon = radians(pyladies_lon - python_lon)
sin_d_lat = sin(d_lat / 2)
sin_d_lon = sin(d_lon / 2)
a = (sin_d_lat ** 2 + sin_d_lon ** 2 ) * cos(radians(pyladies_lat)) * cos(radians(python_lat))
c = 2 * atan2(sqrt(a), sqrt(1-a))
dist = RADIUS * c
return dist <= 50
def get_coords(group):
return group.get("lat"), group.get("lon")
def get_nearby_python_groups(pyl, collect):
pyl_coords = get_coords(pyl)
nearby = []
for group in user_groups:
pyt_coords = get_coords(group)
if is_within_50_miles(pyl_coords, pyt_coords):
nearby.append(group)
collect[pyl.get("name")] = nearby
return collect
collect = {}
for pylady in pyladies:
collect = get_nearby_python_groups(pylady, collect)
for item in collect.items():
print(item[0], len(item[1]))
# Save data into pyladies-specific directories
def pylady_dir(pyl):
_dir = pyl.split()
_dir = "".join(_dir)
outdir = os.path.join(data_dir, _dir)
if not os.path.exists(outdir):
os.makedirs(outdir)
return _dir
def save_pyladies():
for pylady in pyladies:
name = pylady.get("name")
subdir = pylady_dir(name)
outputdir = os.path.join(data_dir, subdir)
output = os.path.join(outputdir, subdir + ".json")
save_output(pylady, output)
groups = collect.get(name)
for g in groups:
group_link = g.get("link")
group_name = group_link.split(".com/")[1][:-1]
group_name = "".join(group_name)
outfile = group_name + ".json"
ug_output = os.path.join(outputdir, outfile)
save_output(g, ug_output)
save_pyladies()
!tree
MEETUP_MEMBER_URL = "https://api.meetup.com/2/members"
PARAMS = {
"signed": True,
"key": MEETUP_API_KEY,
}
def get_members(group):
PARAMS["group_id"] = group.get("id")
members_count = group.get("members")
print(u"MEMBERS: Getting {0} members for group {1}".format(members_count, group.get("name")))
pages = members_count / 200
remainder = members_count % 200
if remainder > 0:
pages += 1
members = []
for i in xrange(pages):
print("MEMBERS: Iteration {0} out of {1}".format(i+1, pages+1))
PARAMS["offset"] = i
resp = requests.get(MEETUP_MEMBER_URL, PARAMS)
if resp.ok:
results = resp.json().get("results")
members.extend(results)
time.sleep(1)
print("MEMBERS: Got {0} members".format(len(members)))
return members
def get_members_collection(pylady, groups):
pylady_members = get_members(pylady)
pug_members = defaultdict(list)
for g in groups:
pg_mbrs = get_members(g)
pug_members[g.get("name")].append(pg_mbrs)
return pylady_members, pug_members
# NOTE: this takes *FOREVER*.
start = time.time()
for i, item in enumerate(collect.items()):
print("COLLECTING: {0} out of {1}".format(i+1, len(collect)+1))
pylady = [p for p in pyladies if p.get("name") == item[0]][0]
pylady_members, pug_members = get_members_collection(pylady, item[1])
print("COLLECTING: Saving all the data!")
pylady_name = pylady.get("name")
outdir = pylady_dir(pylady_name)
outdir = os.path.join(data_dir, outdir)
outfile = os.path.join(outdir, "pyladies_members.json")
save_output(pylady_members, outfile)
outfile = os.path.join(outdir, "pug_members.json")
save_output(pug_members, outfile)
end = time.time()
delta_s = end - start
delta_m = delta_s / 60
print("**DONE**")
print("Completed in {:.0f} minutes".format(delta_m))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Monte Carlo methods do not bootstrap
Step2: 5.4 Monte Carlo Control without Exploring Starts
Step3: 5.5 Off-policy Prediction via Importance Sampling
Step4: 5.7 Off-policy Monte Carlo Control
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image  # assumed setup import; the cells below rely on it being available
Image('./res/first_visit_mc.png')
Image('./res/gpi.png')
Image('./res/monte_carlo_es.png')
Image('./res/on_epsilon_soft.png')
Image('./res/off_policy_predict.png')
Image('./res/off_policy_control.png')
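# The cells above only display the book's pseudocode as images, so here is a minimal,
# hedged sketch of first-visit Monte Carlo prediction in plain Python (not from the
# original notebook). `episodes` is assumed to be a list of trajectories, each a list of
# (state, reward) pairs, and `gamma` the discount factor.
from collections import defaultdict
def first_visit_mc_prediction(episodes, gamma=1.0):
    returns = defaultdict(list)   # state -> list of first-visit returns
    V = defaultdict(float)        # state -> estimated value
    for episode in episodes:
        # index of the first visit of each state in this episode
        first_visit = {}
        for t, (s, _) in enumerate(episode):
            first_visit.setdefault(s, t)
        G = 0.0
        for t in reversed(range(len(episode))):
            s, r = episode[t]
            G = gamma * G + r
            if first_visit[s] == t:   # only the return following the first visit counts
                returns[s].append(G)
                V[s] = sum(returns[s]) / len(returns[s])
    return V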
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Policies
Step2: Python policies
Step3: The most important method is action(time_step), which maps a time_step containing an observation from the environment to a PolicyStep named tuple containing the following attributes:
Step4: Example 2: Scripted Python policy
Step5: TensorFlow policies
Step6: Example 2: Actor policy
Step7: In TensorFlow, most network layers are designed for batch operations, so we expect the input time_steps to be batched, and the output of the network to be batched as well. The network is also responsible for producing actions in the correct range of the given action_spec. This is conventionally done by using, for example, a tanh activation for the final layer to produce actions in [-1, 1] and then scaling and shifting this to the correct range given by the input action_spec (for example, see tf_agents/agents/ddpg/networks.actor_network()).
Step8: We can apply it to any batch of time_steps that follow time_step_spec:
Step9: In the example above, we created the policy using an action network that produces an action tensor. In this case, policy.distribution(time_step) is a deterministic (delta) distribution around the output of policy.action(time_step). One way to produce a stochastic policy is to wrap the actor policy in a policy wrapper that adds noise to the actions. Another way is to create the actor policy using an action distribution network instead of an action network, as shown below.
Step10: Note that in the example above, actions are clipped to the given action spec range [-1, 1]. This is because the ActorPolicy constructor argument clip defaults to True. Setting it to False returns the unclipped actions produced by the network.
Step11: Policy wrappers
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.networks import network
from tf_agents.policies import py_policy
from tf_agents.policies import random_py_policy
from tf_agents.policies import scripted_py_policy
from tf_agents.policies import tf_policy
from tf_agents.policies import random_tf_policy
from tf_agents.policies import actor_policy
from tf_agents.policies import q_policy
from tf_agents.policies import greedy_policy
from tf_agents.trajectories import time_step as ts
class Base(object):
@abc.abstractmethod
def __init__(self, time_step_spec, action_spec, policy_state_spec=()):
self._time_step_spec = time_step_spec
self._action_spec = action_spec
self._policy_state_spec = policy_state_spec
@abc.abstractmethod
def reset(self, policy_state=()):
# return initial_policy_state.
pass
@abc.abstractmethod
def action(self, time_step, policy_state=()):
# return a PolicyStep(action, state, info) named tuple.
pass
@abc.abstractmethod
def distribution(self, time_step, policy_state=()):
# Not implemented in python, only for TF policies.
pass
@abc.abstractmethod
def update(self, policy):
# update self to be similar to the input `policy`.
pass
@property
def time_step_spec(self):
return self._time_step_spec
@property
def action_spec(self):
return self._action_spec
@property
def policy_state_spec(self):
return self._policy_state_spec
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
my_random_py_policy = random_py_policy.RandomPyPolicy(time_step_spec=None,
action_spec=action_spec)
time_step = None
action_step = my_random_py_policy.action(time_step)
print(action_step)
action_step = my_random_py_policy.action(time_step)
print(action_step)
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
action_script = [(1, np.array([5, 2], dtype=np.int32)),
(0, np.array([0, 0], dtype=np.int32)), # Setting `num_repeats` to 0 will skip this action.
(2, np.array([1, 2], dtype=np.int32)),
(1, np.array([3, 4], dtype=np.int32))]
my_scripted_py_policy = scripted_py_policy.ScriptedPyPolicy(
time_step_spec=None, action_spec=action_spec, action_script=action_script)
policy_state = my_scripted_py_policy.get_initial_state()
time_step = None
print('Executing scripted policy...')
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
action_step= my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)
action_step = my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)
print('Resetting my_scripted_py_policy...')
policy_state = my_scripted_py_policy.get_initial_state()
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
action_spec = tensor_spec.BoundedTensorSpec(
(2,), tf.float32, minimum=-1, maximum=3)
input_tensor_spec = tensor_spec.TensorSpec((2,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
my_random_tf_policy = random_tf_policy.RandomTFPolicy(
action_spec=action_spec, time_step_spec=time_step_spec)
observation = tf.ones(time_step_spec.observation.shape)
time_step = ts.restart(observation)
action_step = my_random_tf_policy.action(time_step)
print('Action:')
print(action_step.action)
class ActionNet(network.Network):
def __init__(self, input_tensor_spec, output_tensor_spec):
super(ActionNet, self).__init__(
input_tensor_spec=input_tensor_spec,
state_spec=(),
name='ActionNet')
self._output_tensor_spec = output_tensor_spec
self._sub_layers = [
tf.keras.layers.Dense(
action_spec.shape.num_elements(), activation=tf.nn.tanh),
]
def call(self, observations, step_type, network_state):
del step_type
output = tf.cast(observations, dtype=tf.float32)
for layer in self._sub_layers:
output = layer(output)
actions = tf.reshape(output, [-1] + self._output_tensor_spec.shape.as_list())
# Scale and shift actions to the correct range if necessary.
return actions, network_state
input_tensor_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
action_spec = tensor_spec.BoundedTensorSpec((3,),
tf.float32,
minimum=-1,
maximum=1)
action_net = ActionNet(input_tensor_spec, action_spec)
my_actor_policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=action_net)
batch_size = 2
observations = tf.ones([2] + time_step_spec.observation.shape.as_list())
time_step = ts.restart(observations, batch_size)
action_step = my_actor_policy.action(time_step)
print('Action:')
print(action_step.action)
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step.action)
class ActionDistributionNet(ActionNet):
def call(self, observations, step_type, network_state):
action_means, network_state = super(ActionDistributionNet, self).call(
observations, step_type, network_state)
action_std = tf.ones_like(action_means)
return tfp.distributions.MultivariateNormalDiag(action_means, action_std), network_state
action_distribution_net = ActionDistributionNet(input_tensor_spec, action_spec)
my_actor_policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=action_distribution_net)
action_step = my_actor_policy.action(time_step)
print('Action:')
print(action_step.action)
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step.action)
input_tensor_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
action_spec = tensor_spec.BoundedTensorSpec((),
tf.int32,
minimum=0,
maximum=2)
num_actions = action_spec.maximum - action_spec.minimum + 1
class QNetwork(network.Network):
def __init__(self, input_tensor_spec, action_spec, num_actions=num_actions, name=None):
super(QNetwork, self).__init__(
input_tensor_spec=input_tensor_spec,
state_spec=(),
name=name)
self._sub_layers = [
tf.keras.layers.Dense(num_actions),
]
def call(self, inputs, step_type=None, network_state=()):
del step_type
inputs = tf.cast(inputs, tf.float32)
for layer in self._sub_layers:
inputs = layer(inputs)
return inputs, network_state
batch_size = 2
observation = tf.ones([batch_size] + time_step_spec.observation.shape.as_list())
time_steps = ts.restart(observation, batch_size=batch_size)
my_q_network = QNetwork(
input_tensor_spec=input_tensor_spec,
action_spec=action_spec)
my_q_policy = q_policy.QPolicy(
time_step_spec, action_spec, q_network=my_q_network)
action_step = my_q_policy.action(time_steps)
distribution_step = my_q_policy.distribution(time_steps)
print('Action:')
print(action_step.action)
print('Action distribution:')
print(distribution_step.action)
my_greedy_policy = greedy_policy.GreedyPolicy(my_q_policy)
action_step = my_greedy_policy.action(time_steps)
print('Action:')
print(action_step.action)
distribution_step = my_greedy_policy.distribution(time_steps)
print('Action distribution:')
print(distribution_step.action)
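# Supplementary, hedged example for the policy-wrappers step (not in the original
# tutorial): TF-Agents also ships an epsilon-greedy wrapper that mixes the wrapped
# policy's greedy action with uniformly random actions. The epsilon value below is an
# illustrative assumption.
from tf_agents.policies import epsilon_greedy_policy
my_epsilon_greedy_policy = epsilon_greedy_policy.EpsilonGreedyPolicy(my_q_policy, epsilon=0.1)
action_step = my_epsilon_greedy_policy.action(time_steps)
print('Action:')
print(action_step.action)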
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we load some data into a DataFrame. In this case, we'll load the US Census surnames data ranked by frequency.
Step2: We can create a dictionary of Soundex values mapping to all the surnames with the same Soundex code. These represent Soundex collisions (or blocking). Getting the basic Soundex value of a string is as simple as calling soundex() on it.
Step3: Better yet, we can construct a Soundex() object to reuse for encoding multiple names.
Step4: With this dictionary, we can retrieve all the names that map to the same Soundex value as, for example, the name Williamson.
Step5: We can build up a DataFrame with some interesting information about these names. First, we'll just collect all the names in a column.
Step6: To that, let's add a few distance measures.
Step7: And finally, we'll add a few phonetic encodings.
Step8: Let's check the row for WILLIAMSON.
Step9: In addition to their Soundex collision, 7 names have matching first Double Metaphone encodings.
Step10: 28 have matching NYSIIS encodings.
Step11: And 7 have matching first Alpha-SIS encodings.
Step12: 6 names match in all four of the phonetic algorithms considered here.
|
<ASSISTANT_TASK:>
Python Code:
from abydos.phonetic import *
from abydos.distance import *
import pandas as pd
names = pd.read_csv('../tests/corpora/uscensus2000.csv',
comment='#', index_col=1, usecols=(0,1), keep_default_na=False)
names.head()
soundex('WILLIAMSON')
sdx = Soundex()
reverse_soundex = {}
for name in names.name:
encoded = sdx.encode(name)
if encoded not in reverse_soundex:
reverse_soundex[encoded] = set()
reverse_soundex[encoded].add(name)
reverse_soundex[soundex('WILLIAMSON')]
df = pd.DataFrame(sorted(reverse_soundex[soundex('WILLIAMSON')]), columns=['name'])
df
# Levenshtein distance from 'WILLIAMSON'
lev = Levenshtein()
df['Levenshtein'] = df.name.apply(lambda name: lev.dist_abs('WILLIAMSON', name))
# Jaccard similarity on 2-grams
jac = Jaccard()
df['Jaccard'] = df.name.apply(lambda name: jac.sim('WILLIAMSON', name))
# Jaro-Winkler similarity
jw = JaroWinkler()
df['Jaro_Winkler'] = df.name.apply(lambda name: jw.sim('WILLIAMSON', name))
# Double Metaphone (first code only)
dm = DoubleMetaphone()
df['Double_Metaphone'] = df.name.apply(lambda name: dm.encode(name)[0])
# NYSIIS
nysiis = NYSIIS()
df['NYSIIS'] = df.name.apply(lambda name: nysiis.encode(name))
# Alpha-SIS (first code only)
alphasis = AlphaSIS()
df['Alpha_SIS'] = df.name.apply(lambda name: alphasis.encode(name)[0])
df
df[df.name == 'WILLIAMSON']
df[df.Double_Metaphone == 'ALMSN']
df[df.NYSIIS == 'WALANS']
df[df.Alpha_SIS == '45302000000000']
df[(df.Alpha_SIS == '45302000000000') & (df.NYSIIS == 'WALANS') &
(df.Double_Metaphone == 'ALMSN')]
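# A hedged extension (not in the original notebook): the reverse-lookup blocking
# dictionary built for Soundex above can be rebuilt for any other encoder, e.g. NYSIIS,
# by swapping the encode call.
reverse_nysiis = {}
for name in names.name:
    encoded = nysiis.encode(name)
    reverse_nysiis.setdefault(encoded, set()).add(name)
len(reverse_nysiis[nysiis.encode('WILLIAMSON')])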
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Set up variables
Step8: Vertex constants
Step9: AutoML constants
Step10: Tutorial
Step11: Dataset
Step12: Now save the unique dataset identifier for the Dataset resource instance you created.
Step13: Data preparation
Step14: Quick peek at your data
Step15: Import data
Step16: Train the model
Step17: Construct the task requirements
Step18: Now save the unique identifier of the training pipeline you created.
Step19: Get information on a training pipeline
Step20: Deployment
Step21: Model information
Step22: Deploy the Model resource
Step23: Now get the unique identifier for the Endpoint resource you created.
Step24: Compute instance scaling
Step25: Deploy Model resource to the Endpoint resource
Step26: Make an online prediction request
Step27: Make a prediction
Step28: Undeploy the Model resource
Step29: Cleaning up
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install -U google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# Image Dataset type
DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml"
# Image Labeling type
LABEL_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/ioformat/image_segmentation_io_format_1.0.0.yaml"
# Image Training task
TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_image_segmentation_1.0.0.yaml"
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
client = aip.DatasetServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_pipeline_client():
client = aip.PipelineServiceClient(client_options=client_options)
return client
def create_endpoint_client():
client = aip.EndpointServiceClient(client_options=client_options)
return client
def create_prediction_client():
client = aip.PredictionServiceClient(client_options=client_options)
return client
clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["endpoint"] = create_endpoint_client()
clients["prediction"] = create_prediction_client()
for client in clients.items():
print(client)
TIMEOUT = 90
def create_dataset(name, schema, labels=None, timeout=TIMEOUT):
start_time = time.time()
try:
dataset = aip.Dataset(
display_name=name, metadata_schema_uri=schema, labels=labels
)
operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
print("Long running operation:", operation.operation.name)
result = operation.result(timeout=TIMEOUT)
print("time:", time.time() - start_time)
print("response")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" metadata_schema_uri:", result.metadata_schema_uri)
print(" metadata:", dict(result.metadata))
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
print(" etag:", result.etag)
print(" labels:", dict(result.labels))
return result
except Exception as e:
print("exception:", e)
return None
result = create_dataset("unknown-" + TIMESTAMP, DATA_SCHEMA)
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
IMPORT_FILE = "gs://ucaip-test-us-central1/dataset/isg_data.jsonl"
if "IMPORT_FILES" in globals():
FILE = IMPORT_FILES[0]
else:
FILE = IMPORT_FILE
count = ! gsutil cat $FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $FILE | head
def import_data(dataset, gcs_sources, schema):
config = [{"gcs_source": {"uris": gcs_sources}, "import_schema_uri": schema}]
print("dataset:", dataset_id)
start_time = time.time()
try:
operation = clients["dataset"].import_data(
name=dataset_id, import_configs=config
)
print("Long running operation:", operation.operation.name)
result = operation.result()
print("result:", result)
print("time:", int(time.time() - start_time), "secs")
print("error:", operation.exception())
print("meta :", operation.metadata)
print(
"after: running:",
operation.running(),
"done:",
operation.done(),
"cancelled:",
operation.cancelled(),
)
return operation
except Exception as e:
print("exception:", e)
return None
import_data(dataset_id, [IMPORT_FILE], LABEL_SCHEMA)
def create_pipeline(pipeline_name, model_name, dataset, schema, task):
dataset_id = dataset.split("/")[-1]
input_config = {
"dataset_id": dataset_id,
"fraction_split": {
"training_fraction": 0.8,
"validation_fraction": 0.1,
"test_fraction": 0.1,
},
}
training_pipeline = {
"display_name": pipeline_name,
"training_task_definition": schema,
"training_task_inputs": task,
"input_data_config": input_config,
"model_to_upload": {"display_name": model_name},
}
try:
pipeline = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
print(pipeline)
except Exception as e:
print("exception:", e)
return None
return pipeline
PIPE_NAME = "unknown_pipe-" + TIMESTAMP
MODEL_NAME = "unknown_model-" + TIMESTAMP
task = json_format.ParseDict(
{"budget_milli_node_hours": 2000, "model_type": "CLOUD_LOW_ACCURACY_1"}, Value()
)
response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)
# The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id)
def get_training_pipeline(name, silent=False):
response = clients["pipeline"].get_training_pipeline(name=name)
if silent:
return response
print("pipeline")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" state:", response.state)
print(" training_task_definition:", response.training_task_definition)
print(" training_task_inputs:", dict(response.training_task_inputs))
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", dict(response.labels))
return response
response = get_training_pipeline(pipeline_id)
while True:
response = get_training_pipeline(pipeline_id, True)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_id = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
raise Exception("Training Job Failed")
else:
model_to_deploy = response.model_to_upload
model_to_deploy_id = model_to_deploy.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
print("model to deploy:", model_to_deploy_id)
def list_model_evaluations(name):
response = clients["model"].list_model_evaluations(parent=name)
for evaluation in response:
print("model_evaluation")
print(" name:", evaluation.name)
print(" metrics_schema_uri:", evaluation.metrics_schema_uri)
metrics = json_format.MessageToDict(evaluation._pb.metrics)
for metric in metrics.keys():
print(metric)
print("confidenceMetricsEntries", metrics["confidenceMetricsEntries"])
return evaluation.name
last_evaluation = list_model_evaluations(model_to_deploy_id)
ENDPOINT_NAME = "unknown_endpoint-" + TIMESTAMP
def create_endpoint(display_name):
endpoint = {"display_name": display_name}
response = clients["endpoint"].create_endpoint(parent=PARENT, endpoint=endpoint)
print("Long running operation:", response.operation.name)
result = response.result(timeout=300)
print("result")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" description:", result.description)
print(" labels:", result.labels)
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
return result
result = create_endpoint(ENDPOINT_NAME)
# The full unique ID for the endpoint
endpoint_id = result.name
# The short numeric ID for the endpoint
endpoint_short_id = endpoint_id.split("/")[-1]
print(endpoint_id)
MIN_NODES = 1
MAX_NODES = 1
DEPLOYED_NAME = "unknown_deployed-" + TIMESTAMP
def deploy_model(
model, deployed_model_display_name, endpoint, traffic_split={"0": 100}
):
deployed_model = {
"model": model,
"display_name": deployed_model_display_name,
"automatic_resources": {
"min_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
},
}
response = clients["endpoint"].deploy_model(
endpoint=endpoint, deployed_model=deployed_model, traffic_split=traffic_split
)
print("Long running operation:", response.operation.name)
result = response.result()
print("result")
deployed_model = result.deployed_model
print(" deployed_model")
print(" id:", deployed_model.id)
print(" model:", deployed_model.model)
print(" display_name:", deployed_model.display_name)
print(" create_time:", deployed_model.create_time)
return deployed_model.id
deployed_model_id = deploy_model(model_to_deploy_id, DEPLOYED_NAME, endpoint_id)
import json
test_items = !gsutil cat $IMPORT_FILE | head -n1
test_data = test_items[0].replace("'", '"')
test_data = json.loads(test_data)
try:
test_item = test_data["image_gcs_uri"]
test_label = test_data["segmentation_annotation"]["annotation_spec_colors"]
except:
test_item = test_data["imageGcsUri"]
test_label = test_data["segmentationAnnotation"]["annotationSpecColors"]
print(test_item, test_label)
import base64
import tensorflow as tf
def predict_item(filename, endpoint, parameters_dict):
parameters = json_format.ParseDict(parameters_dict, Value())
with tf.io.gfile.GFile(filename, "rb") as f:
content = f.read()
# The format of each instance should conform to the deployed model's prediction input schema.
instances_list = [{"content": base64.b64encode(content).decode("utf-8")}]
instances = [json_format.ParseDict(s, Value()) for s in instances_list]
response = clients["prediction"].predict(
endpoint=endpoint, instances=instances, parameters=parameters
)
print("response")
print(" deployed_model_id:", response.deployed_model_id)
predictions = response.predictions
print("predictions")
for prediction in predictions:
print(" prediction:", dict(prediction))
predict_item(test_item, endpoint_id, None)
def undeploy_model(deployed_model_id, endpoint):
response = clients["endpoint"].undeploy_model(
endpoint=endpoint, deployed_model_id=deployed_model_id, traffic_split={}
)
print(response)
undeploy_model(deployed_model_id, endpoint_id)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
if delete_dataset and "dataset_id" in globals():
clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
if delete_pipeline and "pipeline_id" in globals():
clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
if delete_model and "model_to_deploy_id" in globals():
clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
if delete_endpoint and "endpoint_id" in globals():
clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
if delete_batchjob and "batch_job_id" in globals():
clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
if delete_customjob and "job_id" in globals():
clients["job"].delete_custom_job(name=job_id)
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
if delete_hptjob and "hpt_job_id" in globals():
clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
print(e)
if delete_bucket and "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up the pipeline
Step2: Extract relevant parameters for dtreeviz from the pipeline
Step3: Initialize shadow tree
Step4: Visualizations
Step5: ctreeviz_leaf_samples
Step6: dtreeviz
Step7: show just path
Step8: viz_leaf_criterion
Step9: describe_node_sample
|
<ASSISTANT_TASK:>
Python Code:
# Imports below are assumed from the notebook's setup cell (not shown in the original excerpt).
import pandas as pd
from dtreeviz import trees
from dtreeviz.models.sklearn_decision_trees import ShadowSKDTree

random_state = 1234
dataset = pd.read_csv("../data/titanic/titanic.csv")
# Fill missing values for Age
dataset.fillna({"Age":dataset.Age.mean()}, inplace=True)
# Encode categorical variables
dataset["Sex_label"] = dataset.Sex.astype("category").cat.codes
dataset["Cabin_label"] = dataset.Cabin.astype("category").cat.codes
dataset["Embarked_label"] = dataset.Embarked.astype("category").cat.codes
features = ["Pclass", "Age", "Fare", "Sex_label", "Cabin_label", "Embarked_label"]
target = "Survived"
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import VarianceThreshold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
model = make_pipeline(
VarianceThreshold(0.5),
PolynomialFeatures(degree=2, interaction_only=True),
DecisionTreeClassifier(max_depth=4))
model.fit(dataset[features], dataset[target])
from dtreeviz.utils import extract_params_from_pipeline
tree_classifier, x_data, features_model = extract_params_from_pipeline(
pipeline=model,
x_data=dataset[features],
feature_names=features)
y_data = dataset[target]
features_model
sk_dtree = ShadowSKDTree(tree_classifier, x_data, y_data, features_model, target, [0, 1])
trees.viz_leaf_samples(tree_classifier, x_data, features)
trees.viz_leaf_samples(sk_dtree)
trees.ctreeviz_leaf_samples(tree_classifier, x_data, y_data, features)
trees.ctreeviz_leaf_samples(sk_dtree)
trees.dtreeviz(tree_classifier, x_data, y_data, features_model, target, class_names=[0, 1])
trees.dtreeviz(sk_dtree, fancy=False)
trees.dtreeviz(sk_dtree, show_just_path=True, X = x_data.iloc[10])
trees.viz_leaf_criterion(tree_classifier)
trees.viz_leaf_criterion(sk_dtree)
trees.describe_node_sample(tree_classifier, node_id=10, x_data=x_data, feature_names=features_model)
trees.describe_node_sample(sk_dtree, node_id=10)
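# A hedged extra (not in the original notebook): recent dtreeviz versions can also print
# a plain-English explanation of the decision path for one observation. The function name
# and its acceptance of a shadow tree are assumptions about the installed version.
print(trees.explain_prediction_path(sk_dtree, x_data.iloc[10]))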
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2
Step2: Given the list it of the most populated cities in Italy
|
<ASSISTANT_TASK:>
Python Code:
d1 = {}
d2 = {'Hola': ['Hi','Hello'], 'Adios': ['Bye'] }
d2["Hola"]
# Solution:
# Solution:
def fusion():
dic1 = {1: 'A', 2:'B', 3:'C'}
dic2 = {4: 'Aa', 5:'Ba', 6:'Ca'}
dic1.update(dic2)
return dic1
fusion()
# Solution:
it = [ 'Roma', 'Milán', 'Nápoles', 'Turín', 'Palermo' , 'Génova',
'Bolonia', 'Florencia', 'Bari', 'Catania']
secuencia = range(1, len(it)+1)
m = list(zip(secuencia, it))
ciudades = dict(m)
ciudades
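# A hedged alternative (not part of the original solution): the same numbered dictionary
# can be built in one step with enumerate.
{i: ciudad for i, ciudad in enumerate(it, start=1)}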
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem Statement
Step3: Each dot corresponds to a position on the football field where a football player has hit the ball with his/her head after the French goal keeper has shot the ball from the left side of the football field.
Step4: Let's train the model without any regularization, and observe the accuracy on the train/test sets.
Step5: The train accuracy is 94.8% while the test accuracy is 91.5%. This is the baseline model (you will observe the impact of regularization on this model). Run the following code to plot the decision boundary of your model.
Step7: The non-regularized model is obviously overfitting the training set. It is fitting the noisy points! Let's now look at two techniques to reduce overfitting.
Step9: Expected Output
Step10: Expected Output
Step11: Congrats, the test set accuracy increased to 93%. You have saved the French football team!
Step13: Observations
Step15: Expected Output
Step16: Expected Output
Step17: Dropout works great! The test accuracy has increased again (to 95%)! Your model is not overfitting the training set and does a great job on the test set. The French football team will be forever grateful to you!
|
<ASSISTANT_TASK:>
Python Code:
# import packages
import numpy as np
import matplotlib.pyplot as plt
from reg_utils import sigmoid, relu, plot_decision_boundary, initialize_parameters, load_2D_dataset, predict_dec
from reg_utils import compute_cost, predict, forward_propagation, backward_propagation, update_parameters
import sklearn
import sklearn.datasets
import scipy.io
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
train_X, train_Y, test_X, test_Y = load_2D_dataset()
def model(X, Y, learning_rate = 0.3, num_iterations = 30000, print_cost = True, lambd = 0, keep_prob = 1):
    """
    Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)
    learning_rate -- learning rate of the optimization
    num_iterations -- number of iterations of the optimization loop
    print_cost -- If True, print the cost every 10000 iterations
    lambd -- regularization hyperparameter, scalar
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    parameters -- parameters learned by the model. They can then be used to predict.
    """
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
layers_dims = [X.shape[0], 20, 3, 1]
# Initialize parameters dictionary.
parameters = initialize_parameters(layers_dims)
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.
if keep_prob == 1:
a3, cache = forward_propagation(X, parameters)
elif keep_prob < 1:
a3, cache = forward_propagation_with_dropout(X, parameters, keep_prob)
# Cost function
if lambd == 0:
cost = compute_cost(a3, Y)
else:
cost = compute_cost_with_regularization(a3, Y, parameters, lambd)
# Backward propagation.
assert(lambd==0 or keep_prob==1) # it is possible to use both L2 regularization and dropout,
# but this assignment will only explore one at a time
if lambd == 0 and keep_prob == 1:
grads = backward_propagation(X, Y, cache)
elif lambd != 0:
grads = backward_propagation_with_regularization(X, Y, cache, lambd)
elif keep_prob < 1:
grads = backward_propagation_with_dropout(X, Y, cache, keep_prob)
# Update parameters.
parameters = update_parameters(parameters, grads, learning_rate)
# Print the loss every 10000 iterations
if print_cost and i % 10000 == 0:
print("Cost after iteration {}: {}".format(i, cost))
if print_cost and i % 1000 == 0:
costs.append(cost)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (x1,000)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
parameters = model(train_X, train_Y)
print ("On the training set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model without regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: compute_cost_with_regularization
def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Implement the cost function with L2 regularization. See formula (2) above.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model

    Returns:
    cost -- value of the regularized loss function (formula (2))
    """
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
W3 = parameters["W3"]
cross_entropy_cost = compute_cost(A3, Y) # This gives you the cross-entropy part of the cost
### START CODE HERE ### (approx. 1 line)
L2_regularization_cost = (lambd/(2*m))*(np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3)))
### END CODER HERE ###
cost = cross_entropy_cost + L2_regularization_cost
return cost
A3, Y_assess, parameters = compute_cost_with_regularization_test_case()
print("cost = " + str(compute_cost_with_regularization(A3, Y_assess, parameters, lambd = 0.1)))
# GRADED FUNCTION: backward_propagation_with_regularization
def backward_propagation_with_regularization(X, Y, cache, lambd):
    """
    Implements the backward propagation of our baseline model to which we added an L2 regularization.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation()
    lambd -- regularization hyperparameter, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
    """
m = X.shape[1]
(Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
### START CODE HERE ### (approx. 1 line)
dW3 = 1./m * np.dot(dZ3, A2.T) + (lambd/m)*W3
### END CODE HERE ###
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
### START CODE HERE ### (approx. 1 line)
dW2 = 1./m * np.dot(dZ2, A1.T) + (lambd/m)*W2
### END CODE HERE ###
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
### START CODE HERE ### (approx. 1 line)
dW1 = 1./m * np.dot(dZ1, X.T) + (lambd/m)*W1
### END CODE HERE ###
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
X_assess, Y_assess, cache = backward_propagation_with_regularization_test_case()
grads = backward_propagation_with_regularization(X_assess, Y_assess, cache, lambd = 0.7)
print ("dW1 = "+ str(grads["dW1"]))
print ("dW2 = "+ str(grads["dW2"]))
print ("dW3 = "+ str(grads["dW3"]))
parameters = model(train_X, train_Y, lambd = 0.7)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with L2-regularization")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# GRADED FUNCTION: forward_propagation_with_dropout
def forward_propagation_with_dropout(X, parameters, keep_prob = 0.5):
    """
    Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (20, 2)
                    b1 -- bias vector of shape (20, 1)
                    W2 -- weight matrix of shape (3, 20)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- last activation value, output of the forward propagation, of shape (1,1)
    cache -- tuple, information stored for computing the backward propagation
    """
np.random.seed(1)
# retrieve parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
W3 = parameters["W3"]
b3 = parameters["b3"]
# LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
Z1 = np.dot(W1, X) + b1
A1 = relu(Z1)
### START CODE HERE ### (approx. 4 lines) # Steps 1-4 below correspond to the Steps 1-4 described above.
D1 = np.random.rand(A1.shape[0], A1.shape[1]) # Step 1: initialize matrix D1 = np.random.rand(..., ...)
D1 = D1 < keep_prob # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)
A1 = A1 * D1 # Step 3: shut down some neurons of A1
A1 = A1 / keep_prob # Step 4: scale the value of neurons that haven't been shut down
### END CODE HERE ###
Z2 = np.dot(W2, A1) + b2
A2 = relu(Z2)
### START CODE HERE ### (approx. 4 lines)
D2 = np.random.rand(A2.shape[0], A2.shape[1]) # Step 1: initialize matrix D2 = np.random.rand(..., ...)
D2 = D2 < keep_prob # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)
A2 = A2 * D2 # Step 3: shut down some neurons of A2
A2 = A2 / keep_prob # Step 4: scale the value of neurons that haven't been shut down
### END CODE HERE ###
Z3 = np.dot(W3, A2) + b3
A3 = sigmoid(Z3)
cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
return A3, cache
X_assess, parameters = forward_propagation_with_dropout_test_case()
A3, cache = forward_propagation_with_dropout(X_assess, parameters, keep_prob = 0.7)
print ("A3 = " + str(A3))
# GRADED FUNCTION: backward_propagation_with_dropout
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    """
    Implements the backward propagation of our baseline model to which we added dropout.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation_with_dropout()
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
    """
m = X.shape[1]
(Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache
dZ3 = A3 - Y
dW3 = 1./m * np.dot(dZ3, A2.T)
db3 = 1./m * np.sum(dZ3, axis=1, keepdims = True)
dA2 = np.dot(W3.T, dZ3)
### START CODE HERE ### (≈ 2 lines of code)
dA2 = dA2 * D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation
dA2 = dA2 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down
### END CODE HERE ###
dZ2 = np.multiply(dA2, np.int64(A2 > 0))
dW2 = 1./m * np.dot(dZ2, A1.T)
db2 = 1./m * np.sum(dZ2, axis=1, keepdims = True)
dA1 = np.dot(W2.T, dZ2)
### START CODE HERE ### (≈ 2 lines of code)
dA1 = dA1 * D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation
dA1 = dA1 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down
### END CODE HERE ###
dZ1 = np.multiply(dA1, np.int64(A1 > 0))
dW1 = 1./m * np.dot(dZ1, X.T)
db1 = 1./m * np.sum(dZ1, axis=1, keepdims = True)
gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,"dA2": dA2,
"dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
"dZ1": dZ1, "dW1": dW1, "db1": db1}
return gradients
X_assess, Y_assess, cache = backward_propagation_with_dropout_test_case()
gradients = backward_propagation_with_dropout(X_assess, Y_assess, cache, keep_prob = 0.8)
print ("dA1 = " + str(gradients["dA1"]))
print ("dA2 = " + str(gradients["dA2"]))
parameters = model(train_X, train_Y, keep_prob = 0.86, learning_rate = 0.3)
print ("On the train set:")
predictions_train = predict(train_X, train_Y, parameters)
print ("On the test set:")
predictions_test = predict(test_X, test_Y, parameters)
plt.title("Model with dropout")
axes = plt.gca()
axes.set_xlim([-0.75,0.40])
axes.set_ylim([-0.75,0.65])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cleaning and Formatting JSON Data
Step2: Creating the Tooltip to display the required fields
Step3: Creating the Label to display the year
Step4: Defining Axes and Scales
Step5: Creating the Scatter Mark with the appropriate size and color parameters passed
Step6: Creating the Figure
Step7: Using a Slider to allow the user to change the year and a button for animation
Step8: On the slider value callback (a function that is triggered every time the value of the slider is changed) we change the x, y and size coordinates of the Scatter. We also update the text of the Label to reflect the current year.
Step9: Defining the callback for the button
Step10: Displaying the GUI
|
<ASSISTANT_TASK:>
Python Code:
# Required imports
import pandas as pd
from bqplot import (LogScale, LinearScale, OrdinalColorScale, ColorAxis,
Axis, Scatter, CATEGORY10, Label, Figure)
from bqplot.default_tooltip import Tooltip
from ipywidgets import VBox, IntSlider, Button
from IPython.display import display
import os
import numpy as np
from time import sleep
# The GUI starts with this year
initial_year = 1800
data = pd.read_json(os.path.abspath('data_files/nations.json'))
def clean_data(data):
for column in ['income', 'lifeExpectancy', 'population']:
data = data.drop(data[data[column].apply(len) <= 4].index)
return data
def extrap_interp(data):
data = np.array(data)
x_range = np.arange(1800, 2009, 1.)
y_range = np.interp(x_range, data[:, 0], data[:, 1])
return y_range
def extrap_data(data):
for column in ['income', 'lifeExpectancy', 'population']:
data[column] = data[column].apply(extrap_interp)
return data
data = clean_data(data)
data = extrap_data(data)
income_min, income_max = np.min(data['income'].apply(np.min)), np.max(data['income'].apply(np.max))
life_exp_min, life_exp_max = np.min(data['lifeExpectancy'].apply(np.min)), np.max(data['lifeExpectancy'].apply(np.max))
pop_min, pop_max = np.min(data['population'].apply(np.min)), np.max(data['population'].apply(np.max))
def get_data(year):
year_index = year - 1800
income = data['income'].apply(lambda x: x[year_index])
life_exp = data['lifeExpectancy'].apply(lambda x: x[year_index])
pop = data['population'].apply(lambda x: x[year_index])
return income, life_exp, pop
tt = Tooltip(fields=['name', 'x', 'y'], labels=['Country Name', 'Income per Capita', 'Life Expectancy'])
year_label = Label(x=0.85, y=0.1, font_size='52px', font_weight='bolder', color='orange',
text=str(initial_year), enable_move=True)
x_sc = LogScale(min=income_min, max=income_max)
y_sc = LinearScale(min=life_exp_min, max=life_exp_max)
c_sc = OrdinalColorScale(domain=data['region'].unique().tolist(), colors=CATEGORY10[:6])
size_sc = LinearScale(min=pop_min, max=pop_max)
ax_y = Axis(label='Life Expectancy', scale=y_sc, orientation='vertical', side='left')
ax_x = Axis(label='Income per Capita', scale=x_sc)
# Start with the first year's data
cap_income, life_exp, pop = get_data(initial_year)
wealth_scat = Scatter(x=cap_income, y=life_exp, color=data['region'], size=pop,
names=data['name'], display_names=False,
scales={'x': x_sc, 'y': y_sc, 'color': c_sc, 'size': size_sc},
default_size=4112, tooltip=tt, animate=True, stroke='Black')
fig = Figure(marks=[wealth_scat, year_label], axes=[ax_x, ax_y],
title='Health and Wealth of Nations', fig_color='White',
animation_duration=100)
year_slider = IntSlider(min=1800, max=2008, step=1, description='Year', value=initial_year)
animate_button = Button(description='Play', background_color='MediumSeaGreen', color='Black', icon='fa-play')
def year_changed(new):
wealth_scat.x, wealth_scat.y, wealth_scat.size = get_data(year_slider.value)
year_label.text = str(year_slider.value)
year_slider.observe(year_changed, 'value')
def button_clicked(value):
animate_button.visible = False
for i in range(1800, 2009, 1):
year_slider.value = i
sleep(0.05)
animate_button.visible = True
animate_button.on_click(button_clicked)
display(VBox([animate_button, fig, year_slider]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
Step2: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
Step3: And we can see the characters encoded as integers.
Step4: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
Step5: Making training mini-batches
Step6: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
Step7: If you implemented get_batches correctly, the above output should look something like
Step8: LSTM Cell
Step9: RNN Output
Step10: Training loss
Step11: Optimizer
Step12: Build the network
Step13: Hyperparameters
Step14: Time for training
Step15: Saved checkpoints
Step16: Sampling
Step17: Here, pass in the path to a checkpoint and sample from the network.
|
<ASSISTANT_TASK:>
Python Code:
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
with open('anna.txt', 'r') as f:
text=f.read()
vocab = sorted(set(text))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
encoded[:100]
len(vocab)
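# Quick round-trip check (a small sketch, not from the original notebook): decoding the
# encoded integers with int_to_vocab should reproduce the original text exactly.
print(''.join(int_to_vocab[i] for i in encoded[:100]) == text[:100])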
def get_batches(arr, n_seqs, n_steps):
'''Create a generator that returns batches of size
n_seqs x n_steps from arr.
Arguments
---------
arr: Array you want to make batches from
n_seqs: Batch size, the number of sequences per batch
n_steps: Number of sequence steps per batch
'''
# Get the batch size and number of batches we can make
batch_size = n_seqs * n_steps
n_batches = len(arr)//batch_size
# Keep only enough characters to make full batches
arr = arr[:n_batches * batch_size]
# Reshape into n_seqs rows
arr = arr.reshape((n_seqs, -1))
for n in range(0, arr.shape[1], n_steps):
# The features
x = arr[:, n:n+n_steps]
# The targets, shifted by one
y = np.zeros_like(x)
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
yield x, y
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
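# Shape sanity check (a sketch): with n_seqs=10 and n_steps=50 each batch should be 10 x 50,
# and y should be x shifted one character to the left (with wrap-around at the end of the batch).
print(x.shape, y.shape)  # expected: (10, 50) (10, 50)
print(np.array_equal(x[:, 1:], y[:, :-1]))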
def build_inputs(batch_size, num_steps):
''' Define placeholders for inputs, targets, and dropout
Arguments
---------
batch_size: Batch size, number of sequences per batch
num_steps: Number of sequence steps in a batch
'''
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
# Keep probability placeholder for drop out layers
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
''' Build LSTM cell.
Arguments
---------
keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
lstm_size: Size of the hidden layers in the LSTM cells
num_layers: Number of LSTM layers
batch_size: Batch size
'''
### Build the LSTM Cell
def build_cell(lstm_size, keep_prob):
# Use a basic LSTM cell
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
# Add dropout to the cell
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
# Stack up multiple LSTM layers, for deep learning
cell = tf.contrib.rnn.MultiRNNCell([build_cell(lstm_size, keep_prob) for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
return cell, initial_state
def build_output(lstm_output, in_size, out_size):
''' Build a softmax layer, return the softmax output and logits.
Arguments
---------
x: Input tensor
in_size: Size of the input tensor, for example, size of the LSTM cells
out_size: Size of this softmax layer
'''
# Reshape output so it's a bunch of rows, one row for each step for each sequence.
# That is, the shape should be batch_size*num_steps rows by lstm_size columns
seq_output = tf.concat(lstm_output, axis=1)
x = tf.reshape(seq_output, [-1, in_size])
# Connect the RNN outputs to a softmax layer
with tf.variable_scope('softmax'):
softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(out_size))
# Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
# of rows of logit outputs, one for each step and sequence
logits = tf.matmul(x, softmax_w) + softmax_b
# Use softmax to get the probabilities for predicted characters
out = tf.nn.softmax(logits, name='predictions')
return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
''' Calculate the loss from the logits and the targets.
Arguments
---------
logits: Logits from final fully connected layer
targets: Targets for supervised learning
lstm_size: Number of LSTM hidden units
num_classes: Number of classes in targets
'''
# One-hot encode targets and reshape to match logits, one row per batch_size per step
y_one_hot = tf.one_hot(targets, num_classes)
y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
# Softmax cross entropy loss
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
loss = tf.reduce_mean(loss)
return loss
def build_optimizer(loss, learning_rate, grad_clip):
''' Build optmizer for training, using gradient clipping.
Arguments:
loss: Network loss
learning_rate: Learning rate for optimizer
'''
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
return optimizer
class CharRNN:
def __init__(self, num_classes, batch_size=64, num_steps=50,
lstm_size=128, num_layers=2, learning_rate=0.001,
grad_clip=5, sampling=False):
# When we're using this network for sampling later, we'll be passing in
# one character at a time, so providing an option for that
if sampling == True:
batch_size, num_steps = 1, 1
else:
batch_size, num_steps = batch_size, num_steps
tf.reset_default_graph()
# Build the input placeholder tensors
self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
# Build the LSTM cell
cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
### Run the data through the RNN layers
# First, one-hot encode the input tokens
x_one_hot = tf.one_hot(self.inputs, num_classes)
# Run each sequence step through the RNN and collect the outputs
outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
self.final_state = state
# Get softmax predictions and logits
self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
# Loss and optimizer (with gradient clipping)
self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
batch_size = 100 # Sequences per batch
num_steps = 100 # Number of sequence steps per batch
lstm_size = 512 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.001 # Learning rate
keep_prob = 0.5 # Dropout keep probability
epochs = 20
# Save every N iterations
save_every_n = 200
model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
lstm_size=lstm_size, num_layers=num_layers,
learning_rate=learning_rate)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/______.ckpt')
counter = 0
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for x, y in get_batches(encoded, batch_size, num_steps):
counter += 1
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: keep_prob,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.loss,
model.final_state,
model.optimizer],
feed_dict=feed)
end = time.time()
print('Epoch: {}/{}... '.format(e+1, epochs),
'Training Step: {}... '.format(counter),
'Training loss: {:.4f}... '.format(batch_loss),
'{:.4f} sec/batch'.format((end-start)))
if (counter % save_every_n == 0):
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
tf.train.get_checkpoint_state('checkpoints')
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.prediction, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2>02.Small Shop</h2>
Step2: <h2>03.Point in Rectangle</h2>
Step3: <h2>04.Fruit or Vegetable</h2>
Step4: <h2>05.Invalid Number</h2>
Step5: <h2>06.Point on Rectangle Border</h2>
Step6: <h2>07.Fruit Shop</h2>
Step7: <h2>08.Trade Comissions</h2>
Step8: <h2>09.Day of Week</h2>
Step9: <h2>10.Animal Type</h2>
Step10: <h2>11.Cinema</h2>
Step11: <h2>12.Volleyball</h2>
Step12: <h2>13.Point in the Figure</h2>
|
<ASSISTANT_TASK:>
Python Code:
age = float(input())
sex = input()
if sex == "m":
if age >= 16:
print("Mr.")
else:
print("Master")
else:
if age >= 16:
print("Ms.")
else:
print("Miss")
product = input()
city = input()
quantity = float(input())
price = 0
if city == "Sofia":
if product == "coffee":
price = quantity * 0.50
elif product == "water":
price = quantity * 0.80
elif product == "beer":
price = quantity * 1.20
elif product == "sweets":
price = quantity * 1.45
elif product == "peanuts":
price = quantity * 1.60
elif city == "Plovdiv":
if product == "coffee":
price = quantity * 0.40
elif product == "water":
price = quantity * 0.70
elif product == "beer":
price = quantity * 1.15
elif product == "sweets":
price = quantity * 1.30
elif product == "peanuts":
price = quantity * 1.50
elif city == "Varna":
if product == "coffee":
price = quantity * 0.45
elif product == "water":
price = quantity * 0.70
elif product == "beer":
price = quantity * 1.10
elif product == "sweets":
price = quantity * 1.35
elif product == "peanuts":
price = quantity * 1.55
print(float("{0:.2f}".format(price)))
x1 = float(input())
y1 = float(input())
x2 = float(input())
y2 = float(input())
x = float(input())
y = float(input())
if x >= x1 and x <= x2 and y >= y1 and y <= y2:
print("Inside")
else:
print("Outside")
product = input()
if product == "banana":
print("fruit")
elif product == "apple":
print("fruit")
elif product == "kiwi":
print("fruit")
elif product == "cherry":
print("fruit")
elif product == "lemon":
print("fruit")
elif product == "grapes":
print("fruit")
elif product == "tomato":
print("vegetable")
elif product == "cucumber":
print("vegetable")
elif product == "pepper":
print("vegetable")
elif product == "carrot":
print("vegetable")
else:
print("unknown")
num = float(input())
if num == 0 or (num >= 100 and num <= 200):
print()
else:
print("invalid")
x1 = float(input())
y1 = float(input())
x2 = float(input())
y2 = float(input())
x = float(input())
y = float(input())
if ((( y == y1 or y == y2)and (x1 <= x and x <= x2))) or ((x == x1 or x == x2)and (y1 <= y and y <= y2)):
print("Border")
else:
print("Inside / Outside")
fruit = input()
day = input()
quantity = float(input())
price = 0
valid_Day = day == "Monday" or day == "Tuesday" or\
day == "Wednesday" or day == "Thursday" or\
day == "Friday"
if day == "Saturday" or day == "Sunday":
if fruit == "banana":
price = quantity * 2.70
print(price)
elif fruit == "apple":
price = quantity * 1.25
print(price)
elif fruit == "orange":
price = quantity * 0.90
print(price)
elif fruit == "grapefruit":
price = quantity * 1.60
print(price)
elif fruit == "kiwi":
price = quantity * 3.00
print(price)
elif fruit == "pineapple":
price = quantity * 5.60
print(price)
elif fruit == "grapes":
price = quantity * 4.20
print(price)
else:
print("error")
elif valid_Day:
if fruit == "banana":
price = quantity * 2.50
print(price)
elif fruit == "apple":
price = quantity * 1.20
print(price)
elif fruit == "orange":
price = quantity * 0.85
print(price)
elif fruit == "grapefruit":
price = quantity * 1.45
print(price)
elif fruit == "kiwi":
price = quantity * 2.70
print(price)
elif fruit == "pineapple":
price = quantity * 5.50
print(price)
elif fruit == "grapes":
price = quantity * 3.85
print(price)
else:
print("error")
else:
print("error")
city = input()
sales = float(input())
commission = 0
commission_Percent = 0
if city == "Sofia":
if 0 <= sales <= 500:
commission_Percent = 0.05
elif 500 <= sales <= 1000:
commission_Percent = 0.07
elif 1000 <= sales <= 10000:
commission_Percent = 0.08
elif sales > 10000:
commission_Percent = 0.12
elif city == "Varna":
if 0 <= sales <= 500:
commission_Percent = 0.045
elif 500 <= sales <= 1000:
commission_Percent = 0.075
elif 1000 <= sales <= 10000:
commission_Percent = 0.10
elif sales > 10000:
commission_Percent = 0.13
else:
print("error")
elif city == "Plovdiv":
if 0 <= sales <= 500:
commission_Percent = 0.055
elif 500 <= sales <= 1000:
commission_Percent = 0.08
elif 1000 <= sales <= 10000:
commission_Percent = 0.12
elif sales > 10000:
commission_Percent = 0.145
else:
print("error")
else:
print("error")
commission = sales * commission_Percent
print(float("{0:.2f}".format(commission)))
num = float(input())
if num == 1:
print("Monday")
elif num == 2:
print("Tuesday")
elif num == 3:
print("Wednesday")
elif num == 4:
print("Thursday")
elif num == 5:
print("Friday")
elif num == 6:
print("Saturday")
elif num == 7:
print("Sunday")
else:
print("error")
animal = input()
if animal == "dog":
print("mammal")
elif animal == "crocodile" or animal == "snake" or animal == "tortoise":
print("reptile")
else:
print("unknown")
projections_Type = input()
row = int(input())
cow = int(input())
tickets_Price = 0
total_Price = 0
if projections_Type == "Premiere":
tickets_Price = 12
elif projections_Type == "Normal":
tickets_Price = 7.5
elif projections_Type == "Discount":
tickets_Price = 5
total_Price = tickets_Price * (row * cow)
print("{0:.2f} leva".format(total_Price))
import math
years_Type = input()
holidays = int(input())
weekends = int(input())
times = 0
weekends_games = 0
games_in_Sofia = 0
if years_Type == "normal":
weekends_games = (48 - weekends) * ( 3 / 4)
games_in_Sofia = holidays * (2/3)
times = weekends_games + games_in_Sofia + weekends
print(math.floor(times))
elif years_Type == "leap":
weekends_games = (48 - weekends) * ( 3 / 4)
games_in_Sofia = holidays * (2/3)
times = weekends_games + games_in_Sofia + weekends
times += 0.15 * times
print(math.floor(times))
h = int(input())
x = int(input())
y = int(input())
left_square = (0 < x <= h) and (0 < y < h) # Is the point in the left square.
mid = (h < x < 2 * h) and (0 < y < 4 * h) # Is the point in the middle column.
right_square = (2 * h <= x < 3 * h) and (0 < y < h) # Is the point in the right square.
left_squares_borders = (x == 0 and 0 <= y <= h)or\
(y == h and 0 <= x <= h) # Is the point on the left squares border.
down_border = (y == 0 and 0 <= x <= h * 3)
right_squares_borders = (x == h * 3 and 0 <= y <= h)or\
(y == h and 2 * h <= x <= 3 * h) # Is the point on the right squares border.
mid_borders = (x == 2 * h and h <= y <= 4 * h)or\
(x == h and h <= y <= 4 * h)
top_border = (y == 4 * h and h <= x <= 2 * h)
inside = left_square or mid or right_square
border = left_squares_borders or right_squares_borders or\
down_border or mid_borders or top_border
if inside:
print("inside")
elif border:
print("border")
else:
print("Outside")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connect to Timesketch
Step2: Now that we've connected to the Timesketch server, we need to select the Sketch that has the CTF timeline.
Step3: Then we'll select the MUS2019-CTF sketch (shown as sketch 3 above; you can change the number below to select a different sketch)
Step4: Lastly, I'll briefly explain a few parameters of the explore function, which we'll use heavily when answering questions.
Step5: Multiple results, as is expected since Plaso creates multiple records for different types of timestamps, but they all point to the same filename
Step6: Q
Step7: That's a bunch of rows, so let's filter it down by searching for messages that contain '60725-10'
Step8: That filename is really long and cut off; let's just select that field, then deduplicate using set()
Step9: Another way to solve this is to query for the file reference number directly. That's not as easy as it sounds, since Plaso stores it in the hex form (I'm working on fixing that). We can work with that though!
Step10: The file_reference value is not the format we want, since it's hard to tell what the sequence number is. We can convert it to a more useful form though
Step11: There. Now we have the file_reference number in an easier-to-read format, and the history of all filenames that MFT entry 60725 has had! It's easy to look for the entry with a sequence number of 10 and get our answer.
Step12: Q
Step13: In the second row of the results, we can find the correct path we're looking for in the message and see that the corresponding inode is 99916. We could do another search, similar to how we answered other questions... or we could just look down a few rows for a USN entry that shows
Step14: Q
Step15: You can see the VSN in a readable format at the end of the device_path or in the message string. I'm only seeing one value here, so we don't need to determine which drive was the OS one. If we did, I'd look for some system processes that need to run from the OS drive to get the right VSN.
Step16: TeamViewer Questions
Step17: That returned a lot of results (600+). We could page through them all, but why not see if there are any interesting clusters first? That sounds like a job for a visualization!
Step18: Or to use grouping/aggregation in pandas
Step19: Okay, so from the graphs it looks like we have a good cluster at the end of February; let's look closer. I'll slice the results to only show after 2019-02-20
Step20: So from this, in a short interval starting 2019-02-25T20
Step21: Q
Step22: Registry Questions
Step23: From the message, the Current control set is 1.
Step24: The message is really long; let's pull it out
Step25: The name of the Timezone is in the message string, as is the ActiveTimeBias, which we can use to get the UTC offset
Step26: Q
Step27: Q
Step28: There are a few entries, but only the last one has what we want. Reading through it (or using Ctrl+F) we can find the 'IPAddress' is 64.44.141.76.
Step29: Or we can use str.extract
Step30: Q
|
<ASSISTANT_TASK:>
Python Code:
# Install the TimeSketch API client if you don't have it
!pip install timesketch-api-client
# Import some things we'll need
from timesketch_api_client import config
from timesketch_api_client import search
import pandas as pd
pd.options.display.max_colwidth = 60
#@title Client Information
# @markdown In order to connect to Timesketch you need to first get a Timesketch object, which will require you to answer
# @markdown some questions the first time you execute this code. The answers are:
# @markdown + **auth_mode**: timesketch (username/pwd combination)
# @markdown + **host_uri**: https://demo.timesketch.org
# @markdown + **username**: demo
# @markdown + **password**: demo
ts_client = config.get_client(confirm_choices=True)
sketches = ts_client.list_sketches()
ctf = None
for sketch in sketches:
print('[{0:d}] {1:s}'.format(sketch.id, sketch.name))
if sketch.name == 'MUS2019 CTF':
ctf = sketch
print(ctf.name)
print(ctf.description)
search_obj = search.Search(ctf)
search_obj.query_string = 'inode:102698'
search_obj.return_fields='datetime,timestamp_desc,data_type,inode,filename'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','inode','filename']]
ts_results.filename.unique()
search_obj = search.Search(ctf)
search_obj.query_string = '60725'
search_obj.return_fields='datetime,timestamp_desc,data_type,filename,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','filename','message']]
ts_results[ts_results.message.str.contains('60725-10')]
set(ts_results[ts_results.message.str.contains('60725-10')].filename)
search_obj = search.Search(ctf)
search_obj.query_string = '60725'
search_obj.return_fields='datetime,timestamp_desc,data_type,file_reference,filename,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','file_reference','filename','message']]
# Drop any rows with NaN, since they aren't what we're looking for and will
# break the below function.
ts_results = ts_results.dropna()
pd.options.display.max_colwidth = 110
# Replace the file_reference hex value with the human-readable MFT-Seq version.
# This is basically what Plaso does to display the result in the 'message'
# string we searched for.
ts_results['file_reference'] = ts_results['file_reference'].map(
lambda x: '{0:d}-{1:d}'.format(int(x) & 0xffffffffffff, int(x) >> 48))
ts_results[['datetime','timestamp_desc','data_type','file_reference','filename']]
search_obj = search.Search(ctf)
search_obj.query_string = 'update_sequence_number:546416480'
search_obj.return_fields='datetime,timestamp_desc,data_type,update_sequence_number,filename'
ts_results = search_obj.table
ts_results.shape
#ts_results[['datetime','timestamp_desc','data_type','update_sequence_number','filename']]
search_obj = search.Search(ctf)
search_obj.query_string = 'FTK Imager.exe'
search_obj.return_fields='datetime,timestamp_desc,data_type,inode,message,filename'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','inode','message']]
ts_results[
~ts_results.filename.isna() & (
ts_results.filename.str.contains(r'Users\\Administrator\\Desktop\\FTK_Imager_Lite_3.1.1\\FTK Imager.exe'))][['filename', 'inode']].drop_duplicates()
search_obj = search.Search(ctf)
search_obj.query_string = 'data_type:"windows:volume:creation"'
search_obj.return_fields='datetime,timestamp_desc,data_type,device_path,hostname,serial_number,message'
ts_results = search_obj.table
pd.options.display.max_colwidth = 70
ts_results[['datetime','timestamp_desc','data_type','device_path','hostname','serial_number','message']]
for serial_nr in ts_results.serial_number.unique():
print('{0:08X}'.format(serial_nr))
search_obj = search.Search(ctf)
search_obj.query_string = 'TeamViewer'
search_obj.return_fields='datetime,timestamp_desc,timestamp,data_type,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','message']]
ts_results = ts_results.set_index('datetime')
ts_results['2018':].message.resample('D').count().plot()
ts_results.reset_index(inplace=True)
ts_results['day'] = ts_results.datetime.dt.strftime('%Y%m%d')
group = ts_results[['day', 'timestamp']].groupby('day', as_index=False)
group_df = group.count().rename(columns={'timestamp': 'count'})
group_df.sort_values('count', ascending=False)[:10]
search_obj = search.Search(ctf)
date_chip = search.DateRangeChip()
date_chip.start_time = '2019-02-25T00:00:00'
date_chip.end_time = '2019-03-04T23:59:59'
search_obj.query_string = 'TeamViewer'
search_obj.add_chip(date_chip)
search_obj.return_fields = '*'
ts_results = search_obj.table
#ts_results = ts_results.set_index('datetime')
#ts_results['2019-02-20':][['timestamp_desc','data_type','filename','message']]
ts_results.data_type.value_counts()
ts_results.search_string.value_counts()
ts_results[ts_results.data_type.str.contains('chrome')][['datetime', 'url', 'domain', 'search_string', 'message', 'title']]
ts_results[ts_results.data_type == 'fs:stat'][['datetime', 'display_name', 'timestamp_desc']]
search_obj = search.Search(ctf)
search_obj.query_string = 'data_type:"windows:prefetch:execution" AND teamviewer_desktop.exe'
search_obj.return_fields = 'datetime,timestamp_desc,data_type,executable,run_count,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','executable','run_count','message']]
search_obj = search.Search(ctf)
search_obj.query_string = 'data_type:"windows:prefetch:execution" AND teamviewer_desktop.exe'
search_obj.return_fields = '*'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','executable','run_count', 'path_hints']]
# Escaping fun: We need to escape the slashes in the key_path once for Timesketch and once for Python, so we'll have triple slashes (\\\)
search_obj = search.Search(ctf)
search_obj.query_string = 'data_type:"windows:registry:key_value" AND key_path:"HKEY_LOCAL_MACHINE\\\System\\\Select"'
search_obj.return_fields='datetime,timestamp_desc,data_type,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','message']]
search_obj = search.Search(ctf)
search_obj.query_string = 'data_type:"windows:registry:timezone"'
search_obj.return_fields = 'datetime,timestamp_desc,data_type,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','message']]
message = list(ts_results.message.unique())[0]
buffer = []
first = True
key = ''
for word in message.split():
if first:
print(word)
first = False
continue
if not word.endswith(':'):
buffer.append(word)
continue
if key:
words = ' '.join(buffer)
buffer = []
print(f'{" "*4}{key} = {words}')
key = word[:-1]
words = ' '.join(buffer)
buffer = []
print(f'{" "*4}{key} = {words}')
# The ActiveTimeBias is in minutes, so divide by -60 (I don't know why it's stored negative):
420 / -60
search_obj = search.Search(ctf)
search_obj.query_string = 'data_type:"windows:registry:installation"'
search_obj.return_fields = 'datetime,timestamp_desc,data_type,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','message']]
search_obj = search.Search(ctf)
search_obj.query_string = 'key_path:"System\\\ControlSet001\\\Services\\\Tcpip\\\Parameters\\\Interfaces"'
search_obj.return_fields = 'datetime,timestamp_desc,data_type,message'
ts_results = search_obj.table
ts_results[['datetime','timestamp_desc','data_type','message']]
set(ts_results.message)
ts_results.message.str.extract(r'DhcpIPAddress: \[REG_SZ\] ([^ ]+)').drop_duplicates()
search_obj = search.Search(ctf)
search_obj.query_string = 'data_type:"windows:evtx:record" AND display_name:"System.evtx" AND event_identifier:"1074"'
search_obj.return_fields='*'
ts_results = search_obj.table
ts_results = ts_results.set_index('datetime')
ts_results['2019-02-25':'2019-02-26'][['timestamp_desc','data_type','username','message']]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create the tree
Step2: Specify where to evaluate the kernel
Step3: Specify the kernel and its hyperparameters
Step4: Sample the kernel
Step5: Plot the sample
Step6: You can rerun the same code as many times as you want and get different sample paths
|
<ASSISTANT_TASK:>
Python Code:
import pickle
import gpflow
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from BranchedGP import BranchingTree as bt
from BranchedGP import VBHelperFunctions as bplot
from BranchedGP import branch_kernParamGPflow as bk
plt.style.use("ggplot")
%matplotlib inline
branchingPoint = 0.5
tree = bt.BinaryBranchingTree(
0, 10, fDebug=False
) # set to true to print debug messages
tree.add(None, 1, branchingPoint) # single branching point
(fm, fmb) = tree.GetFunctionBranchTensor()
t = np.linspace(0.01, 1, 10)
(XForKernel, indicesBranch, Xtrue) = tree.GetFunctionIndexList(t, fReturnXtrue=True)
Bvalues = np.expand_dims(np.asarray(tree.GetBranchValues()), 1)
KbranchParam = bk.BranchKernelParam(gpflow.kernels.RBF(1), fm, b=Bvalues)
KbranchParam.kern.lengthscales = 2
KbranchParam.kern.variance = 1
samples = bk.SampleKernel(KbranchParam, XForKernel)
bk.PlotSample(XForKernel, samples)
indKernel = bk.IndKern(gpflow.kernels.RBF(1))
samples = bk.SampleKernel(indKernel, XForKernel)
bk.PlotSample(XForKernel, samples)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configuration
Step2: Read images and labels [WORK REQUIRED]
Step3: Useful code snippets
Step4: Decode a JPEG and extract folder name in TF
|
<ASSISTANT_TASK:>
Python Code:
import os, sys, math
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
print("Tensorflow version " + tf.__version__)
#@title "display utilities [RUN ME]"
def display_9_images_from_dataset(dataset):
plt.figure(figsize=(13,13))
subplot=331
for i, (image, label) in enumerate(dataset):
plt.subplot(subplot)
plt.axis('off')
plt.imshow(image.numpy().astype(np.uint8))
plt.title(label.numpy().decode("utf-8"), fontsize=16)
subplot += 1
if i==8:
break
plt.tight_layout()
plt.subplots_adjust(wspace=0.1, hspace=0.1)
plt.show()
GCS_PATTERN = 'gs://flowers-public/*/*.jpg'
CLASSES = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips'] # flower labels (folder names in the data)
nb_images = len(tf.io.gfile.glob(GCS_PATTERN))
print("Pattern matches {} images.".format(nb_images))
#
# YOUR CODE GOES HERE
#
#display_9_images_from_dataset(dataset)
def decode_jpeg(filename):
bits = tf.io.read_file(filename)
image = tf.io.decode_jpeg(bits)
return image
def decode_jpeg_and_label(filename):
bits = tf.io.read_file(filename)
image = tf.io.decode_jpeg(bits)
# parse flower name from containing directory
label = tf.strings.split(tf.expand_dims(filename, axis=-1), sep='/')
label = label.values[-2]
return image, label
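# One possible way to finish the "[WORK REQUIRED]" cell above (a sketch, not the only
# solution): list the files matching GCS_PATTERN and map the decoder defined just above.
filenames = tf.data.Dataset.list_files(GCS_PATTERN)
dataset = filenames.map(decode_jpeg_and_label)
display_9_images_from_dataset(dataset)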
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In our algorithm we know that the matrix is banded, so we apply a much faster algorithm whose cost is linear in the matrix size and quadratic in the band size.
Step2: Roundoff errors lead to a loss of orthogonality; the second (modified Gram-Schmidt) function accumulates fewer errors.
Step3: Problem 3 (Word2Vec as Matrix Factorization) 45 pts
Step4: Download the dataset enwik8 of compressed Wikipedia articles and preprocess the raw data with the Perl script main_.pl. This script cleans all unnecessary symbols, converts all words to lowercase, and produces only sentences made of words.
Step5: Construct the word vocabulary from the obtained sentences; it enumerates the words that occur more than $r=200$ times in the corpus.
Step6: Scan the text corpus with a sliding window of size $5$ and step $1$ (which corresponds to $L=2$) and construct the co-occurrence word-context matrix $D$ with elements $D_{wc}=\#(w,c)$. Ignore words that occur fewer than $r=200$ times, but still include them in the sliding window. Please see the graphical illustration of the described procedure.
Step7: To find good word embeddings, Levy and Goldberg, 2015 proposed to find rank-$d$ SVD of Shifted Positive Pointwise Mutual Information (SPPMI) matrix
Step10: Write class WordVectors using provided template.
Step11: Calculate the top 10 nearest neighbours with the corresponding cosine similarities for the words {numerical, linear, algebra} and insert them in the corresponding functions in pset2.py.
Step12: (5 pts) Implement the power method for a given matrix $A$, an initial guess $x_0$ and a number of iterations num_iter. It should be organized as a function power_method(A, x0, num_iter) that outputs an approximation to the eigenvector $x$, the eigenvalue $\lambda$ and the history of residuals $\{\|Ax_k - \lambda_k x_k\|_2\}$. Make sure that the method converges to the correct solution on the matrix $\begin{bmatrix} 2 & -1 \\ -1 & 2 \end{bmatrix}$, which is known to have the largest eigenvalue equal to $3$.
Step13: (2 pts) Run the power method for the graph presented above and plot residuals $\|Ax_k - \lambda_k x_k\|_2$ as a function of $k$ for num_iter=100 and random initial guess x0. Explain the absence of convergence.
Step14: (2 pts) Consider the same graph, but with the directed edge that goes from node 3 to node 4 removed. Plot residuals as in the previous task and discuss the convergence. Now, run the power method with num_iter=100 for 10 different initial guesses and print/plot the resulting approximated eigenvectors. Why do they depend on the initial guess?
Step15: So the convergence ratio is less than 1, hence the method converges. The result of the process depends on the initial vector, since the iteration may converge to the eigenvector closest to (most collinear with) the initial guess (as can be seen when $i = 2$).
Step16: (5 pts) Find the second largest (in absolute value) eigenvalue of the obtained matrix $A_d$. How and why is it connected to the damping factor $d$? What is the convergence rate of the PageRank algorithm when the damping factor is used?
Step17: Here it can be seen that after regularization $\lambda_2' = d \lambda_2$, where $d$ is the damping factor.
Step18: Much faster!
Step19: (1 pts) In order to provide pagerank_matvec to your power_method (without rewriting it) for fast calculation of $A_dx$, you can create a LinearOperator
Step20: (2 pts) Run the power method starting from the vector of all ones and plot residuals $\|A_dx_k - \lambda_k x_k\|_2$ as a function of $k$ for $d=0.85$.
Step21: (1 pts) Print names of the top-10 authors according to PageRank over DBLP when $d=0.85$. Comment on your findings.
|
<ASSISTANT_TASK:>
Python Code:
# Implement function in the ```pset2.py``` file
from pset2 import band_lu
import scipy.sparse
import scipy as sp # can be used with broadcasting of scalars if desired dimensions are large
import numpy as np
import scipy.linalg as lg
import time
import matplotlib.pyplot as plt
%matplotlib inline
def build_diag(diag_broadcast, n):
length = len(diag_broadcast) // 2
diag_map = np.arange(-length, length + 1, 1)
A = sp.sparse.diags(diag_broadcast, diag_map, shape=(n, n)).toarray()
return A
def do_test(matrix_size):
my_impl = []
numpy_impl = []
for n in matrix_size:
diag_elem = np.random.random((1, 3))[0]
start = time.time()
L, U = band_lu(diag_elem, n)
end = time.time()
my_impl.append(end - start)
start = time.time()
A = build_diag(diag_elem, n)
_, _, _ = lg.lu(A,permute_l=False)
end = time.time()
numpy_impl.append(end - start)
return my_impl, numpy_impl
matrix_size = [x for x in range(100, 3000, 100)]
my_impl_ = []
numpy_impl_ = []
N = 20
for i in range(N):
a, b = do_test(matrix_size)
my_impl_.append(a)
numpy_impl_.append(b)
my_impl = [k/N for k in [sum(i) for i in zip(*my_impl_)]]
numpy_impl = [k/N for k in [sum(i) for i in zip(*numpy_impl_)]]
plt.figure(figsize=(14,7))
plt.loglog(matrix_size, my_impl, label='Optimized version for band matrices')
plt.loglog(matrix_size, numpy_impl, label='Scipy algorithm')
plt.ylabel(r"Time", size=14)
plt.xlabel("Square matrix size, items", size=14)
plt.title("Different LU decomposition algorithm performance (band=3)", size=14)
plt.legend(loc='upper left')
plt.grid()
plt.show()
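# Quick correctness check (a sketch; it assumes band_lu from the course's pset2 module
# returns dense L and U factors): L @ U should reproduce the banded matrix built from the
# same diagonal broadcast by build_diag above.
diag_elem = np.array([1.0, -2.0, 1.0])
L, U = band_lu(diag_elem, 6)
print(np.allclose(L @ U, build_diag(diag_elem, 6)))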
def build_diag(diag_broadcast, n):
length = len(diag_broadcast) // 2
diag_map = np.arange(-length, length + 1, 1)
A = sp.sparse.diags(diag_broadcast, diag_map, shape=(n, n)).toarray()
return A
def do_test(matrix_size):
my_impl = []
numpy_impl = []
for n in matrix_size:
diag_elem = np.random.random((1, 5))[0]
start = time.time()
L, U = band_lu(diag_elem, n)
end = time.time()
my_impl.append(end - start)
start = time.time()
A = build_diag(diag_elem, n)
_, _, _ = lg.lu(A,permute_l=False)
end = time.time()
numpy_impl.append(end - start)
return my_impl, numpy_impl
matrix_size = [x for x in range(100, 3000, 100)]
my_impl_ = []
numpy_impl_ = []
N = 20
for i in range(N):
a, b = do_test(matrix_size)
my_impl_.append(a)
numpy_impl_.append(b)
my_impl = [k/N for k in [sum(i) for i in zip(*my_impl_)]]
numpy_impl = [k/N for k in [sum(i) for i in zip(*numpy_impl_)]]
plt.figure(figsize=(14,7))
plt.loglog(matrix_size, my_impl, label='Optimized version for band matrices')
plt.loglog(matrix_size, numpy_impl, label='Scipy algorithm')
plt.ylabel(r"Time", size=14)
plt.xlabel("Square matrix size, items", size=14)
plt.title("Different LU decomposition algorithm performance (band=5)", size=14)
plt.legend(loc='upper left')
plt.grid()
plt.show()
# Implement the functions in the ```pset2.py``` file
from pset2 import gram_schmidt_qr
from pset2 import modified_gram_schmidt_qr
n = 20
x = np.linspace(0, 1, n)
V = np.vander(x)
Q, _ = gram_schmidt_qr(V)
print("Error (first version):", np.linalg.norm(Q.T @ Q - np.identity(n), 2))
Q, _ = modified_gram_schmidt_qr(V)
print("Error (second version):", np.linalg.norm(Q.T @ Q - np.identity(n), 2))
A = np.array([[4, -4, 9], [4, 2, 0], [2, 4, 0]])
from pset2 import householder_qr  # assuming householder_qr lives in pset2 alongside the other QR routines
Q, R = householder_qr(A)
np.allclose(np.dot(Q, R), A)
import os
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse.linalg import svds
# Load enwik 8
import re
file = open("data/enwik8.txt", "r")
doclist = [line for line in file]
docstr = ''.join(doclist)
sentences = re.split(r'[.!?]', docstr)
sentences = [sentence.split() for sentence in sentences if len(sentence) > 1]
print (sentences[1249])
def create_vocabulary(sentences, r=200):
vocabulary = {}
# Your code is here
return vocabulary
vocab = create_vocabulary(sentences)
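# A possible implementation sketch for the template above (names with the _sketch suffix are
# illustrative, not part of the assignment); the >= r threshold convention may need adjusting.
from collections import Counter
def create_vocabulary_sketch(sentences, r=200):
    counts = Counter(word for sentence in sentences for word in sentence)
    frequent_words = sorted(w for w, c in counts.items() if c >= r)
    return {w: i for i, w in enumerate(frequent_words)}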
def create_corpus_matrix(sentences, vocabulary):
# Your code is here
return corpus_matrix
D = create_corpus_matrix(sentences, vocab)
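# Sketch of the sliding-window co-occurrence count described in the task (window of 5 tokens,
# i.e. L=2 context positions on each side). Out-of-vocabulary words are skipped as word/context
# entries but still occupy window positions; this is only one way to fill the template above.
from collections import defaultdict
from scipy.sparse import csr_matrix
def create_corpus_matrix_sketch(sentences, vocabulary, L=2):
    counts = defaultdict(int)
    for sentence in sentences:
        for i, word in enumerate(sentence):
            if word not in vocabulary:
                continue
            for j in range(max(0, i - L), min(len(sentence), i + L + 1)):
                if j != i and sentence[j] in vocabulary:
                    counts[(vocabulary[word], vocabulary[sentence[j]])] += 1
    rows, cols, vals = zip(*((w, c, v) for (w, c), v in counts.items()))
    n = len(vocabulary)
    return csr_matrix((vals, (rows, cols)), shape=(n, n))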
def compute_embeddings(D, k, d=200):
# Your code is here
return embedding_matrix
k = 5 # negative sampling parameter
W = compute_embeddings(D, k)
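# Sketch of the SPPMI + truncated SVD factorization described above; W = U * sqrt(Sigma) is one
# common choice of embedding matrix. Densifying D here is only for clarity - for a very large
# vocabulary you would keep everything sparse.
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import svds
def compute_embeddings_sketch(D, k, d=200):
    D = np.asarray(D.todense(), dtype=float) if hasattr(D, "todense") else np.asarray(D, dtype=float)
    total = D.sum()
    with np.errstate(divide="ignore", invalid="ignore"):
        pmi = np.log(D * total / (D.sum(axis=1, keepdims=True) * D.sum(axis=0, keepdims=True)))
    sppmi = np.maximum(pmi - np.log(k), 0.0)
    sppmi[~np.isfinite(sppmi)] = 0.0
    U, S, Vt = svds(csr_matrix(sppmi), k=d)
    order = np.argsort(-S)
    return U[:, order] * np.sqrt(S[order])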
class WordVectors:
def __init__(self, vocabulary, embedding_matrix):
self.vocab = vocabulary
self.W = embedding_matrix
self.inv_vocab = {v: k for k, v in self.vocab.items()}
def word_vector(self, word):
Takes word and returns its word vector.
# Your code is here
return word_vector
def nearest_words(self, word, top_n=10):
Takes word from the vocabulary and returns its top_n
nearest neighbors in terms of cosine similarity.
# Your code is here
return neighbors
model = WordVectors(vocab, W)
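# Sketch of the two template methods above, written as standalone helpers so the graded
# template is left untouched; cosine_similarity was imported earlier for exactly this purpose.
def word_vector_sketch(model, word):
    return model.W[model.vocab[word]]
def nearest_words_sketch(model, word, top_n=10):
    sims = cosine_similarity(model.W[model.vocab[word]].reshape(1, -1), model.W)[0]
    order = [i for i in np.argsort(-sims) if i != model.vocab[word]]
    return [(model.inv_vocab[i], sims[i]) for i in order[:top_n]]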
model.nearest_words("anarchism")
model.nearest_words("ussr")
model.nearest_words("rap")
# implement the functions in the pset2.py file
from pset2 import pagerank_matrix, power_method, pagerank_matvec
G = np.matrix([[0, 0, 1, 0, 0],
[1, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0]])
A = pagerank_matrix(G)
print("Matrix A:")
print(A)
ev = np.linalg.eig(A)
print(ev[0])
print("Largest eigenvalue is 1 with multiplicity 2")
A = np.matrix([[2, -1], [-1, 2]])
x0 = np.matrix([0, 1])
x, l, res = power_method(A, x0, 10)
print(x, l)
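# Reference sketch of the power method with the interface described in the task (the graded
# implementation lives in pset2.py; this is only an illustration of the same idea).
def power_method_sketch(A, x0, num_iter):
    x = np.asarray(x0, dtype=float).reshape(-1, 1)
    x = x / np.linalg.norm(x)
    res = []
    for _ in range(num_iter):
        y = A.dot(x)
        lam = float(x.T.dot(y))          # Rayleigh quotient estimate of the eigenvalue
        res.append(float(np.linalg.norm(y - lam * x)))
        x = y / np.linalg.norm(y)
    return x, lam, np.array(res)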
A = pagerank_matrix(G)
x0 = np.random.rand(G.shape[0], 1)
x, l, res = power_method(A, x0, 100)
plt.figure(figsize=(14, 7))
plt.loglog(res/res[0])
plt.ylabel("Residual", size=14)
plt.xlabel("Iteration number", size=14)
plt.title(r'Convergence plot for the largest eigenvalue', size=14)
plt.grid()
plt.show()
G = np.matrix([[0, 0, 1, 0, 0],
[1, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
A = pagerank_matrix(G)
ev = np.linalg.eig(A)
print(sorted([abs(x) for x in ev[0]], reverse=True))
x0 = np.random.rand(G.shape[0], 1)
x, l, res = power_method(A, x0, 100)
plt.figure(figsize=(14, 7))
plt.loglog(res/res[0])
plt.ylabel("Residual", size=14)
plt.xlabel("Iteration number", size=14)
plt.title(r'Convergence plot for the largest eigenvalue (one edge is removed)', size=14)
plt.grid()
plt.show()
np.set_printoptions(precision=4)
for i in range(10):
x0 = np.random.rand(G.shape[0], 1)
if (i == 2):
x0[0] = 0
x0[1] = 0
x0[2] = 0
x0[3] = 1
x0[4] = -1
x, l, res = power_method(A, x0, 100)
print("Eigen vector: ")
print(x.T)
d = 0.97
A_d = d * A + (1 - d)/A.shape[0] * np.ones(A.shape)
x0 = np.random.rand(A_d.shape[0], 1)
x, l, res = power_method(A_d, x0, 100)
plt.figure(figsize=(14, 7))
plt.loglog(res/res[0])
plt.ylabel("Residual", size=14)
plt.xlabel("Iteration number", size=14)
plt.title(r'Convergence plot for the largest eigenvalue (with regularization)', size=14)
plt.grid()
plt.show()
G = np.matrix([[0, 0, 1, 0, 0],
[1, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0]])
A = pagerank_matrix(G)
A_d = d * A + (1 - d)/A.shape[0] * np.ones(A.shape)
ev = np.linalg.eig(A)
print("Before regularization")
print('l = ', sorted([abs(x) for x in ev[0]], reverse=True))
ev = np.linalg.eig(A_d)
print("After regularization")
print('l = ', sorted([abs(x) for x in ev[0]], reverse=True))
import random
import time
N = 10000
G = np.zeros((N, N))
subG = (np.random.random((10, 10)) - 0.5).clip(0,1)
subG[subG.nonzero()] = 1
p1, p3 = random.randint(1, N - 10), random.randint(1, N - 10)
p2, p4 = p1 + 10, p3 + 10
G[p1:p2,p3:p4] = subG
x = np.random.rand(N, 1)
G = sp.sparse.csr_matrix(G)
A = pagerank_matrix(G)
d = 0.97
%%time
y = pagerank_matvec(A, d, x)
%%time
y = np.dot((d * A + (1 - d)/A.shape[0] * np.ones(A.shape)), x)
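# Sketch of the fast matvec compared above: the damped matrix never has to be materialized,
# since A_d x = d*(A x) + (1 - d)/N * sum(x) * ones, which keeps the cost linear in nnz(A).
def pagerank_matvec_sketch(A, d, x):
    N = A.shape[0]
    return d * (A @ x) + (1.0 - d) / N * float(np.sum(x)) * np.ones((N, 1))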
from scipy.sparse import load_npz
import numpy as np
def load_dblp(path_auth, path_graph):
G = load_npz(path_graph).astype(float)
with np.load(path_auth) as data: authors = data['authors']
return G, authors
G, authors = load_dblp('dblp_authors.npz', 'dblp_graph.npz')
density = np.count_nonzero(G.data)/(G.shape[0]) ** 2
print("Density: ", density)
diags = np.squeeze(np.asarray(np.sum(G, axis=1)))
top10 = sorted(range(len(diags)), reverse=True, key=lambda k: diags[k])[:10]
for i,val in enumerate(top10):
print(str(i + 1) + ". ", authors[val])
G[G.nonzero()] = 1
A = pagerank_matrix(G)
columns_sum = np.squeeze(np.asarray(np.sum(A, axis=0)))
epsilon = 10e-13
passed = 1
for col in range(A.shape[0]):
if (abs(columns_sum[col]) > epsilon) and (abs(columns_sum[col] - 1) > epsilon):
passed = 0
print(columns_sum[col])
break
if (passed):
print("Matrix is stochastic")
else:
print("Matrix is not stochastic")
from scipy.sparse import linalg
d = 0.85
L = sp.sparse.linalg.LinearOperator(A.shape, matvec=lambda x, A=A, d=d: pagerank_matvec(A, d, x))
x0 = np.ones((A.shape[0], 1))
x_k, l, res = power_method(L, x0, 50)
plt.figure(figsize=(14, 7))
plt.loglog(res/res[0])
plt.ylabel("Residual", size=14)
plt.xlabel("Iteration number", size=14)
plt.title(r'Convergence plot for the DBLP', size=14)
plt.grid()
plt.show()
top10_2 = sorted(range(len(x_k)), reverse=True, key=lambda k: x_k[k])[:10]
for i,val in enumerate(top10_2):
print(str(i + 1) + ". ", authors[val])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Begin User Input
Step2: Load Settings from previous analysis
Step3: Display Event Detection Tables
Step4: Display Summary Results for Peaks
Step5: Display Summary Results for Bursts
Step6: Interactive Graphs
Step7: Two pannel
Step8: Autocorrelation
Step9: Raster Plot
Step10: Frequency Plot
Step11: Analyze Events by Measurement
Step12: Poincare Plots
Step13: Quick Poincare Plot
Step14: Power Spectral Density
Step15: Time Series
Step16: Use the block below to generate the PSD graph and power in bands results (if selected). scale toggles which units to use for the graph
Step17: Spectrogram
Step18: Descriptive Statistics
Step19: Entropy
Step20: Approximate entropy
Step21: Time Series
Step22: Sample Entropy
Step23: Time Series
Step24: Helpful Stuff
|
<ASSISTANT_TASK:>
Python Code:
from bass import *
#initialize new file
Data = {}
Settings = {}
Results ={}
############################################################################################
#manual Setting block
Settings['folder']= r"/Users/abigaildobyns/Desktop"
Settings['Label'] = r'rat34_ECG.txt'
Settings['Output Folder'] = r"/Users/abigaildobyns/Desktop/demo"
#transformation Settings
Settings['Absolute Value'] = True #Must be True if Savitzky-Golay is being used
Settings['Bandpass Highcut'] = r'none' #in Hz
Settings['Bandpass Lowcut'] = r'none' #in Hz
Settings['Bandpass Polynomial'] = r'none' #integer
Settings['Linear Fit'] = False #between 0 and 1 on the whole time series
Settings['Linear Fit-Rolling R'] = 0.75 #between 0 and 1
Settings['Linear Fit-Rolling Window'] = 1000 #window for rolling mean for fit, unit is index not time
Settings['Relative Baseline'] = 0 #default 0, unless data is normalized, then 1.0. Can be any float
Settings['Savitzky-Golay Polynomial'] = 4 #integer
Settings['Savitzky-Golay Window Size'] = 251 #must be odd. units are index not time
#Baseline Settings
Settings['Baseline Type'] = r'static' #'linear', 'rolling', or 'static'
#For Linear
Settings['Baseline Start'] = 0.0 #start time in seconds
Settings['Baseline Stop'] = 1.0 #end time in seconds
#For Rolling
Settings['Rolling Baseline Window'] = r'none' #leave as 'none' if linear or static
#Peaks
Settings['Delta'] = 0.25
Settings['Peak Minimum'] = -1 #amplitude value
Settings['Peak Maximum'] = 1 #amplitude value
#Bursts
Settings['Burst Area'] = False #calculate burst area
Settings['Exclude Edges'] = True #False to keep edges, True to discard them
Settings['Inter-event interval minimum (seconds)'] = 0.0100 #only for bursts, not for peaks
Settings['Maximum Burst Duration (s)'] = 10
Settings['Minimum Burst Duration (s)'] = 0
Settings['Minimum Peak Number'] = 1 #minimum number of peaks/burst, integer
Settings['Threshold']= 0.15 #linear: proportion of baseline.
#static: literal value.
#rolling, linear ammount grater than rolling baseline at each time point.
#Outputs
Settings['Generate Graphs'] = False #create and save the fancy graph outputs
#Settings that you should not change unless you are a super advanced user:
#These are settings that are still in development
Settings['Graph LCpro events'] = False
Settings['File Type'] = r'Plain' #'LCPro', 'ImageJ', 'SIMA', 'Plain', 'Morgan'
Settings['Milliseconds'] = False
############################################################################################
#Load in a Settings File
#initialize new file
Data = {}
Settings = {}
Results ={}
############################################################################################
#manual Setting block
Settings['folder']= r"/Users/abigaildobyns/Desktop"
Settings['Label'] = r'rat34_ECG.txt'
Settings['Output Folder'] = r"/Users/abigaildobyns/Desktop/demo"
#Load a Settings file
Settings['Settings File'] = r'/Users/abigaildobyns/Desktop/rat34_Settings.csv'
##Settings that you should not change unless you are a super advanced user:
#These are settings that are still in development
Settings['File Type'] = r'Plain' #'LCPro', 'ImageJ', 'SIMA', 'Plain', 'Morgan'
Settings['Milliseconds'] = False
Settings = load_settings(Settings)
Data, Settings, Results = analyze(Data, Settings, Results)
display_settings(Settings)
#grouped summary for peaks
Results['Peaks-Master'].groupby(level=0).describe()
#grouped summary for bursts
Results['Bursts-Master'].groupby(level=0).describe()
#Interactive, single time series by Key
key = 'Mean1'
graph_ts(Data, Settings, Results, key)
key = 'Mean1'
start =100 #start time in seconds
end= 101#end time in seconds
results_timeseries_plot(key, start, end, Data, Settings, Results)
#autocorrelation
key = 'Mean1'
start = 0 #seconds, where you want the slice to begin
end = 1 #seconds, where you want the slice to end.
autocorrelation_plot(Data['trans'][key][start:end])
plt.show()
#raster
raster(Data, Results)
event_type = 'Peaks'
meas = 'Intervals'
key = 'Mean1' #'Mean1' default for single wave
frequency_plot(event_type, meas, key, Data, Settings, Results)
#Get average plots, display only
event_type = 'peaks'
meas = 'Peaks Amplitude'
average_measurement_plot(event_type, meas,Results)
#Batch
event_type = 'Peaks'
meas = 'all'
Results = poincare_batch(event_type, meas, Data, Settings, Results)
pd.concat({'SD1':Results['Poincare SD1'],'SD2':Results['Poincare SD2']})
#quick
event_type = 'Bursts'
meas = 'Burst Duration'
key = 'Mean1'
poincare_plot(Results[event_type][key][meas])
Settings['PSD-Event'] = Series(index = ['Hz','ULF', 'VLF', 'LF','HF','dx'])
#Set PSD ranges for power in band
Settings['PSD-Event']['hz'] = 4.0 #freqency that the interpolation and PSD are performed with.
Settings['PSD-Event']['ULF'] = 0.03 #max of the range of the ultra low freq band. range is 0:ulf
Settings['PSD-Event']['VLF'] = 0.05 #max of the range of the very low freq band. range is ulf:vlf
Settings['PSD-Event']['LF'] = 0.15 #max of the range of the low freq band. range is vlf:lf
Settings['PSD-Event']['HF'] = 0.4 #max of the range of the high freq band. range is lf:hf. hf can be no more than (hz/2)
Settings['PSD-Event']['dx'] = 10 #segmentation for the area under the curve.
event_type = 'Peaks'
meas = 'Intervals'
key = 'Mean1'
scale = 'raw'
Results = psd_event(event_type, meas, key, scale, Data, Settings, Results)
Results['PSD-Event'][key]
#optional
Settings['PSD-Signal'] = Series(index = ['ULF', 'VLF', 'LF','HF','dx'])
#Set PSD ranges for power in band
Settings['PSD-Signal']['ULF'] = 25 #max of the range of the ultra low freq band. range is 0:ulf
Settings['PSD-Signal']['VLF'] = 75 #max of the range of the very low freq band. range is ulf:vlf
Settings['PSD-Signal']['LF'] = 150 #max of the range of the low freq band. range is vlf:lf
Settings['PSD-Signal']['HF'] = 300 #max of the range of the high freq band. range is lf:hf. hf can be no more than (hz/2) where hz is the sampling frequency
Settings['PSD-Signal']['dx'] = 2 #segmentation for integration of the area under the curve.
scale = 'raw' #raw or db
Results = psd_signal(version = 'original', key = 'Mean1', scale = scale,
Data = Data, Settings = Settings, Results = Results)
Results['PSD-Signal']
version = 'original'
key = 'Mean1'
spectogram(version, key, Data, Settings, Results)
#Moving Stats
event_type = 'Peaks'
meas = 'all'
window = 30 #seconds
Results = moving_statistics(event_type, meas, window, Data, Settings, Results)
#Histogram Entropy
event_type = 'Bursts'
meas = 'all'
Results = histent_wrapper(event_type, meas, Data, Settings, Results)
Results['Histogram Entropy']
#Approximate Entropy
event_type = 'Peaks'
meas = 'all'
Results = ap_entropy_wrapper(event_type, meas, Data, Settings, Results)
Results['Approximate Entropy']
#Approximate Entropy on raw signal
#takes a VERY long time
from pyeeg import ap_entropy
version = 'original' #original, trans, shift, or rolling
key = 'Mean1' #Mean1 default key for one time series
start = 0 #seconds, where you want the slice to begin
end = 1 #seconds, where you want the slice to end. The absolute end is -1
ap_entropy(Data[version][key][start:end].tolist(), 2, (0.2*np.std(Data[version][key][start:end])))
#Sample Entropy
event_type = 'Bursts'
meas = 'all'
Results = samp_entropy_wrapper(event_type, meas, Data, Settings, Results)
Results['Sample Entropy']
#on raw signal
#takes a VERY long time
version = 'original' #original, trans, shift, or rolling
key = 'Mean1' #Mean1 default key for one time series
start = 0 #seconds, where you want the slice to begin
end = 1 #seconds, where you want the slice to end. The absolute end is -1
samp_entropy(Data[version][key][start:end].tolist(), 2, (0.2*np.std(Data[version][key][start:end])))
help(moving_statistics)
moving_statistics??
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now set up everything so that the figures show up in the notebook
Step2: More info on other options for Offline Plotly usage can be found here.
Step3: Now we need to begin to build our data dictionary. The easiest way to do this is to use the dict() function, of the general form dict(argument1=value1, argument2=value2, ...)
Step4: Then we create the layout nested dictionary
Step5: Then we use go.Figure() and iplot() to build and display the map
Step6: Real Data US Map Choropleth
Step7: Now our data dictionary with some extra marker and colorbar arguments
Step8: And our layout dictionary with some more arguments
Step9: World Choropleth Map
|
<ASSISTANT_TASK:>
Python Code:
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
import pandas as pd
data = dict(type = 'choropleth',
locations = ['AZ','CA','NY'],
locationmode = 'USA-states',
colorscale= 'Portland',
text= ['text1','text2','text3'],
z=[1.0,2.0,3.0],
colorbar = {'title':'Colorbar Title'})
layout = dict(geo = {'scope':'usa'})
choromap = go.Figure(data = [data],layout = layout)
iplot(choromap)
df = pd.read_csv('2011_US_AGRI_Exports')
df.head()
data = dict(type='choropleth',
colorscale = 'YlOrRd',
locations = df['code'],
z = df['total exports'],
locationmode = 'USA-states',
text = df['text'],
marker = dict(line = dict(color = 'rgb(255,255,255)',width = 2)),
colorbar = {'title':"Millions USD"}
)
layout = dict(title = '2011 US Agriculture Exports by State',
geo = dict(scope='usa',
showlakes = True,
lakecolor = 'rgb(85,173,240)')
)
choromap = go.Figure(data = [data],layout = layout)
iplot(choromap)
df = pd.read_csv('2014_World_GDP')
df.head()
data = dict(
type = 'choropleth',
locations = df['CODE'],
z = df['GDP (BILLIONS)'],
text = df['COUNTRY'],
colorbar = {'title' : 'GDP Billions US'},
)
layout = dict(
title = '2014 Global GDP',
geo = dict(
showframe = False,
projection = {'type':'Mercator'}
)
)
choromap = go.Figure(data = [data],layout = layout)
iplot(choromap)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: API Endpoints
Step2: Convert the byte stream sent over REST back to a numpy array. The numpy array is a 2,400-dimensional embedding representing latent features of the GitHub issue.
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
import numpy as np
from passlib.apps import custom_app_context as pwd_context
API_ENDPOINT = 'https://embeddings.gh-issue-labeler.com/text'
API_KEY = 'YOUR_API_KEY' # Contact maintainers for your api key
data = {'title': 'Fix the issue',
'body': 'I am encountering an error\n when trying to push the button.'}
# sending post request and saving response as response object
r = requests.post(url=API_ENDPOINT,
headers={'Token':pwd_context.hash(API_KEY)},
json=data)
embeddings = np.frombuffer(r.content, dtype='<f4')
embeddings.shape
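# Quick sanity check (illustrative, not part of the original steps): per the
# description above, the service returns a 2,400-dimensional embedding.
assert embeddings.shape == (2400,)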
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Declaring elements in a function
Step2: The function defines a number of parameters that will change the signal, but using the default parameters the function outputs a Curve like this
Step3: HoloMaps
Step4: Note how the keys in our HoloMap map on to two automatically generated sliders. HoloViews supports two types of widgets by default
Step5: Apart from their simplicity and generality, one of the key features of HoloMaps is that they can be exported to a static HTML file, GIF, or video, because every combination of the sliders (parameter values) has been pre-computed already. This very convenient feature of pre-computation becomes a liability for very large or densely sampled parameter spaces, however, leading to the DynamicMap type discussed next.
Step6: Faceting parameter spaces
Step7: Faceting with methods
Step8: Using these methods with a DynamicMap requires special attention, because a dynamic map can return an infinite number of different values along its dimensions, unlike a HoloMap. Obviously, HoloViews could not comply with such a request, but these methods are perfectly legal with DynamicMap if you also define which specific dimension values you need, using the .redim.values method
Step9: Optional
Step10: You can do the same using the select method
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import holoviews as hv
hv.extension('bokeh')
%opts Curve Area [width=600]
def fm_modulation(f_carrier=110, f_mod=110, mod_index=1, length=0.1, sampleRate=3000):
x = np.arange(0, length, 1.0/sampleRate)
y = np.sin(2*np.pi*f_carrier*x + mod_index*np.sin(2*np.pi*f_mod*x))
return hv.Curve((x, y), kdims=['Time'], vdims=['Amplitude'])
fm_modulation()
carrier_frequencies = [10, 20, 110, 220, 330]
modulation_frequencies = [110, 220, 330]
hmap = hv.HoloMap({(fc, fm): fm_modulation(fc, fm) for fc in carrier_frequencies
for fm in modulation_frequencies}, kdims=['fc', 'fm'])
hmap
# Exercise: Try changing the function below to return an ``Area`` or ``Scatter`` element,
# in the same way `fm_modulation` returned a ``Curve`` element.
def fm_modulation2(f_carrier=220, f_mod=110, mod_index=1, length=0.1, sampleRate=3000):
x = np.arange(0,length, 1.0/sampleRate)
y = np.sin(2*np.pi*f_carrier*x + mod_index*np.sin(2*np.pi*f_mod*x))
# Then declare a HoloMap like above and assign it to a ``exercise_hmap`` variable and display that
%%opts Curve (color='red')
dmap = hv.DynamicMap(fm_modulation, kdims=['f_carrier', 'f_mod', 'mod_index'])
dmap = dmap.redim.range(f_carrier=((10, 110)), f_mod=(10, 110), mod_index=(0.1, 2))
dmap
# Exercise: Declare a DynamicMap using the function from the previous exercise and name it ``exercise_dmap``
# Exercise (Optional): Use the ``.redim.step`` method and a floating point range to modify the slider step
%%opts Curve [width=150]
hv.GridSpace(hmap).opts()
# Exercise: Try casting your ``exercise_hmap`` HoloMap from the first exercise to an ``NdLayout`` or
# ``NdOverlay``, guessing from the name what the resulting organization will be before testing it.
hmap.overlay('fm')
%%opts Curve [width=150]
dmap.redim.values(f_mod=[10, 20, 30], f_carrier=[10, 20, 30]).overlay('f_mod').grid('f_carrier').opts()
# Exercise: Facet the ``exercise_dmap`` DynamicMap using ``.overlay`` and ``.grid``
# Hint: Use the .redim.values method to set discrete values for ``f_mod`` and ``f_carrier`` dimensions
%%opts Curve [width=300]
hmap[10, 110] + hmap[10, 200:].overlay() + hmap[[10, 110], 110].overlay()
(hmap.select(fc=10, fm=110) +
hmap.select(fc=10, fm=(200, None)).overlay() +
hmap.select(fc=[10, 110], fm=110).overlay())
# Exercise: Try selecting two carrier frequencies and two modulation frequencies on the ``exercise_hmap``
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Comparing mutated proteins.
Step2: If we now try to subtract the two, this will fail because we can't overlap the atom contact maps
Step3: But, we can still look at the residues by using AtomMismatchedContactDifference. All the ContactDifference objects will only show differences between contacts that can be present in the map. So in this case it will not show any differences for residue 168 (GDP) which is only present for the wildtype.
Step4: Looking at the difference when the residue information is missing
Step5: Here the atoms are correct, but we don't know if the residues would make sense so we can use ResidueMismatchedContactDifference if we only care about the atom-contacts.
Step6: The other option is to override the topology if we have a correct one (in this case full.topology), then we can use OverrideTopologyContactDifference
|
<ASSISTANT_TASK:>
Python Code:
import mdtraj as md
from contact_map import ContactFrequency
full = md.load("data/gsk3b_example.h5") # Start with the full trajectory from another example
# Slice another trajectory down to 150 residues
truncated = full.atom_slice(full.topology.select("resid 0 to 150"))
map_full = ContactFrequency(full)
# Here we just use 1 frame so there is a difference to show
map_truncated = ContactFrequency(truncated[0])
diff = map_full - map_truncated
# This will only show data for up to residue 150, as differences don't make sense outside of that region
diff.residue_contacts.plot(figsize=(12,8));
WT_full = md.load("data/4obe.pdb") # Load the wildtype structure
# This pdb contains 2 protein chains and we only want one
WT_chain1 = WT_full.atom_slice([i.index for i in WT_full.topology.chain(0).atoms])
G12C_full = md.load("data/5v9o.pdb") # Load the mutated structure
# Cut this one down to just the protein
G12C_protein = G12C_full.atom_slice(G12C_full.topology.select("protein"))
wt_map = ContactFrequency(WT_chain1)
g12c_map = ContactFrequency(G12C_protein)
print(wt_map.topology.residue(11))
print(g12c_map.topology.residue(11))
#diff = wt_map - g12c_map # This will fail, because the atom indices don't make sense
from contact_map import AtomMismatchedContactDifference
diff = AtomMismatchedContactDifference(wt_map, g12c_map)
# Grab residue 12
res = diff.topology.residue(11)
# Print the contact differences between WT and G12C (this will print GLY/CYS12 as residue name)
print(diff.residue_contacts.most_common(res))
diff.residue_contacts.plot();
full = full
# Make a copy by slicing it to the same number of atoms
broken = full.atom_slice(range(full.topology.n_atoms))
# Here we break the residues, making all resSeqs equal
for i in range(broken.topology.n_residues):
broken.topology.residue(i).resSeq = "broken"
# We also have to break a name, as otherwise mdtraj thinks the topologies are equal
broken.topology.residue(0).name = "test"
map_full = ContactFrequency(full)
map_broken = ContactFrequency(broken[0])
from contact_map import ResidueMismatchedContactDifference
# diff = map_full - map_broken
diff = ResidueMismatchedContactDifference(map_full, map_broken)
diff.atom_contacts.plot();
from contact_map import OverrideTopologyContactDifference
diff = OverrideTopologyContactDifference(map_full, map_broken, topology=full.topology)
diff.residue_contacts.plot(figsize=(12,8));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table 4 - Low Resolution Analysis
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import seaborn as sns
sns.set_context("notebook", font_scale=1.5)
import warnings
warnings.filterwarnings("ignore")
from astropy.io import ascii
tbl4 = ascii.read("http://iopscience.iop.org/0004-637X/794/1/36/suppdata/apj500669t4_mrt.txt")
tbl4[0:4]
Na_mask = ((tbl4["f_EWNaI"] == "Y") | (tbl4["f_EWNaI"] == "N"))
print "There are {} sources with Na I line detections out of {} sources in the catalog".format(Na_mask.sum(), len(tbl4))
tbl4_late = tbl4[['Name', '2MASS', 'SpType', 'e_SpType','EWHa', 'f_EWHa', 'EWNaI', 'e_EWNaI', 'f_EWNaI']][Na_mask]
tbl4_late.pprint(max_lines=100, )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We first solve the homogeneous equation
Step2: Now solve the non-homogeneous case for some $f(x)$
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
init_printing()
from IPython.display import display
x = Symbol("x")
y = Function("y")
f = Function("f")
eqn = Eq(Derivative(y(x), x, x) + 2*Derivative(y(x), x) + y(x), 0)
display(eqn)
dsolve(eqn)
eqn = Eq(Derivative(y(x), x, x) + 2*Derivative(y(x), x) + y(x), f(x))
dsolve(eqn)
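# Optional extension (not part of the original steps): dsolve can also apply
# initial conditions through the `ics` keyword (assumes a reasonably recent SymPy).
# Here the homogeneous equation is solved with y(0) = 1 and y'(0) = 0.
eqn_h = Eq(Derivative(y(x), x, x) + 2*Derivative(y(x), x) + y(x), 0)
display(dsolve(eqn_h, ics={y(0): 1, y(x).diff(x).subs(x, 0): 0}))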
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Recursion, Greedy Algorithm, Dynamic Programming
Step3: Greedy Algorithm
Step5: The greedy method works fine when we are using U.S. coins, but suppose that, in addition to the usual 1, 5, 10, and 25 cent coins, we now have a 21 cent coin. In this instance the greedy method fails to find the optimal solution for 63 cents in change: it still returns 6 coins (25 + 25 + 10 + 1 + 1 + 1), when the optimal answer is three 21 cent pieces (21 + 21 + 21 = 63).
Step7: Dynamic Programming - 0/1 Knapsack
|
<ASSISTANT_TASK:>
Python Code:
from jupyterthemes import get_themes
from jupyterthemes.stylefx import set_nb_theme
themes = get_themes()
set_nb_theme(themes[1])
%load_ext watermark
%watermark -a 'Ethen' -d -t -v -p jupyterthemes
def to_str(n, base):
convert_str = '0123456789ABCDEF'
if n < base:
# look up the string representation if it's smaller than the base
return convert_str[n]
else:
# convert_str comes after to to_str method so that it will
# delayed the addition until the recursive call finishes
return to_str(n // base, base) + convert_str[n % base]
print(to_str(769, 10))
print(to_str(1453, 16))
def change_money_greedy(amount, coin_values):
"""
Greedy algorithm for finding the minimum number of coins needed
to make change for the input amount (an integer), given all the
possible coin values. The coin values have to be sorted in
decreasing order for this code to work properly.
"""
# key = coin_values
# value = corresponding number of that coin value
change = {}
for d in coin_values:
n_coins = amount // d
change[d] = n_coins
amount = amount % d
if not amount:
break
return change
amount = 63
coin_values = [25, 10, 5, 1]
change = change_money_greedy(amount, coin_values)
print(change)
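# Demonstrating the failure mode described above (the coins must stay sorted in
# decreasing order for this greedy routine): with a 21-cent coin available, the
# greedy answer for 63 cents is still 6 coins, while the optimum is three 21s.
change_21 = change_money_greedy(63, [25, 21, 10, 5, 1])
print(change_21)  # e.g. {25: 2, 21: 0, 10: 1, 5: 0, 1: 3}
print(sum(change_21.values()))  # 6 coins, versus the optimal 3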
import numpy as np
from collections import defaultdict
def change_money_dp(amount, coin_values):
"""
Dynamic programming solution for the minimum number of coins needed
to make change for the input amount (an integer), given all the
possible coin values. Unlike the greedy algorithm, the coin values
do not need to be sorted in decreasing order for this code to work.
"""
# index starts at 0 (change of 0 essentially means nothing to do)
min_coin = np.zeros(amount + 1, dtype = np.int)
used_coin = np.zeros(amount + 1, dtype = np.int)
for cents in range(amount + 1):
# all the coins that are smaller than the
# current change are all candidates for exchanging
possible_choices = [c for c in coin_values if c <= cents]
# start from the worst-case fallback: pay the whole amount with
# 1-cent coins, i.e. `cents` coins of denomination 1; these
# defaults are compared against better choices and updated below
coin = 1
coin_count = cents
# consider using all possible coins to make
# change for the amount specified by cents,
# and store the minimum number to min_coins
for j in possible_choices:
# access the minimum coin required to make
# cents - j amount and add 1 to account for
# the fact that you're using the current coin
# to give the changes
min_coin_count = min_coin[cents - j] + 1
if min_coin_count < coin_count:
coin_count = min_coin_count
coin = j
min_coin[cents] = coin_count
used_coin[cents] = coin
# determine the number of each coins used to
# make the change
change = defaultdict(int)
coin = amount
while coin > 0:
coin_current = used_coin[coin]
coin -= coin_current
change[coin_current] += 1
return change
amount = 63
coin_values = [21, 10, 35, 5, 1]
change = change_money_dp(amount, coin_values)
print(change)
def knapsack(value_weight, capacity):
"""0/1 knapsack problem."""
# construct the dynamic programming table, where each row represents
# the current capacity level and each column represents the item
n_items = len(value_weight)
# the padding (0, 1) tuple represents no padding at the beginning of both
# dimension and pad 1 value at the end of the dimension
# https://stackoverflow.com/questions/35751306/python-how-to-pad-numpy-array-with-zeros
table = np.pad(np.zeros((capacity, n_items)), (0, 1), 'constant').astype(np.int)
for j in range(1, n_items + 1):
value, weight = value_weight[j - 1]
for i in range(1, capacity + 1):
# if the current item's weight is
# larger than the capacity, then
# all we can do is lookup the maximum
# value of the previous column, i.e.
# best value at this capacity with previously
# seen items
if weight > i:
table[i, j] = table[i, j - 1]
else:
# if we can fit the item in, then we compare adding this new
# item's value with the capacity level just enough to add this
# value in
table[i, j] = max(table[i, j - 1], table[i - weight, j - 1] + value)
return table
capacity = 11
value_weight = [(8, 4), (4, 3), (10, 5), (15, 8)]
table = knapsack(value_weight, capacity)
print('max value:', table[capacity, len(value_weight)])
table
# to see which items were taken (put in the knapsack),
# we check whether the row corresponding to the capacity
# we have remaining to use is different in the current
# column and the one before it, if it is, that means
# that item was chosen
remaining = capacity
items_taken = np.zeros(len(value_weight), dtype = np.bool)
for j in range(len(value_weight), 0, -1):
if table[remaining, j] != table[remaining, j - 1]:
items_taken[j - 1] = True
_, weight = value_weight[j - 1]
remaining -= weight
items_taken
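# Sanity check (illustrative, not part of the original steps): the selected items
# should respect the capacity and reproduce the best value stored in the DP table.
chosen = [vw for vw, taken in zip(value_weight, items_taken) if taken]
total_value = sum(v for v, w in chosen)
total_weight = sum(w for v, w in chosen)
print('value:', total_value, 'weight:', total_weight, 'capacity:', capacity)
assert total_weight <= capacity
assert total_value == table[capacity, len(value_weight)]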
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I. Problem Set 8, Part 1
Step2: And construct a flattened version of it, for the linear model case
Step3: (1) neural network
Step4: (2) support vector machine
Step5: (3) penalized logistic model
Step6: II. Problem Set 8, Part 2
Step7: Now, build a neural network for the model
Step8: III. Transfer Learning IMDB Sentiment analysis
Step9: I'll fit a significantly larger vocabulary this time, as the embeddings are basically given for us.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import copy
import numpy as np
import pandas as pd
import sys
import os
import re
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras.layers.wrappers import TimeDistributed
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import SimpleRNN, LSTM, GRU
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from gensim.models import word2vec
dir_in = "../../../class_data/stl10/"
X_train = np.genfromtxt(dir_in + 'X_train_new.csv', delimiter=',')
Y_train = np.genfromtxt(dir_in + 'Y_train.csv', delimiter=',')
X_test = np.genfromtxt(dir_in + 'X_test_new.csv', delimiter=',')
Y_test = np.genfromtxt(dir_in + 'Y_test.csv', delimiter=',')
Y_train_flat = np.zeros(Y_train.shape[0])
Y_test_flat = np.zeros(Y_test.shape[0])
for i in range(10):
Y_train_flat[Y_train[:,i] == 1] = i
Y_test_flat[Y_test[:,i] == 1] = i
model = Sequential()
model.add(Dense(1024, input_shape = (X_train.shape[1],)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms,
metrics=['accuracy'])
model.fit(X_train, Y_train, batch_size=32, nb_epoch=5, verbose=1)
test_rate = model.evaluate(X_test, Y_test)[1]
print("Test classification rate %0.05f" % test_rate)
svc_obj = SVC(kernel='linear', C=1)
svc_obj.fit(X_train, Y_train_flat)
pred = svc_obj.predict(X_test)
pd.crosstab(pred, Y_test_flat)
c_rate = sum(pred == Y_test_flat) / len(pred)
print("Test classification rate %0.05f" % c_rate)
lr = LogisticRegression(penalty = 'l1')
lr.fit(X_train, Y_train_flat)
pred = lr.predict(X_test)
pd.crosstab(pred, Y_test_flat)
c_rate = sum(pred == Y_test_flat) / len(pred)
print("Test classification rate %0.05f" % c_rate)
dir_in = "../../../class_data/chi_python/"
X_train = np.genfromtxt(dir_in + 'chiCrimeMat_X_train.csv', delimiter=',')
Y_train = np.genfromtxt(dir_in + 'chiCrimeMat_Y_train.csv', delimiter=',')
X_test = np.genfromtxt(dir_in + 'chiCrimeMat_X_test.csv', delimiter=',')
Y_test = np.genfromtxt(dir_in + 'chiCrimeMat_Y_test.csv', delimiter=',')
model = Sequential()
model.add(Dense(1024, input_shape = (434,)))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(1024))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.2))
model.add(Dense(5))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms,
metrics=['accuracy'])
# downsample, if need be:
num_sample = X_train.shape[0]
model.fit(X_train[:num_sample], Y_train[:num_sample], batch_size=32,
nb_epoch=10, verbose=1)
test_rate = model.evaluate(X_test, Y_test)[1]
print("Test classification rate %0.05f" % test_rate)
path = "../../../class_data/aclImdb/"
ff = [path + "train/pos/" + x for x in os.listdir(path + "train/pos")] + \
[path + "train/neg/" + x for x in os.listdir(path + "train/neg")] + \
[path + "test/pos/" + x for x in os.listdir(path + "test/pos")] + \
[path + "test/neg/" + x for x in os.listdir(path + "test/neg")]
TAG_RE = re.compile(r'<[^>]+>')
def remove_tags(text):
return TAG_RE.sub('', text)
input_label = ([1] * 12500 + [0] * 12500) * 2
input_text = []
for f in ff:
with open(f) as fin:
pass
input_text += [remove_tags(" ".join(fin.readlines()))]
num_words = 5000
max_len = 400
tok = Tokenizer(num_words)
tok.fit_on_texts(input_text[:25000])
X_train = tok.texts_to_sequences(input_text[:25000])
X_test = tok.texts_to_sequences(input_text[25000:])
y_train = input_label[:25000]
y_test = input_label[25000:]
X_train = sequence.pad_sequences(X_train, maxlen=max_len)
X_test = sequence.pad_sequences(X_test, maxlen=max_len)
words = []
for iter in range(num_words):
words += [key for key,value in tok.word_index.items() if value==iter+1]
loc = "/Users/taylor/files/word2vec_python/GoogleNews-vectors-negative300.bin"
w2v = word2vec.Word2Vec.load_word2vec_format(loc, binary=True)
weights = np.zeros((num_words,300))
for idx, w in enumerate(words):
try:
weights[idx,:] = w2v[w]
except KeyError as e:
pass
model = Sequential()
model.add(Embedding(num_words, 300, input_length=max_len))
model.add(Dropout(0.5))
model.add(GRU(16,activation='relu'))
model.add(Dense(128))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.layers[0].set_weights([weights])
model.layers[0].trainable = False
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=32, nb_epoch=10, verbose=1,
validation_data=(X_test, y_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: reading the training data into a data frame and assigning headings
Step2: reading the testing data into a data frame and assigning headings
Step3: changing the strings in the train data into numbers so that they can be used when calculating the Euclidean distance
Step4: changing the strings in the test data into numbers so that they can be used when calculating the Euclidean distance
Step5: Normalizing the values in the Vacation, eCredit, Salary, and Property columns so that all values lie between 0 and 1, using normalized = (x - min) / (max - min). We capture the min and max of each normalized column here so that the same values can be reused to normalize the test data.
Step6: normalizing the testing data too
Step7: converting all the data to float to keep the calculations accurate
Step8: we have to shuffle the data so that we can see how well our own function classifies the instances into clusters
Step9: developing our own k-nearest-neighbors algorithm tailored to our requirements: a simple 0/1 mismatch score is used for the two categorical attributes, and Euclidean distance for the remaining numeric ones.
Step10: splitting the data according to the ratio and segregating the train and test instances into their respective clusters
Step11: Removing the cluster names from the instances and adding them to lists.
Step12: Sending our split test and train sets (taken from the given training data) in order to cross-validate
Step13: Sending our actual testing data to find the accuracy of our model
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from collections import Counter
from math import sqrt
import random
import warnings
df = pd.read_table('train.csv', sep=',', header=None, names=['Type', 'LifeStyle', 'Vacation', 'eCredit', 'Salary', 'Property', 'Label'])
df.head()
dft = pd.read_table('test.csv', sep=',', header=None, names=['Type', 'LifeStyle', 'Vacation', 'eCredit', 'Salary', 'Property', 'Label'])
dft.head()
df['Type'] = df.Type.map({'student':1,'engineer':2,'librarian':3,'professor':4,'doctor':5 })
# df.head()
df['LifeStyle'] = df.LifeStyle.map({'spend<<saving':1, 'spend<saving':2, 'spend>saving':3, 'spend>>saving':4})
df['Label'] = df.Label.map({'C1':1, 'C2':2 ,'C3':3 ,'C4':4 ,'C5':5})
# df['Vacation']=df['Vacation']/100
df.head()
dft['Type'] = dft.Type.map({'student':1,'engineer':2,'librarian':3,'professor':4,'doctor':5 })
# df.head()
dft['LifeStyle'] = dft.LifeStyle.map({'spend<<saving':1, 'spend<saving':2, 'spend>saving':3, 'spend>>saving':4})
dft['Label'] = dft.Label.map({'C1':1, 'C2':2 ,'C3':3 ,'C4':4 ,'C5':5})
# df['Vacation']=df['Vacation']/100
dft.head()
vacmaxval=0
vacminval=0
ecrmaxval=0
ecrminval=0
salmaxval=0
salminval=0
prpminval=0
prpmaxval=0
for attribute in list(df.columns.values):
if attribute == 'Vacation' or attribute == 'eCredit' or attribute == 'Salary' or attribute == 'Property':
if attribute == 'Vacation':
vacmaxval=max(df[attribute])
vacminval=min(df[attribute])
elif attribute == 'eCredit':
ecrmaxval=max(df[attribute])
ecrminval=min(df[attribute])
elif attribute == 'Salary':
salmaxval=max(df[attribute])
salminval=min(df[attribute])
elif attribute == 'Property':
prpmaxval=max(df[attribute])
prpminval=min(df[attribute])
maxValue = max(df[attribute])
minValue = min(df[attribute])
norm = []
for i in df[attribute]:
normalisedValue = (i - minValue + 0.0)/(maxValue - minValue + 0.0)
norm.append(normalisedValue)
df[attribute] = norm
df.head()
# uncomment the below line to get a csv file of the dataframe
#df.to_csv('sanitizedData.csv', sep=',', encoding='utf-8', header=False)
minValue=0
maxValue=0
for attribute in list(dft.columns.values):
if attribute == 'Vacation' or attribute == 'eCredit' or attribute == 'Salary' or attribute == 'Property':
norm = []
for i in dft[attribute]:
if attribute == 'Vacation':
minValue=vacminval
maxValue=vacmaxval
elif attribute == 'eCredit':
minValue=ecrminval
maxValue=ecrmaxval
elif attribute == 'Salary':
minValue=salminval
maxValue=salmaxval
elif attribute == 'Property':
minValue=prpminval
maxValue=prpmaxval
normalisedValue = (i - minValue + 0.0)/(maxValue - minValue + 0.0)
norm.append(normalisedValue)
dft[attribute] = norm
dft.head()
dataframe= df.astype(float).values.tolist()
dataframet= dft.astype(float).values.tolist()
#print dataframe[:10]
random.shuffle(dataframe)
#print dataframe[:10]
def k_nearest(data,predict,k=5):
#if(len(data)>=k):
# warnings.warn('bye')
distances =[]
for group in data:
for features in data[group]:
euclidean_distance=0
if(predict[0]!=features[0]):
euclidean_distance+=1
if(predict[1]!=features[1]):
euclidean_distance+=1
euclidean_distance += ((predict[2]-features[2])**2 + (predict[3]-features[3])**2 +(predict[4]-features[4])**2 + (predict[5]-features[5])**2)
#euclidean_distance = np.linalg.norm(np.array(features)-np.array(predict))
euclidean_distance=sqrt(euclidean_distance)
distances.append([euclidean_distance,group])
votes = [i[1] for i in sorted(distances) [:k]]
#print distances
print votes
#print (Counter(votes).most_common(1))
vote_result = Counter(votes).most_common(1)[0][0]
#print vote_result
return vote_result
test_size=0.2
train_set={1.0:[],2.0:[],3.0:[],4.0:[],5.0:[]}
test_set={1.0:[],2.0:[],3.0:[],4.0:[],5.0:[]}
test_setnew={1.0:[],2.0:[],3.0:[],4.0:[],5.0:[]}
train_setnew={1.0:[],2.0:[],3.0:[],4.0:[],5.0:[]}
train_data= dataframe[:-int(test_size*len(dataframe))]
test_data= dataframe[-int(test_size*len(dataframe)):]
train_datanew= dataframe[:int(1*len(dataframe))]
test_datanew=dataframet[:int(1*len(dataframet))]
#print test_set
#print test_datanew
for i in train_data:
train_set[i[-1]].append(i[:-1])
for i in test_data:
test_set[i[-1]].append(i[:-1])
#print test_set
for i in test_datanew:
test_setnew[i[-1]].append(i[:-1])
for i in train_datanew:
train_setnew[i[-1]].append(i[:-1])
#print test_set
#print test_setnew
correct=0.0
total=0.0
for group in test_set:
for data in test_set[group]:
vote= k_nearest(train_set,data,k=5)
#print '*****'
#print group
#print vote
#print '*****'
if group==vote:
correct+=1
total +=1
print correct
print total
print ('Accuracy:', correct/total)
correct =0.0
total =0.0
for group in test_setnew:
for data in test_setnew[group]:
vote= k_nearest(train_set,data,k=5)
print '*****'
print group
print vote
#print '*****'
if group==vote:
correct+=1
total +=1
print correct
print total
print ('Accuracy:', (correct)/total)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <hr/>
Step2: The mixing coefficient for a numerical node attribute $X = \big(x_i\big)$ in an undirected graph $G$, with the adjacency matrix $A$, is defined as
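(Reconstructed: the original display equation appears to have been lost in extraction; the expression below is written to match the assortativity() implementation at the end of the code, where $d_i = \sum_j A_{ij}$ is the degree of node $i$ and $\bar{x} = \sum_i d_i x_i \,/\, \sum_i d_i$ is the degree-weighted mean of $X$.) $$ r \;=\; \frac{x^{\top} A x \;-\; \bar{x}^{2}\sum_i d_i}{x^{\top}\operatorname{diag}(d)\,x \;-\; \bar{x}^{2}\sum_i d_i} $$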
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import networkx as nx
from matplotlib import pyplot as plt
%matplotlib inline
import warnings
warnings.filterwarnings( 'ignore' )
def fw( A, pi = None ) :
if pi is None :
pi = A.copy( )
pi[ A == 0 ] = np.inf
np.fill_diagonal( pi, 0 )
for k in xrange( A.shape[ 0 ] ) :
pi = np.minimum( pi, pi[ :, k ] + pi[ k, : ] )
return pi
G = nx.gml.read_gml( './data/ha5/huge_100004196072232_2015_03_21_22_33_65c744356ffedcfa83bf49b64a76445a.gml' )
fig = plt.figure( figsize = (16,12) )
axs = fig.add_subplot( 1,1,1, axisbg = 'black' )
nx.draw( G, pos = nx.spring_layout( G ), ax = axs )
nx.is_connected(G)
nx.to_numpy_matrix( G )
A = nx.to_numpy_matrix( G )
def spectral( A, T = 10, _index = None ) :
if _index is None :
_index = np.arange( A.shape[ 0 ], dtype = np.int )
## Get the vertex degrees
deg = A.sum( axis = 1, dtype = np.float ).getA1( )
## Check for isolated vertices
if np.any( deg == 0 ) :
## Find nonisolated
nz = np.where( deg != 0 )[ 0 ]
return np.concatenate( ( np.where( deg == 0 )[ 0 ],
nz[ spectral( A[:,nz][nz,:], T = T, _index = _index[ nz ] ) ] ) )
## Assume the matrix A has no isolated vertices
D = np.diag( 1.0 / deg )
L = np.eye( *A.shape, dtype = np.float ) - D.dot( A )
l, v = np.linalg.eig( L )
e = v[ :, np.argsort( l )[ 1 ] ].real.getA1()
n, p = np.where( e < 0 )[ 0 ], np.where( e >= 0 )[ 0 ]
if len( p ) > T :
p = p[ spectral( A[:,p][p,:], T = T, _index = _index[ p ] ) ]
if len( n ) > T :
n = n[ spectral( A[:,n][n,:], T = T, _index = _index[ n ] ) ]
if len( p ) > len( n ) :
p, n = n, p
return np.concatenate( ( n, p ) )
pi = fw( A )
I = nx.spectral_ordering( G )
J = spectral( A )
plt.subplot( 121 )
plt.imshow( pi[:,I][I,:] )
plt.subplot( 122 )
plt.imshow( pi[:,J][J,:] )
nx.spectral_ordering( G )
plt.plot(e[n])
plt.plot(e[p], '-r')
i = np.argsort( l )[ :10 ]
# print v[ :, i ].real
print l[ i ]
np.isclose( l[ i ], 0 )
def assortativity( G, X ) :
## represent the graph in an adjacency matrix form
A = nx.to_numpy_matrix( G, dtype = np.float, nodelist = G.nodes( ) )
## Convert x -- dictionary to a numpy vector
x = np.array( [ X[ n ] for n in G.nodes( ) ] , dtype = np.float )
## Compute the x'Ax part
xAx = np.dot( x, np.array( A.dot( x ) ).flatten( ) )
## and the x'\text{diag}(D)x part. Note that left-multiplying a vector
## by a diagonal matrix is equivalent to element-wise multiplication.
D = np.array( A.sum( axis = 1 ), dtype = np.float ).flatten( )
xDx = np.dot( x, np.multiply( D, x ) )
## numpy.average( ) actually normalizes the weights.
x_bar = np.average( x, weights = D )
D_sum = np.sum( D, dtype = np.float )
return ( xAx - D_sum * x_bar * x_bar ) / ( xDx - D_sum * x_bar * x_bar )
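# Example usage (illustrative): degree-degree assortativity of the friendship graph.
# Assumes dict( G.degree( ) ) yields a node -> degree mapping, which holds for both
# the NetworkX 1.x and 2.x APIs.
print( assortativity( G, dict( G.degree( ) ) ) )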
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we untar the file ../data/SMC_data.tar.gz. This will copy the config.yaml and ft1 files into the notebook directory.
Step2: The setup() method performs the data preparation and response calculations needed for the analysis (selecting the data, creating counts and exposure maps, etc.). Depending on the data selection and binning of the analysis this will often be the slowest step in the analysis sequence. The output of setup() is cached in the analysis working directory so subsequent calls to setup() will run much faster.
Step3: The table above lists the sources included in the model together with summary columns such as npred, ts, and the free flag.
Step4: Now you see that the ts column is filled and the free column shows asterisks for every source because we have freed the sources.
Step5: CHARACTERISTICS OF THE POSITION OF THE SOURCE
Step6: The error for the position is nan because we have not yet run the relocalization.
Step7: Using the option make_plots=True, a few control plots are created, such as 3fgl_j0023.9-7203_localize_peak.png, which shows the likelihood-analysis result for the best-fit position and its positional error.
Step8: SED PARAMETERS FLUX AND SCAN OF FLUX
Step9: Customizing your model
Step10: Now we free all parameters
Step11: Now we free all the SED parameters of sources within 3 degrees from 3FGL J0059.0-7242e
Step12: Now we want to delete the source 3FGL J0021.6-6835
Step13: You can also delete all the sources as a function of npred/ts or position using the minmax_npred/minmax_ts/skydir options. Finally, you can delete the sources in a given list using the names option.
Step14: In the example below we delete all the sources that have an npred=[0,500]
Step15: We can also add a new source to the model using the gta.add_source function.
Step16: In the example below we add a Source called Source_PL with a PLSuperExpCutoff and pointlike and a source called Source_Gauss that is spatial extended with a PowerLaw SED and with a RadialGaussian template with an extension of 1 deg.
Step17: Now I load the model saved in model_test to restore the initial model.
Step18: It is also possible to modify the SED parameters using the functions gta.set_norm, gta.set_parameter, gta.set_parameter_bounds, gta.set_parameter_error .
Step19: In the example below we fix the normalization to 1e-11 and the slope to 2.0.
Step20: It is also possible to set the SED shape of a source using gta.set_source_spectrum tool.
Step21: As you can see above the tool gta.set_source_spectrum has modified the SED shape from PowerLaw to LogParabola.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import numpy as np
from fermipy.gtanalysis import GTAnalysis
from fermipy.plotting import ROIPlotter, SEDPlotter
import matplotlib.pyplot as plt
import matplotlib
from IPython.display import Image
if os.path.isfile('../data/SMC_data.tar.gz'):
!tar xzf ../data/SMC_data.tar.gz
else:
!curl -OL https://raw.githubusercontent.com/fermiPy/fermipy-extras/master/data/SMC_data.tar.gz
!tar xzf SMC_data.tar.gz
gta = GTAnalysis('config.yaml')
matplotlib.interactive(True)
gta.setup()
gta.print_model()
gta.free_sources()
fitresult=gta.fit()
gta.print_model()
print gta.roi.sources[0]['name'] #name of the source
print gta.roi.sources[0]['Source_Name'] #name of the source
print gta.roi.sources[0]['SpatialModel'] #spatial model
print gta.roi.sources[0]['SpatialWidth'] #spatial size parameter
print gta.roi.sources[0]['SpatialType'] #spatial size parameter
print gta.roi.sources[0]['SourceType'] #Source type
print gta.roi.sources[0]['SpectrumType'] #Spectrum type string
print gta.roi.sources[0]['Spatial_Filename'] #Path to spatial template
print gta.roi.sources[0]['Spectrum_Filename'] #Path to the SED source template
print gta.roi.sources[0]['correlation'] #Dictionary of correlation coefficients.
print gta.roi.sources[0]['model_counts'] #Vector of predicted counts for this source
print gta.roi.sources[0]['ra'] #ra
print gta.roi.sources[0]['dec'] #dec
print gta.roi.sources[0]['glon'] #glon
print gta.roi.sources[0]['glat'] #glat
print gta.roi.sources[0]['ra_err'] #error for ra
print gta.roi.sources[0]['dec_err'] #error for dec
print gta.roi.sources[0]['glon_err'] #error for glon
print gta.roi.sources[0]['glat_err'] #error for glat
print gta.roi.sources[0]['pos_err'] #error for the position in deg
print gta.roi.sources[0]['pos_r68'] #68% CL error for the position
print gta.roi.sources[0]['pos_r95'] #95% CL error for the position
print gta.roi.sources[0]['pos_r99'] #99% CL error for the position
print gta.roi.sources[0]['pos_err_semimajor'] #1-sigma uncertainty (deg) along major axis of uncertainty ellipse.
print gta.roi.sources[0]['pos_err_semiminor'] #1-sigma uncertainty (deg) along minor axis of uncertainty ellipse.
print gta.roi.sources[0]['offset_ra'] #Right ascension offset from ROI center in local celestial projection (deg).
print gta.roi.sources[0]['offset_dec'] #Declination offset from ROI center in local celestial projection (deg).
print gta.roi.sources[0]['offset_glon'] #Galactic longitude offset from ROI center in local galactic projection (deg).
print gta.roi.sources[0]['offset_glat'] #Galactic latitude offset from ROI center in local galactic projection (deg).
print gta.roi.sources[0]['offset'] #Angular offset from ROI center (deg).
gta.free_sources(free=True)
gta.print_model()
gta.free_sources(skydir=gta.roi[gta.roi.sources[2].name].skydir,distance=[3.0],free=True)
gta.print_model()
localsmc = gta.localize(gta.roi.sources[2].name, update=True, make_plots=True)
gta.print_model()
Image(filename='3fgl_j0023.9-7203_localize_peak.png')
print gta.roi.sources[2]['ra'] #ra
print gta.roi.sources[2]['dec'] #dec
print gta.roi.sources[2]['glon'] #glon
print gta.roi.sources[2]['glat'] #glat
print gta.roi.sources[2]['ra_err'] #error for ra
print gta.roi.sources[2]['dec_err'] #error for dec
print gta.roi.sources[2]['glon_err'] #error for glon
print gta.roi.sources[2]['glat_err'] #error for glat
print gta.roi.sources[2]['pos_err'] #error for the position in deg
print gta.roi.sources[2]['pos_r68'] #68% CL error for the position
print gta.roi.sources[2]['pos_r95'] #95% CL error for the position
print gta.roi.sources[2]['pos_r99'] #99% CL error for the position
print gta.roi.sources[2]['pos_err_semimajor'] #1-sigma uncertainty (deg) along major axis of uncertainty ellipse.
print gta.roi.sources[2]['pos_err_semiminor'] #1-sigma uncertainty (deg) along minor axis of uncertainty ellipse.
print gta.roi.sources[2]['offset_ra'] #Right ascension offset from ROI center in local celestial projection (deg).
print gta.roi.sources[2]['offset_dec'] #Declination offset from ROI center in local celestial projection (deg).
print gta.roi.sources[2]['offset_glon'] #Galactic longitude offset from ROI center in local galactic projection (deg).
print gta.roi.sources[2]['offset_glat'] #Galactic latitude offset from ROI center in local galactic projection (deg).
print gta.roi.sources[2]['offset'] #Angular offset from ROI center (deg).
print gta.roi.sources[0]['param_names'] #Names of spectral parameters.
print gta.roi.sources[0]['param_values'] #Spectral parameter values.
print gta.roi.sources[0]['param_errors'] #Spectral parameters errors.
print gta.roi.sources[0]['ts'] #Source test statistic.
print gta.roi.sources[0]['loglike'] #Log-likelihood of the model evaluated at the best-fit normalization of the source.
print gta.roi.sources[0]['flux_scan'] #Flux values for scan of source normalization.
print gta.roi.sources[0]['norm_scan'] #Normalization parameters values for scan of source normalization.
print gta.roi.sources[0]['npred'] #Number of predicted counts from this source integrated over the analysis energy range.
print gta.roi.sources[0]['flux'] #Photon flux (cm−2 s−1) integrated over analysis energy range
print gta.roi.sources[0]['flux_err'] #Photon flux uncertainty (cm−2 s−1) integrated over analysis energy range
print gta.roi.sources[0]['flux_ul95'] #95% CL upper limit on the photon Differential photon flux (cm−2 s−1 MeV−1tegrated over analysis energy range
print gta.roi.sources[0]['dnde'] #Differential photon flux (cm−2 s−1 MeV−1) evaluated at the pivot energy.
gta.write_roi('model_test',make_plots=True,save_model_map=True)
gta.free_sources(free=False)
gta.print_model()
gta.free_sources(free=True)
gta.print_model()
gta.free_sources(free=False)
gta.free_sources(skydir=gta.roi['3FGL J0059.0-7242e'].skydir,distance=3.0)
gta.print_model()
gta.delete_source('3FGL J0021.6-6835')
gta.print_model()
help(gta.delete_sources)
gta.delete_sources(minmax_npred=[0,500])
gta.print_model()
help(gta.add_source)
gta.add_source('Source_PL',{ 'glon' : 300., 'glat' : -46.,'SpectrumType' : 'PLSuperExpCutoff', 'Index1':-1.5, 'Index2' : 1.0,'Scale' : 1000,'Prefactor':1e-9,'SpatialModel' : 'PointSource' })
gta.add_source('Source_Gauss',{ 'glon' : 302., 'glat' : -45.,'SpectrumType' : 'PowerLaw', 'Index':2.0,'Scale' : 1000,'Prefactor':1e-9,'SpatialModel' : 'RadialGaussian', 'SpatialWidth': 1.0 })
gta.print_model()
gta.load_roi('model_test')
gta.print_model()
help(gta.set_norm)
help(gta.set_parameter)
print gta.roi['3FGL J0059.0-7242e']['param_names']
print gta.roi['3FGL J0059.0-7242e']['param_values']
print gta.roi['3FGL J0059.0-7242e']['param_errors']
gta.set_norm('3FGL J0059.0-7242e',value=1.)
gta.set_parameter('3FGL J0059.0-7242e',par='Index',value=2.0)
print gta.roi['3FGL J0059.0-7242e']['param_names']
print gta.roi['3FGL J0059.0-7242e']['param_values']
print gta.roi['3FGL J0059.0-7242e']['param_errors']
help(gta.set_source_spectrum)
gta.load_roi('model_test')
gta.print_model()
gta.roi['3FGL J0059.0-7242e']['SpectrumType']
gta.set_source_spectrum('3FGL J0059.0-7242e',spectrum_type='LogParabola')
gta.roi['3FGL J0059.0-7242e']['SpectrumType']
gta.fit()
print gta.roi['3FGL J0059.0-7242e']['param_names']
print gta.roi['3FGL J0059.0-7242e']['param_values']
print gta.roi['3FGL J0059.0-7242e']['param_errors']
help(gta.set_source_morphology)
gta.set_source_morphology(name='3FGL J0029.1-7045',spatial_model='RadialGaussian',spatial_pars={'SpatialWidth': 1.0} )
gta.print_model()
print gta.roi['3FGL J0029.1-7045']['SpatialType']
print gta.roi['3FGL J0029.1-7045']['SpatialWidth']
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Methods for searching for data
Step2: Datasets
Step3: Example
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import urllib
import datetime
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
import beapy
apiKey = '3EDEAA66-4B2B-4926-83C9-FD2089747A5B'
bea = beapy.initialize(apiKey =apiKey)
# Get a list of the the data sets available from the BEA along with descriptions.
bea.getDataSetList()
# The getDataSet() method adds a dataSetList attiribute that is a list of the available datasets:
print(bea.dataSetList)
# Get a list of the the parameters for the NIPA dataset
bea.getParameterList('NIPA')
# The getParameterList() method adds a parameterList attiribute that is a list of the parameters of the chosen dataset.
print(bea.parameterList)
# Get a list of the values that the Frequency parameter in the NIPA dataset can take:
bea.getParameterValues('NIPA','Frequency')
# Download data from Table 1.1.5, TableID: 5. and plot
results = bea.getNipa(TableID=5,Frequency='A',Year='X')
frame =results['data']
np.log(frame['Gross domestic product']).plot(grid=True,lw=3)
bea.getParameterValues('RegionalData','KeyCode')
bea.getParameterValues('RegionalData','GeoFips')
bea.getParameterValues('RegionalData','Year')
bea.getParameterValues('RegionalData','KeyCode')
# Get per capita personal income at the state level for all years.
result = bea.getRegionalData(KeyCode='PCPI_SI',GeoFips = 'STATE', Year = 'ALL')
frame = result['data']
# For each state including Washington, D.C., find the percentage difference between state pc income and US pc income.
for state in frame.columns:
f = 100*(frame[state] - frame['United States'])/frame['United States']
f.plot(grid=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Upper air data can be obtained using the siphon package, but for this example we will use the sample sounding file bundled with MetPy (loaded below via get_test_data).
Step2: We will pull the data out of the example dataset into individual variables and attach units to them.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, Hodograph, SkewT
from metpy.units import units
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 630, 80, size='large')
# Grid for plots
gs = gridspec.GridSpec(3, 3)
skew = SkewT(fig, rotation=45, subplot=gs[:, :2])
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Good bounds for aspect ratio
skew.ax.set_xlim(-30, 40)
# Create a hodograph
ax = fig.add_subplot(gs[0, -1])
h = Hodograph(ax, component_range=60.)
h.add_grid(increment=20)
h.plot(u, v)
# Show the plot
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Build a base image for LightGBM
Step2: Launch a LightGBM train task
Step3: Let's look at the trained model
|
<ASSISTANT_TASK:>
Python Code:
from kubeflow import fairing
# Setting up google container repositories (GCR) for storing output containers
# You can use any docker container registry istead of GCR
GCP_PROJECT = fairing.cloud.gcp.guess_project_name()
DOCKER_REGISTRY = 'gcr.io/{}/fairing-job'.format(GCP_PROJECT)
BASE_IMAGE = 'gcr.io/{}/lightgbm:latest'.format(GCP_PROJECT)
!docker build . -t {BASE_IMAGE}
!docker push {BASE_IMAGE}
from kubeflow import fairing
from kubeflow.fairing.frameworks import lightgbm
# Creating a bucket for copying the trained model.
# You can set gcs_bucket variable to an existing bucket name if that is desired.
gcs_bucket = "gs://{}-fairing".format(GCP_PROJECT)
!gsutil mb {gcs_bucket}
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': {'l2', 'l1'},
'metric_freq': 1,
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
"n_estimators": 10,
"is_training_metric": "true",
"valid_data": "gs://fairing-lightgbm/regression-example/regression.test",
"train_data": "gs://fairing-lightgbm/regression-example/regression.train",
'verbose': 1,
"model_output": "{}/lightgbm/example/model.txt".format(gcs_bucket)
}
lightgbm.execute(config=params,
docker_registry=DOCKER_REGISTRY,
base_image=BASE_IMAGE)
url = params['model_output']
!gsutil cp {url} /tmp/model.txt
!head /tmp/model.txt
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The image has been correctly loaded by OpenCV as a numpy array, but the color of each pixel is stored as BGR. Matplotlib's plot expects an RGB image, so for a correct display of the image it is necessary to swap those channels. This operation can be done either by using the OpenCV conversion function cv2.cvtColor() or by working directly with the numpy array.
Step2: In this case it's necessary to change the image space from BGR (Blue, Green, Red) to RGB, so the correct flag is cv2.COLOR_BGR2RGB
Step3: the example below is from http
Step4: Added Friday afternoon 17 Mar 17
Step5: Added Thursday afternoon 23 Mar 17
Step6: Added Friday afternoon 15 Apr 17
Step7: 20 Jun 17 test case for Matplotlib bug https
Step8: Below is (dangerously) relying on the dictionary preserving key order based on creation sequence, which Python 2.7 does not actually guarantee; a sorted-keys alternative is sketched below where the plotting arrays are built. Will fix later
|
<ASSISTANT_TASK:>
Python Code:
! wget --no-check-certificate http://www.hobieco.com/linked_images/H18-Magnum.jpg
%matplotlib inline
import cv2
from matplotlib import pyplot as plt
import numpy as np
import time as t
print "OpenCV Version : %s " % cv2.__version__
image = cv2.imread("H18-Magnum.jpg")
fig, ax = plt.subplots()
fig.set_size_inches(3, 3)
ax.axis([35, 150, 250, 100])
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
plt.show()
from matplotlib.pyplot import imshow
import numpy as np
from PIL import Image
%matplotlib inline
pil_im = Image.open('H18-Magnum.jpg', 'r')
imshow(np.asarray(pil_im))
from IPython.display import Image
Image(filename='H18-Magnum.jpg')
BGRflags = [flag for flag in dir(cv2) if flag.startswith('COLOR_BGR') ]
print BGRflags
t0 = t.time()
cv_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
t1 = t.time()
dt_cv = t1-t0
print "Conversion took %0.5f seconds" % dt_cv
plt.imshow(cv_rgb)
plt.show()
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
fig = plt.figure()
fig.suptitle('bold figure suptitle', fontsize=14, fontweight='bold')
ax = fig.add_subplot(111)
fig.subplots_adjust(top=0.85)
ax.set_title('axes title')
ax.set_xlabel('xlabel')
ax.set_ylabel('ylabel')
ax.text(3, 8, 'boxed italics text in data coords', style='italic',
bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
ax.text(2, 6, r'an equation: $E=mc^2$', fontsize=15)
ax.text(3, 2, u'unicode: Institut f\374r Festk\366rperphysik')
ax.text(0.95, 0.01, 'colored text in axes coords',
verticalalignment='bottom', horizontalalignment='right',
transform=ax.transAxes,
color='green', fontsize=15)
ax.plot([2], [1], 'o')
ax.annotate('annotate', xy=(2, 1), xytext=(3, 4),
arrowprops=dict(facecolor='black', shrink=0.05))
ax.axis([0, 10, 0, 10])
plt.show()
%matplotlib inline
import cv2
from matplotlib import pyplot as plt
import matplotlib.cm as cm
image = cv2.imread("Screenshot_2016-02-23-12-47-43.png")
fig, ax = plt.subplots()
fig.set_size_inches(4, 4)
#ax.axis([1280, 1400, 400, 200])
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
plt.show()
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(2, 2))
up_right_gray_target = image_gray[210:310, 1280:1400]
plt.imshow(up_right_gray_target, cmap = cm.gray)
plt.show()
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(2, 2))
low_left_gray_target = image_gray[2412:2512,65:165]
plt.imshow(low_left_gray_target, cmap = cm.gray)
plt.show()
image_gray = cv2.imread("Screenshot_2016-02-23-12-47-43.png",0)
#targets = [up_right_gray_target,low_left_gray_target]
targets = [up_right_gray_target]
for tgt in targets:
w, h = tgt.shape[::-1]
res = cv2.matchTemplate(image_gray,tgt,cv2.TM_CCOEFF)
res1= cv2.matchTemplate(image_gray,tgt,cv2.TM_CCOEFF_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
top_left = max_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(image_gray,top_left, bottom_right, 255, 2)
#fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 14))
#plt.imshow(image_gray, cmap = cm.gray)
#plt.show()
plt.figure(figsize=(16,9))
plt.subplot(1,2,1)
plt.imshow(res,cmap=cm.gray)
plt.subplot(1,2,2)
plt.imshow(res1,cmap=cm.gray)
plt.show()
%matplotlib inline
import cv2
from matplotlib import pyplot as plt
import matplotlib.cm as cm
image_gray = cv2.imread("Screenshot_2016-02-23-12-47-43.png",0)
up_right_gray_target = image_gray[210:310, 1280:1400]
#targets = [up_right_gray_target,low_left_gray_target]
targets = [up_right_gray_target]
for tgt in targets:
w, h = tgt.shape[::-1]
res_TM_CCOEFF = cv2.matchTemplate(image_gray,tgt,cv2.TM_CCOEFF)
res_TM_CCOEFF_NORMED = cv2.matchTemplate(image_gray,tgt,cv2.TM_CCOEFF_NORMED)
res_TM_SQDIFF = cv2.matchTemplate(image_gray,tgt,cv2.TM_SQDIFF)
res_TM_SQDIFF_NORMED = cv2.matchTemplate(image_gray,tgt,cv2.TM_SQDIFF_NORMED)
res_TM_CORR = cv2.matchTemplate(image_gray,tgt,cv2.TM_CCORR)
res_TM_CORR_NORMED = cv2.matchTemplate(image_gray,tgt,cv2.TM_CCORR_NORMED)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_TM_SQDIFF_NORMED)
# top_left = max_loc
top_left = min_loc
bottom_right = (top_left[0] + w, top_left[1] + h)
cv2.rectangle(image_gray,top_left, bottom_right, 255, 2)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 14))
plt.imshow(image_gray, cmap = cm.gray)
plt.show()
fig = plt.figure(figsize=(12,29))
ax1 = fig.add_subplot(321)
plt.title('CCOEFF')
plt.imshow(res_TM_CCOEFF,cmap=cm.gray)
plt.subplot(3,2,2)
plt.title('CCOEFF_NORMED')
plt.imshow(res_TM_CCOEFF_NORMED,cmap=cm.gray)
plt.subplot(3,2,3)
plt.title('TM_SQDIFF')
plt.imshow(res_TM_SQDIFF,cmap=cm.gray)
plt.subplot(3,2,4)
plt.title('TM_SQDIFF_NORMED')
plt.imshow(res_TM_SQDIFF_NORMED,cmap=cm.gray)
plt.subplot(3,2,5)
plt.title('TM_CORR')
plt.imshow(res_TM_CORR,cmap=cm.gray)
plt.subplot(3,2,6)
plt.title('TM_CORR_NORMED')
plt.imshow(res_TM_CORR_NORMED,cmap=cm.gray)
plt.show()
! pip install --upgrade pandas
%matplotlib inline
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.dates import date2num, MonthLocator, WeekdayLocator, DateFormatter
import datetime as dt
import numpy as np
import pandas as pd
count = (dt.datetime.today() - dt.datetime(2016,11,15)).days
count
dates = [dt.datetime(2016,11,15) + dt.timedelta(days=i) for i in xrange(count)]
type(dates)
import numpy as np
dates_np = np.arange(np.datetime64('2016-11-15','D'),np.datetime64(dt.datetime.today(),'D'))
dates_np
type1 = np.random.randint(0,5,count)
type2 = np.random.randint(0,5,count)
type3 = np.random.randint(0,7,count)
type(type1)
#plt.figure(figsize=(20,7))
#plt.title('Testing', fontsize=16)
#plt.xlabel('Date', fontsize=16)
#plt.ylabel('Frequency', fontsize=16)
fig, ax = plt.subplots(1,1)
p1 = plt.bar(dates_np, type1, width=1, label='Type 1')
p2 = plt.bar(dates_np, type2, bottom = type1, width=1, label='Type 2')
p3 = plt.bar(dates_np, type3, bottom = type1 + type2, width=1, label='Type 3')
ax.xaxis_date()
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(WeekdayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%b %y'))
ax.set_title('Testing', fontsize=16)
ax.set_xlabel('Date')
ax.set_ylabel('Frequency')
ax.set_xlim(dates_np[0],dates_np[-1])
fig.set_size_inches(17,6)
fig.autofmt_xdate()
fig.tight_layout()
plt.legend((p1[0],p2[0],p3[0]), ('First', 'Second','Third'))
plt.show()
type(dates_np[0]),type(type1[0])
! pip install --upgrade pandas
import pandas
%matplotlib inline
import numpy as np
import datetime as dt
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.dates import date2num, MonthLocator, WeekdayLocator, DateFormatter
class test_object_type:
'''builds test objects which have random dates within a range plus type name, and magnatudes'''
def __init__(self, first_date, last_date):
self.full_space_list = self.generate_random_spaced_list()
self.first_date = first_date
self.last_date = last_date
self.event_date_list = self.build_obj_date_list()
self.date_value_dict = self.build_obj_dict()
self.sorted_keys = self.build_obj_sorted_keys()
self.value_list = np.asarray(self.build_value_list())
self.full_date_dict = self.build_full_date_dict()
def generate_random_spaced_list(self):
return np.random.randint(4,size=325)
def build_obj_date_list(self):
'''Makes event days based on spacing by self.full_space_list. May get several
zero spaces in a row those are not checked for before attempting to recreated same key
instead a new entry overwrites the previous.'''
obj_date_list = []
current_date = self.first_date
for x in self.full_space_list:
current_date = current_date + np.timedelta64(x,'D')
if not current_date > self.last_date:
obj_date_list.append(current_date)
else:
return obj_date_list
def build_obj_dict(self):
date_value_dict = {}
for x in self.event_date_list:
value = np.random.randint(1,5)
date_value_dict[x] = value
return date_value_dict
def build_obj_sorted_keys(self):
dict_keys = self.date_value_dict.keys()
dict_keys.sort()
return dict_keys
def build_value_list(self):
value_list = []
for x in self.sorted_keys:
value_list.append(self.date_value_dict[x])
return value_list
def build_full_date_dict(self):
full_date_list =[]
current_date = self.first_date
while not current_date > self.last_date:
full_date_list.append(current_date)
current_date = current_date + np.timedelta64(1,'D')
full_date_dict = {}
for x in full_date_list:
if x in self.date_value_dict:
full_date_dict[x] = self.date_value_dict[x]
else:
full_date_dict[x] = 0
return full_date_dict
%prun aaa = test_object_type(np.datetime64('2016-11-15','D'),np.datetime64(dt.datetime.today(),'D'))
aaa.full_date_dict.keys()[-1] - aaa.full_date_dict.keys()[0]
xxx = np.asarray(aaa.full_date_dict.keys())
yyy = np.asarray(aaa.full_date_dict.values())
type(xxx[0]),type(yyy[0])
fig, ax = plt.subplots(1,1)
p1 = plt.bar(xxx, yyy, width=1, label='Type 1')
ax.xaxis_date()
ax.xaxis.set_major_locator(MonthLocator())
ax.xaxis.set_minor_locator(WeekdayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%b %y'))
ax.set_title('Testing', fontsize=16)
ax.set_xlabel('Date')
ax.set_ylabel('Frequency')
fig.set_size_inches(17,6)
fig.autofmt_xdate()
fig.tight_layout()
plt.show()
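# Hedged alternative sketch (illustration only, not used by the class above): the same
# "fill missing days with zero" idea can be expressed with pandas by reindexing the event
# values onto a complete daily date range.
events = pd.Series(aaa.date_value_dict)
events.index = pd.to_datetime(events.index)   # make sure the index is a DatetimeIndex
full_range = pd.date_range(events.index.min(), events.index.max(), freq='D')
events_full = events.reindex(full_range, fill_value=0)
events_full.head()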
import matplotlib as mpl
mpl.__version__
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This gets harder with more variables.
Step2: Python has string interpolation, which uses %s to insert other strings into placeholders.
|
<ASSISTANT_TASK:>
Python Code:
'hello ' + 'world!'
name = 'Alice'
place = 'Main Street'
time = '6 pm'
food = 'turnips'
print('Hello ' + name + ', you are invited to a party at ' + place + ' at ' + time + '. Please bring ' + food + '.')
print(' Hello %s, you are invited to a party at %s at %s. Please bring %s.' % (name, place, time, food))
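# Illustrative alternatives (assumption: str.format needs Python 2.7+/3.x, f-strings need Python 3.6+):
print('Hello {}, you are invited to a party at {} at {}. Please bring {}.'.format(name, place, time, food))
print(f'Hello {name}, you are invited to a party at {place} at {time}. Please bring {food}.')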
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We try to construct the vocabulary from a set of template images. It is a set of three general images belonging to the categories of car, plane and train.
Step2: 2. Group similar descriptors into an arbitrary number of clusters.
Step3: 3. Now, compute the training data for the SVM classifiers.
Step4: We here define get_sift_training() function to get all the SIFT descriptors present in all the training images.
Step5: We define the compute_training_data() function which returns the training data required for multiclass classification in the later stages.
Step6: We have to solve a multiclass classification problem here. In Shogun these are implemented in
Step7: 5. Now, classify by using the trained SVM
Step8: We define the function get_sift_testing() which returns all the descriptors present in the testing images.
Step9: In the following classify_svm() function, we use the trained GMNPSVM for classifying the test images. It returns the predictions from our trained SVM.
Step10: 6. Evaluate the results (accuracy and confusion matrix).
Step11: Form the expected list.
Step12: We extend all the steps that we did for k=100 to a few other values of k and check their accuracies against the expected labels. Alongside, we also draw their respective confusion matrices.
Step13: From all the above values of k we choose the one with the best accuracy. The range of k can be extended further to improve the overall accuracy.
|
<ASSISTANT_TASK:>
Python Code:
#import Opencv library
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
try:
import cv2
except ImportError:
print "You must have OpenCV installed"
exit(1)
#check the OpenCV version
try:
v=cv2.__version__
assert (tuple(map(int,v.split(".")))>(2,4,2))
except (AssertionError, ValueError):
print "Install newer version of OpenCV than 2.4.2, i.e from 2.4.3"
exit(1)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from shogun import *
import shogun as sg
# get the list of all jpg images from the path provided
import os
def get_imlist(path):
return [[os.path.join(path,f) for f in os.listdir(path) if (f.endswith('.jpg') or f.endswith('.png'))]]
#Use the following function when reading an image through OpenCV and displaying through plt.
def showfig(image, ucmap):
#There is a difference in pixel ordering in OpenCV and Matplotlib.
#OpenCV follows BGR order, while matplotlib follows RGB order.
if len(image.shape)==3 :
b,g,r = cv2.split(image) # get b,g,r
image = cv2.merge([r,g,b]) # switch it to rgb
imgplot=plt.imshow(image, ucmap)
imgplot.axes.get_xaxis().set_visible(False)
imgplot.axes.get_yaxis().set_visible(False)
plt.rcParams['figure.figsize'] = 17, 4
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/template/'))
filenames=np.array(filenames)
# for keeping all the descriptors from the template images
descriptor_mat=[]
# initialise OpenCV's SIFT
sift=cv2.SIFT()
fig = plt.figure()
plt.title('SIFT detected Keypoints')
plt.xticks(())
plt.yticks(())
for image_no in xrange(3):
img=cv2.imread(filenames[0][image_no])
img=cv2.resize(img, (500, 300), interpolation=cv2.INTER_AREA)
gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray=cv2.equalizeHist(gray)
#detect the SIFT keypoints and the descriptors.
kp, des=sift.detectAndCompute(gray,None)
# store the descriptors.
descriptor_mat.append(des)
# here we draw the keypoints
img=cv2.drawKeypoints(img, kp, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
fig.add_subplot(1, 3, image_no+1)
showfig(img, None)
def get_similar_descriptors(k, descriptor_mat):
descriptor_mat=np.double(np.vstack(descriptor_mat))
descriptor_mat=descriptor_mat.T
#initialize KMeans in Shogun
sg_descriptor_mat_features=features(descriptor_mat)
#EuclideanDistance is used for the distance measurement.
distance = sg.distance('EuclideanDistance')
distance.init(sg_descriptor_mat_features, sg_descriptor_mat_features)
#group the descriptors into k clusters.
kmeans=KMeans(k, distance)
kmeans.train()
#get the cluster centers.
cluster_centers=(kmeans.get_cluster_centers())
return cluster_centers
cluster_centers=get_similar_descriptors(100, descriptor_mat)
# name of all the folders together
folders=['cars','planes','trains']
training_sample=[]
for folder in folders:
#get all the training images from a particular class
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/%s'%folder))
for i in xrange(10):
temp=cv2.imread(filenames[0][i])
training_sample.append(temp)
plt.rcParams['figure.figsize']=21,16
fig=plt.figure()
plt.xticks(())
plt.yticks(())
plt.title('10 training images for each class')
for image_no in xrange(30):
fig.add_subplot(6,5, image_no+1)
showfig(training_sample[image_no], None)
def get_sift_training():
# name of all the folders together
folders=['cars','planes','trains']
folder_number=-1
des_training=[]
for folder in folders:
folder_number+=1
#get all the training images from a particular class
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/%s'%folder))
filenames=np.array(filenames)
des_per_folder=[]
for image_name in filenames[0]:
img=cv2.imread(image_name)
# carry out normal preprocessing routines
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray=cv2.resize(gray, (500, 300), interpolation=cv2.INTER_AREA)
gray=cv2.equalizeHist(gray)
#get all the SIFT descriptors for an image
_, des=sift.detectAndCompute(gray, None)
des_per_folder.append(des)
des_training.append(des_per_folder)
return des_training
descriptor_training=get_sift_training()
def compute_training_data(k, cluster_centers, descriptors):
# a list to hold histograms of all the training images
all_histograms=[]
# labels for all of the test images
final_labels=[]
# to hold the cluster number a descriptor belong to
cluster_labels=[]
#initialize a KNN in Shogun
dist = sg.distance('EuclideanDistance')
labels=MulticlassLabels(np.double(range(k)))
knn=KNN(1, dist, labels)
#Target descriptors are the cluster_centers that we got earlier.
#All the descriptors of an image are matched against these for
#calculating the histogram.
sg_cluster_centers=features(cluster_centers)
knn.train(sg_cluster_centers)
# name of all the folders together
folders=['cars','planes','trains']
folder_number=-1
for folder in folders:
folder_number+=1
#get all the training images from a particular class
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/%s'%folder))
for image_name in xrange(len(filenames[0])):
des=descriptors[folder_number][image_name]
#Shogun works in a way in which columns are samples and rows are features.
#Hence we need to transpose the observation matrix
des=(np.double(des)).T
sg_des=features(np.array(des))
#find all the labels of cluster_centers that are nearest to the descriptors present in the current image.
cluster_labels=(knn.apply_multiclass(sg_des)).get_real_vector('labels')
histogram_per_image=[]
for i in xrange(k):
#find the histogram for the current image
histogram_per_image.append(sum(cluster_labels==i))
all_histograms.append(np.array(histogram_per_image))
final_labels.append(folder_number)
# we now have the training features(all_histograms) and labels(final_labels)
all_histograms=np.array(all_histograms)
final_labels=np.array(final_labels)
return all_histograms, final_labels, knn
all_histograms, final_labels, knn=compute_training_data(100, cluster_centers, descriptor_training)
def train_svm(all_histograms, final_labels):
# we will use GMNPSVM class of Shogun for one vs rest multiclass classification
obs_matrix=np.double(all_histograms.T)
sg_features=features(obs_matrix)
sg_labels=MulticlassLabels(np.double(final_labels))
kernel=LinearKernel(sg_features, sg_features)
C=1
gsvm=GMNPSVM(C, kernel, sg_labels)
_=gsvm.train(sg_features)
return gsvm
gsvm=train_svm(all_histograms, final_labels)
# Lets see the testing images
testing_sample=[]
#get all the testing images
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/test_image/'))
for i in xrange(len(filenames[0])):
temp=cv2.imread(filenames[0][i])
testing_sample.append(temp)
plt.rcParams['figure.figsize']=20,8
fig=plt.figure()
plt.xticks(())
plt.yticks(())
plt.title('Test Images')
for image_no in xrange(len(filenames[0])):
fig.add_subplot(3,8, image_no+1)
showfig(testing_sample[image_no], None)
def get_sift_testing():
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/test_image/'))
filenames=np.array(filenames)
des_testing=[]
for image_name in filenames[0]:
result=[]
#read the test image
img=cv2.imread(image_name)
#follow the normal preprocessing routines
gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
gray=cv2.resize(gray, (500, 300), interpolation=cv2.INTER_AREA)
gray=cv2.equalizeHist(gray)
#compute all the descriptors of the test images
_, des=sift.detectAndCompute(gray, None)
des_testing.append(des)
return des_testing
descriptor_testing=get_sift_testing()
def classify_svm(k, knn, des_testing):
# a list to hold histograms of all the test images
all_histograms=[]
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/test_image/'))
for image_name in xrange(len(filenames[0])):
result=[]
des=des_testing[image_name]
#Shogun works in a way in which columns are samples and rows are features.
#Hence we need to transpose the observation matrix
des=(np.double(des)).T
sg_des=features(np.array(des))
#cluster all the above found descriptors into the vocabulary
cluster_labels=(knn.apply_multiclass(sg_des)).get_real_vector('labels')
#get the histogram for the current test image
histogram=[]
for i in xrange(k):
histogram.append(sum(cluster_labels==i))
all_histograms.append(np.array(histogram))
all_histograms=np.double(np.array(all_histograms))
all_histograms=all_histograms.T
sg_testfeatures=features(all_histograms)
return gsvm.apply(sg_testfeatures).get_labels()
predicted=classify_svm(100, knn, descriptor_testing)
print "the predicted labels for k=100 are as follows: "
print predicted
def create_conf_matrix(expected, predicted, n_classes):
m = [[0] * n_classes for i in range(n_classes)]
for pred, exp in zip(predicted, expected):
m[exp][int(pred)] += 1
return np.array(m)
import re
filenames=get_imlist(os.path.join(SHOGUN_DATA_DIR, 'SIFT/test_image/'))
# get the formation of the files, later to be used for calculating the confusion matrix
formation=([int(''.join(x for x in filename if x.isdigit())) for filename in filenames[0]])
# associate them with the correct labels by making a dictionary
keys=range(len(filenames[0]))
values=[0,1,0,2,1,0,1,0,0,0,1,2,2,2,2,1,1,1,1,1]
label_dict=dict(zip(keys, values))
# the following list holds the actual labels
expected=[]
for i in formation:
expected.append(label_dict[i-1])
best_k=1
max_accuracy=0
for k in xrange(1,5):
k=100*k
# step 2
cluster_centers=get_similar_descriptors(k, descriptor_mat)
# step 3
all_histograms, final_labels, knn=compute_training_data(k, cluster_centers, descriptor_training)
# step 4
gsvm=train_svm(all_histograms, final_labels)
# step 5
predicted=classify_svm(k, knn, descriptor_testing)
accuracy=sum(predicted==expected)*100/float(len(expected))
print "for a k=%d, accuracy is %d%%"%(k, accuracy)
#step 6
m=create_conf_matrix(expected, predicted, 3)
if accuracy>max_accuracy:
best_k=k
max_accuracy=accuracy
best_prediction=predicted
print "confusion matrix for k=%d"%k
print m
plt.rcParams['figure.figsize']=20,8
fig=plt.figure()
for image_no in xrange(len(filenames[0])):
fig.add_subplot(3,8, image_no+1)
plt.title('pred. class: '+folders[int(best_prediction[image_no])])
showfig(testing_sample[image_no], None)
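# Possible refinement, sketched here only (not part of the tutorial's pipeline): because
# images yield different numbers of SIFT keypoints, the raw histograms could be
# L1-normalised before training, so each image contributes a frequency distribution
# rather than raw counts.
def normalize_histograms(histograms):
    histograms = np.asarray(histograms, dtype=float)
    sums = histograms.sum(axis=1, keepdims=True)
    sums[sums == 0] = 1.0  # avoid dividing by zero for images without any descriptor
    return histograms / sums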
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 2 - Use OCR to read the address
Step2: Testing location
Step3: Google Maps
|
<ASSISTANT_TASK:>
Python Code:
from PIL import Image
import pytesseract
import googlemaps
import gmaps as jupmap
import sys
from datetime import datetime
# get my private keys for google maps and gmaps
f = open('private.key', 'r')
for line in f:
temp = line.rstrip('').replace(',','').replace('\n','').split(" ")
exec(temp[0])
myMap = googlemaps.Client(key=googlemap_key)
jupmap.configure(api_key=jupmap_key)
img = Image.open('mm_address.jpg')
label = pytesseract.image_to_string(img)
print(label)
clientLocation = label.splitlines()[2] + ', ' + label.splitlines()[3]
print(clientLocation)
testLocation = '2403 Englewood Ave, Durham, NC 27705'
print(testLocation)
testGeoCode = myMap.geocode(testLocation)[0]
lat = testGeoCode.get('geometry').get('location').get('lat')
lng = testGeoCode.get('geometry').get('location').get('lng')
print(lat, ' ', lng )
clientList = ['300 N Roxboro St, Durham, NC 27701','911 W Cornwallis Rd, Durham, NC 27707', '345 W Main Street, Durham, NC 27701' ]
wp=[]
for x in clientList:
testGeoCode = myMap.geocode(x)[0]
lat = testGeoCode.get('geometry').get('location').get('lat')
lng = testGeoCode.get('geometry').get('location').get('lng')
wp.append([lat,lng])
print(wp)
m = jupmap.Map()
home = (36.0160282, -78.9321707)
foodLion = (36.0193147,-78.9603636)
church = (35.9969749, -78.9091543)
dl = jupmap.directions_layer(church, home, waypoints=wp)
#googlemaps has an optimize_waypoints=True but I can't find it in jupyter gmaps
m.add_layer(dl)
m
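# A small, self-contained extra (pure math, no API call): great-circle (haversine)
# distance in kilometres between two of the points defined above.
import math
def haversine_km(p1, p2):
    lat1, lon1, lat2, lon2 = map(math.radians, (*p1, *p2))
    a = math.sin((lat2 - lat1) / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371.0 * math.asin(math.sqrt(a))
print(haversine_km(home, church))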
import gmaps
import gmaps.datasets
locations = gmaps.datasets.load_dataset("starbucks_uk")
print(locations)
fig = gmaps.Map()
starbucks_layer = gmaps.symbol_layer(locations, fill_color="green", stroke_color="green", scale=2)
fig.add_layer(starbucks_layer)
fig
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make data
Step2: With defined indices
Step3: Get information about a series
Step4: Date ranges
Step5: Frames (2D data)
Step6: With defined indices and columns
Step7: Using numpy arrays
Step8: Using Series
Step9: With columns from dict
Step10: To define index as well
Step11: Get information about a dataframe
Step12: More details about dtype
Step13: Panels (3D data)
Step14: HDF5 files
Step15: Setting more options
Step16: Read CSV files
Step17: Setting more options
Step18: JSON files
Step19: Write JSON files
Step20: Setting orient="split"
Step21: Setting orient="records"
Step22: Setting orient="index" (the default option for Series)
Step23: Setting orient="columns" (the default option for DataFrame) (for DataFrame only)
Step24: Setting orient="values" (for DataFrame only)
Step25: Setting more options
Step26: Read JSON files
Step27: Using orient="records"
Step28: Using orient="index"
Step29: Using orient="columns"
Step30: Using orient="values" (for DataFrame only)
Step31: Setting more options
Step32: YAML
Step33: Other file formats
Step34: Select a single column
Step35: Index based selection
Step36: Select multiple columns
Step37: Index based selection
Step38: Select rows
Step39: Select a single row
Step40: Index based selection
Step41: Select multiple rows
Step42: Index based selection
Step43: Select rows based on values
Step44: This can be written in several equivalent ways, as shown below.
Step45: This could be written df[df.A >= 2][df.B < 50] but this is a bad practice (named "chained indexing").
Step46: Setting values
Step47: Apply a function to selected rows values
Step48: WARNING: avoid chained indexing (e.g. df['B'][df.B < 50.] = 0) when setting values; use df.loc instead.
Step49: Sample rows
Step50: With replacement
Step51: Sample 90% of the rows
Step52: Without replacement
Step53: Sample 90% of the rows
Step54: Weighted sampling
Step55: Shuffle/permute rows
Step56: To reset indexes too
Step57: Sort a DataFrame
Step58: Sorting by row index or column label
Step59: Sorting by columns
Step60: Sorting by values
Step61: Missing data
Step62: Get the boolean mask where values are nan
Step63: Drop any rows that have missing data
Step64: Drop any rows that have missing data in a given column
Step65: Drop any columns that have missing data
Step66: Drop any columns that have missing data in a given row
Step67: Filling missing data
Step68: Count the number of NaN values in a given column
Step69: Miscellaneous operations on data frames
Step70: Merge
Step71: Merge with NaN
Step72: Merge with missing rows
Step73: GroupBy
Step74: GroupBy with single key
Step75: GroupBy with multiple keys
Step76: Rolling
Step77: More realistic example
Step78: Rolling with an aggregation window of size 20.
Step79: Pivot
Step80: Count the number of occurrences of a column value
Step81: Stats
Step82: Time series
Step83: A Period is a range in time (with a "anchored" start time and a "anchored" end time)
Step84: A Timedelta is a "floating" duration (i.e. not "anchored" in time)
Step85: Generate datetime index (with a fixed frequency)
Step86: Generate period index
Step87: Plot time series
Step88: Indexing (select datetime)
Step89: Rolling
Step90: More realistic example
Step91: Resampling
Step92: Is there an offset?
Step93: More realistic example
Step94: Difference between rolling() and resample()
Step95: Group by
Step96: Basic example of wrong usage
Step97: More realistic example
Step98: With Periods
Step99: Round
Step100: Count
Step101: Plot
Step102: Line plot
Step103: or
Step104: Steps
Step105: Bar plot
Step106: Vertical
Step107: Horizontal
Step108: Histogram
Step109: To normalize the $y$ axis, use density=True
Step110: Box plot
Step111: Hexbin plot
Step112: Kernel Density Estimation (KDE) plot
Step113: Area plot
Step114: Pie chart
Step115: Scatter plot
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
#%matplotlib notebook
from IPython.display import display
import matplotlib
matplotlib.rcParams['figure.figsize'] = (9, 9)
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import pandas as pd
import numpy as np
pd.__version__
data_list = [1, 3, np.nan, 7]
series = pd.Series(data_list)
series
data_array = np.array(data_list)
series = pd.Series(data_array)
series
indices = pd.Series([1, 3, 5, 7])
series = pd.Series([10, 30, 50, 70], index=indices)
series
indices = pd.Series(['A', 'B', 'C', 'D'])
series = pd.Series([10, 30, 50, 70], index=indices)
series
data_dict = {'A': 10, 'B': 30, 'C': 50, 'D': 70}
series = pd.Series(data_dict)
series
series.index
series.values
series.shape
series.dtypes
series.describe()
type(series.describe())
series.memory_usage()
dates = pd.date_range('20130101', periods=6)
dates
dates = pd.date_range(start='2013-01-01', end='2013-01-08')
dates
dates = pd.date_range('2013-01-01', periods=4, freq='M')
dates
num_days = 7
data = np.random.random(num_days)
index = pd.date_range('2017-01-01', periods=num_days)
series = pd.Series(data, index)
series
data_list = [[1, 2, 3], [4, 5, 6]]
df = pd.DataFrame(data_list)
df
data_array = np.array([[1, 2, 3], [4, 5, 6]])
df = pd.DataFrame(data_array)
df
data = [[1, 2, 3], [4, 5, 6]]
index = [10, 20]
columns = ['A', 'B', 'C']
df = pd.DataFrame(data, index, columns)
df
data = np.array([[1, 2, 3], [4, 5, 6]])
index = np.array([10, 20])
columns = np.array(['A', 'B', 'C'])
df = pd.DataFrame(data, index=index, columns=columns)
df
data = np.array([[1, 2, 3], [4, 5, 6]])
index = pd.Series([10, 20])
columns = pd.Series(['A', 'B', 'C'])
df = pd.DataFrame(data, index=index, columns=columns)
df
data_dict = {'A': 'foo',
'B': [10, 20, 30],
'C': 3}
df = pd.DataFrame(data_dict)
df
data_dict = {'A': 'foo',
'B': [10, 20, 30],
'C': 3}
df = pd.DataFrame(data_dict, index=[10, 20, 30])
df
df.index
df.columns
df.values
df.shape
df.dtypes
df.info()
df.describe()
type(df.describe())
df.memory_usage()
data_dict = {'A': 'foo',
'B': [10, 20, 30],
'C': 3}
df = pd.DataFrame(data_dict)
df
df.dtypes
df2 = df.T
df2
df2.dtypes
data_array = np.array([[1, 2, 3], [4, 5, 6]])
df = pd.DataFrame(data_array, index=[10, 20], columns=[100, 200, 300])
df
df.to_csv(path_or_buf="python_pandas_io_test.csv")
!cat python_pandas_io_test.csv
# FYI, many other options are available
df.to_csv(path_or_buf="python_pandas_io_test.csv",
sep=',',
columns=None,
header=True,
index=True,
index_label=None,
compression=None, # allowed values are 'gzip', 'bz2' or 'xz'
date_format=None)
!cat python_pandas_io_test.csv
df = pd.read_csv("python_pandas_io_test.csv")
df
df = pd.read_csv("python_pandas_io_test.csv",
sep=',',
delimiter=None,
header='infer',
names=None,
index_col=0,
usecols=None,
squeeze=False,
prefix=None,
mangle_dupe_cols=True,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skipinitialspace=False,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
skip_blank_lines=True,
parse_dates=False,
infer_datetime_format=False,
keep_date_col=False,
date_parser=None,
dayfirst=False,
iterator=False,
chunksize=None,
compression='infer',
thousands=None,
decimal=b'.',
lineterminator=None,
quotechar='"',
quoting=0,
escapechar=None,
comment=None,
encoding=None,
dialect=None,
#tupleize_cols=False,
error_bad_lines=True,
warn_bad_lines=True,
skipfooter=0,
#skip_footer=0,
doublequote=True,
delim_whitespace=False,
#as_recarray=False,
#compact_ints=False,
#use_unsigned=False,
low_memory=True,
#buffer_lines=None,
memory_map=False,
float_precision=None)
df
!rm python_pandas_io_test.csv
import io
df.to_json(path_or_buf="python_pandas_io_test.json")
!cat python_pandas_io_test.json
df.to_json(path_or_buf="python_pandas_io_test_split.json",
orient="split")
!cat python_pandas_io_test_split.json
df.to_json(path_or_buf="python_pandas_io_test_records.json",
orient="records")
!cat python_pandas_io_test_records.json
df.to_json(path_or_buf="python_pandas_io_test_index.json",
orient="index")
!cat python_pandas_io_test_index.json
df.to_json(path_or_buf="python_pandas_io_test_columns.json",
orient="columns")
!cat python_pandas_io_test_columns.json
df.to_json(path_or_buf="python_pandas_io_test_values.json",
orient="values")
!cat python_pandas_io_test_values.json
# FYI, many other options are available
df.to_json(path_or_buf="python_pandas_io_test.json",
orient='columns', # For DataFrame: 'split','records','index','columns' or 'values'
date_format=None, # None, 'epoch' or 'iso'
double_precision=10,
force_ascii=True,
date_unit='ms')
!cat python_pandas_io_test.json
!cat python_pandas_io_test_split.json
df = pd.read_json("python_pandas_io_test_split.json",
orient="split")
df
!cat python_pandas_io_test_records.json
df = pd.read_json("python_pandas_io_test_records.json",
orient="records")
df
!cat python_pandas_io_test_index.json
df = pd.read_json("python_pandas_io_test_index.json",
orient="index")
df
!cat python_pandas_io_test_columns.json
df = pd.read_json("python_pandas_io_test_columns.json",
orient="columns")
df
!cat python_pandas_io_test_values.json
df = pd.read_json("python_pandas_io_test_values.json",
orient="values")
df
df = pd.read_json("python_pandas_io_test.json",
orient=None,
typ='frame',
dtype=True,
convert_axes=True,
convert_dates=True,
keep_default_dates=True,
numpy=False,
precise_float=False,
date_unit=None,
encoding=None,
lines=False)
df
!rm python_pandas_io_test*.json
!echo "- {A: 1, B: 2}" > python_pandas_io_test.yaml
!echo "- {A: 3}" >> python_pandas_io_test.yaml
!echo "- {B: 4}" >> python_pandas_io_test.yaml
!cat python_pandas_io_test.yaml
try:
    import yaml
    with open('python_pandas_io_test.yaml', 'r') as f:
        # safe_load avoids constructing arbitrary Python objects from the YAML file
        df = pd.io.json.json_normalize(yaml.safe_load(f))
        print(df)
except ImportError:
    print("PyYAML is not installed, skipping this example")
!rm python_pandas_io_test.yaml
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=np.arange(1, 10, 1),
columns=['A', 'B', 'C'])
df
df.B
df["B"]
df.loc[:,"B"]
df.iloc[:,1]
df[['A','B']]
df.loc[:,['A','B']]
df.iloc[:,0:2]
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=["i" + str(i+1) for i in range(9)],
columns=['A', 'B', 'C'])
df
df.loc["i3"]
df.loc["i3",:]
df.iloc[2] # Select over index
df.iloc[2,:] # Select over index
df.loc[["i3", "i4"],:]
df.iloc[2:4,:] # Select over index
df.B < 50.
type(df.B < 50.)
df[[True, True, True, True, False, False, False, False, False]]
series_mask = pd.Series({'i1': True,
'i2': True,
'i3': True,
'i4': True,
'i5': False,
'i6': False,
'i7': False,
'i8': False,
'i9': False})
df[series_mask]
df[df.B < 50.]
df[df['B'] < 50.]
df[(df.A >= 2) & (df.B < 50)]
df.loc[(df.A >= 2) & (df.B < 50)]
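# Equivalent, illustrative alternative: the same row selection expressed with DataFrame.query()
df.query("A >= 2 and B < 50")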
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=np.arange(1, 10, 1),
columns=['A', 'B', 'C'])
df
df[(df.A >= 2) & (df.B < 50)]
df[(df.B < 20) | (df.B > 50)]
df.loc[(df.B < 20) | (df.B > 50), 'C']
df[(df['A'] >= 2) & (df['B'] < 50)]
df.loc[(df.A >= 2) & (df.B < 50), ['A','B']]
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=np.arange(1, 10, 1),
columns=['A', 'B', 'C'])
df
df.B *= 2.
df
df.B = pow(df.B, 2)
df
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=np.arange(1, 10, 1),
columns=['A', 'B', 'C'])
df
df[df.B < 50.] *= -1.
df
# df['B'][df['B'] < 50.] = 0 # OK but chain indexing is bad...
# df.A[df.B < 50.] = 0 # OK but chain indexing is bad...
df.loc[df.B < 50., 'A'] = 0
df
df.loc[(df.B < 50.) & (df.B > 20), 'C'] = 0
df
df.loc[(df.B < 20) | (df.B > 50), 'C'] = -1
df
df[df.B < 50.] = pow(df[df.B < 50.], 2)
df
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=np.arange(1, 10, 1),
columns=['A', 'B', 'C'])
df
df.sample(n=30, replace=True)
df.sample(frac=0.9, replace=True)
df.sample(n=3)
df.sample(frac=0.9)
df.sample(n=30, replace=True, weights=np.arange(len(df)))
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=np.arange(1, 10, 1),
columns=['A', 'B', 'C'])
df
df = df.sample(frac=1)
df
df = df.sample(frac=1).reset_index(drop=True)
df
NROWS = 7
col1 = np.arange(1., NROWS, 1)
col2 = np.arange(10., NROWS*10, 10)
col3 = np.arange(100., NROWS*100, 100)
np.random.shuffle(col1)
np.random.shuffle(col2)
np.random.shuffle(col3)
data = np.array([col1,
col2,
col3]).T
index = np.arange(1, NROWS, 1)
columns = np.array(['A', 'B', 'C'])
np.random.shuffle(index)
np.random.shuffle(data)
np.random.shuffle(columns)
df = pd.DataFrame(data,
index=index,
columns=columns)
df
df.sort_index()
df.sort_index(axis=0) # axis=0 -> sort by row index
df.sort_index(ascending=False)
df.sort_index(axis=1) # axis=1 -> sort by column label
df.sort_index(axis=1, ascending=False)
df.sort_values(by='B')
df.sort_values(by='B', ascending=False)
df.sort_values(by='B', inplace=True)
df
a = np.array([[3, np.nan, 5, np.nan, 7],
[2, 4, np.nan, 3, 1],
[3, 4, 5, 6, 1]]).T
df = pd.DataFrame(a,
columns=['A', 'B', 'C'])
df
df.isnull()
df.dropna()
df.dropna(how='any') # but 'any' is the default value...
df.dropna(subset=['B'])
df.dropna(subset=['B', 'C'])
df.dropna(axis=1)
df.dropna(axis=1, how='any') # but 'any' is the default value...
df.dropna(axis=1, subset=[2])
df.dropna(axis=1, subset=[1, 2])
df.fillna(value=999)
df.A.isnull().sum()
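# The same idea applied to the whole frame gives the number of missing values per column:
df.isnull().sum()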
data_array = np.array([np.arange(1, 10, 1), np.arange(10, 100, 10), np.arange(100, 1000, 100)]).T
df = pd.DataFrame(data_array,
index=np.arange(1, 10, 1),
columns=['A', 'B', 'C'])
df
df.T
a1 = np.array([np.arange(1, 5, 1), np.arange(10, 50, 10), np.arange(100, 500, 100)]).T
df1 = pd.DataFrame(a1,
columns=['ID', 'B', 'C'])
a2 = np.array([np.arange(1, 5, 1), np.arange(1000, 5000, 1000), np.arange(10000, 50000, 10000)]).T
df2 = pd.DataFrame(a2,
columns=['ID', 'B', 'C'])
display(df1)
display(df2)
df = pd.merge(df1, df2, on="ID", suffixes=('_1', '_2')) #.dropna(how='any')
display(df)
a1 = np.array([np.arange(1, 5, 1), np.arange(10, 50, 10), np.arange(100, 500, 100)]).T
df1 = pd.DataFrame(a1,
columns=['ID', 'B', 'C'])
a2 = np.array([np.arange(1, 5, 1), np.arange(1000, 5000, 1000), np.arange(10000, 50000, 10000)]).T
df2 = pd.DataFrame(a2,
columns=['ID', 'B', 'C'])
df1.iloc[0,2] = np.nan
df1.iloc[1,1] = np.nan
df1.iloc[2,2] = np.nan
df1.iloc[3,1] = np.nan
df2.iloc[0,1] = np.nan
df2.iloc[1,2] = np.nan
df2.iloc[2,1] = np.nan
df2.iloc[3,2] = np.nan
df = pd.merge(df1, df2, on="ID", suffixes=('_1', '_2')) #.dropna(how='any')
display(df1)
display(df2)
display(df)
a1 = np.array([np.arange(1, 5, 1), np.arange(10, 50, 10), np.arange(100, 500, 100)]).T
df1 = pd.DataFrame(a1,
columns=['ID', 'B', 'C'])
a2 = np.array([np.arange(1, 3, 1), np.arange(1000, 3000, 1000), np.arange(10000, 30000, 10000)]).T
df2 = pd.DataFrame(a2,
columns=['ID', 'B', 'C'])
display(df1)
display(df2)
print("Left: use only keys from left frame (SQL: left outer join)")
df = pd.merge(df1, df2, on="ID", how="left", suffixes=('_1', '_2')) #.dropna(how='any')
display(df)
print("Right: use only keys from right frame (SQL: right outer join)")
df = pd.merge(df1, df2, on="ID", how="right", suffixes=('_1', '_2')) #.dropna(how='any')
display(df)
print("Inner: use intersection of keys from both frames (SQL: inner join) [DEFAULT]")
df = pd.merge(df1, df2, on="ID", how="inner", suffixes=('_1', '_2')) #.dropna(how='any')
display(df)
print("Outer: use union of keys from both frames (SQL: full outer join)")
df = pd.merge(df1, df2, on="ID", how="outer", suffixes=('_1', '_2')) #.dropna(how='any')
display(df)
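# Illustrative aside: merge() also accepts indicator=True, which adds a "_merge" column
# showing whether each row came from the left frame, the right frame or both.
df_ind = pd.merge(df1, df2, on="ID", how="outer", suffixes=('_1', '_2'), indicator=True)
display(df_ind)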
a = np.array([[3, 5, 5, 5, 7, 7, 7, 7],
[2, 4, 4, 3, 1, 3, 3, 2],
[3, 4, 5, 6, 1, 8, 9, 8]]).T
df = pd.DataFrame(a,
columns=['A', 'B', 'C'])
df
df.groupby(["A"]).count()
df.groupby(["A"]).sum().B
df.groupby(["A"]).mean().B
df.groupby(["A","B"]).count()
s = pd.Series([1., 0., 5., 2., 1.])
print("DATA:")
print(s)
mean_s = s.rolling(2).mean()
print()
print("ROLLING MEAN:")
print(mean_s)
sum_s = s.rolling(2).sum()
print()
print("ROLLING SUM:")
print(sum_s)
min_s = s.rolling(2).min()
print()
print("ROLLING MIN:")
print(min_s)
max_s = s.rolling(2).max()
print()
print("ROLLING MAX:")
print(max_s)
ax = s.plot(figsize=(18, 3), color="blue")
mean_s.plot(color="red", label="mean", ax=ax)
sum_s.plot(color="green", label="sum", style="--", alpha=0.5, ax=ax)
min_s.plot(color="black", label="min", style=":", alpha=0.25, ax=ax)
max_s.plot(color="black", label="max", style=":", alpha=0.25, ax=ax)
ax.legend();
index = np.arange(0, 20, 0.05)
s = pd.Series(np.sin(index))
s = s + np.random.normal(scale=0.4, size=s.shape)
ax = s.plot(figsize=(18, 3))
s.shape
s_mean = s.rolling(20).mean()
s_median = s.rolling(20).median()
s_min = s.rolling(20).min()
s_max = s.rolling(20).max()
ax = s_mean.plot(y='duration', figsize=(18, 8), color="red", label="mean", alpha=0.75)
s_median.plot(ax=ax, color="blue", label="median", alpha=0.75)
s_min.plot(ax=ax, color="blue", alpha=0.5, style=":", label="min")
s_max.plot(ax=ax, color="blue", alpha=0.5, style=":", label="max")
plt.fill_between(s_min.index, s_min.values, s_max.values, facecolor='blue', alpha=0.1)
ax.legend()
ax.set_xlabel('Time');
s_mean.shape
df = pd.DataFrame([["i1", "A", 1],
["i1", "B", 2],
["i2", "A", 3],
["i2", "B", 4]], columns=["foo", "bar", "baz"])
df
df.pivot(index="foo", columns="bar", values="baz")
a = np.array([[3, 5, 5, 5, 7, 7, 7, 7],
[2, 4, 4, 3, 1, 3, 3, 2],
[3, 4, 5, 6, 1, 8, 9, 8]]).T
df = pd.DataFrame(a,
columns=['A', 'B', 'C'])
df
df.A.value_counts()
df.A.value_counts().plot.bar()
df = pd.DataFrame(np.random.normal(size=100000))
df.quantile(0.50)
df.quantile([0.25, 0.75])
df.quantile([0.01, 0.001])
pd.Timestamp(year=2018, month=1, day=1, hour=12, minute=30)
p = pd.Period(freq='D', year=2018, month=1, day=1, hour=12, minute=30)
print(p)
print("Start time:", p.start_time)
print("End time:", p.end_time)
print(pd.Timedelta(days=5, seconds=30))
ts1 = pd.Timestamp(year=2018, month=1, day=1, hour=12, minute=30)
ts2 = pd.Timestamp(year=2018, month=1, day=2, hour=12, minute=30)
print(ts2 - ts1)
pd.date_range('2018-01-01', '2018-03-01', freq='D')
pd.date_range('2018-01-01', periods=10, freq='h')
pd.date_range('1/1/2012', periods=10, freq='S')
pd.date_range('3/6/2012 00:00', periods=5, freq='D')
pd.date_range('1/1/2012', periods=5, freq='M')
pd.period_range('2018-01-01', '2018-03-01', freq='D')
pd.date_range('2018-01-01', '2018-03-01', freq='D').to_period()
dti = pd.date_range('2012-01-01 00:00', periods=40, freq='D')
ts = pd.Series(np.random.randint(0, 200, len(dti)), index=dti)
ts.plot();
ts.plot(x_compat=True);
dti = pd.date_range('2018-01-01 00:00', '2018-01-03 00:00', freq='H')
ts = pd.Series(np.random.randint(0, 100, len(dti)), index=dti)
ax = ts.plot(x_compat=True, figsize=(16, 4)) # x_compat is required as matplotlib doesn't understand pandas datetime format -> x_compat=True makes the conversion...
# set monthly locator
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
ax.xaxis.set_minor_locator(mdates.HourLocator(interval=1))
# set formatter
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
# set font and rotation for date tick labels
plt.gcf().autofmt_xdate()
dti = pd.date_range('2012-1-1 00:00', periods=40, freq='D')
ts = pd.Series(np.random.randint(0, 200, len(dti)), index=dti)
ts
ts["2012-01-09"]
ts[datetime.datetime(2012, 1, 9)]
ts[ts.index < "2012-01-09"]
ts[ts.index > "2012-01-20"]
ts["2012-01-09":"2012-01-20"]
ts[datetime.datetime(2012, 1, 9):datetime.datetime(2012, 1, 20)]
ts[ts.index.day <= 3]
ts[ts.index.month == 2]
ts["2012-02"]
ts[ts.index.dayofweek == 1]
dti = pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-1 06:45', '2018-1-1 12:00',
'2018-1-2 00:00', '2018-1-2 06:00', '2018-1-2 12:00'])
ts = pd.Series([2., 1., 3., 2., 2., 0.], index=dti)
print("DATA:")
print(ts)
ax = ts.plot(figsize=(18, 3), style="*-", color="blue")
ax.vlines(pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-2 00:00']), ymin=0, ymax=8, color="red", linestyle=":", alpha=0.3);
ts_rw = ts.rolling('D').sum() # Rolling window size: 1 day
print()
print("MEAN:")
print(ts_rw)
ts_rw.plot(color="red", label="sum", style="*-", alpha=0.75, ax=ax)
ax.legend()
ax.set_xlabel('Time')
ax.grid(True);
ts.rolling('6h').min()
ts.rolling('3h').mean()
dti = pd.date_range('1/1/2018 00:00', periods=6*480, freq='10min')
ts = pd.Series(np.sin(dti.hour * 2. * np.pi / 24.), index=dti)
ts = ts + np.random.normal(scale=0.4, size=ts.shape)
ax = ts.plot(figsize=(18, 3))
ax.vlines(pd.date_range('1/1/2018 00:00', periods=480/24, freq='D'), ymin=-2, ymax=2, color="red", linestyle=":", alpha=0.3);
ts.shape
ts_mean = ts.rolling('5H').mean()
ts_median = ts.rolling('5H').median()
ts_min = ts.rolling('5H').min()
ts_max = ts.rolling('5H').max()
ax = ts_mean.plot(y='duration', figsize=(18, 3), color="red", label="mean", alpha=0.75)
ts_median.plot(ax=ax, color="blue", label="median", alpha=0.75)
ts_min.plot(ax=ax, color="blue", alpha=0.5, style=":", label="min")
ts_max.plot(ax=ax, color="blue", alpha=0.5, style=":", label="max")
plt.fill_between(ts_min.index, ts_min.values, ts_max.values, facecolor='blue', alpha=0.1)
ax.legend()
ax.set_xlabel('Time');
ts_mean.shape
dti = pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-1 06:45', '2018-1-1 12:00',
'2018-1-2 00:00', '2018-1-2 12:00'])
ts = pd.Series([1., 0., 5., 2., 0.], index=dti)
print("DATA:")
print(ts)
ax = ts.plot(figsize=(18, 3), style="*-", color="blue")
ax.vlines(pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-2 00:00']), ymin=0, ymax=5, color="red", linestyle=":", alpha=0.3);
ts_resampled = ts.resample('D').mean()
print()
print("MEAN:")
print(ts_resampled)
ts_resampled.plot(color="red", style="*-", label="mean", alpha=0.75, ax=ax)
ax.legend()
ax.set_xlabel('Time');
ts.resample('6h').min()
ts.resample('3h').sum()
dti = pd.DatetimeIndex(['2018-1-1 12:00',
'2018-1-2 08:00', '2018-1-2 18:00', '2018-1-2 23:59:59',
'2018-1-3 00:00'])
ts = pd.Series([0.,
10., 20., 30.,
5.], index=dti)
print("DATA:")
print(ts)
ts_resampled = ts.resample('D').mean()
print()
print("MEAN:")
print(ts_resampled)
# Illustrative plot
ax = ts.plot(x_compat=True, figsize=(18, 3), style="*-", color="blue")
ax.vlines(pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-2 00:00', '2018-1-3 00:00']), ymin=-10, ymax=40, color="red", linestyle=":", linewidth=2, alpha=0.5);
ax.vlines(pd.DatetimeIndex(['2018-1-1 12:00', '2018-1-2 12:00', '2018-1-3 12:00']), ymin=-10, ymax=40, color="green", linestyle=":", linewidth=2, alpha=0.5);
ax.plot(pd.DatetimeIndex(['2018-1-1 12:15', '2018-1-2 11:45']), [40, 40], marker="|", markersize=20, color="green")
ax.plot(pd.DatetimeIndex(['2018-1-2 12:15', '2018-1-3 11:45']), [40, 40], marker="|", markersize=20, color="green")
ax.plot(pd.DatetimeIndex(['2018-1-1 00:15', '2018-1-1 23:45']), [35, 35], marker="|", markersize=20, color="red")
ax.plot(pd.DatetimeIndex(['2018-1-2 00:15', '2018-1-2 23:45']), [35, 35], marker="|", markersize=20, color="red")
ts_resampled.plot(color="red", style="*-", label="mean", alpha=0.75, ax=ax)
# set monthly locator
ax.xaxis.set_major_locator(mdates.DayLocator(interval=1))
ax.xaxis.set_minor_locator(mdates.HourLocator(interval=1))
# set formatter
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y %H:%M'))
# set font and rotation for date tick labels
plt.gcf().autofmt_xdate()
ax.legend()
ax.set_xlabel('Time');
dti = pd.DatetimeIndex(['2018-1-1 01:00',
'2018-1-1 05:30', '2018-1-1 07:30',
'2018-1-1 10:00'])
ts = pd.Series([0.,
10., 20.,
5.], index=dti)
print("DATA:")
print(ts)
ts_resampled = ts.resample('5h').mean()
print()
print("MEAN:")
print(ts_resampled)
# Illustrative plot
ax = ts.plot(x_compat=True, figsize=(18, 3), style="*-", color="blue")
ax.vlines(pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-1 05:00', '2018-1-1 10:00']), ymin=-10, ymax=40, color="red", linestyle=":", linewidth=2, alpha=0.5);
ax.vlines(pd.DatetimeIndex(['2018-1-1 01:00', '2018-1-1 06:00', '2018-1-1 11:00']), ymin=-10, ymax=40, color="green", linestyle=":", linewidth=2, alpha=0.5);
ax.plot(pd.DatetimeIndex(['2018-1-1 01:05', '2018-1-1 05:55']), [40, 40], marker="|", markersize=20, color="green")
ax.plot(pd.DatetimeIndex(['2018-1-1 06:05', '2018-1-1 10:55']), [40, 40], marker="|", markersize=20, color="green")
ax.plot(pd.DatetimeIndex(['2018-1-1 00:05', '2018-1-1 04:55']), [35, 35], marker="|", markersize=20, color="red")
ax.plot(pd.DatetimeIndex(['2018-1-1 05:05', '2018-1-1 09:55']), [35, 35], marker="|", markersize=20, color="red")
ts_resampled.plot(color="red", style="*-", label="mean", alpha=0.75, ax=ax)
# set monthly locator
ax.xaxis.set_major_locator(mdates.HourLocator(interval=1))
#ax.xaxis.set_minor_locator(mdates.HourLocator(interval=1))
# set formatter
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
# set font and rotation for date tick labels
plt.gcf().autofmt_xdate()
ax.legend()
ax.set_xlabel('Time');
dti = pd.date_range('1/1/2018 00:00', periods=60*480, freq='min')
ts = pd.Series(np.sin(dti.hour * 2. * np.pi / 24.), index=dti)
ts = ts + np.random.normal(scale=0.4, size=ts.shape)
ax = ts.plot(figsize=(18, 3))
ax.vlines(pd.date_range('1/1/2018 00:00', periods=480/24, freq='D'), ymin=-2, ymax=2, color="red", linestyle=":", alpha=0.3);
ts.shape
ts_mean = ts.resample('2H').mean()
ts_median = ts.resample('2H').median()
ts_min = ts.resample('2H').min()
ts_max = ts.resample('2H').max()
ax = ts_mean.plot(y='duration', figsize=(18, 8), color="red", label="mean", alpha=0.75)
ts_median.plot(ax=ax, color="blue", label="median", alpha=0.75)
ts_min.plot(ax=ax, color="blue", alpha=0.5, style=":", label="min")
ts_max.plot(ax=ax, color="blue", alpha=0.5, style=":", label="max")
plt.fill_between(ts_min.index, ts_min.values, ts_max.values, facecolor='blue', alpha=0.1)
ax.legend()
ax.set_xlabel('Time');
ts_mean.shape
rolling_window = '6H'
start = '2018-1-1 00:00'
end = '2018-1-4 00:00'
dti = pd.date_range(start=start, end=end, freq='min')
ts = pd.Series(np.sin(dti.hour * 2. * np.pi / 24.), index=dti)
ts = ts + np.random.normal(scale=0.4, size=ts.shape)
ax = ts.plot(figsize=(18, 3))
ax.vlines(pd.date_range(start=start, end=end, freq=rolling_window), ymin=-2, ymax=2, color="red", linestyle=":", alpha=0.5);
ts2 = ts.rolling(rolling_window).mean() # Rolling window size: 1 day
ax = ts2.plot(figsize=(18, 3), color="red", alpha=0.75)
ax.vlines(pd.date_range(start=start, end=end, freq=rolling_window), ymin=-1, ymax=1, color="red", linestyle=":", alpha=0.5);
ts2 = ts.resample(rolling_window).mean() # Rolling window size: 1 day
ax = ts2.plot(figsize=(18, 3), color="red", alpha=0.75)
ax.vlines(pd.date_range(start=start, end=end, freq=rolling_window), ymin=-1, ymax=1, color="red", linestyle=":", alpha=0.5);
dti = pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-1 12:00', '2018-1-2 00:00', '2018-1-2 12:00'])
ts = pd.Series([1., 0., 2., 1.], index=dti)
print(ts)
ax = ts.plot(figsize=(18, 3))
dti = pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-2 00:00'])
ax.vlines(dti, ymin=0, ymax=2, color="red", linestyle=":", alpha=0.3);
ts_mean = ts.groupby(ts.index.time).mean()
print(ts_mean)
ax = ts_mean.plot(y='duration', figsize=(10, 4), color="red", label="mean", alpha=0.75)
ax.legend()
ax.set_xlabel('Time');
dti = pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-1 12:00', '2018-1-2 00:31', '2018-1-2 12:25']) # Here time is not aligned (non constant frequency)
ts = pd.Series([1., 0., 2., 1.], index=dti)
print(ts)
ax = ts.plot(figsize=(18, 3));
ts_mean = ts.groupby(ts.index.time).mean()
print(ts_mean)
ax = ts_mean.plot(y='duration', figsize=(10, 4), color="red", label="mean", alpha=0.75)
ax.legend()
ax.set_xlabel('Time');
dti = pd.date_range('1/1/2018 00:00', periods=960, freq='h')
ts = pd.Series(np.sin(dti.hour * 2. * np.pi / 24.), index=dti)
ts = ts + np.random.normal(scale=0.4, size=ts.shape)
ax = ts.plot(figsize=(18, 3))
ax.vlines(pd.date_range('1/1/2018 00:00', periods=960/24, freq='D'), ymin=-2, ymax=2, color="red", linestyle=":", alpha=0.3);
ts_mean = ts.groupby(ts.index.time).mean()
ts_median = ts.groupby(ts.index.time).median()
ts_quartile_1 = ts.groupby(ts.index.time).quantile(0.25)
ts_quartile_3 = ts.groupby(ts.index.time).quantile(0.75)
ax = ts_mean.plot(y='duration', figsize=(14, 8), color="red", label="mean", alpha=0.75)
ts_median.plot(ax=ax, color="blue", label="median", alpha=0.75)
ts_quartile_1.plot(ax=ax, color="blue", alpha=0.5, style=":", label="1st quartile")
ts_quartile_3.plot(ax=ax, color="blue", alpha=0.5, style=":", label="3rd quartile")
plt.fill_between(ts_quartile_1.index, ts_quartile_1.values, ts_quartile_3.values, facecolor='blue', alpha=0.1)
ax.legend()
ax.set_xlabel('Time');
dti = pd.period_range('1/1/2018 00:00', periods=960, freq='h')
ts = pd.Series(np.sin(dti.hour * 2. * np.pi / 24.), index=dti)
ts = ts + np.random.normal(scale=0.4, size=ts.shape)
ax = ts.plot(figsize=(18, 3))
ax.vlines(pd.date_range('1/1/2018 00:00', periods=960/24, freq='D'), ymin=-2, ymax=2, color="red", linestyle=":", alpha=0.3);
ts_mean = ts.groupby(ts.index.start_time.time).mean() # Note the ".start_time" here
ts_median = ts.groupby(ts.index.start_time.time).median() # Note the ".start_time" here
ts_quartile_1 = ts.groupby(ts.index.start_time.time).quantile(0.25) # Note the ".start_time" here
ts_quartile_3 = ts.groupby(ts.index.start_time.time).quantile(0.75) # Note the ".start_time" here
ax = ts_mean.plot(y='duration', figsize=(14, 8), color="red", label="mean", alpha=0.75)
ts_median.plot(ax=ax, color="blue", label="median", alpha=0.75)
ts_quartile_1.plot(ax=ax, color="blue", alpha=0.5, style=":", label="1st quartile")
ts_quartile_3.plot(ax=ax, color="blue", alpha=0.5, style=":", label="3rd quartile")
plt.fill_between(ts_quartile_1.index, ts_quartile_1.values, ts_quartile_3.values, facecolor='blue', alpha=0.1)
ax.legend()
ax.set_xlabel('Time');
dti = pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-1 12:00', '2018-1-2 00:31', '2018-1-2 12:25']) # Here time is not aligned (non constant frequency)
ts = pd.Series([1., 0., 2., 1.], index=dti)
print(ts)
ts.index.round('H')
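# Related illustrative calls: floor() and ceil() behave like round() but always snap down / up.
ts.index.floor('H')
ts.index.ceil('H')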
dti = pd.DatetimeIndex(['2018-1-1 00:00', '2018-1-1 06:45', '2018-1-1 12:00',
'2018-1-3 00:00', '2018-1-3 06:00'])
s = pd.Series(np.ones(dti.shape), index=dti)
#dti.groupby(dti.date) # it works but it returns a dictionary...
s.groupby(dti.date).count().plot.bar(color="blue", alpha=0.5);
s.resample('1d').count().plot.bar(color="blue", alpha=0.5);
#help(df.plot)
x = np.arange(0, 6, 0.1)
y1 = np.cos(x)
y2 = np.sin(x)
Y = np.array([y1, y2]).T
df = pd.DataFrame(Y,
columns=['cos(x)', 'sin(x)'],
index=x)
df.iloc[:10]
df.plot(legend=True)
df.plot.line(legend=True)
df = pd.DataFrame(np.random.randn(36, 2))
df.plot(drawstyle="steps", linewidth=2)
df.plot(drawstyle="steps-post", linewidth=2);
x = np.arange(0, 6, 0.5)
y1 = np.cos(x)
y2 = np.sin(x)
Y = np.array([y1, y2]).T
df = pd.DataFrame(Y,
columns=['cos(x)', 'sin(x)'],
index=x)
df
df.plot.bar(legend=True)
df.plot.bar(legend=True, stacked=True)
df.plot.barh(legend=True)
x1 = np.random.normal(size=(10000))
x2 = np.random.normal(loc=3, scale=2, size=(10000))
X = np.array([x1, x2]).T
df = pd.DataFrame(X, columns=[r'$\mathcal{N}(0,1)$', r'$\mathcal{N}(3,2)$'])
df.plot.hist(alpha=0.5, bins=100, legend=True);
df.plot.hist(alpha=0.5, bins=100, legend=True, density=True);
x1 = np.random.normal(size=(10000))
x2 = np.random.normal(loc=3, scale=2, size=(10000))
X = np.array([x1, x2]).T
df = pd.DataFrame(X, columns=[r'$\mathcal{N}(0,1)$', r'$\mathcal{N}(3,2)$'])
df.plot.box()
df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
df['b'] = df['b'] + np.arange(1000)
df.plot.hexbin(x='a', y='b', gridsize=25)
x1 = np.random.normal(size=(10000))
x2 = np.random.normal(loc=3, scale=2, size=(10000))
X = np.array([x1, x2]).T
df = pd.DataFrame(X, columns=[r'$\mathcal{N}(0,1)$', r'$\mathcal{N}(3,2)$'])
df.plot.kde()
df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
df.plot.area()
x = np.random.randint(low=0, high=6, size=(50))
df = pd.DataFrame(x, columns=["A"])
df.A.value_counts()
df.A.value_counts().plot.pie(y="A")
x1 = np.random.normal(size=(10000))
x2 = np.random.normal(loc=3, scale=2, size=(10000))
X = np.array([x1, x2]).T
df = pd.DataFrame(X, columns=[r'$\mathcal{N}(0,1)$', r'$\mathcal{N}(3,2)$'])
df.plot.scatter(x=r'$\mathcal{N}(0,1)$',
y=r'$\mathcal{N}(3,2)$',
alpha=0.2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Le's assume now we introduce extra tabulations.
Step2: It works well because we use pandas both to save the dataframe and to restore it. In flatfile_tab.txt the value containing the tab is written surrounded by quotes, and pandas recognises those quotes when reading, so the embedded tab is not interpreted as a column separator. However, most of the time the flat file is produced in a different way and the quotes are not present.
Step3: It failed! The data is not aligned and no exception was raised. If we move the line with the extra tab to the second position, we get:
Step4: As suggested in Python Pandas Error tokenizing data, we could add the parameter error_bad_lines=False or skiprows=N, but we would still lose those bad lines. So we use the function import_flatfile_into_database instead.
Step5: We check that we got the inserted line in the dataframe
|
<ASSISTANT_TASK:>
Python Code:
import random, pandas
text = [ "one","two","three","four","five","six","seven","eight","nine","ten" ]
data = [ { "name": text[random.randint(0,9)], "number": random.randint(0,99)} \
for i in range(0,10000) ]
df = pandas.DataFrame(data)
df.head(n=3)
df.to_csv("flatfile.txt", sep="\t", encoding="utf8", header=True, index=False)
dfr = pandas.read_csv("flatfile.txt", sep="\t", encoding="utf8")
dfr.head(n=3)
datatab = [ {"name": " one\ttab", "number":100 } ] + data
df = pandas.DataFrame(datatab)
df.head(n=3)
df.to_csv("flatfile_tab.txt", sep="\t", encoding="utf8", header=True, index=False)
dfr = pandas.read_csv("flatfile_tab.txt", sep="\t", encoding="utf8")
dfr.head(n=3)
with open("flatfile_tab.txt", "r", encoding="utf8") as f:
content = f.read()
content = content.replace('"','')
with open("flatfile_tab2.txt", "w", encoding="utf8") as f:
f.write(content)
dfr = pandas.read_csv("flatfile_tab2.txt", sep="\t", encoding="utf8")
dfr.head(n=3)
datatab = data[:1] + [ {"name": " one\ttab", "number":100 } ] + data[1:]
df = pandas.DataFrame(datatab)
df.to_csv("flatfile_tab_pos2.txt", sep="\t", encoding="utf8", header=True, index=False)
with open("flatfile_tab_pos2.txt","r",encoding="utf8") as f:
content = f.read()
content = content.replace('"','')
with open("flatfile_tab_pos2.txt","w",encoding="utf8") as f:
f.write(content)
dfr = pandas.read_csv("flatfile_tab_pos2.txt", sep="\t", encoding="utf8")
dfr.head(n=3)
from pyensae.sql import import_flatfile_into_database
import_flatfile_into_database("flatfile_tab_pos2.db3", "flatfile_tab_pos2.txt")
from pyensae.sql import Database
db = Database("flatfile_tab_pos2.db3")
db.connect()
df = db.to_df("SELECT * FROM flatfile_tab_pos2")
db.close()
df.head()
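# Hedged illustration of the alternative mentioned in the text: error_bad_lines=False makes
# pandas skip the malformed line instead of failing, at the price of losing that record
# (recent pandas versions spell this on_bad_lines="skip").
dfr_skipped = pandas.read_csv("flatfile_tab_pos2.txt", sep="\t", encoding="utf8", error_bad_lines=False)
dfr_skipped.shape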
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Map electoral results to regions
Step2: First, handle Alaska specially
Step3: Normalize candidate names
Step4: Slightly disagrees with https
Step5: UOCAVA = The Uniformed and Overseas Citizens Absentee Voting Act. Ignore these.
Step6: 15005 is Kalawao County, Hawaii, which has a population of 89 and is accessible only by mule trail. Its votes are counted under Maui (15009), and they're in the same PUMA anyway
Step7: Do the actual grouping
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
%matplotlib inline
import numpy as np
import pandas as pd
import re
import six
from IPython.display import display
import sys
sys.path.append('..')
from pummeler.data import geocode_data
county_to_region = geocode_data('county_region_10').region.to_dict()
from glob import glob
assert len({v for k, v in county_to_region.iteritems() if k.startswith('02')}) == 1
ak_precincts = pd.read_csv('../../election-2012-results/data/ak_precincts.csv')
ak = ak_precincts.groupby(ak_precincts.candidate).sum().reset_index()
ak['state'] = 'ak'
ak['fips'] = next(k for k in county_to_region if k.startswith('02'))
ak['county'] = 'All of Alaska'
ak
bits = [ak]
for f in glob('../../election-2012-results/data/??.csv'):
piece = pd.read_csv(f, dtype={'fips': str})
piece['state'] = f[-6:-4]
bits.append(piece)
election = pd.concat(bits)
reps = {
'goode': 'virgil goode',
'obama': 'barack obama',
'johnson': 'gary johnson',
'romney': 'mitt romney',
'stein': 'jill stein',
'virgil h. goode': 'virgil goode',
'virgil h. goode jr.': 'virgil goode',
'gary e. johnson': 'gary johnson',
'write in': 'write-in',
'write-ins': 'write-in',
'hoefling': 'tom hoefling',
'obama barack': 'barack obama',
'stein jill': 'jill stein',
'romney mitt': 'mitt romney',
'johnson gary': 'gary johnson',
'jill stein write-in': 'jill stein',
'hoefling (write-in)': 'tom hoefling',
'tom hoeffling': 'tom hoefling',
'alexander': 'stewart alexander',
'ross c. "rocky"': 'ross c. "rocky"',
'ross c. rocky': 'ross c. "rocky"',
'ross c.': 'ross c. "rocky"',
'rocky': 'ross c. "rocky"',
'paul': 'ron paul',
'ron paul write-in': 'ron paul',
'write-in**': 'write-in',
'clymer': 'james clymer',
'roth': 'cecil james roth',
'prokopich': 'barbara prokopich',
'barbara a. prokopich': 'barbara prokopich',
'kevin m. thorne': 'kevin thorne',
'thorne': 'kevin thorne',
}
def rewrite(s):
s = s.lower()
for x in ['/', ',', '(', ' and', ' for president']:
p = s.find(x)
if p != -1:
s = s[:p]
s = s.strip().replace(' ', ' ')
s = reps.get(s, s)
return s
election['cand'] = election.candidate.apply(rewrite)
cand_votes = election.groupby(election.cand).votes.sum().sort_values(ascending=False)
cand_votes.head(50)
election['party'] = 'oth'
election.loc[election.cand == 'barack obama', 'party'] = 'D'
election.loc[election.cand == 'mitt romney', 'party'] = 'R'
election.loc[election.cand == 'gary johnson', 'party'] = 'L'
election.loc[election.cand == 'jill stein', 'party'] = 'G'
election.groupby(election.party).votes.sum()
set(election.fips) - set(county_to_region)
election[pd.isnull(election.fips)]
{fips for fips in set(county_to_region) - set(election.fips)
if not fips.startswith('02')}
county_to_region['15005'] == county_to_region['15009']
election_region = election.groupby(election.fips.map(county_to_region)) \
.apply(lambda x: x.votes.groupby(x.party).sum()).unstack()
election_region.index.name = 'region'
election_region.columns = ['votes_{}'.format(p) for p in election_region.columns]
election_region.fillna(0, inplace=True)
election_region = election_region.astype('int')
election_region.head()
election_region.to_csv('2012-by-region.csv.gz', compression='gzip')
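# Quick illustrative sanity check on the aggregation (not needed for the export itself):
# the Democratic share of the two-party vote per region.
dem_share = election_region.votes_D / (election_region.votes_D + election_region.votes_R)
dem_share.describe()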
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First load the Spotify dataset.
Step2: 1) Add Dropout to Spotify Model
Step3: Now run this next cell to train the model see the effect of adding dropout.
Step4: 2) Evaluate Dropout
Step5: Now, we'll switch topics to explore how batch normalization can fix problems in training.
Step6: Run the following cell to train the network on the unstandardized Concrete data.
Step7: Did you end up with a blank graph? Trying to train this network on this dataset will usually fail. Even when it does converge (due to a lucky weight initialization), it tends to converge to a very large number.
Step8: Run the next cell to see if batch normalization will let us train the model.
Step9: 4) Evaluate Batch Normalization
|
<ASSISTANT_TASK:>
Python Code:
# Setup plotting
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
plt.rc('animation', html='html5')
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning_intro.ex5 import *
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.model_selection import GroupShuffleSplit
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import callbacks
spotify = pd.read_csv('../input/dl-course-data/spotify.csv')
X = spotify.copy().dropna()
y = X.pop('track_popularity')
artists = X['track_artist']
features_num = ['danceability', 'energy', 'key', 'loudness', 'mode',
'speechiness', 'acousticness', 'instrumentalness',
'liveness', 'valence', 'tempo', 'duration_ms']
features_cat = ['playlist_genre']
preprocessor = make_column_transformer(
(StandardScaler(), features_num),
(OneHotEncoder(), features_cat),
)
def group_split(X, y, group, train_size=0.75):
splitter = GroupShuffleSplit(train_size=train_size)
train, test = next(splitter.split(X, y, groups=group))
return (X.iloc[train], X.iloc[test], y.iloc[train], y.iloc[test])
X_train, X_valid, y_train, y_valid = group_split(X, y, artists)
X_train = preprocessor.fit_transform(X_train)
X_valid = preprocessor.transform(X_valid)
y_train = y_train / 100
y_valid = y_valid / 100
input_shape = [X_train.shape[1]]
print("Input shape: {}".format(input_shape))
# YOUR CODE HERE: Add two 30% dropout layers, one after 128 and one after 64
model = keras.Sequential([
layers.Dense(128, activation='relu', input_shape=input_shape),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
# Check your answer
q_1.check()
#%%RM_IF(PROD)%%
# Wrong dropout layers
model = keras.Sequential([
layers.Dense(128, activation='relu', input_shape=input_shape),
layers.Dropout(0.3),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
q_1.assert_check_failed()
#%%RM_IF(PROD)%%
# Wrong dropout rate
model = keras.Sequential([
layers.Dense(128, activation='relu', input_shape=input_shape),
layers.Dropout(0.7),
layers.Dense(64, activation='relu'),
layers.Dropout(0.7),
layers.Dense(1)
])
q_1.assert_check_failed()
#%%RM_IF(PROD)%%
model = keras.Sequential([
layers.Dense(128, activation='relu', input_shape=input_shape),
layers.Dropout(0.3),
layers.Dense(64, activation='relu'),
layers.Dropout(0.3),
layers.Dense(1)
])
q_1.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_1.hint()
#_COMMENT_IF(PROD)_
q_1.solution()
model.compile(
optimizer='adam',
loss='mae',
)
history = model.fit(
X_train, y_train,
validation_data=(X_valid, y_valid),
batch_size=512,
epochs=50,
verbose=0,
)
history_df = pd.DataFrame(history.history)
history_df.loc[:, ['loss', 'val_loss']].plot()
print("Minimum Validation Loss: {:0.4f}".format(history_df['val_loss'].min()))
# View the solution (Run this cell to receive credit!)
q_2.check()
import pandas as pd
concrete = pd.read_csv('../input/dl-course-data/concrete.csv')
df = concrete.copy()
df_train = df.sample(frac=0.7, random_state=0)
df_valid = df.drop(df_train.index)
X_train = df_train.drop('CompressiveStrength', axis=1)
X_valid = df_valid.drop('CompressiveStrength', axis=1)
y_train = df_train['CompressiveStrength']
y_valid = df_valid['CompressiveStrength']
input_shape = [X_train.shape[1]]
model = keras.Sequential([
layers.Dense(512, activation='relu', input_shape=input_shape),
layers.Dense(512, activation='relu'),
layers.Dense(512, activation='relu'),
layers.Dense(1),
])
model.compile(
optimizer='sgd', # SGD is more sensitive to differences of scale
loss='mae',
metrics=['mae'],
)
history = model.fit(
X_train, y_train,
validation_data=(X_valid, y_valid),
batch_size=64,
epochs=100,
verbose=0,
)
history_df = pd.DataFrame(history.history)
history_df.loc[0:, ['loss', 'val_loss']].plot()
print(("Minimum Validation Loss: {:0.4f}").format(history_df['val_loss'].min()))
# YOUR CODE HERE: Add a BatchNormalization layer before each Dense layer
model = keras.Sequential([
layers.Dense(512, activation='relu', input_shape=input_shape),
layers.Dense(512, activation='relu'),
layers.Dense(512, activation='relu'),
layers.Dense(1),
])
# Check your answer
q_3.check()
#%%RM_IF(PROD)%%
# Wrong layers
model = keras.Sequential([
layers.Dense(512, activation='relu', input_shape=input_shape),
layers.BatchNormalization(),
layers.Dense(512, activation='relu'),
layers.BatchNormalization(),
layers.Dense(512, activation='relu'),
layers.BatchNormalization(),
layers.Dense(1),
])
q_3.assert_check_failed()
#%%RM_IF(PROD)%%
model = keras.Sequential([
layers.BatchNormalization(input_shape=input_shape),
layers.Dense(512, activation='relu'),
layers.BatchNormalization(),
layers.Dense(512, activation='relu'),
layers.BatchNormalization(),
layers.Dense(512, activation='relu'),
layers.BatchNormalization(),
layers.Dense(1),
])
q_3.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_3.hint()
#_COMMENT_IF(PROD)_
q_3.solution()
model.compile(
optimizer='sgd',
loss='mae',
metrics=['mae'],
)
EPOCHS = 100
history = model.fit(
X_train, y_train,
validation_data=(X_valid, y_valid),
batch_size=64,
epochs=EPOCHS,
verbose=0,
)
history_df = pd.DataFrame(history.history)
history_df.loc[0:, ['loss', 'val_loss']].plot()
print(("Minimum Validation Loss: {:0.4f}").format(history_df['val_loss'].min()))
# View the solution (Run this cell to receive credit!)
q_4.check()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the Earth's magnetic field $B_0$
Step2: Define the observations
Step3: Calculate data for plotting
Step4: 3D plot of field lines and data
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the '3d' projection)
# MagneticLongDipoleLine and MagneticLongDipoleField are assumed to be provided
# by the notebook's accompanying helper module; they are not defined in this snippet.
# define a dipole
dipoleloc = (0.,0.,-50.)
dipoleL = 100.
dipoledec, dipoleinc = 0., 90.
dipolemoment = 1e13
# geomagnetic field
B0, Binc, Bdec = 53600e-9, 90., 0. # in Tesla, degree, degree
B0x = B0*np.cos(np.radians(Binc))*np.sin(np.radians(Bdec))
B0y = B0*np.cos(np.radians(Binc))*np.cos(np.radians(Bdec))
B0z = -B0*np.sin(np.radians(Binc))
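# Quick check of the angle convention used above: with Binc = 90 deg the
# geomagnetic field points straight down, so the horizontal components vanish.
assert abs(B0x) < 1e-15 and abs(B0y) < 1e-15
assert np.isclose(B0z, -B0)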
# set observation grid
xmin, xmax, ymin, ymax, z = -5., 5., -5., 5., 1. # x, y bounds and elevation
profile_x = 0. # x-coordinate of y-profile
profile_y = 0. # y-coordinate of x-profile
h = 0.2 # grid interval
radii = (2., 5.) # how many layers of field lines for plotting
Naz = 10 # number of azimuth
# get field lines
linex, liney, linez = MagneticLongDipoleLine(dipoleloc,dipoledec,dipoleinc,dipoleL,radii,Naz)
# get map
xi, yi = np.meshgrid(np.r_[xmin:xmax+h:h], np.r_[ymin:ymax+h:h])
x1, y1 = xi.flatten(), yi.flatten()
z1 = np.full(x1.shape,z)
Bx, By, Bz = np.zeros(len(x1)), np.zeros(len(x1)), np.zeros(len(x1))
for i in np.arange(len(x1)):
Bx[i], By[i], Bz[i] = MagneticLongDipoleField(dipoleloc,dipoledec,dipoleinc,dipoleL,(x1[i],y1[i],z1[i]),dipolemoment)
Ba1 = np.dot(np.r_[B0x,B0y,B0z], np.vstack((Bx,By,Bz)))
# get x-profile
x2 = np.r_[xmin:xmax+h:h]
y2, z2 = np.full(x2.shape,profile_y), np.full(x2.shape,z)
Bx, By, Bz = np.zeros(len(x2)), np.zeros(len(x2)), np.zeros(len(x2))
for i in np.arange(len(x2)):
Bx[i], By[i], Bz[i] = MagneticLongDipoleField(dipoleloc,dipoledec,dipoleinc,dipoleL,(x2[i],y2[i],z2[i]),dipolemoment)
Ba2 = np.dot(np.r_[B0x,B0y,B0z], np.vstack((Bx,By,Bz)))
# get y-profile
y3 = np.r_[ymin:ymax+h:h]
x3, z3 = np.full(y3.shape,profile_x), np.full(y3.shape,z)
Bx, By, Bz = np.zeros(len(x3)), np.zeros(len(x3)), np.zeros(len(x3))
for i in np.arange(len(x3)):
Bx[i], By[i], Bz[i] = MagneticLongDipoleField(dipoleloc,dipoledec,dipoleinc,dipoleL,(x3[i],y3[i],z3[i]),dipolemoment)
Ba3 = np.dot(np.r_[B0x,B0y,B0z], np.vstack((Bx,By,Bz)))
fig = plt.figure()
ax = fig.gca(projection='3d')
# plot field lines
for lx,ly,lz in zip(linex,liney,linez):
ax.plot(lx,ly,lz,'-',markersize=1)
# plot map
ax.scatter(x1,y1,z1,s=2,alpha=0.3)
Bt = Ba1.reshape(xi.shape)*1e9 # contour and color scale in nT
c = ax.contourf(xi,yi,Bt,alpha=1,zdir='z',offset=z-max(radii)*2,cmap='jet',
levels=np.linspace(Bt.min(),Bt.max(),50,endpoint=True))
fig.colorbar(c)
# auto-scaling for profile plot
ptpmax = np.max((Ba2.ptp(),Ba3.ptp())) # dynamic range
autoscaling = np.max(radii) / ptpmax
# plot x-profile
ax.scatter(x2,y2,z2,s=2,c='black',alpha=0.3)
ax.plot(x2,Ba2*autoscaling,zs=ymax,c='black',zdir='y')
# plot y-profile
ax.scatter(x3,y3,z3,s=2,c='black',alpha=0.3)
ax.plot(y3,Ba3*autoscaling,zs=xmin,c='black',zdir='x')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_zlim(z-max(radii)*2, max(radii)*1.5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Get HSC Fluxes
Step3: Make the query
Step4: Check if it worked
Step5: Combine databases
Step6: Match HSC objects to COSMOS objects
Step7: Check matches
Step9: Get spec-z's matched to HSC objects
Step10: Make the query
Step11: Check if it worked
Step13: Get FRANKEN-Z photo-z's, and then match to HSC
Step14: Make the query
Step15: Check if it worked
Step16: Cross reference FRANKENZ ids to general HSC ids
Step17: Copy index column to a new data frame, then only add desired columns
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
# give access to importing dwarfz
import os, sys
dwarfz_package_dir = os.getcwd().split("dwarfz")[0]
if dwarfz_package_dir not in sys.path:
sys.path.insert(0, dwarfz_package_dir)
import dwarfz
from dwarfz.hsc_credentials import credential
from dwarfz.hsc_release_query import query_wrapper
# back to regular import statements
import os, sys
import shutil
import glob
import pandas as pd
import numpy as np
import pathlib
sql_base = """
SELECT
    object_id,
    ra, dec,
    detect_is_patch_inner, detect_is_tract_inner, detect_is_primary,
    gcmodel_flux, gcmodel_flux_err, gcmodel_flux_flags, gcmodel_mag,
    rcmodel_flux, rcmodel_flux_err, rcmodel_flux_flags, rcmodel_mag,
    icmodel_flux, icmodel_flux_err, icmodel_flux_flags, icmodel_mag,
    zcmodel_flux, zcmodel_flux_err, zcmodel_flux_flags, zcmodel_mag,
    ycmodel_flux, ycmodel_flux_err, ycmodel_flux_flags, ycmodel_mag
FROM
    pdr1_cosmos_widedepth_median.forced
LIMIT
    {}
OFFSET
    {}
"""
n_objects = 1263503
block_size = 250000
n_blocks = (n_objects // block_size) + 1
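# Paginate the download with LIMIT/OFFSET: ceil(1263503 / 250000) = 6 blocks,
# the last of which is only partially filled. Sanity check that the blocks
# cover every object:
assert n_blocks * block_size >= n_objects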
temp_hsc_table_dir = pathlib.Path("partial_hsc_tables")
if not temp_hsc_table_dir.is_dir():
temp_hsc_table_dir.mkdir()
limit = block_size
preview_results = False
delete_job = True
out_format = "sqlite3"
for i in range(n_blocks):
offset = i*block_size
sql = sql_base.format(limit, offset)
output_filename = temp_hsc_table_dir / "tmp_{}.sqlite3".format(i)
print(" ---------------- QUERY {} -------------------- ".format(i+1))
print(sql)
with open(output_filename, mode="wb") as output_file:
query_wrapper(credential, sql, preview_results, delete_job,
out_format, output_file,
nomail=True)
database_filenames = sorted(temp_hsc_table_dir.glob("tmp_*.sqlite3"))
database_filenames
dfs = [pd.read_sql_table("table_1", "sqlite:///{}".format(database_filename),
index_col="object_id")
for database_filename in database_filenames]
assert(sum(df.shape[0] for df in dfs) == n_objects)
combined = pd.concat(dfs)
assert(combined.shape[0] == n_objects)
del dfs
combined.head()
for filename in database_filenames:
os.remove(filename)
if len(list(temp_hsc_table_dir.glob("*")))==0:
temp_hsc_table_dir.rmdir()
combined.keys()
hsc_database_filename = "HSC_COSMOS_median_forced.sqlite3"
hsc_database_filename_old = hsc_database_filename + ".old"
if os.path.exists(hsc_database_filename):
try:
shutil.move(hsc_database_filename, hsc_database_filename_old)
combined.to_sql("hsc", "sqlite:///{}".format(hsc_database_filename))
except:
# in case there's an error during writing, don't overwrite/delete the existing database
shutil.move(hsc_database_filename_old, hsc_database_filename)
raise
else:
# only delete if combining went successfully
os.remove(hsc_database_filename + ".old")
else:
combined.to_sql("hsc", "sqlite:///{}".format(hsc_database_filename))
COSMOS_filename = pathlib.Path(dwarfz.data_dir_default) / "COSMOS_reference.sqlite"
COSMOS = dwarfz.datasets.COSMOS(COSMOS_filename)
COSMOS.df.head()
HSC_filename = pathlib.Path(dwarfz.data_dir_default) / "HSC_COSMOS_median_forced.sqlite3"
HSC = dwarfz.datasets.HSC(HSC_filename)
HSC.df.head()
matches = dwarfz.matching.Matches(COSMOS.df, HSC.df)
matches_filename = pathlib.Path(dwarfz.data_dir_default) / "matches.sqlite3"
if not matches_filename.exists():
matches.save_to_filename(matches_filename)
print("threshold (error) : {:>5.2f}".format(matches.threshold_error))
print("threshold (match) : {:>5.2f}".format(matches.threshold_match))
print("overall completeness : {:.2f} %".format(100*np.mean(matches.df.match[~matches.df.error])))
print("min separation: {:.4f} [arcsec]".format(min(matches.df.sep)))
print("max separation: {:.4f} [arcsec]".format(max(matches.df.sep)))
redshifts_sql = """
SELECT
    object_id, specz_id,
    d_pos,
    specz_ra, specz_dec,
    specz_redshift, specz_redshift_err, specz_flag_homogeneous
FROM
    pdr1_cosmos_widedepth_median.specz
"""
preview_results = False
delete_job = True
out_format = "sqlite3"
output_filename = "specz.{}".format(out_format)
print(output_filename)
with open(output_filename, mode="wb") as output_file:
query_wrapper(credential, redshifts_sql, preview_results, delete_job,
out_format, output_file,
nomail=True,
)
!ls -lh specz.sqlite3
df = pd.read_sql_table("table_1",
"sqlite:///{}".format("specz.sqlite3"),
index_col="object_id")
df = df[df.specz_flag_homogeneous]
df.head()
photoz_sql = """
SELECT
    pdr1_deep.forced.object_id,
    pdr1_deep.forced.ra,
    pdr1_deep.forced.dec,
    pdr1_deep.photoz_frankenz.photoz_best,
    pdr1_deep.photoz_frankenz.photoz_risk_best
FROM
    pdr1_deep.forced
    INNER JOIN pdr1_deep.photoz_frankenz
        ON pdr1_deep.photoz_frankenz.object_id=pdr1_deep.forced.object_id
WHERE (ra BETWEEN 149.25 AND 151.25) AND (dec BETWEEN 1.4 AND 3);
"""
preview_results = False
delete_job = True
out_format = "sqlite3"
output_filename = "photoz_tmp.{}".format(out_format)
print(output_filename)
with open(output_filename, mode="wb") as output_file:
query_wrapper(credential, photoz_sql, preview_results, delete_job,
out_format, output_file,
nomail=True,
)
!ls -lh photoz_tmp.sqlite3
df = pd.read_sql_table("table_1",
"sqlite:///{}".format("photoz_tmp.sqlite3"),
index_col="object_id")
df.head()
df.to_sql("FRANKENZ", "sqlite:///franken_z-DEEP-COSMOS.sqlite3",
if_exists="replace")
os.remove("photoz_tmp.sqlite3")
HSC_filename = pathlib.Path(dwarfz.data_dir_default) / "HSC_COSMOS_median_forced.sqlite3"
HSC = dwarfz.datasets.HSC(HSC_filename)
matches = dwarfz.matching.Matches(HSC.df, df )
matches.df["HSC_ids"] = matches.df.index
matches.df["FRANKENZ_ids"] = matches.df.catalog_2_ids
matches.df.head()
HSC.df.join(matches.df).join(df[["photoz_best",
"photoz_risk_best"]],
on="FRANKENZ_ids").head()
HSC_photo_zs = HSC.df.copy()[[]] # only copy index column
HSC_photo_zs = HSC_photo_zs.join(matches.df[["FRANKENZ_ids"]])
HSC_photo_zs = HSC_photo_zs.join(df[["photoz_best", "photoz_risk_best"]],
on="FRANKENZ_ids")
HSC_photo_zs.head()
HSC_photo_zs.to_sql("photo_z",
"sqlite:///HSC_matched_to_FRANKENZ.sqlite",
if_exists="replace",
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We do preprocessing steps as in source code of reference [1]
Step2: Now, we construct the model
Step3: Benchmark HMC
Step4: On CPU, we get the avg. time for each step
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro
import time
import numpy as np
import jax.numpy as jnp
from jax import random
import numpyro
import numpyro.distributions as dist
from numpyro.examples.datasets import COVTYPE, load_dataset
from numpyro.infer import HMC, MCMC, NUTS
assert numpyro.__version__.startswith("0.9.2")
# NB: replace gpu by cpu to run this notebook in cpu
numpyro.set_platform("gpu")
_, fetch = load_dataset(COVTYPE, shuffle=False)
features, labels = fetch()
# normalize features and add intercept
features = (features - features.mean(0)) / features.std(0)
features = jnp.hstack([features, jnp.ones((features.shape[0], 1))])
# make binary feature
_, counts = np.unique(labels, return_counts=True)
specific_category = jnp.argmax(counts)
labels = labels == specific_category
N, dim = features.shape
print("Data shape:", features.shape)
print(
"Label distribution: {} has label 1, {} has label 0".format(
labels.sum(), N - labels.sum()
)
)
def model(data, labels):
coefs = numpyro.sample("coefs", dist.Normal(jnp.zeros(dim), jnp.ones(dim)))
logits = jnp.dot(data, coefs)
return numpyro.sample("obs", dist.Bernoulli(logits=logits), obs=labels)
step_size = jnp.sqrt(0.5 / N)
kernel = HMC(
model,
step_size=step_size,
trajectory_length=(10 * step_size),
adapt_step_size=False,
)
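# With adapt_step_size=False and trajectory_length = 10 * step_size, each HMC
# trajectory uses roughly 10 leapfrog steps, which makes the per-leapfrog-step
# timing reported below easy to interpret.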
mcmc = MCMC(kernel, num_warmup=500, num_samples=500, progress_bar=False)
mcmc.warmup(random.PRNGKey(2019), features, labels, extra_fields=("num_steps",))
mcmc.get_extra_fields()["num_steps"].sum().copy()
tic = time.time()
mcmc.run(random.PRNGKey(2020), features, labels, extra_fields=["num_steps"])
num_leapfrogs = mcmc.get_extra_fields()["num_steps"].sum().copy()
toc = time.time()
print("number of leapfrog steps:", num_leapfrogs)
print("avg. time for each step :", (toc - tic) / num_leapfrogs)
mcmc.print_summary()
mcmc = MCMC(NUTS(model), num_warmup=50, num_samples=50, progress_bar=False)
mcmc.warmup(random.PRNGKey(2019), features, labels, extra_fields=("num_steps",))
mcmc.get_extra_fields()["num_steps"].sum().copy()
tic = time.time()
mcmc.run(random.PRNGKey(2020), features, labels, extra_fields=["num_steps"])
num_leapfrogs = mcmc.get_extra_fields()["num_steps"].sum().copy()
toc = time.time()
print("number of leapfrog steps:", num_leapfrogs)
print("avg. time for each step :", (toc - tic) / num_leapfrogs)
mcmc.print_summary()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Take global averages and time averages.
Step2: Here is code to make a nicely labeled sounding plot.
Step3: Now compute the Radiative Equilibrium solution for the grey-gas column model
Step4: Plot the radiative equilibrium temperature on the same plot with NCEP reanalysis
Step5: Now use convective adjustment to compute a Radiative-Convective Equilibrium temperature profile
Step6: Now plot this "Radiative-Convective Equilibrium" on the same graph
Step7: The convective adjustment gets rid of the unphysical temperature difference between the surface and the overlying air.
Step8: Now add this new temperature profile to the graph
Step9: Adding stratospheric ozone
Step10: Take the global average of the ozone climatology, and plot it as a function of pressure (or height)
Step11: This shows that most of the ozone is indeed in the stratosphere, and peaks near the top of the stratosphere.
Step12: Now we will do something new
Step13: Now run it out to Radiative-Convective Equilibrium, and plot
Step14: And we finally have something that looks like the tropopause, with temperature increasing above at about the correct rate. Though the tropopause temperature is off by 15 degrees or so.
Step15: And we find that the troposphere warms, while the stratosphere cools!
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
ncep_url = "https://psl.noaa.gov/thredds/dodsC/Datasets/ncep.reanalysis.derived/"
ncep_air = xr.open_dataset( ncep_url + "pressure/air.mon.1981-2010.ltm.nc", decode_times=False)
level = ncep_air.level
lat = ncep_air.lat
Tzon = ncep_air.air.mean(dim=('lon','time'))
weight = np.cos(np.deg2rad(lat)) / np.cos(np.deg2rad(lat)).mean(dim='lat')
Tglobal = (Tzon * weight).mean(dim='lat')
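# The cos(latitude) weights above are normalised to have mean 1, so the
# weighted mean is a proper area-weighted global average. Quick check:
assert np.isclose(float(weight.mean(dim='lat')), 1.0)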
fig = plt.figure( figsize=(10,8) )
ax = fig.add_subplot(111)
ax.plot( Tglobal + 273.15, np.log(level/1000))
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_yticks( np.log(level/1000) )
ax.set_yticklabels( level.values )
ax.set_title('Global, annual mean sounding from NCEP Reanalysis', fontsize = 24)
ax2 = ax.twinx()
ax2.plot( Tglobal + 273.15, -8*np.log(level/1000) );
ax2.set_ylabel('Approx. height above surface (km)', fontsize=16 );
ax.grid()
import climlab
from climlab import constants as const
col = climlab.GreyRadiationModel()
print(col)
col.subprocess['LW'].diagnostics
col.integrate_years(1)
print("Surface temperature is " + str(col.Ts) + " K.")
print("Net energy in to the column is " + str(col.ASR - col.OLR) + " W / m2.")
pcol = col.lev
fig = plt.figure( figsize=(10,8) )
ax = fig.add_subplot(111)
ax.plot( Tglobal + 273.15, np.log(level/1000), 'b-', col.Tatm, np.log( pcol/const.ps ), 'r-' )
ax.plot( col.Ts, 0, 'ro', markersize=20 )
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_yticks( np.log(level/1000) )
ax.set_yticklabels( level.values )
ax.set_title('Temperature profiles: observed (blue) and radiative equilibrium in grey gas model (red)', fontsize = 18)
ax2 = ax.twinx()
ax2.plot( Tglobal + const.tempCtoK, -8*np.log(level/1000) );
ax2.set_ylabel('Approx. height above surface (km)', fontsize=16 );
ax.grid()
dalr_col = climlab.RadiativeConvectiveModel(adj_lapse_rate='DALR')
print(dalr_col)
dalr_col.integrate_years(2.)
print("After " + str(dalr_col.time['days_elapsed']) + " days of integration:")
print("Surface temperature is " + str(dalr_col.Ts) + " K.")
print("Net energy in to the column is " + str(dalr_col.ASR - dalr_col.OLR) + " W / m2.")
dalr_col.param
fig = plt.figure( figsize=(10,8) )
ax = fig.add_subplot(111)
ax.plot( Tglobal + 273.15, np.log(level/1000), 'b-', col.Tatm, np.log( pcol/const.ps ), 'r-' )
ax.plot( col.Ts, 0, 'ro', markersize=16 )
ax.plot( dalr_col.Tatm, np.log( pcol / const.ps ), 'k-' )
ax.plot( dalr_col.Ts, 0, 'ko', markersize=16 )
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_yticks( np.log(level/1000) )
ax.set_yticklabels( level.values )
ax.set_title('Temperature profiles: observed (blue), RE (red) and dry RCE (black)', fontsize = 18)
ax2 = ax.twinx()
ax2.plot( Tglobal + const.tempCtoK, -8*np.log(level/1000) );
ax2.set_ylabel('Approx. height above surface (km)', fontsize=16 );
ax.grid()
rce_col = climlab.RadiativeConvectiveModel(adj_lapse_rate=6, abs_coeff=1.7E-4)
print(rce_col)
rce_col.integrate_years(2.)
print("After " + str(rce_col.time['days_elapsed']) + " days of integration:")
print("Surface temperature is " + str(rce_col.Ts) + " K.")
print("Net energy in to the column is " + str(rce_col.ASR - rce_col.OLR) + " W / m2.")
fig = plt.figure( figsize=(10,8) )
ax = fig.add_subplot(111)
ax.plot( Tglobal + 273.15, np.log(level/1000), 'b-', col.Tatm, np.log( pcol/const.ps ), 'r-' )
ax.plot( col.Ts, 0, 'ro', markersize=16 )
ax.plot( dalr_col.Tatm, np.log( pcol / const.ps ), 'k-' )
ax.plot( dalr_col.Ts, 0, 'ko', markersize=16 )
ax.plot( rce_col.Tatm, np.log( pcol / const.ps ), 'm-' )
ax.plot( rce_col.Ts, 0, 'mo', markersize=16 )
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_yticks( np.log(level/1000) )
ax.set_yticklabels( level.values )
ax.set_title('Temperature profiles: observed (blue), RE (red), dry RCE (black), and moist RCE (magenta)', fontsize = 18)
ax2 = ax.twinx()
ax2.plot( Tglobal + const.tempCtoK, -8*np.log(level/1000) );
ax2.set_ylabel('Approx. height above surface (km)', fontsize=16 );
ax.grid()
# Put in some ozone
import xarray as xr
ozonepath = "http://thredds.atmos.albany.edu:8080/thredds/dodsC/CLIMLAB/ozone/apeozone_cam3_5_54.nc"
ozone = xr.open_dataset(ozonepath)
ozone
# Taking annual, zonal, and global averages of the ozone data
O3_zon = ozone.OZONE.mean(dim=("time","lon"))
weight_ozone = np.cos(np.deg2rad(ozone.lat)) / np.cos(np.deg2rad(ozone.lat)).mean(dim='lat')
O3_global = (O3_zon * weight_ozone).mean(dim='lat')
O3_global.shape
ax = plt.figure(figsize=(10,8)).add_subplot(111)
ax.plot( O3_global * 1.E6, np.log(O3_global.lev/const.ps) )
ax.invert_yaxis()
ax.set_xlabel('Ozone (ppm)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
yticks = np.array([1000., 500., 250., 100., 50., 20., 10., 5.])
ax.set_yticks( np.log(yticks/1000.) )
ax.set_yticklabels( yticks )
ax.set_title('Global, annual mean ozone concentration', fontsize = 24);
oz_col = climlab.RadiativeConvectiveModel(lev = ozone.lev,
abs_coeff=1.82E-4,
adj_lapse_rate=6,
albedo=0.315)
ozonefactor = 75
dp = oz_col.Tatm.domain.axes['lev'].delta
sw_abs = O3_global * dp * ozonefactor
oz_col.subprocess.SW.absorptivity = sw_abs
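# The shortwave absorptivity assigned above scales the ozone mixing ratio by
# the pressure thickness of each layer; `ozonefactor` is an ad hoc scaling
# constant (a choice of this notebook, not a physical constant).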
oz_col.compute()
oz_col.compute()
print(oz_col.SW_absorbed_atm)
oz_col.integrate_years(2.)
print("After " + str(oz_col.time['days_elapsed']) + " days of integration:")
print("Surface temperature is " + str(oz_col.Ts) + " K.")
print("Net energy in to the column is " + str(oz_col.ASR - oz_col.OLR) + " W / m2.")
pozcol = oz_col.lev
fig = plt.figure( figsize=(10,8) )
ax = fig.add_subplot(111)
ax.plot( Tglobal + const.tempCtoK, np.log(level/1000), 'b-', col.Tatm, np.log( pcol/const.ps ), 'r-' )
ax.plot( col.Ts, 0, 'ro', markersize=16 )
ax.plot( dalr_col.Tatm, np.log( pcol / const.ps ), 'k-' )
ax.plot( dalr_col.Ts, 0, 'ko', markersize=16 )
ax.plot( rce_col.Tatm, np.log( pcol / const.ps ), 'm-' )
ax.plot( rce_col.Ts, 0, 'mo', markersize=16 )
ax.plot( oz_col.Tatm, np.log( pozcol / const.ps ), 'c-' )
ax.plot( oz_col.Ts, 0, 'co', markersize=16 )
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_yticks( np.log(level/1000) )
ax.set_yticklabels( level.values )
ax.set_title('Temperature profiles: observed (blue), RE (red), dry RCE (black), moist RCE (magenta), RCE with ozone (cyan)', fontsize = 18)
ax.grid()
oz_col2 = climlab.process_like( oz_col )
oz_col2.subprocess['LW'].absorptivity *= 1.2
oz_col2.integrate_years(2.)
fig = plt.figure( figsize=(10,8) )
ax = fig.add_subplot(111)
ax.plot( Tglobal + const.tempCtoK, np.log(level/const.ps), 'b-' )
ax.plot( oz_col.Tatm, np.log( pozcol / const.ps ), 'c-' )
ax.plot( oz_col.Ts, 0, 'co', markersize=16 )
ax.plot( oz_col2.Tatm, np.log( pozcol / const.ps ), 'c--' )
ax.plot( oz_col2.Ts, 0, 'co', markersize=16 )
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_yticks( np.log(level/const.ps) )
ax.set_yticklabels( level.values )
ax.set_title('Temperature profiles: observed (blue), RCE with ozone (cyan)', fontsize = 18)
ax.grid()
datapath = "http://thredds.atmos.albany.edu:8080/thredds/dodsC/CESMA/"
atmstr = ".cam.h0.clim.nc"
cesm_ctrl = xr.open_dataset(datapath + 'som_1850_f19/clim/som_1850_f19' + atmstr)
cesm_2xCO2 = xr.open_dataset(datapath + 'som_1850_2xCO2/clim/som_1850_2xCO2' + atmstr)
cesm_ctrl.T
T_cesm_ctrl_zon = cesm_ctrl.T.mean(dim=('time', 'lon'))
T_cesm_2xCO2_zon = cesm_2xCO2.T.mean(dim=('time', 'lon'))
weight = np.cos(np.deg2rad(cesm_ctrl.lat)) / np.cos(np.deg2rad(cesm_ctrl.lat)).mean(dim='lat')
T_cesm_ctrl_glob = (T_cesm_ctrl_zon*weight).mean(dim='lat')
T_cesm_2xCO2_glob = (T_cesm_2xCO2_zon*weight).mean(dim='lat')
fig = plt.figure( figsize=(10,8) )
ax = fig.add_subplot(111)
ax.plot( Tglobal + const.tempCtoK, np.log(level/const.ps), 'b-' )
ax.plot( oz_col.Tatm, np.log( pozcol / const.ps ), 'c-' )
ax.plot( oz_col.Ts, 0, 'co', markersize=16 )
ax.plot( oz_col2.Tatm, np.log( pozcol / const.ps ), 'c--' )
ax.plot( oz_col2.Ts, 0, 'co', markersize=16 )
ax.plot( T_cesm_ctrl_glob, np.log( cesm_ctrl.lev/const.ps ), 'r-' )
ax.plot( T_cesm_2xCO2_glob, np.log( cesm_ctrl.lev/const.ps ), 'r--' )
ax.invert_yaxis()
ax.set_xlabel('Temperature (K)', fontsize=16)
ax.set_ylabel('Pressure (hPa)', fontsize=16 )
ax.set_yticks( np.log(level/const.ps) )
ax.set_yticklabels( level.values )
ax.set_title('Temperature profiles: observed (blue), RCE with ozone (cyan), CESM (red)', fontsize = 18)
ax.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io, read_proj, read_selection
from mne.datasets import sample
from mne.time_frequency import psd_multitaper
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
proj_fname = data_path + '/MEG/sample/sample_audvis_eog-proj.fif'
tmin, tmax = 0, 60 # use the first 60s of data
# Setup for reading the raw data (to save memory, crop before loading)
raw = io.read_raw_fif(raw_fname).crop(tmin, tmax).load_data()
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# Add SSP projection vectors to reduce EOG and ECG artifacts
projs = read_proj(proj_fname)
raw.add_proj(projs, remove_existing=True)
fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
# Let's first check out all channel types
raw.plot_psd(area_mode='range', tmax=10.0, show=False)
# Now let's focus on a smaller subset:
# Pick MEG magnetometers in the Left-temporal region
selection = read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False,
stim=False, exclude='bads', selection=selection)
# Let's just look at the first few channels for demonstration purposes
picks = picks[:4]
plt.figure()
ax = plt.axes()
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks,
show=False)
# And now do the same with SSP applied
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks,
show=False)
# And now do the same with SSP + notch filtering
# Pick all channels for notch since the SSP projection mixes channels together
raw.notch_filter(np.arange(60, 241, 60), n_jobs=1)
raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft,
n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks,
show=False)
ax.set_title('Four left-temporal magnetometers')
plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
# Alternatively, you may also create PSDs from Raw objects with ``psd_*``
f, ax = plt.subplots()
psds, freqs = psd_multitaper(raw, low_bias=True, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, proj=True, picks=picks,
n_jobs=1)
psds = 10 * np.log10(psds)
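# Convert power to a decibel scale (10*log10) so this manual plot is comparable
# to the (by default dB-scaled) PSD plots produced by raw.plot_psd above.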
psds_mean = psds.mean(0)
psds_std = psds.std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD', xlabel='Frequency',
ylabel='Power Spectral Density (dB)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read an image
Step2: Quantize with scikit
Step6: Colinearity adjustment
Step7: Travelling salesman problem
Step8: The zero-point trick is legit. Reference from E. L. Lawler, Jan Karel Lenstra, A. H. G. Rinnooy Kan, D. B. Shmoys (1985). The Traveling Salesman Problem
Step9: LKH implementation.
Step10: Now result is the indices of points for the shortest path, shape (256,). And p is our quantized colormap, shape (256, 3). So we can select the points easily for an ordered colourmap.
Step11: Ideally I'd like all the distances too, but it wouldn't be too hard to compute these.
Step12: Check below an interactive version of the 3D plot. May help when there are complicated paths between points. You need to install plotly and colorlover (with pip) if you don't already have them.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
cd ~/Dropbox/dev/rainbow/notebooks
from PIL import Image
# img = Image.open('data/cbar/boxer.png')
# img = Image.open('data/cbar/fluid.png')
# img = Image.open('data/cbar/lisa.png')
# img = Image.open('data/cbar/redblu.png')
# img = Image.open('data/cbar/seismic.png')
# img = Image.open('data/cbar/drainage.jpg')
#img = Image.open('data/cbar/test.png')
img = Image.open('data/cbar/Colormap_Jet1.png')
img
img.size
n_colours = 100
from sklearn.cluster import KMeans
from sklearn.utils import shuffle
im = np.asarray(img)[..., :3] / 255.
h, w, d = im.shape
im_ = im.reshape((w * h, d))
# Define training set.
n = min(h*w//50, n_colours*10)
sample = shuffle(im_, random_state=0)[:n]
kmeans = KMeans(n_clusters=n_colours).fit(sample)
p = kmeans.cluster_centers_
# I don't know why I need to do this, but I do. Floating point precision maybe.
p[p > 1] = 1
p[p < 0] = 0
# ALL TRIPLES
# from itertools import permutations
# triples = np.array(list(permutations(p, 3)))
# triples.shape
# There are n(n - 1) values that "belong" to each point (have it first).
# So maybe I can get a measure of the local linearity of a point
# ... OK this is all too much, let's try something else...
from sklearn.neighbors import BallTree
tree = BallTree(p)
# Get only the three nearest
# _, idx = tree.query(p, 4)
# idx = idx[:, 1:] # For measuring the relationship between the 3 neighbours (not including 'self')
# triples = p[idx]
# Get 2 nearest neighbours and include 'self'
_, idx = tree.query(p, 3)
triples = p[idx]
colin = 1 - np.power(np.abs(np.linalg.det(triples)), 0.25) # 1 = colinear, 0 = not at all
# Will need to scale this I think
np.max(colin)
plt.hist(colin)
plt.plot()
plt.title('{} points'.format(colin.size))
plt.show()
# Now we also need a direction metric. Best thing is probably
# the spherical angle, which has two parameters: alpha and gamma.
def colinear(pts):
    """Area of a triangle, given an array of three 3d points."""
print(pts)
p1, p2, p3 = np.array(pts)
co = 1 - 0.5 * np.linalg.norm(np.cross(p2 - p1, p3 - p1))
return co
np.linalg.norm(np.cross(np.array([1,2,3])-np.array([6,3,9]), np.array([-1,4,3])-np.array([8,2,9])))
x, y, z = triples[-1]
x
np.apply_along_axis(colinear, 1, triples)
def orient_line(p):
    """Orientation of a line, given an array of two 3d points."""
p1, p2 = np.array(p)
line = np.abs(p1 - p2)
unit = line / np.linalg.norm(line)
x, y, z = unit
pitch = np.arcsin(-y)
yaw = np.arctan2(x, z)
return np.array([pitch, yaw])
def orient_tri(points):
    """Orientation of a triangle, given an array of three 3d points.

    Mean of sides or orientation of longest edge? Or mean of two longest...?
    """
this = []
pairs = list(permutations(points, 2))
for pair in [0, 1, -1]:
        this.append(orient_line(pairs[pair]))
return np.mean(this, axis=0)
idx[:5]
r, c = idx.shape
result = np.zeros((r, 2))
for i, ix in enumerate(idx):
triple = p[ix]
this_point = 0.5 * colinear * orient_tri(t)
result[i] += this_point
result[ix[1]] += 0.5 * this_point
result[ix[2]] += 0.5 * this_point
result
triples[3]
np.mean(a, axis=0)
from itertools import permutations
points = []
for i in triples:
this = []
pairs = list(permutations(i, 2))
for pair in [0, 1, -1]:
        this.append(orient_line(pairs[pair]))
points.append(this)
points = np.array(points)
points
# need a function that takes 3 points and returns the orientation and area of the triangle
np.arccos(-11/3)
a = np.array([[11,12,13], [21, 22, 23], [31, 32,33], [41, 42, 43], [51,52,53], [61,62,63]])
t = np.array(list(permutations(a, 3)))
from mpl_toolkits.mplot3d import Axes3D
# Set up the figure
fig = plt.figure(figsize=(8, 8))
# Result of TSP solver
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*p.T, c=p, lw=0, s=40, alpha=1)
ax.plot(*p.T, color='k', alpha=0.4)
ax.set_title('Codebook')
plt.show()
from pytsp import run, dumps_matrix
p = np.vstack([[[0.25, 0, 0.5]], p])
#p = np.vstack([[[0, 0, 0]], p])
p[:6]
from scipy.spatial.distance import pdist, squareform
# Make distance matrix.
dists = squareform(pdist(p, 'euclidean'))
# The values in `dists` are floats in the range 0 to sqrt(3).
# Normalize the values to int16s.
d = 32767 * dists / np.sqrt(3)
d = d.astype(np.int16)
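# The rescaling above maps the float distances (0 .. sqrt(3) in RGB space) onto
# the int16 range, presumably because the TSPLIB-style matrix handed to the
# solvers expects integer edge weights.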
# To use a TSP algo to solve the shortest Hamiltonian path problem,
# we need to add a point that is zero units from every other point.
row, col = dists.shape
d = np.insert(d, row, 0, axis=0)
d = np.insert(d, col, 0, axis=1)
d
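# The appended row/column of zeros is the "dummy city": a node at zero distance
# from every real point. An optimal TSP tour through it, cut open at that node,
# gives the shortest open (Hamiltonian) path through the real points. Quick check:
assert (d[-1, :] == 0).all() and (d[:, -1] == 0).all()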
outf = "/tmp/myroute_lkh.tsp"
with open(outf, 'w') as f:
f.write(dumps_matrix(d, name="My Route"))
tour_lkh = run(outf, start=0, solver="LKH")
#result = np.array(tour_concorde['tour'])
result = np.array(tour_lkh['tour'])
result
result.size # Should be n_colours + 2
# e = np.asscalar(np.where(result == result.size-1)[0])
# if e == 1:
# # Then it's second and I think I know why.
# # As long as it went to the last point next, and I think
# # it necessarily does, then we're good.
# print("Zero-point is second. Probably dealt with it.")
# result = np.concatenate([result[:e], result[e+1::][::-1]])
# elif e == len(result)-1:
# # Then it's at the end already.
# print("Zero-point is at the end. Dealt with it.")
# result = result[:-1]
# else:
# # I'm not sure why this would happen... but I Think in this
# # case we can just skip it.
# print("Zero-point is somewhere weird. Maybe dealt with... BE CAREFUL.")
# result = result[result != result.size-1]
# assert len(result) == len(p)
c = p[result[1:-1]]
from mpl_toolkits.mplot3d import Axes3D
# Set up the figure
fig = plt.figure(figsize=(8, 8))
# Result of TSP solver
ax = fig.add_subplot(111, projection='3d')
ax.scatter(*c.T, c=c, lw=0, s=40, alpha=1)
ax.plot(*c.T, color='k', alpha=0.4)
ax.set_title('TSP solver')
plt.show()
import plotly.graph_objs as go
import colorlover as cl
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
cb = cl.to_rgb(tuple(map(tuple, c*255)))
trace = go.Scatter3d(
name='TSP Sover',
x = c[:,0], y = c[:,1], z = c[:,2],
marker = dict(
size=4.,
color=cb
),
line=dict(
color='#000',
width=1,
),
)
data = [trace]
# Set the different layout properties of the figure:
layout = go.Layout(
autosize=False,
width=600,
height=600,
margin = dict(
t=0,b=0,l=0,r=0
),
scene = go.Scene(
xaxis=dict(
title='red',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(255, 0, 0)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
yaxis=dict(
title='green',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(0, 255, 0)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
zaxis=dict(
title='blue',
gridcolor='rgb(255, 255, 255)',
zerolinecolor='rgb(0, 0, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)'
),
aspectmode='cube',
camera=dict(
eye=dict(
x=1.7,
y=-1.7,
z=1,
)
),
)
)
fig = go.Figure(data=data, layout=layout)
iplot(fig, show_link=False)
np.save('/Users/matt/Dropbox/public/raw_data.npy', p[1:])
np.save('/Users/matt/Dropbox/public/ordered_data.npy', c)
from scipy.spatial import cKDTree
kdtree = cKDTree(c)
dx, ix = kdtree.query(im)
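# kdtree.query maps every RGB pixel to its nearest colour on the ordered locus:
# `ix` is the recovered "data" (position along the colourmap) and `dx` is the
# distance to that colour, i.e. a per-pixel error estimate.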
plt.imshow(ix, cmap='gray')
plt.colorbar()
plt.show()
plt.imshow(dx, cmap='gray')
plt.colorbar()
plt.show()
fig = plt.figure(figsize=(18, 5))
ax0 = fig.add_subplot(131)
plt.imshow(im, interpolation='none')
ax0.set_title("Starting image")
ax1 = fig.add_subplot(132, projection='3d')
ax1.scatter(*c.T, c=c, lw=0, s=40, alpha=1)
ax1.plot(*c.T, color='k', alpha=0.5)
ax1.text(*c[0], ' start')
ax1.text(*c[-1], ' end')
ax1.set_title("Recovered cmap locus")
ax2 = fig.add_subplot(133)
plt.imshow(ix, cmap='viridis', interpolation='none')
plt.colorbar(shrink=0.75)
ax2.set_title("Recovered data with known cmap")
plt.show()
cmaps = [('Perceptually Uniform Sequential',
['viridis', 'inferno', 'plasma', 'magma']),
('Sequential', ['Blues', 'BuGn', 'BuPu',
'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples', 'RdPu',
'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']),
('Sequential (2)', ['afmhot', 'autumn', 'bone', 'cool',
'copper', 'gist_heat', 'gray', 'hot',
'pink', 'spring', 'summer', 'winter']),
('Diverging', ['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral',
'seismic']),
('Qualitative', ['Accent', 'Dark2', 'Paired', 'Pastel1',
'Pastel2', 'Set1', 'Set2', 'Set3']),
('Miscellaneous', ['gist_earth', 'terrain', 'ocean', 'gist_stern',
'brg', 'CMRmap', 'cubehelix',
'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'jet', 'rainbow',
'gist_rainbow', 'hsv', 'flag', 'prism'])]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: One easy way to tell the number of dimensions is to look at the number of square brackets at the beginning. [[ = 2 dimensions. [[[ = 3 dimensions.
Step2: Numpy allows for vectorisation, i.e. operations are applied to whole arrays instead of individual elements. To get the results of a_list * b_list using traditional python, you would have had to write a for loop. When dealing with millions or billions of lines of data, that can be inefficient. We will spend some more time on operations of this nature when we get to Broadcasting.
Step3: We can even reshape these arrays into our desired shape. But remember, when we say desired shape, we are not speaking of circles or pentagons. Think squares, rectangles, cubes, and the like.
Step4: The arange feature generates sequential series though. What if we want random numbers?
Step5: Translating from Python to English: "call the randint function from the random module of numpy, select 20 numbers between 0 and 999 at random, and assign the result to an array named rand_arr", i.e. 0 is included, 1000 is excluded.
Step6: Remember, the first number always represents the number of rows.
Step7: Random Array with Standard Normal Distribution
Step8: Array of Zeroes
Step9: Array of Ones
Step10: Identity Matrix
Step11: Linspace
Step12: Quick Operations on Numpy Arrays
Step13: Now imagine this is just a small snippet of a large array with millions, or even billions of numbers. Does that sound crazy? Well, Data Scientists regularly work with large arrays of numbers. The Netflix Data Scientists, for example, deal with a high dimensional sparse matrix.
Step14: Keep in mind that if we have duplicate entries, or multiple entries, only the first entry will be returned.
Step15: Selecting Values
Step16: Remember our old friend, lists?
Step17: Remember, rows before columns. Always!
Step18: Exercise
Step19: Exercise
Step20: Exercise
Step21: Exercise
Step22: Exercise
Step23: Fancy Indexing
Step24: Method 2
Step25: Take
Step26: Works with Multi-Dimensional
Step27: Broadcasting
Step28: Here we have broadcast 10 to all other elements in the array. Remember Vectorisation? Same principles!
Step29: Broadcasting Rule
Step30: A quick digression, in case you are wondering, the .astype('float') was just a quick operation to convert integers to floats as you are already familiar with. If you want to find out what the data type of an element in a numpy array is, simply use the suffix .dtype
Step31: Back to our array, arr1
Step32: Do you see what happened here? Our row with 3 elements, was sequentially added to each 3-element row in arr1.
Step33: A final example now, with a (5,1) and (3) array. Read the rule once again - and it will be clear that the new array will be a 5X3 array.
Step34: Other Array Operations
Step35: So what happened to our original array? Let's find out.
Step36: Why did that happen?! We never touched a1, and even went on to create a whole new array!
Step37: Squaring Arrays
Step38: Square Roots
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# Create an array with the statement np.array
a = np.array([1,2,3,4])
print('a is of type:', type(a))
print('dimension of a:', a.ndim) # To find the dimension of 'a'
arr1 = np.array([1,2,3,4])
arr1.ndim
arr2 = np.array([[1,2],[2,3],[3,4],[4,5]])
arr2.ndim
# Doesn't make a difference to a computer how you represent it,
# but if humans are going to read your code, this might be useful
arr3 = np.array([[[1,2],[2,3]],
[[2,3],[3,4]],
[[4,5],[5,6]],
[[6,7],[7,8]]
])
arr3.ndim
arr4 = np.array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]])
arr4.ndim
a_list = [1,2,3,4,5]
b_list = [5,10,15,20,25]
# Multiplying these will give an error
print(a_list * b_list)
a_list = np.array([1,2,3,4,5])
b_list = np.array([5,10,15,20,25])
print(a_list * b_list)
arr1 = np.arange(16)
print(arr1)
arr1.reshape(4,4)
arr1.reshape(2,8)
arr1.reshape(8,2)
arr1.reshape(16,1)
np.random.seed(42)
rand_arr = np.random.randint(0,1000,20)
print(rand_arr)
rand_arr.reshape(5,4)
rand_arr.reshape(4,5)
rand_arr.reshape(2,10)
np.random.seed(42)
np.random.rand(5)
np.random.seed(42)
np.random.rand(3,2)
np.random.seed(42)
np.random.randn(5)
np.zeros(16)
np.zeros((4,4))
np.ones(5)
np.ones((4,4))
np.eye(10)
# 5 evenly spaced numbers between -5 and 5
np.linspace(-5,5,5)
import numpy as np
np.random.seed(42)
arr1 = np.random.randint(1,1000,100)
arr1 = arr1.reshape(10,10)
arr1.shape
arr1
# Find the highest value in arr1
arr1.max()
# Find the lowest value in arr1
arr1.min()
# Find the location of the highest value in arr1
arr1.argmax()
arr1.argmin()
# From earlier
rand_arr = np.random.randint(0,1000,20)
rand_arr
rand_arr = rand_arr.reshape(4,5)
rand_arr.shape
rand_arr
import numpy as np
np.random.seed(42)
arr1 = np.arange(1,6)
arr1
arr1[0]
arr1[0:3]
arr1[-1]
import numpy as np
np.random.seed(42)
rand_arr = np.random.randint(0,1000,20)
print(rand_arr)
rand_arr = rand_arr.reshape(5,4)
rand_arr
rand_arr[0]
rand_arr[1]
rand_arr[0][-1]
# Another way to write the same thing
rand_arr[0,-1]
import numpy as np
np.random.seed(42)
arr1 = np.arange(1,101)
arr1
arr1 = arr1.reshape(10,10)
arr1
# Step 1 - Narrow down the row
arr1[2] # 3rd row
# 26 is at index 5, we need all the numbers from the 6th column onwards
arr1[2,5:]
# Step 1: Identify the Row
arr1[7:]
# Now we need the first three columns
arr1[7:,:3]
# Your code here
# Your code here
# Your code here
import numpy as np
np.random.seed(42)
arr1 = np.random.randint(0,1000,100)
arr1
# We check what values are greater than 150
arr1>150
# Assign this operation to a variable called mask
mask = arr1>150
# Create a new array which subsets arr1 based on a boolean operation
arr2 = arr1[mask]
arr2
# Check the shape
arr2.shape
list1 = [1,3,5,7]
list2 = [2,4,6,8]
arr1 = np.arange(1,101)
arr1
arr_even = arr1[list1]
arr_even
# Alternatively
arr_even = arr1[[1,3,5,7]]
arr_even
arr_odd = arr1[list2]
arr_odd
arr1 = np.arange(1,101)
arr1
indices = [0,2,4,10,20,80,91,97,99]
np.take(arr1, indices)
np.take(arr1, [[0, 1], [11, 18]])
arr_1 = np.arange(1,11)
print(arr_1)
print(arr_1 * 10)
arr_1 = np.array([[1,2],[3,4]])
a = 2
arr_1 + a
arr1 = np.arange(1,13)
arr1
arr1.shape
arr1 = arr1.reshape(4,3).astype('float')
arr1
arr1.dtype
arr_example = np.array([1,2,3,4])
print(arr_example)
print('arr_example is an',arr_example.dtype)
arr_example = arr_example.astype('float')
print('arr_example is now a',arr_example.dtype)
arr1
arr1.shape
arr2 = np.array([0.0,1.0,2.0])
print(arr2)
print(arr2.shape)
arr1 + arr2
arr3 = np.arange(0,4)
arr3 = arr3.astype('float')
print(arr3)
print(arr3.shape)
# Let's generate our error
arr1 + arr3
arr4 = np.arange(1,6)
arr4
arr4 = arr4.reshape(5,1).astype('float')
arr4.shape
arr2
arr4 * arr2
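# Shapes (5, 1) and (3,) broadcast to (5, 3): arr4 is stretched across columns
# and arr2 across rows. Quick check:
assert (arr4 * arr2).shape == (5, 3)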
a1 = np.arange(1,21)
a1 = a1.reshape(4,5)
a1
# Let's get the first column
a1[:,0]
# Assign to new array
new_a1 = a1[:,0]
new_a1
# Recall that this is how you select all values
new_a1[:] = 42
new_a1
a1
a1_copy = a1.copy()
a1_copy
a1_copy = np.arange(1,21)
a1_copy = a1_copy.reshape(4,5)
a1_copy
a1
np.square(a1)
np.sqrt(a1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'niwa', 'ukesm1-0-ll', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'pcmdi', 'sandbox-3', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read Image
Step2: Boundary filters
|
<ASSISTANT_TASK:>
Python Code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as scp
img = cv2.imread('paint.jpg', cv2.IMREAD_GRAYSCALE)
kernal = np.zeros((51,51))
kernal[25,25] = 1
Constant_filter = scp.correlate(img,kernal,mode='constant')
Wrap_filter = scp.correlate(img,kernal,mode='wrap')
Mirror_filter = scp.correlate(img,kernal,mode='mirror')
Nearest_filter = scp.correlate(img,kernal,mode='nearest')
Reflect_filter = scp.correlate(img,kernal,mode='reflect')
plt.figure(figsize=(10,8))
plt.subplot(2,3,1), plt.imshow(img, cmap='gray'), plt.title('Original')
plt.subplot(2,3,2), plt.imshow(Constant_filter, cmap='gray'), plt.title('Constant_filtered')
plt.subplot(2,3,3), plt.imshow(Wrap_filter, cmap='gray'), plt.title('Wrap_filtered')
plt.subplot(2,3,4), plt.imshow(Mirror_filter, cmap='gray'), plt.title('Mirror_filtered')
plt.subplot(2,3,5), plt.imshow(Nearest_filter, cmap='gray'), plt.title('Nearest_filtered')
plt.subplot(2,3,6), plt.imshow(Reflect_filter, cmap='gray'), plt.title('Reflect_filtered')
plt.show()
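# A minimal added sketch (not from the original notebook): the boundary modes are
# easier to compare on a tiny 1-D array. The centred delta kernel above effectively
# copies the input, so a 3-tap box kernel is used here to make the edge handling
# visible; the printed values are computed by scipy, not assumed.
tiny = np.array([1.0, 2.0, 3.0])
box = np.ones(3)
for mode in ['constant', 'wrap', 'mirror', 'nearest', 'reflect']:
    print("%-9s %s" % (mode, scp.correlate(tiny, box, mode=mode)))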
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship
Step3: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcome[i].
Step5: Tip
Step6: Question 1
Step7: Answer
Step9: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction
Step10: Question 2
Step11: Answer
Step13: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction
Step14: Question 3
Step15: Answer
Step17: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Step18: Question 4
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
# RMS Titanic data visualization code
from titanic_visualizations import survival_stats
from IPython.display import display
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
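# Added illustration (not part of the original project): 'data' and 'outcomes'
# stay paired by row index, so passenger data.loc[i] has outcome outcomes[i].
display(data.loc[0])
print 'Survival outcome of passenger 0:', outcomes[0]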
def accuracy_score(truth, pred):
    """Returns accuracy score for input truth and predictions."""
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
def predictions_0(data):
    """Model with no features. Always predicts a passenger did not survive."""
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
#print predictions
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Sex')
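# Added numeric check (not in the original notebook): survival rate by sex,
# computed directly from the full data with pandas.
print full_data.groupby('Sex')['Survived'].mean()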
def predictions_1(data):
    """Model with one feature:
    - Predict a passenger survived if they are female."""
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'male':
predictions.append(0)
else:
predictions.append(1)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
#print predictions
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
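# Added numeric check (not in the original notebook): survival rate of males,
# split at age 10, to back up the statistics described above.
males = full_data[full_data['Sex'] == 'male']
print males.groupby(males['Age'] < 10)['Survived'].mean()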
def predictions_2(data):
    """Model with two features:
    - Predict a passenger survived if they are female.
    - Predict a passenger survived if they are male and younger than 10."""
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'male':
if passenger['Age'] < 10:
predictions.append(1)
else:
predictions.append(0)
else:
predictions.append(1)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
print accuracy_score(outcomes, predictions)
survival_stats(data, outcomes, 'Age', ["Sex == 'female'"])
survival_stats(data, outcomes, 'Age', ["Sex == 'male'", "Age > 10", "Pclass == 3","Parch == 0"])
survival_stats(data, outcomes, 'Pclass', ["Sex == 'female'"])
# females from classes one and two will survive
survival_stats(data, outcomes, 'Parch', ["Sex == 'female'", "Pclass == 3"])
# in third class, passengers with Parch equal to 0 are more likely to survive
survival_stats(data, outcomes, 'Fare', ["Sex == 'female'", "Pclass == 3", "Parch != 0"])
# Fare less than 20 will survive
def predictions_3(data):
    """Model with multiple features. Makes a prediction with an accuracy of at least 80%."""
predictions = []
for _, passenger in data.iterrows():
# Remove the 'pass' statement below
# and write your prediction conditions here
if passenger['Sex'] == 'male':
if passenger['Age'] < 10:
predictions.append(1)
elif passenger['Pclass'] == 1 and passenger['Age'] < 40 and passenger['Age'] >20:
predictions.append(1)
elif passenger['Pclass'] == 3 and passenger['Parch'] == 1 and passenger['Age'] < 30 and passenger['Age'] >20:
predictions.append(1)
else:
predictions.append(0)
else:
if passenger['Pclass'] == 3:
if passenger['Age'] > 40 and passenger['Age'] < 60:
predictions.append(0)
elif passenger['Parch'] == 0:
predictions.append(1)
else:
if passenger['Fare'] < 20:
predictions.append(1)
else:
predictions.append(0)
else:
predictions.append(1)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us also draw the graphs of the functions. We can use the plot function from the matplotlib library.
Step2: Example
Step3: The value $\log_2(3)$ is the solution of the equation $3=2^x$ and is approximately equal to $1.6$.
Step4: << back
|
<ASSISTANT_TASK:>
Python Code:
import sympy
from sympy import Eq,solve
from sympy.abc import x,y
sympy.init_printing()
f = lambda x: (2*x+2)/(x-1)
enacba = Eq(f(y),x)
enacba
resitve = solve(enacba,y) # solve the equation for y
resitve
invf = sympy.lambdify(x,resitve[0])
Eq(y,invf(x))
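# Added sanity check (not in the original notebook): composing f with the
# symbolic solution should simplify back to x.
sympy.simplify(f(resitve[0]))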
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
t = np.linspace(-8,8,200)
fig, ax = plt.subplots()
plt.plot(t,f(t),'.',label="$f(x)$")
plt.plot(t,invf(t),'.',label="$f^{-1}(x)$")
plt.plot(t,t,'k',label="$y=x$")
ax.axis("equal")
plt.ylim(-5,5)
ax.legend(loc=2)
# set the x-spine (see below for more info on `set_position`)
ax.spines['left'].set_position('zero')
# turn off the right spine/ticks
ax.spines['right'].set_color('none')
ax.yaxis.tick_left()
# set the y-spine
ax.spines['bottom'].set_position('zero')
# turn off the top spine/ticks
ax.spines['top'].set_color('none')
ax.xaxis.tick_bottom()
t = np.linspace(0,2)
plt.plot(t,2**t,label="$y=2^x$")
plt.plot(t,3+np.zeros(t.shape),label="$y=3$")
plt.grid()
plt.legend()
plt.title("carry $y=3$ over to the graph of the function and read off $x$")
np.log(3)/np.log(2)
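# Added cross-check (not in the original notebook): the same value computed
# symbolically with sympy.
sympy.log(3, 2).evalf()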
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Besides the default way of declaring local variables, Python has two further kinds of declaration, global and nonlocal (nonlocal only exists from Python 3.x onward; 2.7 does not have it). A variable declared global refers directly to (3) the global scope of the current module, while nonlocal refers to (2) the scopes outside the innermost one but inside the global scope. The reason references and assignments need to be stressed here is that an ordinary local variable has only read-only access to anything outside the innermost local scope, as the following example shows:
Step2: An UnboundLocalError is raised here because the scope inside main() has only read access to the global variable x. Trying to change x inside main() would not affect the global variable; it would instead create a new local variable, and clearly x += 1 cannot be applied to a local variable that has not been created yet. To obtain a full reference to the global variable, a global declaration is needed:
Step4: Python closures
Step5: For Python 2.7, which does not yet have the nonlocal keyword, a small trick can be used to get around the read-only restriction on enclosing local scopes:
Step6: In the example above, inc1() is executed in the global environment. Although the global environment cannot reach down into the local variables of inc(), we return inner(), a function defined inside inc(), and inner() does have access to the local variables of inc(). In other words, inner() packages up the local scope of inc() and hands it to inc1 and inc2, so that each of them independently owns a sealed-off scope, unaffected by global variables or any other execution environment, which is why it is called a closure.
|
<ASSISTANT_TASK:>
Python Code:
scopes = {
"local": {"locals": None,
"non-local": {"locals": None,
"global": {"locals": None,
"built-in": ["built-ins"]}}},
}
x = 100
def main():
x += 1
print(x)
main()
x = 100
def main():
global x
x += 1
print(x)
main()
print(x) # the global variable has been changed
# JavaScript closure example, kept as a comment here for comparison with the
# Python versions below:
#
# /* JavaScript Closure example */
# var inc = function(){
#     var x = 0;
#     return function(){
#         console.log(x++);
#     };
# };
# var inc1 = inc()
# var inc2 = inc()
# Python 3.5
def inc():
x = 0
def inner():
nonlocal x
x += 1
print(x)
return inner
inc1 = inc()
inc2 = inc()
inc1()
inc1()
inc1()
inc2()
# Python 2.7
def inc():
x = [0]
def inner():
x[0] += 1
print(x[0])
return inner
inc1 = inc()
inc2 = inc()
inc1()
inc1()
inc1()
inc2()
print(inc1.__closure__[0].cell_contents)
print(inc2.__closure__[0].cell_contents)
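# Added remarks (not in the original text), tying back to the scoping rules above:
# 1) each call to inc() builds a fresh closure cell, which is why inc1 and inc2
#    count independently; their __closure__ cells are distinct objects.
print(inc1.__closure__[0] is inc2.__closure__[0])  # False
# 2) unlike global, nonlocal only binds to an enclosing *function* scope; using it
#    at module level, or where no enclosing function defines the name, is a
#    compile-time SyntaxError.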
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's grab a few CAGE datasets from FANTOM5 related to heart biology.
Step2: Then we'll write out these BigWig files and labels to a samples table.
Step3: Next, we want to choose genomic sequences to form batches for stochastic gradient descent, divide them into training/validation/test sets, and construct TFRecords to provide to downstream programs.
Step4: Now, data/heart_l131k contains relevant data for training.
|
<ASSISTANT_TASK:>
Python Code:
import os, subprocess
if not os.path.isfile('data/hg19.ml.fa'):
subprocess.call('curl -o data/hg19.ml.fa https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa', shell=True)
subprocess.call('curl -o data/hg19.ml.fa.fai https://storage.googleapis.com/basenji_tutorial_data/hg19.ml.fa.fai', shell=True)
if not os.path.isfile('data/CNhs11760.bw'):
subprocess.call('curl -o data/CNhs11760.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs11760.bw', shell=True)
subprocess.call('curl -o data/CNhs12843.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs12843.bw', shell=True)
subprocess.call('curl -o data/CNhs12856.bw https://storage.googleapis.com/basenji_tutorial_data/CNhs12856.bw', shell=True)
lines = [['index','identifier','file','clip','sum_stat','description']]
lines.append(['0', 'CNhs11760', 'data/CNhs11760.bw', '384', 'sum', 'aorta'])
lines.append(['1', 'CNhs12843', 'data/CNhs12843.bw', '384', 'sum', 'artery'])
lines.append(['2', 'CNhs12856', 'data/CNhs12856.bw', '384', 'sum', 'pulmonic_valve'])
samples_out = open('data/heart_wigs.txt', 'w')
for line in lines:
print('\t'.join(line), file=samples_out)
samples_out.close()
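# Added sanity check (not part of the original tutorial): read the samples table
# back with pandas to confirm its layout before handing it to basenji_data.py.
import pandas as pd
print(pd.read_csv('data/heart_wigs.txt', sep='\t'))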
! basenji_data.py -d .1 -g data/unmap_macro.bed -l 131072 --local -o data/heart_l131k -p 8 -t .1 -v .1 -w 128 data/hg19.ml.fa data/heart_wigs.txt
! cut -f4 data/heart_l131k/sequences.bed | sort | uniq -c
! head -n3 data/heart_l131k/sequences.bed
! grep valid data/heart_l131k/sequences.bed | head -n3
! grep test data/heart_l131k/sequences.bed | head -n3
! ls -l data/heart_l131k/tfrecords/*.tfr
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the testset 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine.
Step4: Extract the dataset from the compressed .tar.gz file.
Step6: Problem 1
Step8: Now let's load the data in a more manageable format. Since, depending on your computer setup you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.
Step10: Problem 2
Step11: Problem 3
Step12: Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9.
Step13: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
Step14: Problem 4
Step15: Finally, let's save the data for later reuse
Step16: Problem 5
Step18: Problem 6
Step20: Looks like ~85% is the limit for a linear method
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
import random # show random images
import glob # Problem 2
# Config the matplotlib backend as plotting inline in IPython
%matplotlib inline
# course-hardcoded url
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
    """A hook to report the progress of a download. This is mostly intended for users with
    slow internet connections. Reports every 5% change in download progress."""
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
    """Download a file if not present, and make sure it's the right size."""
# Create path where the data file will be stored
# get working directory and go to the parent - distro agnostic code
dpath = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
# go to data directory
dpath = os.path.join(dpath, 'data')
# get filepath
fpath = os.path.join(dpath, filename)
# Download file if needed
if force or not os.path.exists(fpath):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
# move new file from working directory to data location
# current file location
cpath = os.path.join(os.getcwd(), filename)
os.rename(cpath, fpath)
# check existing file if it exists or new if not
statinfo = os.stat(fpath)
# Verify file size.
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename +
'. Can you get to it with a browser or download it again?')
return (filename)
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
# get new dir name
dirn = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
# Create data directory path
dpath = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
dpath = os.path.join(dpath, 'data')
# Create dir to unzip data
dirn = os.path.join(dpath, dirn)
# Create zipped file path
fpath = os.path.join(dpath, filename)
if os.path.isdir(dirn) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (dirn, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % dirn)
tar = tarfile.open(fpath)
sys.stdout.flush()
# set path so data are extracted within the data folder.
tar.extractall(path=dpath)
tar.close()
data_folders = [
os.path.join(dirn, d) for d in sorted(os.listdir(dirn))
if os.path.isdir(os.path.join(dirn, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print("Data folders list:")
print(data_folders)
return(data_folders)
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
def show_random_images(numi):
    """Function to display a specified number of random images from the extracted dataset.

    Arguments:
        numi: Integer, how many images to show.
    """
# First let's create a list of all the files.
# Create data directory path
dpath = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
dpath = os.path.join(dpath, 'data')
# notMNIST_small directory:
dsmall = os.path.join(dpath, 'notMNIST_small')
# notMNIST_large directory:
dlarge = os.path.join(dpath, 'notMNIST_large')
# create $numi random number of paths of images
name1 = []
it1 = 0
while it1 < numi:
# select random notMNIST
rpath0 = random.choice([dlarge, dsmall])
# select random letter
rpath1 = random.choice(["A/", "B/", "C/", "D/", "E/", "F/", "H/", "I/", "J/"])
# join them
rpath = os.path.join(rpath0, rpath1)
# select random image from files
onlyfiles = [fi for fi in os.listdir(rpath) if os.path.isfile(os.path.join(rpath, fi))]
name2 = random.choice(onlyfiles)
# add that random name to it's path
name2 = os.path.join(rpath, name2)
# add it to list of images
name1.append(name2)
it1 += 1
for it2 in name1:
print("Showing Image from path:\n" + it2)
im1 = Image(filename=(it2))
display(im1)
# show me 10 images
show_random_images(10)
### Image preprocessing happening in this step !!! ###
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
    """Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
# read image as array:
# https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.ndimage.imread.html
# code below also shifts average to 0 and standard deviation to 1
# This scaling happens assuming the 255 pixel depth is uniformly populated.
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return(dataset)
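# Added note (not in the original assignment): the scaling used in load_letter
# maps raw pixel values from [0, 255] into roughly [-0.5, 0.5], centring the data
# near zero.
print('0 maps to', (0 - pixel_depth / 2) / pixel_depth,
      'and 255 maps to', (255 - pixel_depth / 2) / pixel_depth)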
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return(dataset_names)
train_datasets = maybe_pickle(train_folders, 50000)
test_datasets = maybe_pickle(test_folders, 1800)
## Open a random image from the pickled files.
def show_rnd_pkl_image():
    """Function that shows a random pickled image."""
# First let's create a list of all the files.
# Create data directory path
dpath = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
dpath = os.path.join(dpath, 'data')
# notMNIST_small directory:
dsmall = os.path.join(dpath, 'notMNIST_small')
# notMNIST_large directory:
dlarge = os.path.join(dpath, 'notMNIST_large')
# Find all pickle files in each directory.
# http://stackoverflow.com/a/3215392
# Create a list of all nonMNIST_small pickles
lsmall = glob.glob(dsmall + '/*.pickle')
# Create a list of all nonMNIST_large pickles
llarge = glob.glob(dlarge + '/*.pickle')
# Pick a random pickle to load (either large or small !)
rpklfile = random.choice([lsmall, llarge])
rpklfile = random.choice(rpklfile)
# verify randomness
print(rpklfile)
with open(rpklfile, 'rb') as rf:
imgPkl = pickle.load(rf)
plt.imshow(random.choice(list(imgPkl)))
show_rnd_pkl_image()
def disp_number_images(data_folders):
for folder in data_folders:
pickle_filename = folder + '.pickle'
try:
with open(pickle_filename, 'rb') as f:
dataset = pickle.load(f)
except Exception as e:
print('Unable to read data from', pickle_filename, ':', e)
return
print('Number of images in ', folder, ' : ', len(dataset))
disp_number_images(train_folders)
disp_number_images(test_folders)
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 500000
valid_size = 29000
test_size = 18000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return (shuffled_dataset, shuffled_labels)
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
pretty_labels = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F', 6: 'G', 7: 'H', 8: 'I', 9: 'J'}
def disp_sample_dataset(dataset, labels):
items = random.sample(range(len(labels)), 8)
for i, item in enumerate(items):
plt.subplot(2, 4, i+1)
plt.axis('off')
plt.title(pretty_labels[labels[item]])
plt.imshow(dataset[item])
disp_sample_dataset(train_dataset, train_labels)
disp_sample_dataset(train_dataset, train_labels)
# Create data directory path
dpath = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
dpath = os.path.join(dpath, 'data')
# create pickle data file path
pickle_file = os.path.join(dpath,'notMNIST.pickle')
# save data if they aren't already saved or forced.
def maybe_save_data(filepath, force=False):
# Download file if needed
if force or not os.path.exists(filepath):
print('Attempting to save data at:\n', filepath)
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
print('Data saved.')
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
else:
print('Data has been processed and saved in previous run.')
# Note: Previous run reshuffling will likely be different
# from current run!
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
maybe_save_data(pickle_file)
def display_overlap(overlap, source_dataset, target_dataset):
item = random.choice(list(overlap.keys()))
imgs = np.concatenate(([source_dataset[item]], target_dataset[overlap[item][0:7]]))
plt.suptitle(item)
for i, img in enumerate(imgs):
plt.subplot(2, 4, i+1)
plt.axis('off')
plt.imshow(img)
def extract_overlap(dataset_1, dataset_2):
overlap = {}
for i, img_1 in enumerate(dataset_1):
for j, img_2 in enumerate(dataset_2):
if np.array_equal(img_1, img_2):
if not i in overlap.keys():
overlap[i] = []
overlap[i].append(j)
return overlap
%time overlap_test_train = extract_overlap(test_dataset[:200], train_dataset)
print('Number of overlaps:', len(overlap_test_train.keys()))
display_overlap(overlap_test_train, test_dataset[:200], train_dataset)
def tryLogRegr(sample_size):
Arguments:
sample_size: Integer to determine sample size
regr = LogisticRegression()
X_test = test_dataset.reshape(test_dataset.shape[0], 28 * 28)
y_test = test_labels
X_train = train_dataset[:sample_size].reshape(sample_size, 784)
y_train = train_labels[:sample_size]
%time regr.fit(X_train, y_train)
rscore = regr.score(X_test, y_test)
print("Mean acccuracy of the linear regression model is: {}"
.format(rscore))
pred_labels = regr.predict(X_test)
disp_sample_dataset(test_dataset, pred_labels)
tryLogRegr(50)
tryLogRegr(100)
tryLogRegr(1000)
tryLogRegr(5000)
def tryLogRegrAll():
Function to perform Logistic Regression on all our dataset.
# sag solver works better for bigger datasets
# n_jobs = -2 automatically selects (max - 1) available cores!
# using -1
regr = LogisticRegression(solver='sag', n_jobs = -1)
X_test = test_dataset.reshape(test_dataset.shape[0], 28 * 28)
y_test = test_labels
X_train = train_dataset.reshape(train_dataset.shape[0], 784)
y_train = train_labels
%time regr.fit(X_train, y_train)
rscore = regr.score(X_test, y_test)
print("Mean acccuracy of the linear regression model is: {}"
.format(rscore))
pred_labels = regr.predict(X_test)
disp_sample_dataset(test_dataset, pred_labels)
tryLogRegrAll()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Shor's algorithm
Step6: Order finding
Step8: For example, the multiplicative group modulo $n = 15$ is shown below.
Step11: One can check that this set of elements indeed forms a group (under ordinary multiplication).
Step13: An example of computing $r$ for a given $x \in \mathbb{Z}_n$ and given $n$ is shown in the code block below.
Step16: The quantum part of Shor's algorithm is order finding, but done via a quantum circuit, which we'll discuss below.
Step18: Now that we have the operation defined, we can use it in a circuit. The cell below creates two qubit registers, then sets the first register to be $|10\rangle$ (in binary) and the second register to be $|01\rangle$ (in binary) via $X$ gates. Then, we use the Adder operation, then measure all the qubits.
Step20: In the output of this code block, we first see the circuit which shows the initial $X$ gates, the Adder operation, then the final measurements. Next, we see the measurement outcomes which are all the bitstring $1011$ as expected.
Step23: We can understand this unitary as follows. The $i$th column of the unitary is the state $|i + 1 \text{ mod } 4\rangle$. For example, if we look at the $0$th column of the unitary, we see the state $|i + 1 \text{ mod } 4\rangle = |0 + 1 \text{ mod } 4\rangle = |1\rangle$. If we look at the $1$st column of the unitary, we see the state $|i + 1 \text{ mod } 4\rangle = |1 + 1 \text{ mod } 4\rangle = |2\rangle$. Similarly for the last two columns.
Step25: In the apply method, we see that we evaluate (target * base**exponent) % modulus. The target and the exponent depend on the values of the respective qubit registers, and the base and modulus are constant -- namely, the modulus is $n$ and the base is some $x \in \mathbb{Z}_n$.
Step27: As with the simple adder operation, this modular exponential operation has a unitary which we can display (memory permitting) as follows.
Step30: Using the modular exponentional operation in a circuit
Step32: Using this function, we can visualize the circuit for a given $x$ and $n$ as follows.
Step34: As previously described, we put the exponent register into an equal superposition via Hadamard gates. The $X$ gate on the last qubit in the target register is used for phase kickback. The modular exponential operation performs the sequence of controlled unitaries in phase estimation, then we apply the inverse quantum Fourier transform to the exponent register and measure to read out the result.
Step36: We interpret each measured bitstring as an integer, but what do these integers tell us? In the next section we look at how to classically post-process to interpret them.
Step38: The next code block shows an example of creating an order finding circuit, executing it, then using the classical postprocessing function to determine the order. Recall that the quantum part of the algorithm succeeds with some probability. If the order is None, try re-running the cell a few times.
Step40: You should see that the order of $x = 5$ in $\mathbb{Z}_6$ is $r = 2$. Indeed, $5^2 \text{ mod } 6 = 25 \text{ mod } 6 = 1$.
Step44: This completes our quantum implementation of an order finder, and the quantum part of Shor's algorithm.
Step47: The function find_factor uses the quantum_order_finder by default, in which case it is executing Shor's algorithm. As previously mentioned, due to the large memory requirements for classically simulating this circuit, we cannot run Shor's algorithm for $n \ge 15$. However, we can use the classical order finder as a substitute.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Install Cirq.
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
Imports for the notebook.
import fractions
import math
import random
import numpy as np
import sympy
from typing import Callable, List, Optional, Sequence, Union
import cirq
Function to compute the elements of Z_n.
def multiplicative_group(n: int) -> List[int]:
Returns the multiplicative group modulo n.
Args:
n: Modulus of the multiplicative group.
assert n > 1
group = [1]
for x in range(2, n):
if math.gcd(x, n) == 1:
group.append(x)
return group
Example of a multiplicative group.
n = 15
print(f"The multiplicative group modulo n = {n} is:")
print(multiplicative_group(n))
Function for classically computing the order of an element of Z_n.
def classical_order_finder(x: int, n: int) -> Optional[int]:
Computes smallest positive r such that x**r mod n == 1.
Args:
x: Integer whose order is to be computed, must be greater than one
and belong to the multiplicative group of integers modulo n (which
consists of positive integers relatively prime to n),
n: Modulus of the multiplicative group.
Returns:
Smallest positive integer r such that x**r == 1 mod n.
Always succeeds (and hence never returns None).
Raises:
ValueError when x is 1 or not an element of the multiplicative
group of integers modulo n.
# Make sure x is both valid and in Z_n.
if x < 2 or x >= n or math.gcd(x, n) > 1:
raise ValueError(f"Invalid x={x} for modulus n={n}.")
# Determine the order.
r, y = 1, x
while y != 1:
y = (x * y) % n
r += 1
return r
Example of (classically) computing the order of an element.
n = 15 # The multiplicative group is [1, 2, 4, 7, 8, 11, 13, 14].
x = 8
r = classical_order_finder(x, n)
# Check that the order is indeed correct.
print(f"x^r mod n = {x}^{r} mod {n} = {x**r % n}")
Example of defining an arithmetic (quantum) operation in Cirq.
class Adder(cirq.ArithmeticOperation):
Quantum addition.
def __init__(self, target_register, input_register):
self.input_register = input_register
self.target_register = target_register
def registers(self):
return self.target_register, self.input_register
def with_registers(self, *new_registers):
return Adder(*new_registers)
def apply(self, target_value, input_value):
return target_value + input_value
Example of using an Adder in a circuit.
# Two qubit registers.
qreg1 = cirq.LineQubit.range(2)
qreg2 = cirq.LineQubit.range(2, 4)
# Define the circuit.
circ = cirq.Circuit(
cirq.ops.X.on(qreg1[0]),
cirq.ops.X.on(qreg2[1]),
Adder(input_register=qreg1, target_register=qreg2),
cirq.measure_each(*qreg1),
cirq.measure_each(*qreg2)
)
# Display it.
print("Circuit:\n")
print(circ)
# Print the measurement outcomes.
print("\n\nMeasurement outcomes:\n")
print(cirq.sample(circ, repetitions=5).data)
Example of the unitary of an Adder operation.
cirq.unitary(
Adder(target_register=cirq.LineQubit.range(2),
input_register=1)
).real
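# Quick numerical check of the column-by-column interpretation described above
# (a sketch using the Adder defined earlier): column i of this unitary should be
# the basis state |i + 1 mod 4>.
adder_unitary = cirq.unitary(
    Adder(target_register=cirq.LineQubit.range(2), input_register=1)).real
print(adder_unitary[:, 0])  # expected [0. 1. 0. 0.], i.e. |1>
print(adder_unitary[:, 3])  # expected [1. 0. 0. 0.], i.e. |0>, since 3 + 1 = 0 mod 4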
Defines the modular exponential operation used in Shor's algorithm.
class ModularExp(cirq.ArithmeticOperation):
Quantum modular exponentiation.
This class represents the unitary which multiplies base raised to exponent
into the target modulo the given modulus. More precisely, it represents the
unitary V which computes modular exponentiation x**e mod n:
V|y⟩|e⟩ = |y * x**e mod n⟩ |e⟩ 0 <= y < n
V|y⟩|e⟩ = |y⟩ |e⟩ n <= y
where y is the target register, e is the exponent register, x is the base
and n is the modulus. Consequently,
V|y⟩|e⟩ = (U**e|y⟩)|e⟩
where U is the unitary defined as
U|y⟩ = |y * x mod n⟩ 0 <= y < n
U|y⟩ = |y⟩ n <= y
def __init__(
self,
target: Sequence[cirq.Qid],
exponent: Union[int, Sequence[cirq.Qid]],
base: int,
modulus: int
) -> None:
if len(target) < modulus.bit_length():
raise ValueError(f'Register with {len(target)} qubits is too small '
f'for modulus {modulus}')
self.target = target
self.exponent = exponent
self.base = base
self.modulus = modulus
def registers(self) -> Sequence[Union[int, Sequence[cirq.Qid]]]:
return self.target, self.exponent, self.base, self.modulus
def with_registers(
self,
*new_registers: Union[int, Sequence['cirq.Qid']],
) -> cirq.ArithmeticOperation:
if len(new_registers) != 4:
raise ValueError(f'Expected 4 registers (target, exponent, base, '
f'modulus), but got {len(new_registers)}')
target, exponent, base, modulus = new_registers
if not isinstance(target, Sequence):
raise ValueError(
f'Target must be a qubit register, got {type(target)}')
if not isinstance(base, int):
raise ValueError(
f'Base must be a classical constant, got {type(base)}')
if not isinstance(modulus, int):
raise ValueError(
f'Modulus must be a classical constant, got {type(modulus)}')
return ModularExp(target, exponent, base, modulus)
def apply(self, *register_values: int) -> int:
assert len(register_values) == 4
target, exponent, base, modulus = register_values
if target >= modulus:
return target
return (target * base**exponent) % modulus
def _circuit_diagram_info_(
self,
args: cirq.CircuitDiagramInfoArgs,
) -> cirq.CircuitDiagramInfo:
assert args.known_qubits is not None
wire_symbols: List[str] = []
t, e = 0, 0
for qubit in args.known_qubits:
if qubit in self.target:
if t == 0:
if isinstance(self.exponent, Sequence):
e_str = 'e'
else:
e_str = str(self.exponent)
wire_symbols.append(
f'ModularExp(t*{self.base}**{e_str} % {self.modulus})')
else:
wire_symbols.append('t' + str(t))
t += 1
if isinstance(self.exponent, Sequence) and qubit in self.exponent:
wire_symbols.append('e' + str(e))
e += 1
return cirq.CircuitDiagramInfo(wire_symbols=tuple(wire_symbols))
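# Small classical illustration of what ModularExp.apply computes (an aside, not
# part of the algorithm): with base x = 7, modulus n = 15 and the target started
# at 1, stepping the exponent walks through the powers of x modulo n.
for e in range(5):
    print(e, (1 * 7**e) % 15)  # 1, 7, 4, 13, 1 -> the order of 7 modulo 15 is 4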
Create the target and exponent registers for phase estimation,
and see the number of qubits needed for Shor's algorithm.
n = 15
L = n.bit_length()
# The target register has L qubits.
target = cirq.LineQubit.range(L)
# The exponent register has 2L + 3 qubits.
exponent = cirq.LineQubit.range(L, 3 * L + 3)
# Display the total number of qubits to factor this n.
print(f"To factor n = {n} which has L = {L} bits, we need 3L + 3 = {3 * L + 3} qubits.")
See (part of) the unitary for a modular exponential operation.
# Pick some element of the multiplicative group modulo n.
x = 5
# Display (part of) the unitary. Uncomment if n is small enough.
# cirq.unitary(ModularExp(target, exponent, x, n))
Function to make the quantum circuit for order finding.
def make_order_finding_circuit(x: int, n: int) -> cirq.Circuit:
Returns quantum circuit which computes the order of x modulo n.
The circuit uses Quantum Phase Estimation to compute an eigenvalue of
the unitary
U|y⟩ = |y * x mod n⟩ 0 <= y < n
U|y⟩ = |y⟩ n <= y
Args:
x: positive integer whose order modulo n is to be found
n: modulus relative to which the order of x is to be found
Returns:
Quantum circuit for finding the order of x modulo n
L = n.bit_length()
target = cirq.LineQubit.range(L)
exponent = cirq.LineQubit.range(L, 3 * L + 3)
return cirq.Circuit(
cirq.X(target[L - 1]),
cirq.H.on_each(*exponent),
ModularExp(target, exponent, x, n),
cirq.qft(*exponent, inverse=True),
cirq.measure(*exponent, key='exponent'),
)
Example of the quantum circuit for period finding.
n = 15
x = 7
circuit = make_order_finding_circuit(x, n)
print(circuit)
Measuring Shor's period finding circuit.
circuit = make_order_finding_circuit(x=5, n=6)
res = cirq.sample(circuit, repetitions=8)
print("Raw measurements:")
print(res)
print("\nInteger in exponent register:")
print(res.data)
def process_measurement(result: cirq.Result, x: int, n: int) -> Optional[int]:
Interprets the output of the order finding circuit.
Specifically, it determines s/r such that exp(2πis/r) is an eigenvalue
of the unitary
U|y⟩ = |xy mod n⟩ 0 <= y < n
U|y⟩ = |y⟩ n <= y
then computes r (by continued fractions) if possible, and returns it.
Args:
result: result obtained by sampling the output of the
circuit built by make_order_finding_circuit
Returns:
r, the order of x modulo n or None.
# Read the output integer of the exponent register.
exponent_as_integer = result.data["exponent"][0]
exponent_num_bits = result.measurements["exponent"].shape[1]
eigenphase = float(exponent_as_integer / 2**exponent_num_bits)
# Run the continued fractions algorithm to determine f = s / r.
f = fractions.Fraction.from_float(eigenphase).limit_denominator(n)
# If the numerator is zero, the order finder failed.
if f.numerator == 0:
return None
# Else, return the denominator if it is valid.
r = f.denominator
if x**r % n != 1:
return None
return r
Example of the classical post-processing.
# Set n and x here
n = 6
x = 5
print(f"Finding the order of x = {x} modulo n = {n}\n")
measurement = cirq.sample(circuit, repetitions=1)
print("Raw measurements:")
print(measurement)
print("\nInteger in exponent register:")
print(measurement.data)
r = process_measurement(measurement, x, n)
print("\nOrder r =", r)
if r is not None:
print(f"x^r mod n = {x}^{r} mod {n} = {x**r % n}")
def quantum_order_finder(x: int, n: int) -> Optional[int]:
Computes smallest positive r such that x**r mod n == 1.
Args:
x: integer whose order is to be computed, must be greater than one
and belong to the multiplicative group of integers modulo n (which
consists of positive integers relatively prime to n),
n: modulus of the multiplicative group.
# Check that the integer x is a valid element of the multiplicative group
# modulo n.
if x < 2 or n <= x or math.gcd(x, n) > 1:
raise ValueError(f'Invalid x={x} for modulus n={n}.')
# Create the order finding circuit.
circuit = make_order_finding_circuit(x, n)
# Sample from the order finding circuit.
measurement = cirq.sample(circuit)
# Return the processed measurement result.
return process_measurement(measurement, x, n)
Functions for factoring from start to finish.
def find_factor_of_prime_power(n: int) -> Optional[int]:
Returns non-trivial factor of n if n is a prime power, else None.
for k in range(2, math.floor(math.log2(n)) + 1):
c = math.pow(n, 1 / k)
c1 = math.floor(c)
if c1**k == n:
return c1
c2 = math.ceil(c)
if c2**k == n:
return c2
return None
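# Example usage (a quick check, not from the original run): 125 = 5**3 is a prime
# power, so a factor is returned; 21 is not a prime power, so we get None.
print(find_factor_of_prime_power(125))  # 5
print(find_factor_of_prime_power(21))   # None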
def find_factor(
n: int,
order_finder: Callable[[int, int], Optional[int]] = quantum_order_finder,
max_attempts: int = 30
) -> Optional[int]:
Returns a non-trivial factor of composite integer n.
Args:
n: Integer to factor.
order_finder: Function for finding the order of elements of the
multiplicative group of integers modulo n.
max_attempts: number of random x's to try, also an upper limit
on the number of order_finder invocations.
Returns:
Non-trivial factor of n or None if no such factor was found.
Factor k of n is trivial if it is 1 or n.
# If the number is prime, there are no non-trivial factors.
if sympy.isprime(n):
print("n is prime!")
return None
# If the number is even, two is a non-trivial factor.
if n % 2 == 0:
return 2
# If n is a prime power, we can find a non-trivial factor efficiently.
c = find_factor_of_prime_power(n)
if c is not None:
return c
for _ in range(max_attempts):
# Choose a random number between 2 and n - 1.
x = random.randint(2, n - 1)
# Most likely x and n will be relatively prime.
c = math.gcd(x, n)
# If x and n are not relatively prime, we got lucky and found
# a non-trivial factor.
if 1 < c < n:
return c
# Compute the order r of x modulo n using the order finder.
r = order_finder(x, n)
# If the order finder failed, try again.
if r is None:
continue
# If the order r is even, try again.
if r % 2 != 0:
continue
# Compute the non-trivial factor.
y = x**(r // 2) % n
assert 1 < y < n
c = math.gcd(y - 1, n)
if 1 < c < n:
return c
print(f"Failed to find a non-trivial factor in {max_attempts} attempts.")
return None
Example of factoring via Shor's algorithm (order finding).
# Number to factor
n = 184573
# Attempt to find a factor
p = find_factor(n, order_finder=classical_order_finder)
q = n // p
print("Factoring n = pq =", n)
print("p =", p)
print("q =", q)
Check the answer is correct.
p * q == n
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step2: <br>
Step3: <br>
Step4: <br>
Step5: <br>
Step6: <br>
Step7: <br>
Step8: <br>
Step9: <br>
Step10: <br>
Step11: <br>
Step12: <br>
Step13: <br>
Step14: Now that we have the response object from the deployed model, we can inspect its predictions (i.e., the predicted class names, confidence scores, and bounding-box vertices).
|
<ASSISTANT_TASK:>
Python Code:
pip freeze | grep google-cloud-automl==1.0.1 || pip install google-cloud-automl==1.0.1
pip freeze | grep google-cloud-storage==1.27.0 || pip install google-cloud-storage==1.27.0
import os
from google.cloud import automl
import tensorflow as tf
PROJECT_ID = "YOUR_PROJECT_ID" # Replace with your PROJECT ID
SERVICE_ACCOUNT = "YOUR_SERVICE_ACCOUNT_NAME" # Replace with a name of your choice
ZONE = "us-central1"# Make sure the zone is set to "us-central1"
PWD = os.path.abspath(os.path.curdir)
SERVICE_KEY_PATH = os.path.join(PWD, "{0}.json".format(SERVICE_ACCOUNT))
SERVICE_ACCOUNT_EMAIL="{0}@{1}.iam.gserviceaccount.com".format(SERVICE_ACCOUNT, PROJECT_ID)
print(SERVICE_ACCOUNT_EMAIL)
print(PROJECT_ID)
# Exporting the variables into the environment to make them available to all the subsequent cells
os.environ["PROJECT_ID"] = PROJECT_ID
os.environ["SERVICE_ACCOUNT"] = SERVICE_ACCOUNT
os.environ["SERVICE_KEY_PATH"] = SERVICE_KEY_PATH
os.environ["SERVICE_ACCOUNT_EMAIL"] = SERVICE_ACCOUNT_EMAIL
os.environ["ZONE"] = ZONE
%%bash
gcloud config set project $PROJECT_ID
gcloud config set compute/region $ZONE
%%bash
gcloud iam service-accounts list | grep $SERVICE_ACCOUNT ||
gcloud iam service-accounts create $SERVICE_ACCOUNT
%%bash
test -f $SERVICE_KEY_PATH ||
gcloud iam service-accounts keys create $SERVICE_KEY_PATH \
--iam-account $SERVICE_ACCOUNT_EMAIL
echo "Service key: $(ls $SERVICE_KEY_PATH)"
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = SERVICE_KEY_PATH
%%bash
gcloud projects add-iam-policy-binding $PROJECT_ID \
--member "serviceAccount:$SERVICE_ACCOUNT_EMAIL" \
--role "roles/automl.admin"
gcloud projects add-iam-policy-binding $PROJECT_ID \
--member "serviceAccount:$SERVICE_ACCOUNT_EMAIL" \
--role "roles/storage.admin"
DATASET_NAME = "salad_dataset" # Replace with desired dataset name
client = automl.AutoMlClient()
# A resource that represents Google Cloud Platform location.
project_location = client.location_path(PROJECT_ID, ZONE)
metadata = automl.types.ImageObjectDetectionDatasetMetadata()
dataset = automl.types.Dataset(
display_name=DATASET_NAME,
image_object_detection_dataset_metadata=metadata,
)
# Create a dataset with the dataset metadata in the region.
response = client.create_dataset(project_location, dataset)
created_dataset = response.result()
# Display the dataset information
print("Dataset name: {}".format(created_dataset.name))
print("Dataset id: {}".format(created_dataset.name.split("/")[-1]))
DATASET_ID = format(created_dataset.name.split("/")[-1])
DATASET_URI = "gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv"
# Get the full path of the dataset.
dataset_full_id = client.dataset_path(
PROJECT_ID, ZONE, DATASET_ID
)
# Get the multiple Google Cloud Storage URIs
input_uris = path.split(",")
gcs_source = automl.types.GcsSource(input_uris=input_uris)
input_config = automl.types.InputConfig(gcs_source=gcs_source)
# Import data from the input URI
response = client.import_data(dataset_full_id, input_config)
print("Processing import...")
print("Data imported. {}".format(response.result()))
MODEL_NAME = "salads" # Replace with desired model name
# A resource that represents Google Cloud Platform location.
project_location = client.location_path(PROJECT_ID, ZONE)
# Leave model unset to use the default base model provided by Google
# train_budget_milli_node_hours: The actual train_cost will be equal or
# less than this value.
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#imageobjectdetectionmodelmetadata
training_metadata = automl.types.ImageObjectDetectionModelMetadata(
train_budget_milli_node_hours=24000
)
model = automl.types.Model(
display_name=MODEL_NAME,
dataset_id=DATASET_ID,
image_object_detection_model_metadata=training_metadata,
)
# Create a model with the model metadata in the region.
response = client.create_model(project_location, model)
print("Training operation name: {}".format(response.operation.name))
print("Training started...")
MODEL_ID = "YOUR_MODEL_ID" # Replace with the ID of your trained model; the local model proto defined above has no name assigned yet
# Get the full path of the model.
model_full_id = client.model_path(PROJECT_ID, ZONE, MODEL_ID)
model = client.get_model(model_full_id)
# Retrieve deployment state.
if model.deployment_state == automl.enums.Model.DeploymentState.DEPLOYED:
deployment_state = "deployed"
else:
deployment_state = "undeployed"
# Display the model information.
print("Model name: {}".format(model.name))
print("Model id: {}".format(model.name.split("/")[-1]))
print("Model display name: {}".format(model.display_name))
print("Model create time:")
print("\tseconds: {}".format(model.create_time.seconds))
print("\tnanos: {}".format(model.create_time.nanos))
print("Model deployment state: {}".format(deployment_state))
print("List of model evaluations:")
for evaluation in client.list_model_evaluations(model_full_id, ""):
print("Model evaluation name: {}".format(evaluation.name))
print(
"Model annotation spec id: {}".format(
evaluation.annotation_spec_id
)
)
print("Create Time:")
print("\tseconds: {}".format(evaluation.create_time.seconds))
print("\tnanos: {}".format(evaluation.create_time.nanos / 1e9))
print(
"Evaluation example count: {}".format(
evaluation.evaluated_example_count
)
)
print(
"Object detection model evaluation metrics: {}\n\n".format(
evaluation.image_object_detection_evaluation_metrics
)
)
response = client.deploy_model(model_full_id)
print("Model deployment finished. {}".format(response.result()))
TEST_IMAGE_PATH = "gs://your-bucket-name-vcm/your-folder-name/your-image.jpg" # Replace with a Cloud storage bucket uploaded image of your choice
prediction_client = automl.PredictionServiceClient()
# Read the file.
with tf.io.gfile.GFile(TEST_IMAGE_PATH, "rb") as content_file:
content = content_file.read()
image = automl.types.Image(image_bytes=content)
payload = automl.types.ExamplePayload(image=image)
# params is additional domain-specific parameters.
# score_threshold is used to filter the result
# https://cloud.google.com/automl/docs/reference/rpc/google.cloud.automl.v1#predictrequest
params = {"score_threshold": "0.8"}
response = prediction_client.predict(model_full_id, payload, params)
print("Prediction results:")
for result in response.payload:
print("Predicted class name: {}".format(result.display_name))
print(
"Predicted class score: {}".format(
result.image_object_detection.score
)
)
bounding_box = result.image_object_detection.bounding_box
print("Normalized Vertices:")
for vertex in bounding_box.normalized_vertices:
print("\tX: {}, Y: {}".format(vertex.x, vertex.y))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I want to import Vgg16 as well because I'll want it's low-level features
Step2: Actually, looks like Vgg's ImageNet weights won't be needed.
Step3: II. Load Data
Step4: III. Preprocessing
Step5: One-Hot Encoding the outputs
Step6: Since this notebook's models are all mimicking Vgg16, the input data should be preprocessed in the same way
Step7: Create Data Batch Generator
Step8: General workflow, going forward
Step9: 2. Single Dense Layer
Step10: With an accuracy of 0.9823 and validation accuracy of 0.9664, the model's starting to overfit significantly and hit its limits, so it's time to go on to the next technique.
Step11: 4. Data Augmentation
Step12: 5. Batch Normalization + Data Augmentation
Step13: 6. Dropout + Batch Normalization + Data Augmentation
Step14: 7. Ensembling
Step15: I finally got my GPU running on my workstation. Decided to leave the ghost of Bill Gates alone and put Ubuntu Linux on the second harddrive. This nvidia GTX 870M takes 17 seconds to get through the 60,000 images. The Core i5 on my Mac took an average of 340. A 20x speed up. This also means, at those numbers, a 6-strong ensemble running the regime in train_model() will take about 49 minutes and 18 seconds, instead of 16 hours and 26 minutes. You can see what the motivation was, for me to spend ~9 hours today and get the GPU working. It's a warm feeling, knowing your computer isn't just good for playing DOOM, but'll be doing its share of work real soon.
Step16: Save the models' weights -- bc this wasn't computationally cheap
Step17: Create an array of predictions from the models on the test-set. I'm using a batch size of 256 because that's what was done in lecture, and prediction is such an easier task that I think the large size just helps things go faster.
Step18: Finally, take the average of the predictions
Step19: Boom. 0.99699.. ~ 99.7% accuracy. Same as achieved in lecture; took roughly 50 minutes to train. Unfortunately I didn't have the h5py module installed when I ran this, so the weights can't be saved easily -- simple fix of rerunning after install.
|
<ASSISTANT_TASK:>
Python Code:
import keras
import numpy as np
from keras.datasets import mnist
from keras.optimizers import Adam
from keras.models import Sequential
from keras.preprocessing import image
from keras.layers.core import Dense
from keras.layers.core import Lambda
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.pooling import MaxPooling2D
from keras.layers.convolutional import Convolution2D
from keras.layers.normalization import BatchNormalization
from keras.utils.np_utils import to_categorical
# import os, sys
# sys.path.insert(1, os.path.join('../utils/'))
# from vgg16 import Vgg16
# vgg = Vgg16()
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = np.expand_dims(x_train, 1) # can also enter <axis=1> for <1>
x_test = np.expand_dims(x_test, 1)
x_train.shape
y_train, y_test = to_categorical(y_train), to_categorical(y_test)
x_mean = x_train.mean().astype(np.float32)
x_stdv = x_train.std().astype(np.float32)
def norm_input(x): return (x - x_mean) / x_stdv
gen = image.ImageDataGenerator()
trn_batches = gen.flow(x_train, y_train, batch_size=64)
tst_batches = gen.flow(x_test, y_test, batch_size=64)
def LinModel():
model = Sequential([
Lambda(norm_input, input_shape=(1, 28, 28)),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
return model
Linear_model = LinModel()
Linear_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1,
validation_data=tst_batches, nb_val_samples=trn_batches.n)
Linear_model.optimizer.lr=0.1
Linear_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
Linear_model.optimizer.lr=0.01
Linear_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=4,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
Linear_model.optimizer.lr=0.001
Linear_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=8,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
def FCModel():
model = Sequential([
Lambda(norm_input, input_shape=(1, 28, 28)),
Dense(512, activation='relu'),
Flatten(),
Dense(10, activation='softmax')
])
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
return model
FC_model = FCModel()
FC_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
FC_model.optimizer.lr=0.1
FC_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
FC_model.optimizer.lr=0.01
FC_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=4,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
def ConvModel():
model = Sequential([
Lambda(norm_input, input_shape=(1, 28, 28), output_shape=(1, 28, 28)),
Convolution2D(32, 3, 3, activation='relu'),
Convolution2D(32, 3, 3, activation='relu'),
MaxPooling2D(),
Convolution2D(64, 3, 3, activation='relu'),
Convolution2D(64, 3, 3, activation='relu'),
MaxPooling2D(),
Flatten(),
Dense(512, activation='relu'),
Dense(10, activation='softmax')
])
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
return model
CNN_model = ConvModel()
CNN_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
CNN_model.optimizer.lr=0.1
CNN_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
CNN_model.optimizer.lr=0.01
CNN_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=4, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# Running again until validation accuracy stops increasing
CNN_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=4, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
gen = image.ImageDataGenerator(rotation_range=8, width_shift_range=0.08, shear_range=0.3,
height_shift_range=0.08, zoom_range=0.08)
trn_batches = gen.flow(x_train, y_train, batch_size=64)
tst_batches = gen.flow(x_test, y_test, batch_size=64)
CNN_Aug_model = ConvModel()
CNN_Aug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# upping LR
print("Learning Rate, η = 0.1")
CNN_Aug_model.optimizer.lr=0.1
CNN_Aug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# brining LR back down for more epochs
print("Learning Rate, η = 0.01")
CNN_Aug_model.optimizer.lr=0.01
CNN_Aug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=4, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# 4 more epochs at η=0.01
CNN_Aug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=4, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
def ConvModelBN():
model = Sequential([
Lambda(norm_input, input_shape=(1, 28, 28), output_shape=(1, 28, 28)),
Convolution2D(32, 3, 3, activation='relu'),
BatchNormalization(axis=1),
Convolution2D(32, 3, 3, activation='relu'),
MaxPooling2D(),
BatchNormalization(axis=1),
Convolution2D(64, 3, 3, activation='relu'),
BatchNormalization(axis=1),
Convolution2D(64, 3, 3, activation='relu'),
MaxPooling2D(),
Flatten(),
BatchNormalization(),
Dense(512, activation='relu'),
BatchNormalization(),
Dense(10, activation='softmax')
])
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
return model
CNN_BNAug_model = ConvModelBN()
CNN_BNAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
print("Learning Rate, η = 0.1")
CNN_BNAug_model.optimizer.lr=0.1
CNN_BNAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=2, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
print("Learning Rate, η = 0.01")
CNN_BNAug_model.optimizer.lr=0.01
CNN_BNAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=6, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# some more training at 0.1 and 0.01:
print("Learning Rate, η = 0.1")
CNN_BNAug_model.optimizer.lr=0.1
CNN_BNAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
print("Learning Rate, η = 0.01")
CNN_BNAug_model.optimizer.lr=0.01
CNN_BNAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=6, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
def ConvModelBNDo():
model = Sequential([
Lambda(norm_input, input_shape=(1, 28, 28), output_shape=(1, 28, 28)),
Convolution2D(32, 3, 3, activation='relu'),
BatchNormalization(axis=1),
Convolution2D(32, 3, 3, activation='relu'),
MaxPooling2D(),
BatchNormalization(axis=1),
Convolution2D(64, 3, 3, activation='relu'),
BatchNormalization(axis=1),
Convolution2D(64, 3, 3, activation='relu'),
MaxPooling2D(),
Flatten(),
BatchNormalization(),
Dense(512, activation='relu'),
BatchNormalization(),
Dropout(0.5),
Dense(10, activation='softmax')
])
model.compile(Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
return model
CNN_BNDoAug_model = ConvModelBNDo()
CNN_BNDoAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
print("Learning Rate, η = 0.1")
CNN_BNDoAug_model.optimizer.lr=0.1
CNN_BNDoAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=4, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
print("Learning Rate, η = 0.01")
CNN_BNDoAug_model.optimizer.lr=0.01
CNN_BNDoAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=6, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# 6 more epochs at 0.01
CNN_BNDoAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=6, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
print("Learning Rate η = 0.001")
CNN_BNDoAug_model.optimizer.lr=0.001
CNN_BNDoAug_model.fit_generator(trn_batches, trn_batches.n, nb_epoch=12, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# I'll set it to display progress at the start of each LR-change
def train_model():
model = ConvModelBNDo()
model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
model.optimizer.lr=0.1
model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
model.fit_generator(trn_batches, trn_batches.n, nb_epoch=3, verbose=0,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
model.optimizer.lr=0.01
model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
model.fit_generator(trn_batches, trn_batches.n, nb_epoch=11, verbose=0,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
model.optimizer.lr=0.001
model.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
model.fit_generator(trn_batches, trn_batches.n, nb_epoch=11, verbose=0,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
return model
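# Rough sanity check of the timing estimate quoted above (an aside, assuming the
# reported ~17 s/epoch on this GPU and ~340 s/epoch on the CPU):
epochs_per_model = 1 + 1 + 3 + 1 + 11 + 1 + 11  # epochs run inside train_model()
total_epochs = 6 * epochs_per_model             # 6-model ensemble
for device, secs in [("GPU", 17), ("CPU", 340)]:
    t = total_epochs * secs
    print("{}: {}h {}m {}s".format(device, t // 3600, (t % 3600) // 60, t % 60))
# -> GPU: 0h 49m 18s, CPU: 16h 26m 0s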
# Running a little test on the GPU now
testmodel = ConvModelBNDo()
testmodel.fit_generator(trn_batches, trn_batches.n, nb_epoch=1, verbose=1,
validation_data=tst_batches, nb_val_samples=tst_batches.n)
# this'll take some time
models = [train_model() for m in xrange(6)]
from os import getcwd
path = getcwd() + 'data/mnist/'
model_path = path + 'models/'
for i,m in enumerate(models):
m.save_weights(model_path + 'MNIST_CNN' + str(i) + '.pkl')
ensemble_preds = np.stack([m.predict(x_test, batch_size=256) for m in models])
avg_preds = ensemble_preds.mean(axis=0)
keras.metrics.categorical_accuracy(y_test, avg_preds).eval()
# this'll take some time
models = [train_model() for m in xrange(6)]
from os import getcwd
import os
path = getcwd() + '/data/mnist/'
model_path = path + 'models/'
if not os.path.exists(path):
os.mkdir('data')
os.mkdir('data/mnist')
if not os.path.exists(model_path): os.mkdir(model_path)
for i,m in enumerate(models):
m.save_weights(model_path + 'MNIST_CNN' + str(i) + '.pkl')
ensemble_preds = np.stack([m.predict(x_test, batch_size=256) for m in models])
avg_preds = ensemble_preds.mean(axis=0)
keras.metrics.categorical_accuracy(y_test, avg_preds).eval()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Libraries available in python
Step2: Theano
Step3: Tensorflow
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from random import randint
from pandas import Series,DataFrame
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import math
import time
%matplotlib inline
plt.rcParams['figure.figsize'] = (8, 6)
plt.rcParams['font.size'] = 14
def unpickle(file):
import cPickle
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def array_to_image(arr):
im = arr.reshape((32,32,3),order='F')/255.0
im = np.transpose(im,axes=(1,0,2))
return im
# source: http://www.cs.utoronto.ca/~kriz/cifar.html
# get the training data, batch 1 out of 6. do unzip the tar.gz first
from time import strftime
import os
if os.name == 'nt': # Windows
path = 'cifar\\data_batch_1'
else:
path = 'cifar/data_batch_1'
cifar = unpickle('cifar/data_batch_1')
# image data is uint8 array format
# 3 RGB channels and 32x32 pixels means 32x32x3 = 1024x3 = 3072 numbers in one array
# 10,000 arrays in dataset
lucky_num = randint(0,10000)
sample_arr = cifar["data"][lucky_num].copy()
print 'array_size = {}'.format(len(sample_arr))
print sample_arr
print 'file_name = {}'.format(cifar["filenames"][lucky_num])
#show image
plt.rcParams['figure.figsize'] = (4, 4)
plt.imshow(array_to_image(sample_arr))
plt.show()
# X Format is [image_index, RGB, Height, Width]
X = cifar["data"].copy()
X = X.reshape((-1,3,32,32),order='C')/255.0 - 0.5 #standardize
#set size of input, features, hidden, target
instance_size = X.shape[0]
feature_size = X.shape[1]*X.shape[2]*X.shape[3]
target_size = 10
kernel_size = (12,3,3,3)
weight_size = (10,12,16,16) #softmax layer
#make a flat 10 output with all zeros
Y = np.zeros((instance_size,10))
for j in range(0,instance_size):
Y[j][cifar['labels'][j]] = 1
#split train and test dataset
train_split = 0.8 #6000.0/instance_size
train_size = 200 #int(train_split*instance_size)
test_size = 40 #instance_size - train_size
index = np.random.permutation(instance_size)
train_ix, test_ix = index[:train_size], index[train_size:train_size+test_size]
Y_train , Y_test = Y[train_ix,:], Y[test_ix,:]
X_train , X_test = X[train_ix,:,:,:], X[test_ix,:,:,:]
import os
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32"
import theano
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# input: 32x32 images with 3 channels -> (3, 32, 32) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Convolution2D(nb_filter=12, nb_row=3, nb_col=3, border_mode='valid', \
batch_input_shape=(None, 3, 32, 32), bias=True))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
# Note: Keras does automatic shape inference.
model.add(Dense(output_dim=10, bias=True))
model.add(Activation('softmax'))
sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.fit(X_train, Y_train, batch_size=min(100,int(0.2*train_size)), nb_epoch=10)
model.save('keras_model.hdf5')
import pydot
from keras.utils.visualize_util import plot as kr_plot
from IPython.display import Image
kr_plot(model, to_file='model.png',show_shapes=True)
Image('model.png',width=380,height=700)
y_prob = model.predict(X_test, batch_size=10)
from sklearn.metrics import confusion_matrix
def show_confusion_matrix(cm_mat):
accuracy = np.trace(cm_mat)*100.0/test_size
print 'Test set Accuracy = {:.2f}%'.format(accuracy)
df_cm = pd.DataFrame(cm_mat,index=label_textno[0:cm_mat.shape[0]],columns=label_textno[0:cm_mat.shape[1]])
plt.figure(figsize = (8,6),dpi=300)
sns.heatmap(df_cm, cbar=True ,annot=True, fmt=',.0f')
plt.title('Confusion Matrix')
plt.xlabel('Truth')
plt.ylabel('Predicted')
#get the prediction to compare with target
label_text = ["plane","car","bird","cat","deer","dog","frog","horse","ship","truck"]
label_textno = label_text
for l in range(0,len(label_text)):
label_textno[l] = str(l) + ' ' + label_text[l]
y_pred = [np.argmax(r) for r in y_prob]
y_truth = np.array(cifar['labels'])[test_ix]
cm_mat = confusion_matrix(y_truth,y_pred)
show_confusion_matrix(cm_mat)
# Note:
# if g++ is not installed, theano will use the python implementation instead of optimized C++.
# on windows, do try
import theano
import theano.tensor as T
#Theano hello world - adding 2 matrices
x = T.dmatrix('x')
y = T.dmatrix('y')
z = x + y
f = theano.function([x, y], z)
print f([[0,1],[2,3]],[[0.5,2.5],[-1.5,-2.7]])
#Theano hello world - compute any symboling expression with multiple outputs
a, b = T.dmatrices('a', 'b')
diff = a - b
abs_diff = abs(diff)
diff_squared = diff**2
f = theano.function([a, b], [diff, abs_diff, diff_squared])
print f([[1,2],[3,4]],[[5,6],[7,8]])
import tensorflow as tf
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step4: Boilerplate for graph visualization
Step5: Load the data
Step6: Create a simple classifier with low-level TF Ops
Step7: We can run this graph by feeding in batches of examples using a feed_dict. The keys of the feed_dict are placeholders we've defined previously.
Step8: No learning yet but we get the losses per batch.
Step9: Loss going down, Accuracy going up! \o/
Step10: Custom model, simplified with tf.layers
Step11: Model using canned estimators
Step12: Using Convolutions
|
<ASSISTANT_TASK:>
Python Code:
# This is for graph visualization.
from IPython.display import clear_output, Image, display, HTML
def strip_consts(graph_def, max_const_size=32):
Strip large constant values from graph_def.
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = "<stripped %d bytes>"%size
return strip_def
def show_graph(graph_def, max_const_size=32):
Visualize TensorFlow graph.
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code =
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
iframe =
<iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
.format(code.replace('"', '"'))
display(HTML(iframe))
DATA_DIR = 'data/'
data_filename = os.path.join(DATA_DIR, "zoo.npz")
data = np.load(open(data_filename))
train_data = data['arr_0']
train_labels = data['arr_1']
test_data = data['arr_2']
test_labels = data['arr_3']
del data
print("Data shapes: ", test_data.shape, test_labels.shape, train_data.shape, train_labels.shape)
tf.reset_default_graph()
input_dimension = train_data.shape[1] # 784 = 28*28 pixels
output_dimension = train_labels.shape[1] # 6 classes
batch_size = 32
hidden1_units = 128
data_batch = tf.placeholder("float", shape=[None, input_dimension], name="data")
label_batch = tf.placeholder("float", shape=[None, output_dimension], name="labels")
weights_1 = tf.Variable(
tf.truncated_normal(
[input_dimension, hidden1_units],
stddev=1.0 / np.sqrt(float(input_dimension))),
name='weights_1')
# Task: Add Bias to first layer
# Task: Use Cross-Entropy instead of Squared Loss
weights_2 = tf.Variable(
tf.truncated_normal(
[hidden1_units, output_dimension],
stddev=1.0 / np.sqrt(float(hidden1_units))),
name='weights_2')
wx_b = tf.matmul(data_batch, weights_1)
hidden_activations = tf.nn.relu(wx_b)
output_activations = tf.nn.tanh(tf.matmul(hidden_activations, weights_2))
with tf.name_scope("loss"):
loss = tf.nn.l2_loss(label_batch - output_activations)
show_graph(tf.get_default_graph().as_graph_def())
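# One possible take on the two tasks above (a sketch only -- these extra nodes are
# not used by the training loop below): add a bias to the first layer and compute
# a softmax cross-entropy loss on the resulting logits.
biases_1 = tf.Variable(tf.zeros([hidden1_units]), name='biases_1')
hidden_with_bias = tf.nn.relu(tf.matmul(data_batch, weights_1) + biases_1)
logits = tf.matmul(hidden_with_bias, weights_2)
xent_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=label_batch, logits=logits))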
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
# We'll pick slices from a random permutation to randomize batches during training
random_indices = np.random.permutation(train_data.shape[0])
for i in range(1000):
batch_start_idx = (i % (train_data.shape[0] // batch_size)) * batch_size
batch_indices = random_indices[batch_start_idx:batch_start_idx + batch_size]
batch_loss = sess.run(
loss,
feed_dict = {
data_batch : train_data[batch_indices,:],
label_batch : train_labels[batch_indices,:]
})
if (i + 1) % 100 == 0:
print("Loss at iteration {}: {}".format(i+1, batch_loss))
# Task: Replace GradientDescentOptimizer with AdagradOptimizer and a 0.1 learning rate.
learning_rate = 0.005
updates = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
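# A possible answer to the task above, kept under a separate name so the run below
# still uses plain gradient descent (swap it in for `updates` to try Adagrad):
adagrad_updates = tf.train.AdagradOptimizer(0.1).minimize(loss)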
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
random_indices = np.random.permutation(train_data.shape[0])
n_epochs = 10 # how often do to go through the training data
max_steps = train_data.shape[0]*n_epochs // batch_size
for i in range(max_steps):
batch_start_idx = (i % (train_data.shape[0] // batch_size)) * batch_size
batch_indices = random_indices[batch_start_idx:batch_start_idx+batch_size]
batch_loss, _ = sess.run(
[loss, updates],
feed_dict = {
data_batch : train_data[batch_indices,:],
label_batch : train_labels[batch_indices,:]
})
if i % 200 == 0 or i == max_steps - 1:
random_indices = np.random.permutation(train_data.shape[0])
print("Batch-Loss at iteration {}: {}".format(i, batch_loss))
test_predictions = sess.run(
output_activations,
feed_dict = {
data_batch : test_data,
label_batch : test_labels
})
wins = np.argmax(test_predictions, axis=1) == np.argmax(test_labels, axis=1)
print("Accuracy on test: {}%".format(100*np.mean(wins)))
tf.reset_default_graph()
# Model parameters.
batch_size = 32
hidden1_units = 128
learning_rate = 0.005
input_dimension = train_data.shape[1] # 784 = 28*28 pixels
output_dimension = train_labels.shape[1] # 6 classes
n_epochs = 10 # how often do to go through the training data
def input_fn(data, labels):
input_images = tf.constant(data, shape=data.shape, verify_shape=True, dtype=tf.float32)
input_labels = tf.constant(labels, shape=labels.shape, verify_shape=True, dtype=tf.float32)
image, label = tf.train.slice_input_producer(
[input_images, input_labels],
num_epochs=n_epochs)
dataset_dict = dict(images=image, labels=label)
batch_dict = tf.train.batch(
dataset_dict, batch_size, allow_smaller_final_batch=True)
batch_labels = batch_dict.pop('labels')
return batch_dict, batch_labels
def model_fn(features, targets, mode, params):
# 1. Configure the model via TensorFlow operations (same as above)
weights_1 = tf.Variable(
tf.truncated_normal(
[input_dimension, hidden1_units],
stddev=1.0 / np.sqrt(float(input_dimension))))
weights_2 = tf.Variable(
tf.truncated_normal(
[hidden1_units, output_dimension],
stddev=1.0 / np.sqrt(float(hidden1_units))))
hidden_activations = tf.nn.relu(tf.matmul(features['images'], weights_1))
output_activations = tf.matmul(hidden_activations, weights_2)
# 2. Define the loss function for training/evaluation
loss = tf.reduce_mean(tf.nn.l2_loss(targets - output_activations))
# 3. Define the training operation/optimizer
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=learning_rate,
optimizer="SGD")
# 4. Generate predictions
predictions_dict = {
"classes": tf.argmax(input=output_activations, axis=1),
"probabilities": tf.nn.softmax(output_activations, name="softmax_tensor"),
"logits": output_activations,
}
# Optional: Define eval metric ops; here we add an accuracy metric.
is_correct = tf.equal(tf.argmax(input=targets, axis=1),
tf.argmax(input=output_activations, axis=1))
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
eval_metric_ops = { "accuracy": accuracy}
# 5. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object
return tf.contrib.learn.ModelFnOps(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
custom_model = tf.contrib.learn.Estimator(model_fn=model_fn)
# Train and evaluate the model.
def evaluate_model(model, input_fn):
for i in range(6):
max_steps = train_data.shape[0]*n_epochs // batch_size
model.fit(input_fn=lambda: input_fn(train_data, train_labels), steps=max_steps)
print(model.evaluate(input_fn=lambda: input_fn(test_data, test_labels),
steps=150))
evaluate_model(custom_model, input_fn)
tf.reset_default_graph()
# Model parameters.
batch_size = 32
hidden1_units = 128
learning_rate = 0.005
input_dimension = train_data.shape[1] # 784 = 28*28 pixels
output_dimension = train_labels.shape[1] # 6 classes
def layers_custom_model_fn(features, targets, mode, params):
# 1. Configure the model via TensorFlow operations (using tf.layers). Note how
# much simpler this is compared to defining the weight matrices and matrix
# multiplications by hand.
hidden_layer = tf.layers.dense(inputs=features['images'], units=hidden1_units, activation=tf.nn.relu)
output_layer = tf.layers.dense(inputs=hidden_layer, units=output_dimension, activation=tf.nn.relu)
# 2. Define the loss function for training/evaluation
loss = tf.losses.mean_squared_error(labels=targets, predictions=output_layer)
# 3. Define the training operation/optimizer
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=learning_rate,
optimizer="SGD")
# 4. Generate predictions
predictions_dict = {
"classes": tf.argmax(input=output_layer, axis=1),
"probabilities": tf.nn.softmax(output_layer, name="softmax_tensor"),
"logits": output_layer,
}
# Define eval metric ops; we can also use a pre-defined function here.
accuracy = tf.metrics.accuracy(
labels=tf.argmax(input=targets, axis=1),
predictions=tf.argmax(input=output_layer, axis=1))
eval_metric_ops = {"accuracy": accuracy}
# 5. Return predictions/loss/train_op/eval_metric_ops in ModelFnOps object
return tf.contrib.learn.ModelFnOps(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
layers_custom_model = tf.contrib.learn.Estimator(
model_fn=layers_custom_model_fn)
# Train and evaluate the model.
evaluate_model(layers_custom_model, input_fn)
tf.reset_default_graph()
# Model parameters.
hidden1_units = 128
learning_rate = 0.005
input_dimension = train_data.shape[1] # 784 = 28*28 pixels
output_dimension = train_labels.shape[1] # 6 classes
# Our model can be defined using just three simple lines...
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
images_column = tf.contrib.layers.real_valued_column("images")
# Task: Use the DNNClassifier Estimator to create the model in 1 line.
# One possible solution (parameters here are illustrative; the feature column above may
# also need real_valued_column("images", dimension=input_dimension) to match 784-pixel inputs):
canned_model = tf.contrib.learn.DNNClassifier(
    feature_columns=[images_column],
    hidden_units=[hidden1_units],
    n_classes=output_dimension,
    optimizer=optimizer)
# Potential exercises: play with model parameters, e.g. add dropout
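# (Illustrative note: DNNClassifier also accepts a `dropout` argument, e.g. dropout=0.1,
# which is the probability of dropping a given hidden activation during training.)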
# We need to change the input_fn so that it returns integers representing the classes instead of one-hot vectors.
def class_input_fn(data, labels):
input_images = tf.constant(
data, shape=data.shape, verify_shape=True, dtype=tf.float32)
# The next two lines are different.
class_labels = np.argmax(labels, axis=1)
input_labels = tf.constant(
class_labels, shape=class_labels.shape, verify_shape=True, dtype=tf.int32)
image, label = tf.train.slice_input_producer(
[input_images, input_labels], num_epochs=n_epochs)
dataset_dict = dict(images=image, labels=label)
batch_dict = tf.train.batch(
dataset_dict, batch_size, allow_smaller_final_batch=True)
batch_labels = batch_dict.pop('labels')
return batch_dict, batch_labels
# Train and evaluate the model.
evaluate_model(canned_model, class_input_fn)
import tensorflow as tf
tf.reset_default_graph()
input_dimension = train_data.shape[1] # 784 = 28*28 pixels
output_dimension = train_labels.shape[1] # 6 classes
batch_size = 32
data_batch = tf.placeholder("float", shape=[None, input_dimension])
label_batch = tf.placeholder("float", shape=[None, output_dimension])
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1], padding='SAME')
# Task: convert the batch_size x num_pixels (784) input to batch_size, height (28), width (28), channels
# image_batch = # YOUR CODE HERE
image_batch = tf.reshape(data_batch, [-1, 28, 28, 1])
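# -1 lets TensorFlow infer the batch dimension; the trailing 1 is the single grayscale
# channel expected by conv2d.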
W_conv1 = weight_variable([5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.relu(conv2d(image_batch, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 32, 48])
b_conv2 = bias_variable([48])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7 * 7 * 48, 256])
b_fc1 = bias_variable([256])
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*48])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# Task: add dropout to fully connected layer. Add a variable to turn dropout off in eval.
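# A minimal sketch of one possible answer (the names below are illustrative, not from the
# original notebook): a placeholder with a default keep probability of 1.0 leaves dropout
# off during eval; feeding e.g. 0.5 in the training feed_dict would enable it. To take
# effect, h_fc1_drop would replace h_fc1 in the output layer below.
keep_prob = tf.placeholder_with_default(1.0, shape=())
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)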
W_fc2 = weight_variable([256, output_dimension])
b_fc2 = bias_variable([output_dimension])
output_activations = tf.matmul(h_fc1, W_fc2) + b_fc2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=label_batch,
logits=output_activations))
# Task: Switch from GradientDescentOptimizer to AdamOptimizer
learning_rate = 0.01
updates = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
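# One possible answer to the task above, left commented out so the original SGD run is
# unchanged (Adam typically uses a smaller base learning rate):
# updates = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)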
with tf.Session() as sess:
init = tf.global_variables_initializer()
sess.run(init)
random_indices = np.random.permutation(train_data.shape[0])
n_epochs = 5 # how often to go through the training data
max_steps = train_data.shape[0]*n_epochs // batch_size
for i in range(max_steps):
batch_start_idx = (i % (train_data.shape[0] // batch_size)) * batch_size
batch_indices = random_indices[batch_start_idx:batch_start_idx+batch_size]
batch_loss, _ = sess.run(
[loss, updates],
feed_dict = {
data_batch : train_data[batch_indices,:],
label_batch : train_labels[batch_indices,:]
})
if i % 100 == 0 or i == max_steps - 1:
random_indices = np.random.permutation(train_data.shape[0])
print("Batch-Loss at iteration {}/{}: {}".format(i, max_steps-1, batch_loss))
test_predictions = sess.run(
output_activations,
feed_dict = {
data_batch : test_data,
label_batch : test_labels
})
wins = np.argmax(test_predictions, axis=1) == np.argmax(test_labels, axis=1)
print("Accuracy on test: {}%".format(100*np.mean(wins)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Default plot
Step2: cmap adjusts the colormap used. I like diverging colormaps for heatmaps because they provide good contrast.
Step3: center can be used to indicate the numeric value at which to center the colormap. Above, most of the map uses blues, so by setting center to the midpoint of the data we can create a map with more equal amounts of red and blue shades.
Step4: Adjust the lower and upper contrast bounds with vmin and vmax. Everything below vmin will be the same color. Likewise for above vmax.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
plt.rcParams['figure.figsize'] = (20.0, 10.0)
plt.rcParams['font.family'] = "serif"
df = pd.pivot_table(data=sns.load_dataset("flights"),
index='month',
values='passengers',
columns='year')
df.head()
sns.heatmap(df)
sns.heatmap(df, cmap='coolwarm')
midpoint = (df.values.max() - df.values.min()) / 2
sns.heatmap(df, cmap='coolwarm', center=midpoint)
midpoint = (df.values.max() - df.values.min()) / 2
sns.heatmap(df, cmap='coolwarm', center=midpoint, vmin=150, vmax=400)
midpoint = (df.values.max() - df.values.min()) / 2
sns.heatmap(df, cmap='coolwarm', center=midpoint, vmin=-100, vmax=800)
midpoint = (df.values.max() - df.values.min()) / 2
p = sns.heatmap(df, cmap='coolwarm', center=midpoint)
p.get_figure().savefig('../../figures/heatmap.png')
flights_long = sns.load_dataset("flights")  # the long-form data can also be reshaped directly with DataFrame.pivot
flights_long.pivot(index="month", columns="year", values='passengers')
<END_TASK>
|