text_prompt
stringlengths 168
30.3k
| code_prompt
stringlengths 67
124k
|
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q1
Step2: dimension
Step3: observations
Step4: Q2
Step5: Seul l'algorithme brute accepte les features sparses.
Step6: La dimension augmente mais le nombre de features non nulles est constant. Comme l'algorithme dépend fortement de la distance entre deux éléments et que le coût du calcul de cette distance dépend du nombre de coefficients non nuls, le temps de prédiction reste à peu près constant.
Step7: Même si les performances ne sont pas tout-à-fait comparables, il est vrai qu'il est plus difficile de construire un classifieur basé sur une distance en grande dimension. La raison est simple
|
<ASSISTANT_TASK:>
Python Code:
# Notebook setup: navigation menu, inline plots, and the sklearn helpers
# used by the timing experiments below.
from jyquickhelper import add_notebook_menu
add_notebook_menu()
%matplotlib inline
import time
from sklearn.datasets import make_classification
from sklearn.neighbors import KNeighborsClassifier
def what_to_measure(n, n_features, n_classes=3, n_clusters_per_class=2, n_informative=8,
                    neighbors=5, algorithm="brute"):
    """Time the prediction step of a k-NN classifier on synthetic data.

    Parameters
    ----------
    n : int
        number of observations to generate
    n_features : int
        dimensionality of the generated data
    n_classes, n_clusters_per_class, n_informative : int
        forwarded to :func:`sklearn.datasets.make_classification`
    neighbors : int
        number of neighbors used by the classifier
    algorithm : str
        nearest-neighbor search algorithm ("brute", "ball_tree", ...)

    Returns
    -------
    (float, ndarray)
        prediction time in seconds and the predicted labels
    """
    datax, datay = make_classification(n, n_features=n_features, n_classes=n_classes,
                                       n_clusters_per_class=n_clusters_per_class,
                                       n_informative=n_informative)
    # Pass n_neighbors by keyword: recent scikit-learn versions reject the
    # positional form.
    model = KNeighborsClassifier(n_neighbors=neighbors, algorithm=algorithm)
    model.fit(datax, datay)
    # Only the prediction step is timed; fitting is deliberately excluded.
    t1 = time.perf_counter()
    y = model.predict(datax)
    t2 = time.perf_counter()
    return t2 - t1, y
# Sanity check: one timing run on a small dataset.
dt, y = what_to_measure(2000, 10)
dt
# Measure prediction time as the number of features grows, for the brute
# algorithm always and for ball_tree only up to 100 features (it becomes
# too slow beyond that).
x = []
y = []
ys = []
for nf in [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]:
    x.append(nf)
    dt, _ = what_to_measure(5000, n_features=nf)
    y.append(dt)
    if nf <= 100:
        dt2, _ = what_to_measure(5000, n_features=nf, algorithm="ball_tree")
    else:
        dt2 = None  # skipped: too expensive in high dimension
    ys.append(dt2)
    print("nf={0} dt={1} dt2={2}".format(nf, dt, dt2))
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, "o-", label="brute")
ax.plot(x, ys, "o-", label="ball_tree")
ax.set_xlabel("number of features")
ax.set_ylabel("prediction time in seconds")
ax.legend()
# Measure prediction time as the number of observations grows, with the
# dimension fixed at 200; ball_tree is only run up to 5000 observations.
x = []
y = []
ys = []
for nobs in [1000, 2000, 5000, 10000, 12000, 15000, 17000, 20000]:
    x.append(nobs)
    dt, _ = what_to_measure(nobs, n_features=200)
    y.append(dt)
    if nobs <= 5000:
        dt2, _ = what_to_measure(nobs, n_features=200, algorithm="ball_tree")
    else:
        dt2 = None  # skipped: too expensive for large n
    ys.append(dt2)
    print("nobs={0} dt={1} dt2={2}".format(nobs, dt, dt2))
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, "o-", label="brute")
ax.plot(x, ys, "o-", label="ball_tree")
ax.set_xlabel("number of observations")
ax.set_ylabel("prediction time in seconds")
ax.legend()
import numpy
import numpy.random
import random
import scipy.sparse
def random_sparse_matrix(shape, ratio_sparse=0.2):
    """Build a random CSR matrix where roughly ``ratio_sparse`` of the
    coefficients are non null.

    A dense random matrix is drawn first, then each coefficient is kept or
    zeroed by a stochastic-rounding rule that keeps the fraction of kept
    coefficients close to ``ratio_sparse``.
    """
    n_rows, n_cols = shape
    values = numpy.random.rand(n_rows * n_cols)
    drift = 0
    for pos in range(values.shape[0]):
        draw = random.random()
        if draw <= ratio_sparse - drift:
            # keep this coefficient; drift rises so the next one is less
            # likely to be kept
            drift += 1 - ratio_sparse
        else:
            # drop this coefficient
            values[pos] = 0
            drift -= ratio_sparse
    return scipy.sparse.csr_matrix(values.reshape(n_rows, n_cols))
# Quick check of the achieved sparsity on a small matrix.
mat = random_sparse_matrix((20, 20))
"% non null coefficient", 1. * mat.nnz / (mat.shape[0] * mat.shape[1]), "shape", mat.shape
import random
from scipy.sparse import hstack
def what_to_measure_sparse(n, n_features, n_classes=3, n_clusters_per_class=2, n_informative=8,
                           neighbors=5, algorithm="brute", nb_sparse=20, ratio_sparse=0.2):
    """Time k-NN prediction on data padded with extra sparse columns.

    The dense classification features are extended with
    ``nb_sparse - n_features`` random sparse columns so that the total
    dimension grows while the number of non-null coefficients stays
    roughly constant.

    Returns
    -------
    (float, ndarray, float)
        prediction time, predicted labels, and the ratio of non-null
        coefficients in the final matrix
    """
    datax, datay = make_classification(n, n_features=n_features, n_classes=n_classes,
                                       n_clusters_per_class=n_clusters_per_class,
                                       n_informative=n_informative)
    sp = random_sparse_matrix((datax.shape[0], (nb_sparse - n_features)), ratio_sparse=ratio_sparse)
    # hstack keeps the result sparse; only algorithm="brute" accepts
    # sparse input for k-NN.
    datax = hstack([datax, sp])
    # Pass n_neighbors by keyword: recent scikit-learn versions reject the
    # positional form.
    model = KNeighborsClassifier(n_neighbors=neighbors, algorithm=algorithm)
    model.fit(datax, datay)
    t1 = time.perf_counter()
    y = model.predict(datax)
    t2 = time.perf_counter()
    return t2 - t1, y, datax.nnz / (datax.shape[0] * datax.shape[1])
# Sanity check of the sparse timing helper.
dt, y, sparse_ratio = what_to_measure_sparse(2000, 10, nb_sparse=100, ratio_sparse=0.2)
dt, sparse_ratio
# Grow the total dimension while keeping ~nfd non-null features: the
# ratio_sparse argument is chosen so nnz stays roughly constant.
x = []
y = []
nfd = 200
for nf in [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]:
    x.append(nf)
    dt, _, ratio = what_to_measure_sparse(2000, n_features=nfd, nb_sparse=nfd+nf,
                                          ratio_sparse=1.*nfd/(nfd+nf))
    y.append(dt)
    print("nf={0} dt={1} ratio={2}".format(nf, dt, ratio))
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, "o-", label="brute")
ax.set_xlabel("number of dimensions")
ax.set_ylabel("prediction time in seconds")
ax.legend()
from sklearn.model_selection import train_test_split
def what_to_measure_perf(n, n_features, n_classes=3, n_clusters_per_class=2, n_informative=8,
                         neighbors=5, algorithm="brute"):
    """Measure prediction time and accuracy of a k-NN classifier.

    The synthetic dataset is split into train/test parts; the model is
    fitted on the training part and evaluated on the held-out part.

    Returns
    -------
    (float, float)
        prediction time in seconds and classification accuracy on the
        test set
    """
    datax, datay = make_classification(n, n_features=n_features, n_classes=n_classes,
                                       n_clusters_per_class=n_clusters_per_class,
                                       n_informative=n_informative)
    X_train, X_test, y_train, y_test = train_test_split(datax, datay)
    # Keyword n_neighbors: recent scikit-learn rejects the positional form.
    model = KNeighborsClassifier(n_neighbors=neighbors, algorithm=algorithm)
    model.fit(X_train, y_train)
    t1 = time.perf_counter()
    y = model.predict(X_test)
    t2 = time.perf_counter()
    # Bug fix: divide by the number of *test* samples, not the full dataset
    # size, otherwise the reported accuracy is scaled down by the test-set
    # fraction.
    good = (y_test == y).sum() / len(y_test)
    return t2 - t1, good
# Sanity check.
what_to_measure_perf(5000, 100)
# Classification accuracy as a function of the dimension.
x = []
y = []
for nf in [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]:
    x.append(nf)
    dt, perf = what_to_measure_perf(5000, n_features=nf)
    y.append(perf)
    print("nf={0} perf={1} dt={2}".format(nf, perf, dt))
fig, ax = plt.subplots(1, 1)
ax.plot(x, y, "o-", label="brute")
ax.set_xlabel("number of dimensions")
ax.set_ylabel("% good classification")
ax.legend()
from sklearn.decomposition import PCA
def what_to_measure_perf_acp(n, n_features, acp_dim=10,
                             n_classes=3, n_clusters_per_class=2, n_informative=8,
                             neighbors=5, algorithm="brute"):
    """Compare k-NN prediction time and accuracy with and without PCA.

    Returns
    -------
    tuple
        (prediction time without PCA, accuracy without PCA,
         prediction time with PCA, PCA + training time, accuracy with PCA)
    """
    datax, datay = make_classification(n, n_features=n_features, n_classes=n_classes,
                                       n_clusters_per_class=n_clusters_per_class,
                                       n_informative=n_informative)
    X_train, X_test, y_train, y_test = train_test_split(datax, datay)
    # without PCA
    model = KNeighborsClassifier(n_neighbors=neighbors, algorithm=algorithm)
    model.fit(X_train, y_train)
    t1o = time.perf_counter()
    y = model.predict(X_test)
    t2o = time.perf_counter()
    # Bug fix: accuracy must be computed over the test set, not the whole
    # dataset, otherwise it is scaled down by the test-set fraction.
    goodo = (y_test == y).sum() / len(y_test)
    # with PCA: project on acp_dim components before fitting
    model = KNeighborsClassifier(n_neighbors=neighbors, algorithm=algorithm)
    pca = PCA(n_components=acp_dim)
    t0 = time.perf_counter()
    X_train_pca = pca.fit_transform(X_train)
    model.fit(X_train_pca, y_train)
    t1 = time.perf_counter()
    X_test_pca = pca.transform(X_test)
    y = model.predict(X_test_pca)
    t2 = time.perf_counter()
    good = (y_test == y).sum() / len(y_test)
    return t2o - t1o, goodo, t2 - t1, t1 - t0, good
# Sanity check.
what_to_measure_perf_acp(5000, 100)
# Collect timings and accuracies with/without PCA over growing dimension.
x = []
y = []        # prediction time with PCA
yp = []       # training time with PCA
p = []        # accuracy with PCA
p_noacp = []  # accuracy without PCA
y_noacp = []  # prediction time without PCA
for nf in [10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10000]:
    x.append(nf)
    dt_noacp, perf_noacp, dt, dt_train, perf = what_to_measure_perf_acp(5000, n_features=nf)
    p.append(perf)
    # Bug fix: y is plotted below as "prediction time with PCA", so it must
    # store the prediction time dt, not the accuracy perf.
    y.append(dt)
    yp.append(dt_train)
    y_noacp.append(dt_noacp)
    p_noacp.append(perf_noacp)
    print("nf={0} perf={1} dt_predict={2} dt_train={3}".format(nf, perf, dt, dt_train))
fig, ax = plt.subplots(1, 2, figsize=(12,5))
ax[0].plot(x, y, "o-", label="prediction time with PCA")
ax[0].plot(x, yp, "o-", label="training time with PCA")
ax[0].plot(x, y_noacp, "o-", label="prediction time no PCA")
ax[0].set_xlabel("number of dimensions")
ax[0].set_ylabel("time")
ax[1].plot(x, p, "o-", label="with PCA")
ax[1].plot(x, p_noacp, "o-", label="no PCA")
ax[1].set_xlabel("number of dimensions")
ax[1].set_ylabel("% good classification")
ax[0].legend()
ax[1].legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To start off we need to obtain data from somewhere. In order to make things easy the pymks_share package is used to import data.
Step2: The X variable contains a set of 3 Bunge Euler angles at each spatial point, corresponding to the crystal orientation. The y variable is the '11' component of the strain tensor as obtained by a finite element simulation ($\epsilon_{xx}$). We can visualize this by plotting a slice of a 3-D microstructure ($\phi_1$ angle only) and its corresponding strain response.
Step3: This may not mean much, but trust us that the $\epsilon_{xx}$ field is rather expensive to calculate. In principle we could visualize this in 3 dimensions using a package like mayavi, but for this tutorial we will just look at a single slice down through the center.
Step4: We can see that we have 40 calibration sets, and 10 validation sets. Ideally we would have a lot more data to validate the model, but at least the 10 should give us an idea of how transferable the model is.
Step5: Now we have selected the basis functions, perhaps we want to know more about what we've selected. Let's ask for the l, m and n indices of the GSH basis functions we've selected (Note that this is an advanced feature and may only be useful for the most seasoned materials scientists!).
Step6: Now all of the complexity of the GSH basis set will be taken care of by pyMKS from here on out. We just need to fit the model
Step7: and then we can look at the "influence coefficients" which capture the connection between the input and output fields. Notice that there are 6 (complex valued) influence coefficients, because we had 6 "states" in the basis set. In other words, there are 2 fields of influence coefficients for each basis set. Again, we are only looking at a slice through the center in order to avoid the complexities of visualizing 3D data.
Step8: we also want to plot the imaginary components of the influence coefficients, you can't forget about these when using the GSH basis!
Step9: We can see that the coefficients for some basis sets have significant values, while others are mostly zero. This means that in principle we could probably describe the system with fewer basis states. We also notice that when there are non-zero components, they are typically centered near zero. This is intuitive, since it tells us that the elastic response of the material is local, as we would expect (and as can be seen in the other elasticity tutorials).
Step10: First let's simply compare slices of the $\epsilon_{xx}$ strain fields for one of our validation microstructures
Step11: So it looks like the MKS is working pretty well in this case, but it is worth seeing if we can do even better.
Step12: Clearly now the results are very good. You might ask if we have too few or too many basis functions? First, let's look at the influence coefficients and what is going on.
Step13: If we look carefully at the influence coefficients we notice that they appear to be identically zero for the 15th basis function and beyond. If we wanted to be thorough we would want to check the influence coefficients for even more basis functions, but for the purposes of this example we can be satisfied that we only need the first 15.
Step14: As we expected the results look great, probably even better than the previous iteration.
|
<ASSISTANT_TASK:>
Python Code:
# Fetch the pyMKS example dataset: X holds Bunge Euler angles per voxel,
# y the corresponding strain response from a finite-element simulation.
import pymks
%matplotlib inline
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
from pymks_share import DataManager
manager = DataManager('pymks.me.gatech.edu')
X, y = manager.fetch_data('random hexagonal orientations')
print(X.shape)
print(y.shape)
from pymks.tools import draw_microstructure_strain
n = X.shape[1]
# Bug fix: use integer division — (n - 1) / 2 yields a float under
# Python 3 and floats cannot be used as array indices below.
center = (n - 1) // 2
draw_microstructure_strain(X[0, center, :, :, 0], y[0, center])
# Split into 40 calibration sets and the remaining validation sets.
N_cal = 40
X_cal = X[0:N_cal, ...]
X_val = X[N_cal:, ...]
y_cal = y[0:N_cal, ...]
y_val = y[N_cal:, ...]
print(X_cal.shape)
print(X_val.shape)
# Fit an MKS localization model on a 6-state hexagonal GSH basis.
from pymks import MKSLocalizationModel
from pymks.bases import GSHBasis
gsh_hex_basis = GSHBasis(n_states=np.arange(6), domain="hexagonal")
print(gsh_hex_basis.basis_indices)
model = MKSLocalizationModel(basis=gsh_hex_basis)
model.fit(X_cal, y_cal)
# Plot real and imaginary parts of the influence coefficients on the
# center slice (GSH coefficients are complex valued).
from pymks.tools import draw_coeff
coef_ = model.coef_
draw_coeff(np.real(coef_[:,center, :, :]), figsize=(2, 3))
draw_coeff(np.imag(coef_[:,center, :, :]), figsize=(2, 3))
# Predict strain on the validation sets and compare with the reference.
y_predict = model.predict(X_val)
from pymks.tools import draw_strains_compare
draw_strains_compare(y_val[0, center], y_predict[0, center])
# Refit with 20 basis states to see whether the fit improves.
gsh_hex_basis = GSHBasis(n_states=np.arange(20), domain='hexagonal')
model = MKSLocalizationModel(basis=gsh_hex_basis)
model.fit(X_cal, y_cal)
y_predict = model.predict(X_val)
draw_strains_compare(y_val[0, center], y_predict[0, center])
# Inspect the 20-state influence coefficients (real and imaginary parts).
from pymks.tools import draw_coeff
coeff = model.coef_
draw_coeff(np.real(coeff[:,center, :, :]), figsize=(4, 5))
draw_coeff(np.imag(coeff[:,center, :, :]), figsize=(4, 5))
# Coefficients beyond the 15th are essentially zero, so refit with 15.
gsh_hex_basis = GSHBasis(n_states=np.arange(15), domain='hexagonal')
model = MKSLocalizationModel(basis=gsh_hex_basis)
model.fit(X_cal, y_cal)
y_predict = model.predict(X_val)
draw_strains_compare(y_val[0, center], y_predict[0, center])
# Same model but with a cubic-symmetry GSH basis for comparison.
gsh_cube_basis = GSHBasis(n_states=np.arange(15), domain='cubic')
model = MKSLocalizationModel(basis=gsh_cube_basis)
model.fit(X_cal, y_cal)
y_predict = model.predict(X_val)
draw_strains_compare(y_val[0, center], y_predict[0, center])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Least Squares Linear Regression
Step2: Notice that we didn't get exactly the correct answer. The points were generated with a slope of 2.5 and an intercept of 1, whereas our fit was a little bit off
Step3: The $p$-value is quite high, so we don't reject the null hypothesis
Step4: The $p$-value is 0.002, so we correctly reject the null hypothesis
Step5: At this point, it's unclear if they are normally distributed. Luckily we just learned the Shapiro–Wilk Test!
Step6: It looks like the residuals may indeed be normally distributed. So, our assumption was valid.
Step7: Linear Regression Error Analysis
Step8: Let's just visualize now what the distribution for where the true intercept looks like. We know it is distributed according to
Step9: Example
Step10: Notice that just like in confidence intervals, once $N$ becomes large we can replace the $t$-distribution with a normal distribution.
Step11: Depends on random date above!
Step12: Now we have our $X$ matrix set-up. Now we need to evaluate the matrix equation for $\hat{\beta}$ above
Step13: Since it is tedius to type that whole equation out, you can instead use a shortcut
Step14: The *_ symbol means put the rest of the return value into the _ variable, which recall is how we indicate that we're making a variable which we will not use.
Step15: The first plot will be a $\hat{y}$ vs $y$ plot. This is called a parity plot. If $y$ and $\hat{y}$ are the same, you would see a $y=x$ line. How far the deviation is from that line is how bad the fit is.
Step16: Of course we can also look at the histogram of residuals
Step17: Error Analysis For Multidimensional Least Squares - OLS-ND
Step18: Example Error Analysis
Step19: Now that we have the standard error matrix, we can create confidence intervals for the $\beta$ values.
Step20: Goodness of Fit
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import random
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, pi, erf
import scipy.stats
# Make some data -> this is problem setup
# Do NOT copy this because it only generates random data
# This does not perform regression
x = np.linspace(0,10, 20)
# true model: intercept 1, slope 2.5, Gaussian noise sigma=2
y = 1 + x * 2.5 + scipy.stats.norm.rvs(scale=2, size=20)
plt.plot(x,y, 'o')
plt.show()
# OLS slope = cov(x, y) / var(x); intercept = mean(y - slope * x)
cov = np.cov(x,y, ddof=2)
#recall that the diagonal is variances, so we use that directly
beta_hat = cov[0,1] / cov[0,0]
alpha_hat = np.mean( y - beta_hat * x)
# the value in parentheses is the true parameter used to generate the data
print(f'alpha_hat = {alpha_hat:.2} ({1})')
print(f'beta_hat = {beta_hat:.2} ({2.5})')
plt.plot(x,y, 'o')
plt.plot(x, alpha_hat + beta_hat * x)
plt.show()
# Shapiro-Wilk test on a small sample: high p-value -> cannot reject
# normality.
data = [12.4, 12.6, 11.8, 11.5, 11.9, 12.2, 12.0, 12.1, 11.8]
scipy.stats.shapiro(data)
# Uniformly spaced data: the test correctly rejects normality.
data = np.linspace(0,1, 100)
scipy.stats.shapiro(data)
# Check that the regression residuals look normally distributed.
plt.hist(y - beta_hat * x - alpha_hat)
plt.show()
scipy.stats.shapiro(y - beta_hat * x - alpha_hat)
# Coefficient of determination: R^2 = 1 - SSR / TSS.
ssr = np.sum((y - alpha_hat - beta_hat * x)**2)
tss = np.sum((np.mean(y) - y)**2)
rsq = 1 - ssr / tss
print(rsq, sqrt(rsq))
print(np.corrcoef(x,y))
# Standard error of the intercept from the residual variance.
df = len(x) - 2
s2_epsilon = np.sum((y - alpha_hat - beta_hat * x) ** 2) / df
# NOTE(review): the textbook formula uses 1/n (sample size) for the first
# term, not 1/df — confirm against the course notes before reusing.
s2_alpha = s2_epsilon * (1. / df + np.mean(x) ** 2 / (np.sum((np.mean(x) - x) ** 2)))
print('The standard error for the intercept is', np.sqrt(s2_alpha))
# Visualize the t-distribution of the intercept estimate; red line marks
# the true value (1), the other the estimate.
alpha_grid = np.linspace(-2, 2, 100)
P_alpha = scipy.stats.t.pdf(alpha_grid, loc=alpha_hat, scale=np.sqrt(s2_alpha), df=len(x) - 2)
plt.plot(alpha_grid, P_alpha)
plt.axvline(1, color='red')
plt.axvline(alpha_hat)
plt.show()
# 95% confidence interval for the slope.
s2_beta = s2_epsilon / np.sum((x - np.mean(x))**2)
T = scipy.stats.t.ppf(0.975, len(x) - 2)
print(s2_beta, T)
print('beta = ', beta_hat, '+/-', T * np.sqrt(s2_beta), ' with 95% confidence')
# Hypothesis test: is the intercept significantly different from zero?
df = len(x) - 2
s2_epsilon = np.sum((y - alpha_hat - beta_hat * x) ** 2) / df
s2_alpha = s2_epsilon * (1. / df + np.mean(x) ** 2 / (np.sum((np.mean(x) - x) ** 2)))
#ensure our T-value is positive, so our integral doesn't get flipped
T = abs(alpha_hat / sqrt(s2_alpha))
# NOTE(review): len(x) - 1 degrees of freedom here is inconsistent with
# the len(x) - 2 used everywhere else — verify which is intended.
p = 1 - (scipy.stats.t.cdf(T, len(x) - 1) - scipy.stats.t.cdf(-T, len(x) - 1))
print('alpha = ', alpha_hat, ' T = ', T, ' p-value = ', p)
#NOTE: THIS IS NOT PART OF REGRESSION!!!!
#DO NOT COPY PASTE THIS CODE INTO HW/EXAM
#generate data
#I add some noise to the x coordinate to just spread the points out a little.
x1 = np.linspace(0,1,15)+ scipy.stats.norm.rvs(size=15)
x2 = np.linspace(0,1,15) + scipy.stats.norm.rvs(size=len(x1))
# true model: y = 3*x1 - 2*x2 + 3 + noise
y = 3 * x1 - 2 * x2 + 3 + scipy.stats.norm.rvs(size=len(x1))
y
import numpy.linalg as linalg
# design matrix: column of ones (intercept), then x1 and x2
x_mat = np.column_stack( (np.ones(len(x1)), x1, x2) )
x_mat
#dot -> matrix multiplication
#transpose -> take a transpose
#linalg.inv -> compute a matrix inverse
# normal-equation solution: beta = (X^T X)^-1 X^T y
beta_hat = linalg.inv(x_mat.transpose() @ x_mat) @ x_mat.transpose() @ y
# shortcut: lstsq solves the same problem; *_ discards the extra outputs
beta_hat, *_ = linalg.lstsq(x_mat, y)
# parity plot: predicted vs observed; points on the red y=x line are exact
y_hat = x_mat @ beta_hat
plt.plot(y, y_hat, 'o')
plt.plot(y, y, 'r')
plt.xlabel('$y$')
plt.ylabel('$\hat{y}$')
plt.show()
plt.hist(y - y_hat)
plt.show()
#NOTE THIS IS NOT PART OF REGRESSION!
#make some data to regress
x = np.linspace(-3, 3, 25)
# true model: 2x^2 - 3x + 4 with Gaussian noise
y = 2 * x ** 2 - 3 * x + 4 + scipy.stats.norm.rvs(size=len(x), loc=0, scale=1.5)
#END
plt.plot(x,y, 'o', label='data')
plt.plot(x,2 * x ** 2 - 3 * x + 4, '-', label='exact solution')
plt.legend(loc='upper right')
plt.show()
# design matrix for a quadratic fit: columns x^2, x, 1
x_mat = np.column_stack( (x**2, x, np.ones(len(x))) )
x_mat
beta,*_ = linalg.lstsq(x_mat, y)
print(beta)
plt.plot(x,y, 'o', label='data')
plt.plot(x,2 * x ** 2 - 3 * x + 4, '-', label='exact solution')
plt.plot(x,x_mat.dot(beta), label='least squares')
plt.legend(loc='upper right')
plt.show()
# residual variance with len(beta) fitted parameters
yhat = x_mat @ beta
resids = yhat - y
SSR = np.sum(resids**2)
se2_epsilon = SSR / (len(x) - len(beta))
print(se2_epsilon)
# standard-error matrix: s2 * (X^T X)^-1; diagonal gives each beta's variance
se2_beta = se2_epsilon * linalg.inv(x_mat.transpose() @ x_mat)
print(se2_beta)
for i in range(len(beta)):
    #get our T-value for the confidence interval
    T = scipy.stats.t.ppf(0.975, len(x) - len(beta))
    # Get the width of the confidence interval using our previously computed standard error
    cwidth = T * np.sqrt(se2_beta[i,i])
    # print the result for each coefficient
    print(f'beta_{i} is {beta[i]:.2f} +/- {cwidth:.2f} with 95% confidence')
# goodness of fit
TSS = np.sum( (np.mean(y) - y)**2)
R2 = 1 - SSR / TSS
R2, np.sqrt(R2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Principal Component/EOF analysis
Step2: EOF analysis
Step3: Applying the SVD
Step4: And begin by looking at the spectrum of singular values $\Sigma$. Defining the variance as $\Sigma^2$ then we can also calculate the cumulative contribution to the total variance as
Step5: Plotting the first 4 Singular Vectors in $V$, shows them to reflect some commonly occuring patterns in the data
Step6: For example, the first EOF pattern is primarily a symmetric pattern with an axial high surrounded by two off axis troughs (or an axial low with two flanking highs, the EOF's are just unit vector bases for the row-space and can be added with any positive or negative coefficient). The Second EOF is broader and all of one sign while the third EOF encodes assymetry.
Step7: And we can consider a few reconstructed profiles compared with the original data
Step8: projection of data onto a subspace
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy.linalg as la
import matplotlib.pyplot as plt
import csv
# read the data from the csv file
data = np.genfromtxt('m80.csv', delimiter='')
data_mean = np.mean(data,0)
# and plot out a few profiles and the mean depth.
plt.figure()
rows = [ 9,59,99]
labels = [ 'slow','medium','fast']
for i,row in enumerate(rows):
plt.plot(data[row,:],label=labels[i])
plt.hold(True)
plt.plot(data_mean,'k--',label='mean')
plt.xlabel('Distance across axis (km)')
plt.ylabel('Relative Elevation (m)')
plt.legend(loc='best')
plt.title('Example cross-axis topography of mid-ocean ridges')
plt.show()
# Visualize the de-meaned data matrix (rows ordered by spreading rate).
plt.figure()
X = data - data_mean
plt.imshow(X)
plt.xlabel('Distance across axis (Km)')
plt.ylabel('Relative Spreading Rate')
plt.colorbar()
plt.show()
# now calculate the SVD of the de-meaned data matrix
U,S,Vt = la.svd(X,full_matrices=False)
# plot the singular values
plt.figure()
plt.semilogy(S,'bo')
plt.grid()
plt.title('Singular Values')
plt.show()
# and cumulative percent of variance (variance = squared singular values)
g = np.cumsum(S*S)/np.sum(S*S)
plt.figure()
plt.plot(g,'bx-')
plt.title('% cumulative percent variance explained')
plt.grid()
plt.show()
# Plot the first few right singular vectors (EOF patterns).
plt.figure()
num_EOFs=3
for row in range(num_EOFs):
    plt.plot(Vt[row,:],label='EOF{}'.format(row+1))
plt.grid()
plt.xlabel('Distance (km)')
plt.title('First {} EOFs '.format(num_EOFs))
plt.legend(loc='best')
plt.show()
# recontruct the data using the first 5 EOF's
k=5
Ck = np.dot(U[:,:k],np.diag(S[:k]))
Vtk = Vt[:k,:]
data_k = data_mean + np.dot(Ck,Vtk)
plt.figure()
plt.imshow(data_k)
plt.colorbar()
plt.title('reconstructed data')
plt.show()
# show the original 3 profiles and their recontructed values using the first k EOF's
for i,row in enumerate(rows):
    plt.figure()
    plt.plot(data_k[row,:],label='k={}'.format(k))
    # plt.hold(True) removed: pyplot.hold was dropped in matplotlib 3.0
    # and the next plot() call already draws on the same axes.
    plt.plot(data[row,:],label='original data')
    # show the first k principal-component coefficients in the title
    Cstring = [ '{:3.0f}, '.format(Ck[row,i]) for i in range(k) ]
    plt.title('Reconstruction profile {}:\n C_{}='.format(row,k)+''.join(Cstring))
    plt.legend(loc='best')
    plt.show()
# plot the data in the plane defined by the first two principal components
plt.figure()
plt.scatter(Ck[:,0],Ck[:,1])
plt.xlabel('$V_1$')
plt.ylabel('$V_2$')
plt.grid()
plt.title('Projection onto the first two principal components')
plt.show()
# Or consider the degree of assymetry (EOF 3) as a function of spreading rate
plt.figure()
plt.plot(Ck[:,2],'bo')
plt.xlabel('Spreading rate')
plt.ylabel('$C_3$')
plt.grid()
plt.title('Degree of assymetry')
plt.show()
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Downloading Data
Step2: Principal Component Analysis
Step3: Comparing with Day of Week
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from urllib import request
# Download the Fremont bridge bicycle-counter data from the Seattle open
# data portal.
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
request.urlretrieve(FREMONT_URL, 'Fremont.csv')
# magic function to show the content of the file
%more Fremont.csv
import pandas as pd
df = pd.read_csv('Fremont.csv') # use read_csv to load the data into dataframe
df.head()
# Let's see the type of the data
df.dtypes
# change the Date column to datetime data type
df['Date'] = pd.to_datetime(df['Date'])
df.head()
df.dtypes
# Set the index to Date
df.set_index('Date', inplace=True)
df.head()
# count missing values per column
df.apply(lambda x: sum(x.isnull()))
# clean the data by dropping rows with missing values
df.dropna(inplace=True)
df.apply(lambda x: sum(x.isnull()))
df.columns
df.plot()
# weekly totals are easier to read than the raw hourly counts
df.resample('W').sum().plot()
df.columns=['West', 'East']
df.resample('w').sum().plot()
# To see whether there is any annual trend of the number of rides
df.resample('D').sum().rolling(365).sum().plot()
# each point is the sum of the number of rides in the previuos 365 days
# The y coordinate is not from 0
ax = df.resample('D').sum().rolling(365).sum().plot()
ax.set_ylim(0, None)
# DatetimeIndex.time returns a numpy array of datetime.time, the time part of the Timestamps
df.groupby(df.index.time).mean().plot()
# plot the average of rides at each hour of the day
# Create the pivoted table to investigate the pattern in each day
df['Total'] = df['West'] + df['East']
# rows = hour of day, columns = calendar date, values = total rides
pivoted = df.pivot_table(values='Total', index=df.index.time, columns=df.index.date)
pivoted.head()
pivoted.shape
# drop dates with missing hourly values
pivoted.dropna(axis=1, inplace=True)
pivoted.shape
pivoted.plot(legend=False)
# add transparency parameter alpha so overlapping day-curves stay readable
pivoted.plot(legend=False, alpha=0.01)
# Get X with hours as measurements and dates as observations
X = pivoted.T.values
X.shape
X
# Project each day's 24-hour profile onto its first two principal components.
from sklearn.decomposition import PCA
X2 = PCA(2, svd_solver='full').fit_transform(X)
X2
X2.shape
plt.scatter(X2[:, 0], X2[:, 1])
# use cluster algorithm Gaussian mixture model with two components
from sklearn.mixture import GaussianMixture
gmm = GaussianMixture(2)
gmm.fit(X)
labels = gmm.predict(X)
labels
# plt.scatter(X2[:, 0], X2[:, 1], c=labels, cmap='rainbow')
# plt.colorbar()
# color the PCA scatter by cluster label
plt.scatter(X2[:, 0], X2[:, 1], c=labels)
plt.colorbar()
labels
# so labels == 1 represents the weekday
pivoted.T[labels == 1].T.plot(legend=False, alpha=0.01)
# labels == 0 represents the weekend or holiday
pivoted.T[labels == 0].T.plot(legend=False, alpha=0.1)
pd.DatetimeIndex(pivoted.columns)
# DatetimeIndex.dayofweek gives the day of the week (0=Monday)
dayofweek = pd.DatetimeIndex(pivoted.columns).dayofweek
dayofweek
# Then we plot the color of the weekday
plt.scatter(X2[:, 0], X2[:, 1], c=dayofweek)
plt.colorbar()
# grab the days in cluster 0 which are not weekends: likely holidays
dates = pd.DatetimeIndex(pivoted.columns)
dates[(labels == 0) & (dayofweek < 5)]
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download or use cached file oecd-canada-col.json. Caching file on disk permits to work off-line and to speed up the exploration of the data.
Step2: Initialize JsonStatCollection from the file and print the list of dataset contained into the collection.
Step3: Select the firt dataset. Oecd dataset has three dimensions (concept, area, year), and contains 432 values.
Step4: Shows some detailed info about dimensions.
Step5: Trasforming dataset into pandas DataFrame
Step6: Extract a subset of data in a pandas dataframe from the jsonstat dataset.
Step7: Trasforming a dataset into a python list
Step8: It is possible to trasform jsonstat data into table in different order
|
<ASSISTANT_TASK:>
Python Code:
# all import here
from __future__ import print_function
import os
import pandas as ps # using panda to convert jsonstat dataset to pandas dataframe
import jsonstat # import jsonstat.py package
# NOTE(review): this aliases the matplotlib *package* as plt, not
# matplotlib.pyplot — confirm this is intended before using plt below.
import matplotlib as plt # for plotting
%matplotlib inline
# Use a cached copy of the sample file if present (works offline and
# avoids re-downloading), otherwise download it next to the notebook.
url = 'http://json-stat.org/samples/oecd-canada-col.json'
file_name = "oecd-canada-col.json"
file_path = os.path.abspath(os.path.join("..", "tests", "fixtures", "www.json-stat.org", file_name))
if os.path.exists(file_path):
    print("using already downloaded file {}".format(file_path))
else:
    print("download file and storing on disk")
    jsonstat.download(url, file_name)
    file_path = file_name
# Parse the JSON-stat collection and pick its first dataset.
collection = jsonstat.from_file(file_path)
collection
oecd = collection.dataset(0)
oecd
# Inspect the three dimensions of the dataset.
oecd.dimension('concept')
oecd.dimension('area')
oecd.dimension('year')
# Look up individual values by dimension labels or ids.
oecd.data(area='IT', year='2012')
oecd.value(area='IT', year='2012')
oecd.value(concept='unemployment rate',area='Australia',year='2004') # 5.39663128
oecd.value(concept='UNR',area='AU',year='2004')
# Convert the whole dataset to a pandas DataFrame indexed by year.
df_oecd = oecd.to_data_frame('year', content='id')
df_oecd.head()
df_oecd['area'].describe() # area contains 36 values
# Extract only Canada by blocking the 'area' dimension.
df_oecd_ca = oecd.to_data_frame('year', content='id', blocked_dims={'area':'CA'})
df_oecd_ca.tail()
df_oecd_ca['area'].describe() # area contains only one value (CA)
df_oecd_ca.plot(grid=True)
# Dump the dataset as a plain table, then again with dimensions reversed.
oecd.to_table()[:5]
order = [i.did for i in oecd.dimensions()]
order = order[::-1] # reverse list
table = oecd.to_table(order=order)
table[:5]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: This gives us a pandas dataframe with columns of the latitude and longitude for each recorded point in my location history. There are several other columns as well, but for the time being lets ignore these.
Step4: Okay so finally, lets plot all the data
Step5: Part II
Step6: This shows that $\sim$ 500 km is a good cut-off to describe if a point is from home or away. Lets generate a new dataframe with data from abroad only
Step7: Clustering the away points
Step8: This has created a new column in the data frame, the cluster index to which kmeans has assigned the point. We can plot all the data frames indicating the cluster by color as follows. Note that the original data frame is passed in as the first element of df_list, this is done only to set up the map and is ignored due to the ignore_first flag.
|
<ASSISTANT_TASK:>
Python Code:
# Create a dataframe from the json file in the filepath
# (assumes pandas is imported as pd — the import cell is not shown here)
raw = pd.io.json.read_json('LocationHistory.json')
df = raw['locations'].apply(pd.Series)
# Convert the E7-scaled integer latitude to degrees (scale by 1e-7)
df['latitude'] = df['latitudeE7'] * 1e-7
# Convert the E7-scaled integer longitude to degrees (scale by 1e-7)
df['longitude'] = df['longitudeE7'] * 1e-7
def PaddingFunction(xL, xR, frac=0.1):
    """Return xL and xR with an added padding factor of frac either side.

    The docstring quotes were lost when the notebook was flattened, which
    left a bare expression that is a syntax error; they are restored here.
    """
    xRange = xR - xL
    xL_new = xL - frac*xRange
    xR_new = xR + frac*xRange
    return xL_new, xR_new
def GeneratePlot(data, fig=None, ignore_first=False, *args, **kwargs):
    """Helper function to plot points on a map.

    Parameters
    ----------
    data : pandas.DataFrame or list of DataFrames
        Frames with `latitude` and `longitude` columns; when a list is
        given, the first frame defines the map extents.
    fig : matplotlib figure, optional
        Figure to draw on; a new one is created when omitted.
    ignore_first : bool
        If true the data in the first df in data is ignored and used only to set
        up the map.

    Returns
    -------
    The matplotlib figure containing the map.
    """
    # isinstance instead of type(...) ==: also accepts DataFrame subclasses.
    if isinstance(data, pd.DataFrame):
        # Single df
        df = data
        df_list = [df]
    elif isinstance(data, list):
        df_list = data
        df = data[0]
    if not fig:
        fig = plt.figure()
    # Calculate some parameters which will be reused
    lat_0 = df.latitude.mean()
    lon_0 = df.longitude.mean()
    # pad the extents so points don't sit on the map edge
    llcrnrlon, urcrnrlon = PaddingFunction(df.longitude.min(), df.longitude.max(), frac=0.3)
    llcrnrlat, urcrnrlat = PaddingFunction(df.latitude.min(), df.latitude.max())
    # Create a map, using the Gall–Peters projection,
    m = Basemap(projection='gall',
                resolution = 'l',
                area_thresh = 10000.0,
                lat_0=lat_0, lon_0=lon_0,
                llcrnrlon=llcrnrlon,
                urcrnrlon=urcrnrlon,
                llcrnrlat=llcrnrlat,
                urcrnrlat=urcrnrlat,
                ax=fig.gca()
                )
    m.drawcoastlines()
    m.drawcountries()
    m.fillcontinents(color = '#996633')
    m.drawmapboundary(fill_color='#0099FF')
    if ignore_first:
        df_list = df_list[1:]
    for df in df_list:
        # Define our longitude and latitude points
        x, y = m(df['longitude'].values, df['latitude'].values)
        # Plot them using round markers of size 6
        m.plot(x, y, "o", zorder=100, *args, **kwargs)
    return fig
# Draw every recorded location as a red point on the world map.
fig = GeneratePlot(df, color="r")
def Haversine(theta):
    """Haversine of an angle in radians: sin^2(theta / 2)."""
    half_angle = theta / 2.0
    return np.sin(half_angle) ** 2
def DistanceFromGreenwhich(lat, lon):
    # Great-circle distance (haversine formula) from Greenwich to the given
    # point(s), in metres. lat/lon are in degrees; numpy arrays work too.
    R = 6.371e6 # mean Earth radius, m
    latG, lonG = 51.48, 0.00 # Greenwich lat and long, degrees
    latG = np.radians(latG)
    lonG = np.radians(lonG)
    lat = np.radians(lat)
    lon = np.radians(lon)
    # haversine formula: hav(d/R) = hav(dlat) + cos(lat1)cos(lat2)hav(dlon)
    arg = Haversine(lat - latG) + np.cos(latG)*np.cos(lat)*Haversine(lon - lonG)
    return 2 * R * np.arcsin(np.sqrt(arg))
df['DistanceFromGreenwhich'] = DistanceFromGreenwhich(df.latitude, df.longitude)
fig, ax = plt.subplots()
out = ax.hist(df.DistanceFromGreenwhich * 1e-3, bins=50)
ax.set_xlabel("Distance from Greenwhich (km)")
ax.set_ylabel("Count")
plt.show()
df_away = df[df.DistanceFromGreenwhich > 300e3].copy(deep=True)
fig = GeneratePlot(df_away, color="r")
# Cluster the far-away points into 7 geographic groups with k-means.
from scipy.cluster.vq import kmeans,vq
# Feature matrix: one row per point, columns = (latitude, longitude).
data = np.vstack((df_away.latitude.values, df_away.longitude.values)).T
centroids,_ = kmeans(data, 7, iter=50, thresh=1e-9)
# Assign each point to its nearest centroid.
idx,_ = vq(data, centroids)
df_away['cluster_idx'] = idx
# One DataFrame per cluster; the full df only sets up the map extent.
df_list = [df]
for idx in df_away.cluster_idx.unique():
    df_list.append(df_away[df_away.cluster_idx == idx])
fig = GeneratePlot(df_list, ignore_first=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the lending club dataset
Step2: Like the previous assignment, we reassign the labels to have +1 for a safe loan, and -1 for a risky (bad) loan.
Step3: Unlike the previous assignment where we used several features, in this assignment, we will just be using 4 categorical features.
Step4: Let's explore what the dataset looks like.
Step5: Subsample dataset to make sure classes are balanced
Step6: Note
Step7: Let's see what the feature columns look like now
Step8: Let's explore what one of these columns looks like
Step9: This column is set to 1 if the loan grade is A and 0 otherwise.
Step10: Train-test split
Step11: Decision tree implementation
Step12: Because there are several steps in this assignment, we have introduced some stopping points where you can check your code and make sure it is correct before proceeding. To test your intermediate_node_num_mistakes function, run the following code until you get a Test passed!, then you should proceed. Otherwise, you should spend some time figuring out where things went wrong.
Step13: Function to pick best feature to split on
Step14: To test your best_splitting_feature function, run the following code
Step15: Building the tree
Step16: We have provided a function that learns the decision tree recursively and implements 3 stopping conditions
Step17: Here is a recursive function to count the nodes in your tree
Step18: Run the following test code to check your implementation. Make sure you get 'Test passed' before proceeding.
Step19: Build the tree!
Step20: Making predictions with a decision tree
Step21: Now, let's consider the first example of the test set and see what my_decision_tree model predicts for this data point.
Step22: Let's add some annotations to our prediction to see what the prediction path was that lead to this predicted class
Step23: Quiz question
Step24: Now, let's use this function to evaluate the classification error on the test set.
Step25: Quiz Question
Step26: Quiz Question
Step27: Exploring the left subtree of the left subtree
Step28: Quiz question
|
<ASSISTANT_TASK:>
Python Code:
# GraphLab Create / SFrame notebook (Python 2 era: note print statements).
import graphlab
loans = graphlab.SFrame('lending-club-data.gl/')
# Reassign labels: +1 for a safe loan, -1 for a risky (bad) loan.
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
loans = loans.remove_column('bad_loans')
features = ['grade',            # grade of the loan
            'term',             # the term of the loan
            'home_ownership',   # home_ownership status: own, mortgage or rent
            'emp_length',       # number of years of employment
           ]
target = 'safe_loans'
loans = loans[features + [target]]
loans
# Subsample so the two classes are balanced.
safe_loans_raw = loans[loans[target] == 1]
risky_loans_raw = loans[loans[target] == -1]
# Since there are less risky loans than safe loans, find the ratio of the sizes
# and use that percentage to undersample the safe loans.
percentage = len(risky_loans_raw)/float(len(safe_loans_raw))
safe_loans = safe_loans_raw.sample(percentage, seed = 1)
risky_loans = risky_loans_raw
loans_data = risky_loans.append(safe_loans)
print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data))
print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data))
print "Total number of loans in our new dataset :", len(loans_data)
loans_data = risky_loans.append(safe_loans)
# One-hot encode each categorical feature into 0/1 indicator columns.
for feature in features:
    loans_data_one_hot_encoded = loans_data[feature].apply(lambda x: {x: 1})
    loans_data_unpacked = loans_data_one_hot_encoded.unpack(column_name_prefix=feature)
    # Change None's to 0's
    for column in loans_data_unpacked.column_names():
        loans_data_unpacked[column] = loans_data_unpacked[column].fillna(0)
    loans_data.remove_column(feature)
    loans_data.add_columns(loans_data_unpacked)
features = loans_data.column_names()
features.remove('safe_loans')  # Remove the response variable
features
print "Number of features (after binarizing categorical variables) = %s" % len(features)
loans_data['grade.A']
print "Total number of grade.A loans : %s" % loans_data['grade.A'].sum()
print "Expexted answer : 6422"
train_data, test_data = loans_data.random_split(.8, seed=1)
def intermediate_node_num_mistakes(labels_in_node):
    """Number of mistakes made by majority-class prediction on these labels.

    Labels are +1 (safe) / -1 (risky); the majority vote misclassifies every
    example of the minority class, so the answer is the smaller of the two
    class counts (0 for an empty node).
    """
    if len(labels_in_node) == 0:
        return 0
    num_safe = (labels_in_node == 1).sum()
    num_risky = (labels_in_node == -1).sum()
    return min(num_safe, num_risky)
# Sanity checks for intermediate_node_num_mistakes: expect 'Test passed!'
# three times.
# Test case 1
example_labels = graphlab.SArray([-1, -1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print 'Test passed!'
else:
    print 'Test 1 failed... try again!'
# Test case 2
example_labels = graphlab.SArray([-1, -1, 1, 1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print 'Test passed!'
else:
    print 'Test 2 failed... try again!'
# Test case 3
example_labels = graphlab.SArray([-1, -1, -1, -1, -1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print 'Test passed!'
else:
    print 'Test 3 failed... try again!'
def best_splitting_feature(data, features, target):
    """Return the binary feature whose 0/1 split minimises classification error.

    Error of a split = (mistakes on the 0-branch + mistakes on the 1-branch)
    divided by the total number of examples; ties keep the earlier feature.
    """
    best_feature = None
    best_error = 10  # classification error is always <= 1, so 10 acts as infinity
    # Float so the error division is exact under Python 2 as well.
    num_data_points = float(len(data))
    for candidate in features:
        # 0-branch and 1-branch of this binary feature.
        left_split = data[data[candidate] == 0]
        right_split = data[data[candidate] == 1]
        mistakes = (intermediate_node_num_mistakes(left_split[target])
                    + intermediate_node_num_mistakes(right_split[target]))
        error = mistakes / num_data_points
        # Strict '<' keeps the first feature among equally good ones.
        if error < best_error:
            best_feature, best_error = candidate, error
    return best_feature
# Quick check: the best first split should be the 36-month term indicator.
if best_splitting_feature(train_data, features, 'safe_loans') == 'term. 36 months':
    print 'Test passed!'
else:
    print 'Test failed... try again!'
def create_leaf(target_values):
    """Build a leaf node predicting the majority class of *target_values*
    (+1 on a strict majority of +1 labels, otherwise -1)."""
    positives = len(target_values[target_values == +1])
    negatives = len(target_values[target_values == -1])
    # Ties predict -1, matching the original's else-branch.
    return {'splitting_feature': None,
            'left': None,
            'right': None,
            'is_leaf': True,
            'prediction': +1 if positives > negatives else -1}
def decision_tree_create(data, features, target, current_depth = 0, max_depth = 10):
    """Recursively build a binary decision tree as nested dicts.

    Stops when a node is pure, no features remain, max_depth is reached,
    or the chosen split is "perfect" (one side empty).
    """
    remaining_features = features[:] # Make a copy of the features.
    target_values = data[target]
    print "--------------------------------------------------------------------"
    print "Subtree, depth = %s (%s data points)." % (current_depth, len(target_values))
    # Stopping condition 1
    # (Check if there are mistakes at current node.
    # Recall you wrote a function intermediate_node_num_mistakes to compute this.)
    if intermediate_node_num_mistakes(data[target]) == 0:  ## YOUR CODE HERE
        print "Stopping condition 1 reached."
        # If not mistakes at current node, make current node a leaf node
        return create_leaf(target_values)
    # Stopping condition 2 (check if there are remaining features to consider splitting on)
    if remaining_features == []:   ## YOUR CODE HERE
        print "Stopping condition 2 reached."
        # If there are no remaining features to consider, make current node a leaf node
        return create_leaf(target_values)
    # Additional stopping condition (limit tree depth)
    if current_depth >= max_depth:  ## YOUR CODE HERE
        print "Reached maximum depth. Stopping for now."
        # If the max tree depth has been reached, make current node a leaf node
        return create_leaf(target_values)
    # Find the best splitting feature (recall the function best_splitting_feature implemented above)
    ## YOUR CODE HERE
    splitting_feature = best_splitting_feature(data, features, target)
    # Split on the best feature that we found.
    left_split = data[data[splitting_feature] == 0]
    right_split = data[data[splitting_feature] == 1]  ## YOUR CODE HERE
    # Each feature is used at most once along any root-to-leaf path.
    remaining_features.remove(splitting_feature)
    print "Split on feature %s. (%s, %s)" % (\
        splitting_feature, len(left_split), len(right_split))
    # Create a leaf node if the split is "perfect"
    if len(left_split) == len(data):
        print "Creating leaf node."
        return create_leaf(left_split[target])
    if len(right_split) == len(data):
        print "Creating leaf node."
        ## YOUR CODE HERE
        return create_leaf(right_split[target])
    # Repeat (recurse) on left and right subtrees
    left_tree = decision_tree_create(left_split, remaining_features, target, current_depth + 1, max_depth)
    ## YOUR CODE HERE
    right_tree = decision_tree_create(right_split, remaining_features, target, current_depth + 1, max_depth)
    return {'is_leaf'          : False,
            'prediction'       : None,
            'splitting_feature': splitting_feature,
            'left'             : left_tree,
            'right'            : right_tree}
def count_nodes(tree):
    """Total number of nodes (internal plus leaf) in a decision-tree dict."""
    if tree['is_leaf']:
        return 1
    subtree_total = count_nodes(tree['left']) + count_nodes(tree['right'])
    return 1 + subtree_total
# Build a depth-3 tree and verify its node count (should be 13).
small_data_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth = 3)
if count_nodes(small_data_decision_tree) == 13:
    print 'Test passed!'
else:
    print 'Test failed... try again!'
    print 'Number of nodes found :', count_nodes(small_data_decision_tree)
    print 'Number of nodes that should be there : 13'
# Make sure to cap the depth at 6 by using max_depth = 6
my_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6)
def classify(tree, x, annotate = False):
    """Predict the class of example *x* by walking the tree; when *annotate*
    is true, print the decision path along the way."""
    if tree['is_leaf']:
        if annotate:
            print("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']
    # Internal node: follow the branch selected by the splitting feature.
    feature_value = x[tree['splitting_feature']]
    if annotate:
        print("Split on %s = %s" % (tree['splitting_feature'], feature_value))
    branch = tree['left'] if feature_value == 0 else tree['right']
    return classify(branch, x, annotate)
# Inspect the first test example and the tree's prediction (and path) for it.
test_data[0]
print 'Predicted class: %s ' % classify(my_decision_tree, test_data[0])
classify(my_decision_tree, test_data[0], annotate=True)
def evaluate_classification_error(tree, data, target='safe_loans'):
    """Fraction of rows in *data* misclassified by *tree*.

    Parameters
    ----------
    tree : dict
        Decision tree produced by decision_tree_create.
    data : SFrame
        Rows to classify; must contain the *target* column.
    target : str
        Name of the label column.  Defaults to 'safe_loans' for backward
        compatibility (the original hard-coded it).
    """
    # Apply classify(tree, x) to each row, then compare with the true labels.
    prediction = data.apply(lambda x: classify(tree, x))
    mistakes = (prediction != data[target]).sum()
    return float(mistakes) / len(data)
evaluate_classification_error(my_decision_tree, test_data)
def print_stump(tree, name = 'root'):
    """Pretty-print the root split of *tree* (one level) as ASCII art.

    NOTE(review): the spacing inside the literals below appears collapsed by
    the notebook export; compare with the original before relying on layout.
    """
    split_name = tree['splitting_feature'] # split_name is something like 'term. 36 months'
    if split_name is None:
        print "(leaf, label: %s)" % tree['prediction']
        return None
    split_feature, split_value = split_name.split('.')
    print ' %s' % name
    print ' |---------------|----------------|'
    print ' | |'
    print ' | |'
    print ' | |'
    print ' [{0} == 0] [{0} == 1] '.format(split_name)
    print ' | |'
    print ' | |'
    print ' | |'
    print ' (%s) (%s)' \
        % (('leaf, label: ' + str(tree['left']['prediction']) if tree['left']['is_leaf'] else 'subtree'),
           ('leaf, label: ' + str(tree['right']['prediction']) if tree['right']['is_leaf'] else 'subtree'))
# Walk down the tree one stump at a time to inspect the learned splits.
print_stump(my_decision_tree)
print_stump(my_decision_tree['left'], my_decision_tree['splitting_feature'])
print_stump(my_decision_tree['left']['left'], my_decision_tree['left']['splitting_feature'])
print_stump(my_decision_tree['right'], my_decision_tree['splitting_feature'])
print_stump(my_decision_tree['right']['right'], my_decision_tree['right']['splitting_feature'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: To compute most measures, the data (i.e., the objectives) is normalized. Normalization is scaling the data between 0 and 1. Why do we normalize?
Step11: Data Format
Step14: Reference Set
Step18: Spread
Step21: IGD = inter-generational distance; i.e. how good are you compared to the best known?
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# All the imports (print_function/division keep Python 2/3 behaviour aligned).
from __future__ import print_function, division
import pom3_ga, sys
import pickle
# TODO 1: Enter your unity ID here
__author__ = "latimko"
def normalize(problem, points):
    """Normalize all the objectives in each point and return them.

    Each objective value is rescaled to [0, 1] using the low/high bounds
    declared on problem.objectives; degenerate objectives (high == low)
    map to 0 to avoid division by zero.
    """
    meta = problem.objectives
    all_objs = []
    for point in points:
        objs = []
        for i, o in enumerate(problem.evaluate(point)):
            low, high = meta[i].low, meta[i].high
            # TODO 3: Normalize 'o' between 'low' and 'high'.
            if high == low:
                # If low equals high, dividing would blow up, so clamp to 0.
                objs.append(0)
                continue
            objs.append((o - low) / (high - low))
        all_objs.append(objs)
    return all_objs
# Performing experiments for [5, 10, 50] generations.
# (The line above was stray markdown text in the export; made a comment.)
problem = pom3_ga.POM3()
pop_size = 10
repeats = 10
test_gens = [5, 10, 50]
def save_data(file_name, data):
    """Save 'data' to '<file_name>.pkl' using the highest pickle protocol."""
    with open(file_name + ".pkl", 'wb') as f:
        pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
def load_data(file_name):
    """Retrieve data from '<file_name>.pkl' (counterpart of save_data)."""
    with open(file_name + ".pkl", 'rb') as f:
        return pickle.load(f)
def build(problem, pop_size, repeats, test_gens):
    """Repeat the experiment for 'repeats' repeats for each value in 'test_gens'.

    Returns {gens: [normalized fronts]}; key 0 holds the initial
    (un-evolved) populations for comparison.
    """
    tests = {t: [] for t in test_gens}
    tests[0] = []  # For Initial Population
    for _ in range(repeats):
        init_population = pom3_ga.populate(problem, pop_size)
        pom3_ga.say(".")  # progress dot per repeat
        for gens in test_gens:
            tests[gens].append(normalize(problem, pom3_ga.ga(problem, init_population, retain_size=pop_size, gens=gens)[1]))
        tests[0].append(normalize(problem, init_population))
    print("\nCompleted")
    return tests
# Repeat Experiments:
# tests = build(problem, pop_size, repeats, test_gens)
# Save Experiment Data into a file:
# save_data("dump", tests)
# Load the experimented data from dump.
# (Stray markdown lines from the export were converted into comments above.)
tests = load_data("dump")
print(tests.keys())
def make_reference(problem, *fronts):
    """Make a reference set comparing all the fronts.

    Here the comparison we use is bdom (binary domination); it can be
    altered to use cdom as well.  The pooled points are ranked by how
    many others they dominate and the top len(fronts[0]) are returned.
    """
    retain_size = len(fronts[0])
    reference = []
    for front in fronts:
        reference += front
    def bdom(one, two):
        """Return True if 'one' dominates 'two', else return False.

        :param one: [pt1_obj1, pt1_obj2, pt1_obj3, pt1_obj4]
        :param two: [pt2_obj1, pt2_obj2, pt2_obj3, pt2_obj4]
        """
        dominates = False
        for i, obj in enumerate(problem.objectives):
            gt, lt = pom3_ga.gt, pom3_ga.lt
            # "Better" direction depends on whether the objective is minimized.
            better = lt if obj.do_minimize else gt
            if better(one[i], two[i]):
                dominates = True
            elif one[i] != two[i]:
                # Strictly worse on some objective => cannot dominate.
                return False
        return dominates
    def fitness(one, dom):
        # Number of pooled points that *one* dominates.
        return len([1 for another in reference if dom(one, another)])
    fitnesses = []
    for point in reference:
        fitnesses.append((fitness(point, bdom), point))
    # Keep the points that dominate the most others.
    reference = [tup[1] for tup in sorted(fitnesses, reverse=True)]
    return reference[:retain_size]
# The reference set must keep exactly as many points as one front.
assert len(make_reference(problem, tests[5][0], tests[10][0], tests[50][0])) == len(tests[5][0])
def eucledian(one, two):
    """Compute Eucledian Distance between 2 vectors.

    We assume the input vectors are normalized.

    NOTE(review): the sum of squared differences is divided by the vector
    length before the square root, so this is the Euclidean distance scaled
    by 1/sqrt(n); kept as-is because spread/igd only use it comparatively.

    :param one: Vector 1
    :param two: Vector 2
    :return: scaled Euclidean distance
    """
    return (sum([(o - t) ** 2 for o, t in zip(one, two)]) / len(one)) ** 0.5
def sort_solutions(solutions):
    """Sort a list of lists before computing spread.

    Solutions are ordered lexicographically by weighting earlier
    objectives with higher powers of ten.
    """
    def sorter(lst):
        m = len(lst)
        # range() instead of the Python-2-only xrange(); same values.
        weights = reversed([10 ** i for i in range(m)])
        return sum([element * weight for element, weight in zip(lst, weights)])
    return sorted(solutions, key=sorter)
def closest(one, many):
    """Return (distance, point) for the point in *many* nearest to *one*,
    using the eucledian() metric."""
    # float('inf') replaces the Python-2-only sys.maxint sentinel.
    min_dist = float('inf')
    closest_point = None
    for this in many:
        dist = eucledian(this, one)
        if dist < min_dist:
            min_dist = dist
            closest_point = this
    return min_dist, closest_point
def spread(obtained, ideals):
    """Calculate the spread (a.k.a. diversity) for a set of solutions.

    Combines the distances from the ideal-front extremes (d_f, d_l) with
    the variation of consecutive gaps in the obtained front; lower means
    more evenly spread solutions.
    """
    s_obtained = sort_solutions(obtained)
    s_ideals = sort_solutions(ideals)
    # Distances from the two extreme ideal points to the obtained front.
    d_f = closest(s_ideals[0], s_obtained)[0]
    d_l = closest(s_ideals[-1], s_obtained)[0]
    n = len(s_ideals)
    # Gaps between consecutive obtained solutions.
    distances = []
    for i in range(len(s_obtained) - 1):
        distances.append(eucledian(s_obtained[i], s_obtained[i + 1]))
    d_bar = sum(distances) / len(distances)
    d_sum = sum([abs(d_i - d_bar) for d_i in distances])
    delta = (d_f + d_l + d_sum) / (d_f + d_l + (n - 1) * d_bar)
    return delta
# Spread (diversity) of each front against the combined reference set.
ref = make_reference(problem, tests[5][0], tests[10][0], tests[50][0])
print(spread(tests[5][0], ref))
print(spread(tests[10][0], ref))
print(spread(tests[50][0], ref))
def igd(obtained, ideals):
    """Compute the IGD for a set of solutions.

    Mean distance from each ideal point to its nearest obtained point
    (lower is better).

    :param obtained: Obtained pareto front
    :param ideals: Ideal pareto front
    :return: mean nearest-neighbour distance
    """
    return sum([closest(ideal, obtained)[0] for ideal in ideals]) / len(ideals)
# Convergence (IGD) of each front against the combined reference set.
ref = make_reference(problem, tests[5][0], tests[10][0], tests[50][0])
print(igd(tests[5][0], ref))
print(igd(tests[10][0], ref))
print(igd(tests[50][0], ref))
import sk
# reload() is the Python-2 builtin; on Python 3 use importlib.reload(sk).
sk = reload(sk)
def format_for_sk(problem, data, measure):
    """Convert the experiment data into the format required for sk.py and
    compute the desired 'measure' for all the data."""
    # list() so the keys are indexable on Python 3 as well.
    gens = list(data.keys())
    reps = len(data[gens[0]])
    measured = {gen: ["gens_%d" % gen] for gen in gens}
    for i in range(reps):
        # Build one reference set per repeat from all generation counts.
        ref_args = [data[gen][i] for gen in gens]
        ref = make_reference(problem, *ref_args)
        for gen in gens:
            measured[gen].append(measure(data[gen][i], ref))
    return measured
def report(problem, tests, measure):
    """Rank the measure across generation counts with sk.rdivDemo."""
    measured = list(format_for_sk(problem, tests, measure).values())
    sk.rdivDemo(measured)
print("*** IGD ***")
report(problem, tests, igd)
print("\n*** Spread ***")
report(problem, tests, spread)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
from scipy import interpolate
import numpy as np
# 9 samples of 5 series: x holds abscissae, y the corresponding values.
x = np.array([[0.12, 0.11, 0.1, 0.09, 0.08],
              [0.13, 0.12, 0.11, 0.1, 0.09],
              [0.15, 0.14, 0.12, 0.11, 0.1],
              [0.17, 0.15, 0.14, 0.12, 0.11],
              [0.19, 0.17, 0.16, 0.14, 0.12],
              [0.22, 0.19, 0.17, 0.15, 0.13],
              [0.24, 0.22, 0.19, 0.16, 0.14],
              [0.27, 0.24, 0.21, 0.18, 0.15],
              [0.29, 0.26, 0.22, 0.19, 0.16]])
y = np.array([[71.64, 78.52, 84.91, 89.35, 97.58],
              [66.28, 73.67, 79.87, 85.36, 93.24],
              [61.48, 69.31, 75.36, 81.87, 89.35],
              [57.61, 65.75, 71.7, 79.1, 86.13],
              [55.12, 63.34, 69.32, 77.29, 83.88],
              [54.58, 62.54, 68.7, 76.72, 82.92],
              [56.58, 63.87, 70.3, 77.69, 83.53],
              [61.67, 67.79, 74.41, 80.43, 85.86],
              [70.08, 74.62, 80.93, 85.06, 89.84]])
# Common evaluation grid; extends well beyond the data, so the splines
# extrapolate.
x_val = np.linspace(-1, 1, 100)
result = np.zeros((5, 100))
for i in range(5):
    # Quadratic smoothing spline per column; s=4 sets the smoothing amount.
    extrapolator = interpolate.UnivariateSpline(x[:, i], y[:, i], k = 2, s = 4)
    y_int = extrapolator(x_val)
    result[i, :] = y_int
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating a Reddit Application
Step2: Capturing Reddit Posts
Step3: Leveraging Reddit's Voting
Step4: Following Multiple Subreddits
Step5: Accessing Reddit Comments
Step6: Other Functionality
Step7: Connecting to the Facebook Graph
Step8: Now we can use the Facebook Graph API with this temporary access token (it does expire after maybe 15 minutes).
Step9: Parsing Posts from a Public Page
Step10: <hr>
Step11: Creating Twitter Credentials
Step 1. Create a Twitter account If you haven't already done this, do this now at Twitter.com.
Step 2. Setting your mobile number Log into Twitter and go to "Settings." From there, click "Mobile" and fill in an SMS-enabled phone number. You will be asked to confirm this number once it's set, and you'll need to do so before you can create any apps for the next step.
Step 3. Create an app in Twitter's Dev site Go to (apps.twitter.com), and click the "Create New App" button. Fill in the "Name," "Description," and "Website" fields, leaving the callback one blank (we're not going to use it). Note that the website must be a fully qualified URL, so it should look like
Step12: Connecting to Twitter
Step13: Testing our Connection
Step14: Searching Twitter for Keywords
Step15: More Complex Queries
Step16: Dealing with Pages
Step23: Other Search Functionality
Step24: Now we set up the stream using the listener above
|
<ASSISTANT_TASK:>
Python Code:
# For our first piece of code, we need to import the package
# that connects to Reddit. Praw is a thin wrapper around reddit's
# web APIs and works well
import praw
# Now we specify a "unique" user agent for our code
# This is primarily for identification, I think, and some
# user-agents of bad actors might be blocked
# WARNING: client_secret is a credential; do not publish real values.
redditApi = praw.Reddit(client_id='OdpBKZ1utVJw8Q',
                        client_secret='KH5zzauulUBG45W-XYeAS5a2EdA',
                        user_agent='crisis_informatics_v01')
# Newest posts from a single subreddit.
subreddit = "worldnews"
targetSub = redditApi.subreddit(subreddit)
submissions = targetSub.new(limit=10)
for post in submissions:
    print(post.title)
# "Hot" posts leverage Reddit's voting.
subreddit = "worldnews"
targetSub = redditApi.subreddit(subreddit)
submissions = targetSub.hot(limit=5)
for post in submissions:
    print(post.title)
# '+' joins multiple subreddits into one feed.
subreddit = "worldnews+aww"
targetSub = redditApi.subreddit(subreddit)
submissions = targetSub.new(limit=10)
for post in submissions:
    print(post.title)
# Walk the comment tree (two levels) of the current top post.
subreddit = "worldnews"
breadthCommentCount = 5
targetSub = redditApi.subreddit(subreddit)
submissions = targetSub.hot(limit=1)
for post in submissions:
    print (post.title)
    post.comment_limit = breadthCommentCount
    # Get the top few comments
    for comment in post.comments.list():
        # MoreComments placeholders require extra fetches; skip them.
        if isinstance(comment, praw.models.MoreComments):
            continue
        print ("---", comment.name, "---")
        print ("\t", comment.body)
        for reply in comment.replies.list():
            if isinstance(reply, praw.models.MoreComments):
                continue
            print ("\t", "---", reply.name, "---")
            print ("\t\t", reply.body)
# As before, the first thing we do is import the Facebook
# wrapper
import facebook
# WARNING: access tokens are credentials; this one is temporary and
# expires after roughly 15 minutes.
fbAccessToken = "EAACEdEose0cBAKZAZBoGzF6ZAJBk3uSB0gXSgxPrZBJ5nsZCXkM25xZBT0GzVABvsZBOvARxRukoLxhVEyO42QO1D1IInuE1ZBgQfffxh10BC0iHJmnKfNGHn9bY6ioZA8gHTYAXoOGL0A07hZBKXxMKO1yS3ZAPDB50MVGLBxDjJJDWAYBFhUIoeaAaMAZAzxcT4lMZD"
# Connect to the graph API, note we use version 2.5
graph = facebook.GraphAPI(access_token=fbAccessToken, version='2.5')
# What page to look at?
targetPage = "nytimes"
# Other options for pages:
# nytimes, bbc, bbcamerica, bbcafrica, redcross, disaster
maxPosts = 10 # How many posts should we pull?
maxComments = 5 # How many comments for each post?
post = graph.get_object(id=targetPage + '/feed')
# For each post, print its message content and its ID
for v in post["data"][:maxPosts]:
    print ("---")
    print (v["message"], v["id"])
    # For each comment on this post, print its number,
    # the name of the author, and the message content
    print ("Comments:")
    comments = graph.get_object(id='%s/comments' % v["id"])
    for (i, comment) in enumerate(comments["data"][:maxComments]):
        print ("\t", i, comment["from"]["name"], comment["message"])
# For our first piece of code, we need to import the package
# that connects to Twitter. Tweepy is a popular and fully featured
# implementation.
import tweepy
# Use the strings from your Twitter app webpage to populate these four
# variables. Be sure and put the strings BETWEEN the quotation marks
# to make it a valid Python string.
# WARNING: real credentials are hard-coded here; revoke/rotate before sharing.
consumer_key = "IQ03DPOdXz95N3rTm2iMNE8va"
consumer_secret = "0qGHOXVSX1D1ffP7BfpIxqFalLfgVIqpecXQy9SrUVCGkJ8hmo"
access_token = "867193453159096320-6oUq9riQW8UBa6nD3davJ0SUe9MvZrZ"
access_secret = "5zMwq2DVhxBnvjabM5SU2Imkoei3AE6UtdeOQ0tzR9eNU"
# Now we use the configured authentication information to connect
# to Twitter's API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
print("Connected to Twitter!")
# Get tweets from our timeline
public_tweets = api.home_timeline()
# print the first five authors and tweet texts
for tweet in public_tweets[:5]:
    print (tweet.author.screen_name, tweet.author.name, "said:", tweet.text)
# Our search string
queryString = "earthquake"
# Perform the search
matchingTweets = api.search(queryString)
print ("Searched for:", queryString)
print ("Number found:", len(matchingTweets))
# For each tweet that matches our query, print the author and text
print ("\nTweets:")
for tweet in matchingTweets:
    print (tweet.author.screen_name, tweet.text)
# Lets find only media or links about earthquakes
queryString = "earthquake (filter:media OR filter:links)"
# Perform the search
matchingTweets = api.search(queryString)
print ("Searched for:", queryString)
print ("Number found:", len(matchingTweets))
# For each tweet that matches our query, print the author and text
print ("\nTweets:")
for tweet in matchingTweets:
    print (tweet.author.screen_name, tweet.text)
# Lets find only media or links about earthquakes
queryString = "earthquake (filter:media OR filter:links)"
# How many tweets should we fetch? Upper limit is 1,500
maxToReturn = 100
# Perform the search, and for each tweet that matches our query,
# print the author and text (Cursor transparently handles result pages)
print ("\nTweets:")
for status in tweepy.Cursor(api.search, q=queryString).items(maxToReturn):
    print (status.author.screen_name, status.text)
# First, we need to create our own listener for the stream
# that will stop after a few tweets
class LocalStreamListener(tweepy.StreamListener):
    """A simple stream listener that breaks out after X tweets."""

    # Max number of tweets
    maxTweetCount = 10

    def __init__(self):
        tweepy.StreamListener.__init__(self)
        # Set current counter
        self.currentTweetCount = 0
        # For writing out to a file
        self.filePtr = None

    def set_log_file(self, newFile):
        """Create a log file, closing any previously opened one."""
        if self.filePtr:
            self.filePtr.close()
        self.filePtr = newFile

    def close_log_file(self):
        """Close log file."""
        if self.filePtr:
            self.filePtr.close()

    def on_data(self, data):
        """Pass data up to parent then check if we should stop."""
        print(self.currentTweetCount)
        tweepy.StreamListener.on_data(self, data)
        if self.currentTweetCount >= self.maxTweetCount:
            # Returning False tells tweepy to disconnect the stream.
            return False

    def on_status(self, status):
        """Increment the number of statuses we've seen and log the text."""
        self.currentTweetCount += 1
        # Could write this status to a file instead of to the console
        print(status.text)
        # If we have specified a file, write to it
        if self.filePtr:
            self.filePtr.write("%s\n" % status._json)

    # Error handling below here
    def on_exception(self, exc):
        print(exc)

    def on_limit(self, track):
        """Called when a limitation notice arrives."""
        print("Limit", track)
        return

    def on_error(self, status_code):
        """Called when a non-200 status code is returned."""
        print("Error:", status_code)
        return False

    def on_timeout(self):
        """Called when stream connection times out."""
        print("Timeout")
        return

    def on_disconnect(self, notice):
        """Called when twitter sends a disconnect notice."""
        print("Disconnect:", notice)
        return

    def on_warning(self, notice):
        """Called when a disconnection warning message arrives."""
        print("Warning:", notice)
# Stream tweets matching keywords (stops after maxTweetCount statuses).
listener = LocalStreamListener()
localStream = tweepy.Stream(api.auth, listener)
# Stream based on keywords
localStream.filter(track=['earthquake', 'disaster'])
listener = LocalStreamListener()
localStream = tweepy.Stream(api.auth, listener)
# List of screen names to track
screenNames = ['bbcbreaking', 'CNews', 'bbc', 'nytimes']
# Twitter stream uses user IDs instead of names
# so we must convert
userIds = []
for sn in screenNames:
    user = api.get_user(sn)
    userIds.append(user.id_str)
# Stream based on users
localStream.filter(follow=userIds)
listener = LocalStreamListener()
localStream = tweepy.Stream(api.auth, listener)
# Specify coordinates for a bounding box around area of interest
# In this case, we use San Francisco
swCornerLat = 36.8
swCornerLon = -122.75
neCornerLat = 37.8
neCornerLon = -121.75
boxArray = [swCornerLon, swCornerLat, neCornerLon, neCornerLat]
# Say we want to write these tweets to a file
# NOTE(review): 'codecs' is used below but not imported in this excerpt --
# presumably imported in an earlier cell; verify.
listener.set_log_file(codecs.open("tweet_log.json", "w", "utf8"))
# Stream based on location
localStream.filter(locations=boxArray)
# Close the log file
listener.close_log_file()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then you can use basic OCaml expressions and standard library... from IPython or Jupyter notebook with IPython kernel, without having to install OCaml yourself on your laptop!
Step2: It would be a great project to show to students studying Python and C and OCaml
Step3: A first example
Step4: Natively, IPython/Jupyter supports lots of "magic commands", and especially %%bash, %%perl, %%javascript and %%ruby interface to famous scripting languages, and a generic %%script one.
Step5: Not that it has been possible, since a long time, to call an OCaml toplevel like this!
Step6: But it does nothing else than opening a sub-process, running ocaml command, feeding it the content of the cell, and then exiting.
Step7: As I was saying, using %%script ocaml allows to quickly check things, like for instance the interface of a module!
Step8: Using OCaml returned values from Python
Step9: So booleans, strings, integers and floats get perfectly mapped from OCaml values to Python native values.
Step10: So 'a list, 'a array and 'a * 'b * .. heterogeneous tuples get perfectly mapped from OCaml values to Python native values!
Step11: And for functions
Step12: Or simply
Step13: What about user defined types?
Step14: It fails
Step15: Indeed the %ocam magic only works for expression, with no ;;.
Step16: Polymorphic function - works!
Step17: Woooo, somehow OCaml accepted a polymorphic list at some point?
Step18: Another example
Step19: Option type
Step20: Note that this limitation was explained
Step21: Record - fail
Step22: Of course it fails!
Step23: Polymorphic variants - fail
Step24: Optional arguments - fail
Step25: Recursive list?
Step26: It fails, but takes 100% CPU and freezes. But in Python we can do it
Step27: From standard library
Step28: Stack (or any module using a custom type) - fail
Step29: 😮 Curryed functions!
Step30: But you can't directly use it for partial application
Step31: With the Python standard library, it's possible to use functools.partial to obtain partially evaluated functions, which can be viewed as a limited support of Curryed function.
Step32: But in OCaml, the conventions is to directly write functions in Curry form, rather than tuple form
Step33: Indeed, in most situations, the tuple form is just not "OCaml"esque, and tedious to use, and does not allow partial application!
Step34: Well that was some long explanation, but now comes the magic!
Step35: The only information we have on this function is the OCaml signature, in its docstring
Step36: So we can't use it as a classical 3-arguments Python function
Step37: But we CAN use it as a Curryed function!
Step38: Which is awesome because now we can do partial evaluation as in OCaml!
Step39: What's very cool is that these functions docstrings keep showing the signature of the underlying OCaml function, even if they were obtained from pure Python cells!
Step40: That's it for this feature, it's cool and interesting.
Step41: But what about full cell mode, %%ocaml?
Step42: ==> Answer
Step43: Docstring
Step44: Also note that the ocaml module is shipped with an example of a tiny module which was written in OCaml and compiled, being made available to Python directly
Step45: Final note
Step46: Now for a "large" array, let's use IPython %timeit magic for very quick benchmarking.
Step47: Well that's better than what I expected!
Step48: Just to check the experimental values of $\alpha$ and $\beta$ in my claim above, let's use numpy.polyfit function
|
<ASSISTANT_TASK:>
Python Code:
# Install and load the 'ocaml' bridge package (provides the %ocaml / %%ocaml
# IPython magics).
!pip3 install ocaml
import ocaml
# Evaluate an OCaml expression and get the result back as a Python value.
answer_to_life = %ocaml 40 + 2
print(answer_to_life)
print(type(answer_to_life)) # a real integer!
%load_ext watermark
%watermark -v -p ocaml
%%ocaml
print_endline "Hello world from OCaml running in Jupyter (from IPython)!";;
%%bash
echo "Hello world from Bash running in Jupyter (from IPython)"
%%script bash
echo "Hello world from Bash running in Jupyter (from IPython)"
%%script ocaml
print_endline "Hello world from OCaml running in Jupyter (from IPython)!";;
%%ocaml
let sum : (int list -> int) = List.fold_left (+) 0 in
let a_list (n:int) : int list = Array.to_list (Array.init n (fun i -> i*i+30)) in
for n = 300000 to 300010 do
Format.printf "\nList of size %2.i had sum = %4.i.%!" n (sum (a_list n));
done;;
%%script ocaml
#show Array;;
b = %ocaml true
print(type(b), b)
s = %ocaml "OK ?"
print(type(s), s)
i = %ocaml 2021
print(type(i), i)
f = %ocaml 2.99792458
print(type(f), f)
l = %ocaml [1, 3, 5]
print(type(l), l)
a = %ocaml [|2; 4; 6|]
print(type(a), a)
t = %ocaml (23, 02, 2021)
print(type(t), t)
c = %ocaml 'C'
print(type(c), c)
sum_ocaml_1 = %ocaml let sum : (int list -> int) = List.fold_left (+) 0 in sum
print(sum_ocaml_1, type(sum_ocaml_1))
sum_ocaml_1 ([1, 2, 3, 4, 5]) # 15
sum_ocaml_2 = %ocaml List.fold_left (+) 0
sum_ocaml_2 ([1, 2, 3, 4, 5]) # 15
%%ocaml
type state = TODO | DONE | Unknown of string;;
let print_state (s:state) =
match s with
| TODO -> Format.printf "TODO%!"
| DONE -> Format.printf "DONE%!"
| Unknown status -> Format.printf "%s%!" status
;;
print_state TODO;;
t = %ocaml type TODO | DONE
xref = %ocaml ref 1
cons = %ocaml fun hd tl -> hd :: tl
print(cons, type(cons))
cons(10)([20, 30])
cons(1.0)([2.0, 30])
head, tail = %ocaml List.hd, List.tl
a_list = [1, 2, 3]
a_list.append(a_list)
head(a_list), tail(a_list)
smaller = %ocaml fun (x: int) (y: int) -> x < y
print(smaller)
help(smaller)
smaller_poly = %ocaml fun (x: 'a) (y: 'a) -> x < y
print(smaller_poly)
help(smaller_poly)
none = %ocaml None
print(none, type(none))
some_int = %ocaml Some 42
print(some_int, type(some_int))
# instinguishable from None, so that's weird!
some_None = %ocaml Some None
print(some_None, type(some_None))
# val fold_left : f:('a -> 'b -> 'a) -> init:'a -> 'b list -> 'a
fold_left = %ocaml ListLabels.fold_left
print(fold_left, type(fold_left))
help(fold_left:)
fold_left(lambda x: lambda y: x + y)(0)([1, 2, 3, 4, 5])
%%ocaml
type ratio = {num: int; denom: int};;
let add_ratio r1 r2 =
{num = r1.num * r2.denom + r2.num * r1.denom;
denom = r1.denom * r2.denom};;
add_ratio {num=1; denom=3} {num=2; denom=5};;
%ocaml {num=1; denom=3}
exc = %ocaml exception Empty_list
%%ocaml
Format.printf "%i%!" (let value `float = 0 in value `float);;
zero = %ocaml let value `float = 0 in value `float
print(zero, type(zero))
variant = %ocaml `float
print(variant, type(variant))
%%ocaml
let bump ?(step = 1) x = x + step;;
Format.printf "\n%i%!" (bump 41);;
Format.printf "\n%i%!" (bump ~step:12 30);;
bump = %ocaml let bump ?(step = 1) x = x + step in bump
%%ocaml
let rec list1 = 0 :: list2
and list2 = 1 :: list1
in
Format.printf "%i -> %i -> %i -> %i ...%!" (List.hd list1) (List.hd list2) (List.hd (List.tl list2)) (List.hd (List.tl list1));;
# don't run
if False:
list1, list2 = %ocaml let rec list1 = 0 :: list2 and list2 = 1 :: list1 in (list1, list2)
list1 = [0]
list2 = [1]
list1.append(list2)
list2.append(list1)
print(list1)
print(list2)
%%ocaml
module IntPairs = struct
type t = int * int
let compare (x0,y0) (x1,y1) =
match Stdlib.compare x0 x1 with
0 -> Stdlib.compare y0 y1
| c -> c
end
module PairsMap = Map.Make(IntPairs)
let m = PairsMap.(empty |> add (0,1) "hello" |> add (1,0) "world")
(* not an expression, not usable in %ocaml magic *)
stack = %ocaml Stack.create()
def product3values(x, y, z):
    """Return the product of the three arguments (x * y * z)."""
    partial = x * y
    return partial * z
x = 1
y = 2
partial_product = product3values(x, y)
z = 10
print(f"With x = {x}, y = {y}, and {partial_product} applied to z = {z}, we got {partial_product(z)}")
import functools
partial_product = functools.partial(product3values, 1, 2)
z = 10
print(f"With x = {x}, y = {y}, and {partial_product} applied to z = {z}, we got {partial_product(z)}")
%%ocaml
(* this is advised *)
let product_curry (x:int) (y:int) (z:int) : int = x * y * z in
let x = 1 and y = 2 in
let partial_product = product_curry x y in
let z = 10 in
Format.printf "With x = %i, y = %i, and partial_product applied to z = %i, we got %i." x y z (partial_product z);;
%%ocaml
(* this is NOT advised *)
let product_curry (xyz : (int * int * int)) : int =
let x, y, z = xyz in
x * y * z
in
let x = 1 and y = 2 in
let partial_product = product_curry x y in
let z = 10 in
Format.printf "With x = %i, y = %i, and partial_product applied to z = %i, we got %i." x y z (partial_product z);;
product_curry = %ocaml let product_curry (x:int) (y:int) (z:int) : int = x * y * z in product_curry
help(product_curry)
product_curry(1, 2, 10)
product_curry(1)(2)(10)
partial_product_1 = product_curry(1)
partial_product_1(2)(10)
partial_product_2 = product_curry(1)(2)
partial_product_2_too = partial_product_1(2)
partial_product_2(10), partial_product_2_too(10)
help(partial_product_1)
help(partial_product_2)
help(partial_product_2_too)
%ocaml let x = 1 in x
%ocaml x
%%ocaml
(* See https://en.wikipedia.org/wiki/42_(number) *)
let answer_to_life = 42 in
Format.printf "\n... « The answer to life, the universe, and everything is %i »%!" answer_to_life;;
%%ocaml
Format.printf "\n... « The answer to life, the universe, and everything is %i »%!" answer_to_life;;
?%ocaml
"opttoploop" in dir(ocaml)
# it doesn't have a docstring, don't try help(<...>) or <...>?
ocaml.ocaml.example_module.approx_pi
ocaml.ocaml.example_module.approx_pi(1000000)
import ocaml
import numpy as np
python_sum = sum
ocaml_sum = %ocaml List.fold_left (+.) 0.
numpy_sum = np.sum
print(python_sum( [1.0, 2.0, 3.0, 4.0, 5.0] ))
print(ocaml_sum( [1.0, 2.0, 3.0, 4.0, 5.0] ))
print(numpy_sum( [1.0, 2.0, 3.0, 4.0, 5.0] ))
sizes = [100, 1000, 10000, 100000, 1000000, 10000000]
print(f"Comparing time of python_sum and ocaml_sun :")
for size in sizes:
print(f"\n- For size = {size}:")
X = list(np.random.randn(size))
print("\tFor python sum: ", end='')
%timeit python_sum(X)
assert np.isclose(python_sum(X), ocaml_sum(X))
print("\tFor OCaml sum: ", end='')
%timeit ocaml_sum(X)
assert np.isclose(python_sum(X), numpy_sum(X))
print("\tFor numpy.sum: ", end='')
%timeit numpy_sum(X)
import matplotlib.pyplot as plt
µs = 1
ms = 1000*µs
s = 1000*ms
X = sizes
# TODO: get this automatically?
Y_python = [ 7.27*µs, 72.1*µs, 786*µs, 7.55*ms, 68.2*ms, 677*ms ]
Y_ocaml = [ 16*µs, 157*µs, 1.8*ms, 24.2*ms, 286*ms, 2.92*s ]
Y_numpy = [ 12*µs, 67.7*µs, 615*µs, 6.25*ms, 62.6*ms, 632*ms ]
fig = plt.figure(figsize=(14, 10), dpi=300)
plt.loglog(X, Y_python, color="blue", marker="o", label="naive Python", lw=4, ms=15)
plt.loglog(X, Y_ocaml, color="green", marker="d", label="using OCaml", lw=4, ms=15)
plt.loglog(X, Y_numpy, color="orange", marker="s", label="using Numpy", lw=4, ms=15)
plt.ylabel("Time in micro-seconds")
plt.xlabel("Size of input list")
plt.legend()
plt.title("Tiny benchmark comparing OCaml binding to Python")
plt.show()
np.polyfit(Y_python, Y_ocaml, deg=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <table align="left">
Step2: Install the latest version of the Vertex AI client library.
Step3: Run the following command in your notebook environment to install witwidget
Step4: Run the following command in your notebook environment to install joblib
Step5: Run the following command in your notebook environment to install scikit-learn
Step6: Run the following command in your notebook environment to install fsspec
Step7: Run the following command in your notebook environment to install gcsfs
Step8: Restart the kernel
Step9: Before you begin
Step10: Otherwise, set your project ID here.
Step11: Timestamp
Step12: Authenticate your Google Cloud account
Step13: Create a Cloud Storage bucket
Step14: Only if your bucket doesn't already exist
Step15: Finally, validate access to your Cloud Storage bucket by examining its contents
Step16: Tutorial
Step17: Analyze the dataset
Step18: Check for null values.
Step19: Check the type of transactions involved.
Step20: Working with imbalanced data
Step21: Prepare data for modeling
Step22: Remove the outcome variable from the training data.
Step23: Split the data and assign 70% for training and 30% for testing.
Step24: Fit a random forest model
Step25: Analyzing Results
Step26: Use RandomForestClassifier's feature_importances_ function to get a better understanding about which features were the most useful to the model.
Step27: Save the model to a Cloud Storage path
Step28: Create a model in Vertex AI
Step29: Create an Endpoint
Step30: Deploy the model to the created Endpoint
Step31: What-If Tool
Step32: Running the What-If Tool on the local model
Step33: Running the What-If Tool on the deployed Vertex AI model
Step34: Undeploy the model
Step35: Clean up
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import google.auth
USER_FLAG = ""
# Google Cloud Notebook requires dependencies to be installed with '--user'
if "default" in dir(google.auth):
USER_FLAG = "--user"
! pip install {USER_FLAG} --upgrade google-cloud-aiplatform
! pip install {USER_FLAG} witwidget
! pip install {USER_FLAG} joblib
! pip install {USER_FLAG} scikit-learn
! pip install {USER_FLAG} fsspec
! pip install {USER_FLAG} gcsfs
# Automatically restart kernel after installs
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
! gcloud config set project $PROJECT_ID
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code
if not IS_GOOGLE_CLOUD_NOTEBOOK:
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "[your-bucket-name]" # @param {type:"string"}
REGION = "[your-region]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "[your-bucket-name]":
BUCKET_NAME = PROJECT_ID + "-vertex-ai-" + TIMESTAMP
BUCKET_URI = f"gs://{BUCKET_NAME}"
if REGION == "[your-region]":
REGION = "us-central1"
! gsutil mb -l $REGION $BUCKET_URI
! gsutil ls -al $BUCKET_URI
import warnings
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from google.cloud import aiplatform, storage
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (average_precision_score, classification_report,
confusion_matrix, f1_score)
from sklearn.model_selection import train_test_split
from witwidget.notebook.visualization import WitConfigBuilder, WitWidget
warnings.filterwarnings("ignore")
# Load dataset
df = pd.read_csv(
"gs://cloud-samples-data/vertex-ai/managed_notebooks/fraud_detection/fraud_detection_data.csv"
)
print("shape : ", df.shape)
df.head()
df.isnull().sum()
print(df.type.value_counts())
var = df.groupby("type").amount.sum()
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
var.plot(kind="bar")
ax1.set_title("Total amount per transaction type")
ax1.set_xlabel("Type of Transaction")
ax1.set_ylabel("Amount")
# Count number of fraudulent/non-fraudulent transactions
df.isFraud.value_counts()
piedata = df.groupby(["isFlaggedFraud"]).sum()
f, axes = plt.subplots(1, 1, figsize=(6, 6))
axes.set_title("% of fraud transaction detected")
piedata.plot(
kind="pie", y="isFraud", ax=axes, fontsize=14, shadow=False, autopct="%1.1f%%"
)
axes.set_ylabel("")
plt.legend(loc="upper left", labels=["Not Detected", "Detected"])
plt.show()
# Drop identifier columns and the bank's own flag column; they are not model features.
df.drop(["nameOrig", "nameDest", "isFlaggedFraud"], axis=1, inplace=True)
# One-hot encode the categorical transaction `type` column.
X = pd.concat([df.drop("type", axis=1), pd.get_dummies(df["type"])], axis=1)
X.head()
# Separate the label (isFraud) from the feature matrix.
y = X[["isFraud"]]
X = X.drop(["isFraud"], axis=1)
# 70/30 train/test split; shuffle=False keeps the original row order.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42, shuffle=False
)
print(X_train.shape, X_test.shape)
# Fit a random forest classifier (verbose=1 shows training progress).
print("before initiating")
forest = RandomForestClassifier(verbose=1)
print("after initiating")
forest.fit(X_train, y_train)
print("after fitting")
# Score the held-out set: class probabilities and hard predictions.
print("before predicting")
y_prob = forest.predict_proba(X_test)
print("after predicting y_prob")
y_pred = forest.predict(X_test)
# AUPRC and F1 are more informative than accuracy on this imbalanced data set.
print("AUPRC :", (average_precision_score(y_test, y_prob[:, 1])))
print("F1 - score :", (f1_score(y_test, y_pred)))
print("Confusion_matrix : ")
print(confusion_matrix(y_test, y_pred))
print("classification_report")
print(classification_report(y_test, y_pred))
print("after printing classification_report")
# Feature importances averaged over the trees, with their standard deviation.
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)
forest_importances = pd.Series(importances, index=list(X_train))
fig, ax = plt.subplots()
forest_importances.plot.bar(yerr=std, ax=ax)
ax.set_title("Feature Importance for Fraud Transaction Detection Model")
ax.set_ylabel("Importance")
fig.tight_layout()
# save the trained model to a local file "model.joblib"
FILE_NAME = "model.joblib"
joblib.dump(forest, FILE_NAME)
# Upload the saved model file to Cloud Storage
BLOB_PATH = "[your-blob-path]"
BLOB_NAME = os.path.join(BLOB_PATH, FILE_NAME)
bucket = storage.Client(PROJECT_ID).bucket(BUCKET_NAME)
blob = bucket.blob(BLOB_NAME)
blob.upload_from_filename(FILE_NAME)
MODEL_DISPLAY_NAME = "[your-model-display-name]"
ARTIFACT_GCS_PATH = f"{BUCKET_URI}/{BLOB_PATH}"
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.1-0:latest"
)
# Create a Vertex AI model resource
aiplatform.init(project=PROJECT_ID, location=REGION)
model = aiplatform.Model.upload(
display_name=MODEL_DISPLAY_NAME,
artifact_uri=ARTIFACT_GCS_PATH,
serving_container_image_uri=SERVING_CONTAINER_IMAGE_URI,
)
model.wait()
print(model.display_name)
print(model.resource_name)
ENDPOINT_DISPLAY_NAME = "[your-endpoint-display-name]"
endpoint = aiplatform.Endpoint.create(display_name=ENDPOINT_DISPLAY_NAME)
print(endpoint.display_name)
print(endpoint.resource_name)
DEPLOYED_MODEL_NAME = "[your-deployed-model-name]"
MACHINE_TYPE = "n1-standard-2"
# deploy the model to the endpoint
model.deploy(
endpoint=endpoint,
deployed_model_display_name=DEPLOYED_MODEL_NAME,
machine_type=MACHINE_TYPE,
)
model.wait()
print(model.display_name)
print(model.resource_name)
# collect 50 samples for each class-label from the test data
pos_samples = y_test[y_test["isFraud"] == 1].sample(50).index
neg_samples = y_test[y_test["isFraud"] == 0].sample(50).index
test_samples_y = pd.concat([y_test.loc[pos_samples], y_test.loc[neg_samples]])
test_samples_X = X_test.loc[test_samples_y.index].copy()
# define target and labels
TARGET_FEATURE = "isFraud"
LABEL_VOCAB = ["not-fraud", "fraud"]
# define the function to adjust the predictions
def adjust_prediction(pred):
    """Turn a positive-class score into the [negative, positive] pair
    expected by the What-If Tool."""
    negative_score = 1 - pred
    return [negative_score, pred]
# Combine the features and labels into one array for the What-If Tool
test_examples = np.hstack(
(test_samples_X.to_numpy(), test_samples_y.to_numpy().reshape(-1, 1))
)
# Configure the WIT to run on the locally trained model
config_builder = (
WitConfigBuilder(
test_examples.tolist(), test_samples_X.columns.tolist() + ["isFraud"]
)
.set_custom_predict_fn(forest.predict_proba)
.set_target_feature(TARGET_FEATURE)
.set_label_vocab(LABEL_VOCAB)
)
# display the WIT widget
WitWidget(config_builder, height=600)
# configure the target and class-labels
TARGET_FEATURE = "isFraud"
LABEL_VOCAB = ["not-fraud", "fraud"]
# function to return predictions from the deployed Model
def endpoint_predict_sample(instances: list):
    """Query the deployed Vertex AI endpoint and pair each returned
    positive-class score with its complement ([1 - score, score]),
    the format the What-If Tool expects."""
    response = endpoint.predict(instances=instances)
    paired_scores = []
    for score in response.predictions:
        paired_scores.append([1 - score, score])
    return paired_scores
# Combine the features and labels into one array for the What-If Tool
test_examples = np.hstack(
(test_samples_X.to_numpy(), test_samples_y.to_numpy().reshape(-1, 1))
)
# Configure the WIT with the prediction function
config_builder = (
WitConfigBuilder(
test_examples.tolist(), test_samples_X.columns.tolist() + ["isFraud"]
)
.set_custom_predict_fn(endpoint_predict_sample)
.set_target_feature(TARGET_FEATURE)
.set_label_vocab(LABEL_VOCAB)
)
# run the WIT-widget
WitWidget(config_builder, height=400)
endpoint.undeploy_all()
# delete the endpoint
endpoint.delete()
# delete the model
model.delete()
delete_bucket = True
if delete_bucket or os.getenv("IS_TESTING"):
! gsutil rm -r $BUCKET_URI
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mostanra már tudjuk, hogy hogyan ábrázoljunk egy matematikai függvényt
Step2: Írjunk egy függvényt, ami egy megadott frekvenciájú jelet rajzol ki!
Step3: Most jön a varázslat! Az interact() függvény segítségével interaktívvá tehetjük a fent definiált függvényünket!
Step4: Nézzük meg egy kicsit közelebbről, hogy is működik ez az interact() konstrukció! Definiáljunk ehhez először egy nagyon egyszerű függvényt!
Step5: Az interact egy olyan függvény, amely az első paramétereként egy függvényt vár, és kulcsszavakként várja a függvény bemenő paramétereit! Amit visszaad, az egy interaktív widget, ami lehet sokfajta, de alapvetően azt a célt szolgálja, hogy a func függvényt kiszolgálja. Annak ad egy bemenő paramétert, lefuttatja, majd vár, hogy a felhasználó újra megváltoztassa az állapotot.
Step6: Ha egy bool értéket adunk meg, akkor egy pipálható dobozt
Step7: Ha egy általános listát adunk meg, akkor egy legördülő menüt kapunk
Step8: Ha a sima zárójelbe írt számok nem egészek (legalább az egyik) akkor egy float csúszkát kapunk
Step9: Ha pontosan specifikálni szeretnénk, hogy milyen interaktivitást akarunk, akkor azt az alábbiak szerint tehetjük meg,
Step10: Ha egy függvényt sokáig tart kiértékelni, akkor interact helyett érdemes interact_manual-t használni. Ez csak akkor futtatja le a függvényt, ha a megjelenő gombot megnyomjuk.
Step11: A widgetekről bővebben itt található több információ. Végül nézünk meg egy több változós interactot!
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from ipywidgets import * # az interaktivitásért felelős csomag
t=linspace(0,2*pi,100);
plot(t,sin(t))
def freki(omega):
    # Plot one sine wave sin(omega*t) of angular frequency `omega` over the
    # global time axis `t` defined above. Relies on %pylab globals
    # (plot, sin) — assumes the pylab cell was run first.
    plot(t,sin(omega*t))
freki(2.0)
interact(freki,omega=(0,10,0.1));
def func(x):
    # Minimal callback for interact(): simply echo the widget's current value.
    print(x)
interact(func,x=(0,10));
interact(func,x=False);
interact(func,x=['hétfő','kedd','szerda']);
interact(func,x=(0,10,0.1));
interact(func,x=IntSlider(min=0,max=10,step=2,value=2,description='egesz szamos csuszka x='));
interact(func,x=FloatSlider(min=0,max=10,step=0.01,value=2,description='float szamos csuszka x='));
interact(func,x=Dropdown(options=['Hétfő','Kedd','Szerda'],description='legörülő x='));
interact(func,x=Checkbox());
interact(func,x=Text());
interact_manual(func,x=(0,10));
t=linspace(0,2*pi,100);
def oszci(A,omega,phi,szin):
    """Draw A*sin(omega*t + phi) in colour `szin` and mark the sample at t = pi.

    Relies on the %pylab globals (t, plot, sin, pi, ...) created above.
    """
    # full curve over the global time axis t
    plot(t,A*sin(omega*t+phi),color=szin)
    # highlight the single sample at t = pi with a marker
    plot(pi,A*sin(omega*pi+phi),'o')
    xlim(0,2*pi)
    ylim(-3,3)
    xlabel('$t$',fontsize=20)
    ylabel(r'$A\,\sin(\omega t+\varphi)$',fontsize=20)
    grid(True)
interact(oszci,
A =FloatSlider(min=1,max=2,step=0.1,value=2,description='A'),
omega=FloatSlider(min=0,max=10,step=0.1,value=2,description=r'$\omega$'),
phi =FloatSlider(min=0,max=2*pi,step=0.1,value=0,description=r'$\varphi$'),
szin =Dropdown(options=['red','green','blue','darkcyan'],description='szín'));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Outro estudo
Step2: And finally, let’s plot our data points on a 2-D graph to eyeball our dataset and see if we can manually find any relationship between the data using the below script
Step3: Let’s check the average max temperature and once we plot it we can observe that the Average Maximum Temperature is Between Nearly 25 and 35.
Step4: Our next step is to divide the data into “attributes” and “labels”.
Step5: Next, we split the data, assigning 80% to the training set and 20% to the test set, using the code below.
Step6: After splitting the data into training and testing sets, finally, the time is to train our algorithm. For that, we need to import LinearRegression class, instantiate it, and call the fit() method along with our training data.
Step7: As we have discussed that the linear regression model basically finds the best value for the intercept and slope, which results in a line that best fits the data. To see the value of the intercept and slop calculated by the linear regression algorithm for our dataset, execute the following code.
Step8: The result should be approximately 10.66185201 and 0.92033997 respectively.
Step9: Now compare the actual output values for X_test with the predicted values, execute the following script
Step10: We can also visualize comparison result as a bar graph using the below script
Step11: Though our model is not very precise, the predicted temperatures are close to the actual ones.
Step12: Outro Estudo
|
<ASSISTANT_TASK:>
Python Code:
# Estimated coefficients for the linear regression problem.
# If multiple targets are passed during the fit (y 2D), this is a 2D array of shape (n_targets, n_features),
# while if only one target is passed, this is a 1D array of length n_features.
regressor.coef_
regressor.intercept_ # Independent term in the linear model.
# Step 3: Predicting the Result
Y_pred = regressor.predict(X_test)
print(Y_pred)
#Step 4: Visualization
# Visualising the Training results
plt.scatter(X_train , Y_train, color = 'red')
plt.plot(X_train , regressor.predict(X_train), color ='blue')
# Visualizing the test results
plt.scatter(X_test , Y_test, color = 'red')
plt.plot(X_test , regressor.predict(X_test), color ='blue')
X_test
Y_test
# R^2 score of the fit on the training data.
regressor.score(X_train, Y_train)
regressor.predict(X_train)
regressor.predict(np.array([[2]])) # the value inside the double brackets is x on the fitted line; the return value is the predicted y
regressor.predict(np.array([[5]]))
regressor.predict(np.array([[1.5]])) # a value on the fitted line — not one of the training or test points
# https://www.kdnuggets.com/2019/03/beginners-guide-linear-regression-python-scikit-learn.html
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
%matplotlib inline
# Load the weather data set; each row is one day's observations.
dataset = pd.read_csv('Weather.csv')
# Quick look at the raw data.
dataset.head()
dataset.shape
dataset.info()
dataset.describe()
# Scatter daily minimum vs. maximum temperature to eyeball the relationship.
dataset.plot(x='MinTemp', y='MaxTemp', style='o')
plt.title('MinTemp vs MaxTemp')
plt.xlabel('MinTemp')
plt.ylabel('MaxTemp')
plt.show()
# Distribution of the target variable (MaxTemp).
plt.figure(figsize=(15,10))
plt.tight_layout()
seabornInstance.distplot(dataset['MaxTemp'])
# Attributes (X) and labels (y); reshape to the (n_samples, 1) form sklearn expects.
X = dataset['MinTemp'].values.reshape(-1,1)
y = dataset['MaxTemp'].values.reshape(-1,1)
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
regressor = LinearRegression()
regressor.fit(X_train, y_train) #training the algorithm
#To retrieve the intercept:
print(regressor.intercept_)
#For retrieving the slope:
print(regressor.coef_)
# Predict on the held-out test set and compare with the actual values.
y_pred = regressor.predict(X_test)
df = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': y_pred.flatten()})
df
# Bar chart of the first 25 actual vs. predicted values.
df1 = df.head(25)
df1.plot(kind='bar',figsize=(16,10))
plt.grid(which='major', linestyle='-', linewidth='0.5', color='green')
plt.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.show()
# The fitted regression line over the test points.
plt.scatter(X_test, y_test, color='gray')
plt.plot(X_test, y_pred, color='red', linewidth=2)
plt.show()
# Standard regression error metrics.
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# https://www.kdnuggets.com/2019/03/beginners-guide-linear-regression-python-scikit-learn.html/2
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as seabornInstance
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
%matplotlib inline
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: That's it! It is that easy. In fact, you can save this single line of code in a file ending in .py and then run it and you would get the same thing. Running a script can be done using the python command, but IPython is a better way to work.
Step3: Now we are going to write a little program (I would save this as a file, hence the docstring, but I put it here for clarity) to plot a utility function
Step4: This example illustrates some of the basic points of Python programming and syntax.
Step6: Modules and packages
Step7: we can directly fill the object x using what is called a list comprehension
Step8: A list comprehension is a succinct way to write for loop that creates a list. You essentially place all of the syntax within the list definition, between the []'s.
Step9: This is the simplest (for me) form of what's called an iteraterable, beyond simply a list of numbers. Many objects in Python are "iterable"
Step10: In Python, definite loops seem to be the norm, while in C indefinite loops are used more often. I encourage you to stick to definite loops, as indefinite ones can be unruly and a runaway loop can crash your computer quite easily.
Step11: Strings are different from lists, however, in the set of methods that are associated to them
Step12: Strings offer a ton of special methods, so if you are interested in them, check out the <a href="https
Step13: Dictionaries
|
<ASSISTANT_TASK:>
Python Code:
print('Hello World!')
%matplotlib inline
Origin: Plotting a utilty function.
Filename: example_utility.py
Author: Tyler Abbot
Last modified: 8 September, 2015
import numpy as np
import matplotlib.pyplot as plt
# Define the input variable
c = np.linspace(0.01, 10.0, 100.0)
# Calculate utility over the given space
U = np.log(c)
#Plot the function
plt.plot(c, U)
plt.xlabel('Consumption')
plt.ylabel('Utility')
plt.title('An Example Utility Function')
plt.show()
import this
x = []
for i in range(0, 50):
x.append(i)
print(x)
x = [i for i in range(0, 50)]
print(x)
names = ["var_1", "var_2", "var_3"]
for variable in names:
print(variable)
# Definite loop
for i in range(0, 10):
print(i)
# Indefinite loop
import random
x = 0.0
while x < 1:
x += random.random()
print(x)
# Define a string using quotes
x = 'Hello! I am a string!'
print(x)
# The type of quote is irrelevant
x = "Hello! I am a string!"
print(x)
# Reference stings in the same way as a list,
# but indices refer to position in the string
print(x[0])
print(x[:5])
# Strings support arithmetic operations similar to lists
print(x + x)
# Change the case
print(x.upper())
print(x.lower())
# Find the index of a substring
print(x.find('I am'))
print(x[x.find('I am'):x.find('I am') + 4])
# Defining a tuple with or without parentheses
tup = 'a', 'b'
tup = ('a', 'b')
# Tuples can contain different data
tup = 'a', 2
# Trying to modify a tuple will cause an error
tup[0] = 1
# Defining a dictionary
students_grades = {"Joe": [10., 15., 12.],
"Jane": [12., 16., 14.],
"Nick": [8., 6., 6.]}
print(students_grades)
# Retrieving information from the dictionary
print(students_grades['Joe'])
# Looping over the information
students_averages = {}
for student, grades in students_grades.items():
students_averages[student] = sum(grades)/len(grades)
print(students_averages)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we turn to GAIA. While the current DR2 does not explicitly mark catalog entries as multiple systems or extended sources, we can just look at the error in RA and dec. For stars as bright as the ones we've chosen, those errors are typically i nthe range 0.05 milliarcsec. Anything larger than that indicates that the source is not a good point source, so we want to reject it.
Step2: Next, we load all remaining sources.
Step3: Since the (x,y) positions are just picked by hand in ds9, they might be off by a pixel or two. However, for the fake insertion technique later, we want sources that are reasonably centered in the extracted sub-images. Thus, in the first round, we fit the (x,y) values, calculate the difference and then adjust the input (x,y) in our catalog.
Step4: Look at fit results with alpha and r0 free
Step5: Look at fit results when alpha is frozen
Step6: Select the PSF stars
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.gaia import Gaia
from astropy import table
from astropy.table import Table
from astropy.wcs import WCS
from astropy.io import fits
from astropy.nddata import NDData
from photutils.psf import extract_stars
import sherpa
import sherpa.astro.models
import sherpa.data
import sherpa.fit
import matplotlib.pyplot as plt
%matplotlib inline
# On melkor, the datapath is hard-coded in the snapshot program,
# but to run on my laptop, I need to put it here
datapath = 'data/'
# Hand-picked PSF candidate stars. Each entry is
# [drizzled image file, x, y], where (x, y) are approximate pixel
# positions read off in ds9 (ds9 counts pixels from (1, 1)).
prflist = [['ibg402010_drz.fits', 612, 209],
           ['ibg402010_drz.fits', 1007, 951],
           ['ibg402010_drz.fits', 64, 823],
           ['ibg403010_drz.fits', 597, 385],
           ['ibg405010_drz.fits', 340, 40],
           ['ibg406010_drz.fits', 424, 348],
           ['ibg412010_drz.fits', 439, 239],
           ['ibg415010_drz.fits', 676, 73],
           ['ibg416010_drz.fits', 444, 217],
           ['ibg417010_drz.fits', 587, 954],
           ['ibg418010_drz.fits', 113, 946],
           ['ibg418010_drz.fits', 160, 720],
           ['ibg418010_drz.fits', 113, 946],
           ['ibg418010_drz.fits', 917, 873],
           ['ibg419010_drz.fits', 675, 644],
           ['ibg422010_drz.fits', 896, 320],
           ['ibg425010_drz.fits', 647, 697],
           ['ibg426010_drz.fits', 997, 84],
           ['ibg426010_drz.fits', 385, 95],
           ['ibg428010_drz.fits', 356, 85],
           ['ibg428010_drz.fits', 889, 496],
           ['ibg429010_drz.fits', 803, 305],
           ['ibg432010_drz.fits', 153, 109],
           ['ibg434010_drz.fits', 876, 577],
           ['ibg436010_drz.fits', 342, 878],
           ['ibg437010_drz.fits', 873, 239],
           ['ibg438010_drz.fits', 41, 651],
           ['ibg438010_drz.fits', 417, 403],
           ['ibg440010_drz.fits', 212, 338],
           ['ibg440010_drz.fits', 297, 1018],
           ['ibg443010_drz.fits', 358, 289],
           ['ibg444010_drz.fits', 329, 347],
           ['ibg444010_drz.fits', 724, 725],
           ['ibg446010_drz.fits', 276, 501],
           ['ibg449010_drz.fits', 75, 775],
           ['ibg450010_drz.fits', 669, 272],
           ['ibg452010_drz.fits', 158, 558],
           ['ibg453010_drz.fits', 812, 847],
           ['ibg453010_drz.fits', 333, 189],
           ['ibg455010_drz.fits', 567, 650],
           ['ibg455010_drz.fits', 263, 444],
           ['ibg456010_drz.fits', 530, 696],
           ['ibg458010_drz.fits', 162, 807],
           ['ibg459010_drz.fits', 375, 168],
           ['ibg462010_drz.fits', 683, 641],
           ['ibg463010_drz.fits', 647, 971],
           ['ibg463010_drz.fits', 319, 873],
           ['ibg465010_drz.fits', 588, 723],
           ['ibg468010_drz.fits', 150, 509],
           ['ibg468010_drz.fits', 280, 136],
           ['ibg471010_drz.fits', 600, 685],
           ['ibg471010_drz.fits', 892, 511],
           ]
# Cross-match every PSF candidate against the GAIA archive (network calls).
radius = u.Quantity(1.0, u.arcsec)
gaia = []
for i, p in enumerate(prflist):
    # Convert the ds9 pixel position to sky coordinates via the image WCS.
    wcs = WCS(fits.getheader(datapath + p[0], 1))
    ra, dec = wcs.all_pix2world(p[1], p[2], 1)
    coord = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')
    # Query GAIA for all sources within 1 arcsec of the candidate.
    j = Gaia.cone_search(coord, radius)
    r = j.get_results()
    # Record which catalog entry and image this match belongs to,
    # so we can trace results back later.
    r['i'] = i
    r['file'] = p[0]
    r['x'] = p[1]
    r['y'] = p[2]
    gaia.append(r)
gaiatab = table.vstack(gaia)
gaiatab
# Select only those stars that are likely good single point sources:
# for bright stars, large GAIA positional errors indicate multiplicity
# or an extended source, so reject them.
gaiatab = gaiatab[(gaiatab['ra_error'] < 0.1) & (gaiatab['dec_error'] < 0.1)]
# Extract 25x25 pixel cut-outs around each remaining star.
# -1 because the above positions are measured in ds9, which counts from (1,1)
# while the python code counts from (0,0)
stars621 = extract_stars([NDData(fits.open(datapath + row['file'])[1].data) for row in gaiatab],
                         [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab],
                         size=25)
# Same cut-outs in the second filter; the '20_' files pair with the '10_'
# files of the first filter (F621M / F845M exposures -- TODO confirm).
stars845 = extract_stars([NDData(fits.open(datapath + row['file'].replace('10_', '20_'))[1].data)
                          for row in gaiatab],
                         [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab],
                         size=25)
def sherpa_phot(psf, data):
    """Fit the 2-D model *psf* to the image *data* and return the residual.

    Parameters
    ----------
    psf : sherpa 2-D model instance (e.g. ``Beta2D`` or ``Gauss2D``)
        Model to fit. Its unfrozen parameters are updated in place by the
        fit, so the best-fit values can be read off after the call.
    data : 2-D ndarray
        Cut-out image of a single star.

    Returns
    -------
    ndarray
        ``data`` minus the best-fit model, evaluated on the same pixel grid.
    """
    # Set sensible starting values.
    # Use hasattr so the same function also works for models that lack
    # these parameters (e.g. Gauss2D).
    if hasattr(psf, 'alpha') and not psf.alpha.frozen:
        psf.alpha = 2
    if hasattr(psf, 'r0') and not psf.r0.frozen:
        psf.r0 = 2
    # Bug fix: the amplitude guess must come from the *data* argument.
    # The original read ``np.max(s.data)``, silently picking up the loop
    # variable ``s`` from the calling scope -- calling this function
    # anywhere else would raise a NameError or use stale data.
    psf.ampl = np.max(data)
    x, y = np.indices(data.shape)
    dpsf = sherpa.data.Data2D('', x.flatten(), y.flatten(), data.flatten())
    fpsf = sherpa.fit.Fit(dpsf, psf, stat=sherpa.stats.LeastSq())
    fpsf.fit()
    return data - psf(x.flatten(), y.flatten()).reshape(data.shape)
# Fit a Moffat (Beta2D) profile to every star in both filters,
# with the power-law index alpha frozen at 2.
beta2d = sherpa.astro.models.Beta2D()
# Start the centroid at the middle of the 25x25 cut-out.
beta2d.xpos = stars621[0].data.shape[0] / 2
beta2d.ypos = stars621[0].data.shape[1] / 2
beta2d.alpha = 2
beta2d.alpha.frozen = True
results = []
for i, s in enumerate(stars621):
    out = sherpa_phot(beta2d, s.data)
    # Record the best-fit parameter values for this star.
    results.append([p.val for p in beta2d.pars])
res621 = Table(rows=results, names = [p.name for p in beta2d.pars])
results = []
for i, s in enumerate(stars845):
    out = sherpa_phot(beta2d, s.data)
    results.append([p.val for p in beta2d.pars])
res845 = Table(rows=results, names = [p.name for p in beta2d.pars])
# Inspect the fitted centroids: deviations from 12.5 show how far the
# hand-picked positions are off-center.
plt.plot(res621['xpos'])
plt.plot(res845['xpos'])
plt.plot(res621['ypos'])
plt.plot(res845['ypos'])
# Shift the catalog positions so the stars are centered in the cut-outs.
# Note how (x, y) -> (y, x) because of the ordering of the indices.
# I don't want to find out exactly where that happens, just make this line look better.
# I just check that it works (see below) and that's all that I need here.
gaiatab['x'] = gaiatab['x'] - np.rint(12.5 - res621['ypos'])
gaiatab['y'] = gaiatab['y'] - np.rint(12.5 - res621['xpos'])
# Now extract it again with the corrected positions.
# -1 because the above positions are measured in ds9, which counts from (1,1)
# while the python code counts from (0,0)
stars621 = extract_stars([NDData(fits.open(datapath + row['file'])[1].data) for row in gaiatab],
                         [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab],
                         size=25)
stars845 = extract_stars([NDData(fits.open(datapath + row['file'].replace('10_', '20_'))[1].data)
                          for row in gaiatab],
                         [Table({'x': [row['x'] - 1], 'y': [row['y'] - 1]}) for row in gaiatab],
                         size=25)
# Fit it again, resetting the centroid starting values each time.
results = []
for i, s in enumerate(stars621):
    beta2d.xpos = 12.5
    beta2d.ypos = 12.5
    out = sherpa_phot(beta2d, s.data)
    results.append([p.val for p in beta2d.pars])
res621 = Table(rows=results, names = [p.name for p in beta2d.pars])
results = []
for i, s in enumerate(stars845):
    beta2d.xpos = 12.5
    beta2d.ypos = 12.5
    out = sherpa_phot(beta2d, s.data)
    results.append([p.val for p in beta2d.pars])
res845 = Table(rows=results, names = [p.name for p in beta2d.pars])
# Check it's centered: fitted positions should now cluster around 12.5.
plt.plot(res621['xpos'])
plt.plot(res845['xpos'])
plt.plot(res621['ypos'])
plt.plot(res845['ypos'])
# Repeat the fits with alpha free, to see how alpha and r0 trade off.
beta2d.alpha.frozen = False
results = []
for i, s in enumerate(stars621):
    beta2d.xpos = 12.5
    beta2d.ypos = 12.5
    out = sherpa_phot(beta2d, s.data)
    results.append([p.val for p in beta2d.pars])
res621ar = Table(rows=results, names = [p.name for p in beta2d.pars])
results = []
for i, s in enumerate(stars845):
    beta2d.xpos = 12.5
    beta2d.ypos = 12.5
    out = sherpa_phot(beta2d, s.data)
    results.append([p.val for p in beta2d.pars])
res845ar = Table(rows=results, names = [p.name for p in beta2d.pars])
# alpha vs. r0, color-coded by GAIA parallax, for both filters.
plt.scatter(res621ar['alpha'], res621ar['r0'], c=gaiatab['parallax'])
plt.colorbar()
plt.scatter(res845ar['alpha'], res845ar['r0'], c=gaiatab['parallax'])
plt.colorbar()
# Distribution of fitted core radii (alpha frozen) in both filters.
out = plt.hist([res621['r0'], res845['r0']], bins=np.arange(1., 2.51, .05))
# Keep stars whose r0 falls near the bulk of the distribution in both filters.
ind = (np.abs(res621['r0'] - 1.65) < .4) & (np.abs(res845['r0'] - 1.6) < 0.4)
ind.sum(), len(res621)
# Amplitude distribution for the selected stars.
out = plt.hist([res621['ampl'][ind], res845['ampl'][ind]], bins=20, range=[0,2000])
plt.scatter(res621['ampl'], res845['ampl'], c=ind)
# Amplitude ratio (a color proxy) vs. r0, color-coded by parallax.
plt.scatter(res621['ampl'] / res845['ampl'], res845['r0'], c=gaiatab['parallax'])
plt.colorbar()
# Final, fully frozen PSF models for each filter; the shape parameters
# are the typical values found in the fits above.
psf_621 = sherpa.astro.models.Beta2D()
psf_621.alpha = 2.2
psf_621.r0 = 1.7
psf_845 = sherpa.astro.models.Beta2D()
psf_845.alpha = 2
psf_845.r0 = 1.625
for this_psf in [psf_621, psf_845]:
    # Set starting values for the centroid.
    this_psf.xpos = 12.5
    this_psf.ypos = 12.5
    # Freeze psf shape values so only position and amplitude get fitted.
    this_psf.alpha.frozen = True
    this_psf.r0.frozen = True
# Sanity-check one star by hand: fit the frozen model and overplot
# a cut through the data and the best-fit profile.
data = stars845[5].data
plt.plot(data[12, :])
psf_845.ampl = np.max(data)
x, y = np.indices(data.shape)
dpsf = sherpa.data.Data2D('', x.flatten(), y.flatten(), data.flatten())
fpsf = sherpa.fit.Fit(dpsf, psf_845, stat=sherpa.stats.LeastSq())
fpsf.fit()
plt.plot(psf_845(x.flatten(), y.flatten()).reshape(data.shape)[12, :])
# Additionally require the stars to be bright in both filters.
bright = (res621['ampl'] > 400) & (res845['ampl'] > 400)
(ind & bright).sum()
# Visual check: image and fit residual for the first 15 selected stars,
# in both filters (rows: image / residual for each filter).
x, y = np.indices(stars621[0].data.shape)
fig, axes = plt.subplots(nrows=4, ncols=15, figsize=(35,12))
for j, s in enumerate((ind & bright).nonzero()[0][:15]):
    for i, stars in enumerate([stars621, stars845]):
        image = stars[s].data
        imout = axes[2 * i][j].imshow(image)
        #axes[0][j].set_title(prftestlist[j][0])
        plt.colorbar(imout, ax = axes[2 * i][j])
        thispsf = [psf_621, psf_845][i]
        photout = sherpa_phot(thispsf, image)
        imout = axes[2 * i + 1][j].imshow(photout)
        axes[2 * i + 1][j].set_title("residual")
        plt.colorbar(imout, ax = axes[2 * i + 1][j])
len(gaiatab), len(bright)
# Print the final PSF star list in the same format as the input prflist.
for row in gaiatab[ind & bright]:
    print("['{}', {:3.0f}, {:3.0f}],".format(row['file'], row['x'], row['y']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preparation
Step2: Coarse tile position optimization
Step3: Next, we use the information from the previous step to set up and solve a simple optimization problem. Here, every node of an elastic mesh represents a complete tile. The tiles are kept rigid and only allowed to move translationally.
Step4: Fine tile position optimization
Step5: The estimated flow fields might be locally inconsistent or contain invalid entries where locally there is insufficient high frequency image content to establish a good match between the images. We can optionally apply clean up
Step6: Next, we prepare the data for the mesh solver.
Step7: In the following, each tile will be modeled as a spring mesh (see blue edges and red nodes below), and we will be looking for its equilibrium state. There will be two types of forces acting on every node of the mesh -- the spring elastic force (which tries to preserve the original tile geometry), and a force derived from the estimated flow fields, which pulls each node toward its matched position in the neighboring tile.
Step8: Now that the mesh is solved we are ready to render the stitched section.
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/google-research/sofima
import functools as ft
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import PIL
%mkdir tiles
!gsutil -m rsync -r gs://sofima-sample-data/fmi-friedrich-dp tiles
# Define the tile space. This specifies how the different tiles are distributed
# in space, and should normally be derived from the metadata provided by the
# microscope.
tile_id_map = [[461, 462, 463],
[493, 494, 495],
[525, 526, 527]]
tile_id_map = np.array(tile_id_map)
# Load tile images.
tile_map = {}
for y in range(tile_id_map.shape[0]):
for x in range(tile_id_map.shape[1]):
tile_id = tile_id_map[y, x]
with open(f'tiles/20210630_Dp_190326Bb_run04_g0001_t0{tile_id}_s04734.png', 'rb') as fp:
img = PIL.Image.open(fp)
tile_map[(x, y)] = np.array(img)
from sofima import stitch_rigid
# Estimate one translational offset per neighboring tile pair.
tile_space = (3, 3)
cx, cy = stitch_rigid.compute_coarse_offsets(tile_space, tile_map)
# Visualize the relative positions of horizontal and vertical nearest neighbors.
# Every vector represents a tile *pair*.
f, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].quiver((0, 1, 2), (0, 1, 2), cx[0, 0, ...], cx[1, 0, ...])
ax[0].set_ylim(-0.5, 2.5)
ax[0].set_xlim(-0.5, 1.5)
ax[0].set_title('horizontal NNs')
ax[1].quiver((0, 1, 2), (0, 1, 2), cy[0, 0, ...], cy[1, 0, ...])
ax[1].set_ylim(-0.5, 1.5)
ax[1].set_xlim(-0.5, 2.5)
ax[1].set_title('vertical NNs')
# Solve the rigid-tile optimization: one translation per tile.
coarse_mesh = stitch_rigid.optimize_coarse_mesh(cx, cy)
# Visualize the results. Every vector represents a tile.
plt.quiver((0, 1, 2), (0, 1, 2), coarse_mesh[0, 0, ...], coarse_mesh[1, 0, ...])
from sofima import stitch_elastic
# The stride (in pixels) specifies the resolution at which to compute the flow
# fields between tile pairs. This is the same as the resolution at which the
# mesh is later optimized. The more deformed the tiles initially are, the lower
# the stride needs to be to get good stitching results.
stride = 20
cx = np.squeeze(cx)
cy = np.squeeze(cy)
# Dense flow fields for every neighboring tile pair, along each axis.
fine_x, offsets_x = stitch_elastic.compute_flow_map(tile_map, cx, 0, stride=(stride, stride), batch_size=4) # (x,y) -> (x+1,y)
fine_y, offsets_y = stitch_elastic.compute_flow_map(tile_map, cy, 1, stride=(stride, stride), batch_size=4) # (x,y) -> (x,y+1)
from sofima import flow_utils
# Clean up locally inconsistent / low-confidence flow vectors.
kwargs = {"min_peak_ratio": 1.4, "min_peak_sharpness": 1.4, "max_deviation": 5, "max_magnitude": 0}
fine_x = {k: flow_utils.clean_flow(v[:, np.newaxis, ...], **kwargs)[:, 0, :, :] for k, v in fine_x.items()}
fine_y = {k: flow_utils.clean_flow(v[:, np.newaxis, ...], **kwargs)[:, 0, :, :] for k, v in fine_y.items()}
# Drop small isolated patches of valid flow.
kwargs = {"min_patch_size": 10, "max_gradient": -1, "max_deviation": -1}
fine_x = {k: flow_utils.reconcile_flows([v[:, np.newaxis, ...]], **kwargs)[:, 0, :, :] for k, v in fine_x.items()}
fine_y = {k: flow_utils.reconcile_flows([v[:, np.newaxis, ...]], **kwargs)[:, 0, :, :] for k, v in fine_y.items()}
# Visualize the X component of the flow field between the tiles
# (0, 0) and (1, 0). Note that this is flipped by 90 degrees
# to better utilize screen space.
plt.matshow(fine_x[0, 0][0, ...].T)
plt.colorbar()
from sofima import mesh
# Bundle the coarse offsets, cleaned fine flows and their offsets per axis.
data_x = (cx, fine_x, offsets_x)
data_y = (cy, fine_y, offsets_y)
fx, fy, x, nbors, key_to_idx = stitch_elastic.aggregate_arrays(
    data_x, data_y, tile_map,
    coarse_mesh[:, 0, ...], stride=(stride, stride))
@jax.jit
def prev_fn(x):
    # Given the current mesh state `x`, compute the target positions each
    # node is pulled toward, based on the flow to its neighboring tiles.
    target_fn = ft.partial(stitch_elastic.compute_target_mesh, x=x, fx=fx, fy=fy)
    x = jax.vmap(target_fn)(nbors)
    return jnp.transpose(x, [1, 0, 2, 3])
# These default settings are expected to work well in most configurations. Perhaps
# the most salient parameter is the elasticity ratio k0 / k. The larger it gets,
# the more the tiles will be allowed to deform to match their neighbors (in which
# case you might want use aggressive flow filtering to ensure that there are no
# inaccurate flow vectors). Lower ratios will reduce deformation, which, depending
# on the initial state of the tiles, might result in visible seams.
config = mesh.IntegrationConfig(dt=0.001, gamma=0., k0=0.01, k=0.1, stride=stride,
                                num_iters=1000, max_iters=20000, stop_v_max=0.001,
                                dt_max=100, prefer_orig_order=True,
                                start_cap=0.1, final_cap=10., remove_drift=True)
# Relax the spring mesh to its equilibrium state.
x, ekin, t = mesh.relax_mesh(x, None, config, prev_fn=prev_fn)
from sofima import warp
# Unpack the solved meshes into a dictionary keyed by tile position.
idx_to_key = {v: k for k, v in key_to_idx.items()}
meshes = {idx_to_key[i]: np.array(x[:, i:i+1 :, :]) for i in range(x.shape[1])}
# Warp the tiles into a single stitched image.
stitched, mask = warp.render_tiles(tile_map, meshes, stride=(stride, stride))
# Visualize the stitched tiles with matplotlib.
plt.figure(figsize=(15, 10))
plt.imshow(stitched, cmap=plt.cm.Greys_r)
# Zoom in on one of the seams.
plt.figure(figsize=(25, 5))
plt.imshow(stitched[3800:-200, 2900:3200].T, cmap=plt.cm.Greys_r)
# .. or inspect it interactively in Neuroglancer.
import neuroglancer
dimensions = neuroglancer.CoordinateSpace(names=['x', 'y'], units='nm', scales=[11, 11])
viewer = neuroglancer.Viewer()
with viewer.txn() as s:
    s.dimensions = dimensions
    s.layers['em'] = neuroglancer.ImageLayer(source=neuroglancer.LocalVolume(stitched.T, dimensions))
viewer
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ネットワーク
Step2: ネットワークの定義
Step3: RandomPyEnvironment を作成し、構造化した観測を生成して実装を検証しましょう。
Step4: 観測をディクショナリとして定義しましたので、観測を処理する前処理レイヤーを作成する必要があります。
Step5: これでアクターネットワークを用意できたので、環境から観測を処理できるようになりました。
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import numpy as np
from tf_agents.environments import random_py_environment
from tf_agents.environments import tf_py_environment
from tf_agents.networks import encoding_network
from tf_agents.networks import network
from tf_agents.networks import utils
from tf_agents.specs import array_spec
from tf_agents.utils import common as common_utils
from tf_agents.utils import nest_utils
class ActorNetwork(network.Network):
    """Actor network mapping (possibly structured) observations to one
    continuous action, using an EncodingNetwork with per-observation
    preprocessing layers."""

    def __init__(self,
                 observation_spec,
                 action_spec,
                 preprocessing_layers=None,
                 preprocessing_combiner=None,
                 conv_layer_params=None,
                 fc_layer_params=(75, 40),
                 dropout_layer_params=None,
                 activation_fn=tf.keras.activations.relu,
                 enable_last_layer_zero_initializer=False,
                 name='ActorNetwork'):
        super(ActorNetwork, self).__init__(
            input_tensor_spec=observation_spec, state_spec=(), name=name)
        # For simplicity we will only support a single action float output.
        self._action_spec = action_spec
        flat_action_spec = tf.nest.flatten(action_spec)
        if len(flat_action_spec) > 1:
            raise ValueError('Only a single action is supported by this network')
        self._single_action_spec = flat_action_spec[0]
        if self._single_action_spec.dtype not in [tf.float32, tf.float64]:
            raise ValueError('Only float actions are supported by this network.')
        kernel_initializer = tf.keras.initializers.VarianceScaling(
            scale=1. / 3., mode='fan_in', distribution='uniform')
        # Encoder handles the structured observation: applies the
        # per-key preprocessing layers, combines them, then runs the
        # conv / fully-connected stack.
        self._encoder = encoding_network.EncodingNetwork(
            observation_spec,
            preprocessing_layers=preprocessing_layers,
            preprocessing_combiner=preprocessing_combiner,
            conv_layer_params=conv_layer_params,
            fc_layer_params=fc_layer_params,
            dropout_layer_params=dropout_layer_params,
            activation_fn=activation_fn,
            kernel_initializer=kernel_initializer,
            batch_squash=False)
        # Small initial weights so initial actions are near the middle
        # of the action range after tanh + rescaling.
        initializer = tf.keras.initializers.RandomUniform(
            minval=-0.003, maxval=0.003)
        self._action_projection_layer = tf.keras.layers.Dense(
            flat_action_spec[0].shape.num_elements(),
            activation=tf.keras.activations.tanh,
            kernel_initializer=initializer,
            name='action')

    def call(self, observations, step_type=(), network_state=()):
        """Map observations to actions; returns (actions, network_state)."""
        outer_rank = nest_utils.get_outer_rank(observations, self.input_tensor_spec)
        # We use batch_squash here in case the observations have a time sequence
        # component.
        batch_squash = utils.BatchSquash(outer_rank)
        observations = tf.nest.map_structure(batch_squash.flatten, observations)
        state, network_state = self._encoder(
            observations, step_type=step_type, network_state=network_state)
        actions = self._action_projection_layer(state)
        # Rescale tanh output from [-1, 1] into the action spec's range.
        actions = common_utils.scale_to_spec(actions, self._single_action_spec)
        actions = batch_squash.unflatten(actions)
        return tf.nest.pack_sequence_as(self._action_spec, [actions]), network_state
# A random environment with a dictionary-structured observation,
# used to validate the ActorNetwork implementation.
action_spec = array_spec.BoundedArraySpec((3,), np.float32, minimum=0, maximum=10)
observation_spec = {
    'image': array_spec.BoundedArraySpec((16, 16, 3), np.float32, minimum=0,
                                         maximum=255),
    'vector': array_spec.BoundedArraySpec((5,), np.float32, minimum=-100,
                                          maximum=100)}
random_env = random_py_environment.RandomPyEnvironment(observation_spec, action_spec=action_spec)
# Convert the environment to a TFEnv to generate tensors.
tf_env = tf_py_environment.TFPyEnvironment(random_env)
# One preprocessing layer per observation key; their outputs are
# concatenated by the combiner before the encoder stack.
preprocessing_layers = {
    'image': tf.keras.models.Sequential([tf.keras.layers.Conv2D(8, 4),
                                         tf.keras.layers.Flatten()]),
    'vector': tf.keras.layers.Dense(5)
    }
preprocessing_combiner = tf.keras.layers.Concatenate(axis=-1)
actor = ActorNetwork(tf_env.observation_spec(),
                     tf_env.action_spec(),
                     preprocessing_layers=preprocessing_layers,
                     preprocessing_combiner=preprocessing_combiner)
# Run one observation through the actor to check everything wires up.
time_step = tf_env.reset()
actor(time_step.observation, time_step.step_type)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Science Tutorial
Step2: Now we'll specify the url of the file and the file name we will save to
Step3: And make a call to <code>download_file</code>
Step4: Now this might seem like overkill for downloading a single, small csv file, but we can use this same function to access countless APIs available on the World Wide Web by building an API request in the url.
Step5: Whoops, looks like our csv file did not contain a header row. <code>read_csv</code> assumes the first row of the csv is the header by default.
Step6: Lets try the import again, this time specifying the names. When specifying names, the <code>read_csv</code> function requires us to set the <code>header</code> row number to <code>None</code>
Step7: Lets take a look at some simple statistics for the clump_thickness column
Step8: Referring to the documentation link above about the data, the count, range of values (min = 1, max = 10), and data type (dtype = float64) look correct.
Step9: Well at least the count is correct. We were expecting no more than 10 unique values and now the data type is an object.
Step10: Using <code>unique</code> we can see that '?' is one of the distinct values that appears in this series. Looking again at the documentation for this data set, we find the following
Step11: Here we have attempted to convert the bare_nuclei series to a numeric type. Lets see what the unique values are now.
Step12: The decimal point after each number means that it is an integer value being represented by a floating point number. Now instead of our pesky '?' we have <code>nan</code> (not a number). <code>nan</code> is a construct used by pandas to represent the absence of value. It is a data type that comes from the package numpy, used internally by pandas, and is not part of the standard Python library.
Step13: <code>fillna</code> is a dataframe function that replaces all nan values with either a scalar value, a series of values with the same indices as found in the dataframe, or a dataframe that is indexed by the columns of the target dataframe.
Step14: <code>inplace=True</code> allows us to make this modification directly on the dataframe, without having to do any assignment.
Step15: Structurally, Pandas dataframes are a collection of Series objects sharing a common index. In general, the Series object and Dataframe object share a large number of functions with some behavioral differences. In other words, whatever computation you can do on a single column can generally be applied to the entire dataframe.
Step16: Visualizing the Data
Step17: So we enabled plotting in IPython and imported everything from the ggplot package. Now we'll create a plot and then break down the components
Step18: A plot begins with the <code>ggplot</code> function. Here, we pass in the cancer_data pandas dataframe and a special function called <code>aes</code> (short for aesthetic). The values provided to <code>aes</code> change depending on which type of plot is being used. Here we are going to make a histogram from the clump_thickness column in cancer_data, so that column name needs to be passed as the x parameter to <code>aes</code>.
Step19: As you can see, each geom has its own set of parameters specific to the appearance of that geom (also called aesthetics).
Step20: Sometimes when working with integer data, or data that takes on a limited range of values, it is easier to visualize the plot with added jitter to the points. We can do that by adding an aesthetic to <code>geom_point</code>.
Step21: With a simple aesthetic addition, we can see how these two variables play into our cancer classification
Step22: By adding <code>color = 'class'</code> as a parameter to the aes function, we now give a color to each unique value found in that column and automatically get a legend. Remember, 2 is benign and 4 is malignant.
Step23: There is definitely some patterning going on in that plot.
Step24: Rather than set the color equal to the class, we have created two plots based off of the class. With a facet, we can get very detailed. Lets through some more variables into the mix
Step25: Unfortunately, legends for faceting are not yet implemented in the Python ggplot package. In this example we faceted on the x-axis with clump_thickness and along the y-axis with marginal_adhesion, then created 100 plots of uniformity_cell_shape vs. bare_nuclei effect on class.
Step26: Here we call <code>values</code> on the dataframe to extract the values stored in the dataframe as an array of numpy arrays with the same dimensions as our subsetted dataframe. Numpy is a powerful, high performance scientific computing package that implements arrays. It is used internally by pandas. We will use <code>labels</code> and <code>features</code> later on in our machine learning classifier
Step27: An important concept in machine learning is to split the data set into training data and testing data. The machine learning algorithm will use the subset of training data to build a classifier to predict labels. We then test the accuracy of this classifier on the subset of testing data. This is done in order to prevent overfitting the classifier to one given set of data.
Step28: For this example, we will build a Decision Tree Classifier. The goal of a decision tree is to create a prediction by outlining a simple tree of decision rules. These rules are built from the training data by slicing the data on simple boundaries and trying to minimize the prediction error of that boundary. More details on decision trees can be found here
Step29: Next, we create a variable to store the classifier
Step30: Then we have to fit the classifier to the training data. Both the training features (uniformity_cell_shape and bare_nuclei) and the labels (benign vs. malignant) are passed to the fit function
Step31: The classifier is now ready to make some predictions. We can use the score function to see how accurate the classifier is on the test data. The score function will take the data in <code>features_test</code>, make a prediction of benign or malignant based on the decision tree that was fit to the training data, and compare that prediction to the true values in <code>labels_test</code>
Step32: Nearly all classifiers, decision trees included, will have parameters that can be tuned to build a more accurate model. Without any parameter tuning and using just two features we have made a pretty accurate prediction. Good job!
|
<ASSISTANT_TASK:>
Python Code:
# Function to sum up numbers in a dictionary
# def download file  (placeholder: download_file(url, filename) is defined by the student)
# Source URL of the Wisconsin breast-cancer dataset and the local file name.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
filename = 'breast-cancer-wisconsin.csv'
# execute download file
# pandas, read
# Column names for the CSV (the raw file has no header row).
# \ allows multi line wrapping
cancer_header = [ \
    'sample_code_number', \
    'clump_thickness', \
    'uniformity_cell_size', \
    'uniformity_cell_shape', \
    'marginal_adhesion', \
    'single_epithelial_cell_size', \
    'bare_nuclei', \
    'bland_chromatin', \
    'normal_nucleoli', \
    'mitoses', \
    'class']
# read csv
# describe data
# describe data
# unique values
# convert to numeric
# Distinct values of bare_nuclei: the column contains '?' for missing
# entries, so it was read as strings instead of numbers.
cancer_data["bare_nuclei"].unique()
# fillna
cancer_data.mean().round()
# Re-read the data, this time supplying the column names ourselves
# (the raw CSV has no header row).
cancer_data = pd.read_csv('breast-cancer-wisconsin.csv', header=None, names=cancer_header)
# Coerce every column to a numeric dtype; unparseable entries such as '?'
# become NaN. (DataFrame.convert_objects was deprecated in pandas 0.17 and
# removed in 0.25 -- pd.to_numeric is the supported replacement and behaves
# the same way here.)
cancer_data = cancer_data.apply(pd.to_numeric, errors='coerce')
# Replace the NaNs with the (rounded) column means, in place.
cancer_data.fillna(cancer_data.mean().round(), inplace=True)
cancer_data["bare_nuclei"].describe()
cancer_data["bare_nuclei"].unique()
# describe
# The following line is NOT Python code, but a special syntax for enabling inline plotting in IPython
%matplotlib inline
from ggplot import *
import warnings
# ggplot usage of pandas throws a future warning
warnings.filterwarnings('ignore')
# ggplot cancer_data
plt = ggplot(aes(x = 'clump_thickness'), data = cancer_data) + \
geom_histogram(binwidth = 1, fill = 'steelblue') + \
geom_vline(xintercept = [cancer_data['clump_thickness'].mean()], linetype='dashed')
print plt
# scatter plot
# scatter with jitter
# colored scatter
plt = ggplot(aes(x = 'uniformity_cell_shape', y = 'bare_nuclei', color = 'class'), data = cancer_data) + \
geom_point(position = 'jitter') + \
ggtitle("The Effect of the Bare Nuclei and Cell Shape Uniformity on Classification") + \
ylab("Amount of Bare Nuclei") + \
xlab("Uniformity in Cell shape")
print plt
plt = ggplot(aes(x = 'uniformity_cell_shape', y = 'bare_nuclei'), data = cancer_data) + \
geom_point(position = 'jitter') + \
ggtitle("The Effect of the Bare Nuclei and Cell Shape Uniformity on Classification") + \
facet_grid('class')
print plt
plt = ggplot(aes(x = 'uniformity_cell_shape', y = 'bare_nuclei', color = 'class'), data = cancer_data) + \
geom_point(position = 'jitter') + \
ggtitle("The Effect of the Bare Nuclei and Cell Shape Uniformity on Classification") + \
facet_grid('clump_thickness', 'marginal_adhesion')
print plt
# cancer features
# labels and features
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
# Hold out 30% of the rows as a test set; fixing random_state makes the
# split reproducible. (`features` and `labels` are built in the cells
# represented by the placeholder comments above.)
features_train, features_test, labels_train, labels_test = train_test_split(features,
                                                                            labels,
                                                                            test_size = 0.3,
                                                                            random_state = 42)
# import decision trees
# create decision tree
#fit classifier
# test accuracy
# plot decisions
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Business Problem
Step2: We have information about 47 homes in this dataset. Each house is described by two characteristics or "features". The 3rd column is the output column -- Price.
Step3: Step 1
Step4: Rescale/Normalize the Data
Step5: Step 2
Step6: Step 2b
Step7: Step 3
Step8: We don't know yet if this is high or low -- we'll have to try out a whole bunch of $W$ values. Or better yet, we can use pick an iterative method and implement it.
Step 5
Step9: Learning Rate - The Intuition
Step10: Step 6
Step11: Let's see how the prediction is sensitive to the number of iterations and the learning rate.
|
<ASSISTANT_TASK:>
Python Code:
# Share functions used in multiple notebooks
%run Shared-Functions.ipynb
# Import our usual libraries
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
%matplotlib inline
import os
# OS-independent way to navigate the file system.
# Data directory is one directory up in relation to directory of this notebook
data_dir_root = os.path.normpath(os.getcwd() + os.sep + os.pardir)
# Where the file is
file_url = data_dir_root + os.sep + "Data" + os.sep + "portland-house-prices.txt"
# Load the data into a dataframe; the raw file has no header row.
data2 = pd.read_csv(file_url, header=None, names=['Size', 'Bedrooms', 'Price'])
# The number of (rows, columns) in the dataset
data2.shape
# The first few rows of the dataset
data2.head()
# Descriptive statistics of the dataset
data2.describe()
# Smallest and largest square footage in the data set
data2.Size.min(), data2.Size.max()
# Scatter plot of just the size and price
data2.plot.scatter(x='Size', y='Price', figsize=(12,8));
# Scatter plot of just the number of bedrooms and price
data2.plot.scatter(x='Bedrooms', y='Price', figsize=(12,8));
# Scatter plot of bedrooms and size (the 2 features)
data2.plot.scatter(x='Bedrooms', y='Size', figsize=(12,8));
# From https://matplotlib.org/mpl_toolkits/mplot3d/tutorial.html
# 3D scatter plot of size, bedrooms and price
from mpl_toolkits.mplot3d import Axes3D
threeD = plt.figure(figsize=(12,8)).gca(projection='3d')
threeD.scatter(data2['Bedrooms'], data2['Size'], data2['Price'])
threeD.set_xlabel('Bedrooms', fontsize=14)
threeD.set_ylabel('Size', fontsize=14)
threeD.set_zlabel('Price', fontsize=14)
plt.show()
# Normalize the scales in the dataset
# NOTE: the output doesn't have to be rescaled but we've done that
# here anyway
# If you know statistics: What we're doing is rewriting each value
# in terms of standard deviation units (z-scores)
data2Norm = (data2 - data2.mean()) / data2.std()
data2Norm.head()
# In Orange use the Preprocessor widget
# Let's keep track of the mean and standard deviation of the house sizes,
# number of bedrooms, and prices in the dataset.
# We'll need these values when we make predictions
# We can get them easily for size, bedrooms and price by using data2.mean()[0], ..., [2]
data2.mean()[0], data2.mean()[1], data2.mean()[2], data2.std()[0], data2.std()[1], data2.std()[2]
# Number of columns in the dataset
cols = data2Norm.shape[1]
# Inputs are our first two columns
X = data2Norm.iloc[:, 0:cols-1]
# Add an initial column of 1s to X to keep the notation simple
# X.insert(0,'x0',1)
# First few rows of features (remember it's scaled)
X.head()
# The output -- the price of a house
# Don't need to normalize the output
#y = data2['Price']
y = data2.iloc[:, cols-1:cols]
# First few house prices in the dataset
y.head()
# Compute the cost for a given set of W values over the entire dataset
# Get X and y in to matrix form
penalty(X, y, W_init, squaredPenalty)
# Set hyper-parameters
num_iters = 2000 # number of iterations
learning_rate = 0.001 # the learning rate
# Run gradient descent and capture the progression of cost values and the ultimate optimal W values
%time W_opt, final_penalty, running_w, running_penalty = gradientDescent(X, y, W_init, num_iters, learning_rate)
# Get the optimal W values and the last few W values and cost values
W_opt, final_penalty, running_w[-5:], running_penalty[-5:]
# How the cost changes as the number of iterations increase
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(np.arange(num_iters), running_penalty, 'g')
ax.set_xlabel('Number of Iterations')
ax.set_ylabel('Cost')
plt.xlim(0,num_iters)
ax.set_title('Cost vs. Iterations Over the Dataset');
# Run gradient descent for a few different values of the learning rate
learning_rates = [0.0001, 0.003, 0.005, 0.01, 0.03, 0.1]
gdResults = [gradientDescent(X, y, W_init, num_iters, learning_rates[i]) for i in range(len(learning_rates))]
#gdResults
# For each learning rate, get the progression of costs
# for each iteration
penalty_list = [gdResults[i][3] for i in range(len(gdResults))]
penalty_list[0]
# How the cost of the transformation varies with the learning rate
plot_color_list = ['g', 'r', 'c', 'b', 'm', 'y']
fig, ax = plt.subplots(figsize=(12,8))
[ax.plot(np.arange(num_iters), penalty_list[i], plot_color_list[i], label=learning_rates[i]) for i in range(len(learning_rates))]
ax.set_xlabel('Iterations')
ax.set_ylabel('Cost')
ax.legend()
plt.xlim(0,1000)
ax.set_title('Cost vs. Learning Rate');
# Change size and num_bedrooms to make distinct predictions
size = 3000
num_bedrooms = 3
# Remember we've run the model using rescaled house sizes and number of bedrooms
# So we should scale the inputs down and then scale the prediction up when we're done
size_scaled = (size - data2.mean()[0])/data2.std()[0]
beds_scaled = (num_bedrooms - data2.mean()[1])/data2.std()[1]
# This is our model -- we're just using it here to make a calculation
pred_price = (W_opt[0] * 1) + (W_opt[1] * size_scaled) + (W_opt[2] * beds_scaled)
# Format and print the result
print("Predicted Price: ", '${:20,.0f}'.format(math.ceil(pred_price)))
print("Optimal Parameter Values: {}".format(W_opt))
# Hyper-parameter settings to compare
n_iters = [10, 100, 1000, 10000]
l_rate = [0.0001, 0.0005, 0.01]
# Keep the prediction inputs the same across all runs
sq_ft = 3000
rooms = 5
# Build every (iterations, learning-rate) pairing to feed into gradient descent
combos_list = [[n, r] for n in n_iters for r in l_rate]
# Run gradient descent on all the different combinations
gdResults = [gradientDescent(X, y, \
W_init, combos_list[i][0], combos_list[i][1]) for i in range(len(combos_list))]
# Here are the optimal parameter values for these settings
W_values = [gdResults[i][0] for i in range(len(gdResults))]
# Price for each set of optimal values
# Remember we've run the model using rescaled house sizes and number of bedrooms
# So we should scale the inputs down and then scale the prediction up when we're done
size_scaled = (sq_ft - data2.mean()[0])/data2.std()[0]
beds_scaled = (rooms - data2.mean()[1])/data2.std()[1]
# This is our model -- we're just using it here to make a calculation
pred_prices = [(W_values[i][0] * 1) + (W_values[i][1] * size_scaled) + (W_values[i][2] * beds_scaled) \
for i in range(len(W_values))]
# Convert pred_prices from a list of matrices into a simple list
# This makes it suitable for plotting
prices_out = np.array(pred_prices).reshape(1,-1).squeeze()
#print(combos_list)
# Format and print the result
#[print("Predicted Price: ", '${:6,.0f}'.format(math.ceil(pred_price))) for pred_price in pred_prices]
# Plot the results
fig,ax = plt.subplots(figsize=(12,8))
# Define the number of the bars
x_pos = list(range(len(combos_list)))
# Set the bar labels
bar_labels = combos_list
ax.bar(x_pos, prices_out, width=0.8, color='#FFC222')
# set axes labels and title
plt.ylabel('Predicted Price')
plt.xticks(x_pos, bar_labels, rotation=90)
plt.title('Predicted Price for Different Values of [Iterations, Learning Rate]')
# add a grid
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import clean data set
Step2: Additional cleaning
Step3: Raw Counts
Step4: Plot Distribution of cis- and trans-effects
Step8: Plot cis- and trans-effects vs Allelic Proportion
Step9: Plot cis- and trans-effects vs Allelic Proportion for Specific Exonic Regions
Step10: F10317_SI
Step12: F10482_SI
|
<ASSISTANT_TASK:>
Python Code:
# Set-up default environment
%run '../ipython_startup.py'
# Import additional libraries
import sas7bdat as sas
import cPickle as pickle
import statsmodels.formula.api as smf
from ase_cisEq import marenEq
from ase_cisEq import marenPrintTable
from ase_normalization import meanCenter
from ase_normalization import q3Norm
from ase_normalization import meanStd
pjoin = os.path.join
# Import clean dataset
with sas.SAS7BDAT(pjoin(PROJ, 'sas_data/clean_ase_stack.sas7bdat')) as FH:
df = FH.to_data_frame()
dfClean = df[['line', 'mating_status', 'fusion_id', 'flag_AI_combined', 'q5_mean_theta', 'sum_both', 'sum_line', 'sum_tester', 'sum_total', 'mean_apn']]
print 'Rows ' + str(dfClean.shape[0])
print 'Columns ' + str(dfClean.shape[1])
print 'Number of Genotypes ' + str(len(set(dfClean['line'].tolist())))
print 'Number of Exonic Regions ' + str(len(set(dfClean['fusion_id'].tolist())))
# Drop groups with less than 10 lines per fusion
grp = dfClean.groupby(['mating_status', 'fusion_id'])
dfGt10 = grp.filter(lambda x: x['line'].count() >= 10).copy()
print 'Rows ' + str(dfGt10.shape[0])
print 'Columns ' + str(dfGt10.shape[1])
print 'Number of Genotypes ' + str(len(set(dfGt10['line'].tolist())))
print 'Number of Exonic Regions ' + str(len(set(dfGt10['fusion_id'].tolist())))
# Calculate Maren TIG equations by mating status and exonic region
marenRawCounts = marenEq(dfGt10, Eii='sum_line', Eti='sum_tester', group=['mating_status', 'fusion_id'])
marenRawCounts['mag_cis'] = abs(marenRawCounts['cis_line'])
marenRawCounts.columns
# Plot densities
def panelKde(df, **kwargs):
    """Draw a panel of kernel-density plots, one subplot per column of *df*.

    Keyword arguments override the default pandas ``plot`` options below
    (e.g. pass ``subplots=False`` for a single axis, or a custom ``xlim``).
    Legends are replaced by per-subplot titles, y-axes are hidden, and a
    vertical line marks zero on every panel.

    Returns the current matplotlib figure.
    """
    options = {'subplots': True,
               'layout': (7, 7),
               'figsize': (20, 20),
               'xlim': (-500, 500),
               'legend': False,
               'color': 'k'}
    options.update(kwargs)
    # Make plot
    axes = df.plot(kind='kde', **options)
    # Add titles instead of legends
    try:
        for ax in axes.ravel():
            _, labels = ax.get_legend_handles_labels()
            ax.set_title(labels[0])
            ax.get_yaxis().set_visible(False)
            ax.axvline(0, lw=1)
    except (AttributeError, IndexError):
        # `subplots=False` returns a single Axes (no .ravel()); fall back
        # to styling that lone axis without a title.
        ax = axes
        ax.get_yaxis().set_visible(False)
        ax.axvline(0, lw=1)
    return plt.gcf()
def linePanel(df, value='cis_line', index='fusion_id', columns='line'):
    """Save density-panel plots of line effects, one figure per mating status.

    *df* is pivoted so that *columns* becomes the panel dimension; at most the
    first 49 columns are plotted. Figures are written under
    ``pipeline_output/cis_effects/``.

    :param df: long-format effects table with a 'mating_status' column
    :param value: effect column to plot ('cis_line' or 'trans_line')
    :param index: pivot-table row index
    :param columns: pivot-table columns (one KDE panel per distinct value)
    """
    mymap = {
        'cis_line': 'cis-Line Effects',
        'trans_line': 'trans-Line Effects',
        'line': 'genotype',
        'fusion_id': 'exonic_region'
    }
    # Iterate over mated and virgin
    for k, v in {'M': 'Mated', 'V': 'Virgin'}.items():
        # Pivot data frame so that the thing you want to make panels by is in columns.
        dfPiv = pd.pivot_table(df[df['mating_status'] == k],
                               values=value, index=index, columns=columns)
        # Generate panel plot with at most 49 panels
        if value == 'cis_line':
            xlim = (-500, 500)
        else:
            # trans-effects appear to be larger in magnitude
            xlim = (-1500, 1500)
        fig = panelKde(dfPiv.iloc[:, :49], xlim=xlim)
        title = '{}\n{}'.format(mymap[value], v)
        fig.suptitle(title, fontsize=18, fontweight='bold')
        fname = pjoin(PROJ, 'pipeline_output/cis_effects/density_plot_by_{}_{}_{}.png'.format(mymap[columns], value, v.lower()))
        plt.savefig(fname, bbox_inches='tight')
        print("Saved figure to: " + fname)
        plt.close()
def testerPanel(df, value='cis_tester'):
    """Save a single-axis density plot of tester effects per mating status.

    Unlike line effects there is only one tester value per exonic region, so
    duplicates are dropped and a single KDE is drawn. Figures are written
    under ``pipeline_output/cis_effects/``.

    :param df: long-format effects table with a 'mating_status' column
    :param value: effect column to plot ('cis_tester' or 'trans_tester')
    """
    mymap = {
        'cis_tester': 'cis-Tester Effects',
        'trans_tester': 'trans-Tester Effects'
    }
    # Iterate over mated and virgin
    for k, v in {'M': 'Mated', 'V': 'Virgin'}.items():
        # Split table by mating status and drop duplicates, because
        # there is only one tester value for each exonic region
        dfSub = df.loc[df['mating_status'] == k, ['fusion_id', value]].drop_duplicates()
        # Generate Panel Plot
        fig = panelKde(dfSub, subplots=False)
        title = '{}\n{}'.format(mymap[value], v)
        fig.suptitle(title, fontsize=18, fontweight='bold')
        fname = pjoin(PROJ, 'pipeline_output/cis_effects/density_plot_{}_{}.png'.format(value, v.lower()))
        plt.savefig(fname, bbox_inches='tight')
        print("Saved figure to: " + fname)
        plt.close()
# Cis and trans line effects by genotype
linePanel(marenRawCounts, value='cis_line', index='fusion_id', columns='line')
linePanel(marenRawCounts, value='trans_line', index='fusion_id', columns='line')
# Cis and trans line effects by exonic region
linePanel(marenRawCounts, value='cis_line', index='line', columns='fusion_id')
linePanel(marenRawCounts, value='trans_line', index='line', columns='fusion_id')
# Cis and trans tester effects
testerPanel(marenRawCounts, value='cis_tester')
testerPanel(marenRawCounts, value='trans_tester')
# Set Globals
SHAPES = {'M': 'o', 'V': '^'}
CMAP='jet'
# Add color column to color by genotype
colors = {}
cnt = 0
genos = set(dfGt10['line'].tolist())
for l in genos:
colors[l] = cnt
cnt += 1
marenRawCounts['color'] = marenRawCounts['line'].map(colors)
# Plotting scatter
def getR2(df, x, y):
    """Calculate the R-squared of ``y ~ x`` using OLS with an intercept.

    :param df: DataFrame containing columns *x* and *y*
    :param x: name of the predictor column
    :param y: name of the response column
    :return: the fitted model's R-squared (float)
    """
    formula = '{} ~ {} + 1'.format(y, x)
    return smf.ols(formula, df).fit().rsquared
def scatPlt(df, x, y, c=None, cmap='jet', s=50, marker='o', ax=None, title=None, xlab=None, ylab=None, diag='pos'):
    """Make a scatter plot using some default options.

    Optionally draws a diagonal reference line across the axes:
    ``diag='pos'`` for a positive-slope diagonal, ``diag='neg'`` for a
    negative-slope one, anything else for no line.

    :return: the matplotlib Axes the data was drawn on
    """
    ax = df.plot(x, y, kind='scatter', ax=ax, c=c, cmap=cmap, s=s, marker=marker, title=title, colorbar=False)
    # Add a diagonal reference line
    if diag == 'neg':
        # draw a diag line with negative slope
        ax.plot([0, 1], [1, 0], transform=ax.transAxes)
    elif diag == 'pos':
        # draw a diag line with positive slope
        ax.plot([0, 1], [0, 1], transform=ax.transAxes)
    ax.set_xlabel(xlab)
    ax.set_ylabel(ylab)
    return ax
def scatPltPanel(df, line='sum_line', tester='sum_tester', x='cis_line', y='prop', cmap='jet', s=60, panel_title=None, diag='pos'):
    """Make a panel of scatter plots using pandas, one subplot per exonic region.

    Adds a 'prop' column (line allelic proportion) to *df* in place, then
    draws up to 25 panels (5x5 grid) of *x* vs *y*, one per 'fusion_id',
    annotated with the OLS R-squared. Marker shape encodes mating status
    (module-level ``SHAPES``). The figure is saved under
    ``pipeline_output/cis_effects/``.
    """
    # Line allelic proportion: fraction of reads NOT attributed to the tester
    df['prop'] = 1 - df[tester] / (df[line] + df[tester])
    # Create 5x5 panel plot
    fig, axes = plt.subplots(5, 5, figsize=(20, 20))
    fig.suptitle(panel_title, fontsize=12, fontweight='bold')
    axes = axes.ravel()
    # Group by fusion_id
    for i, (n, gdf) in enumerate(df.groupby('fusion_id')):
        ax = axes[i]
        # Calculate R-squared value
        r2 = getR2(gdf, x, y)
        # Make new title with R-squared in it
        t = '{}\nR^2: {}'.format(n, round(r2, 3))
        # Change marker style based on mating status and plot
        for ms, msdf in gdf.groupby('mating_status'):
            scatPlt(msdf, x, y, c='color', cmap=cmap, s=s, marker=SHAPES[ms], ax=ax, title=t, xlab=x, ylab=y, diag=diag)
        # only plot 25 fusions
        if i == 24:
            break
    fname = pjoin(PROJ, 'pipeline_output/cis_effects/scatter_plot_by_exonic_region_{}_v_{}.png'.format(x, y))
    plt.savefig(fname, bbox_inches='tight')
    print("Saved figure to: " + fname)
    plt.close()
# Plot the cis-line effects x proportion by fusion
scatPltPanel(marenRawCounts, line='sum_line', tester='sum_tester', cmap=CMAP, panel_title='Raw Counts: cis-line')
# Plot the trans-line effects x proportion by fusion
scatPltPanel(marenRawCounts, line='sum_line', tester='sum_tester', x='trans_line', cmap=CMAP, panel_title='Raw Counts: trans-line', diag='neg')
# Plot F10005_SI
FUSION='F10005_SI'
dfFus = marenRawCounts[marenRawCounts['fusion_id'] == FUSION].copy()
dfFus['prop'] = 1 - dfFus['sum_tester'] / (dfFus['sum_line'] + dfFus['sum_tester'])
# Generate 3 panel plot
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle(FUSION, fontsize=14, fontweight='bold')
for n, mdf in dfFus.groupby('mating_status'):
# Plot the cis-line effects x proportion by fusion
scatPlt(mdf, x='cis_line', y='prop', ax=axes[0], c='color', cmap=CMAP, marker=SHAPES[n], title='cis-line', xlab='cis-line', ylab='prop')
# Plot the trans-line effects x proportion by fusion
scatPlt(mdf, x='trans_line', y='prop', ax=axes[1], c='color', cmap=CMAP, marker=SHAPES[n], title='trans-line', xlab='trans-line', ylab='prop', diag='neg')
# Plot the Tester effects x proportion by fusion
scatPlt(mdf, x='cis_tester', y='prop', ax=axes[2], c='color', cmap=CMAP, marker=SHAPES[n], title='Tester', xlab='cis-tester', ylab='prop', diag=None)
fname = pjoin(PROJ, 'pipeline_output/cis_effects/scatter_plot_{}_effects_v_prop.png'.format(FUSION))
plt.savefig(fname, bbox_inches='tight')
print("Saved figure to: " + fname)
plt.close()
# Plot F10317_SI
FUSION='F10317_SI'
dfFus = marenRawCounts[marenRawCounts['fusion_id'] == FUSION].copy()
dfFus['prop'] = 1 - dfFus['sum_tester'] / (dfFus['sum_line'] + dfFus['sum_tester'])
# Generate 3 panel plot
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle(FUSION, fontsize=14, fontweight='bold')
for n, mdf in dfFus.groupby('mating_status'):
# Plot the cis-line effects x proportion by fusion
scatPlt(mdf, x='cis_line', y='prop', ax=axes[0], c='color', cmap=CMAP, marker=SHAPES[n], title='cis-line', xlab='cis-line', ylab='prop')
# Plot the trans-line effects x proportion by fusion
scatPlt(mdf, x='trans_line', y='prop', ax=axes[1], c='color', cmap=CMAP, marker=SHAPES[n], title='trans-line', xlab='trans-line', ylab='prop', diag='neg')
# Plot the Tester effects x proportion by fusion
scatPlt(mdf, x='cis_tester', y='prop', ax=axes[2], c='color', cmap=CMAP, marker=SHAPES[n], title='Tester', xlab='cis-tester', ylab='prop', diag=None)
fname = pjoin(PROJ, 'pipeline_output/cis_effects/scatter_plot_{}_effects_v_prop.png'.format(FUSION))
plt.savefig(fname, bbox_inches='tight')
print("Saved figure to: " + fname)
plt.close()
# Plot F10482_SI
FUSION='F10482_SI'
dfFus = marenRawCounts[marenRawCounts['fusion_id'] == FUSION].copy()
dfFus['prop'] = 1 - dfFus['sum_tester'] / (dfFus['sum_line'] + dfFus['sum_tester'])
# Generate 3 panel plot
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
fig.suptitle(FUSION, fontsize=14, fontweight='bold')
for n, mdf in dfFus.groupby('mating_status'):
# Plot the cis-line effects x proportion by fusion
scatPlt(mdf, x='cis_line', y='prop', ax=axes[0], c='color', cmap=CMAP, marker=SHAPES[n], title='cis-line', xlab='cis-line', ylab='prop')
# Plot the trans-line effects x proportion by fusion
scatPlt(mdf, x='trans_line', y='prop', ax=axes[1], c='color', cmap=CMAP, marker=SHAPES[n], title='trans-line', xlab='trans-line', ylab='prop', diag='neg')
# Plot the Tester effects x proportion by fusion
scatPlt(mdf, x='cis_tester', y='prop', ax=axes[2], c='color', cmap=CMAP, marker=SHAPES[n], title='Tester', xlab='cis-tester', ylab='prop', diag=None)
fname = pjoin(PROJ, 'pipeline_output/cis_effects/scatter_plot_{}_effects_v_prop.png'.format(FUSION))
plt.savefig(fname, bbox_inches='tight')
print("Saved figure to: " + fname)
plt.close()
f1005
marenRawCounts.columns
meanByMsLine = marenRawCounts[['mean_apn', 'cis_line', 'mating_status', 'line']].groupby(['mating_status', 'line']).mean()
meanByMsLine.columns
meanByMsLine.plot(kind='scatter', x='mean_apn', y='cis_line')
def cisAPN(df, fusion, value='cis_line', xcutoff='>=150', ycutoff='<=-180'):
    """Scatter plot of *value* vs mean APN for one exonic region.

    Points whose effect passes *ycutoff* or whose mean APN passes *xcutoff*
    are annotated with ``<line>_<mating_status>``. Cutoffs are comparison
    strings (e.g. '>=150') evaluated with ``DataFrame.eval``. The figure is
    saved under ``pipeline_output/cis_effects/``.

    :param df: effects table (e.g. marenRawCounts) with 'fusion_id',
        'mean_apn', 'line', 'mating_status' and the *value* column
    :param fusion: exonic region (fusion_id) to plot
    """
    # Pull out the fusion (exonic region) of interest
    dfSub = df[df['fusion_id'] == fusion]
    # Make scatter plot
    fig, ax = plt.subplots(1, 1, figsize=(10, 10))
    dfSub.plot(kind='scatter', x='mean_apn', y=value, ax=ax, title=fusion)
    # Annotate outliers beyond either cutoff
    mask = dfSub.eval('{0} {1} or mean_apn {2}'.format(value, ycutoff, xcutoff))
    filt = dfSub.loc[mask, ['line', 'mating_status', 'mean_apn', value]]
    for line, ms, apn, eff in filt.values:
        ax.annotate('{0}_{1}'.format(line, ms), xy=(apn, eff))
    fname = pjoin(PROJ, 'pipeline_output/cis_effects/scatter_plot_{}_{}_v_meanApn.png'.format(fusion, value))
    plt.savefig(fname, bbox_inches='tight')
eval("{} == 'M'".format(marenRawCounts['mating_status']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup a Kubeflow cluster on GCP
Step2: Create an experiment
Step3: Let's look at the experiments that are running on this cluster. Since you just launched it, you should see only a single "Default" experiment
Step4: Now let's create a 'taxifare' experiment where we could look at all the various runs of our taxifare pipeline
Step5: Let's make sure the experiment has been created correctly
Step6: Packaging your code into Kubeflow components
Step7: Now that the container images are pushed to the registry in your project, we need to create yaml files describing to Kubeflow how to use these containers. It boils down essentially to
Step8: Create a Kubeflow pipeline
Step9: The pipeline function above is then used by the Kubeflow compiler to create a Kubeflow pipeline artifact that can be either uploaded to the Kubeflow cluster from the UI, or programatically, as we will do below
Step10: If you untar and uzip this pipeline artifact, you'll see that the compiler has transformed the
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
pip freeze | grep kfp || pip install kfp
from os import path
import kfp
import kfp.compiler as compiler
import kfp.components as comp
import kfp.dsl as dsl
import kfp.gcp as gcp
import kfp.notebook
HOST = # TODO: fill in the HOST information for the cluster
BUCKET = # TODO: fill in the GCS bucket
client = # TODO: create a Kubeflow client
client.list_experiments()
exp = # TODO: create an experiment called 'taxifare'
client.list_experiments()
# Builds the taxifare trainer container in case you skipped the optional part of lab 1
!taxifare/scripts/build.sh
# Pushes the taxifare trainer container to gcr/io
!taxifare/scripts/push.sh
# Builds the KF component containers and push them to gcr/io
!cd pipelines && make components
%%writefile bq2gcs.yaml
name: bq2gcs
description: |
This component creates the training and
validation datasets as BiqQuery tables and export
them into a Google Cloud Storage bucket at
gs://<BUCKET>/taxifare/data.
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-bq2gcs you just created
args: ["--bucket", {inputValue: Input Bucket}]
%%writefile trainjob.yaml
name: trainjob
description: |
This component trains a model to predict that taxi fare in NY.
It takes as argument a GCS bucket and expects its training and
eval data to be at gs://<BUCKET>/taxifare/data/ and will export
the trained model at gs://<BUCKET>/taxifare/model/.
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-trainjob you just created
args: [{inputValue: Input Bucket}]
%%writefile deploymodel.yaml
name: deploymodel
description: |
This component deploys a trained taxifare model on GCP as taxifare:dnn.
It takes as argument a GCS bucket and expects the model to deploy
to be found at gs://<BUCKET>/taxifare/model/export/savedmodel/
inputs:
- {name: Input Bucket , type: String, description: 'GCS directory path.'}
implementation:
container:
image: # TODO: Reference the image URI for taxifare-deployment you just created
args: [{inputValue: Input Bucket}]
# TODO 3
PIPELINE_TAR = 'taxifare.tar.gz'
BQ2GCS_YAML = './bq2gcs.yaml'
TRAINJOB_YAML = './trainjob.yaml'
DEPLOYMODEL_YAML = './deploymodel.yaml'
@dsl.pipeline(
name='Taxifare',
description='Train a ml model to predict the taxi fare in NY')
def pipeline(gcs_bucket_name='<bucket where data and model will be exported>'):
bq2gcs_op = comp.load_component_from_file(BQ2GCS_YAML)
bq2gcs = bq2gcs_op(
input_bucket=gcs_bucket_name,
)
trainjob_op = # TODO: Load the yaml file for training
trainjob = # TODO: Add your code to run the training job
)
deploymodel_op = # TODO: Load the yaml file for deployment
deploymodel = # TODO: Addd your code to run model deployment
)
# TODO: Add the code to run 'trainjob' after 'bq2gcs' in the pipeline
# TODO: Add the code to run 'deploymodel' after 'trainjob' in the pipeline
# TODO: Compile the pipeline functon above
ls $PIPELINE_TAR
# TODO 4
run = client.run_pipeline(
experiment_id= # TODO: Add code for experiment id
job_name= # TODO: Provide a jobname
pipeline_package_path= # TODO: Add code for pipeline zip file
params={
'gcs_bucket_name': BUCKET,
},
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.svm import LinearSVC
model = LinearSVC()
model_name = type(model).__name__
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 引入波士顿房价数据
Step2: 我们仅使用RM(平均房间数)和Price(按1000美元计的房价中位数)两个指标。
Step3: 为了方便描述,我们这里引入一些符号。$x^{(i)}$ 用来表示输入变量,比如这里的平均房间数,它也被称为特征features。$y^{(i)}$ 用来表示输出变量,或我们希望预测的目标target变量。一对 $(x^{(i)}, y^{(i)})$ 称为一个训练样本training example,而我们将使用的 $m$ 个训练样本 ${(x^{(i)}, y^{(i)});i=1,...,m}$ 的集合称为训练集training set。注意这里的上标 $(i)$ 是对训练集的一个索引编号,不是幂运算。我们用 $\chi$ 表示输入空间,$\gamma$ 表示输出空间。在这个例子中 $\chi=\gamma=\mathbb{R}$
Step6: 对于这个数据集,$x$ 是一个$\mathbb{R}$上的一维向量。$x_1^i$是该区域的平均房间数。
Step7: 下面以数学形式定义梯度下降,随机选择某个初始 $\theta$,之后重复以下更新:
Step8: 可以看出,梯度下降训练出的 $\theta$ 十分接近使用高级优化算法计算出的最优 $\theta$;进一步增加good_enough函数中的精度,可以使梯度下降算法训练出更精确的 $\theta$(同时也需要花费更多的迭代次数,意味着更长的训练时间)。
Step9: 可以看到,使用正规方程计算的最优 $\theta$ 值,和sklearn线性模型计算出来的是一样的。
|
<ASSISTANT_TASK:>
Python Code:
import logging
from collections import namedtuple
import numpy as np
from pandas import DataFrame
from IPython.display import HTML
from numpy.linalg import inv
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import cross_val_score
# 引入绘图包
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib import cm
from matplotlib.colors import LogNorm
import seaborn as sns
sns.set_style('whitegrid')
logging.basicConfig(level=logging.INFO)
%matplotlib inline
from sklearn.datasets import load_boston
boston = load_boston()
print(boston.DESCR)
df = DataFrame(data=boston.data, columns=boston.feature_names)[["RM"]].join(DataFrame(data=boston.target, columns=["Price"]))
plt.scatter(df["RM"], df["Price"])
plt.xlabel("Number of rooms")
plt.ylabel("Price in $1000")
plt.show()
# 预览数据
print(df.head())
# 读取训练集和目标变量
X, y = df[["RM"]].values, df["Price"].values
m, n = X.shape
# 增加截距列
X_intercept = np.column_stack((np.ones((m, 1)), X))
@np.vectorize
def cost_function(*args):
    """Cost function: half the sum of squared residuals over the training set.

    *args* are the components of theta; relies on the module-level design
    matrix ``X_intercept`` and target vector ``y``. Vectorized so it can be
    evaluated over meshgrids of theta values for plotting.
    """
    theta = np.array(args)
    return 0.5 * ((X_intercept @ theta - y) ** 2).sum()
def gradient_descent_optimize(cost_func, init_theta=None):
    """Find the theta minimising *cost_func* by batch gradient descent.

    Iterates full-batch updates (using the module-level ``X_intercept`` and
    ``y``) until the relative cost improvement drops below 1e-8.

    :param cost_func: callable taking theta components and returning the cost
    :param init_theta: starting point; defaults to the zero vector
    :return: ``Result(theta, path)`` where ``path`` is every theta visited
    """
    alpha = 0.00003

    def good_enough(old_cost, new_cost):
        # Stop once the relative improvement is negligible
        return np.abs(new_cost - old_cost) / old_cost < 0.00000001

    def improve_theta(old_theta):
        # One batch gradient-descent step over the whole training set
        return old_theta - alpha * ((X_intercept @ old_theta - y) @ X_intercept)

    if init_theta is None:
        init_theta = np.array([0, 0])
    path_ = [init_theta]
    # BUG FIX: the initial cost must come from the cost_func argument, not
    # the module-level cost_function, so a caller-supplied cost is honoured.
    o_theta, o_cost = init_theta, cost_func(*init_theta)
    while True:
        n_theta = improve_theta(o_theta)
        n_cost = cost_func(*n_theta)
        path_.append(n_theta)
        logging.debug((o_theta, n_theta, o_cost, n_cost))
        if good_enough(o_cost, n_cost):
            break
        o_theta, o_cost = n_theta, n_cost
    Result = namedtuple('Result', ['theta', 'path'])
    return Result(theta=n_theta, path=np.array(path_))
# 使用梯度下降获取优化结果
init_guess = np.array([-10, 17.5])
gradient_descent_result = gradient_descent_optimize(cost_function, init_theta=init_guess)
path = gradient_descent_result.path
sampled_path = np.concatenate((np.array([path[0, ]]),
path[(2 ** np.array(range(int(np.ceil(np.log2(path.shape[0])))))).astype(np.int32), ]))
sampled_path_mat = sampled_path.T
# 使用sklearn的LinearRegression模型,直接获取最终的theta值
regr = LinearRegression()
regr.fit(X, y)
theta0_optimize, theta1_optimize = regr.intercept_, regr.coef_[0]
minima = np.array([theta0_optimize, theta1_optimize])
minima_ = minima.reshape(-1, 1)
# 根据最优化的theta,生成以此为中心的网格数据
step = 0.2
theta0_min, theta0_max = theta0_optimize - np.abs(theta0_optimize), theta0_optimize + np.abs(theta0_optimize)
theta1_min, theta1_max = theta1_optimize - np.abs(theta1_optimize), theta1_optimize + np.abs(theta1_optimize)
theta0_range, theta1_range = np.arange(theta0_min, theta0_max + step, step), np.arange(
theta1_min, theta1_max + step, step)
theta0_mat, theta1_mat = np.meshgrid(theta0_range, theta1_range)
z = cost_function(theta0_mat.reshape(-1), theta1_mat.reshape(-1)).reshape(theta0_mat.shape)
fig = plt.figure(figsize=(12, 6))
# 绘制surface平面图
ax1 = fig.add_subplot(1, 2, 1, projection='3d', elev=50, azim=-50)
ax1.plot_surface(theta0_mat, theta1_mat, z, norm=LogNorm(), cmap=cm.jet, rstride=1, cstride=1, edgecolor='none',
alpha=.8)
ax1.scatter(sampled_path_mat[0, :-1], sampled_path_mat[1, :-1], cost_function(*sampled_path_mat[::, :-1]))
ax1.plot(*minima_, cost_function(*minima_), 'r*', markersize=10)
ax1.set_xlabel('$theta_0$')
ax1.set_ylabel('$theta_1$')
ax1.set_zlabel('$J(theta)$')
ax1.set_xlim((theta0_min, theta0_max))
ax1.set_ylim((theta1_min, theta1_max))
ax1.set_title("Surface Map of J(theta)")
# 绘制contour轮廓图
ax2 = fig.add_subplot(1, 2, 2)
ax2.contour(theta0_mat, theta1_mat, z, levels=np.logspace(0, 5, 35), norm=LogNorm(), cmap=cm.jet)
ax2.plot(*minima_, 'r*', markersize=18)
ax2.quiver(sampled_path_mat[0, :-1], sampled_path_mat[1, :-1], sampled_path_mat[0, 1:] - sampled_path_mat[0, :-1],
sampled_path_mat[1, 1:] - sampled_path_mat[1, :-1],
scale_units='xy', angles='xy', scale=1, color='k')
ax2.set_xlabel('$theta0$')
ax2.set_ylabel('$theta1$')
ax2.set_xlim((theta0_min, theta0_max))
ax2.set_ylim((theta1_min, theta1_max))
ax2.set_title("Contour Map of J(theta)")
plt.tight_layout()
plt.show()
fig, ax = plt.subplots()
ax.scatter(X.reshape(-1), y, alpha=0.5)
plt.xlabel("Number of rooms")
plt.ylabel("Price in $1000")
x = np.arange(3, 10, 1)
line, = ax.plot([], [], lw=2)
def animate(i):
    """Animation frame i: redraw the fitted line for the i-th sampled theta."""
    # Predicted prices over the x grid using the i-th theta from the
    # gradient-descent path (prepend a column of ones for the intercept).
    line.set_data(x, np.column_stack((np.ones((x.size, 1)), x)) @ sampled_path[i])
    return line,
def init():
    """Animation init: start with an empty line so blitting has a clean base."""
    line.set_data([], [])
    return line,
anim = animation.FuncAnimation(fig, animate, frames=sampled_path.shape[0], init_func=init, interval=800,
repeat_delay=3000, blit=True)
HTML(anim.to_html5_video())
# 使用批量梯度下降训练出的theta
print(gradient_descent_result.theta)
# 使用sklearn的LinearRegression模型训练出的theta
print(minima)
# 使用正规方程计算出的theta
print(inv(X_intercept.T @ X_intercept) @ X_intercept.T @ y)
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check the version of the Graphistry module
Step2: 800K Edges, 1K Nodes (no attributes)
Step3: 800K Edges, 1K Nodes (5 integer node and edge attributes)
Step4: 50K edges, 100 nodes, 100 integer edges attributes 5 integer node attributes
Step5: 10K Edges, 100 Nodes (100 32 bytes random string edge attributes, 5 integer node attributes)
Step6: Epinions - 75,877 nodes, 508,836 edges
Step7: Facebook - 4039 Nodes, 88234 Edges
Step8: Amazon 262111 nodes , 1,234,877 Edges
|
<ASSISTANT_TASK:>
Python Code:
import random
import graphistry as g
import pandas as pd
from random import choice
from string import ascii_letters
from IPython.display import IFrame
g.__version__
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
edges = pd.DataFrame({'src': [ random.randint(0, 1000) for x in range(800000)],
'dest': [random.randint(0, 1000) for x in range(800000)]})
edges[:3]
g.edges(edges).bind(source='src', destination='dest').plot()
edges2 = edges;
nodes = pd.DataFrame({'name':[x for x in range(0, 1000)]})
for i in range(5):
edges2['intFld' + str(i)] = edges2.src.map(lambda x: random.randint(0, 100000))
for i in range(5):
nodes['intFld' + str(i)] = nodes.name.map(lambda x: random.randint(0, 100000))
g.edges(edges2).nodes(nodes).bind(source='src', destination='dest', node='name').plot()
edges = pd.DataFrame({'src': [ random.randint(0, 100) for x in range(50000)], 'dest': [random.randint(0, 100) for x in range(50000)]})
nodes = pd.DataFrame({'name':[x for x in range(0, 100)]})
for i in range(100):
edges['intFld' + str(i)] = edges.src.map(lambda x: random.randint(0, 100000))
for i in range(5):
nodes['intFld' + str(i)] = nodes.name.map(lambda x: random.randint(0, 100000))
g.edges(edges).nodes(nodes).bind(source='src', destination='dest', node='name').plot()
edges = pd.DataFrame({'src': [ random.randint(0, 100) for x in range(10000)],
'dest': [random.randint(0, 100) for x in range(10000)]})
for i in range(100):
edges['intFld' + str(i)] = edges.src.map(lambda x: (''.join(choice(ascii_letters) for i in range(32))))
for i in range(5):
nodes['intFld' + str(i)] = nodes.name.map(lambda x: random.randint(0, 100000))
g.edges(edges).nodes(nodes).bind(source='src', destination='dest', node='name').plot()
url = 'http://' + SERVER + '/graph/graph.html?dataset=Epinions&scene=default&info=true&play=10000&mapper=splunk&splashAfter=1477695505'
IFrame(url, width=700, height=350)
url = 'http://' + SERVER + '/graph/graph.html?dataset=Facebook&scene=default&info=true&play=10000&mapper=opentsdb&splashAfter=1477695505'
IFrame(url, width=700, height=350)
# Much larger than 800,000 nodes + edges. Does not need to render in 20 seconds.
url = 'http://' + SERVER + '/graph/graph.html?dataset=Amazon&scene=default&info=true&play=10000&mapper=splunk&splashAfter=1477695505'
IFrame(url, width=700, height=350)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: What I will do to get a better view of the file?
Step5: What questions I want to answer?
Step8: One street type needs to be cleaned ('AVE'). We will clean that street type when shaping our data structure. Please find below the function we'll use to clean street types.
Step11: Audit postcodes
Step14: Postcode are already clean.
Step17: Phone numbers need to be cleaned to match the following pattern (+33 X XX XX XX XX). We will clean those phone numbers when shaping our data structure.<br/>
Step19: Shape data
Step20: Insert data in MongoDb
Step21: Querying the MongoDb
Step22: Answering question
Step23: Bakery is the most popular shop in Plaisir. No kidding.. Plaisir is in France
Step24: Tennis is the sport with the most facilities in Plaisir.
Step25: There are more `restaurant` than `fast_food` amenities in Plaisir — good news!
Step26: What I need to do
|
<ASSISTANT_TASK:>
Python Code:
tags = {}
for event, elem in ET.iterparse("sample.osm"):
if elem.tag not in tags:
tags[elem.tag]= 1
else:
tags[elem.tag] += 1
print tags
tags_details = {}
keys = ["amenity","shop","sport","place","service","building"]
def create_tags_details(binder, list_keys, filename):
    """Inventory the values of selected tag attributes in an OSM file.

    For every <tag> element whose "k" attribute is one of ``list_keys``,
    count how often each "v" value occurs.  (The original docstring had
    lost its quotes and was a syntax error; restored here.)

    Args:
        binder: dict to fill; one sub-dict per key in ``list_keys``,
            mapping tag value -> occurrence count.
        list_keys: attribute keys ("k" values) to inventory.
        filename: path to the OSM XML file to parse.

    Returns:
        The populated ``binder`` dict.
    """
    for key in list_keys:
        binder[key] = {}
    # Stream-parse so the whole file never has to fit in memory.
    for event, elem in ET.iterparse(filename, events=("start",)):
        if elem.tag == "tag":
            # NOTE(review): a <tag> element has no children, so
            # elem.iter("tag") yields only the element itself; kept as-is
            # for fidelity with the original traversal.
            for tag in elem.iter("tag"):
                for key in list_keys:
                    if elem.attrib["k"] == key:
                        if tag.attrib["v"] not in binder[key]:
                            binder[key][tag.attrib["v"]] = 1
                        else:
                            binder[key][tag.attrib["v"]] += 1
    return binder
create_tags_details(tags_details,keys,"sample.osm")
# Accumulator: unexpected street type -> set of full street names using it.
street_types = col.defaultdict(set)
# French street types considered valid as the first word of a street name.
expected_street_type = ["Rue", "Route", "Ruelle", "Cours", "Avenue", "Impasse", "Mail","Boulevard", "Square", "Place", "Allee"]
# First word of the street name (French street names lead with the type).
street_type_re = re.compile(r'^\w+', re.IGNORECASE)

def audit_street(street_types, street_name):
    """Record ``street_name`` under its leading word when that word is not
    an expected street type.

    (The original docstring had lost its quotes and was a syntax error;
    restored here.)

    Args:
        street_types: dict-like mapping street type -> set of full names,
            typically a ``defaultdict(set)``.
        street_name: full street name to audit.
    """
    m = street_type_re.search(street_name)
    if m:
        street_type = m.group()
        if street_type not in expected_street_type:
            street_types[street_type].add(street_name)
def audit_street_map(file_in, street_types, pretty = False):
    """Audit every street name in an OSM file.

    Walks all <node> and <way> elements, feeds the value of each
    "addr:street" tag to ``audit_street`` and pretty-prints the resulting
    map of unexpected street types.  (The original docstring had lost its
    quotes and was a syntax error; restored here.)

    Args:
        file_in: path to the OSM XML file.
        street_types: dict-like accumulator passed to ``audit_street``.
        pretty: unused; kept for interface compatibility.
    """
    for _, element in ET.iterparse(file_in):
        if element.tag == "way" or element.tag == "node":
            for tag in element.iter("tag"):
                if tag.attrib['k'] == "addr:street":
                    audit_street(street_types, tag.attrib["v"])
    pp.pprint(dict(street_types))

# Run the audit over the full Plaisir extract.
audit_street_map("plaisir.osm", street_types, True)
# Matches keys that still contain a colon after "addr:" is removed,
# e.g. "street:name" from "addr:street:name".
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')

def clean_street_type(address_tag, address_dict):
    """Clean one "addr:*" tag and store it in ``address_dict``.

    The "addr:" prefix is stripped from the tag key.  Keys that still
    contain a colon (second-level keys such as "addr:street:name") are
    considered too messy and are skipped.  Street values starting with the
    abbreviation "AVE" are expanded to "Avenue".  (The original docstring
    had lost its quotes and was a syntax error; restored here.)

    Args:
        address_tag: dict with the tag's 'k' (key) and 'v' (value) entries.
        address_dict: destination dict for the cleaned key/value pair.
    """
    key = re.sub('addr:', '', address_tag['k']).strip()
    if lower_colon.match(key):
        return None
    else:
        if address_tag['v'].startswith("AVE"):
            address_dict[key] = re.sub(r'^AVE', 'Avenue', address_tag['v'])
        else:
            address_dict[key] = address_tag['v']
# Please find below a test of the function which presents how the function works.
# (The line above was stray markdown in the cell -- a syntax error; kept as a comment.)
tree = ET.parse(OSM_FILE)
root = tree.getroot()
test_tags = root.findall("./node/tag")
test_address_tags_dict = {}
for tag in test_tags:
    tag_dict = {}
    if tag.attrib['k'].startswith("addr:"):
        clean_street_type(tag.attrib, tag_dict)
    if tag_dict:
        # Key by concatenated k+v so identical address entries collapse.
        test_address_tags_dict[tag.attrib['k'] + tag.attrib['v']] = tag_dict
def take(n, iterable):
    """Return the first ``n`` items of ``iterable`` as a list."""
    first_items = islice(iterable, n)
    return list(first_items)
pp.pprint(take(20, test_address_tags_dict.iteritems()))
# Accumulator for postcodes that do not match the expected format.
postcodes = set()
# French postcodes are five consecutive digits.
postcode_re = re.compile(r'\d\d\d\d\d', re.IGNORECASE)

def audit_postcode(postcodes, postcode):
    """Add ``postcode`` to ``postcodes`` when it does not contain five
    consecutive digits (the expected French format).

    (The original docstring had lost its quotes and was a syntax error;
    restored.  Also replaced ``m == None`` with the idiomatic ``is None``.)

    Args:
        postcodes: set collecting the unexpected values.
        postcode: the postcode string to audit.
    """
    m = postcode_re.search(postcode)
    if m is None:
        postcodes.add(postcode)
def audit_postcode_map(file_in, postcodes, pretty = False):
    """Audit every postcode in an OSM file.

    Walks all <node> and <way> elements, feeds the value of each
    "addr:postcode" tag to ``audit_postcode`` and pretty-prints the set of
    unexpected values.  (The original docstring had lost its quotes and was
    a syntax error; restored here.)

    Args:
        file_in: path to the OSM XML file.
        postcodes: set accumulator passed to ``audit_postcode``.
        pretty: unused; kept for interface compatibility.
    """
    for _, element in ET.iterparse(file_in):
        if element.tag == "way" or element.tag == "node":
            for tag in element.iter("tag"):
                if tag.attrib['k'] == "addr:postcode":
                    audit_postcode(postcodes, tag.attrib["v"])
    pp.pprint(postcodes)

# Run the audit over the full Plaisir extract.
audit_postcode_map("plaisir.osm", postcodes, True)
# Accumulator for phone numbers that do not match the expected format.
phone_numbers = []
# Expected international French format: "+33 X XX XX XX XX".
phone_re = re.compile(r'\+\d\d\s\d\s\d\d\s\d\d\s\d\d\s\d\d', re.IGNORECASE)

def audit_phone(phone_numbers, phone_number):
    """Append ``phone_number`` to ``phone_numbers`` when it does not match
    the expected "+33 X XX XX XX XX" pattern.

    (The original docstring had lost its quotes and was a syntax error;
    restored.  Also replaced ``m == None`` with the idiomatic ``is None``.)

    Args:
        phone_numbers: list collecting the unexpected values.
        phone_number: the phone number string to audit.
    """
    m = phone_re.search(phone_number)
    if m is None:
        phone_numbers.append(phone_number)
def audit_phone_map(file_in, phone_numbers, pretty = False):
    """Audit every phone number in an OSM file.

    Walks all <node> and <way> elements and feeds the value of each
    "phone" tag to ``audit_phone``.  (The original docstring had lost its
    quotes and was a syntax error; restored here.)

    Args:
        file_in: path to the OSM XML file.
        phone_numbers: list accumulator passed to ``audit_phone``.
        pretty: unused; kept for interface compatibility.

    Returns:
        The list of numbers not matching "+33 X XX XX XX XX".
    """
    for _, element in ET.iterparse(file_in):
        if element.tag == "way" or element.tag == "node":
            for tag in element.iter("tag"):
                if tag.attrib['k'] == "phone":
                    audit_phone(phone_numbers, tag.attrib["v"])
    return phone_numbers

# Run the audit over the full Plaisir extract.
audit_phone_map("plaisir.osm", phone_numbers, True)
# Domestic space-separated format, e.g. "01 30 55 84 22".
classic_france = re.compile(r'\d\d\s\d\d\s\d\d\s\d\d\s\d\d')
# Domestic dot-separated format, e.g. "01.30.55.84.22".
classic_france_dot = re.compile(r'\d\d\.\d\d\.\d\d\.\d\d\.\d\d')
# Compressed format, e.g. "0130558422".
classic_france_compiled = re.compile(r'\d\d\d\d\d\d\d\d\d\d')

def clean_phone_numbers(phone_tag, main_dict):
    """Normalise a phone tag to "+33 X XX XX XX XX" and store it.

    Three malformed layouts identified during the audit are handled:
    space-separated ("01 30 55 84 22"), dot-separated ("01.30.55.84.22")
    and compressed ("0130558422").  Anything else is stored unchanged.
    (The original docstring had lost its quotes and was a syntax error;
    restored here.)

    Args:
        phone_tag: dict with the tag's 'k' and 'v' entries.
        main_dict: destination dict; the (possibly cleaned) value is stored
            under the tag's original key.
    """
    if classic_france.match(phone_tag['v']):
        # Replace the leading "0" with the "+33 " country prefix.
        value = re.sub(r'^\d', '+33 ', phone_tag['v'])
        main_dict[phone_tag['k']] = value
    elif classic_france_dot.match(phone_tag['v']):
        value = re.sub(r'^\d', '+33 ', phone_tag['v'])
        value = re.sub(r'\.', ' ', value)
        main_dict[phone_tag['k']] = value
    elif classic_france_compiled.match(phone_tag['v']):
        # Split the 10 digits into pairs, then add the country prefix.
        value = " ".join(phone_tag['v'][i:i+2] for i in range(0, len(phone_tag['v']), 2))
        value = re.sub(r'^\d', '+33 ', value)
        main_dict[phone_tag['k']] = value
    else:
        main_dict[phone_tag['k']] = phone_tag['v']
# Please find below a test of the function which presents how the function works.
# (The line above was stray markdown in the cell -- a syntax error; kept as a comment.)
test_phone_tags_dict = {}
for tag in test_tags:
    tag_dict = {}
    if tag.attrib['k'] == "phone":
        clean_phone_numbers(tag.attrib, tag_dict)
    if tag_dict:
        test_phone_tags_dict[tag.attrib['k'] + " " + tag.attrib['v']] = tag_dict
# Show the first 20 cleaned numbers (Python 2: dict.iteritems()).
pp.pprint(take(20, test_phone_tags_dict.iteritems()))
# Attributes describing how/when the element was created.
CREATED = [ "version", "changeset", "timestamp", "user", "uid"]
# Positional attributes, folded into a single [lat, lon] pair.
POS = ["lon","lat"]
# Tag keys describing what the element is; grouped under "types".
BUILDING_TYPES = ["amenity","shop","sport","place","service","building","highway"]

def shape_element(element):
    """Shape one top-level OSM element into the document stored in MongoDB.

    Only <node> and <way> elements are kept; everything else yields None.
    The resulting dict groups creation metadata under "created", position
    under "pos" ([lat, lon]), cleaned "addr:*" tags under "address",
    referenced node ids under "node_refs" and descriptive tags under
    "types".  (The original docstring had lost its quotes and was a syntax
    error; restored here.)

    Args:
        element: an ``xml.etree.ElementTree.Element``.

    Returns:
        The shaped dict, or None for unsupported element types.
    """
    node = {}
    pos = []
    node_refs = []
    created = {}
    address = {}
    types = {}
    if element.tag == "node" or element.tag == "way":
        types['type'] = element.tag
        if 'lat' in element.attrib.keys() and 'lon' in element.attrib.keys():
            try:
                lat = float(element.attrib['lat'])
                lon = float(element.attrib['lon'])
                pos.insert(0, lat)
                pos.insert(1, lon)
            # FIX: was a bare "except: pass"; only malformed coordinates
            # (float() failures) should be silently skipped.
            except ValueError:
                pass
        for k, m in element.attrib.items():
            if k not in POS:
                if k in CREATED:
                    created[k] = m
                else:
                    node[k] = m
        for child in element:
            if child.tag == "nd":
                node_refs.append(child.attrib['ref'])
            elif child.tag == "tag":
                if child.attrib['k'].startswith("addr:"):
                    clean_street_type(child.attrib, address)
                elif child.attrib['k'] == 'phone':
                    clean_phone_numbers(child.attrib, node)
                elif child.attrib['k'] in BUILDING_TYPES:
                    types[child.attrib['k']] = child.attrib['v']
        # Only attach the sub-documents that are non-empty.
        if types:
            node['types'] = types
        if created:
            node['created'] = created
        if pos:
            node['pos'] = pos
        if address:
            node['address'] = address
        if node_refs:
            node['node_refs'] = node_refs
        return node
    else:
        return None
def process_map(file_in, pretty = False):
    """Parse ``file_in`` and return the list of shaped documents, one per
    top-level OSM element accepted by ``shape_element``."""
    shaped = (shape_element(element) for _, element in ET.iterparse(file_in))
    return [document for document in shaped if document]
data = process_map('plaisir.osm', True)
# Connect to the local MongoDB instance and select the working database.
from pymongo import MongoClient
client = MongoClient("mongodb://localhost:27017")
db = client.osm_udacity
from bson.objectid import ObjectId

def insert_data(data, db):
    """Insert every shaped document into the ``plaisir_osm`` collection,
    assigning each one a freshly generated ObjectId."""
    for item in data:
        item['_id'] = ObjectId()
        db.plaisir_osm.insert_one(item)

insert_data(data, db)
# Sanity check (Python 2 print statement -- this notebook targets Python 2).
print db.plaisir_osm.find_one()
def make_group_pipeline(type_node):
    """Build an aggregation pipeline counting documents grouped by
    ``type_node`` and keeping the five most frequent groups."""
    group_stage = {'$group': {'_id': type_node, 'count': {'$sum': 1}}}
    sort_stage = {'$sort': {'count': -1}}
    limit_stage = {'$limit': 5}
    return [group_stage, sort_stage, limit_stage]
def aggregate(db, pipeline):
    """Run ``pipeline`` on ``db`` and materialise the result cursor as a list."""
    return list(db.aggregate(pipeline))
# Top-5 element types, shop types and sport types via the shared helper.
pipeline = make_group_pipeline('$types.type')
result = aggregate(db.plaisir_osm, pipeline)
pp.pprint(result)
pipeline_shop = make_group_pipeline('$types.shop')
result_shop = aggregate(db.plaisir_osm, pipeline_shop)
pp.pprint(result_shop)
pipeline_sport = make_group_pipeline('$types.sport')
result_sport = aggregate(db.plaisir_osm, pipeline_sport)
pp.pprint(result_sport)
# Restaurants vs fast food: filter to the two amenity values, then count each.
pipeline_restaurant = [{'$match': {"$or" : [{"types.amenity": "restaurant"},{"types.amenity":"fast_food"}]}},
                       {'$group':{'_id':'$types.amenity','count':{'$sum':1}}},
                       {'$sort':{'count':-1}}]
result_restaurant = aggregate(db.plaisir_osm, pipeline_restaurant)
pp.pprint(result_restaurant)
#check of the geospacial index - the few lines below are here to check that the 2d index is properly working.
for doc in db.plaisir_osm.find({'pos': {'$near' : [48.5,1.95]}}):
    pp.pprint(doc)
    break
#Get max and min latitude and longitude
# First $unwind splits each [lat, lon] pair; the per-id $group reassembles
# lat ($first) and lon ($last); the final $group takes global extrema.
for doc in db.plaisir_osm.aggregate([
        { "$unwind": "$pos" },
        { "$group": {
            "_id": "$_id",
            "lat": { "$first": "$pos" },
            "lon": { "$last": "$pos" }
        }},
        { "$group": {
            "_id": "null",
            "minLat": { "$min": "$lat" },
            "minLon": { "$min": "$lon" },
            "maxLat": { "$max": "$lat" },
            "maxLon": { "$max": "$lon" }
        }}
        ]):
    pp.pprint(doc)
# Per-grid-cell accumulator for the analysis below.
main_dict = {}
def frange(start, stop, step):
    """Yield floats from ``start`` (inclusive) up to ``stop`` (exclusive)
    in increments of ``step``, by repeated addition."""
    value = start
    while value < stop:
        yield value
        value = value + step
# Sweep a lat/lon grid over the Plaisir area; for each cell count bus stops
# and (non-bench) amenities with a $geoWithin box query.
for lat in frange(48.76,48.86,0.01):
    for lon in frange(1.85,2.05,0.02):
        main_dict[str(lon) + " - " + str(lat)] = {}
        bus_stop = 0
        amenity = 0
        for doc in db.plaisir_osm.find({'pos': { '$geoWithin': { '$box': [ [ lat, lon ], [ (lat + 0.01), (lon + 0.02) ] ] } }}):
            if 'highway' in doc['types']:
                if doc['types']['highway'] == "bus_stop":
                    bus_stop += 1
            elif 'amenity' in doc['types']:
                if doc['types']['amenity'] == 'bench':
                    pass  # benches deliberately excluded from the amenity count
                else:
                    amenity += 1
        main_dict[str(lon) + " - " + str(lat)]['bus_stop'] = bus_stop
        main_dict[str(lon) + " - " + str(lat)]['amenity'] = amenity
# Keep only the grid cells containing at least one amenity AND one bus stop.
new_dict = {cell: counts for cell, counts in main_dict.items()
            if counts['amenity'] != 0 and counts['bus_stop'] != 0}
# Now that the dictionary is ready for the analysis, move to pandas.
# (%matplotlib inline is an IPython magic; only valid inside a notebook.)
%matplotlib inline
import seaborn as sns
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# One row per grid cell; columns are the amenity / bus_stop counts.
df = pd.DataFrame.from_dict(new_dict,orient="index")
# Average bus-stop count for each amenity count.
df2 = df.groupby('amenity').aggregate(np.average)
df2.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Connect to server
Step2: <hr> Random spatial graphs
Step3: We can add a color to each node. Here we color the same graph based on distance from the origin.
Step4: As with other plots, we can also color using group labels.
Step5: <hr> Edge bundling
Step6: Another option is to bundle edges together using an algorithm by Holton and Van Wijk, emphasizing large tracts.
|
<ASSISTANT_TASK:>
Python Code:
import os
from lightning import Lightning
from numpy import random, asarray, argmin
from colorsys import hsv_to_rgb
import networkx as nx
# Connect to the public lightning-viz server.
lgn = Lightning(ipython=True, host='http://public.lightning-viz.org')
# Random geometric graph: 100 nodes, edges between nodes closer than 0.2.
G = nx.random_geometric_graph(100, 0.2)
# NOTE(review): dict.values() fed straight to asarray -- Python 2 style;
# under Python 3 this would need list(...).
pos = asarray(nx.get_node_attributes(G, 'pos').values())
mat = nx.adjacency_matrix(G).todense()
lgn.graph(pos[:,0], pos[:,1], mat)
# Colour nodes by squared distance from the centre (0.5, 0.5).
dists = [(x - 0.5)**2 + (y - 0.5)**2 for x, y in pos]
lgn.graph(pos[:,0], pos[:,1], mat, values=dists, colormap='Greens')
# Group nodes by shortest-path length from the node nearest the centre.
center = argmin(dists)
p = nx.single_source_shortest_path_length(G, center)
xy = asarray([pos[i,:] for i in p.keys()])
g = p.values()
lgn.graph(xy[:,0], xy[:,1], mat, group=g)
# Denser graph for the edge-bundling comparison.
G = nx.random_geometric_graph(50, 0.5)
pos = asarray(nx.get_node_attributes(G, 'pos').values())
dists = [(x - 0.5)**2 + (y - 0.5)**2 for x, y in pos]
mat = nx.adjacency_matrix(G).todense()
lgn.graph(pos[:,0], pos[:,1], mat)
lgn.graphbundled(pos[:,0], pos[:,1], mat)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a single layer RNN with LSTMs and train it with a toy dataset.
Step2: Now we are going to increase the depth of our RNN. Let's train an RNN with 2 and 4 layers. What parameter do you need to set to change the number of layers in your RNN? For a hint look here.
Step3: Compare the loss from our single and multi-layer RNN.
Step4: Train an RNN with MSCOCO Captions
Step5: Now you are ready to train an RNN with the MSCOCO captions. Before you start training define the dropout values you want to use for training. If you need a hint check here.
Step6: Does the single word prediction cell above represent a deployment scenario?
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import time
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
#import reader
import collections
import os
import re
import json
import matplotlib.pyplot as plt
!nvidia-smi
# One-hot encoding demo: look up rows of a 10x10 identity matrix.
tf.reset_default_graph()
a=np.array([[1,2,3,4,5,0],[1,2,3,4,6,0]])
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    # Each token id becomes a one-hot row: result has shape (2, 6, 10).
    result = tf.nn.embedding_lookup(np.identity(10), a).eval()
    # Unstack along the time axis -> list of 6 tensors of shape (2, 10).
    example_input = sess.run([tf.unstack(result,6,1)])
    coord.request_stop()
    coord.join(threads)
    sess.close()
print('one-hot encoded inputs')
print(result)
print('shape of the input')
print(result.shape)
print('reshaped input for training')
print(example_input)
# --- Single-layer LSTM trained on a two-sentence toy dataset -----------------
plot_loss=[]
num_hidden=24
num_steps=6
dict_length=8
batch_size=2
tf.reset_default_graph()
## Make Variables
# Output projection: hidden state -> vocabulary logits.
variables_dict = {
    "weights1":tf.Variable(tf.truncated_normal([num_hidden,dict_length],stddev=1.0,dtype=tf.float32),name="weights1"),
    "biases1": tf.Variable(tf.truncated_normal([dict_length],stddev=1.0,dtype=tf.float32), name="biases1")}
# Create input data
small_dict=['EOS','i','will','walk','the','dog','cat','run']
X=np.array([[1,2,7,4,5,0],[1,2,3,4,6,0]],dtype=np.int32)
X_one_hot=tf.nn.embedding_lookup(np.identity(dict_length), X) #[batch,num_steps,dictionary_length][2,6,7]
# Targets are the inputs shifted left by one step (next-word prediction).
y=np.zeros((batch_size,num_steps),dtype=np.int32)
y[:,:-1]=X[:,1:]
y_one_hot=tf.unstack(tf.nn.embedding_lookup(np.identity(dict_length), y),num_steps,1) #[batch,num_steps,dictionary_length][2,6,7]
y_target_reshape=tf.reshape(y_one_hot,[batch_size*num_steps,dict_length])
#Create our LSTM
cell = tf.contrib.rnn.LSTMCell(num_units=num_hidden, state_is_tuple=True)
outputs, last_states = tf.contrib.rnn.static_rnn(
    cell=cell,
    dtype=tf.float32,
    inputs=tf.unstack(tf.to_float(X_one_hot),num_steps,1))
# Flatten every (step, batch) pair so one matmul scores all timesteps.
output_reshape=tf.reshape(outputs, [batch_size*num_steps,num_hidden]) #[12==batch_size*num_steps,num_hidden==12]
pred=tf.matmul(output_reshape, variables_dict["weights1"]) +variables_dict["biases1"]
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_target_reshape))
optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(300):
        loss,_,y_target,y_pred,output=sess.run([cost,optimizer,y_target_reshape,pred,outputs])
        plot_loss.append([loss])
        # Report progress every 25 iterations.
        if i% 25 ==0:
            print("iteration: ",i," loss: ",loss)
            print(y_target)
            print(np.argmax(y_pred,1))
    coord.request_stop()
    coord.join(threads)
    sess.close()
#Lets look at one input data point at each step and its prediction
print("Input Sentence")
print([small_dict[ind] for ind in X[0,:]])
print("Predicted words")
# Rows of y_pred interleave batch entries; [0::2] follows batch item 0
# across timesteps (stride == batch_size == 2).
print([small_dict[ind] for ind in np.argmax(y_pred[0::2],1)])
#Now let's try multiple layers
plot_loss2=[]
num_hidden=24
num_steps=6
dict_length=8
batch_size=2
num_layers=2
tf.reset_default_graph()
## Make Variables
variables_dict = {
    "weights1":tf.Variable(tf.truncated_normal([num_hidden,dict_length],stddev=1.0,dtype=tf.float32),name="weights1"),
    "biases1": tf.Variable(tf.truncated_normal([dict_length],stddev=1.0,dtype=tf.float32), name="biases1")}
# Create input data
small_dict=['EOS','i','will','walk','the','dog','cat','run']
X=np.array([[1,2,7,4,5,0],[1,2,3,4,6,0]],dtype=np.int32)
X_one_hot=tf.nn.embedding_lookup(np.identity(dict_length), X) #[batch,num_steps,dictionary_length][2,6,7]
y=np.zeros((batch_size,num_steps),dtype=np.int32)
y[:,:-1]=X[:,1:]
y_one_hot=tf.unstack(tf.nn.embedding_lookup(np.identity(dict_length), y),num_steps,1) #[batch,num_steps,dictionary_length][2,6,7]
y_target_reshape=tf.reshape(y_one_hot,[batch_size*num_steps,dict_length])
# Keep-probabilities of 1.0 disable dropout for this run.
input_keep_prob=1.0#input_keep_prob,
output_keep_prob=1.0#output_keep_pro
##################### Create a multilayer RNN ####################
layer_cell=[]
for _ in range(num_layers):
    lstm_cell = tf.contrib.rnn.LSTMCell(num_units=num_hidden, state_is_tuple=True)
    lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
                                              input_keep_prob=input_keep_prob,
                                              output_keep_prob=output_keep_prob)
    layer_cell.append(lstm_cell)
cell = tf.contrib.rnn.MultiRNNCell(layer_cell, state_is_tuple=True)
outputs, last_states = tf.contrib.rnn.static_rnn(
    cell=cell,
    dtype=tf.float32,
    inputs=tf.unstack(tf.to_float(X_one_hot),num_steps,1))
output_reshape=tf.reshape(outputs, [batch_size*num_steps,num_hidden]) #[12==batch_size*num_steps,num_hidden==12]
pred=tf.matmul(output_reshape, variables_dict["weights1"]) +variables_dict["biases1"]
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_target_reshape))
optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(300):
        loss,_,y_target,y_pred,output=sess.run([cost,optimizer,y_target_reshape,pred,outputs])
        plot_loss2.append([loss])
        if i% 25 ==0:
            print("iteration: ",i," loss: ",loss)
            print(y_target)
            print(np.argmax(y_pred,1))
    coord.request_stop()
    coord.join(threads)
    sess.close()
## import matplotlib.pyplot as plt
# Compare single-layer vs multi-layer training curves.
plt.plot(plot_loss,'r.')
plt.plot(plot_loss2,'b--')
plt.legend(["1-Layer LSTM","Multi-Layer LSTM"])
plt.ylabel("loss")
plt.xlabel("iteration")
plt.title("Loss During Training")
plt.show()
num_steps=20
## Read Training files
with open("/data/mscoco/captions_train2014.json") as data_file:
    data=json.load(data_file)
TotalNumberofCaptions=len(data['annotations'])
sentences=[]
##Create a list of all of the sentences (lower-cased, punctuation stripped).
for i in range(TotalNumberofCaptions):
    sentences+=[re.sub('[^A-Za-z0-9]+',' ',data['annotations'][i]['caption']).lower()]
TotalWordList=[]
for i in range(TotalNumberofCaptions):
    TotalWordList+=re.sub('[^A-Za-z0-9]+',' ',data['annotations'][i]['caption']).lower().split()
#Determine number of distinct words
distintwords=collections.Counter(TotalWordList)
#Order words by descending frequency (ties broken alphabetically)
count_pairs = sorted(distintwords.items(), key=lambda x: (-x[1], x[0]))
words, occurence = list(zip(*count_pairs))
DictionaryLength=occurence.index(4) #index of the first word occurring only 4 times
words=['PAD','UNK','EOS']+list(words[:DictionaryLength])
word_to_id=dict(zip(words, range(len(words))))
#Tokenize Sentence
Tokenized=[]
for full_words in sentences:
    EmbeddedSentence=[word_to_id[word] for word in full_words.split() if word in word_to_id]+[word_to_id['EOS']]
    #Pad sentences that are shorter than the number of steps
    if len(EmbeddedSentence)<num_steps:
        b=[word_to_id['PAD']]*num_steps
        b[:len(EmbeddedSentence)]=EmbeddedSentence
    if len(EmbeddedSentence)>num_steps:
        b=EmbeddedSentence[:num_steps]
    # BUG FIX: the original read "if len(b)==EmbeddedSentence: b=EmeddedSentence",
    # comparing an int to a list (always False) and referencing a misspelled
    # name -- captions of exactly num_steps tokens silently reused the previous
    # iteration's buffer.  Use the sentence itself when it fits exactly.
    if len(EmbeddedSentence)==num_steps:
        b=EmbeddedSentence
    # NOTE(review): every id already passed the "word in word_to_id" filter,
    # so ids >= DictionaryLength here are the three highest-id dictionary
    # words, not out-of-vocabulary words -- confirm the threshold is intended.
    b=[word_to_id['UNK'] if x>=DictionaryLength else x for x in b] #turn all words used 4 times or less to 'UNK'
    Tokenized+=[b]
print(sentences[:10])
print(Tokenized[:10])
############################################# Parameters #####################################################
num_hidden=256
num_steps=20
dict_length=len(words)
batch_size=4
num_layers=1
## Create labels
# Labels are the captions shifted left one token, PAD-filled at the end.
Label=[]
for caption in Tokenized:
    Label+=[caption[1:]+[word_to_id['PAD']]]
NumberofCasestoEvaluate=20
TrainingInputs=Tokenized[:NumberofCasestoEvaluate]
LabelInputs=Label[:NumberofCasestoEvaluate]
#Print out some variables
print(TrainingInputs[0])
print(LabelInputs[0])
print("Number of words in this dictionary ", len(words))
#Create our input queue
def data_input_queue(TrainingInputs, LabelInputs, num_steps):
    """Build TF1 queue ops yielding shuffled (caption, label) batches.

    NOTE(review): reads the module-level ``batch_size`` -- confirm it is
    defined before the first call.

    Returns:
        (train_x, train_y) batch tensors of shape [batch_size, num_steps].
    """
    train_input_queue = tf.train.slice_input_producer(
        [TrainingInputs, LabelInputs],
        shuffle=True)
    ##Set our train data and label input shape for the queue
    TrainingInput=train_input_queue[0]
    LabelInput=train_input_queue[1]
    TrainingInput.set_shape([num_steps])
    LabelInput.set_shape([num_steps])
    min_after_dequeue=100000
    capacity = min_after_dequeue + 3 * batch_size
    #input_x, target_y
    train_x, train_y = tf.train.batch([TrainingInput, LabelInput],
                                      batch_size=batch_size ,
                                      capacity=capacity,
                                      num_threads=4)
    return train_x, train_y
tf.reset_default_graph()
loss_mscoco=[]
#######################################################################################################
NumberofCasestoEvaluate=100
TrainingInputs=Tokenized[:NumberofCasestoEvaluate]
LabelInputs=Label[:NumberofCasestoEvaluate]
## Make Variables
# tf Graph input
x = tf.placeholder(dtype=tf.int32, shape=(batch_size , num_steps))
y = tf.placeholder(dtype=tf.int32, shape=(batch_size, num_steps))
#tf.get_variable_scope().reuse_variables()
variables_dict = {
    "weights_mscoco":tf.Variable(tf.truncated_normal([num_hidden,dict_length],
                                                     stddev=1.0,dtype=tf.float32),name="weights_mscoco"),
    "biases_mscoco": tf.Variable(tf.truncated_normal([dict_length],
                                                     stddev=1.0,dtype=tf.float32), name="biases_mscoco")}
# Create input data
train_x, train_y =data_input_queue(TrainingInputs, LabelInputs, num_steps)
mscoco_dict=words
X_one_hot=tf.nn.embedding_lookup(np.identity(dict_length), x) #[batch,num_steps,dictionary_length]
y_one_hot=tf.unstack(tf.nn.embedding_lookup(np.identity(dict_length), y),num_steps,1) #[batch,num_steps,dictionary_length]
y_target_reshape=tf.reshape(y_one_hot,[batch_size*num_steps,dict_length])
input_keep_prob=1.0
output_keep_prob=1.0
#Create a multilayer RNN
layer_cell=[]
for _ in range(num_layers):
    lstm_cell = tf.contrib.rnn.LSTMCell(num_units=num_hidden, state_is_tuple=True)
    ############# add dropout #########################
    lstm_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
                                              input_keep_prob=input_keep_prob,
                                              output_keep_prob=output_keep_prob)
    layer_cell.append(lstm_cell)
cell = tf.contrib.rnn.MultiRNNCell(layer_cell, state_is_tuple=True)
# BUG FIX: the original passed cell=lstm_cell (only the last layer) to
# static_rnn, silently discarding the MultiRNNCell built above.  The
# earlier toy multilayer cell correctly passes cell=cell; do the same here.
outputs, last_states = tf.contrib.rnn.static_rnn(
    cell=cell,
    dtype=tf.float32,
    inputs=tf.unstack(tf.to_float(X_one_hot),num_steps,1))
output_reshape=tf.reshape(outputs, [batch_size*num_steps,num_hidden])
pred=tf.matmul(output_reshape, variables_dict["weights_mscoco"]) +variables_dict["biases_mscoco"]
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y_target_reshape))
optimizer = tf.train.AdamOptimizer(0.01).minimize(cost,aggregation_method = tf.AggregationMethod.EXPERIMENTAL_TREE)
init_op = tf.group(tf.global_variables_initializer(),
                   tf.local_variables_initializer())
with tf.Session() as sess:
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    for i in range(1500):
        # Pull a shuffled batch from the queue, then train on it.
        x_input,y_input=sess.run([train_x, train_y])
        loss,_,y_target,y_pred=sess.run([cost,optimizer,y_target_reshape,pred],feed_dict={x:x_input,y:y_input})
        loss_mscoco.append([loss])
        if i% 100==0:
            print("iteration: ",i, "loss: ",loss)
    print("Done Training")
    coord.request_stop()
    coord.join(threads)
    sess.close()
#Lets look at one input data point and its prediction
print("Input Sentence")
batch_element=2
print([words[ind] for ind in x_input[batch_element,:]])
print("Target")
print([words[ind] for ind in y_input[batch_element,:]])
print("Predicted words")
# Rows of y_pred interleave batch entries; stride by batch_size to follow
# one caption across timesteps.
print([words[ind] for ind in np.argmax(y_pred[batch_element::batch_size],1)])
#Free our GPU memory before proceeding to the next part of the lab
# os._exit kills the notebook kernel outright, releasing its GPU memory.
import os
os._exit(00)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: B
Step2: C
Step3: D
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.testing.assert_allclose(1.5, flexible_mean(1.0, 2.0))
np.testing.assert_allclose(0.0, flexible_mean(-100, 100))
np.testing.assert_allclose(1303.359375, flexible_mean(1, 5452, 43, 34, 40.23, 605.2, 4239.2, 12.245))
assert make_dict(one = "two", three = "four") == {"one": "two", "three": "four"}
assert make_dict() == {}
l1 = [1, 2, 3, 4, 5, 2]
s1 = 2
a1 = [1, 5]
assert set(a1) == set(find_all(l1, s1))
l2 = ["a", "random", "set", "of", "strings", "for", "an", "interesting", "strings", "problem"]
s2 = "strings"
a2 = [4, 8]
assert set(a2) == set(find_all(l2, s2))
l3 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
s3 = 11
a3 = []
assert set(a3) == set(find_all(l3, s3))
l1 = [1, 2, 3, 4, 5, 2]
s1 = [2, 5]
a1 = {2: 2, 5: 1}
assert a1 == element_counts(l1, s1)
l2 = ["a", "random", "set", "of", "strings", "for", "an", "interesting", "strings", "problem"]
s2 = ["strings", "of", "notinthelist"]
a2 = {"strings": 2, "of": 1, "notinthelist": 0}
assert a2 == element_counts(l2, s2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solvers
Step2: Undeterdetermined case (expect gradient descent to give sublinear convergence)
Step3: Question for thought
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from numpy.linalg import norm
from matplotlib import pyplot as plt
rng = np.random.default_rng()
def gradientDescent(f,grad,stepsize,x0,maxiter=1e3):
x = x0.copy()
fHist = []
for k in range(int(maxiter)):
x -= stepsize*grad(x)
fHist.append( f(x) )
return x, fHist
def NesterovGradientDescent(f,grad,stepsize,x0,maxiter=1e3,restart=np.Inf):
x = x0.copy()
y = x.copy()
fHist = []
kk = 0
for k in range(int(maxiter)):
xOld = x.copy()
x = y - stepsize*grad(y)
kk = kk + 1
if kk > restart:
kk = 0
y = x + kk/(kk+3)*(x-xOld)
fHist.append( f(x) )
return x, fHist
rng = np.random.default_rng(1)
m = 49
n = 50
A = rng.normal( size=(m,n) )
xStar = np.ones( (n,1) )
#b = rng.normal( size=(m,1) )
b = A@xStar
L = norm(A,ord=2)**2
# For gradient descent on 1/2||Ax-b||^2
f = lambda x : norm(A@x-b)**2/2
grad= lambda x : A.T@( A@x-b )
fStar = 0
x0 = np.zeros((n,1))
# And if we do subgradient descent on ||Ax-b||
# (Note: if we measure f2(x) convergence, since it's not squared
# we'd of course at least expect sqrt() slower... )
f2 = lambda x : norm(A@x-b)
def subgrad(x):
r = A@x-b
return A.T@(r/norm(r))
rho = norm(A,ord=2)
B = norm(xStar-x0)
maxiter = 1e4
x_gd, fHist_gd = gradientDescent(f,grad,1/L,x0,maxiter=maxiter)
x_Nest, fHist_Nest = NesterovGradientDescent(f,grad,1/L,x0,maxiter=maxiter)
# subgradient descent:
step = B/rho/np.sqrt(maxiter)
x_sgd, fHist_sgd = gradientDescent(f2,subgrad,step,x0,maxiter=maxiter)
plt.figure(figsize=(12,7))
plt.loglog( fHist_gd, label='Gradient Descent' )
plt.loglog( fHist_Nest, label='Nesterov Acceleration' )
plt.loglog( fHist_sgd, label='Subgradient Descent' )
k = np.arange(1,maxiter)
plt.loglog(k,90/k,'--',label='$1/k$')
plt.loglog(k,90/k**2,'--',label='$1/k^2$')
plt.legend()
plt.grid()
plt.show()
rng = np.random.default_rng(1)
m = 55
n = 50
A = rng.normal( size=(m,n) )
xStar = np.ones( (n,1) )
b = A@xStar
L = norm(A,ord=2)**2
# For gradient descent on 1/2||Ax-b||^2
f = lambda x : norm(A@x-b)**2/2
grad= lambda x : A.T@( A@x-b )
fStar = 0
x0 = np.zeros((n,1))
evals = np.linalg.eigvals(A.T@A)
L = np.max(evals)
mu = np.min(evals)
kappa = L/mu
print(f"L is {L:.2f}, mu is {mu:.2f}, condition number is {kappa:.2e}")
f2 = lambda x : norm(A@x-b)
def subgrad(x):
r = A@x-b
return A.T@(r/norm(r))
rho = norm(A,ord=2)
B = norm(xStar-x0)
maxiter = 1e4
x_gd, fHist_gd = gradientDescent(f,grad,1/L,x0,maxiter=maxiter)
x_Nest, fHist_Nest = NesterovGradientDescent(f,grad,1/L,x0,maxiter=maxiter)
x_Nest2, fHist_Nest2 = NesterovGradientDescent(f,grad,1/L,x0,maxiter=maxiter,restart=500)
# subgradient descent:
step = 1e0*(B/rho)/np.sqrt(maxiter)
x_sgd, fHist_sgd = gradientDescent(f2,subgrad,step,x0,maxiter=maxiter)
plt.figure(figsize=(12,7))
plt.semilogy( fHist_gd, label='Gradient Descent' )
plt.semilogy( fHist_Nest, label='Nesterov Acceleration' )
plt.semilogy( fHist_Nest2, label='Nesterov Acceleration w/ restarts' )
plt.semilogy( fHist_sgd, label='Subgradient Descent' )
k = np.arange(1,maxiter)
plt.semilogy(k,1e-4*(1-1/kappa)**k,'--',label='$(1-\kappa^{-1})^k$')
plt.semilogy(k,1e3*(1-1/np.sqrt(kappa))**k,'--',label='$(1-\kappa^{-1/2})^k$')
plt.ylim(bottom=1e-29,top=1e2)
plt.legend()
plt.grid()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def rounded_avg(n, m):
if m < n:
return -1
summation = 0
for i in range(n, m+1):
summation += i
return bin(round(summation/(m - n + 1)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: AND Neural Network
Step2: Question
Step3: Plotting the NN Output
Step4: <br/>
Step5: XOR NN Output Plot
Step6: The Little Red Riding Hood Neural Network
Step7: Backpropagation
Step8: Prediction
|
<ASSISTANT_TASK:>
Python Code:
from pybrain.tools.shortcuts import buildNetwork
net = buildNetwork(2, 1, outclass=pybrain.SigmoidLayer)
print net.params
def print_pred2(dataset, network):
    """Tabulate the network's output for every sample in *dataset*.

    Returns a DataFrame with the two input columns X, Y and the network
    activation rounded to three decimals.
    """
    samples = dataset.data['sample'][:dataset.getLength()]
    table = pd.DataFrame(samples, columns=['X', 'Y'])
    table['output'] = pd.DataFrame(np.round(network.activateOnDataset(dataset), 3))
    return table
return df
from pybrain.datasets import UnsupervisedDataSet, SupervisedDataSet
# The four Boolean input patterns used for the AND / XOR demos.
D = UnsupervisedDataSet(2) # define a dataset in pybrain
D.addSample([0,0])
D.addSample([0,1])
D.addSample([1,0])
D.addSample([1,1])
print_pred2(D, net)
# With all weights zeroed the sigmoid outputs 0.5 for every input.
net.params[:] = [0, 0, 0]
print_pred2(D, net)
def plot_nn_prediction(N):
    """Plot the thresholded (0/1) output of network *N* over [0,1] x [0,1]."""
    xs = np.arange(0.0, 1.0, 0.025)
    ys = np.arange(1.0, 0.0, -0.025)
    values = []
    for y in ys:
        for x in xs:
            values.append(1.0 if N.activate([x, y])[0] >= 0.5 else 0.0)
    grid = np.array(values).reshape((len(xs), len(ys)))
    plt.imshow(grid,
               extent=(xs.min(), xs.max(), ys.min(), ys.max()),
               cmap=plt.get_cmap('Greys_r'))
    plt.show()
# Hand-set weights implementing AND, then train an XOR net, then the
# Little Red Riding Hood (4-in / 4-out) network.  Python 2 / pybrain cells.
net.params[:] = [-30, 20, 20]
plot_nn_prediction(net)
Dtrain = SupervisedDataSet(2,1) # define a dataset in pybrain
Dtrain.addSample([0,0],[0])
Dtrain.addSample([0,1],[1])
Dtrain.addSample([1,0],[1])
Dtrain.addSample([1,1],[0])
from pybrain.supervised.trainers import BackpropTrainer
# XOR needs a hidden layer (2 hidden sigmoid units here).
net = buildNetwork(2, 2, 1, hiddenclass=pybrain.SigmoidLayer, outclass=pybrain.SigmoidLayer)
T = BackpropTrainer(net, learningrate=0.1, momentum=0.9)
T.trainOnDataset(Dtrain, 1000)
print_pred2(D, net)
plot_nn_prediction(net)
from pybrain.tools.validation import Validator
validator = Validator()
# LRRH dataset: inputs are character features, targets are actions.
Dlrrh = SupervisedDataSet(4,4)
Dlrrh.addSample([1,1,0,0],[1,0,0,0])
Dlrrh.addSample([0,1,1,0],[0,0,1,1])
Dlrrh.addSample([0,0,0,1],[0,1,1,0])
df = pd.DataFrame(Dlrrh['input'],columns=['Big Ears', 'Big Teeth', 'Handsome', 'Wrinkled'])
print df.join(pd.DataFrame(Dlrrh['target'],columns=['Scream', 'Hug', 'Food', 'Kiss']))
net = buildNetwork(4, 3, 4, hiddenclass=pybrain.SigmoidLayer, outclass=pybrain.SigmoidLayer)
T = BackpropTrainer(net, learningrate=0.01, momentum=0.99)
scores = []
# Train one epoch at a time so the MSE learning curve can be recorded.
for i in xrange(1000):
    T.trainOnDataset(Dlrrh, 1)
    prediction = net.activateOnDataset(Dlrrh)
    scores.append(validator.MSE(prediction, Dlrrh.getField('target')))
plt.ylabel('Mean Square Error')
plt.xlabel('Iteration')
plt.plot(scores)
def lrrh_input(vals):
    """Wrap a 4-element feature vector in a labeled single-column DataFrame."""
    feature_names = ['big ears', 'big teeth', 'handsome', 'wrinkled']
    return pd.DataFrame(vals, index=feature_names, columns=['input'])
def lrrh_output(vals):
    """Wrap a 4-element action vector in a labeled single-column DataFrame."""
    action_names = ['scream', 'hug', 'offer food', 'kiss cheek']
    return pd.DataFrame(vals, index=action_names, columns=['output'])
# Query the trained network: big ears + big teeth -> expected action "scream".
in_vals = [1, 1, 0, 0]
lrrh_input(in_vals)
lrrh_output(net.activate(in_vals))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Encapsulation, part 1
Step3: 2. A function to attach time signatures
Step5: 3. A function to pitch notes
Step7: 4. A function to attach articulations and dynamics
Step9: 5. A function to call the other functions
Step10: Now we define input and call just one function to make our staff
|
<ASSISTANT_TASK:>
Python Code:
def make_notes_and_rests(counts, denominator, time_signatures):
    """Makes notes and rests with repeating pattern of durations.

    Output sums to time signatures.
    Returns staff.
    """
    durations = [_.duration for _ in time_signatures]
    total_duration = sum(durations)
    talea = rmakers.Talea(counts, denominator)
    talea_index = 0
    leaves = []
    current_duration = abjad.Duration(0)
    while current_duration < total_duration:
        leaf_duration = talea[talea_index]
        # Positive talea entries become notes; negative entries become rests.
        if 0 < leaf_duration:
            pitch = abjad.NamedPitch("c'")
        else:
            pitch = None
        leaf_duration = abs(leaf_duration)
        # Clip the final leaf so the total exactly fills the time signatures.
        if total_duration < (leaf_duration + current_duration):
            leaf_duration = total_duration - current_duration
        leaves_ = abjad.LeafMaker()([pitch], [leaf_duration])
        leaves.extend(leaves_)
        current_duration += leaf_duration
        talea_index += 1
    staff = abjad.Staff(leaves)
    return staff
def impose_time_signatures(staff, time_signatures):
    """Splits notes and rests at measure boundaries, attaches time signatures,
    and rewrites the meter of each measure."""
    selections = abjad.mutate.split(staff[:], time_signatures, cyclic=True)
    for time_signature, selection in zip(time_signatures, selections):
        abjad.attach(time_signature, selection[0])
    measure_selections = abjad.select(staff).leaves().group_by_measure()
    for time_signature, measure_selection in zip(time_signatures, measure_selections):
        abjad.Meter.rewrite_meter(measure_selection, time_signature)
def pitch_notes(staff, pitches):
    """Pitches notes.

    *pitches* is indexed once per pitched logical tie (callers pass an
    abjad.CyclicTuple, so the sequence wraps as needed).
    """
    plts = abjad.select(staff).logical_ties(pitched=True)
    for i, plt in enumerate(plts):
        pitch = pitches[i]
        for note in plt:
            note.written_pitch = pitch
def attach_indicators(staff):
    """Attaches articulations and dynamics to runs."""
    for run in abjad.select(staff).runs():
        articulation = abjad.Articulation("tenuto")
        abjad.attach(articulation, run[0])
        # Runs of three or more notes get a crescendo hairpin; shorter runs
        # get a single soft dynamic.
        if 3 <= len(run):
            abjad.hairpin("p < f", run)
        else:
            dynamic = abjad.Dynamic("ppp")
            abjad.attach(dynamic, run[0])
    abjad.override(staff).dynamic_line_spanner.staff_padding = 4
def make_staff(counts, denominator, time_signatures, pitches):
    """Makes notes and rests, pitches notes and attaches articulations and dynamics.

    Returns staff.
    """
    staff = make_notes_and_rests(counts, denominator, time_signatures)
    impose_time_signatures(staff, time_signatures)
    pitch_notes(staff, pitches)
    attach_indicators(staff)
    return staff
# Define the input (time signatures, talea counts, cyclic pitches) and build
# the staff with a single top-level call.
pairs = [(4, 4), (3, 4), (7, 16), (6, 8)]
time_signatures = [abjad.TimeSignature(_) for _ in pairs]
counts = [1, 2, -3, 4]
denominator = 16
string = "d' fs' a' d'' g' ef'"
pitches = abjad.CyclicTuple(string.split())
staff = make_staff(counts, denominator, time_signatures, pitches)
abjad.show(staff)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.- Introduction to Python dictionaries
Step2: Note
Step3: EXERCISE
Step4: EXERCISE
Step5: 2.- Downloading a webpage
Step6: BeautifulSoup is a powerfull library to postprocess the HTML code, let's see one example, we will extract the text and remove the HTML markup.
Step7: 3.- The Google Translate webpage syntax
Step8: EXERCISE
Step9: 4.- Downloading the html code with the translation
Step10: 5.- Postprocessing the downloaded webpage
Step11: EXERCISE
Step12: 6.- Adding more languages
Step13: EXERCISE
Step14: 7.- Translation to all available languages
Step15: We will now call the translation function to translate a text to every one of the languages.
|
<ASSISTANT_TASK:>
Python Code:
# Python 2 tutorial cell: basic dictionary operations (lookup, update,
# in-place increment, keys/values iteration).
import goslate # pip install goslate
from bs4 import BeautifulSoup # pip install beautifulsoup4
import urllib2 # pip install requests
inventary_dict = {'milk': 23, 'coockies': 12, 'chocolate': 26, 'yogourt': 5}
print "This is the original dictionary:"
print inventary_dict
print " "
print "This is the value associated to 'milk':"
print inventary_dict['milk']
print " "
print "We add a new element to the dictionary:"
inventary_dict.update({'sugar': 103})
print inventary_dict
print " "
print "We increment the value of one of the elements:"
inventary_dict['coockies'] += 10
print inventary_dict
print " "
keys = inventary_dict.keys()
print "These are the keys of the dictionary:"
print keys
print " "
values = inventary_dict.values()
print "These are the values of the dictionary:"
print values
print " "
print "The size of this dictionary is %d, and it stores the following elements:" % len(inventary_dict.keys())
for key in keys:
    print key + ": " + str(inventary_dict[key])
# Exercise scaffolding (Python 2): <COMPLETAR> marks blanks that students
# must fill in; this cell is intentionally NOT runnable as-is.
languages_dict = <COMPLETAR>
print "Vamos a traducir de %s a %s." % (languages_dict['es'], languages_dict['it'])
def view_codes(mydict):
    # Exercise: print each language code / name pair of *mydict*.
    <COMPLETAR>
view_codes(languages_dict)
# Download a webpage with urllib2, spoofing a browser User-Agent.
agent = {'User-Agent':"Mozilla/4.0"}
url1 = "https://www.u-tad.com/conocenos/conoce-u-tad/"
request = urllib2.Request(url1, headers=agent)
page = urllib2.urlopen(request).read()
n_caracteres_descargados = <COMPLETAR>
print "La página descargada tiene %d caracteres." % n_caracteres_descargados
print "Estos son los primeros 1000 caracteres:"
print "=" * 100
print <COMPLETAR>
print "=" * 100
# Strip scripts/styles and extract the visible text with BeautifulSoup.
bs = BeautifulSoup(page, "html5lib")
for script in bs(["script", "style"]):
    script.extract()
text_utad = bs.get_text()
text_utad = ' '.join(text_utad.split())
print text_utad
url = "https://translate.google.com/m?hl=de&sl=auto&q=adiós+amigos"
print url
destiny_language = 'it'
my_text = "Hola a todos mis amigos"
def url_translate(destiny_language, text):
    # Exercise: build the Google Translate mobile URL for *text*.
    url = <COMPLETAR> % (destiny_language, "auto", text.replace(<COMPLETAR>))
    return url
url = url_translate(destiny_language, my_text)
print url
# Exercise scaffolding continued (Python 2, <COMPLETAR> blanks; not runnable).
def get_html(lang, text):
    # Download the translation page for *text* into language *lang*.
    agent = {'User-Agent':"Mozilla/4.0"}
    url = <COMPLETAR>
    request = urllib2.Request(url, headers=agent)
    html = urllib2.urlopen(request).read()
    return html
html = get_html(destiny_language, my_text)
n_caracteres_descargados = <COMPLETAR>
print "La página descargada tiene %d caracteres." % n_caracteres_descargados
print "=" * 100
print html
print "=" * 100
def translate(lang, text):
    # Extract the translated text from the downloaded page (third <div>).
    html = <COMPLETAR>
    bs = BeautifulSoup(html, "html5lib")
    translation =bs.findAll('div')[2].text
    return translation
key = 'en'
print u"Traducción al " + unicode(languages_dict[key],'utf-8') + ":"
print translate(key, my_text)
for <COMPLETAR>:
    print u"Traducción al " + unicode(languages_dict[key],'utf-8') + ":"
    print <COMPLETAR>
    print " "
# Exercise: add more languages to the dictionary, then list them.
languages_dict.update(<COMPLETAR>)
languages_dict.<COMPLETAR>
languages_dict.<COMPLETAR>
languages_dict.<COMPLETAR>
languages_dict.<COMPLETAR>
languages_dict.<COMPLETAR>
languages_dict.<COMPLETAR>
view_codes(languages_dict)
<COMPLETAR>
# Use goslate to enumerate every supported language and translate to all.
gs = goslate.Goslate()
all_languages_dict = gs.get_languages()
view_codes(<COMPLETAR>)
my_text = 'Ya hemos completado el curso introductorio'
for key in <COMPLETAR>:
    print u"\nTraducción al " + <COMPLETAR> + ":"
    print <COMPLETAR>
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Predicting if salary is high with a decision tree
Step2: For feature 1 calculate possible splitting points
Step3: split the data using split 5
Step4: Gini
Step5: The gini impurity of the split is the Gini Impurity of each node is weighted by the fraction of points from the parent node in that node.
Step6: test all splits on all features
Step7: Recursively grow the tree
Step8: Prediction
Step9: Using sklearn
Step10: Part 2
Step11: Tuning n_estimators
Step12: Tuning max_features
Step13: Fitting a Random Forest with the best parameters
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
# read in the data
url = 'https://raw.githubusercontent.com/albahnsen/PracticalMachineLearningClass/master/datasets/hitters.csv'
hitters = pd.read_csv(url)
# remove rows with missing values
hitters.dropna(inplace=True)
hitters.head()
# encode categorical variables as integers
hitters['League'] = pd.factorize(hitters.League)[0]
hitters['Division'] = pd.factorize(hitters.Division)[0]
hitters['NewLeague'] = pd.factorize(hitters.NewLeague)[0]
hitters.head()
# allow plots to appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# scatter plot of Years versus Hits colored by Salary
hitters.plot(kind='scatter', x='Years', y='Hits', c='Salary', colormap='jet', xlim=(0, 25), ylim=(0, 250))
# define features: exclude career statistics (which start with "C") and the response (Salary)
feature_cols = hitters.columns[hitters.columns.str.startswith('C') == False].drop('Salary')
feature_cols
hitters.Salary.describe()
# define X and y: binary target = "salary above 425" (roughly the median)
X = hitters[feature_cols]
y = (hitters.Salary > 425).astype(int)
X.columns
# Hand-worked example of one candidate split on feature j = 1.
max_depth = None
num_pct = 10
max_features = None
min_gain=0.001
j = 1
print(X.columns[j])
# Split the variable in num_pct percentile points
splits = np.percentile(X.iloc[:, j], np.arange(0, 100, 100.0 / num_pct).tolist())
# Only unique values, to handle binary and few-unique-value features
splits = np.unique(splits)
splits
k = 5
# Partition the labels by the k-th candidate threshold.
filter_l = X.iloc[:, j] < splits[k]
y_l = y.loc[filter_l]
y_r = y.loc[~filter_l]
def gini(y):
    """Gini impurity of a binary label vector (0 for an empty vector)."""
    if y.shape[0] == 0:
        return 0
    p = y.mean()  # fraction of positive labels
    return 1 - (p**2 + (1 - p)**2)
# Impurity of each side of the example split.
gini_l = gini(y_l)
gini_l
gini_r = gini(y_r)
gini_r
def gini_impurity(X_col, y, split):
    """Gini gain of thresholding feature column *X_col* at *split*.

    Parent impurity minus the size-weighted impurity of the left
    (X_col < split) and right (X_col >= split) child label sets.
    """
    goes_left = X_col < split
    left, right = y.loc[goes_left], y.loc[~goes_left]
    n_left, n_right = left.shape[0], right.shape[0]
    total = n_left + n_right
    weighted_children = (n_left / total * gini(left)
                         + n_right / total * gini(right))
    return gini(y) - weighted_children
# Gain for the example split chosen above (feature j, threshold splits[k]).
gini_impurity(X.iloc[:, j], y, splits[k])
def best_split(X, y, num_pct=10):
    """Exhaustively search all features and percentile thresholds.

    Returns [feature_index, threshold, gain] of the split with the largest
    Gini gain (all zeros if no split improves on the parent node).
    """
    winner = [0, 0, 0]  # [j, split, gain]
    for j in range(X.shape[1]):
        column = X.iloc[:, j]
        # Candidate thresholds: percentiles of the column, de-duplicated
        # (handles binary / low-cardinality features), first one dropped.
        percentiles = np.arange(0, 100, 100.0 / (num_pct + 1)).tolist()
        candidates = np.unique(np.percentile(column, percentiles))[1:]
        for threshold in candidates:
            gain = gini_impurity(column, y, threshold)
            if gain > winner[2]:
                winner = [j, threshold, gain]
    return winner
# Apply the best split to the whole training set and inspect the partition.
j, split, gain = best_split(X, y, 5)
j, split, gain
filter_l = X.iloc[:, j] < split
y_l = y.loc[filter_l]
y_r = y.loc[~filter_l]
y.shape[0], y_l.shape[0], y_r.shape[0]
y.mean(), y_l.mean(), y_r.mean()
def tree_grow(X, y, level=0, min_gain=0.001, max_depth=None, num_pct=10):
    """Recursively grow a binary decision tree as a nested dict.

    Each node dict carries y_pred / y_prob / level / split / n_samples / gain;
    internal nodes additionally store 'split' = [feature, threshold] and the
    children 'sl' (left) and 'sr' (right).
    """
    # If only one observation, return a trivial leaf.
    if X.shape[0] == 1:
        tree = dict(y_pred=y.iloc[:1].values[0], y_prob=0.5, level=level, split=-1, n_samples=1, gain=0)
        return tree
    # Calculate the best split
    j, split, gain = best_split(X, y, num_pct)
    # Save the node and estimate its prediction (majority class).
    y_pred = int(y.mean() >= 0.5)
    y_prob = (y.sum() + 1.0) / (y.shape[0] + 2.0) # Laplace correction
    tree = dict(y_pred=y_pred, y_prob=y_prob, level=level, split=-1, n_samples=X.shape[0], gain=gain)
    # Check stopping criteria: insufficient gain, or maximum depth reached.
    if gain < min_gain:
        return tree
    if max_depth is not None:
        if level >= max_depth:
            return tree
    # No stopping criterion was met, so create the partition and recurse.
    filter_l = X.iloc[:, j] < split
    X_l, y_l = X.loc[filter_l], y.loc[filter_l]
    X_r, y_r = X.loc[~filter_l], y.loc[~filter_l]
    tree['split'] = [j, split]
    # Grow each child one level deeper.
    tree['sl'] = tree_grow(X_l, y_l, level + 1, min_gain=min_gain, max_depth=max_depth, num_pct=num_pct)
    tree['sr'] = tree_grow(X_r, y_r, level + 1, min_gain=min_gain, max_depth=max_depth, num_pct=num_pct)
    return tree
# Grow a depth-1 tree (stump) for inspection, then keep a depth-3 tree.
tree_grow(X, y, level=0, min_gain=0.001, max_depth=1, num_pct=10)
tree = tree_grow(X, y, level=0, min_gain=0.001, max_depth=3, num_pct=10)
tree
def tree_predict(X, tree, proba=False):
    """Predict class labels (or Laplace-smoothed probabilities when *proba*)
    for every row of DataFrame *X* using a dict tree from tree_grow."""
    out = np.ones(X.shape[0])
    if tree['split'] == -1:
        # Leaf node: every row receives the leaf's class / probability.
        return out * (tree['y_prob'] if proba else tree['y_pred'])
    j, threshold = tree['split']
    goes_left = (X.iloc[:, j] < threshold)
    X_left = X.loc[goes_left]
    X_right = X.loc[~goes_left]
    # Recurse only into non-empty children.
    if X_left.shape[0] == 0:
        out[~goes_left] = tree_predict(X_right, tree['sr'], proba)
    elif X_right.shape[0] == 0:
        out[goes_left] = tree_predict(X_left, tree['sl'], proba)
    else:
        out[goes_left] = tree_predict(X_left, tree['sl'], proba)
        out[~goes_left] = tree_predict(X_right, tree['sr'], proba)
    return out
# Apply the hand-grown tree to the training data.
tree_predict(X, tree)
# list of values to try for max_depth
max_depth_range = range(1, 21)
# list to store the cross-validated accuracy for each value of max_depth
# (the original comment said RMSE, but scoring='accuracy' is used below)
accuracy_scores = []
# use 10-fold cross-validation with each value of max_depth
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
for depth in max_depth_range:
    clf = DecisionTreeClassifier(max_depth=depth, random_state=1)
    accuracy_scores.append(cross_val_score(clf, X, y, cv=10, scoring='accuracy').mean())
# plot max_depth (x-axis) versus accuracy (y-axis)
plt.plot(max_depth_range, accuracy_scores)
plt.xlabel('max_depth')
plt.ylabel('Accuracy')
# show the best accuracy and the corresponding max_depth
sorted(zip(accuracy_scores, max_depth_range))[::-1][0]
# fit a tree with the chosen depth
# NOTE(review): the original comment claimed max_depth=2 was best but the
# code fits max_depth=4 -- verify which value the CV curve actually selects.
clf = DecisionTreeClassifier(max_depth=4, random_state=1)
clf.fit(X, y)
# compute feature importances
pd.DataFrame({'feature':feature_cols, 'importance':clf.feature_importances_}).sort_values('importance')
pd.Series(cross_val_score(clf, X, y, cv=10)).describe()
from sklearn.ensemble import RandomForestClassifier
# Baseline forest with default parameters.
clf = RandomForestClassifier()
clf
pd.Series(cross_val_score(clf, X, y, cv=10)).describe()
# list of values to try for n_estimators
estimator_range = range(10, 310, 10)
# list to store the average Accuracy for each value of n_estimators
accuracy_scores = []
# use 5-fold cross-validation with each value of n_estimators (WARNING: SLOW!)
for estimator in estimator_range:
    clf = RandomForestClassifier(n_estimators=estimator, random_state=1, n_jobs=-1)
    accuracy_scores.append(cross_val_score(clf, X, y, cv=5, scoring='accuracy').mean())
plt.plot(estimator_range, accuracy_scores)
plt.xlabel('n_estimators')
plt.ylabel('Accuracy')
# list of values to try for max_features
feature_range = range(1, len(feature_cols)+1)
# list to store the average Accuracy for each value of max_features
accuracy_scores = []
# use 5-fold cross-validation with each value of max_features (WARNING: SLOW!)
for feature in feature_range:
    clf = RandomForestClassifier(n_estimators=200, max_features=feature, random_state=1, n_jobs=-1)
    accuracy_scores.append(cross_val_score(clf, X, y, cv=5, scoring='accuracy').mean())
plt.plot(feature_range, accuracy_scores)
plt.xlabel('max_features')
plt.ylabel('Accuracy')
# max_features=6 is best and n_estimators=200 is sufficiently large
clf = RandomForestClassifier(n_estimators=200, max_features=6, random_state=1, n_jobs=-1)
clf.fit(X, y)
# compute feature importances
pd.DataFrame({'feature':feature_cols, 'importance':clf.feature_importances_}).sort_values('importance')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's compute the utility for "all" values of $x$ in $[0,10]^M$ and plot them.
Step2: Demands
Step3: This is an equilibrium, but of course in an economy with only 1 individual, $(p^, x^)=(p^,\omega)$ is an equilibrium for any $p^$.
Step4: Here we have excess supply for good 1 and excess demand for good 2
Step5: Again we have excess supply for good 1 and excess demand for good 2
Step6: We have excess supply for goods 1 & 3 and excess demand for goods 2 & 4
Step7: Again we have excess supply for good 1 and excess demand for good 2
Step8: Now we have excess supply for goods 1, 7, 8, and 10, and excess demand for the other
Step9: But before we use this to try to find an equilibrium, we need to remember a few things.
Step10: Simple $2\times2$ Economy
Step11: Notice that the excess demand with normalized prices generates the same excess demand as with the original price vector
Step12: Similar $2\times 4 $ Economy
Step13: $6\times 2$ Economy
Step14: And even larger economy $10\times5$
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.misc import derivative
import itertools
%matplotlib widget
import matplotlib.pyplot as plt
def CobbDouglas(x, alpha, h=1e-10, deriv=False):
    '''
    Compute the utility of an individual with Cobb-Douglas preferences:
    u(x) = prod_j x_j ** alpha_j.

    x     : (N,) vector of goods, or (M, N) array of M consumption bundles
    alpha : (N,) vector of Cobb-Douglas exponents
    h     : step for the central-difference numerical derivative
    deriv : if True, also return [analytic gradient, numerical gradient]
            (only meaningful for a single 1-D bundle x)

    Returns u, or [u, [uprime, nuprime]] when deriv is True.
    '''
    x = np.asarray(x)
    lx = len(x)
    # Row-wise product for a 2-D batch of bundles, plain product for one
    # bundle.  (The original used a bare try/except around prod(axis=1),
    # which silently hid unrelated errors; dispatching on ndim is explicit
    # and equivalent.)
    if x.ndim > 1:
        u = (x ** alpha).prod(axis=1)
    else:
        u = (x ** alpha).prod()
    if deriv:
        # Analytic gradient: du/dx_j = alpha_j * u / x_j.
        uprime = alpha * u / x
        # Central finite difference: perturb each coordinate by +/- h.
        nuprime = (((x + (np.zeros([lx, lx]) + h * np.eye(lx)))**alpha).prod(axis=1) - ((x + (np.zeros([lx, lx]) - h * np.eye(lx)))**alpha).prod(axis=1)) / (2 * h)
        du = [uprime, nuprime]
        return [u, du]
    else:
        return u
# Evaluate utility on a dense grid of bundles and draw the indifference map
# (filled contours) plus a 3-D surface of the same utility values.
N = 1 # Individuals
M = 2 # Goods
alpha = np.array([0.5, 0.5])
x = np.linspace(0, 10, num=100)
# All (x1, x2) grid points as rows of a (100*100, 2) array.
X = np.array([i for i in itertools.product(x, repeat=M)])
u = CobbDouglas(X, alpha)
u
fig, ax = plt.subplots(figsize=(15,10))
plt.contourf(x, x, u.reshape(len(x), len(x)), 30)
plt.xlabel('Good 1', size=18)
plt.ylabel('Good 2', size=18)
plt.title('Indiference Map', size=22)
from mpl_toolkits.mplot3d import Axes3D
xx, yy = np.meshgrid(x, x)
fig = plt.figure(figsize=(15, 10))
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(xx, yy, u.reshape(len(x), len(x)), rstride=1, cstride=1, cmap='viridis')
ax.set_xlabel('Good 1', size=18)
ax.set_ylabel('Good 2', size=18)
ax.set_zlabel('Utility', size=18)
plt.show()
def CobbDouglasDemand(p, w, alpha):
    '''
    Marshallian demands of N Cobb-Douglas consumers.

    p     : (M, 1) column vector of prices
    w     : (N, M) endowment matrix (one row per consumer)
    alpha : (N, M) Cobb-Douglas exponents (one row per consumer)

    Each consumer spends the budget share alpha_ij / sum_j(alpha_ij) of her
    income w_i . p on good j, so demand is share * income / price.
    Returns an (N, M) demand matrix.
    '''
    # Total income of each consumer, (N, 1).  (The original also computed an
    # unused local `ly = len(y)`, removed here.)
    y = w.dot(p)
    # Normalized expenditure shares times income, divided by prices.
    x = ((alpha / alpha.sum(axis=1, keepdims=True) * y).T / p).T
    return x
def ExcessDemand(p, w, alpha):
    '''
    Compute the excess demand for each good at prices p:
    aggregate demand minus aggregate endowment.
    '''
    total_demand = CobbDouglasDemand(p, w, alpha).sum(axis=0)
    total_supply = w.sum(axis=0)
    return total_demand - total_supply
# Let's test to make sure function is well written
# A sequence of economies of growing size; excess demand is inspected at a
# fixed (non-equilibrium) price vector in each case.
alpha = np.array([[0.5, 0.5]])
w = np.array([[1, 2]])
p = np.array([[2, 1]]).T
CobbDouglasDemand(p, w, alpha)
ExcessDemand(p, w, alpha)
alpha = np.array([[0.5, 0.5], [0.25, 0.75]])
w = np.array([[1, 2], [2, 2]])
p = np.array([[2, 1]]).T
CobbDouglasDemand(p, w, alpha)
ExcessDemand(p, w, alpha)
alpha = np.array([[0.5, 0.5], [0.25, 0.75], [0.5, 0.5]])
w = np.array([[1, 2], [2, 2], [1, 2]])
p = np.array([[2, 1]]).T
CobbDouglasDemand(p, w, alpha)
ExcessDemand(p, w, alpha)
# Economy with 2 individuals of each type and 4 goods 2 groups of identical goods
alpha = np.array([[0.5, 0.5] * 2, [0.25, 0.75] * 2])
w = np.array([[1, 2] * 2, [2, 2] * 2] )
p = np.array([[2, 1] * 2]).T
CobbDouglasDemand(p, w, alpha)
ExcessDemand(p, w, alpha)
# Same economy with 6 individuals of each type
alpha = np.array([[0.5, 0.5], [0.25, 0.75]] * 6)
w = np.array([[1, 2], [2, 2]] * 6)
p = np.array([[2, 1]]).T
CobbDouglasDemand(p, w, alpha)
ExcessDemand(p, w, alpha)
# Generate a random economy
np.random.seed(123456)
N = 5 # Individuals
M = 10 # Goods
alpha = np.random.uniform(0, 1, size=(N, M))
w = np.random.uniform(0, 1, size=(N, M))
p = np.random.uniform(0, 1, size=(1, M)).T
CobbDouglasDemand(p, w, alpha)
ExcessDemand(p, w, alpha)
from scipy.optimize import root
def ExcessDemand2(p, w, alpha, normalize=False):
    '''
    Excess demand for every good except the numeraire (good 1).

    p holds the prices of goods 2..M; the price of good 1 is fixed at 1
    (or, when normalize=True, the full price vector is rescaled to unit
    Euclidean length).  By Walras' law the market for good 1 clears
    whenever the other M-1 markets do, so only z[1:] is returned -- the
    form a root finder expects.
    '''
    # Ensure p is a column vector, then prepend the numeraire price 1.
    p = p.reshape((np.max(p.shape), 1))
    p = np.vstack([[1], p])
    if normalize:
        p = (p / np.linalg.norm(p))
    z = CobbDouglasDemand(p, w, alpha).sum(axis=0) - w.sum(axis=0)
    return z[1:]
# Solve for equilibrium prices with scipy's root finder on ExcessDemand2
# (good 1 is the numeraire), then verify all markets clear at p*.
alpha = np.array([[0.5, 0.5], [0.25, 0.75]])
w = np.array([[1, 2], [2, 2]])
p = np.array([[1, 0.5]]).T
CobbDouglasDemand(p, w, alpha)
ExcessDemand(p, w, alpha)
ExcessDemand2(p[1:], w, alpha)
sol = root(ExcessDemand2, np.array([1]), args=(w, alpha))
sol
pstar = np.vstack([[1], sol.x])
ExcessDemand(pstar, w, alpha)
# Economy with 2 individuals of each type and 4 goods 2 groups of identical goods
alpha = np.array([[0.5, 0.5] * 2, [0.25, 0.75] * 2])
w = np.array([[1, 2] * 2, [2, 2] * 2] )
p = np.array([[2, 1] * 2]).T
p = p / p[0]
ExcessDemand(p, w, alpha)
ExcessDemand2(p[1:], w, alpha)
sol = root(ExcessDemand2, p[1:], args=(w, alpha))
sol
pstar = sol.x
pstar = pstar.reshape((np.max(pstar.shape), 1))
pstar = np.vstack([[1], pstar])
ExcessDemand(pstar, w, alpha)
# Same economy with 6 individuals of each type
alpha = np.array([[0.5, 0.5], [0.25, 0.75]] * 6)
w = np.array([[1, 2], [2, 2]] * 6)
p = np.array([[2, 1]]).T
sol = root(ExcessDemand2, p[1:], args=(w, alpha))
sol
pstar = sol.x
pstar = pstar.reshape((np.max(pstar.shape), 1))
pstar = np.vstack([[1], pstar])
ExcessDemand(pstar, w, alpha)
# Generate a random economy
np.random.seed(123456)
N = 5 # Individuals
M = 10 # Goods
alpha = np.random.uniform(0, 1/M, size=(N, M))
w = np.random.uniform(0, 1/M, size=(N, M))
p = np.random.uniform(0, 1/M, size=(1, M)).T
alpha.sum(axis=1)
ExcessDemand2(np.ones((1, M-1)).T, w, alpha)
sol = root(ExcessDemand2, np.ones((1, M-1)).T, args=(w, alpha))
sol
pstar = sol.x
pstar = pstar.reshape((np.max(pstar.shape), 1))
pstar = np.vstack([[1], pstar])
ExcessDemand(pstar, w, alpha)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: これでdlibとcv2が使えるようになりました。dlib.あるいはcv2.の後に関数名を付けることでそれぞれの機能を呼び出せます。早速WebCAMを使えるようにしましょう。
Step2: カメラのタリーが光りましたか? 光らない場合は括弧の中の数字を1や2に変えてみて下さい。
Step3: capはWebCAMを使うための操縦桿(ハンドル)と思って下さい。それにread(読め)と命令した訳です。では,成功したか確認しましょう。readという関数(機能)は成功したか否かの結果と,画像を返してくれます。
Step4: Trueと出ましたか? 出ていれば成功です。画像を見てみましょう。
Step5: 自分の顔が出てきましたか? waitKey(2000)は2000ms待って終了する意味です。この2000を0にすると特別な意味になり,入力待ちになります。(ウィンドウを選択してアクティブな状態にしてから何かキーを押して下さい。Outに何か数字が出るでしょう。この数字はキーの認識番号とでも思って下さい。)
Step6: detectorはdlibのget(よこせ) frontal(正面の) face(顔) detector(検出器)の結果。という意味です。要するに今度は顔検出の操縦桿がdetectorということです。では早速使ってみましょう。
Step7: "1"以上の数字が出てきたら成功です。これは検出した顔の数です。1行目で画像imgから, upsamplingを1回だけして(色々な大きさの顔に対応する処理),その結果をdetsに入れてます。
Step8: rectangle(xxx, xxx, xxx, xxx)と出てきましたね。これはdlibのrectangleというモノです。訳がわからないのでdlib.rectangle?と実行してみましょう。
Step9: 恐らく
Step10: 答えは四角形の左上,右下の座標です。では画像に四角形を重ねてみましょう。ここではcv2の機能を使います。使い方を見て実行してみましょう。
Step11: 顔に四角形が重なりましたか?失敗した場合には顔が正面を向いていないか,rectangleに渡す座標が間違えています。ちなみにこれを連続的に実行すると以下のようになります。(ウィンドウをアクティブにしてESCキーを押すと止まります)
Step12: 3. 顔ランドマーク検出
Step13: では顔ランドマークの検出器の操縦桿を作りましょう。
Step14: もし,エラーが出てしまったらshape_predictor_68_face_landmarks.datファイルがこのノートブックファイルと同じ場所にないせいです。ネットからダウンロードしましょう。下のセルがdlib.netからbz2圧縮されたファイルを展開して保存する処理なので,一度実行していれば大丈夫です)
Step15: 手順としてはdetectorで顔検出し,predictorで検出した顔領域内の顔ランドマークを検出,という流れです。
Step16: 結果を入れたshapeを見てみようと思ったらdlib.full_object_detection at ....と出てきました。?を使って調べてみましょう。
Step17: ```Python
Step18: Python
Step19: 出ました。0番です。さて,どこでしょう。これはググってみましょう。ついでにdlib.pointも調べてみましょう。
Step20: ```Python
Step21: では取り敢えず右目を囲ってみましょう。左端は36番のx,上端は38番のy,右端は39番のx,下端は41番のyを使ってみます。長くなるのでそれぞれx1, y1, x2, y2に代入してしまいましょう。
Step22: そしてimgに四角形を書き込んでみましょう。
Step23: 先程の連続処理に手を加えてみましょう。
Step24: 4. 顔ランドマークを使って何かやる
Step25: 今度は両目を覆いたいので(x1, y1) = (17のx, 19のy), (x2, y2) = (26のx, 29のy)としました。
Step26: では囲えてるか確認しましょう。
Step27: では画像の一部置き換えです。Pythonを使うと簡単ですが注意が必要です。
Step28: さて,確認してみましょう。
Step29: では連続処理にしてみましょう。
|
<ASSISTANT_TASK:>
Python Code:
# WebCAM + dlib face-detection tutorial cells (IPython `?` help lines kept).
import dlib
import cv2
cap = cv2.VideoCapture(0)  # device 0; try 1 or 2 if the camera light stays off
ret, img = cap.read()
print(ret)
cv2.imshow('image', img)
cv2.waitKey(2000)  # show the frame for 2000 ms
# Frontal face detector; returns a list of dlib.rectangle face boxes.
detector = dlib.get_frontal_face_detector()
dets = detector(img, 1)  # 1 = one upsampling pass (finds smaller faces)
len(dets)
dets[0]
dlib.rectangle?
print(dets[0].left())
print(dets[0].top())
print(dets[0].right())
print(dets[0].bottom())
cv2.rectangle?
# Draw the first detected face as a blue box (BGR color order).
img = cv2.rectangle(img, (dets[0].left(), dets[0].top()), (dets[0].right(), dets[0].bottom()), (255, 0, 0))
cv2.imshow('image', img)
cv2.waitKey(2000)
import cv2
import dlib
cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
key = 0
# Live loop: draw a face box on each frame until ESC (key code 27).
while key != 27:
    ret, img = cap.read()
    dets = detector(img, 1)
    if len(dets) > 0:
        img = cv2.rectangle(img, (dets[0].left(), dets[0].top()), (dets[0].right(), dets[0].bottom()), (255, 0, 0))
        cv2.imshow('image', img)
    else:
        cv2.imshow('image', img)
    key = cv2.waitKey(10)
# 68-point facial landmark detection: download the predictor model if
# missing, then locate the right eye via landmark indices 36-41.
ret, img = cap.read()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
import urllib.request
urllib.request.urlretrieve("http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2", "shape_predictor_68_face_landmarks.dat.bz2")
import bz2
# Decompress the downloaded .bz2 model next to this notebook.
f = bz2.open("shape_predictor_68_face_landmarks.dat.bz2", "rb")
d = f.read()
f.close()
f = open("shape_predictor_68_face_landmarks.dat","wb")
f.write(d)
f.close()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
dets = detector(img, 1)
shape = predictor(img, dets[0])
shape
dlib.full_object_detection?
shape.parts?
shape.parts()[0]
dlib.point?
print(shape.parts()[0].x)
print(shape.parts()[0].y)
# Right-eye bounding box from the 68-landmark scheme (points 36..41).
x1 = shape.parts()[36].x
y1 = shape.parts()[38].y
x2 = shape.parts()[39].x
y2 = shape.parts()[41].y
img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255))
cv2.imshow('image', img)
cv2.waitKey(2000)
# Live landmark loop, then overlay an image over the eye region by replacing
# a slice of the frame with a resized picture.
import cv2
import dlib
cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
key = 0
while key != 27:  # ESC exits
    ret, img = cap.read()
    dets = detector(img, 1)
    if len(dets) > 0:
        shape = predictor(img, dets[0])
        # Right-eye box (landmarks 36..41).
        x1 = shape.parts()[36].x
        y1 = shape.parts()[38].y
        x2 = shape.parts()[39].x
        y2 = shape.parts()[41].y
        img = cv2.rectangle(img, (dets[0].left(), dets[0].top()), (dets[0].right(), dets[0].bottom()), (255, 0, 0))
        img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255))
    cv2.imshow('image', img)
    key = cv2.waitKey(10)
ret, img = cap.read()
dets = detector(img, 1)
shape = predictor(img, dets[0])
# Both-eyes region: (eyebrow 17 x, eyebrow 19 y) to (eyebrow 26 x, nose 29 y).
x1 = shape.parts()[17].x
y1 = shape.parts()[19].y
x2 = shape.parts()[26].x
y2 = shape.parts()[29].y
img = cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255))
cv2.imshow('image', img)
cv2.waitKey(2000)
# Resize the overlay image to the region and splice it into the frame.
img2 = cv2.imread('cartoon-718659_640.png', cv2.IMREAD_ANYCOLOR)
newSize = (x2 - x1, y2 - y1)
img3 = cv2.resize(img2, newSize)
img[y1:y2, x1:x2] = img3
cv2.imshow('image', img)
cv2.waitKey(2000)
import cv2
import dlib
cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')
img2 = cv2.imread('cartoon-718659_640.png', cv2.IMREAD_ANYCOLOR)
key = 0
# Final live loop: continuously overlay the picture over the eyes.
while key != 27:
    ret, img = cap.read()
    dets = detector(img, 1)
    if len(dets) > 0:
        shape = predictor(img, dets[0])
        x1 = shape.parts()[17].x
        y1 = shape.parts()[19].y
        x2 = shape.parts()[26].x
        y2 = shape.parts()[29].y
        newSize = (x2 - x1, y2 - y1)
        img3 = cv2.resize(img2, newSize)
        img[y1:y2, x1:x2] = img3
    cv2.imshow('image', img)
    key = cv2.waitKey(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we'll use linear SVC to partition our graph into clusters
Step2: By setting up a dense mesh of points in the grid and classifying all of them, we can render the regions of each cluster as distinct colors
Step3: Or just use predict for a given point
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
#Create fake income/age clusters for N people in k clusters
def createClusteredData(N, k):
    """Create fake income/age clusters for N people in k clusters.

    Returns (X, y): an array of [income, age] samples and the integer
    cluster label of each sample.
    """
    per_cluster = int(float(N) / k)
    samples, labels = [], []
    for cluster in range(k):
        # Draw a random cluster center, then scatter points around it.
        income_center = np.random.uniform(20000.0, 200000.0)
        age_center = np.random.uniform(20.0, 70.0)
        for _ in range(per_cluster):
            samples.append([np.random.normal(income_center, 10000.0),
                            np.random.normal(age_center, 2.0)])
            labels.append(cluster)
    return np.array(samples), np.array(labels)
%matplotlib inline
from pylab import *
(X, y) = createClusteredData(100, 5)
plt.figure(figsize=(8, 6))
plt.scatter(X[:,0], X[:,1], c=y.astype(np.float))
plt.show()
from sklearn import svm, datasets
C = 1.0
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
def plotPredictions(clf):
    """Shade the classifier's decision regions over a dense income/age mesh
    and overlay the (global) training points X colored by label y."""
    # Dense grid covering the income (x) and age (y) ranges of the data.
    xx, yy = np.meshgrid(np.arange(0, 250000, 10),
                         np.arange(10, 70, 0.5))
    # Classify every grid point, then reshape back to the grid for contourf.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    plt.figure(figsize=(8, 6))
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    plt.scatter(X[:,0], X[:,1], c=y.astype(float))
    plt.show()
# Render the decision regions, then classify two individual points.
plotPredictions(svc)
print(svc.predict([[200000, 40]]))
print(svc.predict([[50000, 65]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Extract Features
Step3: Train SVM on features
Step4: Inline question 1
|
<ASSISTANT_TASK:>
Python Code:
import os
# Move up one directory so relative dataset/module paths resolve.
os.chdir(os.getcwd() + '/..')
# Run some setup code for this notebook
import random
import numpy as np
import matplotlib.pyplot as plt
from utils.data_utils import load_CIFAR10
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from utils.data_utils import get_CIFAR10_data
cifar10_dir = 'datasets/cifar-10-batches-py'
# 49k train / 1k val / 1k test split of CIFAR-10.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data(cifar10_dir, num_training=49000, num_validation=1000, num_test=1000)
print (X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
from utils.features_utils import extract_features, hog_feature, color_histogram_hsv
# Extract HOG (shape) features and an HSV color histogram per image, then
# concatenate them into a single feature vector.
num_color_bins = 10 # Number of bins in the color histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)
# Preprocessing: Subtract the mean feature
# (statistics are computed on the training split only, to avoid leakage)
mean_feat = np.mean(X_train_feats, axis=0, keepdims=True)
X_train_feats -= mean_feat
X_val_feats -= mean_feat
X_test_feats -= mean_feat
# Preprocessing: Divide by standard deviation.
# This ensures that each feature has roughly the same scale.
std_feat = np.std(X_train_feats, axis=0, keepdims=True)
X_train_feats /= std_feat
X_val_feats /= std_feat
X_test_feats /= std_feat
# Preprocessing: Add a bias dimension
# (a constant 1 column, so the linear classifiers need no separate bias term)
X_train_feats = np.hstack([X_train_feats, np.ones([X_train_feats.shape[0], 1])])
X_val_feats = np.hstack([X_val_feats, np.ones([X_val_feats.shape[0], 1])])
X_test_feats = np.hstack([X_test_feats, np.ones([X_test_feats.shape[0], 1])])
# Use the validation set to tune the learning rate and regularization strength
# val accuracy should reach near 0.44
from classifiers.linear_classifier import LinearSVM
# Grid search: 10 learning rates x 6 regularization strengths.
learning_rates = [1e-4, 3e-4, 9e-4, 1e-3, 3e-3, 9e-3, 1e-2, 3e-2, 9e-2, 1e-1]
regularization_strengths = [1e-1, 3e-1, 9e-1, 1, 3, 9]
# results[(learning_rate, reg)] = (train_accuracy, val_accuracy)
results = {}
best_val = -1
best_svm = None
for learning_rate in learning_rates:
    for reg in regularization_strengths:
        # Train a fresh SVM for each hyperparameter pair.
        model = LinearSVM()
        model.train(X_train_feats, y_train, learning_rate=learning_rate, reg=reg, num_iters=5000,
                    batch_size=300, verbose=True)
        y_train_pred = model.predict(X_train_feats)
        train_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = model.predict(X_val_feats)
        val_accuracy = np.mean(y_val == y_val_pred)
        results[(learning_rate, reg)] = (train_accuracy, val_accuracy)
        # Keep the model with the best validation accuracy seen so far.
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_svm = model
        print('lr %e reg %e train_accuracy: %f val_accuracy: %f' % (learning_rate, reg, train_accuracy, val_accuracy))
# NOTE(review): a bare `print` is a Python 2 relic; in Python 3 it just
# evaluates the print function object and emits nothing.
print
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train_accuracy: %f val_accuracy: %f' % (lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# NOTE(review): this summary loop duplicates the one just above — it looks
# like an accidental copy of the notebook cell.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train_accuracy: %f val_accuracy: %f' % (lr, reg, train_accuracy, val_accuracy))
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test_feats)
test_accuracy = np.mean(y_test == y_test_pred)
# NOTE(review): the message says "raw pixels" but the classifier was trained
# on HOG + color-histogram features.
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# An important way to gain intuition about how an algorithm works is to
# visualize the mistakes that it makes. In this visualization, we show examples
# of images that are misclassified by our current system. The first column
# shows images that our system labeled as "plane" but whose true label is
# something other than "plane".
examples_per_class = 8
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for cls, cls_name in enumerate(classes):
    # Indices predicted as `cls` whose true label differs (false positives).
    idxs = np.where((y_test != cls) & (y_test_pred == cls))[0]
    idxs = np.random.choice(idxs, examples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        plt.subplot(examples_per_class, len(classes), i * len(classes) + cls + 1)
        plt.imshow(X_test[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls_name)
plt.show()
print(X_train_feats.shape)
from classifiers.neural_net import TwoLayerNet
# Two-layer fully connected net on the extracted image features.
input_dim = X_train_feats.shape[1]
hidden_dim = 500
num_classes = 10
learning_rates = [3e-1, 9e-1, 1]
regularization_strengths = [3e-3, 4e-3, 5e-3, 6e-3, 7e-3, 8e-3, 9e-3, 1e-2]
results = {}
best_model = None
best_val = -1
for lr in learning_rates:
    for reg in regularization_strengths:
        model = TwoLayerNet(input_dim, hidden_dim, num_classes, std=1e-1)
        stats = model.train(X_train_feats, y_train, X_val_feats, y_val,
                            learning_rate=lr, learning_rate_decay=0.95,
                            reg=reg, num_iters=5000, batch_size=200, verbose=True)
        train_acc = (model.predict(X_train_feats) == y_train).mean()
        val_acc = (model.predict(X_val_feats) == y_val).mean()
        print('lr: %e, reg: %e, train_acc: %f, val_acc: %f' % (lr, reg, train_acc, val_acc))
        results[(lr, reg)] = (train_acc, val_acc)
        # Track the best model by validation accuracy.
        if val_acc > best_val:
            best_val = val_acc
            best_model = model
# NOTE(review): bare `print` is a Python 2 relic; it is a no-op in Python 3.
print
print('best val_acc: %f' % (best_val))
# Print results grouped by learning rate (blank line between groups).
old_lr = -1
for lr, reg in sorted(results):
    if old_lr != lr:
        old_lr = lr
        print
    train_acc, val_acc = results[(lr, reg)]
    print('lr: %e, reg: %e, train_acc: %f, val_acc: %f' % (lr, reg, train_acc, val_acc))
# NOTE(review): duplicated summary loop — looks like an accidental cell copy.
old_lr = -1
for lr, reg in sorted(results):
    if old_lr != lr:
        old_lr = lr
        print
    train_acc, val_acc = results[(lr, reg)]
    print('lr: %e, reg: %e, train_acc: %f, val_acc: %f' % (lr, reg, train_acc, val_acc))
# get more than 55% accuracy
test_acc = (best_model.predict(X_test_feats) == y_test).mean()
print('Test accuracy: ', test_acc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Writing the objective function
Step2: Generating data
Step3: Fitting the model
Step4: Evaluating the model
Step5: Regularization path and feature selection
|
<ASSISTANT_TASK:>
Python Code:
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
def loss_fn(X, Y, beta):
    """Squared l2 residual of the linear model: ||X beta - Y||_2^2."""
    residual = cp.matmul(X, beta) - Y
    return cp.norm2(residual)**2
def regularizer(beta):
    """l1 penalty on the coefficients (encourages sparsity)."""
    return cp.norm1(beta)
def objective_fn(X, Y, beta, lambd):
    """Lasso objective: squared loss plus lambda-weighted l1 penalty."""
    return loss_fn(X, Y, beta) + lambd * regularizer(beta)
def mse(X, Y, beta):
    """Mean squared error of the current beta value on (X, Y)."""
    n_samples = X.shape[0]
    return loss_fn(X, Y, beta).value / n_samples
def generate_data(m=100, n=20, sigma=5, density=0.2):
    """Generate a data matrix X and noisy observations Y from a sparse model.

    A ground-truth coefficient vector with roughly `density` fraction of
    nonzero entries is drawn, then Y = X @ beta_star + Gaussian noise.
    Seeded for reproducibility.
    """
    np.random.seed(1)
    beta_star = np.random.randn(n)
    zero_idxs = np.random.choice(range(n), int((1-density)*n), replace=False)
    beta_star[zero_idxs] = 0
    X = np.random.randn(m, n)
    noise = np.random.normal(0, sigma, size=m)
    Y = X.dot(beta_star) + noise
    return X, Y, beta_star
# Problem dimensions and noise level.
m = 100
n = 20
sigma = 5
density = 0.2
X, Y, _ = generate_data(m, n, sigma)
# 50/50 train/test split.
X_train = X[:50, :]
Y_train = Y[:50]
X_test = X[50:, :]
Y_test = Y[50:]
# lambd is a cvxpy Parameter so the problem can be re-solved for many
# regularization strengths without rebuilding it.
beta = cp.Variable(n)
lambd = cp.Parameter(nonneg=True)
problem = cp.Problem(cp.Minimize(objective_fn(X_train, Y_train, beta, lambd)))
# Sweep lambda log-uniformly over [1e-2, 1e3], recording errors and betas.
lambd_values = np.logspace(-2, 3, 50)
train_errors = []
test_errors = []
beta_values = []
for v in lambd_values:
    lambd.value = v
    problem.solve()
    train_errors.append(mse(X_train, Y_train, beta))
    test_errors.append(mse(X_test, Y_test, beta))
    beta_values.append(beta.value)
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
def plot_train_test_errors(train_errors, test_errors, lambd_values):
    """Plot train and test MSE curves against lambda on a log-x axis."""
    for errors, label in ((train_errors, "Train error"),
                          (test_errors, "Test error")):
        plt.plot(lambd_values, errors, label=label)
    plt.xscale("log")
    plt.legend(loc="upper left")
    plt.xlabel(r"$\lambda$", fontsize=16)
    plt.title("Mean Squared Error (MSE)")
    plt.show()
plot_train_test_errors(train_errors, test_errors, lambd_values)
def plot_regularization_path(lambd_values, beta_values):
    """Plot each coefficient's value as a function of lambda (log-x axis)."""
    # zip(*beta_values) transposes the list of beta vectors into one series
    # per coefficient, in the same order as the original index loop.
    for coeff_series in zip(*beta_values):
        plt.plot(lambd_values, coeff_series)
    plt.xlabel(r"$\lambda$", fontsize=16)
    plt.xscale("log")
    plt.title("Regularization Path")
    plt.show()
plot_regularization_path(lambd_values, beta_values)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element space (Taylor-Hood P2-P1)
Step4: 4.3. Allocate an object of the Stokes class
Step5: 4.4. Prepare reduction with a reduced basis method
Step6: 4.5. Perform the offline phase
Step7: 4.6. Perform an online solve
Step8: 4.7. Perform an error analysis
Step9: 4.8. Perform a speedup analysis
|
<ASSISTANT_TASK:>
Python Code:
from dolfin import *
from rbnics import *
from sampling import LinearlyDependentUniformDistribution
@PullBackFormsToReferenceDomain()
@AffineShapeParametrization("data/t_bypass_vertices_mapping.vmp")
class Stokes(StokesProblem):
    """Parametrized Stokes flow on a T-bypass geometry for RBniCS reduction.

    The decorators pull the variational forms back to the reference domain
    using the affine shape parametrization stored in the .vmp file, so
    compute_theta/assemble_operator only describe the reference-domain forms.
    """
    # Default initialization of members
    def __init__(self, V, **kwargs):
        # Call the standard initialization
        StokesProblem.__init__(self, V, **kwargs)
        # ... and also store FEniCS data structures for assembly
        assert "subdomains" in kwargs
        assert "boundaries" in kwargs
        self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
        up = TrialFunction(V)
        (self.u, self.p) = split(up)
        vq = TestFunction(V)
        (self.v, self.q) = split(vq)
        self.dx = Measure("dx")(subdomain_data=self.subdomains)
        self.ds = Measure("ds")(subdomain_data=self.boundaries)
        # ... as well as forcing terms and inlet velocity
        # (parabolic inlet profile over x[1] in [1, 2]; zero body force)
        self.inlet = Expression(("- 1./0.25*(x[1] - 1)*(2 - x[1])", "0."), degree=2)
        self.f = Constant((0.0, 0.0))
        self.g = Constant(0.0)
    # Return custom problem name
    def name(self):
        return "Stokes2RB"
    # Return the lower bound for inf-sup constant.
    def get_stability_factor_lower_bound(self):
        return 1.
    # Return theta multiplicative terms of the affine expansion of the problem.
    # All thetas are 1 here: the mu-dependence is handled by the pull-back
    # decorators above.
    @compute_theta_for_supremizers
    def compute_theta(self, term):
        if term == "a":
            theta_a0 = 1.0
            return (theta_a0, )
        elif term in ("b", "bt"):
            theta_b0 = 1.0
            return (theta_b0, )
        elif term == "f":
            theta_f0 = 1.0
            return (theta_f0, )
        elif term == "g":
            theta_g0 = 1.0
            return (theta_g0, )
        elif term == "dirichlet_bc_u":
            theta_bc0 = 1.
            return (theta_bc0, )
        else:
            raise ValueError("Invalid term for compute_theta().")
    # Return forms resulting from the discretization of the affine expansion of the problem operators.
    @assemble_operator_for_supremizers
    def assemble_operator(self, term):
        dx = self.dx
        if term == "a":
            # viscous (stiffness) term
            u = self.u
            v = self.v
            a0 = inner(grad(u), grad(v)) * dx
            return (a0, )
        elif term == "b":
            # divergence constraint
            u = self.u
            q = self.q
            b0 = - q * div(u) * dx
            return (b0, )
        elif term == "bt":
            # pressure gradient (transpose of b)
            p = self.p
            v = self.v
            bt0 = - p * div(v) * dx
            return (bt0, )
        elif term == "f":
            v = self.v
            f0 = inner(self.f, v) * dx
            return (f0, )
        elif term == "g":
            q = self.q
            g0 = self.g * q * dx
            return (g0, )
        elif term == "dirichlet_bc_u":
            # inlet profile on boundary id 1, no-slip on boundary id 3
            bc0 = [DirichletBC(self.V.sub(0), self.inlet, self.boundaries, 1),
                   DirichletBC(self.V.sub(0), Constant((0.0, 0.0)), self.boundaries, 3)]
            return (bc0,)
        elif term == "inner_product_u":
            # H^1-seminorm inner product for the velocity space
            u = self.u
            v = self.v
            x0 = inner(grad(u), grad(v)) * dx
            return (x0, )
        elif term == "inner_product_p":
            # L^2 inner product for the pressure space
            p = self.p
            q = self.q
            x0 = inner(p, q) * dx
            return (x0, )
        else:
            raise ValueError("Invalid term for assemble_operator().")
# 4.2. Load mesh and markers, build the Taylor-Hood (P2-P1) mixed space.
mesh = Mesh("data/t_bypass.xml")
subdomains = MeshFunction("size_t", mesh, "data/t_bypass_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/t_bypass_facet_region.xml")
element_u = VectorElement("Lagrange", mesh.ufl_cell(), 2)
element_p = FiniteElement("Lagrange", mesh.ufl_cell(), 1)
element = MixedElement(element_u, element_p)
# "s" is the supremizer component, sharing the velocity subspace.
V = FunctionSpace(mesh, element, components=[["u", "s"], "p"])
# 4.3. Instantiate the problem and set the 6-dimensional parameter range
# (5 geometric lengths plus one angle).
problem = Stokes(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [
    (0.5, 1.5),
    (0.5, 1.5),
    (0.5, 1.5),
    (0.5, 1.5),
    (0.5, 1.5),
    (0., pi / 6.)
]
problem.set_mu_range(mu_range)
# 4.4. Reduced basis method: at most 25 basis functions or 1e-6 tolerance.
reduction_method = ReducedBasis(problem)
reduction_method.set_Nmax(25)
reduction_method.set_tolerance(1e-6)
# 4.5. Offline phase: lifting at the reference parameter, 100 training snapshots.
lifting_mu = (1.0, 1.0, 1.0, 1.0, 1.0, 0.0)
problem.set_mu(lifting_mu)
reduction_method.initialize_training_set(100, sampling=LinearlyDependentUniformDistribution())
reduced_problem = reduction_method.offline()
# 4.6. Online solve at a new parameter and plot velocity/pressure.
online_mu = (1.0, 1.0, 1.0, 1.0, 1.0, pi / 6.)
reduced_problem.set_mu(online_mu)
reduced_solution = reduced_problem.solve()
plot(reduced_solution, reduced_problem=reduced_problem, component="u")
plot(reduced_solution, reduced_problem=reduced_problem, component="p")
# 4.7/4.8. Error and speedup analysis over a 100-sample testing set.
reduction_method.initialize_testing_set(100, sampling=LinearlyDependentUniformDistribution())
reduction_method.error_analysis()
reduction_method.speedup_analysis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get train, validation and 2 test data sets - data had previously been split by a Python script.
Step2: Set up base model - had success for this problem with the Xception model. It will not be retrained for the first training phase which will output the training for the added dense layers only.
Step3: Build the model.
Step4: Pre-train the added dense layers. Set workers to a reasonable number for the CPU. I have an 8 core, 16 thread, Ryzen 7. We could go higher on workers but this seemed enough. Note that this is set up to run Keras / TensorFlow with a GPU.
Step5: Set the base model to have the last few layers be trainable. Preserve most of the layers from the pre-trained model.
Step6: Train the model. Now training both the dense layers and last few of the base Xception model.
Step7: Score the model on two previously unseen data sets.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import numpy as np
import matplotlib.pyplot as plt
from keras.applications import Xception
from keras.preprocessing.image import ImageDataGenerator
from keras import models
from keras import layers
from keras import optimizers
import tensorflow as tf
# Directory layout: train/validation plus two held-out test sets,
# pre-split by an external script.
base_dir = r'C:\Users\Vette\Desktop\Regis\#MSDS686 Deep Learning\cats_dogs'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
test2_dir = os.path.join(base_dir, 'test2')
# NOTE(review): batch_size is defined but the generators below use
# batch_size=50 directly.
batch_size = 20
seed = 321
# Augmentation for train/validation; test sets are only rescaled.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=30,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
validation_datagen = ImageDataGenerator(rescale=1./255,
                                        rotation_range=30,
                                        width_shift_range=0.2,
                                        height_shift_range=0.2,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True,
                                        fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)
test2_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir,
                                                    target_size=(240, 240),
                                                    batch_size=50,
                                                    class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(validation_dir,
                                                              target_size=(240, 240),
                                                              batch_size=50,
                                                              class_mode='binary')
test_generator = test_datagen.flow_from_directory(test_dir,
                                                  target_size=(240, 240),
                                                  batch_size=50,
                                                  class_mode='binary')
test2_generator = test2_datagen.flow_from_directory(test2_dir,
                                                    target_size=(240, 240),
                                                    batch_size=50,
                                                    class_mode='binary')
# Pre-trained Xception backbone without its classification head; frozen for
# the first training phase (only the new dense layers learn).
conv_base = Xception(weights='imagenet',
                     include_top=False,
                     input_shape=(240, 240, 3))
conv_base.summary()
conv_base.trainable = False
def build_model():
    """Build the cats-vs-dogs classifier: Xception base + dense head.

    Uses the module-level conv_base (its current trainable flags are baked
    into the compiled model) and a sigmoid output for binary classification.
    """
    classifier = models.Sequential([
        conv_base,
        layers.Flatten(),
        layers.Dense(256, activation='relu'),
        layers.Dropout(0.2),
        layers.Dense(32, activation='relu'),
        layers.Dense(1, activation='sigmoid'),
    ])
    classifier.compile(loss='binary_crossentropy',
                       optimizer=optimizers.RMSprop(lr=2e-5),
                       metrics=['acc'])
    return classifier
# Phase 1: train only the dense head (conv_base is frozen).
with tf.device('/gpu:0'):
    np.random.seed(seed)
    model = build_model()
    print('Pre-train dense layers')
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=160,
                                  epochs=8,
                                  validation_data=validation_generator,
                                  validation_steps=30,
                                  verbose=1,
                                  workers=10)
# Unfreeze only the last blocks of Xception (from 'block13' onward) for
# fine-tuning; earlier layers keep their ImageNet weights fixed.
conv_base.trainable = True
set_trainable = False
for layer in conv_base.layers:
    if 'block13' in layer.name:
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
# Phase 2: rebuild and train dense head plus the unfrozen Xception blocks.
# NOTE(review): build_model() creates a fresh head, so the phase-1 dense
# weights are not carried over — presumably intentional; verify.
with tf.device('/gpu:0'):
    print('Train Model')
    np.random.seed(seed)
    model = build_model()
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=320,
                                  epochs=20,
                                  validation_data=validation_generator,
                                  validation_steps=60,
                                  verbose=1,
                                  initial_epoch=8,
                                  workers=10)
# Score on the two previously unseen test sets.
with tf.device('/gpu:0'):
    scores = model.evaluate_generator(test_generator, workers=8)
    print('#1 Loss, Accuracy: ', scores)
    scores = model.evaluate_generator(test2_generator, workers=8)
    print('#2 Loss, Accuracy: ', scores)
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NOTE
Step2: <b>Restart the kernel</b> (click on the reload button above).
Step8: Input source
Step9: Let's pull this query down into a Pandas DataFrame and take a look at some of the statistics.
Step13: Create ML dataset using tf.transform and Dataflow
Step14: This will take 10-15 minutes. You cannot go on in this lab until your DataFlow job has succesfully completed.
Step15: Train off preprocessed data
Step16: Now let's create fake data in JSON format and use it to serve a prediction with gcloud ai-platform local predict
|
<ASSISTANT_TASK:>
Python Code:
!pip install --user apache-beam[gcp]==2.16.0
!pip install --user tensorflow-transform==0.15.0
!pip download tensorflow-transform==0.15.0 --no-deps
%%bash
pip freeze | grep -e 'flow\|beam'
import tensorflow as tf
import tensorflow_transform as tft
import shutil
print(tf.__version__)
# change these to try this notebook out
BUCKET = 'cloud-example-labs'
PROJECT = 'project-id'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
from google.cloud import bigquery
def create_query(phase, EVERY_N):
    """Creates a query with the proper splits.

    Args:
        phase: int, 1=train, 2=valid.
        EVERY_N: int, take an example EVERY_N rows (None = use all rows,
            split 50/25/25 by hash buckets mod 4).

    Returns:
        Query string with the proper splits.
    """
    # NOTE: the triple quotes around this SQL (and the split clauses below)
    # were lost in a previous export, which made the function a syntax error;
    # restored here.
    base_query = """
    WITH daynames AS
    (SELECT ['Sun', 'Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat'] AS daysofweek)
    SELECT
    (tolls_amount + fare_amount) AS fare_amount,
    daysofweek[ORDINAL(EXTRACT(DAYOFWEEK FROM pickup_datetime))] AS dayofweek,
    EXTRACT(HOUR FROM pickup_datetime) AS hourofday,
    pickup_longitude AS pickuplon,
    pickup_latitude AS pickuplat,
    dropoff_longitude AS dropofflon,
    dropoff_latitude AS dropofflat,
    passenger_count AS passengers,
    'notneeded' AS key
    FROM
    `nyc-tlc.yellow.trips`, daynames
    WHERE
    trip_distance > 0 AND fare_amount > 0
    """
    if EVERY_N is None:
        if phase < 2:
            # training: hash buckets 0 and 1 of 4
            query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(
            pickup_datetime AS STRING)), 4)) < 2""".format(base_query)
        else:
            # validation/test: the single bucket matching `phase`
            query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(
            pickup_datetime AS STRING)), 4)) = {1}""".format(base_query, phase)
    else:
        # subsample: one row out of EVERY_N, offset by `phase`
        query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(
        pickup_datetime AS STRING)), {1})) = {2}""".format(
            base_query, EVERY_N, phase)
    return query
# Pull a small validation sample (1 row in 100000) into a Pandas DataFrame
# and inspect its basic statistics.
query = create_query(2, 100000)
df_valid = bigquery.Client().query(query).to_dataframe()
display(df_valid.head())
df_valid.describe()
import datetime
import tensorflow as tf
import apache_beam as beam
import tensorflow_transform as tft
import tensorflow_metadata as tfmd
from tensorflow_transform.beam import impl as beam_impl
def is_valid(inputs):
    """Check to make sure the inputs are valid.

    Args:
        inputs: dict, dictionary of TableRow data from BigQuery.

    Returns:
        True if the inputs are valid and False if they are not.
    """
    # (Docstring quotes were lost in a previous export; restored here.)
    try:
        pickup_longitude = inputs['pickuplon']
        dropoff_longitude = inputs['dropofflon']
        pickup_latitude = inputs['pickuplat']
        dropoff_latitude = inputs['dropofflat']
        hourofday = inputs['hourofday']
        dayofweek = inputs['dayofweek']
        passenger_count = inputs['passengers']
        fare_amount = inputs['fare_amount']
        # Keep rides with a plausible NYC bounding box, a positive passenger
        # count and a fare above the minimum flag-drop.
        return fare_amount >= 2.5 and pickup_longitude > -78 \
            and pickup_longitude < -70 and dropoff_longitude > -78 \
            and dropoff_longitude < -70 and pickup_latitude > 37 \
            and pickup_latitude < 45 and dropoff_latitude > 37 \
            and dropoff_latitude < 45 and passenger_count > 0
    except (KeyError, TypeError):
        # Narrowed from a bare `except:`: missing fields raise KeyError and
        # non-numeric values raise TypeError on comparison; anything else
        # should surface instead of being silently dropped.
        return False
def preprocess_tft(inputs):
    """Preprocess the features and add engineered features with tf transform.

    Args:
        inputs: dict, dictionary of TableRow data from BigQuery.

    Returns:
        Dictionary of preprocessed data after scaling and feature engineering.
    """
    # (Docstring quotes were lost in a previous export; restored here.
    # A leftover debug `print(inputs)` was also removed — it would run once
    # per Beam worker during graph construction.)
    import datetime
    result = {}
    result['fare_amount'] = tf.identity(inputs['fare_amount'])
    # build a vocabulary
    result['dayofweek'] = tft.string_to_int(inputs['dayofweek'])
    result['hourofday'] = tf.identity(inputs['hourofday'])  # pass through
    # scaling numeric values
    result['pickuplon'] = (tft.scale_to_0_1(inputs['pickuplon']))
    result['pickuplat'] = (tft.scale_to_0_1(inputs['pickuplat']))
    result['dropofflon'] = (tft.scale_to_0_1(inputs['dropofflon']))
    result['dropofflat'] = (tft.scale_to_0_1(inputs['dropofflat']))
    result['passengers'] = tf.cast(inputs['passengers'], tf.float32)  # a cast
    # arbitrary TF func
    result['key'] = tf.as_string(tf.ones_like(inputs['passengers']))
    # engineered features: coordinate deltas and euclidean trip distance
    latdiff = inputs['pickuplat'] - inputs['dropofflat']
    londiff = inputs['pickuplon'] - inputs['dropofflon']
    result['latdiff'] = tft.scale_to_0_1(latdiff)
    result['londiff'] = tft.scale_to_0_1(londiff)
    dist = tf.sqrt(latdiff * latdiff + londiff * londiff)
    result['euclidean'] = tft.scale_to_0_1(dist)
    return result
def preprocess(in_test_mode):
    """Sets up preprocess pipeline.

    Args:
        in_test_mode: bool, False to launch DataFlow job, True to run locally.
    """
    # (Docstring quotes were lost in a previous export; restored here.)
    import os
    import os.path
    import tempfile
    from apache_beam.io import tfrecordio
    from tensorflow_transform.coders import example_proto_coder
    from tensorflow_transform.tf_metadata import dataset_metadata
    from tensorflow_transform.tf_metadata import dataset_schema
    from tensorflow_transform.beam import tft_beam_io
    from tensorflow_transform.beam.tft_beam_io import transform_fn_io
    job_name = 'preprocess-taxi-features' + '-'
    job_name += datetime.datetime.now().strftime('%y%m%d-%H%M%S')
    if in_test_mode:
        import shutil
        print('Launching local job ... hang on')
        OUTPUT_DIR = './preproc_tft'
        shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
        EVERY_N = 100000
    else:
        print('Launching Dataflow job {} ... hang on'.format(job_name))
        OUTPUT_DIR = 'gs://{0}/taxifare/preproc_tft/'.format(BUCKET)
        import subprocess
        subprocess.call('gsutil rm -r {}'.format(OUTPUT_DIR).split())
        EVERY_N = 10000
    options = {
        'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
        'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
        'job_name': job_name,
        'project': PROJECT,
        'num_workers': 1,
        'max_num_workers': 1,
        'teardown_policy': 'TEARDOWN_ALWAYS',
        'no_save_main_session': True,
        'direct_num_workers': 1,
        'extra_packages': ['tensorflow-transform-0.15.0.tar.gz']
    }
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    if in_test_mode:
        RUNNER = 'DirectRunner'
    else:
        RUNNER = 'DataflowRunner'
    # Set up raw data metadata: string, float and int columns as read from BQ.
    raw_data_schema = {
        colname: dataset_schema.ColumnSchema(
            tf.string, [], dataset_schema.FixedColumnRepresentation())
        for colname in 'dayofweek,key'.split(',')
    }
    raw_data_schema.update({
        colname: dataset_schema.ColumnSchema(
            tf.float32, [], dataset_schema.FixedColumnRepresentation())
        for colname in
        'fare_amount,pickuplon,pickuplat,dropofflon,dropofflat'.split(',')
    })
    raw_data_schema.update({
        colname: dataset_schema.ColumnSchema(
            tf.int64, [], dataset_schema.FixedColumnRepresentation())
        for colname in 'hourofday,passengers'.split(',')
    })
    raw_data_metadata = dataset_metadata.DatasetMetadata(
        dataset_schema.Schema(raw_data_schema))
    # Run Beam
    with beam.Pipeline(RUNNER, options=opts) as p:
        with beam_impl.Context(temp_dir=os.path.join(OUTPUT_DIR, 'tmp')):
            # Save the raw data metadata
            (raw_data_metadata |
                'WriteInputMetadata' >> tft_beam_io.WriteMetadata(
                    os.path.join(
                        OUTPUT_DIR, 'metadata/rawdata_metadata'), pipeline=p))
            # Read training data from bigquery and filter rows
            raw_data = (p | 'train_read' >> beam.io.Read(
                beam.io.BigQuerySource(
                    query=create_query(1, EVERY_N),
                    use_standard_sql=True)) |
                'train_filter' >> beam.Filter(is_valid))
            raw_dataset = (raw_data, raw_data_metadata)
            # Analyze and transform training data
            transformed_dataset, transform_fn = (
                raw_dataset | beam_impl.AnalyzeAndTransformDataset(
                    preprocess_tft))
            transformed_data, transformed_metadata = transformed_dataset
            # Save transformed train data to disk in efficient tfrecord format
            transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord(
                os.path.join(OUTPUT_DIR, 'train'), file_name_suffix='.gz',
                coder=example_proto_coder.ExampleProtoCoder(
                    transformed_metadata.schema))
            # Read eval data from bigquery and filter rows
            raw_test_data = (p | 'eval_read' >> beam.io.Read(
                beam.io.BigQuerySource(
                    query=create_query(2, EVERY_N),
                    use_standard_sql=True)) | 'eval_filter' >> beam.Filter(
                    is_valid))
            raw_test_dataset = (raw_test_data, raw_data_metadata)
            # Transform eval data with the transform_fn learned on train
            transformed_test_dataset = (
                (raw_test_dataset, transform_fn) | beam_impl.TransformDataset()
            )
            transformed_test_data, _ = transformed_test_dataset
            # Save transformed eval data to disk in efficient tfrecord format
            (transformed_test_data |
                'WriteTestData' >> tfrecordio.WriteToTFRecord(
                    os.path.join(OUTPUT_DIR, 'eval'), file_name_suffix='.gz',
                    coder=example_proto_coder.ExampleProtoCoder(
                        transformed_metadata.schema)))
            # Save transformation function to disk for use at serving time
            (transform_fn |
                'WriteTransformFn' >> transform_fn_io.WriteTransformFn(
                    os.path.join(OUTPUT_DIR, 'metadata')))
# Change to True to run locally
# (False launches a Dataflow job; True runs with the DirectRunner.)
preprocess(in_test_mode=False)
%%bash
# ls preproc_tft
gsutil ls gs://${BUCKET}/taxifare/preproc_tft/
%%bash
rm -r ./taxi_trained
export PYTHONPATH=${PYTHONPATH}:$PWD
python3 -m tft_trainer.task \
--train_data_path="gs://${BUCKET}/taxifare/preproc_tft/train*" \
--eval_data_path="gs://${BUCKET}/taxifare/preproc_tft/eval*" \
--output_dir=./taxi_trained \
!ls $PWD/taxi_trained/export/exporter
%%writefile /tmp/test.json
{"dayofweek":0, "hourofday":17, "pickuplon": -73.885262, "pickuplat": 40.773008, "dropofflon": -73.987232, "dropofflat": 40.732403, "passengers": 2.0}
%%bash
sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
%%bash
model_dir=$(ls $PWD/taxi_trained/export/exporter/)
gcloud ai-platform local predict \
--model-dir=./taxi_trained/export/exporter/${model_dir} \
--json-instances=/tmp/test.json
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Con estas manipulaciones lo que hicimos es cargar en memoria el dataset que prepocesamos anteriormente, le agregamos la nueva columna AGE2, ya que es mejor tener la edad agrupada en rangos en lugar de individualmente, luego eliminamos los valores atípicos que habíamos detectado; y por último, reemplazamos con su equivalente numérico a todas las variables categóricas; ya que para los algoritmos de Scikit-learn es mucho más eficiente trabajar con variables numéricas.
Step2: Como podemos ver, el algoritmo nos seleccionó la cantidad de atributos que le indicamos; en este ejemplo decidimos seleccionar solo 15; obviamente, cuando armemos nuestro modelo final vamos a tomar un número mayor de atributos.
Step3: ¿Cómo funciona?
Step4: Analicemos algunos de estos atributos en forma individual para tener una idea de cuanto puede ser que aporten a la exactitud del modelo. Podríamos comparar por ejemplo, el promedio de donaciones que podríamos obtener con este atributo contra el promedio de todo el dataset. Tomemos por ejemplo al atributo LASTGIFT que representa el importe de la última donación que realizó cada persona incluida en el conjunto de datos. En principio parece lógico que este atributo sea significativo para el modelo, ya que si donó en el pasado, hay bastantes posibilidades de que vuelva a donar en esta oportunidad.
Step5: Este último gráfico nos muestra claramente que con un valor del atributo LASTGIFT menor o igual a 10 las probabilidades de que esa persona realice una donación mejoran, pero veamos que pasa con el importe de la donación.
|
<ASSISTANT_TASK:>
Python Code:
# <!-- collapse=True -->
# Importando las librerías que vamos a utilizar
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cross_validation import train_test_split
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import RFE
from sklearn.ensemble import ExtraTreesClassifier
# graficos incrustados
%matplotlib inline
# parametros esteticos de seaborn
sns.set_palette("deep", desat=.6)
sns.set_context(rc={"figure.figsize": (8, 4)})
# Load the preprocessed dataset.
ONG_data = pd.read_csv('LEARNING_procesado.csv', header=0)
# Add the AGE2 column: age bucketed into 10-year ranges.
AGE2 = pd.cut(ONG_data['AGE'], range(0, 100, 10))
ONG_data['AGE2'] = AGE2
# Drop rows with donations above 60 (outliers).
ONG_data = ONG_data[ONG_data.DONOR_AMOUNT < 60]
# Convert categorical columns to numeric codes (factorize each object column).
tipos = ONG_data.columns.to_series().groupby(ONG_data.dtypes).groups
ctext = tipos[np.dtype('object')]
for c in ctext:
    ONG_data[c], _ = pd.factorize(ONG_data[c])
ONG_data['AGE2'], _ = pd.factorize(ONG_data['AGE2'])
# Separate the target columns from the features.
donor_flag = ONG_data['DONOR_FLAG']
donor_amount = ONG_data['DONOR_AMOUNT']
indice = ONG_data['IDX']
# Univariate feature selection via the ANOVA F-test (SelectKBest).
k = 15  # number of attributes to select
entrenar = ONG_data.drop(['DONOR_FLAG', 'DONOR_AMOUNT', 'IDX'], axis=1)
columnas = list(entrenar.columns.values)
seleccionadas = SelectKBest(f_classif, k=k).fit(entrenar, donor_flag)
atrib = seleccionadas.get_support()
atributos = [columnas[i] for i in list(atrib.nonzero()[0])]
atributos
# Recursive Feature Elimination using an ExtraTrees classifier.
modelo = ExtraTreesClassifier()
era = RFE(modelo, 15)  # number of attributes to select
era = era.fit(entrenar, donor_flag)
# print the selected attribute names
atrib = era.support_
atributos = [columnas[i] for i in list(atrib.nonzero()[0])]
atributos
# Feature importances from the fitted ExtraTrees model.
modelo.fit(entrenar, donor_flag)
modelo.feature_importances_[:15]
# 15 highest importance coefficients
np.sort(modelo.feature_importances_)[::-1][:15]
# Probability of being a donor across the whole dataset (in percent).
prob_gral = (ONG_data[ONG_data.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].count() \
             / ONG_data['DONOR_AMOUNT'].count()) * 100.0
prob_gral
# Probability of donating given LASTGIFT <= 10 (last gift at most $10).
lastgift10 = (ONG_data[(ONG_data.DONOR_AMOUNT > 0) &
                       (ONG_data.LASTGIFT <= 10)]['DONOR_AMOUNT'].count() \
              / ONG_data[ONG_data.LASTGIFT <= 10]['DONOR_AMOUNT'].count()) * 100.0
lastgift10
# Plot both probabilities side by side.
# (Fixed chart-title typo: 'Pobabilidad' -> 'Probabilidad'.)
lastgift = pd.Series({'promedio gral': prob_gral, 'lastgift<=10': lastgift10})
plot=lastgift.plot(kind='barh',
                   color=['blue', 'green']).set_title('Probabilidad de donar')
# Average donation amount across all donors.
donacion_prom = ONG_data[ONG_data.DONOR_AMOUNT > 0]['DONOR_AMOUNT'].mean()
donacion_prom
# Average donation amount for donors with LASTGIFT <= 10.
lastgift10_imp = ONG_data[(ONG_data.DONOR_AMOUNT > 0) &
                          (ONG_data.LASTGIFT <= 10)]['DONOR_AMOUNT'].mean()
lastgift10_imp
# Plot both averages side by side.
lastgift = pd.Series({'imp promedio gral': donacion_prom,
                      'lastgift<=10': lastgift10_imp})
plot=lastgift.plot(kind='barh',
                   color=['blue', 'green']
                   ).set_title('importe promedio de donación')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Filtering (with)
Step2: 2a. Show me a list of employees that have a lastname that begins with "R".
Step3: 2b. Show me a list of employees that have a lastname that ends with "r"
Step4: 2c. Provide a list of employees that have a hyphenated lastname.
Step5: 3a. Provide a list of employees that are on salary and have more than 35 vacation hours left.
Step6: 3b. Show the same as above but limit it to American employees.
Step7: 3c. Show the same as above but limit it to non-American employees.
Step8: 4a. List the married employees with more than 35 vacation hours, only ones living in Washington state.
Step9: 4b. Change the logic to include anyone who meets any of the 3 conditions (i.e., people who are either married, live in Washington state, or have more than 35 vacation hours left)
Step10: 4c. Show the same as above, but only for Production Technicians
Step11: 5a. List all employees living in Redmond, Seattle, and Bellevue, showing EmployeeID, FirstName, LastName, and City. Sort the list alphabetically by city.
Step12: 5b. For the list above, make sure these are only in the state of Washington, just to be careful.
Step13: 6. Provide a list of employees who have no title, whether it's a NULL or empty string.
Step14: 7a. Provide a list of employees who have at least 60 vacation hours left.
Step15: 7b. Provide a list of employees who have less than 60 vacation hours left.
Step16: 7c. Show me employees who have more than 20 and less than 60 vacation hours left.
Step17: 7d. If you did not use BETWEEN for 7c, do the same but use BETWEEN. If you did use BETWEEN for 7c, do it another way.
Step18: Grouping
Step19: 1b. Add to the above, the most recent birthdate for all employees
Step20: 1c. Show the above results broken down by gender
Step21: 1d. Show the above results broken down by gender, and salaried/hourly
Step22: 2a. What are the average vacation hours for all employees?
Step23: 2b. Add to the above, the minimum vacation hours for all employees
Step24: 2c. Show the above results broken down and ordered by job title
Step25: 2d. Show the above results broken down by job title, and married/single employees
Step26: 2e. Add to the above, the maximum vacation hours per group
Step27: 2f. Show the above results broken down by job title, married/single employees, and State
Step28: 2g. Show the above results but only for American employees
Step29: 2h. Change the grouping above so it's broken down by married/single and State, no more job title
Step30: 2i. Limit the results above to States where the average vacation hours is greater than 30
Step31: 2j. Limit the results above to States where the average vacation hours is greater than 30 and the maximum vacation hours is less than 50
Step32: 2k. Show the same results but only for non-American employees
Step33: 3a. Report how many employees are in the company
Step34: 3b. For the above report, show the number of employees per manager (hint
Step35: 3c. Remove any manager ID's that are NULL from the results above
Step36: 3d. Show the same results as above, but only for managers who have at least 5 employees
Step37: 4a. List the average vacation hours of all employees
Step38: 4b. Break down the results by State
Step39: 4c. Break down the results by city and State
Step40: 4d. Add something that shows the number of employees per city
Step41: 4e. Sort the results by the city and state
Step42: 4f. Make city and State a single column in the format of "City, State"
Step43: 4g. Add a column that shows the difference between the maximum vacation hours and minimum vacation hours for each city
Step44: 4h. Now sort the results by the new column created above
Step45: 4i. Limit the results to cities that have more than 1 employee
Step46: 4j. Limit the results to non-U.S. cities
Step47: Extra
Step48: Summary
Step49: Since expressions return DataFrames we can chain method calls and [...] operators
Step50: Recommended to use .loc .iloc .at methods
Step51: Use 'aggregate' for more complex groupings and aggregations
Step52: Assign intermediate results to new variables, and perform next operation on them
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
%%time
Employees = pd.read_excel('/home/data/AdventureWorks/Employees.xls')
print("shape:", Employees.shape)
%%time
Territory = pd.read_excel('/home/data/AdventureWorks/SalesTerritory.xls')
print("shape:", Territory.shape)
%%time
Customers = pd.read_excel('/home/data/AdventureWorks/Customers.xls')
print("shape:", Customers.shape)
%%time
Orders = pd.read_excel('/home/data/AdventureWorks/ItemsOrdered.xls')
print("shape:", Orders.shape)
Employees.MaritalStatus == 'M'
Employees.loc[Employees.MaritalStatus == 'M', ['EmployeeID', 'FirstName', 'LastName', 'MaritalStatus']]
filter = Employees.LastName.map(lambda x: x[0] == 'R')
Employees[filter]
df = Employees[Employees.LastName.map(lambda x: x[0] == 'R')]
df[['EmployeeID', 'FirstName', 'LastName']]
Employees.loc[Employees.LastName.map(lambda x: x[-1] == 'r'), ['EmployeeID', 'FirstName', 'LastName']]
help(pd.Series.between)
type(Employees.LastName)
dir(type(Employees.LastName))
Employees.loc[Employees.LastName.map(lambda x: '-' in x), ['EmployeeID', 'FirstName', 'LastName']]
Employees.loc[(Employees.SalariedFlag==1) & (Employees.VacationHours>35), ['EmployeeID', 'FirstName', 'LastName', 'SalariedFlag', 'VacationHours']]
Employees.loc[(Employees.SalariedFlag==1) & (Employees.VacationHours>35), ['EmployeeID', 'FirstName', 'LastName', 'SalariedFlag', 'VacationHours']]
Employees.loc[
(Employees.SalariedFlag==1) & (Employees.VacationHours>35) & (Employees.CountryName != 'United States')
, ['EmployeeID', 'FirstName', 'LastName', 'SalariedFlag', 'VacationHours']
]
Employees.loc[
(Employees.SalariedFlag==1) & (Employees.VacationHours>35) & (Employees.StateProvinceName == 'Washington')
, ['EmployeeID', 'FirstName', 'LastName', 'SalariedFlag', 'VacationHours', 'StateProvinceName']
]
Employees.loc[
(Employees.SalariedFlag==1) | (Employees.VacationHours>35) | (Employees.StateProvinceName == 'Washington')
, ['EmployeeID', 'FirstName', 'LastName', 'SalariedFlag', 'VacationHours', 'StateProvinceName']
]
'abc'.startswith('ab')
Employees.loc[ Employees.JobTitle.map(lambda x: x.startswith('Production Technician'))
& (
(Employees.SalariedFlag==1) | (Employees.VacationHours>35) | (Employees.StateProvinceName == 'Washington')
), ['EmployeeID', 'FirstName', 'LastName', 'SalariedFlag', 'VacationHours', 'StateProvinceName', 'JobTitle']
]
Employees.loc[Employees.City.isin(['Redmond', 'Seattle', 'Bellevue']),
['EmployeeID', 'FirstName', 'LastName', 'City']].sort_values('City')
Employees.loc[Employees.City.isin(['Redmond', 'Seattle', 'Bellevue'])
& (Employees.StateProvinceName=='Washington'),
['EmployeeID', 'FirstName', 'LastName', 'City']].sort_values('City')
### Note: when exporting the SQL table to Excel the one row with a ' ' in the Title was already removed...Sorry
Employees.loc[pd.isnull(Employees.Title) | Employees.Title.map(lambda x: len(str(x).strip())==0)
,['EmployeeID', 'Title', 'FirstName', 'LastName', 'City']]
Employees.loc[Employees.VacationHours >= 60
, ['EmployeeID', 'FirstName', 'LastName', 'VacationHours', 'SickLeaveHours']]
Employees.loc[Employees.VacationHours < 60
, ['EmployeeID', 'FirstName', 'LastName', 'VacationHours', 'SickLeaveHours']]
Employees.loc[(Employees.VacationHours > 20) & (Employees.VacationHours < 60)
, ['EmployeeID', 'FirstName', 'LastName', 'VacationHours', 'SickLeaveHours']]
Employees.loc[Employees.VacationHours.between(20, 60, inclusive=False)
, ['EmployeeID', 'FirstName', 'LastName', 'VacationHours', 'SickLeaveHours']]
Employees.BirthDate.min()
(Employees.BirthDate.min(), Employees.BirthDate.max())
Employees.groupby(by=['Gender']).BirthDate.aggregate([min, max])
Employees.groupby(by=['Gender', 'SalariedFlag']).BirthDate.aggregate([min, max])
Employees.VacationHours.mean()
(Employees.VacationHours.mean(), Employees.VacationHours.min())
Employees.groupby(by=['JobTitle']).VacationHours.aggregate([np.mean, np.min])
Employees.groupby(by=['JobTitle', 'MaritalStatus']).VacationHours.aggregate([np.mean, np.min])
Employees.groupby(by=['JobTitle', 'MaritalStatus']).VacationHours.aggregate([np.mean, np.min, np.max])
Employees.groupby(by=['JobTitle', 'MaritalStatus', 'StateProvinceName']).VacationHours.aggregate([np.mean, np.min, np.max])
Employees[Employees.CountryName=='United States'] \
.groupby(by=['JobTitle', 'MaritalStatus', 'StateProvinceName']).VacationHours.aggregate([np.mean, np.min, np.max])
Employees[Employees.CountryName=='United States'] \
.groupby(by=['MaritalStatus', 'StateProvinceName']).VacationHours.aggregate([np.mean, np.min, np.max])
Employees[Employees.CountryName=='United States'] \
.groupby(by=['StateProvinceName']).filter(lambda x: x.VacationHours.mean()>30) \
.groupby(by=['StateProvinceName']).VacationHours.aggregate([np.mean, np.min, np.max])
Employees[Employees.CountryName=='United States'] \
.groupby(by=['StateProvinceName']).filter(lambda x: (x.VacationHours.mean() >30) and (x.VacationHours.max()<50)) \
.groupby(by=['StateProvinceName']).VacationHours.aggregate([np.mean, np.min, np.max])
Employees[Employees.CountryName!='United States'] \
.groupby(by=['StateProvinceName']).filter(lambda x: (x.VacationHours.mean() >30) and (x.VacationHours.max()<50)) \
.groupby(by=['StateProvinceName']).VacationHours.aggregate([np.mean, np.min, np.max])
Employees.shape[0]
Employees.groupby(by=['ManagerID']).EmployeeID.aggregate(lambda x: x.shape[0])
Employees.loc[pd.isnull(Employees.EmployeeID)==False] \
.groupby(by=['ManagerID']).EmployeeID.aggregate(lambda x: x.shape[0])
Employees.loc[pd.isnull(Employees.EmployeeID)==False] \
.groupby(by=['ManagerID']).filter(lambda x: x.shape[0]>=5) \
.groupby(by=['ManagerID']).EmployeeID.aggregate(lambda x: x.shape[0])
Employees.VacationHours.mean()
Employees.groupby(by=['StateProvinceName']).VacationHours.mean()
Employees.groupby(by=['StateProvinceName', 'City']).VacationHours.mean()
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean'
},
'EmployeeID' : {
'NumEmployees' : 'count'
}
}
Employees.groupby(by=['StateProvinceName', 'City'])\
.aggregate(aggregations)
### Since Pandas already sorts the results, we show here how show resutls in DESCENDING order
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean'
},
'EmployeeID' : {
'NumEmployees' : 'count'
}
}
Employees.groupby(by=['StateProvinceName', 'City']).aggregate(aggregations) \
.sort_index(ascending=False, axis=0)
## This adds the row indices back as columns
df = Employees.groupby(by=['StateProvinceName', 'City']).aggregate(aggregations) \
.sort_index(ascending=False, axis=0)
df['SPN'] = df.index.get_level_values(0)
df['C'] = df.index.get_level_values(1)
# Version 1: first create new column 'CityState' ... rest is easy
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean'
},
'EmployeeID' : {
'NumEmployees' : 'count'
},
}
df = Employees.copy()
df['CityState'] = Employees.City.str.cat(Employees.StateProvinceName, sep=', ')
df.groupby(by = 'CityState').aggregate(aggregations).sort_index(ascending=False, axis=0)
# Version 2: using the multi-indexed result and adding index values as new column 'CityState'
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean'
},
'EmployeeID' : {
'NumEmployees' : 'count'
},
}
df = Employees.groupby(by=['StateProvinceName', 'City']).aggregate(aggregations) \
.sort_index(ascending=False, axis=0)
df['CityState'] = [
r.City+', '+r.State \
for i, r in pd.DataFrame({
'City': df.index.get_level_values(1),
'State': df.index.get_level_values(0)
}).iterrows()
]
df
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean',
'MinVacationHours' : min,
'MaxVacationHours' : max,
'DeltaVacationHoues' : lambda x: x.max()-x.min()
},
'EmployeeID' : {
'NumEmployees' : 'count'
},
}
Employees.groupby(by=['StateProvinceName', 'City']).aggregate(aggregations) \
.sort_index(ascending=False, axis=0)
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean',
'MinVacationHours' : min,
'MaxVacationHours' : max,
'DeltaVacationHours' : lambda x: x.max()-x.min()
},
'EmployeeID' : {
'NumEmployees' : 'count'
},
}
df = Employees.groupby(by=['StateProvinceName', 'City']).aggregate(aggregations)
df.iloc[np.argsort(df['VacationHours']['DeltaVacationHours'])]
def my_own_delta(x):
    """Spread (max - min) of the values in x; used as a custom aggregator."""
    hi = x.max()
    lo = x.min()
    return hi - lo
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean',
'MinVacationHours' : np.min,
'MaxVacationHours' : np.max,
'DeltaVacationHours' : my_own_delta, ##lambda x: x.max()-x.min(),
'cnt' : pd.Series.count
},
'EmployeeID' : {
'NumEmployees' : 'count'
},
}
df = Employees.groupby(by=['StateProvinceName', 'City']).aggregate(aggregations)
df['VacationHours']['DeltaVacationHours']>1
df2 = df[df['VacationHours']['DeltaVacationHours']>1]
df2.columns = df2.columns.droplevel(0)
df2.reset_index()
##df2.iloc[np.argsort(df2['VacationHours']['DeltaVacationHours'])]
##np.argsort(df2['VacationHours']['DeltaVacationHours'])
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean',
'MinVacationHours' : min,
'MaxVacationHours' : max,
'DeltaVacationHours' : lambda x: x.max()-x.min()
},
'EmployeeID' : {
'NumEmployees' : 'count'
},
}
df = Employees[Employees.CountryName != 'United States'].groupby(by=['StateProvinceName', 'City']).aggregate(aggregations)
df2 = df[df['VacationHours']['DeltaVacationHours']>1]
df2.iloc[np.argsort(df2['VacationHours']['DeltaVacationHours'])]
[cs for cs in map(lambda x,y: str(y)+', '+str(x), Employees.StateProvinceName.values, Employees.City.values)]
Employees.City.str.cat(Employees.StateProvinceName, sep=', ')
# First ten rows
Employees[0:10]
# Rows satisfying a particular condition
Employees[Employees.MaritalStatus=='M']
# Select sertain columns
Employees[['EmployeeID', 'FirstName']]
Employees[0:10][Employees.MaritalStatus=='M'][['EmployeeID', 'FirstName', 'MaritalStatus']]
Employees[Employees.MaritalStatus=='M'][['EmployeeID', 'FirstName', 'MaritalStatus']][0:10]
Employees.groupby(by=['MaritalStatus']).VacationHours.sum()
aggregations = {
'VacationHours' : {
'AverageVacationHours' : 'mean',
'MinVacationHours' : min,
'MaxVacationHours' : max,
'DeltaVacationHoues' : lambda x: x.max()-x.min()
},
'EmployeeID' : {
'NumEmployees' : 'count'
},
}
Employees.groupby(by=['StateProvinceName', 'City']).aggregate(aggregations) \
.sort_index(ascending=False, axis=0)
help(np.min)
help(pd.Series.between)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get cached CatSim stars
Step2: Use Opsim to calculate expected flux in visits.
Step3: Save to a sqlite database
|
<ASSISTANT_TASK:>
Python Code:
import desc.monitor
import pandas as pd
%load_ext autoreload
%autoreload 2
truth_db_conn = desc.monitor.StarCacheDBObj(database='../data/star_cache.db')
truth_db_conn.columns
worker = desc.monitor.TrueStars(truth_db_conn, '../../kraken_1042_sqlite.db')
# Just use one visit here for the demo. Otherwise it will use the list in data/selectedVisits.csv
star_df = worker.get_true_stars(for_obsHistIds=200, catalog_constraints='gmag > 11')
star_df
worker.write_to_db(star_df, 'twinkles_truth_demo.db', if_exists='replace')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 从三种“列表“定义开始
Step2: IPython能够支持自动补全和帮助:
Step3: 论numpy的正确打开方式:少用原生语法、多用ufunc和broadcasting,少用数据转换
Step4: 为了比较性能,使用ipython的“魔法函数”timeit,或者datetime库的计时函数:
Step5: 1.2 Numpy数据类型理解
Step6: int8
Step7: Python不显示指定数据类型与精度,如何控制数据类型?Numpy自定义
Step8: 1.3 Numpy数据操作
Step9: 2 NumPy数学函数
Step10: 2.1 数学函数的测试
Step11: Ufunc类函数返回逐个计算值:
Step12: 过滤值:类似SAS的Compress(其实是反过来)
Step13: 过滤值:类似R语法
Step14: 类似R的语法中,用于过滤条件的np.array是可以进行布尔操作的:
Step15: Python的Clip,对值进行裁剪
Step16: 可以方便的进行时间序列操作——差分
Step17: 差分其实是一种最简单的卷积:
Step18: 差分的反向操作——累计加和(累积乘法类似):
Step19: 排序时间到了:给出排序结果,本地排序,并保留原数组下标位置
Step20: 使用enumerate和sorted排序:
Step21: 3 二维数组与矩阵操作
Step22: 直接填充常用值:
Step23: 善于利用函数者,可以使用函数,传入下标作为参数,生成二维数组:
Step24: 习惯R者,使用broadcasting,生成二维数组,更具体的原理和应用在后续介绍:
Step25: 更进一步缩减代码,生成正交栅格用于广播的方法,支持灵活的运算:
Step26: 习惯数据库的笛卡尔积者,可以用outer关键字进行某些运算,支持传入iterable更能节省空间和时间:
Step27: 二维数组和矩阵有一些不同:
Step28: 一般最小二乘法求解线性回归(OLS)
Step29: 给出模型参数的估计:$\hat{\beta} = (X^TX)^{-1}X^Ty $
Step30: 对照statsmodels的OLS结果:
Step31: Page-Rank算法的核心
Step32: 4. 还有一件事……文件读写
Step33: 保存多个变量时,等号前面是变量的key,从结果r里面可以取到。
Step34: 最后,对txt,csv,tsv的读写:
|
<ASSISTANT_TASK:>
Python Code:
from numpy import cos,sin #避免使用
import numpy as np #np.method()
r1 = range(5)
r2 = np.arange(5)
r3 = xrange(5)
print r1,r2,r3
for i in r1:
print i,
print '\n'
for i in r2:
print i,
print '\n'
for i in r3:
print i,
print type(r1),type(r2),type(r3)
print np.arange(0,5),np.arange(5,0),np.arange(5,0,-1)
np.arange #Tab
np.arange?
def vecadd(n):
    """Element-wise addition of two length-n vectors in pure Python.

    Deliberately written with explicit index loops so its runtime can be
    compared against the vectorized `np_vecadd`.  Returns the list
    [a[i] + b[i]] where a[i] = 2*i and b[i] = i + 3.
    """
    # Pre-size plain lists: the original assigned into the result of
    # range(n), which is only mutable on Python 2 (TypeError on Python 3).
    a = [0] * n
    b = [0] * n
    c = []
    for i in range(n):
        a[i] = i * 2
        b[i] = i + 3
        c.append(a[i] + b[i])
    return c
def np_vecadd(n):
    """Vectorized counterpart of `vecadd`: same result via numpy ufuncs."""
    idx = np.arange(n)
    return (idx * 2) + (idx + 3)
N = 10000000
%timeit vecadd(N)
%timeit np_vecadd(N)
import datetime
t1 = datetime.datetime.now()
result = vecadd(N)
t2 = datetime.datetime.now()
t2-t1
np.arange(5)+3 #ufunc or broadcasting
np.arange(5)*2
a,b,c = 0,2,11
print np.linspace(a,b,c),'\n'*2,np.arange(c)/float(c-1)*(b-a)+a
#使用np.ndarray类型的r2
print r2,type(r2),r2.dtype,r2.shape
#Immunity of Mutables
arr1 = np.array([np.arange(5),np.arange(5)])
arr2 = np.array([r2,r2])
arr3 = [r2,r2]
print arr1,'\n'*2,arr2,'\n'*2,arr3
r2[2] = 10
print arr1,'\n'*2,arr2,'\n'*2,arr3
r2[2] = 2
#不需要关心类型是否可变,在np.array创建过程中就已经建立独立拷贝
print arr1,arr1.shape
print arr1[0,0],arr1[1,2],arr1.dtype
print np.float64(arr1) #正确
arr1.dtype = np.float64 #错误
print arr1
arr1.dtype = np.int64
print arr1
# bool : 单个bit的布尔变量,仅存True或者False
# inti : 当前平台的整数类型,int32或int64
import platform
platform.architecture()
i = np.float64(3)
print i
c = np.complex128(1+2j)
print c.real,c.imag
print i.dtype.itemsize,c.dtype.itemsize
sku = np.dtype([('sku_id',np.int32),('desc',np.str_,50),('value',np.float64)])
print sku
sku2 = np.dtype({'names':['sku_id','desc','value'],'formats':['<i4','S50','<f8']})
print sku2
online_shop = np.array([(1,'apple',2.3),(2.1,5,5),(3,'banana',True)],dtype=sku)
print online_shop
arr = np.arange(12)
print arr,arr.shape
print arr[3:5],arr[-5:-2],arr[3:28:5]
arr = arr.reshape(3,2,2)
print arr
arr.resize((3,4))
print arr,'\n',arr.ndim,'\n',arr.shape
print arr.T,'\n'*2,arr.transpose()
arrflat = arr.flat
print arrflat
print np.array([item for item in arrflat],dtype=np.int64)
print arr.flatten(order='C'),'\n'*2, arr.flatten(order='F'),'\n'*2,type(arr.flatten())
print arr.ravel(),'\n'*2,type(arr.ravel())
print np.vstack((arr,arr)),'\n'*2,np.row_stack((arr,arr)),'\n'*2, np.concatenate((arr,arr),axis=0)
print np.hstack((arr,arr)),'\n'*2,np.column_stack((arr,arr)),'\n'*2, np.concatenate((arr,arr),axis=1)
print np.dstack((arr,arr))
arr.resize((3,4,1))
print arr,'\n'*2,np.concatenate((arr,arr),axis=2)
print arr,'\n'*2,np.tile(arr,(1,1,3))
arr2 = np.arange(8).reshape(2,2,2)
print arr2
print np.vsplit(arr2,2), '\n'*2, np.split(arr2,2,axis=0)
print np.hsplit(arr2,2), '\n'*2, np.split(arr2,2,axis=1)
print np.dsplit(arr2,2), '\n'*2, np.split(arr2,2,axis=2)
arr_random = np.random.randint(1,6,15)
print arr_random
print arr_random.max(),arr_random.min(),arr_random.mean(),arr_random.var(),arr_random.std()
print np.log(arr_random),'\n'*2,np.exp(arr_random),'\n'*2,arr_random>2,'\n'*2,np.mod(arr_random,2)
arr_random.compress(arr_random>2)
arr_random[arr_random>2]
print arr_random[(arr_random>1) & (arr_random<4)],
print arr_random[(arr_random>1) ^ (arr_random<4)]
arr_random.clip(2,4)
arr_random = np.random.rand(10)
print arr_random,'\n'*2,np.diff(arr_random)
w = np.array([1,-1])
len = 2
print arr_random,'\n'*2,np.convolve(w,arr_random),'\n'*2,np.convolve(w,arr_random)[len-1:-len+1]
diff = np.convolve(w,arr_random)[0:-1]
print diff
print diff.cumsum(),'\n'*2,diff.cumprod()
arr_random = np.random.rand(10)
print np.sort(arr_random)
print arr_random
arr_random.sort()
print arr_random
arr_random = np.random.rand(10)
s = sorted(enumerate(arr_random),key=lambda x:x[1],reverse=False)
print s
zip(*s)[0]
print type(enumerate(arr_random))
for idx,value in enumerate(arr_random):
print idx,value
print zip(xrange(10),arr_random)
print np.lexsort((np.arange(10),arr_random)) #接受一个参数,传入元组
print np.argsort(arr_random) #相当于lexsort参数元组第一列给下标
arr2d_random = np.random.rand(10).reshape(5,2)
arr2d_zeros = np.zeros_like(arr2d_random)
arr2d_ones = np.ones_like(arr2d_random)
print arr2d_random,'\n'*2,arr2d_zeros,'\n'*2,arr2d_ones
print np.zeros((5,2)), '\n'*2 , np.ones((5,2)),'\n'*2,np.eye(3)
def CameronThePrimeMinister(i, j):
    """Multiplication-table entry for zero-based row i and column j.

    Works element-wise on arrays too, so it can be fed to np.fromfunction.
    """
    row = i + 1
    col = j + 1
    return row * col
print np.fromfunction(CameronThePrimeMinister,(9,9))
x = np.arange(1,10)
print x.reshape(-1,1),'\n'*2,x,'\n'*2,x.reshape(-1,1) * x
x,y = np.ogrid[0:1:6j,0:1:6j]
print x,y
print np.exp(-x**2-y**2)
np.multiply.outer(xrange(1,10),xrange(1,10))
cov = np.cov(arr2d_random.T)
print cov
stdiag = np.diag(np.sqrt(np.diag(cov)))
print stdiag
invstdiag = np.array(np.mat(stdiag).I)
print invstdiag
#错误
invstdiag*cov*invstdiag
#正确
np.mat(stdiag).I*np.mat(cov)*np.mat(stdiag).I
#验证
np.corrcoef(arr2d_random.T)
x = np.random.rand(30).reshape(15,2)
y = x[:,0]*0.7 - x[:,1]*0.2 + 0.1+0.1*np.random.rand(15)
print y,'\n'*2,x
Xcvr = np.mat(np.hstack((np.ones(15).reshape(15,1),x)))
print Xcvr
H = Xcvr*(Xcvr.T*Xcvr).I*Xcvr.T
betahats = np.dot((Xcvr.T*Xcvr).I*Xcvr.T,y)
preds = np.dot(H,y)
print betahats,'\n'*2,preds
import statsmodels.formula.api as sm
model = sm.OLS(y,Xcvr).fit()
model.summary()
rand_relation = (np.random.rand(100)>0.7).reshape(10,10)
rand_relation.dtype=np.int8
rand_relation
rand_relation = rand_relation-np.diag(np.diag(rand_relation))
rand_relation
invrowsum = 1/(np.sum(rand_relation,axis=1)+1e-12)
print invrowsum
diag_invrowsum = np.diag(invrowsum)
tpm = np.dot(diag_invrowsum,rand_relation+1e-12/10)
tpm = (tpm>1e-12/10)*tpm
print tpm
import scipy.sparse as ssp
sp_tpm = ssp.csr_matrix(tpm)
sp_tpm
for i in xrange(10):
sp_tpm = sp_tpm.dot(sp_tpm)
print repr(sp_tpm)
sp_tpm_a = sp_tpm.toarray()
print sp_tpm_a,'\n'*2,sp_tpm_a[0,:]
a = np.random.rand(10).reshape(5,2)
print a
np.save('a.npy',a)
a_copy = np.load('a.npy')
print a_copy
b = range(5)
np.savez('a_and_b.npz',a,bdata=b)
r = np.load('a_and_b.npz')
print r["arr_0"],'\n'*2,r["bdata"]
np.savetxt("a.txt",a,delimiter = '\t')
a_copy2 = np.loadtxt("a.txt",delimiter = '\t')
print a_copy2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This must be a decent batch of jokes. From our exploration above, we know most ratings are in the range -1 to 10, and positive ratings are more likely than negative ratings. Let's look at the means for each joke to see if we have any particularly good (or bad) humor here.
Step2: While the majority of the jokes generally get positive feedback from users, there are definitely a few that stand out as poor humor. Let's take a look at the worst and best joke, just for fun.
Step3: Make sense to me. We now know there are definite popularity differences between the jokes. Some of them are simply funnier than others, and some are downright lousy. Looking at the joke means allowed us to discover these general trends. Perhaps there are similar trends across users. It might be the case that some users are simply more easily humored than others. Let's take a look.
Step11: We see even more significant trends here. Some users rate nearly everything highly, and some (though not as many) rate nearly everything negatively. These observations will come in handy when considering models to use for predicting user preferences on unseen jokes.
Step14: Probabilistic Matrix Factorization
Step18: We'll also need functions for calculating the MAP and performing sampling on our PMF model. When the observation noise variance $\alpha$ and the prior variances $\alpha_U$ and $\alpha_V$ are all kept fixed, maximizing the log posterior is equivalent to minimizing the sum-of-squared-errors objective function with quadratic regularization terms.
Step19: So now our PMF class has a map property which will either be found using Powell optimization or loaded from a previous optimization. Once we have the MAP, we can use it as a starting point for our MCMC sampler. We'll need a sampling function in order to draw MCMC samples to approximate the posterior distribution of the PMF model.
Step21: We could define some kind of default trace property like we did for the MAP, but that would mean using possibly nonsensical values for nsamples and njobs. Better to leave it as a non-optional call to draw_samples. Finally, we'll need a function to make predictions using our inferred values for $U$ and $V$. For user $i$ and joke $j$, a prediction is generated by drawing from $\mathcal{N}(U_i V_j^T, \alpha)$. To generate predictions from the sampler, we generate an $R$ matrix for each $U$ and $V$ sampled, then we combine these by averaging over the $K$ samples.
Step23: One final thing to note
Step26: Training Data vs. Test Data
Step27: In order to facilitate reproducibility, I've produced a train/test split using the code above which we'll now use for all the evaluations below.
Step28: Results
Step29: As expected
Step30: Predictions Using MAP
Step31: Excellent. The first thing we want to do is make sure the MAP estimate we obtained is reasonable. We can do this by computing RMSE on the predicted ratings obtained from the MAP values of $U$ and $V$. First we define a function for generating the predicted ratings $R$ from $U$ and $V$. We ensure the actual rating bounds are enforced by setting all values below -10 to -10 and all values above 10 to 10. Finally, we compute RMSE for both the training set and the test set. We expect the test RMSE to be higher. The difference between the two gives some idea of how much we have overfit. Some difference is always expected, but a very low RMSE on the training set with a high RMSE on the test set is a definite sign of overfitting.
Step32: So we see a pretty nice improvement here when compared to our best baseline, which was the mean of means method. We also have a fairly small difference in the RMSE values between the train and the test sets. This indicates that the point estimates for $\alpha_U$ and $\alpha_V$ that we calculated from our data are doing a good job of controlling model complexity. Now let's see if we can improve our estimates by approximating our posterior distribution with MCMC sampling. We'll draw 1000 samples and back them up using the pymc3.backend.Text backend.
Step35: Diagnostics and Posterior Predictive Check
Step37: It appears we get convergence of $U$ and $V$ after about 200 samples. When testing for convergence, we also want to see convergence of the particular statistics we are looking for, since different characteristics of the posterior may converge at different rates. Let's also do a traceplot of the RSME. We'll compute RMSE for both the train and the test set, even though the convergence is indicated by RMSE on the training set alone. In addition, let's compute a running RMSE on the train/test sets to see how aggregate performance improves or decreases as we continue to sample.
Step38: We have some interesting results here. As expected, our MCMC sampler provides lower error on the training set. However, it seems it does so at the cost of overfitting the data. This results in a decrease in test RMSE as compared to the MAP, even though it is still much better than our best baseline. So why might this be the case? Recall that we used point estimates for our precision paremeters $\alpha_U$ and $\alpha_V$ and we chose a fixed precision $\alpha$. It is quite likely that by doing this, we constrained our posterior in a way that biased it towards the training data. In reality, the variance in the user ratings and the joke ratings is unlikely to be equal to the means of sample variances we used. Also, the most reasonable observation precision $\alpha$ is likely different as well.
|
<ASSISTANT_TASK:>
Python Code:
% matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import shutil
DATA_DIR = '../data/pmf'
data = pd.read_csv(os.path.join(DATA_DIR, 'jester-dataset-v1-dense-first-1000.csv'))
data.head()
# Extract the ratings from the DataFrame
all_ratings = np.ndarray.flatten(data.values)
ratings = pd.Series(all_ratings)
# Plot histogram and density.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 7))
ratings.plot(kind='density', ax=ax1, grid=False)
ax1.set_ylim(0, 0.08)
ax1.set_xlim(-11, 11)
# Plot histogram
ratings.plot(kind='hist', ax=ax2, bins=20, grid=False)
ax2.set_xlim(-11, 11)
plt.show()
ratings.describe()
joke_means = data.mean(axis=0)
joke_means.plot(kind='bar', grid=False, figsize=(16, 6),
title="Mean Ratings for All 100 Jokes")
import json
# Worst and best joke?
worst_joke_id = joke_means.argmin()
best_joke_id = joke_means.argmax()
# Let's see for ourselves. Load the jokes.
with open(os.path.join(DATA_DIR, 'jokes.json')) as buff:
joke_dict = json.load(buff)
print('The worst joke:\n---------------\n%s\n' % joke_dict[worst_joke_id])
print('The best joke:\n--------------\n%s' % joke_dict[best_joke_id])
user_means = data.mean(axis=1)
_, ax = plt.subplots(figsize=(16, 6))
user_means.plot(kind='bar', grid=False, ax=ax,
title="Mean Ratings for All 1000 Users")
ax.set_xticklabels('') # 1000 labels is nonsensical
from collections import OrderedDict
# Create a base class with scaffolding for our 3 baselines.
def split_title(title):
    """Change "BaselineMethod" to "Baseline Method".

    Splits a CamelCase identifier into space-separated words, starting a
    new word at each uppercase character.
    """
    if not title:
        # Guard: the original indexed title[0] and crashed on ''.
        return title
    words = []
    tmp = [title[0]]
    for c in title[1:]:
        if c.isupper():
            words.append(''.join(tmp))
            tmp = [c]
        else:
            tmp.append(c)
    words.append(''.join(tmp))
    return ' '.join(words)
class Baseline(object):
    """Calculate baseline predictions."""

    def __init__(self, train_data):
        """Simple heuristic-based transductive learning to fill in missing
        values in the data matrix.
        """
        self.predict(train_data.copy())

    def predict(self, train_data):
        """Fill in ``self.predicted``; must be implemented by subclasses."""
        raise NotImplementedError(
            'baseline prediction not implemented for base class')

    def rmse(self, test_data):
        """Calculate root mean squared error for predictions on test data."""
        return rmse(test_data, self.predicted)

    def __str__(self):
        # e.g. "MeanOfMeansBaseline" -> "Mean Of Means Baseline"
        return split_title(self.__class__.__name__)
# Implement the 3 baselines.
class UniformRandomBaseline(Baseline):
    """Fill missing values with uniform random values."""

    def predict(self, train_data):
        nan_mask = np.isnan(train_data)
        masked_train = np.ma.masked_array(train_data, nan_mask)
        # Draw each missing entry uniformly from the observed value range.
        pmin, pmax = masked_train.min(), masked_train.max()
        N = nan_mask.sum()
        train_data[nan_mask] = np.random.uniform(pmin, pmax, N)
        self.predicted = train_data
class GlobalMeanBaseline(Baseline):
    """Fill in missing values using the global mean."""

    def predict(self, train_data):
        nan_mask = np.isnan(train_data)
        # Every missing cell gets the mean of all observed ratings.
        train_data[nan_mask] = train_data[~nan_mask].mean()
        self.predicted = train_data
class MeanOfMeansBaseline(Baseline):
    """Fill in missing values using mean of user/item/global means."""

    def predict(self, train_data):
        nan_mask = np.isnan(train_data)
        masked_train = np.ma.masked_array(train_data, nan_mask)
        global_mean = masked_train.mean()
        user_means = masked_train.mean(axis=1)
        item_means = masked_train.mean(axis=0)
        self.predicted = train_data.copy()
        n, m = train_data.shape
        for i in range(n):
            for j in range(m):
                if np.ma.isMA(item_means[j]):
                    # Item column entirely missing; fall back to the
                    # global and user means only.
                    self.predicted[i,j] = np.mean(
                        (global_mean, user_means[i]))
                else:
                    self.predicted[i,j] = np.mean(
                        (global_mean, user_means[i], item_means[j]))
# Registry of short keys -> baseline classes, in evaluation order.
baseline_methods = OrderedDict()
baseline_methods['ur'] = UniformRandomBaseline
baseline_methods['gm'] = GlobalMeanBaseline
baseline_methods['mom'] = MeanOfMeansBaseline
import time
import logging
import pymc3 as pm
import theano
import scipy as sp
# Enable on-the-fly graph computations, but ignore
# absence of intermediate test values.
theano.config.compute_test_value = 'ignore'
# Set up logging.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class PMF(object):
    """Probabilistic Matrix Factorization model using pymc3."""

    def __init__(self, train, dim, alpha=2, std=0.01, bounds=(-10, 10)):
        """Build the Probabilistic Matrix Factorization model using pymc3.

        :param np.ndarray train: The training data to use for learning the model.
        :param int dim: Dimensionality of the model; number of latent factors.
        :param int alpha: Fixed precision for the likelihood function.
        :param float std: Amount of noise to use for model initialization.
        :param (tuple of int) bounds: (lower, upper) bound of ratings.
            These bounds will simply be used to cap the estimates produced for R.
        """
        self.dim = dim
        self.alpha = alpha
        self.std = np.sqrt(1.0 / alpha)
        self.bounds = bounds
        self.data = train.copy()
        n, m = self.data.shape

        # Perform mean value imputation
        nan_mask = np.isnan(self.data)
        self.data[nan_mask] = self.data[~nan_mask].mean()

        # Low precision reflects uncertainty; prevents overfitting.
        # Set to the mean variance across users and items.
        self.alpha_u = 1 / self.data.var(axis=1).mean()
        self.alpha_v = 1 / self.data.var(axis=0).mean()

        # Specify the model.
        logging.info('building the PMF model')
        with pm.Model() as pmf:
            U = pm.MvNormal(
                'U', mu=0, tau=self.alpha_u * np.eye(dim),
                shape=(n, dim), testval=np.random.randn(n, dim) * std)
            V = pm.MvNormal(
                'V', mu=0, tau=self.alpha_v * np.eye(dim),
                shape=(m, dim), testval=np.random.randn(m, dim) * std)
            R = pm.Normal(
                'R', mu=theano.tensor.dot(U, V.T), tau=self.alpha * np.ones((n, m)),
                observed=self.data)
        logging.info('done building the PMF model')
        self.model = pmf

    def __str__(self):
        # __init__ never sets self.name, so the original raised
        # AttributeError; fall back to the class name while still
        # honoring an explicitly assigned name.
        return getattr(self, 'name', type(self).__name__)
try:
import ujson as json
except ImportError:
import json
# First define functions to save our MAP estimate after it is found.
# We adapt these from `pymc3`'s `backends` module, where the original
# code is used to save the traces from MCMC samples.
def save_np_vars(vars, savedir):
    """Save a dictionary of numpy variables to `savedir`.

    Each array is flattened to a single row and written to
    ``<varname>.txt``; the original shapes are stored in ``shapes.json``
    so `load_np_vars` can restore them.  The directory is created if it
    does not already exist (an existing directory is reused).
    """
    logging.info('writing numpy vars to directory: %s' % savedir)
    if not os.path.isdir(savedir):
        os.mkdir(savedir)
    shapes = {}
    for varname in vars:
        data = vars[varname]
        var_file = os.path.join(savedir, varname + '.txt')
        # Flatten to one row so savetxt writes a single line per variable.
        np.savetxt(var_file, data.reshape(-1, data.size))
        shapes[varname] = data.shape
    ## Store shape information for reloading.
    shape_file = os.path.join(savedir, 'shapes.json')
    with open(shape_file, 'w') as sfh:
        json.dump(shapes, sfh)
def load_np_vars(savedir):
    """Load numpy variables saved with `save_np_vars`.

    Reads ``shapes.json`` for the original shapes and reshapes each
    ``<varname>.txt`` array accordingly.  Returns a dict of arrays.
    """
    shape_file = os.path.join(savedir, 'shapes.json')
    with open(shape_file, 'r') as sfh:
        shapes = json.load(sfh)
    loaded = {}  # renamed from `vars` to avoid shadowing the builtin
    for varname, shape in shapes.items():
        var_file = os.path.join(savedir, varname + '.txt')
        loaded[varname] = np.loadtxt(var_file).reshape(shape)
    return loaded
# Now define the MAP estimation infrastructure.
def _map_dir(self):
    """Cache directory for the MAP estimate of a model with `self.dim` factors."""
    return os.path.join(DATA_DIR, 'pmf-map-d%d' % self.dim)
def _find_map(self):
    """Find mode of posterior using Powell optimization.

    The result is cached to ``self.map_dir`` so the (slow) optimization
    only needs to run once per model dimensionality.
    """
    tstart = time.time()
    with self.model:
        logging.info('finding PMF MAP using Powell optimization...')
        self._map = pm.find_MAP(fmin=sp.optimize.fmin_powell, disp=True)
    elapsed = int(time.time() - tstart)
    logging.info('found PMF MAP in %d seconds' % elapsed)
    # This is going to take a good deal of time to find, so let's save it.
    save_np_vars(self._map, self.map_dir)

def _load_map(self):
    """Load a previously cached MAP estimate from ``self.map_dir``."""
    self._map = load_np_vars(self.map_dir)

def _map(self):
    """Lazily compute (or load from cache) and return the MAP estimate."""
    try:
        return self._map
    except AttributeError:  # not computed yet: load from disk or optimize
        if os.path.isdir(self.map_dir):
            self.load_map()
        else:
            self.find_map()
        return self._map

# Update our class with the new MAP infrastructure.
PMF.find_map = _find_map
PMF.load_map = _load_map
PMF.map_dir = property(_map_dir)
PMF.map = property(_map)
# Draw MCMC samples.
def _trace_dir(self):
basename = 'pmf-mcmc-d%d' % self.dim
return os.path.join(DATA_DIR, basename)
def _draw_samples(self, nsamples=1000, njobs=2):
# First make sure the trace_dir does not already exist.
if os.path.isdir(self.trace_dir):
shutil.rmtree(self.trace_dir)
with self.model:
logging.info('drawing %d samples using %d jobs' % (nsamples, njobs))
backend = pm.backends.Text(self.trace_dir)
logging.info('backing up trace to directory: %s' % self.trace_dir)
self.trace = pm.sample(draws=nsamples, init='advi',
n_init=150000, njobs=njobs, trace=backend)
def _load_trace(self):
    # Reload a previously saved text-backend trace from trace_dir.
    with self.model:
        self.trace = pm.backends.text.load(self.trace_dir)
# Update our class with the sampling infrastructure.
PMF.trace_dir = property(_trace_dir)
PMF.draw_samples = _draw_samples
PMF.load_trace = _load_trace
def _predict(self, U, V):
    """Estimate R from the given values of U and V.

    Draws one Gaussian sample per cell around the dot-product
    reconstruction and clamps the result to the rating bounds.
    """
    R = np.dot(U, V.T)
    # Vectorized draw replaces the original per-cell Python loops:
    # one normal sample per matrix entry, same distribution.
    sample_R = np.random.normal(R, self.std)

    # bound ratings
    low, high = self.bounds
    np.clip(sample_R, low, high, out=sample_R)
    return sample_R
PMF.predict = _predict
# Define our evaluation function.
def rmse(test_data, predicted):
    """Calculate root mean squared error.

    Ignoring missing values in the test data.
    """
    I = ~np.isnan(test_data)                    # indicator for NON-missing values
    N = I.sum()                                 # number of non-missing values
    sqerror = abs(test_data - predicted) ** 2   # squared error array
    mse = sqerror[I].sum() / N                  # mean squared error
    return np.sqrt(mse)                         # RMSE
import hashlib
# Define a function for splitting train/test data.
def split_train_test(data, percent_test=10):
    """Split the data into train/test sets.

    :param int percent_test: Percentage of data to use for testing. Default 10.
    """
    n, m = data.shape              # num users, num jokes
    N = n * m                      # num cells in matrix
    # Integer division: np.random.choice requires an int size (the
    # original float `N / percent_test` raises a TypeError on Python 3).
    test_size = N // percent_test  # use 10% of data as test set
    train_size = N - test_size     # and remainder for training

    # Prepare train/test ndarrays.
    train = data.copy().values
    test = np.ones(data.shape) * np.nan

    # Draw random sample of training data to use for testing.
    tosample = np.where(~np.isnan(train))            # ignore nan values in data
    # list() is required: a Py3 zip iterator has no len() and can't be indexed.
    idx_pairs = list(zip(tosample[0], tosample[1]))  # tuples of row/col index pairs
    indices = np.arange(len(idx_pairs))              # indices of index pairs
    sample = np.random.choice(indices, replace=False, size=test_size)

    # Transfer random sample from train set to test set.
    for idx in sample:
        idx_pair = idx_pairs[idx]
        test[idx_pair] = train[idx_pair]  # transfer to test set
        train[idx_pair] = np.nan          # remove from train set

    # Verify everything worked properly
    assert(np.isnan(train).sum() == test_size)
    assert(np.isnan(test).sum() == train_size)

    # Finally, hash the indices and save the train/test sets.
    index_string = ''.join(map(str, np.sort(sample)))
    # hashlib.sha1 needs bytes on Python 3.
    name = hashlib.sha1(index_string.encode('utf-8')).hexdigest()
    savedir = os.path.join(DATA_DIR, name)
    save_np_vars({'train': train, 'test': test}, savedir)

    # Return train set, test set, and unique hash of indices.
    return train, test, name
def load_train_test(name):
    """Load the train/test sets saved under the given index hash `name`."""
    savedir = os.path.join(DATA_DIR, name)
    vars = load_np_vars(savedir)
    return vars['train'], vars['test']
# train, test, name = split_train_test(data)
train, test = load_train_test('6bb8d06c69c0666e6da14c094d4320d115f1ffc8')
# Let's see the results:
baselines = {}
for name in baseline_methods:
Method = baseline_methods[name]
method = Method(train)
baselines[name] = method.rmse(test)
print('%s RMSE:\t%.5f' % (method, baselines[name]))
# We use a fixed precision for the likelihood.
# This reflects uncertainty in the dot product.
# We choose 2 in the footsteps Salakhutdinov
# Mnihof.
ALPHA = 2
# The dimensionality D; the number of latent factors.
# We can adjust this higher to try to capture more subtle
# characteristics of each joke. However, the higher it is,
# the more expensive our inference procedures will be.
# Specifically, we have D(N + M) latent variables. For our
# Jester dataset, this means we have D(1100), so for 5
# dimensions, we are sampling 5500 latent variables.
DIM = 5
pmf = PMF(train, DIM, ALPHA, std=0.05)
# Find MAP for PMF.
pmf.find_map()
def eval_map(pmf_model, train, test):
    """Score the model's MAP estimate on both splits and return the test RMSE."""
    # Reconstruct the ratings matrix from the MAP factor matrices.
    map_est = pmf_model.map
    predictions = pmf_model.predict(map_est['U'], map_est['V'])

    train_rmse = rmse(train, predictions)
    test_rmse = rmse(test, predictions)
    overfit = test_rmse - train_rmse

    # Print report.
    print('PMF MAP training RMSE: %.5f' % train_rmse)
    print('PMF MAP testing RMSE: %.5f' % test_rmse)
    print('Train/test difference: %.5f' % overfit)

    return test_rmse
# Add eval function to PMF class.
PMF.eval_map = eval_map
# Evaluate PMF MAP estimates.
pmf_map_rmse = pmf.eval_map(train, test)
pmf_improvement = baselines['mom'] - pmf_map_rmse
print('PMF MAP Improvement: %.5f' % pmf_improvement)
# Draw MCMC samples.
pmf.draw_samples(300)
# uncomment to load previous trace rather than drawing new samples.
# pmf.load_trace()
def _norms(pmf_model, monitor=('U', 'V'), ord='fro'):
    """Return norms of latent variables at each step in the
    sample trace. These can be used to monitor convergence
    of the sampler.
    """
    # BUG FIX: the original re-assigned monitor = ('U', 'V') here,
    # silently discarding the caller's argument; honor the parameter.
    norms = {var: [] for var in monitor}
    for sample in pmf_model.trace:
        for var in monitor:
            norms[var].append(np.linalg.norm(sample[var], ord))
    return norms
def _traceplot(pmf_model):
    """Plot Frobenius norms of U and V as a function of sample #."""
    trace_norms = pmf_model.norms()
    u_series = pd.Series(trace_norms['U'])
    v_series = pd.Series(trace_norms['V'])
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 7))
    # Raw strings: '\|' is an invalid escape sequence in a plain string
    # (DeprecationWarning on modern Python); output text is unchanged.
    u_series.plot(kind='line', ax=ax1, grid=False,
                  title=r"$\|U\|_{Fro}^2$ at Each Sample")
    v_series.plot(kind='line', ax=ax2, grid=False,
                  title=r"$\|V\|_{Fro}^2$ at Each Sample")
    ax1.set_xlabel("Sample Number")
    ax2.set_xlabel("Sample Number")
PMF.norms = _norms
PMF.traceplot = _traceplot
pmf.traceplot()
def _running_rmse(pmf_model, test_data, train_data, burn_in=0, plot=True):
    """Calculate RMSE for each step of the trace to monitor convergence.

    Tracks both the per-sample RMSE and the RMSE of the running mean of
    the posterior-predictive ratings (the Monte Carlo estimate).
    """
    burn_in = burn_in if len(pmf_model.trace) >= burn_in else 0
    results = {'per-step-train': [], 'running-train': [],
               'per-step-test': [], 'running-test': []}
    R = np.zeros(test_data.shape)
    for cnt, sample in enumerate(pmf_model.trace[burn_in:]):
        sample_R = pmf_model.predict(sample['U'], sample['V'])
        R += sample_R
        running_R = R / (cnt + 1)  # running average of predictions so far
        results['per-step-train'].append(rmse(train_data, sample_R))
        results['running-train'].append(rmse(train_data, running_R))
        results['per-step-test'].append(rmse(test_data, sample_R))
        results['running-test'].append(rmse(test_data, running_R))

    results = pd.DataFrame(results)

    if plot:
        results.plot(
            kind='line', grid=False, figsize=(15, 7),
            title='Per-step and Running RMSE From Posterior Predictive')

    # Return the final predictions, and the RMSE calculations
    return running_R, results
PMF.running_rmse = _running_rmse
predicted, results = pmf.running_rmse(test, train, burn_in=200)
# And our final RMSE?
final_test_rmse = results['running-test'].values[-1]
final_train_rmse = results['running-train'].values[-1]
print('Posterior predictive train RMSE: %.5f' % final_train_rmse)
print('Posterior predictive test RMSE: %.5f' % final_test_rmse)
print('Train/test difference: %.5f' % (final_test_rmse - final_train_rmse))
print('Improvement from MAP: %.5f' % (pmf_map_rmse - final_test_rmse))
print('Improvement from Mean of Means: %.5f' % (baselines['mom'] - final_test_rmse))
size = 100 # RMSE doesn't really change after 100th sample anyway.
all_results = pd.DataFrame({
'uniform random': np.repeat(baselines['ur'], size),
'global means': np.repeat(baselines['gm'], size),
'mean of means': np.repeat(baselines['mom'], size),
'PMF MAP': np.repeat(pmf_map_rmse, size),
'PMF MCMC': results['running-test'][:size],
})
fig, ax = plt.subplots(figsize=(10, 5))
all_results.plot(kind='line', grid=False, ax=ax,
title='RMSE for all methods')
ax.set_xlabel("Number of Samples")
ax.set_ylabel("RMSE")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualizing the Data
Step2: Peak Precision
Step3: Looks like $2.58023 \pm 0.00006$ hours
Step4: Required Grid Spacing
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
def get_LINEAR_lightcurve(lcid):
    # Download the LINEAR survey sample via astroML (network access) and
    # dump the light curve for `lcid` to a local CSV so later runs can
    # read it without re-downloading.
    from astroML.datasets import fetch_LINEAR_sample
    LINEAR_sample = fetch_LINEAR_sample()
    data = pd.DataFrame(LINEAR_sample[lcid],
                        columns=['t', 'mag', 'magerr'])
    data.to_csv('LINEAR_{0}.csv'.format(lcid), index=False)
# Uncomment to download the data
# get_LINEAR_lightcurve(lcid=11375941)
data = pd.read_csv('LINEAR_11375941.csv')
data.head()
data.shape
(data.t.max() - data.t.min()) / 365.
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.style.use('seaborn-whitegrid')
fig, ax = plt.subplots(figsize=(8, 3))
ax.errorbar(data.t, data.mag, data.magerr,
fmt='.k', ecolor='gray', capsize=0)
ax.set(xlabel='time (MJD)',
ylabel='magnitude',
title='LINEAR object 11375941')
ax.invert_yaxis()
fig.savefig('fig01_LINEAR_data.pdf');
from astropy.timeseries import LombScargle
ls = LombScargle(data.t, data.mag, data.magerr)
frequency, power = ls.autopower(nyquist_factor=500,
minimum_frequency=0.2)
period_days = 1. / frequency
period_hours = period_days * 24
best_period = period_days[np.argmax(power)]
phase = (data.t / best_period) % 1
print("Best period: {0:.2f} hours".format(24 * best_period))
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
# PSD has a _LOT_ of elements. Rasterize it so it can be displayed as PDF
ax[0].plot(period_days, power, '-k', rasterized=True)
ax[0].set(xlim=(0, 2.5), ylim=(0, 0.8),
xlabel='Period (days)',
ylabel='Lomb-Scargle Power',
title='Lomb-Scargle Periodogram')
ax[1].errorbar(phase, data.mag, data.magerr,
fmt='.k', ecolor='gray', capsize=0)
ax[1].set(xlabel='phase',
ylabel='magnitude',
title='Phased Data')
ax[1].invert_yaxis()
ax[1].text(0.02, 0.03, "Period = {0:.2f} hours".format(24 * best_period),
transform=ax[1].transAxes)
inset = fig.add_axes([0.25, 0.6, 0.2, 0.25])
inset.plot(period_hours, power, '-k', rasterized=True)
inset.xaxis.set_major_locator(plt.MultipleLocator(1))
inset.yaxis.set_major_locator(plt.MultipleLocator(0.2))
inset.set(xlim=(1, 5),
xlabel='Period (hours)',
ylabel='power')
fig.savefig('fig02_LINEAR_PSD.pdf');
f, P = ls.autopower(nyquist_factor=500,
minimum_frequency=9.3,
maximum_frequency=9.31,
samples_per_peak=20,
normalization='psd')
P = np.exp(P)
P /= P.max()
h = 24. / f
plt.plot(h, P, '-k')
plt.fill(h, np.exp(-0.5 * (h - 2.58014) ** 2 / 0.00004 ** 2), color='gray', alpha=0.3)
plt.xlim(2.58, 2.5803)
fig, ax = plt.subplots(figsize=(10, 3))
phase_model = np.linspace(-0.5, 1.5, 100)
best_frequency = frequency[np.argmax(power)]
mag_model = ls.model(phase_model / best_frequency, best_frequency)
for offset in [-1, 0, 1]:
ax.errorbar(phase + offset, data.mag, data.magerr, fmt='.',
color='gray', ecolor='lightgray', capsize=0);
ax.plot(phase_model, mag_model, '-k', lw=2)
ax.set(xlim=(-0.5, 1.5),
xlabel='phase',
ylabel='mag')
ax.invert_yaxis()
fig.savefig('fig18_ls_model.pdf')
period_hours_bad = np.linspace(1, 6, 10001)
frequency_bad = 24 / period_hours_bad
power_bad = ls.power(frequency_bad)
mask = (period_hours > 1) & (period_hours < 6)
fig, ax = plt.subplots(figsize=(10, 3))
ax.plot(period_hours[mask], power[mask], '-', color='lightgray',
rasterized=True, label='Well-motivated frequency grid')
ax.plot(period_hours_bad, power_bad, '-k',
rasterized=True, label='10,000 equally-spaced periods')
ax.grid(False)
ax.legend()
ax.set(xlabel='period (hours)',
ylabel='Lomb-Scargle Power',
title='LINEAR object 11375941')
fig.savefig('fig19_LINEAR_coarse_grid.pdf')
!head LINEAR_11375941.csv
n_digits = 6
f_ny = 0.5 * 10 ** n_digits
T = (data.t.max() - data.t.min())
n_o = 5
delta_f = 1. / n_o / T
print("f_ny =", f_ny)
print("T =", T)
print("n_grid =", f_ny / delta_f)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A memory network is a network that can retain information; it can be trained on a structured story and will learn how to answer questions about said story.
Step2: This parser formats the story into a time-order labelled sequence of sentences, followed by the question and the labelled answer.
Step3: Next we download and parse the data set.
Step4: Here we calculate upper bounds for things like words in sentence, sentences in a story, etc. for the corpus, which will be useful later.
Step5: Create vocabulary of corpus and find size, including a padding element.
Step6: Now the dataset is in the correct format.
Step7: Create an index mapping for the vocabulary.
Step8: Next we vectorize our dataset by mapping words to their indices. We enforce consistent dimension by padding vectors up to the upper bounds we calculated earlier with our pad element.
Step9: Our inputs for keras.
Step10: Model
Step11: We use <tt>TimeDistributed</tt> here to apply the embedding to every element of the sequence, then the <tt>Lambda</tt> layer adds them up
Step12: The embedding works as desired; the raw input has 10 sentences of 8 words, and the output has 10 sentence embeddings of length 20.
Step13: We do the same for the queries, omitting the <tt>TimeDistributed</tt> since there is only one query. We use <tt>Reshape</tt> to match the rank of the input.
Step14: The actual memory network is incredibly simple.
Step15: Next, we construct a second, separate, embedding function for the sentences
Step16: And it works extremely well
Step17: Test
Step18: Sure enough, for the question "Where is Sandra?", the largest weight is the last sentence with the name Sandra, sentence 1 with 0.98.
Step19: Multi hop
Step20: We can see that the question "Where is the milk?" requires to supporting facts to answer, "Daniel traveled to the hallway" and "Daniel left the milk there".
Step21: The approach is basically the same; we add more embedding dimensions to account for the increased task complexity.
Step22: The main difference is that we are going to do the same process twice. Here we've defined a "hop" as the operation that returns the weighted average of the input sentence embeddings.
Step23: We do one hop, and repeat the process using the resulting weighted sentence average as the new weights.
Step24: Fitting this model can be tricky.
Step25: Custom bias layer
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import importlib, utils2; importlib.reload(utils2)
from utils2 import *
np.set_printoptions(4)
cfg = K.tf.ConfigProto(gpu_options={'allow_growth': True})
K.set_session(K.tf.Session(config=cfg))
def tokenize(sent):
    """Split a sentence into word and punctuation tokens.

    The original pattern '(\\W+)?' can match the empty string; on
    Python 3.7+ re.split then splits at every position, degrading the
    output to single characters. '(\\W+)' preserves the intended
    word-level tokenization.
    """
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
def parse_stories(lines):
    """Parse stories in the bAbI tasks format.

    Each line is "<id> <sentence>" or "<id> <question>\\t<answer>\\t<support>";
    a line id of 1 starts a new story. Returns a list of
    (story_sentences, question_tokens, answer) tuples, where every story
    sentence is prefixed with its position index token ("0:", "1:", ...).
    """
    data = []
    story = []
    for line in lines:
        line = line.decode('utf-8').strip()  # tarfile yields bytes
        nid, line = line.split(' ', 1)
        if int(nid) == 1:
            story = []
        if '\t' in line:
            # Question line: question, answer, supporting-fact ids
            # (the supporting ids are not used by this parser).
            q, a, supporting = line.split('\t')
            q = tokenize(q)
            # Removed the dead `substory = None` the original overwrote
            # on the very next line.
            substory = [[str(i) + ":"] + x for i, x in enumerate(story) if x]
            data.append((substory, q, a))
            story.append('')  # placeholder keeps sentence indices aligned
        else:
            story.append(tokenize(line))
    return data
path = get_file('babi-tasks-v1-2.tar.gz',
origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
'two_supporting_facts_1k': 'tasks_1-20_v1-2/en/qa2_two-supporting-facts_{}.txt',
}
challenge_type = 'single_supporting_fact_10k'
# challenge_type = 'two_supporting_facts_10k'
challenge = challenges[challenge_type]
def get_stories(f):
    """Read a bAbI data file and return its (story, question, answer) tuples."""
    # parse_stories already returns the desired list of 3-tuples; the
    # original re-packed it element-by-element, which is just a copy.
    return list(parse_stories(f.readlines()))
train_stories = get_stories(tar.extractfile(challenge.format('train')))
test_stories = get_stories(tar.extractfile(challenge.format('test')))
stories = train_stories + test_stories
story_maxlen = max((len(s) for x, _, _ in stories for s in x))
story_maxsents = max((len(x) for x, _, _ in stories))
query_maxlen = max(len(x) for _, x, _ in stories)
def do_flatten(el):
    """Return True if `el` should be flattened further (iterable, not str/bytes)."""
    # collections.Iterable was removed in Python 3.10; the abc module is
    # the supported location.
    from collections.abc import Iterable
    return isinstance(el, Iterable) and not isinstance(el, (str, bytes))
def flatten(l):
    """Recursively yield the leaf elements of an arbitrarily nested iterable."""
    for item in l:
        if not do_flatten(item):
            yield item
        else:
            yield from flatten(item)
vocab = sorted(set(flatten(stories)))
vocab.insert(0, '<PAD>')
vocab_size = len(vocab)
story_maxsents, vocab_size, story_maxlen, query_maxlen, len(train_stories), len(test_stories)
test_stories[534]
word_idx = dict((c, i) for i, c in enumerate(vocab))
def vectorize_stories(data, word_idx, story_maxlen, query_maxlen):
    """Map every word to its vocabulary index and pad sequences to fixed length."""
    X, Xq, Y = [], [], []
    for story, query, answer in data:
        X.append([[word_idx[w] for w in sentence] for sentence in story])
        Xq.append([word_idx[w] for w in query])
        Y.append([word_idx[answer]])
    # Pad each story's sentences and all queries up to the corpus maxima.
    padded_stories = [pad_sequences(x, maxlen=story_maxlen) for x in X]
    return (padded_stories,
            pad_sequences(Xq, maxlen=query_maxlen),
            np.array(Y))
inputs_train, queries_train, answers_train = vectorize_stories(train_stories,
word_idx, story_maxlen, query_maxlen)
inputs_test, queries_test, answers_test = vectorize_stories(test_stories,
word_idx, story_maxlen, query_maxlen)
def stack_inputs(inputs):
    # Pad every story (2-D array of sentences x words) with all-zero
    # sentences up to the corpus-wide maximum sentence count, then stack
    # the whole dataset into one 3-D tensor.
    # NOTE(review): relies on the module-level story_maxsents/story_maxlen
    # and mutates `inputs` in place.
    for i, it in enumerate(inputs):
        inputs[i] = np.concatenate([it,
                   np.zeros((story_maxsents-it.shape[0],story_maxlen), 'int')])
    return np.stack(inputs)
inputs_train = stack_inputs(inputs_train)
inputs_test = stack_inputs(inputs_test)
inputs_train.shape, inputs_test.shape
inps = [inputs_train, queries_train]
val_inps = [inputs_test, queries_test]
emb_dim = 20
parms = {'verbose': 2, 'callbacks': [TQDMNotebookCallback(leave_inner=False)]}
def emb_sent_bow(inp):
    # Bag-of-words sentence encoder: embed every word of every sentence,
    # then sum the word embeddings along the word axis (axis 2), giving
    # one fixed-size vector per sentence.
    emb = TimeDistributed(Embedding(vocab_size, emb_dim))(inp)
    return Lambda(lambda x: K.sum(x, 2))(emb)
inp_story = Input((story_maxsents, story_maxlen))
emb_story = emb_sent_bow(inp_story)
inp_story.shape, emb_story.shape
inp_q = Input((query_maxlen,))
emb_q = Embedding(vocab_size, emb_dim)(inp_q)
emb_q = Lambda(lambda x: K.sum(x, 1))(emb_q)
emb_q = Reshape((1, emb_dim))(emb_q)
inp_q.shape, emb_q.shape
x = dot([emb_story, emb_q], 2) # Keras 2
x = Reshape((story_maxsents,))(x)
x = Activation('softmax')(x)
match = Reshape((story_maxsents,1))(x)
match.shape
emb_c = emb_sent_bow(inp_story)
x = dot([match, emb_c], 1) # Keras 2
response = Reshape((emb_dim,))(x)
res = Dense(vocab_size, activation='softmax')(response)
answer = Model([inp_story, inp_q], res)
answer.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
K.set_value(answer.optimizer.lr, 1e-2)
hist=answer.fit(inps, answers_train, **parms, epochs=4, batch_size=32,
validation_data=(val_inps, answers_test))
f = Model([inp_story, inp_q], match)
qnum=6
l_st = len(train_stories[qnum][0])+1
train_stories[qnum]
np.squeeze(f.predict([inputs_train[qnum:qnum+1], queries_train[qnum:qnum+1]]))[:l_st]
answers_train[qnum:qnum+10,0]
np.argmax(answer.predict([inputs_train[qnum:qnum+10], queries_train[qnum:qnum+10]]), 1)
answer.predict([inputs_train[qnum:qnum+1], queries_train[qnum:qnum+1]])
vocab[19]
test_stories[534]
inputs_train.shape, inputs_test.shape
parms = {'verbose': 2, 'callbacks': [TQDMNotebookCallback(leave_inner=False)]}
emb_dim = 30
def emb_sent_bow(inp):
    # Bag-of-words sentence encoder (two-hop variant): also returns the
    # embedding op so the same weights can be shared with the query.
    emb_op = TimeDistributed(Embedding(vocab_size, emb_dim))
    emb = emb_op(inp)
    emb = Lambda(lambda x: K.sum(x, 2))(emb)
#     return Elemwise(0, False)(emb), emb_op
    return emb, emb_op
inp_story = Input((story_maxsents, story_maxlen))
inp_q = Input((query_maxlen,))
emb_story, emb_story_op = emb_sent_bow(inp_story)
emb_q = emb_story_op.layer(inp_q)
emb_q = Lambda(lambda x: K.sum(x, 1))(emb_q)
h = Dense(emb_dim)
def one_hop(u, A):
    # One memory-network hop: attend over the story-sentence embeddings A
    # using state u, read out a weighted sum of a fresh sentence
    # embedding C, project through the shared Dense h, and add the query
    # embedding as a residual.
    # NOTE(review): closes over the module-level inp_story, emb_q and h.
    C, _ = emb_sent_bow(inp_story)
    x = Reshape((1, emb_dim))(u)
    x = dot([A, x], 2)  # Keras 2 -- attention scores per sentence
    x = Reshape((story_maxsents,))(x)
    x = Activation('softmax')(x)
    match = Reshape((story_maxsents,1))(x)  # attention weights

    x = dot([match, C], 1)  # Keras 2 -- weighted sum of sentence embeddings
    x = Reshape((emb_dim,))(x)
    x = h(x)
    x = add([x, emb_q])  # Keras 2
    return x, C
response, emb_story = one_hop(emb_q, emb_story)
response, emb_story = one_hop(response, emb_story)
# response, emb_story = one_hop(response, emb_story)
res = Dense(vocab_size, activation='softmax')(response)
answer = Model([inp_story, inp_q], res)
answer.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
K.set_value(answer.optimizer.lr, 5e-3)
hist=answer.fit(inps, answers_train, **parms, epochs=8, batch_size=32,
validation_data=(val_inps, answers_test))
np.array(hist.history['val_acc'])
class Elemwise(Layer):
    """Keras layer holding one learned weight vector along `axis`, applied
    element-wise to the input (multiplied when is_mult, added otherwise),
    broadcasting over the remaining axes.
    """
    def __init__(self, axis, is_mult, init='glorot_uniform', **kwargs):
        self.init = initializations.get(init)
        self.axis = axis
        self.is_mult = is_mult
        super(Elemwise, self).__init__(**kwargs)

    def build(self, input_shape):
        # The weight is size 1 on every non-batch axis except self.axis,
        # so it broadcasts across the other dimensions.
        input_dims = input_shape[1:]
        dims = [1] * len(input_dims)
        dims[self.axis] = input_dims[self.axis]
        self.b = self.add_weight(dims, self.init, '{}_bo'.format(self.name))
        self.built = True

    def call(self, x, mask=None):
        return x * self.b if self.is_mult else x + self.b

    def get_output_shape_for(self, input_shape):
        return input_shape

    def get_config(self):
        config = {'init': self.init.__name__, 'axis': self.axis}
        # BUG FIX: the original called super(Dense, self); Dense is not in
        # Elemwise's MRO, so that raises TypeError at serialization time.
        base_config = super(Elemwise, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Решение задачи на sleepsort
Step2: Асинхронность и параллельность
Step3: Ключевые слова async и await
Step4: Упражнение
Step5: Django
Step6: А где там html?
Step7: Меняем пути в шаблоне
|
<ASSISTANT_TASK:>
Python Code:
import yaml
import random
with open("answers.yaml", "r") as conf:
config = yaml.load(conf)
def get_answer(message):
    """Return a random canned reply whose trigger keyword occurs in `message`.

    Falls through to None when no keyword matches. Uses the module-level
    `config` loaded from answers.yaml.
    """
    lower_msg = message.lower()
    for keyword, replies in config['answers'].items():
        if keyword in lower_msg:
            return random.choice(replies)
import random
import threading
import time
class SleepThread(threading.Thread):
    """Thread that sleeps `num` seconds and then prints the value.

    Starting one thread per number implements "sleep sort": smaller
    numbers wake up (and print) earlier.
    """
    def __init__(self, num):
        super().__init__()
        self.num = num
    def run(self):
        time.sleep(self.num)
        print(self.num)
a = [random.randint(0, 10) for _ in range(10)]
threads = [SleepThread(i) for i in a]
for t in threads:
t.start()
def sleep_print(num):
    # Sleep-sort worker: wait `num` seconds, then print the value.
    time.sleep(num)
    print(num)
a = [random.randint(0, 10) for _ in range(10)]
threads = [
threading.Thread(target=sleep_print, args=(i,))
for i in a
]
for t in threads:
t.start()
import concurrent.futures as cf
def hold_my_beer(num):
    # Simulated long-running task: block for `num` seconds and return it,
    # so as_completed() yields the futures in sorted order.
    time.sleep(num)
    return num
a = [random.randint(0, 10) for _ in range(10)]
with cf.ThreadPoolExecutor(max_workers=len(a)) as pool:
for future in cf.as_completed([
pool.submit(hold_my_beer, i) for i in a
]):
print(future.result())
import asyncio
asyncio.Queue() # асинхронная очередь
asyncio.sleep(10) # асинхронный "сон"
asyncio.create_subprocess_exec() # асинхронный subprocess
asyncio.Lock() # асинхронный мьютекс
asyncio.ensure_future() # ручное добавление корутины в event loop
asyncio.gather() # дождаться окончания работы списка корутин
import asyncio
async def hello(name):
    """Coroutine that builds a greeting string for `name`."""
    return f"Hello, {name}!"
hello("Vasya")
await hello("Vasya")
import asyncio
async def hello(name):
    """Coroutine that builds a greeting string for `name`."""
    return f"Hello, {name}!"

async def call_vasya():
    """Await the greeting coroutine for Vasya and pass its result through."""
    return await hello("Vasya")
loop = asyncio.get_event_loop()
print(loop.run_until_complete(call_vasya()))
import asyncio
import random
async def hold(num):
    # Non-blocking wait: yield control to the event loop for `num`
    # seconds, then return the value (async sleep-sort building block).
    await asyncio.sleep(num)
    return num
a = [random.randint(0, 10) for _ in range(10)]
# hello/views.py
from django.http import HttpResponse
def index(request):
    # Minimal Django view: reply to every request with a plain greeting.
    return HttpResponse("Hello!")
# hello/urls.py
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
]
# urls.py
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^hello/', include('hello.urls')),
url(r'^admin/', admin.site.urls),
]
# Добавляем в settings.py строчки
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
# а также похожую строчку в TEMPLATES["DIRS"]:
os.path.join(BASE_DIR, "templates")
# а в urls.py делаем так:
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
urlpatterns = [
url(r'^hello/', include('hello.urls')),
url(r'^admin/', admin.site.urls),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# hello/views.py
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
    # Django view that renders the index.html template with an empty context.
    return render(request, 'index.html', {})
# my_application.py
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
    # Flask view bound to the site root.
    return "Hello, World!"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lectura y grafica de los datos de 'TSI2.tx'
Step2: Transformada de fourier de los datos
Step3: Análisis
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib
import pylab as plt
import scipy.misc as pim
from scipy import stats
% matplotlib inline
font = {'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
x,y = np.loadtxt('TSI2.txt', usecols=[0,1], dtype='float', unpack='True',delimiter=None)
plt.figure(figsize=(16,8))
plt.plot(x,y,marker='.',color='blue',linewidth=1,markersize=6)
plt.title('Radiation cycles of the Sun', fontsize=24)
plt.xlabel('years', fontsize=18)
plt.ylabel('Radiation [w/m²]', fontsize=18)
plt.grid()
dy = 1
fty = np.fft.fftshift(np.fft.fft(np.fft.fftshift(y)))*dy
v = np.arange(-1/(2*dy),1/(2*dy),1/(dy*x.size))
ti = 0
tf =0.5
plt.figure(figsize=(16,8))
plt.plot(v,abs(fty)**(1/4),marker='.',color='blue',linewidth=1,markersize=6)
plt.plot((0, 0.5), (2, 2), 'r-')
plt.title('FT Radiation cycles of the Sun', fontsize=24)
plt.xlabel('Freq.', fontsize=18)
plt.ylabel('Amp', fontsize=18)
plt.xlim(ti,tf)
plt.grid()
yinv=y[::-1] #Invertir el orden de los datos del arreglo
yinv=yinv[1:] #Eliminar primer elemento del nuevo arreglo
yinv=yinv[:-1] #Eliminar ultimo elemento del nuevo arreglo
yf=[]
k=0
while k<50: #Ciclo while para meter en un arreglo los valores de la señal, la inversa de la señal en el tiempo el numero que deseemos repertila
for i in range(0,len(y)):
yf.append(y[i])#Señal
#print len(data3)
for i in range(0,len(yinv)):#Señal invertida en el tiempo
yf.append(yinv[i])
k+=1
dy = 1
t1 = np.arange(0,len(yf),dy)
fty2 = np.fft.fftshift(np.fft.fft(np.fft.fftshift(yf)))*dy
v = np.arange(-1/(2*dy),1/(2*dy),1/(dy*t1.size))
ti = 0
tf =0.1
plt.figure(figsize=(16,8))
plt.plot(v,abs(fty2)**(1/4),marker='.',color='blue',linewidth=1,markersize=6)
plt.plot((0, 0.1), (5.5, 5.5), 'r-')
plt.title('More Resolution FT Radiation cycles of the Sun', fontsize=24)
plt.xlabel('Freq.', fontsize=18)
plt.ylabel('Amp', fontsize=18)
plt.xlim(ti,tf)
plt.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Contour plots of 2d wavefunctions
Step3: The contour, contourf, pcolor and pcolormesh functions of Matplotlib can be used for effective visualizations of 2d scalar fields. Use the Matplotlib documentation to learn how to use these functions along with the numpy.meshgrid function to visualize the above wavefunction
Step4: Next make a visualization using one of the pcolor functions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def well2d(x, y, nx, ny, L=1.0):
    """Compute the 2d quantum well wave function.

    psi(x, y) = (2/L) * sin(nx*pi*x/L) * sin(ny*pi*y/L), the normalized
    particle-in-a-box eigenfunction with quantum numbers (nx, ny) in a
    square well of side L. Operates elementwise on array inputs.
    """
    return 2 / L * np.sin((nx * np.pi * x) / L) * np.sin((ny * np.pi * y) / L)
psi = well2d(np.linspace(0,1,10), np.linspace(0,1,10), 1, 1)
assert len(psi)==10
assert psi.shape==(10,)
x=np.linspace(0.0,1.0,100)
y=np.linspace(0.0,1.0,100)
n,m=np.meshgrid(x,y)#makes the grid
plt.contour(well2d(n,m,3,2))
plt.title('Wave Function Visualization')#makes it pretty
plt.xlabel('x')
plt.ylabel('y')
assert True # use this cell for grading the contour plot
plt.pcolor(well2d(n,m,3,2))
plt.title('Wave Function Visulization')
plt.xlabel('x')
plt.ylabel('y')
assert True # use this cell for grading the pcolor plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1 - Feature Engineering
Step2: Tasks - A - Feature Engineering
Step3: Tasks - B - Feature Engineering
Step4: Breakpoint
Step5: Visualizing the Results
Step6: Now let's use a Seaborn pairplot as well. This will really show you which features have clear dividing lines between the classes. Docs are available here
Step7: Finally, let's try making a RadViz of the features. This visualization will help us see whether there is too much noise to make accurate classifications.
|
<ASSISTANT_TASK:>
Python Code:
## Load data
df = pd.read_csv('../../data/dga_data_small.csv')
df.drop(['host', 'subclass'], axis=1, inplace=True)
print(df.shape)
df.sample(n=5).head() # print a random sample of the DataFrame
df[df.isDGA == 'legit'].head()
# Google's 10000 most common english words will be needed to derive a feature called ngrams...
# therefore we already load them here.
top_en_words = pd.read_csv('../../data/google-10000-english.txt', header=None, names=['words'])
top_en_words.sample(n=5).head()
# Source: https://github.com/first20hours/google-10000-english
def H_entropy(x):
    """Return the Shannon entropy of string x in bits."""
    from collections import Counter  # single pass instead of str.count per unique char
    length = len(x)
    if length == 0:
        return 0  # empty string: matches -sum([]) in the original
    prob = [count / length for count in Counter(x).values()]
    return -sum(p * np.log2(p) for p in prob)
def vowel_consonant_ratio(x):
    """Return the ratio of vowels to consonants in x (0 when no consonants)."""
    # Calculate vowel to consonant ratio
    x = x.lower()
    vowels_pattern = re.compile(r'([aeiou])')
    consonants_pattern = re.compile(r'([b-df-hj-np-tv-z])')
    vowels = re.findall(vowels_pattern, x)
    consonants = re.findall(consonants_pattern, x)
    try:
        ratio = len(vowels) / len(consonants)
    except ZeroDivisionError:  # was a bare except; only "no consonants" is expected
        ratio = 0
    return ratio
# derive features
df['length'] = df.domain.str.len()
df['digits'] = df.domain.str.count('[0-9]')
df['entropy'] = df.domain.apply(H_entropy)
df['vowel-cons'] = df.domain.apply(vowel_consonant_ratio)
# encode strings of target variable as integers
df.isDGA = df.isDGA.replace(to_replace = 'dga', value=1)
df.isDGA = df.isDGA.replace(to_replace = 'legit', value=0)
print(df.isDGA.value_counts())
# check intermediate 2D pandas DataFrame
df.sample(n=5).head()
# ngrams: Implementation according to Schiavoni 2014: "Phoenix: DGA-based Botnet Tracking and Intelligence"
# http://s2lab.isg.rhul.ac.uk/papers/files/dimva2014.pdf
def ngrams(word, n):
    """Extract all ngrams and return them in one flat Python list.

    word: a single string or a list of strings.
    n: one integer or a list of integers (to extract several ngram
       lengths into the same list).
    """
    # Normalize both arguments to lists so a single pair of loops covers
    # all four input combinations the original duplicated branch-by-branch.
    # Iteration order is identical to the original.
    words = word if isinstance(word, list) else [word]
    sizes = n if isinstance(n, list) else [n]

    l_ngrams = []
    for w in words:
        for curr_n in sizes:
            l_ngrams.extend(w[i:i + curr_n] for i in range(len(w) - curr_n + 1))
    return l_ngrams
def ngram_feature(domain, d, n):
    """Score how English-like `domain` looks for one ngram length.

    Looks up every ngram of the domain in `d` (a Counter/dict mapping
    English-word ngrams to counts), sums the matching counts, and
    normalizes by the number of ngrams in the domain.

    domain: the domain string.
    d: dictionary of English ngram counts (e.g. a collections.Counter).
    n: ngram length (int) -- also used in the normalization denominator.
    """
    l_ngrams = ngrams(domain, n)
    count_sum = 0
    for ngram in l_ngrams:
        # .get() keeps this safe for plain dicts too; the original
        # indexed d[ngram], which only works without KeyError on Counter.
        count_sum += d.get(ngram, 0)
    try:
        feature = count_sum / (len(domain) - n + 1)
    except ZeroDivisionError:  # domain exactly n-1 chars long; was a bare except
        feature = 0
    return feature
def average_ngram_feature(l_ngram_feature):
    """Average the per-length ngram feature values.

    The input is a list of ngram_feature(domain, d, n) results, typically
    computed for several ngram lengths such as n = 1, 2, 3.
    """
    return sum(l_ngram_feature) / len(l_ngram_feature)
l_en_ngrams = ngrams(list(top_en_words['words']), [1,2,3])
d = Counter(l_en_ngrams)
from six.moves import cPickle as pickle
with open('../../data/d_common_en_words' + '.pickle', 'wb') as f:
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
df['ngrams'] = df.domain.apply(lambda x: average_ngram_feature([ngram_feature(x, d, 1),
ngram_feature(x, d, 2),
ngram_feature(x, d, 3)]))
# check final 2D pandas DataFrame containing all final features and the target vector isDGA
df.sample(n=5).head()
df_final = df
df_final = df_final.drop(['domain'], axis=1)
df_final.to_csv('../../data/dga_features_final_df.csv', index=False)
df_final.head()
df_final = pd.read_csv('../../data/dga_features_final_df.csv')
print(df_final.isDGA.value_counts())
df_final.head()
feature_names = ['length','digits','entropy','vowel-cons','ngrams']
features = df_final[feature_names]
target = df_final.isDGA
visualizer = Rank2D(algorithm='pearson',features=feature_names)
visualizer.fit_transform( features )
visualizer.poof()
sns.pairplot(df_final, hue='isDGA', vars=feature_names)
X = df_final[feature_names].as_matrix()
y = df_final.isDGA.as_matrix()
radvizualizer = RadViz(classes=['Benign','isDga'], features=feature_names)
radvizualizer.fit_transform( X, y)
radvizualizer.poof()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Remember how we could call methods on a list?
Step2: What we will basically be doing in this lecture is exploring how we could create an Object type like a list. We've already learned about how to create functions. So lets explore Objects in general
Step3: So we know all these things are objects, so how can we create our own Object types? That is where the class keyword comes in.
Step4: By convention we give classes a name that starts with a capital letter. Note how x is now the reference to our new instance of a Sample class. In other words, we instantiate the Sample class.
Step5: Let's break down what we have above. The special method
Step6: Note how we don't have any parenthesis after breed, this is because it is an attribute and doesn't take any arguments.
Step7: Note that the Class Object Attribute is defined outside of any methods in the class. Also by convention, we place them first before the init.
Step8: Methods
Step9: Great! Notice how we used self. notation to reference attributes of the class within the method calls. Review how the code above works and try creating your own method
Step10: In this example, we have two classes
|
<ASSISTANT_TASK:>
Python Code:
l = [1,2,3]
l.count(2)
print type(1)
print type([])
print type(())
print type({})
# Create a new object type called Sample
class Sample(object):
    """Minimal empty class: the simplest possible user-defined object type."""
    pass
# Instance of Sample
x = Sample()
print type(x)
class Dog(object):
    """A dog identified solely by its breed."""

    def __init__(self, breed):
        """Record the breed supplied at construction time."""
        # Attach the constructor argument to the new instance.
        self.breed = breed
sam = Dog(breed='Lab')
frank = Dog(breed='Huskie')
sam.breed
frank.breed
class Dog(object):
    """A dog with per-instance breed/name and a class-wide species."""

    # Class Object Attribute: shared by every Dog instance.
    species = 'mammal'

    def __init__(self, breed, name):
        """Store the breed and name supplied by the caller."""
        self.breed = breed
        self.name = name
sam = Dog('Lab','Sam')
sam.name
sam.species
class Circle(object):
    """A circle with a mutable radius (default 1) and area computation."""

    # Class-level constant shared by every instance.
    pi = 3.14

    def __init__(self, radius=1):
        """Create the circle with the given starting radius."""
        self.radius = radius

    def area(self):
        """Return pi * r * r using the class-level pi constant."""
        return Circle.pi * (self.radius * self.radius)

    def setRadius(self, radius):
        """Replace the stored radius."""
        self.radius = radius

    def getRadius(self):
        """Return the stored radius (equivalent to reading .radius)."""
        return self.radius
c = Circle()
c.setRadius(2)
print 'Radius is: ',c.getRadius()
print 'Area is: ',c.area()
class Animal(object):
    """Base class used to demonstrate inheritance (Python 2 print statements)."""

    def __init__(self):
        # Announce construction so subclass __init__ chaining is visible.
        print "Animal created"

    def whoAmI(self):
        # Intended to be overridden by subclasses to report their own type.
        print "Animal"

    def eat(self):
        # Shared behavior inherited unchanged by subclasses.
        print "Eating"
class Dog(Animal):
    """Derived class: overrides whoAmI() and adds a bark() method."""

    def __init__(self):
        # Explicitly run the base-class initializer first (old-style
        # Python 2 super call), then announce the subclass construction.
        Animal.__init__(self)
        print "Dog created"

    def whoAmI(self):
        # Override of Animal.whoAmI.
        print "Dog"

    def bark(self):
        # Behavior specific to Dog, not present on Animal.
        print "Woof!"
d = Dog()
d.whoAmI()
d.eat()
d.bark()
class Book(object):
    """Demonstrates the __str__, __len__ and __del__ special methods."""

    def __init__(self, title, author, pages):
        # Announce creation, then store the three descriptive attributes.
        print "A book is created"
        self.title = title
        self.author = author
        self.pages = pages

    def __str__(self):
        # Human-readable summary used by print/str().
        return "Title:%s , author:%s, pages:%s " %(self.title, self.author, self.pages)

    def __len__(self):
        # len(book) reports the page count.
        return self.pages

    def __del__(self):
        # Runs when the instance is reclaimed (e.g. after `del book`).
        print "A book is destroyed"
book = Book("Python Rocks!", "Jose Portilla", 159)
#Special Methods
print book
print len(book)
del book
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will create a grid with 41 rows and 5 columns, and dx is 5 m (a long, narrow, hillslope). The initial elevation is 0 at all nodes.
Step2: Now we import and initialize the LinearDiffuser component.
Step3: We now initialize a few more parameters.
Step4: Now we figure out the analytical solution for the elevation of the steady-state profile.
Step5: Before we evolve the landscape, let's look at the initial topography. (This is just verifying that it is flat with zero elevation.)
Step6: Now we are ready to evolve the landscape and compare it to the steady state solution.
Step7: Now we plot the final cross-section.
Step8: Now we plot the steepest slope in the downward direction across the landscape.
|
<ASSISTANT_TASK:>
Python Code:
# below is to make plots show up in the notebook
%matplotlib inline
# Code Block 1
import numpy as np
from matplotlib.pyplot import figure, legend, plot, show, title, xlabel, ylabel, ylim
from landlab.plot.imshow import imshow_grid
# Code Block 2
# setup grid
from landlab import RasterModelGrid
mg = RasterModelGrid((41, 5), 5.0)
z_vals = mg.add_zeros("topographic__elevation", at="node")
# initialize some values for plotting
ycoord_rast = mg.node_vector_to_raster(mg.node_y)
ys_grid = ycoord_rast[:, 2]
# set boundary condition.
mg.set_closed_boundaries_at_grid_edges(True, False, True, False)
# Code Block 3
from landlab.components import LinearDiffuser
D = 0.01 # initial value of 0.01 m^2/yr
lin_diffuse = LinearDiffuser(mg, linear_diffusivity=D)
# Code Block 4
# Uniform rate of rock uplift
uplift_rate = 0.0001 # meters/year, originally set to 0.0001
# Total time in years that the model will run for.
runtime = 1000000 # years, originally set to 1,000,000
# Stability criteria for timestep dt. Coefficient can be changed
# depending on our tolerance for stability vs tolerance for run time.
dt = 0.5 * mg.dx * mg.dx / D
# nt is number of time steps
nt = int(runtime // dt)
# Below is to keep track of time for labeling plots
time_counter = 0
# length of uplift over a single time step, meters
uplift_per_step = uplift_rate * dt
# Code Block 5
ys = np.arange(mg.number_of_node_rows * mg.dx - mg.dx)
# location of divide or ridge crest -> middle of grid
# based on boundary conds.
divide_loc = (mg.number_of_node_rows * mg.dx - mg.dx) / 2
# half-width of the ridge
half_width = (mg.number_of_node_rows * mg.dx - mg.dx) / 2
# analytical solution for elevation under linear diffusion at steady state
zs = (uplift_rate / (2 * D)) * (np.power(half_width, 2) - np.power(ys - divide_loc, 2))
# Code Block 6
figure(1)
imshow_grid(mg, "topographic__elevation")
title("initial topography")
figure(2)
elev_rast = mg.node_vector_to_raster(mg.at_node["topographic__elevation"])
plot(ys_grid, elev_rast[:, 2], "r-", label="model")
plot(ys, zs, "k--", label="analytical solution")
ylim((-5, 50)) # may want to change upper limit if D changes
xlabel("horizontal distance (m)")
ylabel("vertical distance (m)")
legend(loc="lower center")
title("initial topographic cross section")
# Code Block 7
for i in range(nt):
mg["node"]["topographic__elevation"][mg.core_nodes] += uplift_per_step
lin_diffuse.run_one_step(dt)
time_counter += dt
# All landscape evolution is the first two lines of loop.
# Below is simply for plotting the topography halfway through the run
if i == int(nt // 2):
figure(1)
imshow_grid(mg, "topographic__elevation")
title("topography at time %s, with D = %s" % (time_counter, D))
figure(2)
elev_rast = mg.node_vector_to_raster(mg.at_node["topographic__elevation"])
plot(ys_grid, elev_rast[:, 2], "k-", label="model")
plot(ys, zs, "g--", label="analytical solution - SS")
plot(ys, zs * 0.75, "b--", label="75% of analytical solution")
plot(ys, zs * 0.5, "r--", label="50% of analytical solution")
xlabel("horizontal distance (m)")
ylabel("vertical distance (m)")
legend(loc="lower center")
title("topographic__elevation at time %s, with D = %s" % (time_counter, D))
# Code Block 8
elev_rast = mg.node_vector_to_raster(mg.at_node["topographic__elevation"])
plot(ys_grid, elev_rast[:, 2], "k-", label="model")
plot(ys, zs, "g--", label="analytical solution - SS")
plot(ys, zs * 0.75, "b--", label="75% of analytical solution")
plot(ys, zs * 0.5, "r--", label="50% of analytical solution")
xlabel("horizontal distance (m)")
ylabel("vertical distance (m)")
legend(loc="lower center")
title("topographic cross section at time %s, with D = %s" % (time_counter, D))
# Code Block 9
from landlab.components import FlowAccumulator
fr = FlowAccumulator(mg) # intializing flow routing
fr.run_one_step()
plot(
mg.node_y[mg.core_nodes],
mg.at_node["topographic__steepest_slope"][mg.core_nodes],
"k-",
)
xlabel("horizontal distance (m)")
ylabel("topographic slope (m/m)")
title("slope of the hillslope at time %s, with D = %s" % (time_counter, D))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
# Module-level scratch buffer: holds the decimal digits of the most recent
# findDigits() call, least-significant digit first; sized generously.
digit =[0 ] *(100000 )
def findDigits(n):
    """Write the decimal digits of n (least-significant first) into the
    module-level `digit` buffer and return how many digits were stored.
    Returns 0 for n == 0 (no digits written)."""
    count = 0
    while n:
        # Peel off the lowest digit, then shift n right by one decimal place.
        digit[count] = n % 10
        n //= 10
        count += 1
    return count
def OR_of_Digits(n, count):
    """Return the bitwise OR of the first `count` entries of the global
    `digit` buffer. `n` is unused; kept for interface symmetry."""
    result = 0
    for idx in range(count):
        result |= digit[idx]
    return result
def AND_of_Digits(n, count):
    """Return the bitwise AND of the first `count` entries of the global
    `digit` buffer. `n` is unused; kept for interface symmetry.

    Bug fix: the original seeded the accumulator with 0, so `0 & digit[i]`
    made the result unconditionally 0. Seed with the first digit instead
    and fold the rest in; return 0 when count == 0 (empty fold).
    """
    if count == 0:
        return 0
    ans = digit[0]
    for i in range(1, count):
        ans &= digit[i]
    return ans
def XOR_of_Digits(n, count):
    """Return the bitwise XOR of the first `count` entries of the global
    `digit` buffer. `n` is unused; kept for interface symmetry."""
    result = 0
    for idx in range(count):
        result ^= digit[idx]
    return result
def bitwise_operation(N ) :
    """Driver: decompose N into decimal digits (filling the global `digit`
    buffer via findDigits) and print the XOR, OR and AND folded over them."""
    countOfDigit = findDigits(N )
    # NOTE(review): the "▁" characters in the labels look like tokenizer
    # artifacts from the original source; left untouched since they are
    # runtime strings.
    print("XOR ▁ = ▁ ", XOR_of_Digits(N , countOfDigit ) )
    print("OR ▁ = ▁ ", OR_of_Digits(N , countOfDigit ) )
    print("AND ▁ = ▁ ", AND_of_Digits(N , countOfDigit ) )
N = 123456 ;
bitwise_operation(N )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also have a Python file containing implementations for several functions that will be used during the course of this assignment.
Step2: Load Wikipedia data and extract TF-IDF features
Step3: Using a utility we provide, we will create a sparse matrix representation of the documents. This is the same utility function you used during the previous assignment on k-means with text data.
Step4: As in the previous assignment, we will normalize each document's TF-IDF vector to be a unit vector.
Step5: We can check that the length (Euclidean norm) of each row is now 1.0, as expected.
Step6: EM in high dimensions
Step7: Initializing cluster weights
Step8: Initializing covariances
Step9: Running EM
Step10: Interpret clustering results
Step11: Quiz Question. Select all the topics that have a cluster in the model created above. [multiple choice]
Step12: Quiz Question
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
'''Check GraphLab Create version'''
from distutils.version import StrictVersion
assert (StrictVersion(graphlab.version) >= StrictVersion('1.8.5')), 'GraphLab Create must be version 1.8.5 or later.'
from em_utilities import *
wiki = graphlab.SFrame('people_wiki.gl/').head(5000)
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
tf_idf = normalize(tf_idf)
for i in range(5):
doc = tf_idf[i]
print(np.linalg.norm(doc.todense()))
from sklearn.cluster import KMeans
np.random.seed(5)
num_clusters = 25
# Use scikit-learn's k-means to simplify workflow
kmeans_model = KMeans(n_clusters=num_clusters, n_init=5, max_iter=400, random_state=1, n_jobs=-1)
kmeans_model.fit(tf_idf)
centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_
means = [centroid for centroid in centroids]
num_docs = tf_idf.shape[0]
weights = []
for i in xrange(num_clusters):
# Compute the number of data points assigned to cluster i:
num_assigned = ... # YOUR CODE HERE
w = float(num_assigned) / num_docs
weights.append(w)
covs = []
for i in xrange(num_clusters):
member_rows = tf_idf[cluster_assignment==i]
cov = (member_rows.power(2) - 2*member_rows.dot(diag(means[i]))).sum(axis=0).A1 / member_rows.shape[0] \
+ means[i]**2
cov[cov < 1e-8] = 1e-8
covs.append(cov)
out = EM_for_high_dimension(tf_idf, means, covs, weights, cov_smoothing=1e-10)
out['loglik']
# Fill in the blanks
def visualize_EM_clusters(tf_idf, means, covs, map_index_to_word):
    """Print, for each cluster, the five words with the largest mean TF-IDF
    weight together with their mean and variance parameters.

    NOTE(review): this is an exercise template — `sorted_word_ids = ...` is
    an intentional placeholder to be completed by the student (hint in the
    comment below: np.argsort on the cluster mean, largest first).
    """
    print('')
    print('==========================================================')
    num_clusters = len(means)
    for c in xrange(num_clusters):
        print('Cluster {0:d}: Largest mean parameters in cluster '.format(c))
        print('\n{0: <12}{1: <12}{2: <12}'.format('Word', 'Mean', 'Variance'))
        # The k'th element of sorted_word_ids should be the index of the word
        # that has the k'th-largest value in the cluster mean. Hint: Use np.argsort().
        sorted_word_ids = ... # YOUR CODE HERE
        for i in sorted_word_ids[:5]:
            print '{0: <12}{1:<10.2e}{2:10.2e}'.format(map_index_to_word['category'][i],
                                                       means[c][i],
                                                       covs[c][i])
        print '\n=========================================================='
'''By EM'''
visualize_EM_clusters(tf_idf, out['means'], out['covs'], map_index_to_word)
np.random.seed(5) # See the note below to see why we set seed=5.
num_clusters = len(means)
num_docs, num_words = tf_idf.shape
random_means = []
random_covs = []
random_weights = []
for k in range(num_clusters):
# Create a numpy array of length num_words with random normally distributed values.
# Use the standard univariate normal distribution (mean 0, variance 1).
# YOUR CODE HERE
mean = ...
# Create a numpy array of length num_words with random values uniformly distributed between 1 and 5.
# YOUR CODE HERE
cov = ...
# Initially give each cluster equal weight.
# YOUR CODE HERE
weight = ...
random_means.append(mean)
random_covs.append(cov)
random_weights.append(weight)
# YOUR CODE HERE. Use visualize_EM_clusters, which will require you to pass in tf_idf and map_index_to_word.
...
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulando jogo de dados
Step2: Simulando decaimento radiativo
Step3: Simulando um andar de bebado
Step4: Integração Monte Carlo
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
s = np.random.uniform(8,10., 100000)
count, bins, ignored = plt.hist(s, 30)
#print (count, bins, ignored)
import numpy as np
import matplotlib.pyplot as plt
mean = [0, 0]
cov = [[1, 10], [5, 10]] # covariancia diagonal
x, y = np.random.multivariate_normal(mean, cov, 500).T
plt.plot(x, y, 'x')
plt.axis('equal')
import numpy as np
p = np.random.random()
if p > 0.2:
print ('cara', p)
else:
print ('coroa', p)
NL1 = 1000
NPb = 0
tau = 3.053*60.
h = 1
tmax = 1000
p = 1 - 2**(-h/tau)
tlist = np.arange(0.,tmax,h)
Tl1list = []
Pblist = []
for t in tlist:
Tl1list.append(NL1)
Pblist.append(NPb)
decay = 0
for i in range(NL1):
if np.random.random() < p:
decay += 1
NL1 -= decay
NPb += decay
plt.plot(tlist,Tl1list)
plt.plot(tlist,Pblist)
plt.show()
print (p)
import numpy as np
import matplotlib.pyplot as plt
dt = 1.0 # minutos
tmax = 1200.
tempo = np.arange(0.,tmax,dt)
# ponto de partida
x0 = 0.
y0 = 0.
posicao = []
for t in tempo:
#sorteia um numero de 0 a 1
passo = np.random.random()
sentido = np.random.random()
if (passo < 0.9):
if(sentido <0.5):
posicao.append([x0+0.5,y0+0.])
x0 += 0.5
else:
posicao.append([x0-0.5,y0+0.])
x0 -= 0.5
else:
if(sentido <0.5):
posicao.append([x0,y0+0.5])
y0 += 0.5
else:
posicao.append([x0,y0-0.5])
y0 -= 0.5
posicao = np.array(posicao)
plt.plot(posicao[:,0],posicao[:,1])
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def easy_function(x):
    """Simple polynomial test integrand: 3 * x**2 (antiderivative x**3)."""
    return 3 * x ** 2
def hard_function(x):
    """Standard normal probability density evaluated at x."""
    return np.exp(-0.5 * x ** 2) / np.sqrt(2.0 * np.pi)
X=np.linspace(-20,20,1000)
plt.plot(X,easy_function(X))
plt.show()
plt.plot(X,hard_function(X))
plt.show()
def integrate(x1,x2,func=easy_function,n=100000):
    """Hit-or-miss Monte Carlo estimate of the integral of func over [x1, x2].

    Returns a 4-tuple (estimate, xs, ys, check) where xs/ys are the sampled
    points and check marks each sample as under the curve (1) or not (0).
    Non-deterministic: draws from np.random without a fixed seed.
    NOTE(review): assumes func is non-negative on [x1, x2] — the bounding
    box floor is y1 = 0 and negative samples are rejected, so regions where
    the integrand is negative are not handled; confirm against callers.
    """
    # Bounding box: height is the max of func over a 1000-point grid, plus
    # 1 of headroom so the curve lies strictly inside the box.
    X=np.linspace(x1,x2,1000)
    y1=0
    y2=max((func(X)))+1
    print(x1,x2,y1,y2)
    area=(x2-x1)*(y2-y1)
    check=[]
    xs=[]
    ys=[]
    for i in range(n):
        # Draw a uniform random point inside the bounding box (1-element arrays).
        x=np.random.uniform(x1,x2,1)
        xs.append(x)
        y=np.random.uniform(y1,y2,1)
        ys.append(y)
        # Count a hit (1) only when the point lies on or under the curve
        # and above the axis.
        if abs(y)>abs(func(x)) or y<0:
            check.append(0)
        else:
            check.append(1)
    # Fraction of hits times the box area estimates the integral.
    return(np.mean(check)*area,xs,ys,check)
print(integrate(0.3,2.5)[0])
print(integrate(0.3,2.5,hard_function)[0])
_,x,y,c=integrate(0.3,2.5,n=100)
df=pd.DataFrame()
df['x']=x
df['y']=y
df['c']=c
X=np.linspace(0.3,2.5,1000)
plt.plot(X,easy_function(X))
plt.scatter(df[df['c']==0]['x'],df[df['c']==0]['y'],color='red')
plt.scatter(df[df['c']==1]['x'],df[df['c']==1]['y'],color='blue')
plt.show()
import numpy as np
np.random.seed(32)
print(np.random.random(5))
print(np.random.random(5))
print('')
np.random.seed(32)
print(np.random.random(10))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Brax Config
Step2: We visualize this system config like so
Step3: Brax State
Step4: Brax Step Function
Step5: Joints
Step6: Here is our system at rest
Step7: Let's observe $\text{step}(\text{config}, \text{qp}_t)$ by smacking the bottom ball with an initial impulse, simulating a pendulum swing.
Step8: Actuators
Step9: Let's observe $\text{step}(\text{config}, \text{qp}_t, \text{act})$ by raising the middle ball to a desired target angle
|
<ASSISTANT_TASK:>
Python Code:
#@title Colab setup and imports
from matplotlib.lines import Line2D
from matplotlib.patches import Circle
import matplotlib.pyplot as plt
import numpy as np
try:
import brax
except ImportError:
from IPython.display import clear_output
!pip install git+https://github.com/google/brax.git@main
clear_output()
import brax
#@title A bouncy ball scene
bouncy_ball = brax.Config(dt=0.05, substeps=20, dynamics_mode='pbd')
# ground is a frozen (immovable) infinite plane
ground = bouncy_ball.bodies.add(name='ground')
ground.frozen.all = True
plane = ground.colliders.add().plane
plane.SetInParent() # for setting an empty oneof
# ball weighs 1kg, has equal rotational inertia along all axes, is 1m long, and
# has an initial rotation of identity (w=1,x=0,y=0,z=0) quaternion
ball = bouncy_ball.bodies.add(name='ball', mass=1)
cap = ball.colliders.add().capsule
cap.radius, cap.length = 0.5, 1
# gravity is -9.8 m/s^2 in z dimension
bouncy_ball.gravity.z = -9.8
def draw_system(ax, pos, alpha=1):
    """Draw each body position as an unfilled circle (x/z plane) and link
    consecutive bodies with a red line; `alpha` fades older frames.

    NOTE(review): relies on the module-level `cap` collider for the circle
    radius, so it must run after the scene-config cell that defines `cap`.
    """
    for i, p in enumerate(pos):
        # p is a 3-vector; plot x horizontally and z (up) vertically.
        ax.add_patch(Circle(xy=(p[0], p[2]), radius=cap.radius, fill=False, color=(0, 0, 0, alpha)))
        if i < len(pos) - 1:
            # Connect this body to the next one with a red segment.
            pn = pos[i + 1]
            ax.add_line(Line2D([p[0], pn[0]], [p[2], pn[2]], color=(1, 0, 0, alpha)))
_, ax = plt.subplots()
plt.xlim([-3, 3])
plt.ylim([0, 4])
draw_system(ax, [[0, 0, 0.5]])
plt.title('ball at rest')
plt.show()
qp = brax.QP(
# position of each body in 3d (z is up, right-hand coordinates)
pos = np.array([[0., 0., 0.], # ground
[0., 0., 3.]]), # ball is 3m up in the air
# velocity of each body in 3d
vel = np.array([[0., 0., 0.], # ground
[0., 0., 0.]]), # ball
# rotation about center of body, as a quaternion (w, x, y, z)
rot = np.array([[1., 0., 0., 0.], # ground
[1., 0., 0., 0.]]), # ball
# angular velocity about center of body in 3d
ang = np.array([[0., 0., 0.], # ground
[0., 0., 0.]]) # ball
)
#@title Simulating the bouncy ball config { run: "auto"}
bouncy_ball.elasticity = 0.85 #@param { type:"slider", min: 0, max: 1.0, step:0.05 }
ball_velocity = 1 #@param { type:"slider", min:-5, max:5, step: 0.5 }
sys = brax.System(bouncy_ball)
# provide an initial velocity to the ball
qp.vel[1, 0] = ball_velocity
_, ax = plt.subplots()
plt.xlim([-3, 3])
plt.ylim([0, 4])
for i in range(100):
draw_system(ax, qp.pos[1:], i / 100.)
qp, _ = sys.step(qp, [])
plt.title('ball in motion')
plt.show()
#@title A pendulum config for Brax
pendulum = brax.Config(dt=0.01, substeps=20, dynamics_mode='pbd')
# start with a frozen anchor at the root of the pendulum
anchor = pendulum.bodies.add(name='anchor', mass=1.0)
anchor.frozen.all = True
# now add a middle and bottom ball to the pendulum
pendulum.bodies.append(ball)
pendulum.bodies.append(ball)
pendulum.bodies[1].name = 'middle'
pendulum.bodies[2].name = 'bottom'
# connect anchor to middle
joint = pendulum.joints.add(name='joint1', parent='anchor',
child='middle', angular_damping=20)
joint.angle_limit.add(min = -180, max = 180)
joint.child_offset.z = 1.5
joint.rotation.z = 90
# connect middle to bottom
pendulum.joints.append(joint)
pendulum.joints[1].name = 'joint2'
pendulum.joints[1].parent = 'middle'
pendulum.joints[1].child = 'bottom'
# gravity is -9.8 m/s^2 in z dimension
pendulum.gravity.z = -9.8
_, ax = plt.subplots()
plt.xlim([-3, 3])
plt.ylim([0, 4])
# rather than building our own qp like last time, we ask brax.System to
# generate a default one for us, which is handy
qp = brax.System(pendulum).default_qp()
draw_system(ax, qp.pos)
plt.title('pendulum at rest')
plt.show()
#@title Simulating the pendulum config { run: "auto"}
ball_impulse = 8 #@param { type:"slider", min:-15, max:15, step: 0.5 }
sys = brax.System(pendulum)
qp = sys.default_qp()
# provide an initial velocity to the ball
qp.vel[2, 0] = ball_impulse
_, ax = plt.subplots()
plt.xlim([-3, 3])
plt.ylim([0, 4])
for i in range(50):
draw_system(ax, qp.pos, i / 50.)
qp, _ = sys.step(qp, [])
plt.title('pendulum in motion')
plt.show()
#@title A single actuator on the pendulum
actuated_pendulum = brax.Config()
actuated_pendulum.CopyFrom(pendulum)
# actuating the joint connecting the anchor and middle
angle = actuated_pendulum.actuators.add(name='actuator', joint='joint1',
strength=100).angle
angle.SetInParent() # for setting an empty oneof
#@title Simulating the actuated pendulum config { run: "auto"}
target_angle = 45 #@param { type:"slider", min:-90, max:90, step: 1 }
sys = brax.System(actuated_pendulum)
qp = sys.default_qp()
act = np.array([target_angle])
_, ax = plt.subplots()
plt.xlim([-3, 3])
plt.ylim([0, 4])
for i in range(100):
draw_system(ax, qp.pos, i / 100.)
qp, _ = sys.step(qp, act)
plt.title('actuating a pendulum joint')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Series from a dictionary
Step2: Accesing an item from a series
Step3: BOOLEAN indexing for selection
Step4: Not null function
Step5: Data Frame
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('max_columns', 50)
%matplotlib inline
series = pd.Series([1, "number", 6, "Happy Series!"])
series
dictionary = {'Favorite Food': 'mexican', 'Favorite city': 'Portland', 'Hometown': 'Mexico City'}
favorite = pd.Series(dictionary)
favorite
favorite['Favorite Food']
favorite[favorite=='mexican']
favorite.notnull()
favorite[favorite.notnull()]
data = {'year': [2010, 2011, 2012, 2011, 2012, 2010, 2011, 2012],
'team': ['Bears', 'Bears', 'Bears', 'Packers', 'Packers', 'Lions', 'Lions', 'Lions'],
'wins': [11, 8, 10, 15, 11, 6, 10, 4],
'losses': [5, 8, 6, 1, 5, 10, 6, 12]}
football = pd.DataFrame(data, columns=['year', 'team', 'wins', 'losses'])
football
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: BERT End to End (Fine-tuning + Predicting) in 5 minutes with Cloud TPU
Step2: Prepare and import BERT modules
Step3: Prepare for training
Step4: Now let's load tokenizer module from TF Hub and play with it.
Step5: Also we initilize our hyperprams, prepare the training data and initialize TPU config.
Step6: Fine-tune and Run Predictions on a pretrained BERT Model from TF Hub
Step7: At this point, you can now fine-tune the model, evaluate it, and run predictions on it.
Step8: Fine-tune and run predictions on a pre-trained BERT model from checkpoints
Step9: Now, you can repeat the training, evaluation, and prediction steps.
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import datetime
import json
import os
import pprint
import random
import string
import sys
import tensorflow as tf
assert 'COLAB_TPU_ADDR' in os.environ, 'ERROR: Not connected to a TPU runtime; please see the first cell in this notebook for instructions!'
TPU_ADDRESS = 'grpc://' + os.environ['COLAB_TPU_ADDR']
print('TPU address is', TPU_ADDRESS)
from google.colab import auth
auth.authenticate_user()
with tf.Session(TPU_ADDRESS) as session:
print('TPU devices:')
pprint.pprint(session.list_devices())
# Upload credentials to TPU.
with open('/content/adc.json', 'r') as f:
auth_info = json.load(f)
tf.contrib.cloud.configure_gcs(session, credentials=auth_info)
# Now credentials are set for all future sessions on this TPU.
import sys
!test -d bert_repo || git clone https://github.com/google-research/bert bert_repo
if not 'bert_repo' in sys.path:
sys.path += ['bert_repo']
# import python modules defined by BERT
import modeling
import optimization
import run_classifier
import run_classifier_with_tfhub
import tokenization
# import tfhub
import tensorflow_hub as hub
TASK = 'MRPC' #@param {type:"string"}
assert TASK in ('MRPC', 'CoLA'), 'Only (MRPC, CoLA) are demonstrated here.'
# Download glue data.
! test -d download_glue_repo || git clone https://gist.github.com/60c2bdb54d156a41194446737ce03e2e.git download_glue_repo
!python download_glue_repo/download_glue_data.py --data_dir='glue_data' --tasks=$TASK
TASK_DATA_DIR = 'glue_data/' + TASK
print('***** Task data directory: {} *****'.format(TASK_DATA_DIR))
!ls $TASK_DATA_DIR
BUCKET = 'YOUR_BUCKET' #@param {type:"string"}
assert BUCKET, 'Must specify an existing GCS bucket name'
OUTPUT_DIR = 'gs://{}/bert-tfhub/models/{}'.format(BUCKET, TASK)
tf.gfile.MakeDirs(OUTPUT_DIR)
print('***** Model output directory: {} *****'.format(OUTPUT_DIR))
# Available pretrained model checkpoints:
# uncased_L-12_H-768_A-12: uncased BERT base model
# uncased_L-24_H-1024_A-16: uncased BERT large model
# cased_L-12_H-768_A-12: cased BERT large model
BERT_MODEL = 'uncased_L-12_H-768_A-12' #@param {type:"string"}
BERT_MODEL_HUB = 'https://tfhub.dev/google/bert_' + BERT_MODEL + '/1'
tokenizer = run_classifier_with_tfhub.create_tokenizer_from_hub_module(BERT_MODEL_HUB)
tokenizer.tokenize("This here's an example of using the BERT tokenizer")
TRAIN_BATCH_SIZE = 32
EVAL_BATCH_SIZE = 8
PREDICT_BATCH_SIZE = 8
LEARNING_RATE = 2e-5
NUM_TRAIN_EPOCHS = 3.0
MAX_SEQ_LENGTH = 128
# Warmup is a period of time where the learning rate
# is small and gradually increases--usually helps training.
WARMUP_PROPORTION = 0.1
# Model configs
SAVE_CHECKPOINTS_STEPS = 1000
SAVE_SUMMARY_STEPS = 500
processors = {
"cola": run_classifier.ColaProcessor,
"mnli": run_classifier.MnliProcessor,
"mrpc": run_classifier.MrpcProcessor,
}
processor = processors[TASK.lower()]()
label_list = processor.get_labels()
# Compute number of train and warmup steps from batch size
train_examples = processor.get_train_examples(TASK_DATA_DIR)
num_train_steps = int(len(train_examples) / TRAIN_BATCH_SIZE * NUM_TRAIN_EPOCHS)
num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)
# Setup TPU related config
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(TPU_ADDRESS)
NUM_TPU_CORES = 8
ITERATIONS_PER_LOOP = 1000
def get_run_config(output_dir):
    """Build a TPUEstimator RunConfig writing checkpoints to output_dir.

    Uses the module-level tpu_cluster_resolver and the TPU constants
    (SAVE_CHECKPOINTS_STEPS, ITERATIONS_PER_LOOP, NUM_TPU_CORES) defined
    in the setup cell above.
    """
    return tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=output_dir,
        save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS,
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=ITERATIONS_PER_LOOP,
            num_shards=NUM_TPU_CORES,
            per_host_input_for_training=tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2))
# Force TF Hub writes to the GS bucket we provide.
os.environ['TFHUB_CACHE_DIR'] = OUTPUT_DIR
model_fn = run_classifier_with_tfhub.model_fn_builder(
num_labels=len(label_list),
learning_rate=LEARNING_RATE,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=True,
bert_hub_module_handle=BERT_MODEL_HUB
)
estimator_from_tfhub = tf.contrib.tpu.TPUEstimator(
use_tpu=True,
model_fn=model_fn,
config=get_run_config(OUTPUT_DIR),
train_batch_size=TRAIN_BATCH_SIZE,
eval_batch_size=EVAL_BATCH_SIZE,
predict_batch_size=PREDICT_BATCH_SIZE,
)
# Train the model
def model_train(estimator):
    """Fine-tune BERT on the task's training split.

    Converts the module-level `train_examples` to BERT input features
    (sequences capped at MAX_SEQ_LENGTH tokens), builds a TPU input
    function, and trains the given estimator for `num_train_steps` steps.
    """
    print('MRPC/CoLA on BERT base model normally takes about 2-3 minutes. Please wait...')
    # We'll set sequences to be at most 128 tokens long.
    train_features = run_classifier.convert_examples_to_features(
        train_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    print('***** Started training at {} *****'.format(datetime.datetime.now()))
    print('  Num examples = {}'.format(len(train_examples)))
    print('  Batch size = {}'.format(TRAIN_BATCH_SIZE))
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = run_classifier.input_fn_builder(
        features=train_features,
        seq_length=MAX_SEQ_LENGTH,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
    print('***** Finished training at {} *****'.format(datetime.datetime.now()))
model_train(estimator_from_tfhub)
def model_eval(estimator):
    """Evaluate the fine-tuned model on the dev split and persist metrics.

    Converts the dev examples to features, runs TPU evaluation (the last
    partial batch is dropped, so metrics are slightly approximate — see
    note below), prints each metric, and writes them to eval_results.txt
    under OUTPUT_DIR.
    """
    # Eval the model.
    eval_examples = processor.get_dev_examples(TASK_DATA_DIR)
    eval_features = run_classifier.convert_examples_to_features(
        eval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    print('***** Started evaluation at {} *****'.format(datetime.datetime.now()))
    print('  Num examples = {}'.format(len(eval_examples)))
    print('  Batch size = {}'.format(EVAL_BATCH_SIZE))
    # Eval will be slightly WRONG on the TPU because it will truncate
    # the last batch.
    eval_steps = int(len(eval_examples) / EVAL_BATCH_SIZE)
    eval_input_fn = run_classifier.input_fn_builder(
        features=eval_features,
        seq_length=MAX_SEQ_LENGTH,
        is_training=False,
        drop_remainder=True)
    result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
    print('***** Finished evaluation at {} *****'.format(datetime.datetime.now()))
    output_eval_file = os.path.join(OUTPUT_DIR, "eval_results.txt")
    with tf.gfile.GFile(output_eval_file, "w") as writer:
        print("***** Eval results *****")
        for key in sorted(result.keys()):
            print('  {} = {}'.format(key, str(result[key])))
            writer.write("%s = %s\n" % (key, str(result[key])))
model_eval(estimator_from_tfhub)
def model_predict(estimator):
    """Print per-example class probabilities for one batch of dev examples.

    Takes exactly ``PREDICT_BATCH_SIZE`` dev examples (so the single batch is
    full-sized with drop_remainder=True) and prints each example's text pair,
    true label and predicted probability vector.

    Args:
        estimator: The trained TPU estimator used for inference.
    """
    # Make predictions on a subset of eval examples
    prediction_examples = processor.get_dev_examples(TASK_DATA_DIR)[:PREDICT_BATCH_SIZE]
    input_features = run_classifier.convert_examples_to_features(prediction_examples, label_list, MAX_SEQ_LENGTH, tokenizer)
    predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=True)
    predictions = estimator.predict(predict_input_fn)
    # zip pairs each input example with its prediction dict in order.
    for example, prediction in zip(prediction_examples, predictions):
        print('text_a: %s\ntext_b: %s\nlabel:%s\nprediction:%s\n' % (example.text_a, example.text_b, str(example.label), prediction['probabilities']))
model_predict(estimator_from_tfhub)
# Setup task specific model and TPU running config.
# Location of Google's pre-trained BERT checkpoints on Google Cloud Storage.
BERT_PRETRAINED_DIR = 'gs://cloud-tpu-checkpoints/bert/' + BERT_MODEL
print('***** BERT pretrained directory: {} *****'.format(BERT_PRETRAINED_DIR))
# IPython shell magic — lists checkpoint files; only works inside a notebook.
!gsutil ls $BERT_PRETRAINED_DIR
CONFIG_FILE = os.path.join(BERT_PRETRAINED_DIR, 'bert_config.json')
INIT_CHECKPOINT = os.path.join(BERT_PRETRAINED_DIR, 'bert_model.ckpt')
# Build the estimator model_fn directly from the raw checkpoint (rather than
# TF-Hub), initializing weights from INIT_CHECKPOINT.
model_fn = run_classifier.model_fn_builder(
    bert_config=modeling.BertConfig.from_json_file(CONFIG_FILE),
    num_labels=len(label_list),
    init_checkpoint=INIT_CHECKPOINT,
    learning_rate=LEARNING_RATE,
    num_train_steps=num_train_steps,
    num_warmup_steps=num_warmup_steps,
    use_tpu=True,
    use_one_hot_embeddings=True
)
# Use a separate output directory so checkpoint-based runs don't clobber
# the earlier TF-Hub run's artifacts.
OUTPUT_DIR = OUTPUT_DIR.replace('bert-tfhub', 'bert-checkpoints')
tf.gfile.MakeDirs(OUTPUT_DIR)
estimator_from_checkpoints = tf.contrib.tpu.TPUEstimator(
    use_tpu=True,
    model_fn=model_fn,
    config=get_run_config(OUTPUT_DIR),
    train_batch_size=TRAIN_BATCH_SIZE,
    eval_batch_size=EVAL_BATCH_SIZE,
    predict_batch_size=PREDICT_BATCH_SIZE,
)
# Same train / eval / predict pipeline as the TF-Hub estimator above.
model_train(estimator_from_checkpoints)
model_eval(estimator_from_checkpoints)
model_predict(estimator_from_checkpoints)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preparation
Step2: Now, let us see a preview of what the dataset looks like.
Step3: Build the word count vector for each review
Step4: Now, we will perform 2 simple data transformations
Step5: Now, let us explore what the sample example above looks like after these 2 transformations. Here, each entry in the word_count column is a dictionary where the key is the word and the value is a count of the number of times the word occurs.
Step6: Extract sentiments
Step7: Now, we will assign reviews with a rating of 4 or higher to be positive reviews, while the ones with rating of 2 or lower are negative. For the sentiment column, we use +1 for the positive class label and -1 for the negative class label.
Step8: Now, we can see that the dataset contains an extra column called sentiment which is either positive (+1) or negative (-1).
Step9: Train a sentiment classifier with logistic regression
Step10: Aside. You may get a warning to the effect of "Terminated due to numerical difficulties --- this model may not be ideal". It means that the quality metric (to be covered in Module 3) failed to improve in the last iteration of the run. The difficulty arises as the sentiment model puts too much weight on extremely rare words. A way to rectify this is to apply regularization, to be covered in Module 4. Regularization lessens the effect of extremely rare words. For the purpose of this assignment, however, please proceed with the model above.
Step11: There are a total of 121713 coefficients in the model. Recall from the lecture that positive weights $w_j$ correspond to weights that cause positive sentiment, while negative weights correspond to negative sentiment.
Step12: Quiz question
Step13: Let's dig deeper into the first row of the sample_test_data. Here's the full review
Step14: That review seems pretty positive.
Step15: We will now make a class prediction for the sample_test_data. The sentiment_model should predict +1 if the sentiment is positive and -1 if the sentiment is negative. Recall from the lecture that the score (sometimes called margin) for the logistic regression model is defined as
Step16: Predicting sentiment
Step17: Checkpoint
Step18: Quiz Question
Step19: Now, let's compute the classification accuracy of the sentiment_model on the test_data.
Step20: Quiz Question
Step21: For each review, we will use the word_count column and trim out all words that are not in the significant_words list above. We will use the SArray dictionary trim by keys functionality. Note that we are performing this on both the training and test set.
Step22: Let's see what the first example of the dataset looks like
Step23: The word_count column had been working with before looks like the following
Step24: Since we are only working with a subet of these words, the column word_count_subset is a subset of the above dictionary. In this example, only 2 significant words are present in this review.
Step25: Train a logistic regression model on a subset of data
Step26: We can compute the classification accuracy using the get_classification_accuracy function you implemented earlier.
Step27: Now, we will inspect the weights (coefficients) of the simple_model
Step28: Let's sort the coefficients (in descending order) by the value to obtain the coefficients with the most positive effect on the sentiment.
Step29: Quiz Question
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import graphlab
import math
import string
products = graphlab.SFrame('amazon_baby.gl/')
products
products[269]
def remove_punctuation(text):
    """Return *text* with every ASCII punctuation character removed.

    The original two-argument form ``text.translate(None, deletechars)`` is
    Python-2-only and raises TypeError on Python 3; a translation table built
    with ``str.maketrans`` performs the same deletion in one pass.

    Args:
        text: The string to clean.

    Returns:
        A copy of ``text`` with all ``string.punctuation`` characters deleted.
    """
    import string
    return text.translate(str.maketrans('', '', string.punctuation))
review_without_puctuation = products['review'].apply(remove_punctuation)
products['word_count'] = graphlab.text_analytics.count_words(review_without_puctuation)
products[269]['word_count']
products = products[products['rating'] != 3]
len(products)
products['sentiment'] = products['rating'].apply(lambda rating : +1 if rating > 3 else -1)
products
train_data, test_data = products.random_split(.8, seed=1)
print len(train_data)
print len(test_data)
sentiment_model = graphlab.logistic_classifier.create(train_data,
target = 'sentiment',
features=['word_count'],
validation_set=None)
sentiment_model
weights = sentiment_model.coefficients
weights.column_names()
num_positive_weights = ...
num_negative_weights = ...
print "Number of positive weights: %s " % num_positive_weights
print "Number of negative weights: %s " % num_negative_weights
sample_test_data = test_data[10:13]
print sample_test_data['rating']
sample_test_data
sample_test_data[0]['review']
sample_test_data[1]['review']
scores = sentiment_model.predict(sample_test_data, output_type='margin')
print scores
print "Class predictions according to GraphLab Create:"
print sentiment_model.predict(sample_test_data)
print "Class predictions according to GraphLab Create:"
print sentiment_model.predict(sample_test_data, output_type='probability')
def get_classification_accuracy(model, data, true_labels):
    """Fraction of examples in *data* that *model* classifies correctly.

    Args:
        model: Any object with a ``predict(data)`` method that returns one
            predicted label per example (e.g. a GraphLab classifier).
        data: The feature set to classify.
        true_labels: Array-like of ground-truth labels, one per example.

    Returns:
        float: number of correct predictions divided by total examples.
    """
    # First get the predictions
    predictions = model.predict(data)
    # Compute the number of correctly classified examples
    # (elementwise == then sum works for SArray and numpy alike).
    num_correct = (predictions == true_labels).sum()
    # Then compute accuracy by dividing num_correct by total number of examples
    accuracy = float(num_correct) / len(true_labels)
    return accuracy
get_classification_accuracy(sentiment_model, test_data, test_data['sentiment'])
significant_words = ['love', 'great', 'easy', 'old', 'little', 'perfect', 'loves',
'well', 'able', 'car', 'broke', 'less', 'even', 'waste', 'disappointed',
'work', 'product', 'money', 'would', 'return']
len(significant_words)
train_data['word_count_subset'] = train_data['word_count'].dict_trim_by_keys(significant_words, exclude=False)
test_data['word_count_subset'] = test_data['word_count'].dict_trim_by_keys(significant_words, exclude=False)
train_data[0]['review']
print train_data[0]['word_count']
print train_data[0]['word_count_subset']
simple_model = graphlab.logistic_classifier.create(train_data,
target = 'sentiment',
features=['word_count_subset'],
validation_set=None)
simple_model
get_classification_accuracy(simple_model, test_data, test_data['sentiment'])
simple_model.coefficients
simple_model.coefficients.sort('value', ascending=False).print_rows(num_rows=21)
num_positive = (train_data['sentiment'] == +1).sum()
num_negative = (train_data['sentiment'] == -1).sum()
print num_positive
print num_negative
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Regresion Lineal
Step2: Regresion lineal con p y pearsonr
Step3: OLS Regression
Step4: Histogramas seaborn
|
<ASSISTANT_TASK:>
Python Code:
# librerias
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.formula.api as sm
%matplotlib inline
plt.style.use('ggplot')
# leer archivo
data = pd.read_csv('../data/dataFromAguascalientestTest.csv')
# verificar su contenido
data.head()
# diferencia entre valores de precipitacion, humedad relativa y temperatura promedio
data['diffRain'] = data['Rain'] - data['RainWRF']
data['diffHr'] = data['Hr'] - data['HrWRF']
data['diffTpro'] = data['Tpro'] - data['TproWRF']
# verificar contenido
data.head()
# histograma de diferencias Hr
data['diffHr'].hist()
# comportamiento de los datos por mes
data.groupby(['Month']).mean()[['Hr','HrWRF']]
# visualizar los datos en grafica
data.groupby(['Month']).mean()[['Hr','HrWRF']].plot.bar()
# histograma de diferencias Tpro
data['diffTpro'].hist()
# comportamiento de los datos por mes
data.groupby(['Month']).mean()[['Tpro','TproWRF']]
# visualizar los datos en grafica
data.groupby(['Month']).mean()[['Tpro','TproWRF']].plot.bar()
# histograma de diferencias Rain
data['diffRain'].hist()
# comportamiento de los datos por mes
data.groupby(['Month']).mean()[['Rain','RainWRF']]
# visualizar los datos en grafica
data.groupby(['Month']).mean()[['Rain','RainWRF']].plot.bar()
# librerias seabron as sns
import seaborn as sns
# Hr
sns.lmplot(x='Hr',y='HrWRF',data=data, col='Month', aspect=0.6, size=8)
# Tpro
sns.lmplot(x='Tpro',y='TproWRF',data=data, col='Month', aspect=0.6, size=8)
# Rain
sns.lmplot(x='Rain',y='RainWRF',data=data, col='Month', aspect=0.6, size=8)
# Rain polynomial regression
sns.lmplot(x='Rain',y='RainWRF',data=data, col='Month', aspect=0.6, size=8, order=2)
# Hr
sns.jointplot("Hr", "HrWRF", data=data, kind="reg")
# Tpro
sns.jointplot("Tpro", "TproWRF", data=data, kind="reg")
# Rain
sns.jointplot("Rain", "RainWRF", data=data, kind="reg")
# HR
result = sm.ols(formula='HrWRF ~ Hr', data=data).fit()
print(result.params)
print(result.summary())
# Tpro
result = sm.ols(formula='TproWRF ~ Tpro', data=data).fit()
print(result.params)
print(result.summary())
# Rain
result = sm.ols(formula='RainWRF ~ Rain', data=data).fit()
print(result.params)
print(result.summary())
# Hr
sns.distplot(data['diffHr'])
# Tpro
sns.distplot(data['diffTpro'])
# Rain
sns.distplot(data['diffRain'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: This CTD was equipped with backup sensors to provide more robustness.
Step3: Considering the unusual magnitudes and variability near the bottom, there are clearly bad measurements in this profile.
Step4: Great, we already identified a fair number of bad measurements.
Step5: Let's use that to check what are the unfeasible values of temperature.
Step7: The Global Range is a trivial one to implement, but there are other checks that are more complex and CoTeDe provides a solution for that.
Step8: This is already implemented in CoTeDe as qctests.spike, and we could use it as shown below
Step9: The same could be done for salinity, such as
Step10: Like the Global Range, it provides the quality flags obtained from this procedure.
Step11: The magnitudes of the tests are stored in features.
Step12: These already implemented tests are useful, but it could be easier.
Step13: That's it, the temperature and salinity from the primary sensor were evaluated.
Step14: But only the variables in the cfg dictionary were QC'd
Step15: The flags are on IOC standard, thus 1 means good while 4 means bad.
Step16: The flags are on IOC standard, thus 1 means good while 4 means bad.
Step17: More tests
Step18: The Easiest Way
Step19: QC with more control
Step20: Low level
|
<ASSISTANT_TASK:>
Python Code:
from bokeh.io import output_notebook, show
from bokeh.layouts import row
from bokeh.plotting import figure
import numpy as np
import cotede
from cotede import datasets, qctests
output_notebook()
data = cotede.datasets.load_ctd()
print("The variables are: ", ", ".join(sorted(data.keys())))
print("There is a total of {} observed depths.".format(len(data["TEMP"])))
p1 = figure(plot_width=420, plot_height=600)
p1.circle(data['TEMP'], -data['PRES'],
size=8, line_color="seagreen", fill_color="mediumseagreen", fill_alpha=0.3)
p1.xaxis.axis_label = "Temperature [C]"
p1.yaxis.axis_label = "Depth [m]"
p2 = figure(plot_width=420, plot_height=600)
p2.y_range = p1.y_range
p2.circle(data['PSAL'], -data['PRES'],
size=8, line_color="seagreen", fill_color="mediumseagreen", fill_alpha=0.3)
p2.xaxis.axis_label = "Salinity"
p2.yaxis.axis_label = "Depth [m]"
p = row(p1, p2)
show(p)
# ToDo: Include a shaded area for unfeasible values
idx_valid = (data['TEMP'] > -2) & (data['TEMP'] < 40)
p1 = figure(plot_width=420, plot_height=600, title="Global Range Check (-2 <= T <= 40)")
p1.circle(data['TEMP'][idx_valid], -data['PRES'][idx_valid], size=8, line_color="seagreen", fill_color="mediumseagreen", fill_alpha=0.3, legend_label="Good values")
p1.triangle(data['TEMP'][~idx_valid], -data['PRES'][~idx_valid], size=8, line_color="red", fill_color="red", fill_alpha=0.3, legend_label="Bad values")
p1.xaxis.axis_label = "Temperature [C]"
p1.yaxis.axis_label = "Depth [m]"
idx_valid = (data['PSAL'] > 0) & (data['PSAL'] < 41)
p2 = figure(plot_width=420, plot_height=600, title="Global Range Check (0 <= S <= 41)")
p2.y_range = p1.y_range
p2.circle(data['PSAL'][idx_valid], -data['PRES'][idx_valid], size=8, line_color="seagreen", fill_color="mediumseagreen", fill_alpha=0.3, legend_label="Good values")
p2.triangle(data['PSAL'][~idx_valid], -data['PRES'][~idx_valid], size=8, line_color="red", fill_color="red", fill_alpha=0.3, legend_label="Bad values")
p2.xaxis.axis_label = "Pratical Salinity"
p2.yaxis.axis_label = "Depth [m]"
p = row(p1, p2)
show(p)
y = qctests.GlobalRange(data, varname='TEMP', cfg={"minval": -2, "maxval": 40})
y.flags
flag = y.flags["global_range"]
data["TEMP"][flag==4]
def spike(x):
    """Spike check as defined by GTSPP.

    For each interior point the spike magnitude is the distance from the
    point to the midpoint of its two neighbors, minus half the distance
    between those neighbors.  Endpoints have no two neighbors and are NaN.
    (Restores the docstring delimiters that were lost, which made the body
    a syntax error.)

    Notes
    -----
    - Check CoTeDe's manual for more details.

    Args:
        x: 1-D numpy array of observed values.

    Returns:
        Array of the same length as ``x``: spike magnitude at interior
        points, NaN at both ends.
    """
    y = np.nan * x
    y[1:-1] = np.abs(x[1:-1] - (x[:-2] + x[2:]) / 2.0) - np.abs((x[2:] - x[:-2]) / 2.0)
    return y
temp_spike = qctests.spike(data["TEMP"])
print("The largest spike observed was: {:.3f}".format(np.nanmax(np.abs(temp_spike))))
y_spike = qctests.Spike(data, "TEMP", cfg={"threshold": 2.0})
y_spike.flags
y_spike.features
y_gradient = qctests.Gradient(data, "TEMP", cfg={"threshold": 10})
y_gradient.flags
y_tukey53H = qctests.Tukey53H(data, "TEMP", cfg={"threshold": 2.0})
y_tukey53H.flags
cfg = {
"TEMP": {
"global_range": {"minval": -2, "maxval": 40},
"gradient": {"threshold": 10.0},
"spike": {"threshold": 2.0},
"tukey53H": {"threshold": 1.5},
},
"PSAL": {
"global_range": {"minval": 0, "maxval": 41},
"gradient": {"threshold": 5.0},
"spike": {"threshold": 0.3},
"tukey53H": {"threshold": 1.0},
}
}
pqc = cotede.ProfileQC(data, cfg=cfg)
print("Variables available in data: {}\n".format(", ".join(data.keys())))
print("Variables available in pqc: {}\n".format(", ".join(pqc.keys())))
print("Variables flagged in pqc: {}\n".format(", ".join(pqc.flags.keys())))
print("Flags available for temperature {}\n".format(pqc.flags["TEMP"].keys()))
print("Flags available for salinity {}\n".format(pqc.flags["PSAL"].keys()))
flag = pqc.flags["TEMP"]["overall"]
print('Overall flags for TEMP:', flag)
print("Variables available in data: {}\n".format(data.keys()))
print("Variables available in pqc: {}\n".format(pqc.keys()))
print("Flags available for temperature {}\n".format(pqc.flags["TEMP"].keys()))
print("Flags available for salinity {}\n".format(pqc.flags["PSAL"].keys()))
# ToDo: Include a shaded area for unfeasible values
idx_valid = (pqc.flags["TEMP"]["overall"] <= 2)
p1 = figure(plot_width=420, plot_height=600, title="Global Range Check (-2 <= T <= 40)")
p1.circle(data['TEMP'][idx_valid], -data['PRES'][idx_valid], size=8, line_color="seagreen", fill_color="mediumseagreen", fill_alpha=0.3, legend_label="Good values")
p1.triangle(data['TEMP'][~idx_valid], -data['PRES'][~idx_valid], size=8, line_color="red", fill_color="red", fill_alpha=0.3, legend_label="Bad values")
p1.xaxis.axis_label = "Temperature [C]"
p1.yaxis.axis_label = "Depth [m]"
idx_valid = (pqc.flags["PSAL"]["overall"] <= 2)
p2 = figure(plot_width=420, plot_height=600, title="Global Range Check (0 <= S <= 41)")
p2.y_range = p1.y_range
p2.circle(data['PSAL'][idx_valid], -data['PRES'][idx_valid], size=8, line_color="seagreen", fill_color="mediumseagreen", fill_alpha=0.3, legend_label="Good values")
p2.triangle(data['PSAL'][~idx_valid], -data['PRES'][~idx_valid], size=8, line_color="red", fill_color="red", fill_alpha=0.3, legend_label="Bad values")
p2.xaxis.axis_label = "Pratical Salinity"
p2.yaxis.axis_label = "Depth [m]"
p = row(p1, p2)
show(p)
y_gradient = qctests.Gradient(data, 'TEMP', cfg={"threshold": 10})
y_gradient.flags
y_spike = qctests.Spike(data, 'TEMP', cfg={"threshold": 2.0})
y_spike.flags
pqced = cotede.ProfileQCed(data, cfg='eurogoos')
p = figure(plot_width=500, plot_height=600)
p.circle(pqced['TEMP'], -pqced['PRES'], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
show(p)
pqc = cotede.ProfileQC(data, cfg='eurogoos')
pqc.keys()
pqc.flags["TEMP"]
data.keys()
from cotede import qctests
y = qctests.GlobalRange(data, 'TEMP', cfg={'minval': -4, "maxval": 45 })
y.flags
y = qctests.Tukey53H(data, 'TEMP', cfg={'threshold': 6, "l": 12})
y.features["tukey53H"]
p = figure(plot_width=500, plot_height=600)
p.circle(y.features["tukey53H"], -data['PRES'], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
show(p)
cfg = {'TEMP': {'global_range': {'minval': -4, 'maxval': 45}}}
pqc = ProfileQC(data, cfg)
pqc.flags['TEMP']
pqc.flags['TEMP']['overall']
idx_good = pqc.flags['TEMP']['overall'] <= 2
idx_bad = pqc.flags['TEMP']['overall'] >= 3
p = figure(plot_width=500, plot_height=600)
p.circle(data['TEMP'][idx_good], -data['PRES'][idx_good], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
p.triangle(data['TEMP'][idx_bad], -data['PRES'][idx_bad], size=8, line_color="red", fill_color="red", fill_alpha=0.3)
show(p)
cfg['TEMP']['spike'] = {'threshold': 6}
pqc = ProfileQC(data, cfg)
pqc.flags['TEMP']
pqc.flags['TEMP']['overall']
idx_good = pqc.flags['TEMP']['overall'] <= 2
idx_bad = pqc.flags['TEMP']['overall'] >= 3
p = figure(plot_width=500, plot_height=600)
p.circle(data['TEMP'][idx_good], -data['PRES'][idx_good], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
p.triangle(data['TEMP'][idx_bad], -data['PRES'][idx_bad], size=8, line_color="red", fill_color="red", fill_alpha=0.3)
show(p)
cfg['TEMP']['woa_normbias'] = {'threshold': 6}
pqc = ProfileQC(data, cfg)
pqc.flags['TEMP']
pqc.flags['TEMP']['overall']
idx_good = pqc.flags['TEMP']['overall'] <= 2
idx_bad = pqc.flags['TEMP']['overall'] >= 3
p = figure(plot_width=500, plot_height=600)
p.circle(data['TEMP'][idx_good], -data['PRES'][idx_good], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
p.triangle(data['TEMP'][idx_bad], -data['PRES'][idx_bad], size=8, line_color="red", fill_color="red", fill_alpha=0.3)
show(p)
cfg['TEMP']['spike_depthconditional'] = {"pressure_threshold": 500, "shallow_max": 6.0, "deep_max": 2.0}
pqc = ProfileQC(data, cfg)
pqc.flags['TEMP']
pqc.flags['TEMP']['overall']
idx_good = pqc.flags['TEMP']['overall'] <= 2
idx_bad = pqc.flags['TEMP']['overall'] >= 3
p = figure(plot_width=500, plot_height=600)
p.circle(data['TEMP'][idx_good], -data['PRES'][idx_good], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
p.triangle(data['TEMP'][idx_bad], -data['PRES'][idx_bad], size=8, line_color="red", fill_color="red", fill_alpha=0.3)
show(p)
## The Easiest Way: High level
Let's evaluate this profile using EuroGOOS standard tests.
pqced = cotede.ProfileQCed(data, cfg='eurogoos')
p = figure(plot_width=500, plot_height=600)
p.circle(pqced['TEMP'], -pqced['PRES'], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
show(p)
## QC with more control: "medium" level
pqc = cotede.ProfileQC(data, cfg='eurogoos')
pqc.keys()
pqc.flags["TEMP"]
data.keys()
### Low level
from cotede import qctests
y = qctests.GlobalRange(data, 'TEMP', cfg={'minval': -4, "maxval": 45 })
y.flags
y = qctests.Tukey53H(data, 'TEMP', cfg={'threshold': 6, "l": 12})
y.features["tukey53H"]
p = figure(plot_width=500, plot_height=600)
p.circle(y.features["tukey53H"], -data['PRES'], size=8, line_color="green", fill_color="green", fill_alpha=0.3)
show(p)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1 - Gradient Descent
Step4: Expected Output
Step6: Expected Output
Step8: Expected Output
Step10: Expected Output
Step12: Expected Output
Step13: Expected Output
Step15: We have already implemented a 3-layer neural network. You will train it with
Step16: You will now run this 3 layer neural network with each of the 3 optimization methods.
Step17: 5.2 - Mini-batch gradient descent with momentum
Step18: 5.3 - Mini-batch with Adam mode
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """Update parameters using one step of gradient descent.

    (Restores the triple-quoted docstring delimiters that were lost, which
    left bare prose inside the body — a syntax error.)

    Arguments:
    parameters -- python dictionary containing your parameters to be updated:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients to update each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    learning_rate -- the learning rate, scalar.

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    L = len(parameters) // 2  # number of layers in the neural networks

    # Update rule for each parameter: theta := theta - alpha * d(theta)
    for l in range(L):
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)]
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)]

    return parameters
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size=64, seed=0):
    """Creates a list of random minibatches from (X, Y).

    (Restores the docstring delimiters that were lost, which made the body
    a syntax error.)

    Arguments:
    X -- input data, of shape (input size, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    mini_batch_size -- size of the mini-batches, integer
    seed -- RNG seed so shuffles are reproducible

    Returns:
    mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
    """
    np.random.seed(seed)  # To make your "random" minibatches the same as ours
    m = X.shape[1]  # number of training examples
    mini_batches = []

    # Step 1: Shuffle (X, Y) with one shared column permutation so each
    # example stays paired with its label.
    permutation = list(np.random.permutation(m))
    shuffled_X = X[:, permutation]
    shuffled_Y = Y[:, permutation].reshape((1, m))

    # Step 2: Partition (shuffled_X, shuffled_Y) into full-sized batches.
    num_complete_minibatches = math.floor(m / mini_batch_size)
    for k in range(0, num_complete_minibatches):
        mini_batch_X = shuffled_X[:, k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batch_Y = shuffled_Y[:, k * mini_batch_size:(k + 1) * mini_batch_size]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    # Handling the end case (last mini-batch < mini_batch_size)
    if m % mini_batch_size != 0:
        mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size:]
        mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size:]
        mini_batches.append((mini_batch_X, mini_batch_Y))

    return mini_batches
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """Initializes the velocity as a python dictionary with:
                - keys: "dW1", "db1", ..., "dWL", "dbL"
                - values: numpy arrays of zeros of the same shape as the
                  corresponding gradients/parameters.

    (Restores the docstring delimiters that were lost, which made the body
    a syntax error.)

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl

    Returns:
    v -- python dictionary containing the current velocity.
                    v['dW' + str(l)] = velocity of dWl
                    v['db' + str(l)] = velocity of dbl
    """
    L = len(parameters) // 2  # number of layers in the neural networks
    v = {}

    # Initialize every velocity to zeros matching its parameter's shape.
    for l in range(L):
        v["dW" + str(l + 1)] = np.zeros_like(parameters["W" + str(l + 1)])
        v["db" + str(l + 1)] = np.zeros_like(parameters["b" + str(l + 1)])

    return v
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """Update parameters using gradient descent with Momentum.

    (Restores the docstring delimiters that were lost, which made the body
    a syntax error.)

    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- python dictionary containing the current velocity:
                    v['dW' + str(l)] = ...
                    v['db' + str(l)] = ...
    beta -- the momentum hyperparameter, scalar
    learning_rate -- the learning rate, scalar

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- python dictionary containing your updated velocities
    """
    L = len(parameters) // 2  # number of layers in the neural networks

    for l in range(L):
        # Exponentially weighted moving average of the gradients ("velocity").
        v["dW" + str(l + 1)] = beta * v["dW" + str(l + 1)] + (1 - beta) * grads["dW" + str(l + 1)]
        v["db" + str(l + 1)] = beta * v["db" + str(l + 1)] + (1 - beta) * grads["db" + str(l + 1)]
        # Step along the velocity rather than the raw gradient.
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v["dW" + str(l + 1)]
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v["db" + str(l + 1)]

    return parameters, v
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters):
    """Initializes v and s as two python dictionaries with:
                - keys: "dW1", "db1", ..., "dWL", "dbL"
                - values: numpy arrays of zeros of the same shape as the
                  corresponding gradients/parameters.

    (Restores the docstring delimiters that were lost, which made the body
    a syntax error.)

    Arguments:
    parameters -- python dictionary containing your parameters.
                    parameters["W" + str(l)] = Wl
                    parameters["b" + str(l)] = bl

    Returns:
    v -- python dictionary that will contain the exponentially weighted average of the gradient.
                    v["dW" + str(l)] = ...
                    v["db" + str(l)] = ...
    s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
                    s["dW" + str(l)] = ...
                    s["db" + str(l)] = ...
    """
    L = len(parameters) // 2  # number of layers in the neural networks
    v = {}
    s = {}

    # Both moment estimates start at zero with each parameter's shape.
    for l in range(L):
        v["dW" + str(l + 1)] = np.zeros_like(parameters["W" + str(l + 1)])
        v["db" + str(l + 1)] = np.zeros_like(parameters["b" + str(l + 1)])
        s["dW" + str(l + 1)] = np.zeros_like(parameters["W" + str(l + 1)])
        s["db" + str(l + 1)] = np.zeros_like(parameters["b" + str(l + 1)])

    return v, s
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
# GRADED FUNCTION: update_parameters_with_adam

def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Update parameters using Adam.

    Arguments:
    parameters -- python dictionary containing your parameters:
                  parameters['W' + str(l)] = Wl
                  parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameter:
             grads['dW' + str(l)] = dWl
             grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient (updated in place)
    s -- Adam variable, moving average of the squared gradient (updated in place)
    t -- Adam step counter (>= 1), used for the bias correction
    learning_rate -- the learning rate, scalar
    beta1 -- exponential decay hyperparameter for the first moment estimates
    beta2 -- exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates

    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient
    s -- Adam variable, moving average of the squared gradient
    """
    L = len(parameters) // 2  # number of layers in the neural network
    v_corrected = {}          # bias-corrected first moment estimates
    s_corrected = {}          # bias-corrected second moment estimates

    # Perform the Adam update on all parameters.
    for l in range(L):
        # Moving average of the gradients.
        v["dW" + str(l + 1)] = beta1 * v["dW" + str(l + 1)] + (1 - beta1) * grads["dW" + str(l + 1)]
        v["db" + str(l + 1)] = beta1 * v["db" + str(l + 1)] + (1 - beta1) * grads["db" + str(l + 1)]

        # Bias-corrected first moment estimate.
        v_corrected["dW" + str(l + 1)] = v["dW" + str(l + 1)] / (1 - np.power(beta1, t))
        v_corrected["db" + str(l + 1)] = v["db" + str(l + 1)] / (1 - np.power(beta1, t))

        # Moving average of the squared gradients.
        s["dW" + str(l + 1)] = beta2 * s["dW" + str(l + 1)] + (1 - beta2) * np.square(grads["dW" + str(l + 1)])
        s["db" + str(l + 1)] = beta2 * s["db" + str(l + 1)] + (1 - beta2) * np.square(grads["db" + str(l + 1)])

        # Bias-corrected second raw moment estimate.
        s_corrected["dW" + str(l + 1)] = s["dW" + str(l + 1)] / (1 - np.power(beta2, t))
        s_corrected["db" + str(l + 1)] = s["db" + str(l + 1)] / (1 - np.power(beta2, t))

        # Parameter update (step scaled by the bias-corrected moments).
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * v_corrected["dW" + str(l + 1)] / np.sqrt(s_corrected["dW" + str(l + 1)] + epsilon)
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * v_corrected["db" + str(l + 1)] / np.sqrt(s_corrected["db" + str(l + 1)] + epsilon)

    return parameters, v, s
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
train_X, train_Y = load_dataset()
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.

    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- one of "gd", "momentum", "adam"
    learning_rate -- the learning rate, scalar
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- exponential decay hyperparameter for the past gradients estimates
    beta2 -- exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    L = len(layers_dims)  # number of layers in the neural networks
    costs = []            # to keep track of the cost
    t = 0                 # counter required for the Adam bias correction
    seed = 10             # for grading: makes the "random" minibatches reproducible

    # Initialize parameters.
    parameters = initialize_parameters(layers_dims)

    # Initialize the optimizer state.
    if optimizer == "gd":
        pass  # no state required for plain gradient descent
    elif optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)

    # Optimization loop.
    for i in range(num_epochs):
        # Increment the seed so the dataset is reshuffled differently each epoch.
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)

        for minibatch in minibatches:
            (minibatch_X, minibatch_Y) = minibatch

            # Forward propagation.
            a3, caches = forward_propagation(minibatch_X, parameters)

            # Compute cost.
            cost = compute_cost(a3, minibatch_Y)

            # Backward propagation.
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)

            # Update parameters with the selected optimizer.
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1  # Adam counter
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)

        # Print/record the cost periodically (cost of the last minibatch of the epoch).
        if print_cost and i % 1000 == 0:
            print ("Cost after epoch %i: %f" %(i, cost))
        if print_cost and i % 100 == 0:
            costs.append(cost)

    # Plot the cost curve.
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()

    return parameters
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 1
Step2: Problem 2
Step3: Problem 3
Step4: Problem 4
Step5: Problem 5
Step6: Problem 6
Step7: Problem 7
Step8: Problem 8
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np # np.array (and used internally in cvxpy)
import cvxpy as cvx
import sys
print("Using CVX version", cvx.__version__)
print(" and python version", sys.version)
A = np.array([[1, 6,11, 5,10, 4, 9, 3, 8, 2],
[2, 7, 1, 6,11, 5,10, 4, 9, 3],
[3, 8, 2, 7, 1, 6,11, 5,10, 4],
[4, 9, 3, 8, 2, 7, 1, 6,11, 5],
[5,10, 4, 9, 3, 8, 2, 7, 1, 6]])
y = np.array([1,2,3,4,5]).T
tol = 1e-12
def print_status(prob, x):
    """Print the status, solver statistics, optimal value and optimal
    variable of a solved optimization problem.

    Arguments:
    prob -- a solved problem exposing .status, .solver_stats and .value
    x -- the decision variable whose .value should be displayed
    """
    print("Problem status: ", prob.status)
    print("Used the solver: ",
          prob.solver_stats.solver_name, "with",
          prob.solver_stats.num_iters, "iterations.")
    print("Optimal value: ", prob.value)
    print("Optimal var:\n", x.value)
x = cvx.Variable(10) # column vector with 10 elements
obj = cvx.Minimize(cvx.norm(x)) # cvx.norm defaults to the 2-norm
constraints = [cvx.norm(A*x-y) <= 0.1] # specify a list of constraints
prob = cvx.Problem(obj, constraints)
prob.solve(abstol=tol,reltol=tol)
print_status(prob, x)
x = cvx.Variable(10)
obj = cvx.Minimize(cvx.norm(x)**2) # cvxpy objects implement the standard python ops
constraints = [cvx.norm(A*x-y) <= 0.1]
prob = cvx.Problem(obj, constraints)
prob.solve(abstol=tol,reltol=tol)
print_status(prob, x)
x = cvx.Variable(10)
obj = cvx.Minimize(cvx.norm(x, p=1))
constraints = [cvx.norm(A*x-y) <= 0.1]
prob = cvx.Problem(obj, constraints)
prob.solve(abstol=tol,reltol=tol)
print_status(prob, x)
def get_problem1_dual_value():
    """Re-solve problem 1 and return the dual value of its single constraint.

    Relies on the module-level names ``x``, ``A``, ``y``, ``tol`` and ``cvx``.
    """
    obj = cvx.Minimize(cvx.norm(x))
    constraints = [cvx.norm(A*x-y) <= 0.1]
    prob = cvx.Problem(obj, constraints)
    prob.solve(abstol=tol, reltol=tol)
    return constraints[0].dual_value
x = cvx.Variable(10)
# resolve problem 1 and return dual value for the constraint
l = get_problem1_dual_value()
print("dual variable: ", l)
obj = cvx.Minimize(cvx.norm(x) + l*cvx.norm(A*x-y))
prob = cvx.Problem(obj)
prob.solve(abstol=tol,reltol=tol)
# note that the solution is the same, but the optimal value is different,
# since for problem 1 we form the Lagrangian \|x\|_2 + \lambda(\|Ax-y\|_2-0.1)
print_status(prob, x)
# the optimal value for problem 1 should be
print("problem 1 optimal value: ", prob.value - 0.1*l)
# x = cvx.Variable(5) # we need to make this an explicit column vector
x = cvx.Variable((5,1))
ones = np.ones((10,1))
obj = cvx.Minimize(sum(cvx.norm(A-x@ones.T, axis=0))) # cvx.norm behaves like np.linalg.norm
prob = cvx.Problem(obj)
prob.solve(abstol=tol,reltol=tol)
print_status(prob, x)
x = cvx.Variable((5,1)) # again, be careful here
ones = np.ones((10,1))
obj = cvx.Minimize(cvx.norm(A-x*ones.T))
prob = cvx.Problem(obj)
# ECOS solver won't solve this kind of thing. SCS is the new default
prob.solve(verbose=False,eps=tol) # ~1e-7 duality gap, but CVXOPT gets a singular KKT system
print_status(prob, x)
prob.solve(verbose=False, solver='CVXOPT', kktsolver='robust')
print_status(prob, x)
# X = cvx.Variable(5,10) # old syntax
X = cvx.Variable((5,10))
obj = cvx.Minimize(cvx.norm(X-A, 'fro'))
constraints = [ np.ones((5,)).T*X*np.ones((10,)) == 1. ]
prob = cvx.Problem(obj, constraints)
prob.solve()
print_status(prob, X)
B = A[:,0:5]
X = cvx.Variable((5,5)) # could use Semidef or Symmetric here instead
obj = cvx.Minimize(cvx.norm(X-B, 'fro'))
constraints = [ X == X.T, X >> 0 ] # X is PSD
prob = cvx.Problem(obj, constraints)
prob.solve()
print_status(prob, X)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import tensorflow and enable eager execution.
Step2: Download the dataset
Step3: Read the dataset
Step4: Creating dictionaries to map from characters to their indices and vice-versa, which will be used to vectorize the inputs
Step5: Creating the input and output tensors
Step6: Creating batches and shuffling them using tf.data
Step7: Creating the model
Step8: Call the model and set the optimizer and the loss function
Step9: Train the model
Step10: Predicting using our trained model
|
<ASSISTANT_TASK:>
Python Code:
!pip install unidecode
# Import TensorFlow >= 1.9 and enable eager execution
import tensorflow as tf
# Note: Once you enable eager execution, it cannot be disabled.
tf.enable_eager_execution()
import numpy as np
import re
import random
import unidecode
import time
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
text = unidecode.unidecode(open(path_to_file).read())
# length of text is the number of characters in it
print (len(text))
# unique contains all the unique characters in the file
unique = sorted(set(text))
# creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(unique)}
idx2char = {i:u for i, u in enumerate(unique)}
# setting the maximum length sentence we want for a single input in characters
max_length = 100
# length of the vocabulary in chars
vocab_size = len(unique)
# the embedding dimension
embedding_dim = 256
# number of RNN (here GRU) units
units = 1024
# batch size
BATCH_SIZE = 64
# buffer size to shuffle our dataset
BUFFER_SIZE = 10000
input_text = []
target_text = []
for f in range(0, len(text)-max_length, max_length):
inps = text[f:f+max_length]
targ = text[f+1:f+1+max_length]
input_text.append([char2idx[i] for i in inps])
target_text.append([char2idx[t] for t in targ])
print (np.array(input_text).shape)
print (np.array(target_text).shape)
dataset = tf.data.Dataset.from_tensor_slices((input_text, target_text)).shuffle(BUFFER_SIZE)
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(BATCH_SIZE))
class Model(tf.keras.Model):
    """Character-level language model: embedding -> GRU -> dense logits."""

    def __init__(self, vocab_size, embedding_dim, units, batch_size):
        super(Model, self).__init__()
        self.units = units
        self.batch_sz = batch_size

        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)

        # CuDNNGRU is only available on GPU; fall back to the plain GRU with
        # a sigmoid recurrent activation (matching the CuDNN kernel) on CPU.
        if tf.test.is_gpu_available():
            self.gru = tf.keras.layers.CuDNNGRU(self.units,
                                                return_sequences=True,
                                                return_state=True,
                                                recurrent_initializer='glorot_uniform')
        else:
            self.gru = tf.keras.layers.GRU(self.units,
                                           return_sequences=True,
                                           return_state=True,
                                           recurrent_activation='sigmoid',
                                           recurrent_initializer='glorot_uniform')

        self.fc = tf.keras.layers.Dense(vocab_size)

    def call(self, x, hidden):
        x = self.embedding(x)

        # output shape == (batch_size, max_length, hidden_size)
        # states shape == (batch_size, hidden_size)
        # `states` preserves the RNN state so the caller can feed it back in
        # at every training/prediction step.
        output, states = self.gru(x, initial_state=hidden)

        # Flatten time and batch so the Dense layer sees
        # (batch_size * max_length, hidden_size).
        output = tf.reshape(output, (-1, output.shape[2]))

        # The dense layer outputs predictions for every time step:
        # shape (max_length * batch_size, vocab_size).
        x = self.fc(output)

        return x, states
model = Model(vocab_size, embedding_dim, units, BATCH_SIZE)
optimizer = tf.train.AdamOptimizer()
# using sparse_softmax_cross_entropy so that we don't have to create one-hot vectors
def loss_function(real, preds):
    """Mean sparse softmax cross-entropy between integer labels and logits."""
    return tf.losses.sparse_softmax_cross_entropy(labels=real, logits=preds)
# Training step
EPOCHS = 30
for epoch in range(EPOCHS):
start = time.time()
# initializing the hidden state at the start of every epoch
hidden = model.reset_states()
for (batch, (inp, target)) in enumerate(dataset):
with tf.GradientTape() as tape:
# feeding the hidden state back into the model
# This is the interesting step
predictions, hidden = model(inp, hidden)
# reshaping the target because that's how the
# loss function expects it
target = tf.reshape(target, (-1,))
loss = loss_function(target, predictions)
grads = tape.gradient(loss, model.variables)
optimizer.apply_gradients(zip(grads, model.variables), global_step=tf.train.get_or_create_global_step())
if batch % 100 == 0:
print ('Epoch {} Batch {} Loss {:.4f}'.format(epoch+1,
batch,
loss))
print ('Epoch {} Loss {:.4f}'.format(epoch+1, loss))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
# Evaluation step(generating text using the model learned)
# number of characters to generate
num_generate = 1000
# You can change the start string to experiment
start_string = 'Q'
# converting our start string to numbers(vectorizing!)
input_eval = [char2idx[s] for s in start_string]
input_eval = tf.expand_dims(input_eval, 0)
# empty string to store our results
text_generated = ''
# low temperatures results in more predictable text.
# higher temperatures results in more surprising text
# experiment to find the best setting
temperature = 1.0
# hidden state shape == (batch_size, number of rnn units); here batch size == 1
hidden = [tf.zeros((1, units))]
for i in range(num_generate):
predictions, hidden = model(input_eval, hidden)
# using a multinomial distribution to predict the word returned by the model
predictions = predictions / temperature
predicted_id = tf.multinomial(tf.exp(predictions), num_samples=1)[0][0].numpy()
# We pass the predicted word as the next input to the model
# along with the previous hidden state
input_eval = tf.expand_dims([predicted_id], 0)
text_generated += idx2char[predicted_id]
print (start_string + text_generated)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Instantiate the estimator and the SearchCV objects
Step2: Fit the BayesSearchCV object locally
Step3: Everything up to this point is what you would do when training locally. With larger amount of data it would take much longer.
Step4: A Google Cloud Storage bucket belonging to your project created through either
Step5: Pick a cluster id for the cluster on Google Container Engine we will create. Preferably not an existing cluster to avoid affecting its workload.
Step6: Choose a name for the image that will be running on the container.
Step7: Choose a zone to host the cluster.
Step8: Change this only if you have customized the source.
Step9: Build a Docker image
Step10: Create a cluster
Step11: For GCE instance pricing
Step12: Refresh access token to the cluster
Step13: Deploy the fitting task
Step14: Inspect the GKEParallel object
Step15: Similarly, each job is given a job_name. The dictionary of job_names can be accessed as follows. Each worker pod handles one job processing one of the smaller search_spaces.
Step16: Cancel the task
Step17: Monitor the progress
Step18: When all the jobs are finished, the pods will stop running (but the cluster will remain), and we can retrieve the fitted model.
Step19: You can also get the logs from the pods
Step20: Once the jobs are finished, the cluster can be deleted. All the fitted models are stored on GCS.
Step21: The next cell continues to poll the jobs until they are all finished, downloads the results, and deletes the cluster.
Step22: Restore the GKEParallel object
Step23: Inspect the result
Step24: You can also call predict(), which delegates the call to the best_estimator_.
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import fetch_mldata
from sklearn.utils import shuffle
mnist = fetch_mldata('MNIST original', data_home='./mnist_data')
X, y = shuffle(mnist.data[:60000], mnist.target[:60000])
X_small = X[:100]
y_small = y[:100]
# Note: using only 10% of the training data
X_large = X[:6000]
y_large = y[:6000]
from sklearn.ensemble import RandomForestClassifier
from skopt import BayesSearchCV
from skopt.space import Integer, Real
rfc = RandomForestClassifier(n_jobs=-1)
search_spaces = {
'max_features': Real(0.5, 1.0),
'n_estimators': Integer(10, 200),
'max_depth': Integer(5, 45),
'min_samples_split': Real(0.01, 0.1)
}
search = BayesSearchCV(estimator=rfc, search_spaces=search_spaces, n_jobs=-1, verbose=3, n_iter=100)
%time search.fit(X_small, y_small)
print(search.best_score_, search.best_params_)
project_id = 'YOUR-PROJECT-ID'
bucket_name = 'YOUR-BUCKET-NAME'
cluster_id = 'YOUR-CLUSTER-ID'
image_name = 'YOUR-IMAGE-NAME'
zone = 'us-central1-b'
source_dir = 'source'
from helpers.cloudbuild_helper import build
build(project_id, source_dir, bucket_name, image_name)
from helpers.gke_helper import create_cluster
create_cluster(project_id, zone, cluster_id, n_nodes=1, machine_type='n1-standard-64')
from sklearn.ensemble import RandomForestClassifier
from skopt import BayesSearchCV
from skopt.space import Integer, Real
rfc = RandomForestClassifier(n_jobs=-1)
search_spaces = {
'max_features': Real(0.5, 1.0),
'n_estimators': Integer(10, 200),
'max_depth': Integer(5, 45),
'min_samples_split': Real(0.01, 0.1)
}
search = BayesSearchCV(estimator=rfc, search_spaces=search_spaces, n_jobs=-1, verbose=3, n_iter=100)
from gke_parallel import GKEParallel
gke_search = GKEParallel(search, project_id, zone, cluster_id, bucket_name, image_name)
! bash get_cluster_credentials.sh $cluster_id $zone
gke_search.fit(X_large, y_large)
gke_search.search_spaces
gke_search.task_name
gke_search.job_names
#gke_search.cancel()
gke_search.done(), gke_search.dones
result = gke_search.result(download=False)
from helpers.kubernetes_helper import get_pod_logs
for pod_name, log in get_pod_logs().items():
print('=' * 20)
print('\t{}\n'.format(pod_name))
print(log)
from helpers.gke_helper import delete_cluster
#delete_cluster(project_id, zone, cluster_id)
import time
from helpers.gke_helper import delete_cluster
while not gke_search.done():
n_done = len([d for d in gke_search.dones.values() if d])
print('{}/{} finished'.format(n_done, len(gke_search.job_names)))
time.sleep(60)
delete_cluster(project_id, zone, cluster_id)
result = gke_search.result(download=True)
from helpers.gcs_helper import download_uri_and_unpickle
gcs_uri = 'gs://YOUR-BUCKET-NAME/YOUR-CLUSTER-ID.YOUR-IMAGE-NAME.UNIX-TIME/gke_search.pkl'
gke_search_restored = download_uri_and_unpickle(gcs_uri)
gke_search.best_score_, gke_search.best_params_, gke_search.best_estimator_
predicted = gke_search.predict(mnist.data[60000:])
print(len([p for i, p in enumerate(predicted) if p == mnist.target[60000:][i]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, let’s create a small corpus of nine short documents [1]_
Step2: This is a tiny corpus of nine documents, each consisting of only a single sentence.
Step3: Your way of processing the documents will likely vary; here, I only split on whitespace
Step4: Here we assigned a unique integer id to all words appearing in the corpus with the
Step5: To actually convert tokenized documents to vectors
Step6: The function
Step7: By now it should be clear that the vector feature with id=10 stands for the question "How many
Step8: The full power of Gensim comes from the fact that a corpus doesn't have to be
Step9: Download the sample mycorpus.txt file here <https
Step10: Corpus is now an object. We didn't define any way to print it, so print just outputs address
Step11: Although the output is the same as for the plain Python list, the corpus is now much
Step12: And that is all there is to it! At least as far as bag-of-words representation is concerned.
Step13: Other formats include Joachim's SVMlight format <http
Step14: Conversely, to load a corpus iterator from a Matrix Market file
Step15: Corpus objects are streams, so typically you won't be able to print them directly
Step16: Instead, to view the contents of a corpus
Step17: or
Step18: The second way is obviously more memory-friendly, but for testing and development
Step19: In this way, gensim can also be used as a memory-efficient I/O format conversion tool
Step20: and from/to scipy.sparse matrices
Step21: What Next
|
<ASSISTANT_TASK:>
Python Code:
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
documents = [
"Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey",
]
from pprint import pprint # pretty-printer
from collections import defaultdict
# remove common words and tokenize
stoplist = set('for a of the and to in'.split())
texts = [
[word for word in document.lower().split() if word not in stoplist]
for document in documents
]
# remove words that appear only once
frequency = defaultdict(int)
for text in texts:
for token in text:
frequency[token] += 1
texts = [
[token for token in text if frequency[token] > 1]
for text in texts
]
pprint(texts)
from gensim import corpora
dictionary = corpora.Dictionary(texts)
dictionary.save('/tmp/deerwester.dict') # store the dictionary, for future reference
print(dictionary)
print(dictionary.token2id)
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
print(new_vec) # the word "interaction" does not appear in the dictionary and is ignored
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize('/tmp/deerwester.mm', corpus) # store to disk, for later use
print(corpus)
from smart_open import open # for transparently opening remote files
class MyCorpus:
    """Memory-friendly corpus: streams one bag-of-words document per line
    from a remote text file, using the module-level ``dictionary``."""

    def __iter__(self):
        for line in open('https://radimrehurek.com/mycorpus.txt'):
            # assume there's one document per line, tokens separated by whitespace
            yield dictionary.doc2bow(line.lower().split())
# This flexibility allows you to create your own corpus classes that stream the
# documents directly from disk, network, database, dataframes... The models
# in Gensim are implemented such that they don't require all vectors to reside
# in RAM at once. You can even create the documents on the fly!
corpus_memory_friendly = MyCorpus() # doesn't load the corpus into memory!
print(corpus_memory_friendly)
for vector in corpus_memory_friendly: # load one vector into memory at a time
print(vector)
# collect statistics about all tokens
dictionary = corpora.Dictionary(line.lower().split() for line in open('https://radimrehurek.com/mycorpus.txt'))
# remove stop words and words that appear only once
stop_ids = [
dictionary.token2id[stopword]
for stopword in stoplist
if stopword in dictionary.token2id
]
once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.items() if docfreq == 1]
dictionary.filter_tokens(stop_ids + once_ids) # remove stop words and words that appear only once
dictionary.compactify() # remove gaps in id sequence after words that were removed
print(dictionary)
corpus = [[(1, 0.5)], []] # make one document empty, for the heck of it
corpora.MmCorpus.serialize('/tmp/corpus.mm', corpus)
corpora.SvmLightCorpus.serialize('/tmp/corpus.svmlight', corpus)
corpora.BleiCorpus.serialize('/tmp/corpus.lda-c', corpus)
corpora.LowCorpus.serialize('/tmp/corpus.low', corpus)
corpus = corpora.MmCorpus('/tmp/corpus.mm')
print(corpus)
# one way of printing a corpus: load it entirely into memory
print(list(corpus)) # calling list() will convert any sequence to a plain Python list
# another way of doing it: print one document at a time, making use of the streaming interface
for doc in corpus:
print(doc)
corpora.BleiCorpus.serialize('/tmp/corpus.lda-c', corpus)
import gensim
import numpy as np
numpy_matrix = np.random.randint(10, size=[5, 2]) # random matrix as an example
corpus = gensim.matutils.Dense2Corpus(numpy_matrix)
# numpy_matrix = gensim.matutils.corpus2dense(corpus, num_terms=number_of_corpus_features)
import scipy.sparse
scipy_sparse_matrix = scipy.sparse.random(5, 2) # random sparse matrix as example
corpus = gensim.matutils.Sparse2Corpus(scipy_sparse_matrix)
scipy_csc_matrix = gensim.matutils.corpus2csc(corpus)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('run_corpora_and_vector_spaces.png')
imgplot = plt.imshow(img)
_ = plt.axis('off')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Define a function to split the input file into training and testing datasets.
Step6: Define functions to train, evaluate, and save the trained model.
Step8: Define a class for your model, with methods for training and prediction.
Step9: Train an XGBoost model in a notebook
Step10: Set up Kubeflow Fairing for training and predictions on GCP
Step11: Train an XGBoost model remotely on Kubeflow
Step12: Deploy the trained model to Kubeflow for predictions
Step13: Call the prediction endpoint
|
<ASSISTANT_TASK:>
Python Code:
!pip install -r requirements.txt
import argparse
import logging
import joblib
import sys
import pandas as pd
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from xgboost import XGBRegressor
logging.basicConfig(format='%(message)s')
logging.getLogger().setLevel(logging.INFO)
def read_input(file_name, test_size=0.25):
    """Read input data and split it into train and test.

    Arguments:
    file_name -- path to a CSV file containing a ``SalePrice`` target column
    test_size -- fraction of rows held out for evaluation

    Returns:
    (train_X, train_y), (test_X, test_y) -- numeric feature arrays (with
    missing values imputed) and the matching target arrays
    """
    data = pd.read_csv(file_name)
    data.dropna(axis=0, subset=['SalePrice'], inplace=True)

    y = data.SalePrice
    # Keep only numeric columns; drop the target from the features.
    X = data.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])

    train_X, test_X, train_y, test_y = train_test_split(X.values,
                                                        y.values,
                                                        test_size=test_size,
                                                        shuffle=False)

    # Fit the imputer on the training split only, to avoid test-set leakage.
    imputer = SimpleImputer()
    train_X = imputer.fit_transform(train_X)
    test_X = imputer.transform(test_X)

    return (train_X, train_y), (test_X, test_y)
def train_model(train_X,
                train_y,
                test_X,
                test_y,
                n_estimators,
                learning_rate):
    """Train the model using XGBRegressor.

    Arguments:
    train_X, train_y -- training features and targets
    test_X, test_y -- held-out evaluation features and targets
    n_estimators -- maximum number of boosting rounds
    learning_rate -- boosting step size

    Returns:
    the fitted XGBRegressor
    """
    model = XGBRegressor(n_estimators=n_estimators, learning_rate=learning_rate)
    model.fit(train_X,
              train_y,
              early_stopping_rounds=40,
              eval_set=[(test_X, test_y)])

    # BUG FIX: the original passed the format string and its arguments as
    # separate positional args to print(), so they were never interpolated.
    # Use logging.info with lazy %-args, consistent with eval_model/save_model.
    logging.info("Best RMSE on eval: %.2f with %d rounds",
                 model.best_score,
                 model.best_iteration + 1)
    return model
def eval_model(model, test_X, test_y):
    """Evaluate the model performance by logging the mean absolute error
    of its predictions on the held-out set."""
    predictions = model.predict(test_X)
    logging.info("mean_absolute_error=%.2f", mean_absolute_error(predictions, test_y))
def save_model(model, model_file):
    """Save XGBoost model for serving by pickling it with joblib."""
    joblib.dump(model, model_file)
    logging.info("Model export success: %s", model_file)
class HousingServe(object):
    """Train an XGBoost house-price model and serve predictions from it."""

    def __init__(self):
        self.train_input = "ames_dataset/train.csv"
        self.n_estimators = 50
        self.learning_rate = 0.1
        self.model_file = "trained_ames_model.dat"
        self.model = None  # loaded lazily on the first predict() call

    def train(self):
        """Read the dataset, train, evaluate and persist the model."""
        (train_X, train_y), (test_X, test_y) = read_input(self.train_input)
        model = train_model(train_X,
                            train_y,
                            test_X,
                            test_y,
                            self.n_estimators,
                            self.learning_rate)

        eval_model(model, test_X, test_y)
        save_model(model, self.model_file)

    def predict(self, X):
        """Predict using the model for given ndarray."""
        if not self.model:
            self.model = joblib.load(self.model_file)
        # Do any preprocessing
        prediction = self.model.predict(data=X)
        # Do any postprocessing
        return [prediction]
HousingServe().train()
import os
from kubeflow import fairing
# Setting up google container repositories (GCR) for storing output containers
# You can use any docker container registry istead of GCR
GCP_PROJECT = fairing.cloud.gcp.guess_project_name()
DOCKER_REGISTRY = 'gcr.io/{}/fairing-job'.format(GCP_PROJECT)
PY_VERSION = ".".join([str(x) for x in sys.version_info[0:3]])
BASE_IMAGE = 'python:{}'.format(PY_VERSION)
from kubeflow import fairing
from kubeflow.fairing.builders.cluster import gcs_context
from kubeflow.fairing.builders.cluster.cluster import ClusterBuilder
from kubeflow.fairing.deployers.tfjob.tfjob import TfJob
from kubeflow.fairing.preprocessors.function import FunctionPreProcessor
preprocessor = FunctionPreProcessor(function_obj=HousingServe, input_files=['ames_dataset/train.csv', "requirements.txt"])
builder = ClusterBuilder(registry=DOCKER_REGISTRY, base_image=BASE_IMAGE, preprocessor=preprocessor,
pod_spec_mutators=[fairing.cloud.gcp.add_gcp_credentials_if_exists],
context_source=gcs_context.GCSContextSource())
builder.build()
pod_spec = builder.generate_pod_spec()
deployer = TfJob(pod_spec_mutators=[fairing.cloud.gcp.add_gcp_credentials_if_exists],
worker_count=1, chief_count=0)
deployer.deploy(pod_spec)
from kubeflow import fairing
from kubeflow.fairing.builders.cluster import gcs_context
from kubeflow.fairing.builders.cluster.cluster import ClusterBuilder
from kubeflow.fairing.deployers.job.job import Job
from kubeflow.fairing.deployers.serving.serving import Serving
from kubeflow.fairing.preprocessors.function import FunctionPreProcessor
preprocessor = FunctionPreProcessor(function_obj=HousingServe, input_files=['trained_ames_model.dat', "requirements.txt"])
builder = ClusterBuilder(registry=DOCKER_REGISTRY, base_image=BASE_IMAGE, preprocessor=preprocessor,
pod_spec_mutators=[fairing.cloud.gcp.add_gcp_credentials_if_exists],
context_source=gcs_context.GCSContextSource())
builder.build()
pod_spec = builder.generate_pod_spec()
deployer = Serving(serving_class="HousingServe")
endpoint = deployer.deploy(pod_spec)
print(endpoint)
!curl REPLACE_WITH_ENDPOINT/predict -H "Content-Type: application/x-www-form-urlencoded" -d 'json={"data":{"tensor":{"shape":[1,37],"values":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37]}}}'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's see what dataset looks like
Step2: One of the missions of deepchem is to form a synapse between the chemical and the algorithmic worlds
Step3: Now that we're oriented, let's use ML to do some chemistry.
Step4: Note how we separate our featurizers into those that featurize individual chemical compounds, compound_featurizers, and those that featurize molecular complexes, complex_featurizers.
Step5: Now, we conduct a train-test split. If you'd like, you can choose splittype="scaffold" instead to perform a train-test split based on Bemis-Murcko scaffolds.
Step6: We generate separate instances of the Dataset() object to hermetically seal the train dataset from the test dataset. This style lends itself easily to validation-set type hyperparameter searches, which we will illustrate in a separate section of this tutorial.
Step7: The performance of many ML algorithms hinges greatly on careful data preprocessing. Deepchem comes standard with a few options for such preprocessing.
Step8: Now, we're ready to do some learning! To set up a model, we will need
Step9: In this simple example, in few yet intuitive lines of code, we traced the machine learning arc from featurizing a raw dataset to fitting and evaluating a model.
Step10: The protein-ligand complex view.
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%pdb off
# set DISPLAY = True when running tutorial
DISPLAY = False
# set PARALLELIZE to true if you want to use ipyparallel
PARALLELIZE = False
import warnings
warnings.filterwarnings('ignore')
dataset_file= "../datasets/pdbbind_core_df.pkl.gz"
from deepchem.utils.save import load_from_disk
dataset = load_from_disk(dataset_file)
print("Type of dataset is: %s" % str(type(dataset)))
print(dataset[:5])
print("Shape of dataset is: %s" % str(dataset.shape))
import nglview
import tempfile
import os
import mdtraj as md
import numpy as np
import deepchem.utils.visualization
from deepchem.utils.visualization import combine_mdtraj, visualize_complex, convert_lines_to_mdtraj
first_protein, first_ligand = dataset.iloc[0]["protein_pdb"], dataset.iloc[0]["ligand_pdb"]
protein_mdtraj = convert_lines_to_mdtraj(first_protein)
ligand_mdtraj = convert_lines_to_mdtraj(first_ligand)
complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj)
if DISPLAY:
ngltraj = visualize_complex(complex_mdtraj)
ngltraj
from deepchem.featurizers.fingerprints import CircularFingerprint
from deepchem.featurizers.basic import RDKitDescriptors
from deepchem.featurizers.nnscore import NNScoreComplexFeaturizer
from deepchem.featurizers.grid_featurizer import GridFeaturizer
grid_featurizer = GridFeaturizer(voxel_width=16.0, feature_types="voxel_combined", voxel_feature_types=["ecfp",
"splif", "hbond", "pi_stack", "cation_pi", "salt_bridge"], ecfp_power=5, splif_power=5,
parallel=True, flatten=True)
compound_featurizers = [CircularFingerprint(size=128)]
# TODO(rbharath, enf): The grid featurizer breaks. Need to debug before code release
complex_featurizers = []
#complex_featurizers = [grid_featurizer]
#Make a directory in which to store the featurized complexes.
import tempfile, shutil
base_dir = "./tutorial_output"
if not os.path.exists(base_dir):
os.makedirs(base_dir)
data_dir = os.path.join(base_dir, "data")
if not os.path.exists(data_dir):
os.makedirs(data_dir)
featurized_samples_file = os.path.join(data_dir, "featurized_samples.joblib")
feature_dir = os.path.join(base_dir, "features")
if not os.path.exists(feature_dir):
os.makedirs(feature_dir)
samples_dir = os.path.join(base_dir, "samples")
if not os.path.exists(samples_dir):
os.makedirs(samples_dir)
train_dir = os.path.join(base_dir, "train")
if not os.path.exists(train_dir):
os.makedirs(train_dir)
valid_dir = os.path.join(base_dir, "valid")
if not os.path.exists(valid_dir):
os.makedirs(valid_dir)
test_dir = os.path.join(base_dir, "test")
if not os.path.exists(test_dir):
os.makedirs(test_dir)
model_dir = os.path.join(base_dir, "model")
if not os.path.exists(model_dir):
os.makedirs(model_dir)
import deepchem.featurizers.featurize
from deepchem.featurizers.featurize import DataFeaturizer
featurizers = compound_featurizers + complex_featurizers
featurizer = DataFeaturizer(tasks=["label"],
smiles_field="smiles",
protein_pdb_field="protein_pdb",
ligand_pdb_field="ligand_pdb",
compound_featurizers=compound_featurizers,
complex_featurizers=complex_featurizers,
id_field="complex_id",
verbose=False)
if PARALLELIZE:
from ipyparallel import Client
c = Client()
dview = c[:]
else:
dview = None
featurized_samples = featurizer.featurize(dataset_file, feature_dir, samples_dir,
worker_pool=dview, shard_size=32)
from deepchem.utils.save import save_to_disk, load_from_disk
save_to_disk(featurized_samples, featurized_samples_file)
featurized_samples = load_from_disk(featurized_samples_file)
splittype = "random"
train_samples, test_samples = featurized_samples.train_test_split(
splittype, train_dir, test_dir, seed=2016)
from deepchem.utils.dataset import Dataset
train_dataset = Dataset(data_dir=train_dir, samples=train_samples,
featurizers=compound_featurizers, tasks=["label"])
test_dataset = Dataset(data_dir=test_dir, samples=test_samples,
featurizers=compound_featurizers, tasks=["label"])
from deepchem.transformers import NormalizationTransformer
from deepchem.transformers import ClippingTransformer
input_transformers = [NormalizationTransformer(transform_X=True, dataset=train_dataset),
ClippingTransformer(transform_X=True, dataset=train_dataset)]
output_transformers = [NormalizationTransformer(transform_y=True, dataset=train_dataset)]
transformers = input_transformers + output_transformers
for transformer in transformers:
transformer.transform(train_dataset)
for transformer in transformers:
transformer.transform(test_dataset)
from sklearn.ensemble import RandomForestRegressor
from deepchem.models.standard import SklearnModel
task_types = {"label": "regression"}
model_params = {"data_shape": train_dataset.get_data_shape()}
model = SklearnModel(task_types, model_params, model_instance=RandomForestRegressor())
model.fit(train_dataset)
model_dir = tempfile.mkdtemp()
model.save(model_dir)
from deepchem.utils.evaluate import Evaluator
import pandas as pd
evaluator = Evaluator(model, train_dataset, output_transformers, verbose=True)
with tempfile.NamedTemporaryFile() as train_csv_out:
with tempfile.NamedTemporaryFile() as train_stats_out:
_, train_r2score = evaluator.compute_model_performance(
train_csv_out, train_stats_out)
evaluator = Evaluator(model, test_dataset, output_transformers, verbose=True)
test_csv_out = tempfile.NamedTemporaryFile()
with tempfile.NamedTemporaryFile() as test_stats_out:
_, test_r2score = evaluator.compute_model_performance(
test_csv_out, test_stats_out)
print test_csv_out.name
train_test_performance = pd.concat([train_r2score, test_r2score])
train_test_performance["split"] = ["train", "test"]
train_test_performance
predictions = pd.read_csv(test_csv_out.name)
predictions = predictions.sort(['label'], ascending=[0])
from deepchem.utils.visualization import visualize_ligand
top_ligand = predictions.iloc[0]['ids']
ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==top_ligand]['ligand_pdb'].values[0])
if DISPLAY:
ngltraj = visualize_ligand(ligand1)
ngltraj
worst_ligand = predictions.iloc[predictions.shape[0]-2]['ids']
ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==worst_ligand]['ligand_pdb'].values[0])
if DISPLAY:
ngltraj = visualize_ligand(ligand1)
ngltraj
import deepchem.models.standard
from deepchem.models.standard import SklearnModel
from deepchem.utils.dataset import Dataset
from deepchem.utils.evaluate import Evaluator
from deepchem.hyperparameters import HyperparamOpt
train_dir, validation_dir, test_dir = tempfile.mkdtemp(), tempfile.mkdtemp(), tempfile.mkdtemp()
splittype="random"
train_samples, validation_samples, test_samples = featurized_samples.train_valid_test_split(
splittype, train_dir, validation_dir, test_dir, seed=2016)
task_types = {"label": "regression"}
performance = pd.DataFrame()
def model_builder(task_types, params_dict, verbosity):
    """Build a random-forest SklearnModel from a hyperparameter dict.

    ``verbosity`` is accepted to match the HyperparamOpt builder
    signature but is not used here.
    """
    n_trees = params_dict["n_estimators"]
    forest = RandomForestRegressor(n_estimators=n_trees)
    return SklearnModel(task_types, params_dict, model_instance=forest)
params_dict = {
"n_estimators": [10, 20, 40, 80, 160],
"data_shape": [train_dataset.get_data_shape()],
}
optimizer = HyperparamOpt(model_builder, {"pIC50": "regression"})
for feature_type in (complex_featurizers + compound_featurizers):
train_dataset = Dataset(data_dir=train_dir, samples=train_samples,
featurizers=[feature_type], tasks=["label"])
validation_dataset = Dataset(data_dir=validation_dir, samples=validation_samples,
featurizers=[feature_type], tasks=["label"])
for transformer in transformers:
transformer.transform(train_dataset)
for transformer in transformers:
transformer.transform(test_dataset)
best_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search(
params_dict, train_dataset, test_dataset, output_transformers, metric="r2_score")
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# TODO(rbharath, enf): Need to fix this to work with new hyperparam-opt framework.
#df = pd.DataFrame(performance[['r2_score','split','featurizer']].values, index=performance['n_trees'].values, columns=['r2_score', 'split', 'featurizer'])
#df = df.loc[df['split']=="validation"]
#df = df.drop('split', 1)
#fingerprint_df = df[df['featurizer'].str.contains('fingerprint')].drop('featurizer', 1)
#print fingerprint_df
#fingerprint_df.columns = ['ligand fingerprints']
#grid_df = df[df['featurizer'].str.contains('grid')].drop('featurizer', 1)
#grid_df.columns = ['complex features']
#df = pd.concat([fingerprint_df, grid_df], axis=1)
#print(df)
#plt.clf()
#df.plot()
#plt.ylabel("$R^2$")
#plt.xlabel("Number of trees")
train_dir, validation_dir, test_dir = tempfile.mkdtemp(), tempfile.mkdtemp(), tempfile.mkdtemp()
splittype="random"
train_samples, validation_samples, test_samples = featurized_samples.train_valid_test_split(
splittype, train_dir, validation_dir, test_dir, seed=2016)
feature_type = complex_featurizers
train_dataset = Dataset(data_dir=train_dir, samples=train_samples,
featurizers=feature_type, tasks=["label"])
validation_dataset = Dataset(data_dir=validation_dir, samples=validation_samples,
featurizers=feature_type, tasks=["label"])
test_dataset = Dataset(data_dir=test_dir, samples=test_samples,
featurizers=feature_type, tasks=["label"])
for transformer in transformers:
transformer.transform(train_dataset)
for transformer in transformers:
transformer.transform(valid_dataset)
for transformer in transformers:
transformer.transform(test_dataset)
model_params = {"data_shape": train_dataset.get_data_shape()}
rf_model = SklearnModel(task_types, model_params, model_instance=RandomForestRegressor(n_estimators=20))
rf_model.fit(train_dataset)
model_dir = tempfile.mkdtemp()
rf_model.save(model_dir)
evaluator = Evaluator(rf_model, train_dataset, output_transformers, verbose=True)
with tempfile.NamedTemporaryFile() as train_csv_out:
with tempfile.NamedTemporaryFile() as train_stats_out:
_, train_r2score = evaluator.compute_model_performance(
train_csv_out, train_stats_out)
evaluator = Evaluator(rf_model, test_dataset, output_transformers, verbose=True)
test_csv_out = tempfile.NamedTemporaryFile()
with tempfile.NamedTemporaryFile() as test_stats_out:
predictions, test_r2score = evaluator.compute_model_performance(
test_csv_out, test_stats_out)
train_test_performance = pd.concat([train_r2score, test_r2score])
train_test_performance["split"] = ["train", "test"]
train_test_performance["featurizer"] = [str(feature_type.__class__), str(feature_type.__class__)]
train_test_performance["n_trees"] = [n_trees, n_trees]
print(train_test_performance)
import deepchem.models.deep
from deepchem.models.deep import SingleTaskDNN
import numpy.random
from operator import mul
import itertools
params_dict = {"activation": ["relu"],
"momentum": [.9],
"batch_size": [50],
"init": ["glorot_uniform"],
"data_shape": [train_dataset.get_data_shape()],
"learning_rate": np.power(10., np.random.uniform(-5, -2, size=5)),
"decay": np.power(10., np.random.uniform(-6, -4, size=5)),
"nb_hidden": [1000],
"nb_epoch": [40],
"nesterov": [False],
"dropout": [.5],
"nb_layers": [1],
"batchnorm": [False],
}
optimizer = HyperparamOpt(SingleTaskDNN, task_types)
best_dnn, best_hyperparams, all_results = optimizer.hyperparam_search(
params_dict, train_dataset, valid_dataset, output_transformers, metric="r2_score", verbosity=None)
dnn_test_csv_out = tempfile.NamedTemporaryFile()
dnn_test_stats_out = tempfile.NamedTemporaryFile()
dnn_test_evaluator = Evaluator(best_dnn, test_dataset)
dnn_test_df, dnn_test_r2score = dnn_test_evaluator.compute_model_performance(
dnn_test_csv_out, dnn_test_stats_out)
dnn_test_r2_score = dnn_test_r2score.iloc[0]["r2_score"]
print("DNN Test set R^2 %f" % (dnn_test_r2_score))
task = "label"
dnn_predicted_test = np.array(dnn_test_df[task + "_pred"])
dnn_true_test = np.array(dnn_test_df[task])
plt.clf()
plt.scatter(dnn_true_test, dnn_predicted_test)
plt.xlabel('Predicted Ki')
plt.ylabel('True Ki')
plt.title(r'DNN predicted vs. true Ki')
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.plot([-3, 3], [-3, 3], marker=".", color='k')
rf_test_csv_out = tempfile.NamedTemporaryFile()
rf_test_stats_out = tempfile.NamedTemporaryFile()
rf_test_evaluator = Evaluator(rf_model, test_dataset)
rf_test_df, rf_test_r2score = rf_test_evaluator.compute_model_performance(
rf_test_csv_out, rf_test_stats_out)
rf_test_r2_score = rf_test_r2score.iloc[0]["r2_score"]
print("RF Test set R^2 %f" % (rf_test_r2_score))
plt.show()
task = "label"
rf_predicted_test = np.array(rf_test_df[task + "_pred"])
rf_true_test = np.array(rf_test_df[task])
plt.scatter(rf_true_test, rf_predicted_test)
plt.xlabel('Predicted Ki')
plt.ylabel('True Ki')
plt.title(r'RF predicted vs. true Ki')
plt.xlim([-2, 2])
plt.ylim([-2, 2])
plt.plot([-3, 3], [-3, 3], marker=".", color='k')
plt.show()
predictions = dnn_test_df.sort(['label'], ascending=[0])
top_complex = predictions.iloc[0]['ids']
best_complex = dataset.loc[dataset['complex_id']==top_complex]
protein_mdtraj = convert_lines_to_mdtraj(best_complex["protein_pdb"].values[0])
ligand_mdtraj = convert_lines_to_mdtraj(best_complex["ligand_pdb"].values[0])
complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj)
if DISPLAY:
ngltraj = visualize_complex(complex_mdtraj)
ngltraj
top_complex = predictions.iloc[1]['ids']
best_complex = dataset.loc[dataset['complex_id']==top_complex]
protein_mdtraj = convert_lines_to_mdtraj(best_complex["protein_pdb"].values[0])
ligand_mdtraj = convert_lines_to_mdtraj(best_complex["ligand_pdb"].values[0])
complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj)
if DISPLAY:
ngltraj = visualize_complex(complex_mdtraj)
ngltraj
top_complex = predictions.iloc[predictions.shape[0]-1]['ids']
best_complex = dataset.loc[dataset['complex_id']==top_complex]
protein_mdtraj = convert_lines_to_mdtraj(best_complex["protein_pdb"].values[0])
ligand_mdtraj = convert_lines_to_mdtraj(best_complex["ligand_pdb"].values[0])
complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj)
if DISPLAY:
ngltraj = visualize_complex(complex_mdtraj)
ngltraj
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Case1
Step2: Case 2
Step3: Case 3
Step4: Case 4
|
<ASSISTANT_TASK:>
Python Code:
# Definitions of parameters of the circuit
# Capacitance of generator [F]
C = 1e-6
# Parallel resistance (discharging the capacitor in the generator forming the tail of the impulse) [Ohm]
R1 = 4
# Series resistance (forming the head) [Ohm]
R2 = 150
# Inductance of the loop [H]
L = 1e-3
# Capacitance of the test object [F]
Co = 1e-6
# defining the time of the analysis
import numpy as np
t = np.linspace(0,6e-4, 1000)
# Case 1 two capacitors and the resistor
def dx1(y, t):
    """ODE right-hand side for Case 1 (two capacitors and a resistor).

    ``y[0]`` is the test-object capacitor voltage; ``t`` is unused
    because the system is time-invariant. Relies on the circuit
    constants R2, Co, C and Uo defined at module level.
    """
    voltage = y[0]
    rate = 1/(R2*Co)*(Uo - voltage - Co/C*voltage)
    return [rate]
# initial conditions
Uo = 200
y0 = [0.0]
# time constant
Cz = C*Co/(C+Co)
R2*Cz
from scipy.integrate import odeint, ode, romb, cumtrapz
X1 = odeint(dx1, y0, t)
uco = X1[:,0]
# the current and the other capacitor voltage
i = 1/(R2)*(Uo - uco - Co/C*uco)
uc = Uo - Co/C*uco
# current starting value
Uo/R2
# discharge voltage
ud = C*Uo/(C+Co)
Uo/R2, ud
from bokeh.plotting import figure, output_notebook, show
from bokeh.layouts import column
from bokeh.palettes import RdYlGn4
output_notebook()
p = figure(plot_width=800, plot_height=400)
p.line(t, uco, color="firebrick", legend = "cap charging")
p.line(t, uc, legend = "cap discharging")
p.xaxis.axis_label = "time [s]"
p.xaxis.axis_label_text_font_size = "10pt"
p.yaxis.axis_label = "voltage [V]"
p.yaxis.axis_label_text_font_size = "10pt"
p2 = figure(plot_width=800, plot_height=400)
p2.line(t, i, color='#1a9641')
p2.xaxis.axis_label = "time [s]"
p2.xaxis.axis_label_text_font_size = "10pt"
p2.yaxis.axis_label = "current [A]"
p2.yaxis.axis_label_text_font_size = "10pt"
show(column(p, p2))
# Adjustment of parameters for case 2
L = 1e-2 # interesting values for L: 1e-3 (very little influence); 5e-3 (small oscillation); 1e-2 bigger oscillation
t2 = np.linspace(0,10e-4,1000)
# Case 2: two capacitors, resistor and inductor
def dx2(y, t):
    """ODE right-hand side for Case 2 (two capacitors, resistor, inductor).

    State vector ``y`` = [loop current, test-object capacitor voltage].
    Uses the module-level circuit constants L, Uo, Co, C and R2.
    """
    current, cap_v = y[0], y[1]
    d_current = 1/L*(Uo - Co/C*cap_v - current*R2 - cap_v)
    d_cap_v = 1/Co*current
    return [d_current, d_cap_v]
# initial conditions
Uo = 200
y02 = [0.0, 0.0]
X2 = odeint(dx2, y02, t2)
i2 = X2[:,0]
uco2 = X2[:,1]
uc2 = Uo - Co/C*uco2
p3 = figure(plot_width=800, plot_height=400)
p3.line(t2, uco2, color="firebrick", legend = "cap charging")
p3.line(t2, uc2, legend = "cap discharging")
p3.xaxis.axis_label = "time [s]"
p3.xaxis.axis_label_text_font_size = "10pt"
p3.yaxis.axis_label = "voltage [V]"
p3.yaxis.axis_label_text_font_size = "10pt"
p4 = figure(plot_width=800, plot_height=400)
p4.line(t2, i2, color='#1a9641')
p4.xaxis.axis_label = "time [s]"
p4.xaxis.axis_label_text_font_size = "10pt"
p4.yaxis.axis_label = "current [A]"
p4.yaxis.axis_label_text_font_size = "10pt"
show(column(p3, p4))
# Adjustment of parameters for case 3
L = 1e-3 # interesting values for L: 1e-3 (very little influence); 5e-3 (small oscillation); 1e-2 bigger oscillation
t3 = np.linspace(0,10e-4,1000)
# Case 3: circuit for the impulse withstand tester
def dx(y, t):
    """ODE right-hand side for Case 3 (full impulse withstand tester).

    State vector ``y`` = [loop current i2, object voltage uco,
    generator capacitor voltage uc]. Uses the module-level circuit
    constants L, R1, R2, C and Co.
    """
    loop_i, obj_v, gen_v = y[0], y[1], y[2]
    d_loop_i = 1/L*(gen_v - loop_i*R2 - obj_v)
    d_obj_v = 1/Co*loop_i
    d_gen_v = -1/C*(gen_v/R1 + loop_i)
    return [d_loop_i, d_obj_v, d_gen_v]
# initial conditions
# capacitor voltage [V]
Uo = 200
# vector of the initial conditions
y0 = [0.0, 0.0, Uo]
#from scipy.integrate import odeint, ode, romb, cumtrapz
X = odeint(dx, y0, t3)
i2 = X[:,0]
uco = X[:,1]
uc = X[:,2]
#output_notebook()
p5 = figure(plot_width=800, plot_height=400)
p5.line(t3, uco, color="firebrick", legend ="Object")
p5.xaxis.axis_label = "time [s]"
p5.xaxis.axis_label_text_font_size = "10pt"
p5.yaxis.axis_label = "voltage [V]"
p5.yaxis.axis_label_text_font_size = "10pt"
p7 = figure(plot_width=800, plot_height=400)
p7.line(t3, uc, legend = "Main capacitor")
p7.xaxis.axis_label = "time [s]"
p7.xaxis.axis_label_text_font_size = "10pt"
p7.yaxis.axis_label = "voltage [V]"
p7.yaxis.axis_label_text_font_size = "10pt"
p6 = figure(plot_width=800, plot_height=400)
p6.line(t3, i2, color='#1a9641')
p6.xaxis.axis_label = "time [s]"
p6.xaxis.axis_label_text_font_size = "10pt"
p6.yaxis.axis_label = "current [A]"
p6.yaxis.axis_label_text_font_size = "10pt"
show(column(p5, p7, p6))
# Capacitor discharging to a resistor
def dk(y, t):
    """ODE right-hand side for a capacitor discharging into resistor R1.

    ``y[0]`` is the capacitor voltage; decays exponentially with time
    constant R1*C (constants defined at module level).
    """
    voltage = y[0]
    decay = -1/(R1*C)*voltage
    return [decay]
y03 = [200.0]
X5 = odeint(dk, y03, t3)
u = X5[:,0]
p7 = figure(plot_width=800, plot_height=400, x_range = (0,4e-5))
p7.line(t3, u, color="firebrick")
p7.xaxis.axis_label = "time [s]"
p7.xaxis.axis_label_text_font_size = "10pt"
p7.yaxis.axis_label = "voltage [V]"
p7.yaxis.axis_label_text_font_size = "10pt"
show(p7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add some parameters in order to generate a dataset
Step3: An initial approach to semi-supervised learning
Step4: Create the labeled dataset, keeping 1% of the labels and setting the rest to NaN.
Step5: Plot the dataset with its true labels and the available (unmasked) labels
Step6: The initial try at classifying the data, using logistic regression
Step7: Let's take a look at the semi-supervised approach
|
<ASSISTANT_TASK:>
Python Code:
%run -i initilization.py
from pyspark.sql import functions as F
from pyspark.ml import clustering
from pyspark.ml import feature
from pyspark.sql import DataFrame
from pyspark.sql import Window
from pyspark.ml import Pipeline
from pyspark.ml import classification
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator
from shared import Plot2DGraphs, create_dummy_data
from semisupervised import batch_generative_model
mean_1 = [3.0, 3.0]
std_1 = [2, 2]
mean_2 = [-3.0, -3.0]
std_2 = [1. , 1.0]
n_1 = 300
n_2 = 300
n = [n_1, n_2]
mean = [mean_1, mean_2]
std = [std_1, std_2]
def compute_error_rate(data_frame, truth_label='real_label', found_label='prediction'):
df_stats = (data_frame
.groupBy([truth_label, found_label])
.agg(F.count('prediction').alias('Prediction Count'))
)
n = (df_stats
.select(F.sum(F.col('Prediction Count')).alias('n'))
.collect()[0]['n']
)
wrong_guess = (df_stats
.filter((F.col(truth_label) != F.col(found_label)))
.select(F.sum(F.col('Prediction Count')).alias('errors'))
.collect()[0]['errors']
)
df_stats.show()
print(n)
print(wrong_guess)
print('Error-rate: {}'.format(wrong_guess/n))
tester = create_dummy_data.create_labeled_data_with_clusters(n, mean, std, 0.01)
df_tester = spark.createDataFrame(tester)
Plot2DGraphs.plot_known_and_unknown_data(tester)
df_train = df_tester.filter((F.col('used_label') != np.NaN))
df_test = df_tester.filter((F.col('used_label') == np.NaN))
vec_assembler = feature.VectorAssembler(
inputCols=['x','y'],
outputCol='features')
lg = classification.LogisticRegression(
featuresCol=vec_assembler.getOutputCol(),
labelCol='used_label')
pipeline = Pipeline(stages=[vec_assembler, lg])
# CrossValidation gets build here!
param_grid = (ParamGridBuilder()
.addGrid(lg.regParam, [0.1, 0.01])
.build()
)
evaluator = BinaryClassificationEvaluator(
rawPredictionCol=lg.getRawPredictionCol(),
labelCol=lg.getLabelCol())
cross_validator = CrossValidator(
estimator=pipeline,
estimatorParamMaps=param_grid,
evaluator=evaluator,
numFolds=3)
cross_validator_model = cross_validator.fit(df_train)
df_without_semisupervised = cross_validator_model.transform(df_test)
Plot2DGraphs.plot_known_and_unknown_data(
df_without_semisupervised.toPandas(),
labelCol='prediction')
compute_error_rate(df_without_semisupervised)
df_output = batch_generative_model.semi_supervised_batch_single_classifier_generate_approach(df_tester,['x','y'])
df_output.limit(5).toPandas()
compute_error_rate(df_output)
Plot2DGraphs.plot_known_and_unknown_data(df_output.toPandas(), labelCol='prediction')
df = spark.read.parquet('/home/svanhmic/workspace/data/DABAI/sparkdata/parquet/double_helix.parquet/')
df.write.csv('/home/svanhmic/workspace/data/DABAI/sparkdata/csv/double_helix.csv/')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: DB2 Extensions
Step2: <a id='top'></a>
Step3: If you connected to the SAMPLE database, you will have the EMPLOYEE and DEPARTMENT tables available to you. However, if you are connecting to a different database, you will need to execute the next command to populate the tables for you. Note, if you run this command and the two tables already exist, the tables will not be replaced. So don't worry if you execute this command by mistake.
Step4: Requesting data from DB2 using the standard %sql (ibm_db) interface is relatively straight-forward. We just need to place the SQL in the command and execute it to get the results.
Step5: Now that we have a working DB2 connection, we will need to set up an OData service to talk to DB2.
Step6: Back to Top
Step7: Back to Top
Step8: The results will show the URL service command used (http
Step9: One drawback of OData is that we don't get the actual error text returned. We know that the error code is, but the message isn't that descriptive. Using the %sql (DB2) command, we can find out that the table doesn't exist.
Step10: Back to Top
Step11: To limit the results from a OData request, you must add the \$top=x modifier at the end of the service request. The format then becomes
Step12: The last example illustrates two additional features of the %odata command. First, you can span statements over multiple lines by using the backslash character ('\'). You could also use the %%odata command to do this without backslashes, but it unfortunately will not allow for variable substitution. The current settings being used by OData can be found by issuing the SETTINGS command.
Step13: You can also refer to these values by using the settings['name'] variable. So the DROP statement just took the current DATABASE and SCHEMA settings and deleted the definition for the EMPLOYEE table. You could have done this directly with
Step14: And this command will show the connection service being created for us.
Step15: Back to Top
Step16: You can use this URL to directly access the results through a browser, or any application that can read the results returned by the OData gateway. The print statement below will display the URL as an active link. Click on that to see the results in another browser window.
Step17: When a URL is generated, we need to append the \$format=json tag at the end to tell the OData service and the browser how to handle the results. When we run OData and RESTful calls from a programming language (like Python), we are able to send information in the header which tells the API how to handle the results and parameters. All of the RESTful calls to the OData gateway use the following header information
Step18: Back to Top
Step19: You will notice that not all of the rows have been displayed. The output has been limited to 10 lines. 5 lines from the start of the answer set and 5 lines from the bottom of the answer set are displayed. If you want to change the maximum number of rows to be displayed, use the MAXROWS setting.
Step20: If you want an unlimited number of rows returned, set maxrows to -1.
Step21: It is better to limit the results from the answer set by using the LIMIT clause in the SELECT statement. LIMIT will force DB2 to stop retrieving rows after "x" number have been read, while the MAXROWS setting will retrieve all rows and then only display a portion of them. The one advantage of MAXROWS is that you see the bottom 5 rows while you would only be able to do that with DB2 if you could reverse sort the output. The current OData implementation does not have the ability to $orderby, so sorting to reverse the output is not possible.
Step22: Example
Step23: Back to Top
Step24: The COUNT(*) function is available as part of a SELECT list and it cannot include any other column names. If you do include other column names they will be ignored.
Step25: One of the unusual behaviors of the COUNT(*) function is that will actually return the entire answer set under the covers. The %odata command strips the count out from the results and doesn't display the rows returned. That is probably not would you expect from this syntax! The COUNT function is better described as the count of physical rows returned. Here is the same example with 5 rows returned and the JSON records.
Step26: One of the recommendations would be not to use the COUNT(*) function to determine the amount of rows that will be retrieved, especially if you expect there to a large of number rows. To minimize the data returned, you can use the form COUNT(column) which will modify the OData request to return the count and ONLY that column in the result set. This is a compromise in terms of the amount of data returned. This example using the -r (raw) flag which results in all of the JSON headers and data to be displayed. The JSON flag (-j) will not display any records.
Step27: Back to Top
Step28: This actually can cause some issues if you try to reuse the connection information that was created with the UNKNOWN_TBL. Since the service could not determine the structure of the table, the service will not return any column information with a select statement. The next SQL statement will create the UNKNOWN_TBL.
Step29: Retrying the SELECT statement will result in 43 rows with no columns returned!
Step30: To correct this situation, you need to DROP the connect that the %odata program is using and reissue the SELECT statement.
Step31: Now you can try the SQL statement again.
Step32: Back to Top
Step33: The datatypes are not the same as what one expect from a relational database. You get generic information on the character columns (String), and the numbers (Int16, Decimal). The Decimal specification actually contains the number of digits and decimal places but that isn't returned when using the table display.
Step34: Example
Step35: Example
Step36: Example
Step37: Example
Step38: Example
Step39: Converting to OData will mean that the search will look across the entire string, not just the beginning.
Step40: Back to Top
Step41: Back to Top
Step42: We also need to remove the connection information from the system in the event we've run this example before.
Step43: A couple of things about the table design. The salary is NOT NULL, while the BONUS allows for nulls. Unfortunately, the DESCRIBE command only tells us about the columns in the table and their OData data type, and no indication of whether table.
Step44: The initial INSERT will populate the table with valid data. The echo option will show the json document that is sent via the POST command to OData to insert the row.
Step45: Just to make sure things were inserted properly, we retrieve the contents of the table.
Step46: OData (and DB2) will return an error message about our missing SALARY column which requires a value.
Step47: We can try this on the DB2 side as well to get the details of the error.
Step48: Back to Top
Step49: A primary key is required to issue a DELETE command. You also need to make sure that the primary key column does not contain NULLs because a primary key must always contain a value. The following SQL tries to fix the primary key issue.
Step50: Check to see if we can delete the row yet.
Step51: Adding a primary key after the fact won't help because the service URL would have already recorded the information about the table (and the fact it didn't have a primary key at the time). We need to drop our SERVICE URL and generate another one.
Step52: We do a describe on the table and this will force another service URL to be generated for us.
Step53: Trying the DELETE this time will work.
Step54: Deleting the record again still gives you a successful return code. The call always returns a successful status even if the record doesn't exist.
Step55: Back to Top
Step56: At this point we can update their salary.
Step57: We doublecheck the results to make sure we got it right!
Step58: Back to Top
Step59: We also need to drop any service connection you may have created in the past with this table name.
Step60: Now that we have created the view, we can retrieve rows from it just like a standard table.
Step61: You can also create sophisticated VIEWS that can take parameters to adjust the results returned. For instance, consider the following SQL statement which gives me count of employees that work in SYSTEMS departments.
Step62: There are two departments with the name SYSTEMS in them, but there is no easy way to create a view for every possible combination of searches that you may want. Instead what we do is create a table that contains the pattern we want to look for and create the view so that it references this table.
Step63: Now we create a view that access this PATTERN table to do the actual search. Note that values that are inserted into the PATTERN table must have the SQL special characters like % to make sure patterns can be anywhere in the string.
Step64: In order for our view to work properly, we must populate our PATTERN table with a value. To test the view we will use %SYSTEMS% as our first example.
Step65: And now we can test our view by selecting from it.
Step66: Now that we have it working, we can try exactly the same thing but with OData. Our first transaction will update the search key to SERVICE.
Step67: The next OData statement should select the count of employees working in service departments.
|
<ASSISTANT_TASK:>
Python Code:
%run db2odata.ipynb
%run db2.ipynb
%sql connect reset
%sql connect
%sql -sampledata
%sql SELECT * FROM EMPLOYEE
%odata register
%odata RESET TABLE EMPLOYEE
s = %odata -e SELECT lastname, salary from employee where salary > 50000
s = %odata -e SELECT * FROM EMPLOYEE
%odata select * from unknown_table
%sql select * from unknown_table
s = %odata -e -j SELECT * FROM EMPLOYEE LIMIT 1
%odata \
RESET \
DATABASE {odata_settings['database']} \
SCHEMA {odata_settings['schema']} \
TABLE EMPLOYEE
%odata settings
%odata set DATABASE {odata_settings['database']} SCHEMA {odata_settings['schema']}
u = %odata -e select * from employee limit 1
url = %odata -e select * from employee limit 1
print(url)
%odata delete
s = %odata -e SELECT * FROM EMPLOYEE
%odata set maxrows 10
%odata set maxrows -1
%odata select * from employee
%odata set maxrows 10
s = %odata -e SELECT * FROM EMPLOYEE LIMIT 5
s = %odata -e SELECT FIRSTNME, LASTNAME FROM EMPLOYEE LIMIT 5
s = %odata -e SELECT COUNT(*) FROM EMPLOYEE LIMIT 1
s = %odata -e -r SELECT COUNT(*) FROM EMPLOYEE LIMIT 5
s = %odata -e -r SELECT COUNT(EMPNO) FROM EMPLOYEE LIMIT 5
%sql -q DROP TABLE UNKNOWN_TBL
%odata RESET TABLE UNKNOWN_TBL
s = %odata -e SELECT * FROM UNKNOWN_TBL
%sql CREATE TABLE UNKNOWN_TBL AS (SELECT * FROM EMPLOYEE) WITH DATA
s = %odata -e SELECT * FROM UNKNOWN_TBL
%odata RESET TABLE UNKNOWN_TBL
s = %odata -e SELECT * FROM UNKNOWN_TBL
%odata DESCRIBE EMPLOYEE
s = %odata -e SELECT EMPNO, WORKDEPT, SALARY FROM EMPLOYEE WHERE SALARY < 40000
s = %odata -e SELECT EMPNO, WORKDEPT, SALARY FROM EMPLOYEE WHERE SALARY < 40000 AND WORKDEPT = 'E21'
s = %odata -e \
SELECT EMPNO, WORKDEPT, SALARY \
FROM EMPLOYEE \
WHERE SALARY < 40000 AND WORKDEPT = 'E21' OR WORKDEPT = 'E11'
s = %odata -e \
SELECT EMPNO, WORKDEPT, SALARY \
FROM EMPLOYEE \
WHERE SALARY < 40000 AND (WORKDEPT = 'E21' OR WORKDEPT = 'E11')
s = %odata -e SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%AA%'
%sql SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%ON'
s = %odata -e SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%ON'
s = %odata -e SELECT * FROM EMPLOYEE LIMIT 5
%%sql -q
DROP TABLE TESTODATA;
CREATE TABLE TESTODATA
(
EMPNO INT NOT NULL,
LASTNAME VARCHAR(10) NOT NULL,
SALARY INT NOT NULL,
BONUS INT
);
%sql select * from testodata
%odata -e select * from testodata
%odata RESET TABLE TESTODATA
%odata -e DESCRIBE TESTODATA
%odata -e INSERT INTO TESTODATA(EMPNO, LASTNAME, SALARY, BONUS) VALUES (1,'Fred',10000,1000)
%odata SELECT * FROM TESTODATA
%odata -e INSERT INTO TESTODATA(EMPNO, LASTNAME, BONUS) VALUES (2,'Wilma',50000)
%sql INSERT INTO TESTODATA(EMPNO, LASTNAME, BONUS) VALUES (2,'Wilma',50000)
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%sql ALTER TABLE TESTODATA ADD CONSTRAINT PKTD PRIMARY KEY (EMPNO)
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%odata RESET TABLE TESTODATA
%odata DESCRIBE TESTODATA
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%odata -e DELETE FROM TESTODATA WHERE EMPNO=2
%odata -e \
INSERT INTO TESTODATA(EMPNO, LASTNAME, SALARY, BONUS) \
VALUES (1,'Fred',10000,1000)
%odata -e UPDATE TESTODATA SET BONUS=2000 WHERE EMPNO=1
%odata SELECT * FROM TESTODATA
%%sql
CREATE OR REPLACE VIEW EMPDEPT AS
(
SELECT LASTNAME, DEPTNAME
FROM EMPLOYEE E, DEPARTMENT D
WHERE E.WORKDEPT = D.DEPTNO
)
%odata RESET TABLE EMPDEPT
%odata SELECT LASTNAME, DEPTNAME FROM EMPDEPT LIMIT 5
%%sql
SELECT
COUNT(*)
FROM
EMPLOYEE E, DEPARTMENT D
WHERE
E.WORKDEPT = D.DEPTNO
AND D.DEPTNAME LIKE '%SYSTEMS%'
%%sql -q
DROP TABLE PATTERN;
CREATE TABLE PATTERN
(
PATTERN_NUMBER INT NOT NULL PRIMARY KEY,
SEARCH VARCHAR(16)
);
%odata RESET TABLE EMPDEPT
%odata RESET TABLE PATTERN
%%sql
CREATE OR REPLACE VIEW EMPDEPT AS
(
SELECT
COUNT(*) AS COUNT
FROM
EMPLOYEE E, DEPARTMENT D
WHERE
E.WORKDEPT = D.DEPTNO
AND D.DEPTNAME LIKE
(
SELECT SEARCH FROM PATTERN WHERE PATTERN_NUMBER=1
)
);
%sql INSERT INTO PATTERN VALUES(1,'%SYSTEMS%')
%sql SELECT * FROM EMPDEPT
%odata UPDATE PATTERN SET SEARCH = '%SERVICE%' WHERE PATTERN_NUMBER = 1
%odata SELECT * FROM EMPDEPT
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Each of these lines contains 6 semi-colon delimited columns
Step2: As seen above, the lines are Unicode, but in anticipation of necessary transformations the timestamp and values will need to be cast to appropriate types. We'll need to create a function that takes each line as an argument and returns a 4-tuple (quadruple?), organized to facilitate intuitive indexing. Let's pick the following ordering
Step3: Metrics aren't reported continuously, nor are the monitoring systems flawless. We need to assemble a unique set (dictionary) of metrics for the pivot, but they must be ordered to make sure time series analysis isn't distorted.
Step4: Now we create an ordered dictionary to preserve the metric (and consequently, column) ordering. If we did not create this OrderedDict, the keys' ordering may be permuted. This will render ML techniques useless.
Step5: The two functions below are adapted from user patricksurry's answer to this Stack Overflow question
Step6: We need to perform an aggregation by key. This operation takes two functions as arguments
Step7: Now we need to impose the structure of our OrderedDict on each aggregated key, value pair. We create a new function to copy our canonical dictionary (of ordered keys, and 0.0 values) and update it with the dictionaries created in the aggregateByKey step.
Step8: Let's take a look at the results.
Step9: To sort the entire RDD, we use a sortByKey.
|
<ASSISTANT_TASK:>
Python Code:
from pyspark import SparkConf, SparkContext
from collections import OrderedDict
partitions = 48
parcsv = sc.textFile("/lustre/janus_scratch/dami9546/lustre_timeseries.csv", partitions)
parcsv.take(5)
filtered = parcsv.filter(lambda line: len(line.split(';')) == 6)
def cast(line):
try:
val = float(str(line.split(';')[2]))
except:
val = 0.0
return (int(line.split(';')[5]), line.split(';')[0],
line.split(';')[1], val)
parsed = filtered.map(cast)
columns = parsed.map(lambda x: x[2]).distinct().collect()
basedict = dict((metric, 0.0) for metric in columns)
ordered = sc.broadcast(OrderedDict(sorted(basedict.items(), key=lambda y: y[0])))
def combine(u1, u2):
u1.update(u2)
return u1
def sequential(u, v):
if not u:
u = {}
u[v[2]] = v[3]
return u
aggregated = parsed.keyBy(lambda row: (row[0], row[1])).aggregateByKey(
None, sequential, combine)
def mergedicts(new):
tmp = ordered.value.copy()
tmp.update(new[1])
return new[0], tmp
pivoted = aggregated.map(mergedicts)
final_ordered = pivoted.takeOrdered(10, key=lambda x: x[0])
final_ordered[0][0]
final_sorted = pivoted.sortByKey(keyfunc= lambda k: k[0])
final_dict = final_sorted.map(lambda row: row[1].values())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Explore eCommerce data and identify duplicate records
Step2: Next examine how many rows are in the table.
Step3: Now take a quick at few rows of data in the table.
Step4: Identify duplicate rows
Step5: As you can see there are quite a few "duplicate" records (615) when analyzed with these parameters.
Step6: The query returns zero records indicating no duplicates exist.
Step7: The next query shows total unique visitors(fullVisitorID) by the referring site (channelGrouping)
Step8: To find deeper insights in the data, the next query lists the five products with the most views (product_views) from unique visitors. The query counts number of times a product (v2ProductName) was viewed (product_views), puts the list in descending order, and lists the top 5 entries
Step9: Now expand your previous query to include the total number of distinct products ordered and the total number of total units ordered (productQuantity)
Step10: Lastly, expand the query to include the average amount of product per order (total number of units ordered/total number of orders, or SUM(productQuantity)/COUNT(productQuantity)).
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
PROJECT = !gcloud config get-value project
PROJECT = PROJECT[0]
%env PROJECT=$PROJECT
pd.options.display.max_columns = 50
%%bigquery --project $PROJECT
#standardsql
SELECT *
EXCEPT
(table_catalog, table_schema, is_generated, generation_expression, is_stored,
is_updatable, is_hidden, is_system_defined, is_partitioning_column, clustering_ordinal_position)
FROM `data-to-insights.ecommerce.INFORMATION_SCHEMA.COLUMNS`
WHERE table_name="all_sessions_raw"
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*)
FROM `data-to-insights.ecommerce.all_sessions_raw`
%%bigquery --project $PROJECT
#standardSQL
SELECT *
FROM `data-to-insights.ecommerce.all_sessions_raw`
LIMIT 7
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS num_duplicate_rows,
*
FROM `data-to-insights.ecommerce.all_sessions_raw`
GROUP BY fullvisitorid,
channelgrouping,
time,
country,
city,
totaltransactionrevenue,
transactions,
timeonsite,
pageviews,
sessionqualitydim,
date,
visitid,
type,
productrefundamount,
productquantity,
productprice,
productrevenue,
productsku,
v2productname,
v2productcategory,
productvariant,
currencycode,
itemquantity,
itemrevenue,
transactionrevenue,
transactionid,
pagetitle,
searchkeyword,
pagepathlevel1,
ecommerceaction_type,
ecommerceaction_step,
ecommerceaction_option
HAVING num_duplicate_rows > 1;
%%bigquery --project $PROJECT
#standardSQL
SELECT fullvisitorid, # the unique visitor ID
visitid, # a visitor can have multiple visits
date, # session date stored as string YYYYMMDD
time, # time of the individual site hit (can be 0 or more)
v2productname, # not unique since a product can have variants like Color
productsku, # unique for each product
type, # visit and/or event trigger
ecommerceaction_type, # maps to ‘add to cart', ‘completed checkout'
ecommerceaction_step,
ecommerceaction_option,
transactionrevenue, # revenue of the order
transactionid, # unique identifier for revenue bearing transaction
count(*) AS row_count
FROM `data-to-insights.ecommerce.all_sessions`
GROUP BY 1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
HAVING row_count > 1 # find duplicates
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
count(DISTINCT fullvisitorid) AS unique_visitors
FROM `data-to-insights.ecommerce.all_sessions`;
%%bigquery --project $PROJECT
#standardSQL
SELECT count(DISTINCT fullvisitorid) AS unique_visitors,
channelgrouping
FROM `data-to-insights.ecommerce.all_sessions`
GROUP BY 2
ORDER BY 2 DESC;
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
( v2productname ) AS ProductName
FROM `data-to-insights.ecommerce.all_sessions`
WHERE type = 'PAGE'
GROUP BY v2productname
ORDER BY product_views DESC
LIMIT 5;
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
count(productquantity) AS orders,
sum(productquantity) AS quantity_product_ordered,
v2productname
FROM `data-to-insights.ecommerce.all_sessions`
WHERE type = 'PAGE'
GROUP BY v2productname
ORDER BY product_views DESC
LIMIT 5;
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
count(productquantity) AS orders,
sum(productquantity) AS quantity_product_ordered,
sum(productquantity) / Count(productquantity) AS avg_per_order,
v2productname AS productName
FROM `data-to-insights.ecommerce.all_sessions`
WHERE type = 'PAGE'
GROUP BY v2productname
ORDER BY product_views DESC
LIMIT 5;
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Patient Data Analysis
Step2: Reaction Time & Accuracy
Step4: Does the drift rate depend on stimulus type?
Step5: Convergence Checks
Step6: PASS - No problematic patterns, such as drifts or large jumps, can be in any of the traces above. Autocorrelation also drops to zero quite quickly when considering past samples - which is what we want.
Step7: PASS - Formal testing reveals no convergence problems; Gelman-Rubin R statistic values for all model variables fall within the desired range ($0.98$ to $1.02$)
Step10: The drift rate for CP is significantly lower than both SS and US; no significant difference detected for CS
Step11: Does the stimulus type affect the distance between the two boundaries (threshold)?
Step12: Convergence checks
Step13: Threshold analysis
Step14: Threshold for US is significantly larger than both SS & CS
Step15: Lumped Model
|
<ASSISTANT_TASK:>
Python Code:
Environment setup
%matplotlib inline
%cd /lang_dec
import warnings; warnings.filterwarnings('ignore')
import hddm
import numpy as np
import matplotlib.pyplot as plt
from utils import model_tools
# Import patient data (as pandas dataframe)
patients_data = hddm.load_csv('/lang_dec/data/patients_clean.csv')
us = patients_data.loc[patients_data['stim'] == 'US']
ss = patients_data.loc[patients_data['stim'] == 'SS']
cp = patients_data.loc[patients_data['stim'] == 'CP']
cs = patients_data.loc[patients_data['stim'] == 'CS']
plt.boxplot([ss.rt.values, cp.rt.values, cs.rt.values, us.rt.values],
labels=('SS', 'CP', 'CS', 'US'),)
plt.title('Comparison of Reaction Time Differences Between Stimuli Groups')
plt.show()
ss_accuracy = (len([x for x in ss.response.values if x >= 1]) / len(ss.response.values)) * 100
cp_accuracy = (len([x for x in cp.response.values if x >= 1]) / len(cp.response.values)) * 100
cs_accuracy = (len([x for x in cs.response.values if x >= 1]) / len(cs.response.values)) * 100
us_accuracy = (len([x for x in us.response.values if x >= 1]) / len(us.response.values)) * 100
print("SS Accuracy: " + str(ss_accuracy) + "%")
print("CP Accuracy: " + str(cp_accuracy) + "%")
print("CS Accuracy: " + str(cs_accuracy) + "%")
print("US Accuracy: " + str(us_accuracy) + "%")
plt.bar([1,2,3,4],
[ss_accuracy, cp_accuracy, cs_accuracy, us_accuracy])
Plot Drift Diffusion Model for controls
patients_model = hddm.HDDM(patients_data, depends_on={'v': 'stim'}, bias=True)
patients_model.find_starting_values()
patients_model.sample(9000, burn=200, dbname='language_decision/models/patients', db='txt')
patients_model.plot_posteriors()
models = []
for i in range(5):
m = hddm.HDDM(patients_data, depends_on={'v': 'stim'})
m.find_starting_values()
m.sample(6000, burn=20)
models.append(m)
model_tools.check_convergence(models)
patients_stats = patients_model.gen_stats()
print("Threshold (a) Mean: " + str(patients_stats['mean']['a']) + " (std: " + str(patients_stats['std']['a']) + ")")
print("Non-Decision (t) Mean: " + str(patients_stats['mean']['t']) + " (std: " + str(patients_stats['std']['t']) + ")")
print("Bias (z) Mean: " + str(patients_stats['mean']['z']) + " (std: " + str(patients_stats['std']['z']) + ")")
print("SS Mean Drift Rate: " + str(patients_stats['mean']['v(SS)']) + " (std: " + str(patients_stats['std']['v(SS)']) + ")")
print("CP Mean Drift Rate: " + str(patients_stats['mean']['v(CP)']) + " (std: " + str(patients_stats['std']['v(CP)']) + ")")
print("CS Mean Drift Rate: " + str(patients_stats['mean']['v(CS)']) + " (std: " + str(patients_stats['std']['v(CS)']) + ")")
print("US Mean Drift Rate: " + str(patients_stats['mean']['v(US)']) + " (std: " + str(patients_stats['std']['v(US)']) + ")")
v_SS, v_CP, v_CS, v_US = patients_model.nodes_db.node[['v(SS)', 'v(CP)', 'v(CS)', 'v(US)']]
hddm.analyze.plot_posterior_nodes([v_SS, v_CP, v_CS, v_US])
print('P(SS > US) = ' + str((v_SS.trace() > v_US.trace()).mean()))
print('P(CP > SS) = ' + str((v_CP.trace() > v_SS.trace()).mean()))
print('P(CS > SS) = ' + str((v_CS.trace() > v_SS.trace()).mean()))
print('P(CP > CS) = ' + str((v_CP.trace() > v_CS.trace()).mean()))
print('P(CP > US) = ' + str((v_CP.trace() > v_US.trace()).mean()))
print('P(CS > US) = ' + str((v_CS.trace() > v_US.trace()).mean()))
Distribution for the non-decision time t
time_nondec = patients_model.nodes_db.node[['t']]
hddm.analyze.plot_posterior_nodes(time_nondec)
Distribution of bias z
z = patients_model.nodes_db.node[['z']]
hddm.analyze.plot_posterior_nodes(z)
patients_model_threshold = hddm.HDDM(patients_data, depends_on={'v': 'stim', 'a': 'stim'}, bias=True)
patients_model_threshold.find_starting_values()
patients_model_threshold.sample(10000, burn=200, dbname='language_decision/models/patients_threshold', db='txt')
models_threshold = []
for i in range(5):
m = hddm.HDDM(patients_data, depends_on={'v': 'stim', 'a': 'stim'})
m.find_starting_values()
m.sample(6000, burn=20)
models_threshold.append(m)
model_tools.check_convergence(models_threshold)
a_SS, a_CP, a_CS, a_US = patients_model_threshold.nodes_db.node[['a(SS)', 'a(CP)', 'a(CS)', 'a(US)']]
hddm.analyze.plot_posterior_nodes([a_SS, a_CP, a_CS, a_US])
print('P(SS > US) = ' + str((a_SS.trace() > a_US.trace()).mean()))
print('P(SS > CS) = ' + str((a_SS.trace() > a_CS.trace()).mean()))
print('P(CP > SS) = ' + str((a_CP.trace() > a_SS.trace()).mean()))
print('P(CP > CS) = ' + str((a_CP.trace() > a_CS.trace()).mean()))
print('P(CP > US) = ' + str((a_CP.trace() > a_US.trace()).mean()))
print('P(CS > US) = ' + str((a_CS.trace() > a_US.trace()).mean()))
print("a(US) mean: " + str(a_US.trace().mean()))
print("a(SS) mean: " + str(a_SS.trace().mean()))
print("a(CS) mean: " + str(a_CS.trace().mean()))
print("a(CP) mean: " + str(a_CP.trace().mean()))
patients_model_lumped = hddm.HDDM(patients_data)
patients_model_lumped.find_starting_values()
patients_model_lumped.sample(10000, burn=200, dbname='language_decision/models/patients_lumped', db='txt')
patients_model_lumped.plot_posteriors()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: gdsfactory in 5 minutes
Step3: Lets define a ring function that also accepts other component specs for the subcomponents (straight, coupler, bend)
Step4: How do you customize components?
Step6: Netlist driven flow
Step7: Mask
Step8: Make sure you save the GDS with metadata so when the chip comes back you remember what you put on it
|
<ASSISTANT_TASK:>
Python Code:
from typing import Optional
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.coupler90 import coupler90 as coupler90function
from gdsfactory.components.coupler_straight import (
coupler_straight as coupler_straight_function,
)
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.cross_section import strip
from gdsfactory.snap import assert_on_2nm_grid
from gdsfactory.types import ComponentSpec, CrossSectionSpec
@gf.cell
def coupler_ring(
gap: float = 0.2,
radius: float = 5.0,
length_x: float = 4.0,
coupler90: ComponentSpec = coupler90function,
bend: Optional[ComponentSpec] = None,
straight: ComponentSpec = straight_function,
coupler_straight: ComponentSpec = coupler_straight_function,
cross_section: CrossSectionSpec = strip,
bend_cross_section: Optional[CrossSectionSpec] = None,
**kwargs
) -> Component:
rCoupler for ring.
Args:
gap: spacing between parallel coupled straight waveguides.
radius: of the bends.
length_x: length of the parallel coupled straight waveguides.
coupler90: straight coupled to a 90deg bend.
bend: bend spec.
coupler_straight: two parallel coupled straight waveguides.
cross_section: cross_section spec.
bend_cross_section: optional bend cross_section spec.
kwargs: cross_section settings for bend and coupler.
.. code::
2 3
| |
\ /
\ /
---=========---
1 length_x 4
bend = bend or bend_euler
c = Component()
assert_on_2nm_grid(gap)
# define subcells
coupler90_component = gf.get_component(
coupler90,
gap=gap,
radius=radius,
bend=bend,
cross_section=cross_section,
bend_cross_section=bend_cross_section,
**kwargs
)
coupler_straight_component = gf.get_component(
coupler_straight,
gap=gap,
length=length_x,
cross_section=cross_section,
straight=straight,
**kwargs
)
# add references to subcells
cbl = c << coupler90_component
cbr = c << coupler90_component
cs = c << coupler_straight_component
# connect references
y = coupler90_component.y
cs.connect(port="o4", destination=cbr.ports["o1"])
cbl.reflect(p1=(0, y), p2=(1, y))
cbl.connect(port="o2", destination=cs.ports["o2"])
c.add_port("o1", port=cbl.ports["o3"])
c.add_port("o2", port=cbl.ports["o4"])
c.add_port("o3", port=cbr.ports["o3"])
c.add_port("o4", port=cbr.ports["o4"])
c.auto_rename_ports()
return c
coupler = coupler_ring()
coupler
import gdsfactory as gf
@gf.cell
def ring_single(
    gap: float = 0.2,
    radius: float = 10.0,
    length_x: float = 4.0,
    length_y: float = 0.6,
    coupler_ring: ComponentSpec = coupler_ring,
    straight: ComponentSpec = straight_function,
    bend: ComponentSpec = bend_euler,
    cross_section: ComponentSpec = "strip",
    **kwargs
) -> gf.Component:
    """Returns a single ring.

    ring coupler (cb: bottom) connects to two vertical straights (sl: left, sr: right),
    two bends (bl, br) and horizontal straight (wg: top).
    (Fix: the docstring delimiters were lost in export; restored here so the
    function is valid Python.)

    Args:
        gap: gap between for coupler.
        radius: for the bend and coupler.
        length_x: ring coupler length.
        length_y: vertical straight length.
        coupler_ring: ring coupler spec.
        straight: straight spec.
        bend: 90 degrees bend spec.
        cross_section: cross_section spec.
        kwargs: cross_section settings

    .. code::

          bl-st-br
          |      |
          sl     sr length_y
          |      |
         --==cb==-- gap
           length_x
    """
    gf.snap.assert_on_2nm_grid(gap)
    c = gf.Component()

    # Bottom ring coupler.
    cb = c << gf.get_component(
        coupler_ring,
        bend=bend,
        straight=straight,
        gap=gap,
        radius=radius,
        length_x=length_x,
        cross_section=cross_section,
        **kwargs
    )
    # Reusable straight/bend subcells for the ring waveguide.
    sy = gf.get_component(
        straight, length=length_y, cross_section=cross_section, **kwargs
    )
    b = gf.get_component(bend, cross_section=cross_section, radius=radius, **kwargs)
    sx = gf.get_component(
        straight, length=length_x, cross_section=cross_section, **kwargs
    )
    sl = c << sy  # left vertical straight
    sr = c << sy  # right vertical straight
    bl = c << b   # top-left bend
    br = c << b   # top-right bend
    st = c << sx  # top horizontal straight

    # Walk around the ring, connecting each piece to the previous one.
    sl.connect(port="o1", destination=cb.ports["o2"])
    bl.connect(port="o2", destination=sl.ports["o2"])
    st.connect(port="o2", destination=bl.ports["o1"])
    br.connect(port="o2", destination=st.ports["o1"])
    sr.connect(port="o1", destination=br.ports["o1"])
    sr.connect(port="o2", destination=cb.ports["o3"])

    c.add_port("o2", port=cb.ports["o4"])
    c.add_port("o1", port=cb.ports["o1"])
    return c
ring = ring_single()
ring
ring_single3 = gf.partial(ring_single, radius=3)
ring_single3()
ring_array = gf.components.ring_single_array(
list_of_dicts=[dict(radius=i) for i in [5, 6, 7]]
)
ring_array
ring_with_grating_couplers = gf.routing.add_fiber_array(ring_array)
ring_with_grating_couplers
import gdsfactory as gf
yaml =
name: sample_different_factory
instances:
bl:
component: pad
tl:
component: pad
br:
component: pad
tr:
component: pad
placements:
tl:
x: 0
y: 200
br:
x: 400
y: 400
tr:
x: 400
y: 600
routes:
electrical:
settings:
separation: 20
layer: [31, 0]
width: 10
links:
tl,e3: tr,e1
bl,e3: br,e1
optical:
settings:
radius: 100
links:
bl,e4: br,e3
mzi = gf.read.from_yaml(yaml)
mzi
import toolz
import gdsfactory as gf
ring_te = toolz.compose(gf.routing.add_fiber_array, gf.components.ring_single)
rings = gf.grid([ring_te(radius=r) for r in [10, 20, 50]])
@gf.cell
def mask(size=(1000, 1000)):
    """Return a mask: a die outline of ``size`` with the ring grid placed inside."""
    component = gf.Component()
    die_outline = gf.components.die(size=size)
    component << die_outline
    component << rings  # module-level grid of ring_single test structures
    return component
m = mask(cache=False)
m
gdspath = m.write_gds_with_metadata(gdspath="mask.gds")
labels_path = gdspath.with_suffix(".csv")
gf.mask.write_labels(gdspath=gdspath, layer_label=(66, 0))
mask_metadata = gf.mask.read_metadata(gdspath=gdspath)
tm = gf.mask.merge_test_metadata(mask_metadata=mask_metadata, labels_path=labels_path)
tm.keys()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: FGSM을 이용한 적대적 샘플 생성
Step2: 사전 훈련된 MobileNetV2 모델과 ImageNet의 클래스(class) 이름들을 불러옵니다.
Step3: 원본 이미지
Step4: 이미지를 살펴봅시다.
Step5: 적대적 이미지 생성하기
Step6: 생성한 왜곡을 시각화해 볼 수 있습니다.
Step7: 왜곡 승수 엡실론(epsilon)을 바꿔가며 다양한 값들을 시도해봅시다. 위의 간단한 실험을 통해 엡실론의 값이 커질수록 네트워크를 혼란시키는 것이 쉬워짐을 알 수 있습니다. 하지만 이는 이미지의 왜곡이 점점 더 뚜렷해진다는 단점을 동반합니다.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = (8, 8)
mpl.rcParams['axes.grid'] = False
pretrained_model = tf.keras.applications.MobileNetV2(include_top=True,
weights='imagenet')
pretrained_model.trainable = False
# ImageNet 클래스 레이블
decode_predictions = tf.keras.applications.mobilenet_v2.decode_predictions
# Helper that preprocesses an image so it can be fed to MobileNetV2.
def preprocess(image):
    """Scale to [0, 1], resize to 224x224 and add a leading batch axis."""
    scaled = tf.cast(image, tf.float32) / 255
    resized = tf.image.resize(scaled, (224, 224))
    return resized[None, ...]
# Helper that extracts the top-1 label from a probability vector.
def get_imagenet_label(probs):
    """Return the (class_id, class_name, confidence) triple of the best prediction."""
    top_predictions = decode_predictions(probs, top=1)
    return top_predictions[0][0]
image_path = tf.keras.utils.get_file('YellowLabradorLooking_new.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg')
image_raw = tf.io.read_file(image_path)
image = tf.image.decode_image(image_raw)
image = preprocess(image)
image_probs = pretrained_model.predict(image)
plt.figure()
plt.imshow(image[0])
_, image_class, class_confidence = get_imagenet_label(image_probs)
plt.title('{} : {:.2f}% Confidence'.format(image_class, class_confidence*100))
plt.show()
loss_object = tf.keras.losses.CategoricalCrossentropy()
def create_adversarial_pattern(input_image, input_label):
    """Return the FGSM perturbation: the sign of the loss gradient w.r.t. the input."""
    with tf.GradientTape() as tape:
        # Watch the input explicitly: it is a plain tensor, not a trainable variable.
        tape.watch(input_image)
        prediction = pretrained_model(input_image)
        loss = loss_object(input_label, prediction)
    # Gradient of the loss function with respect to the input image.
    gradient = tape.gradient(loss, input_image)
    # Take the sign of the gradient to build the perturbation.
    signed_grad = tf.sign(gradient)
    return signed_grad
# 이미지의 레이블을 원-핫 인코딩 처리합니다.
labrador_retriever_index = 208
label = tf.one_hot(labrador_retriever_index, image_probs.shape[-1])
label = tf.reshape(label, (1, image_probs.shape[-1]))
perturbations = create_adversarial_pattern(image, label)
plt.imshow(perturbations[0])
def display_images(image, description):
    """Plot ``image`` titled with ``description`` plus the model's top-1 label and confidence."""
    prediction = pretrained_model.predict(image)
    _, label, confidence = get_imagenet_label(prediction)
    plt.figure()
    plt.imshow(image[0])
    title = '{} \n {} : {:.2f}% Confidence'.format(description,
                                                   label, confidence*100)
    plt.title(title)
    plt.show()
epsilons = [0, 0.01, 0.1, 0.15]
descriptions = [('Epsilon = {:0.3f}'.format(eps) if eps else 'Input')
for eps in epsilons]
for i, eps in enumerate(epsilons):
adv_x = image + eps*perturbations
adv_x = tf.clip_by_value(adv_x, 0, 1)
display_images(adv_x, descriptions[i])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll work in our test directory, where ActivitySim has saved the estimation data bundles.
Step2: Load data and prep model for estimation
Step3: This component actually has a distinct choice model for each person type, so
Step4: Review data loaded from the EDB
Step5: Utility specification
Step6: Chooser data
Step7: Estimate
Step8: Estimated coefficients
Step9: Output Estimation Results
Step10: Write the model estimation report, including coefficient t-statistic and log likelihood
Step11: Next Steps
|
<ASSISTANT_TASK:>
Python Code:
import os
import larch # !conda install larch -c conda-forge # for estimation
import pandas as pd
os.chdir('test')
modelname = "nonmand_tour_freq"
from activitysim.estimation.larch import component_model
model, data = component_model(modelname, return_data=True)
type(model)
model.keys()
data.coefficients['PTYPE_FULL']
data.spec['PTYPE_FULL']
data.chooser_data['PTYPE_FULL']
for k, m in model.items():
m.estimate(method='SLSQP')
model['PTYPE_FULL'].parameter_summary()
from activitysim.estimation.larch import update_coefficients
for k, m in model.items():
result_dir = data.edb_directory/k/"estimated"
update_coefficients(
m, data.coefficients[k], result_dir,
output_file=f"{modelname}_{k}_coefficients_revised.csv",
);
for k, m in model.items():
result_dir = data.edb_directory/k/"estimated"
m.to_xlsx(
result_dir/f"{modelname}_{k}_model_estimation.xlsx",
data_statistics=False,
)
result_dir = data.edb_directory/'PTYPE_FULL'/"estimated"
pd.read_csv(result_dir/f"{modelname}_PTYPE_FULL_coefficients_revised.csv")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.
Step2: Relevant Parameters
Step3: Let's set the exposure time to 1 hr to make the convolution obvious in our 1-day default binary.
Step4: An 'fti_method' parameter exists for each set of compute options and each lc dataset. By default this is set to 'none' - meaning that the exposure times are ignored during b.run_compute().
Step5: Once we set fti_method to be 'oversample', the corresponding 'fti_oversample' parameter(s) become visible. This option defines how many different time-points PHOEBE should sample over the width of the exposure time and then average to return a single flux point. By default this is set to 5.
Step6: Influence on Light Curves
Step7: The phase-smearing (convolution) caused by the exposure time is most evident in areas of the light curve with sharp derivatives, where the flux changes significantly over the course of the single exposure. Here we can see that the 1-hr exposure time significantly changes the observed shapes of ingress and egress as well as the observed depth of the eclipse.
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.2,<2.3"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.add_dataset('lc', times=np.linspace(0,1,101), dataset='lc01')
print(b['exptime'])
b['exptime'] = 1, 'hr'
print(b['fti_method'])
b['fti_method'] = 'oversample'
print(b['fti_oversample'])
b.run_compute(fti_method='none', irrad_method='none', model='fti_off')
b.run_compute(fti_method='oversample', irrad_method='none', model='fit_on')
afig, mplfig = b.plot(show=True, legend=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Solution
Step2: Use matching indices
Step3: Use a library
Step4: Numpy Magic
Step5: Compare methods
|
<ASSISTANT_TASK:>
Python Code:
# Fixed seed so the benchmark inputs are reproducible.
np.random.seed(10)
# Small 2-D point sets (4x2 and 5x2) for correctness spot checks.
p, q = (np.random.rand(i, 2) for i in (4, 5))
# Larger 80-D sets (100x80 and 120x80) used for timing comparisons.
p_big, q_big = (np.random.rand(i, 80) for i in (100, 120))
print(p, "\n\n", q)
def naive(p, q):
    """Pairwise Euclidean distances between rows of p and rows of q, fully looped."""
    n_p, n_q = p.shape[0], q.shape[0]
    result = np.zeros((n_p, n_q))
    for row in range(n_p):
        for col in range(n_q):
            # Sum of squared coordinate differences for this pair of points.
            squared = sum((p[row, k] - q[col, k]) ** 2 for k in range(p.shape[1]))
            result[row, col] = squared
    return np.sqrt(result)
def naive_2(p, q):
    """Pairwise Euclidean distances; the coordinate loop is replaced by a row difference."""
    result = np.zeros((p.shape[0], q.shape[0]))
    for row, p_row in enumerate(p):
        for col, q_row in enumerate(q):
            result[row, col] = np.sum((p_row - q_row) ** 2)
    return np.sqrt(result)
print(rows, end='\n\n')
print(cols)
print(p[rows.ravel()], end='\n\n')
print(q[cols.ravel()])
def with_indices(p, q):
    """Pairwise Euclidean distances via flattened index grids and fancy indexing."""
    n_p, n_q = p.shape[0], q.shape[0]
    rows, cols = np.indices((n_p, n_q))
    diffs = p[rows.ravel(), :] - q[cols.ravel(), :]
    distances = np.sqrt(np.sum(diffs ** 2, axis=1))
    return distances.reshape((n_p, n_q))
def with_indices_2(p, q):
    """Pairwise Euclidean distances via 2-D index grids; no explicit reshape needed."""
    rows, cols = np.indices((p.shape[0], q.shape[0]))
    squared = np.sum((p[rows, :] - q[cols, :]) ** 2, axis=2)
    return np.sqrt(squared)
from scipy.spatial.distance import cdist
def scipy_version(p, q):
    """Pairwise Euclidean distances delegated to scipy's C-optimized cdist."""
    return cdist(p, q)
def tensor_broadcasting(p, q):
    """Pairwise Euclidean distances by broadcasting into a 3-D difference tensor."""
    diff = p[:, np.newaxis, :] - q[np.newaxis, :, :]
    return np.sqrt((diff ** 2).sum(axis=2))
# Time every implementation on the large inputs (IPython %timeit magic).
methods = [naive, naive_2, with_indices, with_indices_2, scipy_version, tensor_broadcasting]
timers = []
for f in methods:
    r = %timeit -o f(p_big, q_big)
    timers.append(r)

plt.figure(figsize=(10,6))
# Plot the best (fastest) run per method, converted to milliseconds.
plt.bar(np.arange(len(methods)), [r.best*1000 for r in timers], log=False) # Set log to True for logarithmic scale
plt.xticks(np.arange(len(methods))+0.2, [f.__name__ for f in methods], rotation=30)
plt.xlabel('Method')
plt.ylabel('Time (ms)')
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2) What genres are most represented in the search results? Edit your previous printout to also display a list of their genres in the format "GENRE_1, GENRE_2, GENRE_3". If there are no genres, print "No genres listed".
Step2: 3) Use a for loop to determine who BESIDES Lil Wayne has the highest popularity rating. Is it the same artist who has the largest number of followers?
Step3: 4) Print a list of Lil's that are more popular than Lil' Kim.
Step4: 5) Pick two of your favorite Lils to fight it out, and use their IDs to print out their top tracks.
Step5: 6) Will the world explode if a musicians swears? Get an average popularity for their explicit songs vs. their non-explicit songs. How many minutes of explicit songs do they have? Non-explicit?
Step6: 7) Since we're talking about Lils, what about Biggies? How many total "Biggie" artists are there? How many total "Lil"s? If you made 1 request every 5 seconds, how long would it take to download information on all the Lils vs the Biggies?
Step7: 8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average?
|
<ASSISTANT_TASK:>
Python Code:
import requests
lil_response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist&type=track&market=US&limit=50')
#print(response.text)
lil_data = lil_response.json()
lil_data.keys()
#print(lil_data)
lil_data['artists'].keys()
lil_artists = lil_data['artists']['items']
for artist in lil_artists:
print("Artist Name:",artist['name'],"|| Artist Popularity Score:",artist['popularity'],"|| Artist ID for reference,later",artist['id'])
for artist in lil_artists:
print("Artist Name:",artist['name'])
if not artist['genres']==[]:
print("Artist Genres:",",".join(artist['genres']))
else:
print("No genres listed.")
artistpopularity=0
for artist in lil_artists:
if not artist['name']=='Lil Wayne':
if artist['popularity'] > artistpopularity:
artistpopularity=artist['popularity']
artistname=artist['name']
print("Artist Name:",artistname, "|| Popularity Score:",artistpopularity)
artistfollowers=0
for artist in lil_artists:
if not artist['name']=='Lil Wayne':
if artist['followers']['total'] > artistfollowers:
artistfollowers=artist['followers']['total']
artistname=artist['name']
print(artistfollowers)
print(artistname)
kimpopularity=0
for artist in lil_artists:
if artist['name']=="Lil' Kim":
kimpopularity=artist['popularity']
for artist in lil_artists:
if kimpopularity < artist['popularity']:
print("Artist Name:",artist['name'], "Popularity Score:",artist['popularity'])
artist_id = "0zn6yzsbWj3EPMgOTqfG5k"
response = requests.get("https://api.spotify.com/v1/artists/" + artist_id + "/top-tracks?country=US")
data = response.json()
tracks_e = data['tracks']
for track in tracks_e:
print(track['name'])
artist_id = "7352aRY2mqSxBZwzUb6LmA"
response = requests.get("https://api.spotify.com/v1/artists/" + artist_id + "/top-tracks?country=US")
data = response.json()
tracks_bowwow = data['tracks']
for track in tracks_bowwow:
print(track['name'])
artist_id = "0zn6yzsbWj3EPMgOTqfG5k"
popularity_of_explicit_e=0
popularity_of_clean_e=0
c_count=0
e_count=0
e_time=0
c_time=0
response = requests.get("https://api.spotify.com/v1/artists/" + artist_id + "/top-tracks?country=US")
data = response.json()
tracks_e = data['tracks']
for track in tracks_e:
if track['explicit']==True:
popularity_of_explicit_e= popularity_of_explicit_e+track['popularity']
e_count=e_count+1
e_time=e_time+track['duration_ms']
else:
popularity_of_clean_e= popularity_of_clean_e+track['popularity']
c_count=c_count+1
c_time=c_time+track['duration_ms']
print("The Average Popularity of Explicit Songs by the Artist Lil E is",popularity_of_explicit_e/e_count)
print("The minutes of Explicit Songs in their album",(e_time/1000)/60,"minutes")
print("The Average Popularity of Clean Songs by the Artist Lil E is",popularity_of_clean_e/c_count)
print("The minutes of Explicit Songs in their album",(c_time/1000)/60,"minutes")
# Repeat the explicit/clean popularity breakdown for Lil Bow Wow.
artist_id = "7352aRY2mqSxBZwzUb6LmA"
popularity_of_explicit_bowwow = 0
popularity_of_clean_bowwow = 0
c_count = 0
e_count = 0
# Fix: reset the duration totals too -- the original reused e_time/c_time
# accumulated for the previous artist, inflating Bow Wow's minute counts.
e_time = 0
c_time = 0
response = requests.get("https://api.spotify.com/v1/artists/" + artist_id + "/top-tracks?country=US")
data = response.json()
tracks_bowwow = data['tracks']
for track in tracks_bowwow:
    if track['explicit'] == True:
        # Fix: accumulate into the Bow Wow totals; the original kept adding to
        # the previous artist's popularity_of_explicit_e / popularity_of_clean_e.
        popularity_of_explicit_bowwow = popularity_of_explicit_bowwow + track['popularity']
        e_count = e_count + 1
        e_time = e_time + track['duration_ms']
    else:
        popularity_of_clean_bowwow = popularity_of_clean_bowwow + track['popularity']
        c_count = c_count + 1
        c_time = c_time + track['duration_ms']
print("The Average Popularity of Explicit Songs by the Artist Lil Bow Wow is", popularity_of_explicit_bowwow/e_count)
print("The minutes of Explicit Songs in their album", (e_time/1000)/60, "minutes")
print("The Average Popularity of Clean Songs by the Artist Lil Bow Wow is", popularity_of_clean_bowwow/c_count)
# Fix: this line reports clean-song minutes, so label it accordingly.
print("The minutes of Clean Songs in their album", (c_time/1000)/60, "minutes")
biggie_artist_count = 0
offset_value = 0
for page in range(0, 100):
biggie_response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist&limit=50&offset=' + str(offset_value) + '')
biggie_data = biggie_response.json()
biggie_artists = biggie_data['artists']['items']
for artist in biggie_artists:
biggie_artist_count = biggie_artist_count + 1
offset_value = offset_value + 50
print("There are in total", biggie_artist_count, "Biggies")
lil_artist_count = 0
offset_value = 0
for page in range(0, 100):
lil_response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist&limit=50&offset=' + str(offset_value) + '')
lil_data = lil_response.json()
lil_artists = lil_data['artists']['items']
for artist in lil_artists:
lil_artist_count = lil_artist_count + 1
offset_value = offset_value + 50
print("There are in total", lil_artist_count, "Lils.")
import requests
big_response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist&type=track&market=US&limit=50')
lil_response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist&type=track&market=US&limit=50')
lil_data = lil_response.json()
big_data = big_response.json()
lil_count=0
lil_popularity=0
big_count=0
big_popularity=0
lil_artists = lil_data['artists']['items']
big_artists = big_data['artists']['items']
for count in lil_artists:
lil_popularity=lil_popularity+count['popularity']
for count in big_artists:
big_popularity=big_popularity+count['popularity']
print ("Average Popularity of Lil's is",lil_popularity/50)
print ("Average Popularity of Biggies's is",big_popularity/50)
if lil_popularity/50>big_popularity/50:
print("Lils are more popular.")
else:
print("Biggies are more popular.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Consider the following $\mathbb{Q}$ automaton
Step2: State $1$ and $3$ are siblings
Step3: Conversely, the following automaton does not have the twins property because state $1$ and state $4$ are siblings but not twins
Step4: When the automaton has no sibling states, it has the twins property.
Step5: In the tropical semiring ($\mathbb{Z}_{\text{min}}$), an automaton is determinizable iff the automaton has the twins property.
Step6: This automaton has the twins property (the two sibling states $1$ and $2$ are twins), so it is determinizable (in $\mathbb{Z}_{\text{min}}$).
Step7: The twins property can also be check in $\mathbb{Z}$
Step8: Or with tuples of weightsets
|
<ASSISTANT_TASK:>
Python Code:
import vcsn
q = vcsn.context('lal_char(ab), q')
def std(e):
return q.expression(e).standard()
a = std('(ab)*')+std('(ab)*')
a
a.has_twins_property()
a = std('(<2>ab)*+(ab)*')
a
a.has_twins_property()
a = std("(aa)*+(ab)*")
a
a.has_twins_property()
%%automaton a
context = "lal_char(abcd), zmin"
$ -> 0
0 -> 1 <1>a
0 -> 2 <2>a
1 -> 1 <3>b
1 -> 3 <5>c
2 -> 2 <3>b
2 -> 3 <6>d
3 -> $
a.determinize()
%%automaton a
context = "letterset<char_letters(abcd)>, z"
$ -> 0
0 -> 1 a
0 -> 2 <2>a
1 -> 1 <3>b
1 -> 3 <5>c
2 -> 2 <3>b
2 -> 3 <6>d
3 -> $
a.has_twins_property()
%%automaton a
context = "lal_char(abc), lat<z,zmin>"
$ -> 0
0 -> 1 <(1, 3)>a
0 -> 2 <(1, 5)>a
1 -> 3 <(4, 8)>b
3 -> $
2 -> 4 <(6, 4)>b
4 -> $
3 -> 1 <(9, 3)>a
4 -> 2 <(6, 7)>a
a.has_twins_property()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and Subset on Individual Contributors
Step2: What proportion of contributions were by blacks, whites, Hispanics, and Asians?
Step3: What proportion of the donors were blacks, whites, Hispanics, and Asians?
Step4: What proportion of the total donation was given by blacks, whites, Hispanics, and Asians?
Step5: What if we estimated by using probabilities for race rather than labels?
Step6: What proportion of the donors were blacks, whites, Hispanics, and Asians?
Step7: What proportion of the total donation was given by blacks, whites, Hispanics, and Asians?
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.read_csv('/opt/names/fec_contrib/contribDB_2000.csv', nrows=100)
df.columns
from ethnicolr import census_ln, pred_census_ln
df = pd.read_csv('/opt/names/fec_contrib/contribDB_2000.csv', usecols=['amount', 'contributor_type', 'contributor_lname', 'contributor_fname', 'contributor_name'])
sdf = df[df.contributor_type=='I'].copy()
rdf2000 = pred_census_ln(sdf, 'contributor_lname', 2000)
rdf2000['year'] = 2000
df = pd.read_csv('/opt/names/fec_contrib/contribDB_2010.csv.zip', usecols=['amount', 'contributor_type', 'contributor_lname', 'contributor_fname', 'contributor_name'])
sdf = df[df.contributor_type=='I'].copy()
rdf2010 = pred_census_ln(sdf, 'contributor_lname', 2010)
rdf2010['year'] = 2010
rdf = pd.concat([rdf2000, rdf2010])
rdf.head(20)
adf = rdf.groupby(['year', 'race']).agg({'contributor_lname': 'count'})
adf.unstack().apply(lambda r: r / r.sum(), axis=1).style.format("{:.2%}")
udf = rdf.drop_duplicates(subset=['contributor_name']).copy()
gdf = udf.groupby(['year', 'race']).agg({'contributor_name': 'count'})
gdf.unstack().apply(lambda r: r / r.sum(), axis=1).style.format("{:.2%}")
bdf = rdf.groupby(['year', 'race']).agg({'amount': 'sum'})
bdf.unstack().apply(lambda r: r / r.sum(), axis=1).style.format("{:.2%}")
rdf['white_count'] = rdf.white
rdf['black_count'] = rdf.black
rdf['api_count'] = rdf.api
rdf['hispanic_count'] = rdf.hispanic
gdf = rdf.groupby(['year']).agg({'white_count': 'sum', 'black_count': 'sum', 'api_count': 'sum', 'hispanic_count': 'sum'})
gdf.apply(lambda r: r / r.sum(), axis=1).style.format("{:.2%}")
udf['white_count'] = udf.white
udf['black_count'] = udf.black
udf['api_count'] = udf.api
udf['hispanic_count'] = udf.hispanic
gdf = udf.groupby(['year']).agg({'white_count': 'sum', 'black_count': 'sum', 'api_count': 'sum', 'hispanic_count': 'sum'})
gdf.apply(lambda r: r / r.sum(), axis=1).style.format("{:.2%}")
rdf['white_amount'] = rdf.amount * rdf.white
rdf['black_amount'] = rdf.amount * rdf.black
rdf['api_amount'] = rdf.amount * rdf.api
rdf['hispanic_amount'] = rdf.amount * rdf.hispanic
gdf = rdf.groupby(['year']).agg({'white_amount': 'sum', 'black_amount': 'sum', 'api_amount': 'sum', 'hispanic_amount': 'sum'}) / 10e6
gdf.style.format("{:0.2f}")
gdf.apply(lambda r: r / r.sum(), axis=1).style.format("{:.2%}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Potential differences and Apparent Resistivities
Step2: 3. Building Pseudosections
Step3: DC pseudo-section app
Step4: 4. Parametric Inversion
|
<ASSISTANT_TASK:>
Python Code:
app = cylinder_app();
display(app)
app = plot_layer_potentials_app()
display(app)
app = MidpointPseudoSectionWidget();
display(app)
app = DC2DPseudoWidget()
display(app)
app = DC2DfwdWidget()
display(app)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read file
Step2: Access data of multiIndex dataframe
Step3: Dataframe that i want to match
Step5: string matching function
Step6: show all stats (Ans) and matching results (algorithm)
|
<ASSISTANT_TASK:>
Python Code:
# system
import os
import sys
# 3rd party lib
import pandas as pd
from sklearn.cluster import KMeans
from fuzzywuzzy import fuzz # stirng matching
# Fix: "verison" misspelled in both output strings.
print('Python version: {}'.format(sys.version))
print('\n############################')
print('Pandas version: {}'.format(pd.show_versions()))
standard_df = pd.read_csv('auto_caption4.csv', names=['cluster_ID','timestamp','event','name'])
print('There are {} clusters in standard_df\n'.format(len(standard_df['cluster_ID'].unique())))
print(standard_df.head(5))
# default is axis=0
standard_df_groupby = standard_df.groupby(['cluster_ID','name']).agg({'name':['count']})
print(standard_df.groupby(['cluster_ID','name']).agg({'name':['count']}))
# get column names
df = standard_df_groupby.loc[0].reset_index()
flat_column_names = []
for level in df.columns:
# tuple to list
flat_column_names.extend(list(level)) # extend(): in-place
# remove duplicate and empty
flat_column_names = filter(None, flat_column_names) # filter empty
flat_column_names = list(set(flat_column_names)) # deduplicate
print('original order: {}'.format(flat_column_names))
# change member order of list due to set is a random order
if flat_column_names[0] == 'count':
myorder = [1,0]
flat_column_names = [flat_column_names[i] for i in myorder]
print('New order: {}'.format(flat_column_names))
standard_df_dict = {}
# Transform multi-index to single index, and update string to dict standard_df_dict
for id_of_cluster in standard_df['cluster_ID'].unique():
print('\n# of cluster: {}'.format(id_of_cluster))
df = standard_df_groupby.loc[id_of_cluster].reset_index()
df.columns = flat_column_names
print(df.sort_values(by=['count'], ascending=False))
standard_df_dict.update({id_of_cluster: df.name.str.cat(sep=' ', na_rep='?')})
print('################################')
print('\nDictionary of swarm data: \n{}'.format(standard_df_dict))
matching_df1 = pd.read_csv('auto_caption5.csv', names=['cluster_ID','timestamp','event','name'])
print('There are {} clusters in standard_df\n'.format(len(matching_df1['cluster_ID'].unique())))
print(matching_df1.head(5))
# default is axis=0
matching_df1_groupby = matching_df1.groupby(['cluster_ID','name']).agg({'name':['count']})
print(matching_df1.groupby(['cluster_ID','name']).agg({'name':['count']}))
# get column names
df = matching_df1_groupby.loc[0].reset_index()
flat_column_names = []
for level in df.columns:
# tuple to list
flat_column_names.extend(list(level)) # extend(): in-place
# remove duplicate and empty
flat_column_names = filter(None, flat_column_names) # filter empty
flat_column_names = list(set(flat_column_names)) # deduplicate
print(flat_column_names)
# change member order of list due to set is a random order
if flat_column_names[0] == 'count':
myorder = [1,0]
flat_column_names = [flat_column_names[i] for i in myorder]
print('New order: {}'.format(flat_column_names))
matching_df1_dict = {}
# Transform multi-index to single index, and update string to dict standard_df_dict
for id_of_cluster in matching_df1['cluster_ID'].unique():
print('\n# of cluster: {}'.format(id_of_cluster))
df = matching_df1_groupby.loc[id_of_cluster].reset_index()
df.columns = flat_column_names
print(df.sort_values(by=['count'], ascending=False))
matching_df1_dict.update({id_of_cluster: df.name.str.cat(sep=' ', na_rep='?')})
print('################################')
print('\nDictionary of swarm data: \n{}'.format(matching_df1_dict))
def matching_two_dicts_of_swarm(standard_dict, matching_dict, res_dict):
    """Match two dictionaries with the same amount of key-value pairs
    and return the matching result, a dict of dicts called res_dict.

    Each cluster of standard_dict is greedily matched to its most similar
    cluster of matching_dict (highest fuzz.ratio on the concatenated name
    strings); every matched key is consumed so it cannot be matched twice.

    Args:
        standard_dict: the reference dict {cluster_id: concatenated names}.
        matching_dict: the dict that we want to match, same structure.
        res_dict: filled in place as {std_id: {matched_id: matched_names}}.

    Returns:
        res_dict, the matching result (also mutated in place).
    """
    pop_list = [k for k, v in matching_dict.items()]
    print(pop_list)
    for i in standard_dict.keys():  # index of standard_dict; a more pythonic way
        threshold = 0
        key = None  # best matching_dict key found for cluster i, if any
        for j in pop_list:  # candidate keys of matching_dict
            f_ratio = fuzz.ratio(standard_dict[i], matching_dict[j])
            if f_ratio > threshold:  # keep only the best match so far
                print('New matching fuzz ratio {} is higher than threshold {}'\
                      .format(f_ratio, threshold))
                key = j  # update key
                threshold = f_ratio  # update threshold value
                print('Update new threshold {}'\
                      .format(threshold))
        if key is not None:
            # Fix: record the matched cluster's own value; the original stored
            # matching_dict[i] (indexed by the standard key), mislabelling the
            # result, and could remove a stale key when no ratio beat 0.
            res_dict.update({i: {key: matching_dict[key]}})
            pop_list.remove(key)  # consume the matched key
    print(res_dict)
    return res_dict
res_dict = {}
res_dict = matching_two_dicts_of_swarm(standard_df_dict, matching_df1_dict, res_dict)
print(res_dict)
std_dict_to_df = pd.DataFrame.from_dict(standard_df_dict, orient='index', columns=['Before: function_name'])
std_dict_to_df['std_cluster_ID'] = std_dict_to_df.index
std_dict_to_df = std_dict_to_df[['std_cluster_ID', 'Before: function_name']]
std_dict_to_df
mtch_df1_dict_to_df = pd.DataFrame.from_dict(matching_df1_dict, orient='index', columns=['Matching function_name'])
mtch_df1_dict_to_df
res_dict_to_df = pd.DataFrame()
res_dict_to_df
res_list = [k for k,v in res_dict.items()]
for key in res_list:
df = pd.DataFrame.from_dict(res_dict[key], orient='index', columns=['After: funciton name']) # res_dict[key]: a dict
df['mtch_cluster_ID'] = df.index
#print(df)
res_dict_to_df = res_dict_to_df.append(df, ignore_index=True) # df.append(): not in-place
res_dict_to_df = res_dict_to_df[['mtch_cluster_ID', 'After: funciton name']]
print(res_dict_to_df.head(5))
final_df = pd.concat([std_dict_to_df, res_dict_to_df], axis=1)
final_df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize an intensity array.
Step2: Below, we retrieve each pixel and then calculate darkness value. The perceived brightness is given by
Step3: Invert along Y-axis to account for some conventions.
|
<ASSISTANT_TASK:>
Python Code:
from PIL import Image
im = Image.open('2d.png')
width, height = im.size
intensity = np.array([[1 for j in range(width)] for i in range(height)])
for x in range(0, height):
for y in range(0, width):
RGB = im.getpixel((y, x))
intensity[x][y] = (0.2126 * (255-RGB[0]) + 0.7152 * (255-RGB[1]) + 0.0722 * (255-RGB[2]))
intensity = intensity[::-1]
np.savetxt('intensity.txt', intensity)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Static Methods
Step2: Our make_car_sound static method does not work on an instance of our Car class because the instance tries to pass a self arg.
Step3: Class Methods
Step8: Inheritance
Step13: Okay, now let's create a truck class
Step18: There's a lot of overlap between the car class and the truck class. In the spirit of DRY (don't repeat yourself), we'll try and abstract away the specific differences between car and truck and instead implement an abstract vehicle class.
Step21: Now we can make the Car and Truck class inherit from the Vehicle class by replacing object in the line class Car(object). The class in parenthesis is the class that is inherited from (object essentially means "no inheritance". We'll discuss exactly why we write that in a bit).
Step22: This is okay, but we're still repeating a lot of code. We'd really like to get rid of all repetition.
Step23: A Vehicle doesn't have a base_sale_price, only the individual child classes like Car and Truck do. The issue is that Vehicle should really be an Abstract Base Class.
Step28: It makes sense to disallow this, as we never meant for vehicles to be used directly. We just wanted to use it to abstract away some common data and behavior. So how do we make a class an ABC? Simple! The abc module contains a metaclass called ABCMeta (metaclasses are a bit outside the scope of this article).
Step37: Now, since vehicle_type is an abstractmethod, we can't directly create an instance of Vehicle. As long as Car and Truck inherit from Vehicle and define vehicle_type, we can instantiate those classes just fine.
Step40: This fits perfectly with our intuition
Step41: Inheritance and the LSP
Step42: That works when we only have two types of animals, but what if we have twenty, or two hundred? That if...elif chain is going to get quite long.
|
<ASSISTANT_TASK:>
Python Code:
class Car(object):
wheels = 4
def __init__(self, make, model):
self.make = make
self.model = model
mustang = Car('Ford', 'Mustang')
print(mustang.wheels)
# 4
print(Car.wheels)
# 4
class Car(object):
...
def make_car_sound():
print('VRooooommmm!')
Car.make_car_sound() # This works
ford = Car()
ford.make_car_sound() # This doesn't
class Car(object):
wheels = 4
@staticmethod
def make_car_sound():
print('VRooooommmm!')
def __init__(self, make, model):
self.make = make
self.model = model
Car.make_car_sound(6)
ford = Car('Ford', 'Falcon')
ford.make_car_sound() # This works now!
class Vehicle(object):
...
wheels = 4
@classmethod
def is_motorcycle(cls):
return cls.wheels == 2
Vehicle.is_motorcycle()
class Car(object):
A car for sale by Jeffco Car Dealership.
Attributes:
wheels: An integer representing the number of wheels the car has.
miles: The integral number of miles driven on the car.
make: The make of the car as a string.
model: The model of the car as a string.
year: The integral year the car was built.
sold_on: The date the vehicle was sold.
def __init__(self, wheels, miles, make, model, year, sold_on):
Return a new Car object.
self.wheels = wheels
self.miles = miles
self.make = make
self.model = model
self.year = year
self.sold_on = sold_on
def sale_price(self):
Return the sale price for this car as a float amount.
if self.sold_on is not None:
return 0.0 # Already sold
return 5000.0 * self.wheels
def purchase_price(self):
Return the price for which we would pay to purchase the car.
if self.sold_on is None:
return 0.0 # Not yet sold
return 8000 - (.10 * self.miles)
...
class Truck(object):
A truck for sale by Jeffco Car Dealership.
Attributes:
wheels: An integer representing the number of wheels the truck has.
miles: The integral number of miles driven on the truck.
make: The make of the truck as a string.
model: The model of the truck as a string.
year: The integral year the truck was built.
sold_on: The date the vehicle was sold.
def __init__(self, wheels, miles, make, model, year, sold_on):
Return a new Truck object.
self.wheels = wheels
self.miles = miles
self.make = make
self.model = model
self.year = year
self.sold_on = sold_on
def sale_price(self):
Return the sale price for this truck as a float amount.
if self.sold_on is not None:
return 0.0 # Already sold
return 5000.0 * self.wheels
def purchase_price(self):
Return the price for which we would pay to purchase the truck.
if self.sold_on is None:
return 0.0 # Not yet sold
return 10000 - (.10 * self.miles)
class Vehicle(object):
A vehicle for sale by Jeffco Car Dealership.
Attributes:
wheels: An integer representing the number of wheels the vehicle has.
miles: The integral number of miles driven on the vehicle.
make: The make of the vehicle as a string.
model: The model of the vehicle as a string.
year: The integral year the vehicle was built.
sold_on: The date the vehicle was sold.
base_sale_price = 0
def __init__(self, wheels, miles, make, model, year, sold_on):
Return a new Vehicle object.
self.wheels = wheels
self.miles = miles
self.make = make
self.model = model
self.year = year
self.sold_on = sold_on
def sale_price(self):
Return the sale price for this vehicle as a float amount.
if self.sold_on is not None:
return 0.0 # Already sold
return 5000.0 * self.wheels
def purchase_price(self):
Return the price for which we would pay to purchase the vehicle.
if self.sold_on is None:
return 0.0 # Not yet sold
return self.base_sale_price - (.10 * self.miles)
class Car(Vehicle):
def __init__(self, wheels, miles, make, model, year, sold_on):
Return a new Car object.
self.wheels = wheels
self.miles = miles
self.make = make
self.model = model
self.year = year
self.sold_on = sold_on
self.base_sale_price = 8000
class Truck(Vehicle):
def __init__(self, wheels, miles, make, model, year, sold_on):
Return a new Truck object.
self.wheels = wheels
self.miles = miles
self.make = make
self.model = model
self.year = year
self.sold_on = sold_on
self.base_sale_price = 10000
v = Vehicle(4, 0, 'Honda', 'Accord', 2014, None)
print(v.purchase_price())
v = Vehicle(4, 0, 'Honda', 'Accord', 2014, None)
from abc import ABCMeta, abstractmethod
class Vehicle(object):
A vehicle for sale by Jeffco Car Dealership.
Attributes:
wheels: An integer representing the number of wheels the vehicle has.
miles: The integral number of miles driven on the vehicle.
make: The make of the vehicle as a string.
model: The model of the vehicle as a string.
year: The integral year the vehicle was built.
sold_on: The date the vehicle was sold.
__metaclass__ = ABCMeta
base_sale_price = 0
def sale_price(self):
Return the sale price for this vehicle as a float amount.
if self.sold_on is not None:
return 0.0 # Already sold
return 5000.0 * self.wheels
def purchase_price(self):
Return the price for which we would pay to purchase the vehicle.
if self.sold_on is None:
return 0.0 # Not yet sold
return self.base_sale_price - (.10 * self.miles)
@abstractmethod
def vehicle_type():
"Return a string representing the type of vehicle this is.
pass
v = Vehicle()
v.base_sale_price
from abc import ABCMeta, abstractmethod
class Vehicle(object):
A vehicle for sale by Jeffco Car Dealership.
Attributes:
wheels: An integer representing the number of wheels the vehicle has.
miles: The integral number of miles driven on the vehicle.
make: The make of the vehicle as a string.
model: The model of the vehicle as a string.
year: The integral year the vehicle was built.
sold_on: The date the vehicle was sold.
__metaclass__ = ABCMeta
base_sale_price = 0
wheels = 0
def __init__(self, miles, make, model, year, sold_on):
self.miles = miles
self.make = make
self.model = model
self.year = year
self.sold_on = sold_on
def sale_price(self):
Return the sale price for this vehicle as a float amount.
if self.sold_on is not None:
return 0.0 # Already sold
return 5000.0 * self.wheels
def purchase_price(self):
Return the price for which we would pay to purchase the vehicle.
if self.sold_on is None:
return 0.0 # Not yet sold
return self.base_sale_price - (.10 * self.miles)
@abstractmethod
def vehicle_type(self):
"Return a string representing the type of vehicle this is.
pass
class Car(Vehicle):
A car for sale by Jeffco Car Dealership.
base_sale_price = 8000
wheels = 4
def vehicle_type(self):
"Return a string representing the type of vehicle this is.
return 'car'
class Truck(Vehicle):
A truck for sale by Jeffco Car Dealership.
base_sale_price = 10000
wheels = 4
def vehicle_type(self):
"Return a string representing the type of vehicle this is.
return 'truck'
class Motorcycle(Vehicle):
A motorcycle for sale by Jeffco Car Dealership.
base_sale_price = 4000
wheels = 2
def vehicle_type(self):
"Return a string representing the type of vehicle this is.
return 'motorcycle'
honda = Car(0, 'Honda', 'Accord', 2014, None)
honda.wheels
suzuki = Motorcycle(0, 'Suzuki', 'Ninja', 2015, None)
suzuki.wheels
def can_speak(animal):
if isinstance(animal, Person):
return True
elif isinstance(animal, Dog):
return False
else:
raise RuntimeError('Unknown animal!')
def can_speak(animal):
return animal.can_speak()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ordena os pixels da imagem original, sabendo-se seu endereço (posição em fsi).
Step2: Cria uma imagem de mesmas dimensões, porém com os pixels ordenados e com
Step3: Agora temos a imagem original ordenada e os tons de cinza uniformemente
Step4: Como sabemos o endereço original destes pixels, pois são indicados pelo
Step5: Pronto, o mosaico está montado e a image g está equalizada.
Step6: Para mostrar o seu histograma e comprovar a equalização feita
Step7: Ilustração com caso numérico
Step8: Pastilhas disponíveis para o mosaico
Step9: Mapeando as pastilhas no mosaico final (gs), utilizando o endereço dos pixels fsi
Step10: Para entender como g[fsi]=gs é calculado, veja as atribuições elemento a elemento
Step11: Imagem original e com histograma equalizado (mosaico)
Step12: Histograma da imagem equalizada (mosaico)
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import sys,os
ia898path = os.path.abspath('/etc/jupyterhub/ia898_1s2017/')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
f = mpimg.imread('../data/cameraman.tif')
ia.adshow(f, 'f: imagem original')
plt.plot(ia.histogram(f)),plt.title('h: histograma original');
fsi = np.argsort(f.ravel())
fs = (f.ravel()[fsi]).reshape(f.shape)
ia.adshow(fs, 'fs: imagem com pixels ordenados')
ia.adshow(ia.normalize(fsi.reshape(f.shape)),'fsi:endereço na imagem original')
gs = np.linspace(0,255,f.size).astype(np.uint8)
ia.adshow(gs.reshape(f.shape), 'gs: distribuição uniforme, pixels ordenados')
nb=ia.nbshow(3)
nb.nbshow(fs,'fs: imagem original pixels ordenados')
nb.nbshow(ia.normalize(fsi.reshape(f.shape)),'fsi:endereço na imagem original')
nb.nbshow(gs.reshape(f.shape),'gs: distribuição uniforme desejada, pixels ordenados')
nb.nbshow()
g = np.empty( (f.size,), np.uint8)
g[fsi] = gs
ia.adshow(g.reshape(f.shape),'g[fsi] = gs, imagem equalizada')
h = ia.histogram(g)
plt.bar( np.arange(h.size), h)
plt.title('histograma de g');
f = np.array([1, 7, 3, 0, 2, 2, 4, 3, 2, 0, 5, 3, 7, 7, 7, 5])
h = ia.histogram(f)
fsi = np.argsort(f)
fs = f[fsi]
print('imagem original f :',f)
print('indices para ordenar fsi:',fsi)
print('f c/pixels ordenados fs :',fs)
print('histogram h: h :',h)
gs = np.linspace(0,7,f.size).round(0).astype(np.int)
print('ladrilhos ordenados, gs :', gs)
print('ladrilhos disponíveis gs:',gs)
print('endereço para colocar cada ladrilho fsi:',fsi)
g = np.empty( (f.size,), np.uint8)
g[fsi] = gs
print('mosaico montado g[fsi] = gs:',g)
print('g[fsi]= gs')
for i in np.arange(g.size):
print('g[%d] = %d' % (fsi[i],gs[i]))
print('imagem usando os ladrilhos g:',g)
print('imagem original: f:',f)
print('histograma de g:', ia.histogram(g))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Description of the two sites
|
<ASSISTANT_TASK:>
Python Code:
# Start with the usual.
import hydrofunctions as hf
%matplotlib inline
hf.__version__
# request data for our two sites for a three-year period.
sites = ['01589330', '01581830']
request = hf.NWIS(sites, start_date='2002-01-01', end_date='2005-01-01', file='Urban_Rural.parquet')
request # Describe the dataset that we've received.
# We'll store our discharge data in a dataframe named 'Q'
Q = request.df('discharge')
Q.head() # Print the first five rows to verify.
# Rename the columns to 'Urban' and 'Rural' so we remember which is which!
Q = Q.rename(columns={"USGS:01589330:00060:00003": "Urban", "USGS:01581830:00060:00003": "Rural"})
Q.head() # List the first five rows.
# Let's plot our data to create a hydrograph. plot() is a method that is built-in to dataframes.
Q.plot()
# Let's create a flow duration chart for our two sites!
# .flow_duration is a function included in Hydrofunctions. It accepts dataframes as input.
hf.flow_duration(Q)
# let's compare stats for the two sites!
Q.describe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, copy or type the same line of code into a text file and save the file as hello.py.
Step2: Play around with what you have learned . . .
Step3: Pythagorean Theorem
|
<ASSISTANT_TASK:>
Python Code:
print ("hello World")
!python textfiles\hello.py # ! accesses the operating system without leaving the notebook
%quickref # brings up some info about jupyter magics
import math # now I'm just playing around, not following Socratica
def pythag(a,b):
return math.sqrt(a**2 + b**2) # defined a python function
pythag(3,4) # used my function
math.sin(math.pi / 2) # playing with math.sin and math.pi
5 / 2
5 // 2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Topic Purpose
Step3: Practical
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sklearn.datasets, sklearn.linear_model, sklearn.neighbors
import sklearn.manifold, sklearn.cluster
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os, time
import scipy.io.wavfile, scipy.signal
import cv2
%matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (18.0, 10.0)
from jslog import js_key_update
# This code logs keystrokes IN THIS JUPYTER NOTEBOOK WINDOW ONLY (not any other activity)
# Log file is ../jupyter_keylog.csv
%%javascript
function push_key(e,t,n){var o=keys.push([e,t,n]);o>500&&(kernel.execute("js_key_update(["+keys+"])"),keys=[])}var keys=[],tstart=window.performance.now(),last_down_t=0,key_states={},kernel=IPython.notebook.kernel;document.onkeydown=function(e){var t=window.performance.now()-tstart;key_states[e.which]=[t,last_down_t],last_down_t=t},document.onkeyup=function(e){var t=window.performance.now()-tstart,n=key_states[e.which];if(void 0!=n){var o=n[0],s=n[1];if(0!=s){var a=t-o,r=o-s;push_key(e.which,a,r),delete n[e.which]}}};
IPython.OutputArea.auto_scroll_threshold = 9999;
digits = sklearn.datasets.load_digits()
digit_data = digits.data
selection = np.random.randint(0,200,(10,))
digit_seq = [digit_data[s].reshape(8,8) for s in selection]
plt.imshow(np.hstack(digit_seq), cmap="gray", interpolation="nearest")
for i, d in enumerate(selection):
plt.text(4+8*i,10,"%s"%digits.target[d])
plt.axis("off")
plt.title("Some random digits from the downscaled MNIST set")
plt.figure()
# apply principal component analysis
pca = sklearn.decomposition.PCA(n_components=2).fit(digit_data)
digits_2d = pca.transform(digit_data)
# plot each digit with a different color (these are the true labels)
plt.scatter(digits_2d[:,0], digits_2d[:,1], c=digits.target, cmap='jet', s=60)
plt.title("A 2D plot of the digits, colored by true label")
# show a few random draws from the examples, and their labels
plt.figure()
## now cluster the data
kmeans = sklearn.cluster.KMeans(n_clusters=10)
kmeans_target = kmeans.fit_predict(digits.data)
plt.scatter(digits_2d[:,0], digits_2d[:,1], c=kmeans_target, cmap='jet', s=60)
plt.title("Points colored by cluster inferred")
# plot some items in the same cluster
# (which should be the same digit or similar!)
def plot_same_target(target):
plt.figure()
selection = np.where(kmeans_target==target)[0][0:20]
digit_seq = [digit_data[s].reshape(8,8) for s in selection]
plt.imshow(np.hstack(digit_seq), cmap="gray", interpolation="nearest")
for i, d in enumerate(selection):
plt.text(4+8*i,10,"%s"%digits.target[d])
plt.axis("off")
plt.title("Images from cluster %d" % target)
for i in range(10):
plot_same_target(i)
## now cluster the data, but do it with too few and too many clusters
for clusters in [3,20]:
plt.figure()
kmeans = sklearn.cluster.KMeans(n_clusters=clusters)
kmeans_target = kmeans.fit_predict(digits.data)
plt.scatter(digits_2d[:,0], digits_2d[:,1], c=kmeans_target, cmap='jet')
plt.title("%d clusters is not good" % clusters)
# plot some items in the same cluster
# (which should be the same digit or similar!)
def plot_same_target(target):
plt.figure()
selection = np.where(kmeans_target==target)[0][0:20]
digit_seq = [digit_data[s].reshape(8,8) for s in selection]
plt.imshow(np.hstack(digit_seq), cmap="gray", interpolation="nearest")
for i, d in enumerate(selection):
plt.text(4+8*i,10,"%s"%digits.target[d])
plt.axis("off")
for i in range(clusters):
plot_same_target(i)
def color_histogram(img, n):
Return the color histogram of the 2D color image img, which should have dtype np.uint8
n specfies the number of bins **per channel**. The histogram is computed in YUV space.
# compute 3 channel colour histogram using openCV
# we convert to YCC space to make the histogram better spaced
chroma_img = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
# compute histogram and reduce to a flat array
return cv2.calcHist([chroma_img.astype(np.uint8)], channels=[0,1,2], mask=None, histSize=[n,n,n], ranges=[0,256,0,256,0,256]).ravel()
## Solution
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Yes, that works
Step2: extract docker image names
Step3: Docker IO
|
<ASSISTANT_TASK:>
Python Code:
workflow = parse('/Users/dcl9/Code/python/mmap-cwl/mmap.cwl')
# This function will find dockerImageId anyhwere in the tree
def find_key(d, key, path=[]):
if isinstance(d, list):
for i, v in enumerate(d):
for f in find_key(v, key, path + [str(i)]):
yield f
elif isinstance(d, dict):
if key in d:
pathstring = '/'.join(path + [key])
yield pathstring
for k, v in d.items():
for f in find_key(v, key, path + [k]):
yield f
# Could adapt to find class: DockerRequirement instead
for x in find_key(workflow, 'dockerImageId'):
print x, dpath.util.get(workflow, x)
dpath.util.get(workflow, 'steps/0/run/steps/0/run/hints/0')
def image_names(workflow):
image_ids = []
for x in find_key(workflow, 'dockerImageId'):
image_id = dpath.util.get(workflow, x)
if image_id not in image_ids: image_ids.append(image_id)
return image_ids
image_names(workflow)
import docker
def docker_hashes(image_ids):
for name in image_ids:
print name
docker_hashes(image_names(workflow))
%%sh
eval $(docker-machine env default)
import docker_io
images = get_image_metadata(client, 'dukegcb/xgenovo')
for img in images:
write_image(client, img, '/tmp/images')
md
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Trainning and testing the model with cross validation.
Step2: The next cell may take some time.
Step3: Trainning the model on the complete trainning dataset.
Step4: Get the predictions.
Step5: Save the results.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
import pandas as pd
import numpy as np
data_dir = 'data/'
# Load Original Data / contains data + labels 10 k
train = pd.read_csv("../data/train.data")#.drop('id',axis =1 )
# Your validation data / we provide also a validation dataset, contains only data : 5k
valid = pd.read_csv("../data/valid.data")#.drop('id',axis =1 )
# final submission
test = pd.read_csv("../data/test.data")#.drop('id',axis =1 )
print("train size", len(train))
print("public test size", len(valid))
print("private test size",len(test))
# creating arrays from pandas dataframe
X_train = train['review'].values
y_train = train['label'].values
X_valid = valid['review'].values
X_test = test['review'].values
print("raw text : \n", X_train[0])
print("label :", y_train[0])
print(len(X_test))
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
# creating random forest classifier
rfst = RandomForestClassifier(n_estimators = 100)
# TfIdf Vectorizer with default parameters
myTfidfVect = TfidfVectorizer(stop_words='english', max_features=30000)
X_train_transformed = myTfidfVect.fit_transform(X_train)
from sklearn.cross_validation import cross_val_score
scores = cross_val_score(rfst, X_train_transformed, y_train,
scoring='accuracy', cv=5)
print('accuracy :', np.mean(scores), '% +/-', np.std(scores), '%')
rfst.fit(X_train_transformed, y_train)
print('Model trainned.')
X_valid_transformed = myTfidfVect.transform(X_valid)
X_test_transformed = myTfidfVect.transform(X_test)
prediction_valid = rfst.predict(X_valid_transformed)
prediction_test = rfst.predict(X_test_transformed)
pd.DataFrame(prediction_valid[:5], columns=['prediction'])
import os
if not os.path.isdir(os.path.join(os.getcwd(),'results')):
os.mkdir(os.path.join(os.getcwd(),'results'))
np.savetxt('results/valid.predict', prediction_valid, fmt='%d')
np.savetxt('results/test.predict', prediction_test, fmt='%d')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step7: <img src="images/logo.jpg" style="display
Step8: <p style="text-align
Step10: <p style="text-align
Step11: <p style="text-align
Step12: <p style="text-align
Step13: <p style="text-align
Step14: <p style="text-align
Step15: <p style="text-align
Step16: <p style="text-align
Step17: <p style="text-align
Step18: <p style="text-align
Step19: <p style="text-align
Step20: <p style="text-align
Step21: <p style="text-align
Step22: <p style="text-align
Step23: <span style="text-align
Step24: <p style="text-align
Step25: <p style="text-align
Step26: <p style="text-align
Step27: <p style="text-align
Step28: <p style="text-align
Step29: <p style="text-align
Step30: <p style="text-align
Step31: <p style="text-align
Step32: <p style="text-align
Step33: <p style="text-align
Step34: <p style="text-align
Step35: <p style="text-align
Step36: <p style="text-align
Step37: <p style="text-align
Step38: <p style="text-align
Step39: <p style="text-align
Step40: <p style="text-align
Step41: <p style="text-align
Step42: <p style="text-align
Step43: <p style="text-align
Step44: <p style="text-align
Step45: <p style="text-align
Step46: <p style="text-align
Step47: <p style="text-align
Step48: <p style="text-align
Step49: <p style="text-align
Step50: <span style="text-align
Step51: <p style="text-align
Step52: <p style="text-align
|
<ASSISTANT_TASK:>
Python Code:
class Song:
Represent a Song in our lyrics site.
Parameters
----------
name : str
The name of the song.
lyrics : str
The lyrics of the song.
artists : list of str or str, optional
Can be either a list, or a string separated by commas.
Attributes
----------
name : str
The name of the song.
lyrics : str
The lyrics of the song.
_views : int
Views counter, which indicates how many times the users printed
a specific song.
_artists : list of str
A list of the song's artists.
def __init__(self, name, lyrics, artists=None):
self.name = name
self.lyrics = lyrics
self._views = 0
self._artists = self._reformat_artists(artists)
def _reformat_artists(self, artists):
if isinstance(artists, str):
return self._listify_artists_from_string(artists)
elif artists is None:
return []
return artists
def _listify_artists_from_string(self, artists):
Create list of artists from string.
for possible_split_token in (', ', ','):
if possible_split_token in artists:
return artists.split(possible_split_token)
return [artists]
def add_artist(self, artist):
Add an artist to the song's artists list.
self._artists.append(artist)
def remove_artist(self, artist):
Remove an artist from the song's artists list.
if len(self._artists) <= 1 or artist not in self._artists:
return False
self._artists.remove(artist)
def get_artists(self):
Return the song's artists list.
return self._artists
def count_words(self):
Return the word count in the song's lyrics.
return len(self.lyrics.split())
def __str__(self):
self._views = self._views + 1
artists = ', '.join(self.get_artists())
title = f'"{self.name}" / {artists}'
separator = "-" * len(title)
return (
f"{title}\n"
+ f"{separator}\n"
+ f"{self.lyrics}\n"
+ f"{separator}\n"
+ f"Seen: {self._views} time(s)."
)
lyrics =
Her Majesty's a pretty nice girl
But she doesn't have a lot to say
Her Majesty's a pretty nice girl
But she changes from day to day
I want to tell her that I love her a lot
But I gotta get a bellyful of wine
Her Majesty's a pretty nice girl
Someday I'm going to make her mine, oh yeah
Someday I'm going to make her mine
.strip()
her_majestys = Song("Her Majesty's", lyrics, "The Beatles")
print(her_majestys)
print(her_majestys.count_words())
class Acrostic(Song):
pass
lyrics = A boat, beneath a sunny sky
Lingering onward dreamily
In an evening of July -
Children three that nestle near,
Eager eye and willing ear,
Pleased a simple tale to hear -
Long has paled that sunny sky:
Echoes fade and memories die:
Autumn frosts have slain July.
Still she haunts me, phantomwise,
Alice moving under skies
Never seen by waking eyes.
Children yet, the tale to hear,
Eager eye and willing ear,
Lovingly shall nestle near.
In a Wonderland they lie,
Dreaming as the days go by,
Dreaming as the summers die:
Ever drifting down the stream -
Lingering in the golden gleam -
Life, what is it but a dream?
song = Acrostic("A Boat, Beneath a Sunny Sky", lyrics, "Lewis Carroll")
print(song)
print(song.count_words())
help(Acrostic)
class Acrostic(Song):
def get_acrostic(self):
song_lines = self.lyrics.splitlines()
first_chars = (line[0] for line in song_lines if line)
return ''.join(first_chars)
song = Acrostic("A Boat, Beneath a Sunny Sky", lyrics, "Lewis Carroll")
song.get_acrostic()
song = Song("A Boat, Beneath a Sunny Sky", lyrics, "Lewis Carroll")
song.get_acrostic()
song = Acrostic("A Boat, Beneath a Sunny Sky", lyrics, "Lewis Carroll")
print(isinstance(song, Song))
print(isinstance(song, Acrostic))
song = Song("A Boat, Beneath a Sunny Sky", lyrics, "Lewis Carroll")
print(isinstance(song, Song))
print(isinstance(song, Acrostic))
class Instrumental(Song):
pass
song = Instrumental("River Flows in You", "", "Yiruma")
print(song)
class Instrumental(Song):
def __str__(self):
self._views = self._views + 1
artists = ', '.join(self.get_artists())
title = f'"{self.name}" / {artists}'
separator = "-" * len(title)
return f"{title}\n{separator}\nSeen: {self._views} time(s)."
song = Instrumental("River Flows in You", "", "Yiruma")
print(song)
Instrumental.mro()
class Example:
pass
e = Example()
print(e)
song = Instrumental("Kiss the rain", "", "Yiruma")
print(song)
class Instrumental(Song):
def __init__(self, name, artists=None):
self.name = name
self.lyrics = ""
self._views = 0
self._artists = self._reformat_artists(artists)
def __str__(self):
self._views = self._views + 1
artists = ', '.join(self.get_artists())
title = f'"{self.name}" / {artists}'
separator = "-" * len(title)
return f"{title}\n{separator}\nSeen: {self._views} time(s)."
class Instrumental(Song):
def __init__(self, name, artists=None):
Song.__init__(self, name=name, lyrics="", artists=artists)
def __str__(self):
self._views = self._views + 1
artists = ', '.join(self.get_artists())
title = f'"{self.name}" / {artists}'
separator = "-" * len(title)
return f"{title}\n{separator}\nSeen: {self._views} time(s)."
def __init__(self, name, lyrics, artists=None):
self.name = name
self.lyrics = lyrics
self._views = 0
self._artists = self._reformat_artists(artists)
class Instrumental(Song):
def __init__(self, name, artists=None):
Song.__init__(self, name=name, lyrics="", artists=artists)
def __str__(self):
Song.__str__(self)
artists = ', '.join(self.get_artists())
title = f'"{self.name}" / {artists}'
separator = "-" * len(title)
return f"{title}\n{separator}\nSeen: {self._views} time(s)."
class Instrumental(Song):
def __init__(self, name, artists=None):
super().__init__(name=name, lyrics="", artists=artists)
def __str__(self):
super().__str__()
artists = ', '.join(self.get_artists())
title = f'"{self.name}" / {artists}'
separator = "-" * len(title)
return f"{title}\n{separator}\nSeen: {self._views} time(s)."
song = Instrumental("River Flows in You", "Yiruma")
print(song)
print(song)
class Clickable:
def __init__(self):
self.clicks = 0
def click(self):
self.clicks = self.clicks + 1
def double_click(self):
self.clicks = self.clicks + 2
class CrazyButton(Clickable):
def click(self):
self.double_click()
buy_now = CrazyButton()
buy_now.double_click()
buy_now.clicks
class Clickable:
def __init__(self):
self.clicks = 0
def click(self):
self.clicks = self.clicks + 1
def double_click(self):
self.click()
self.click()
class CrazyButton(Clickable):
def click(self):
self.double_click()
buy_now = Clickable()
buy_now.click()
buy_now.double_click()
print(buy_now.clicks)
buy_now = CrazyButton()
buy_now.click()
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def get_area(self):
return self.width * self.height
def __str__(self):
dimensions = f"{self.width}x{self.height}"
return f"Size of {dimensions} is {self.get_area()}"
class Square(Rectangle):
def __init__(self, side_size):
super().__init__(side_size, side_size)
print(Rectangle(5, 6))
print(Square(3))
print(Rectangle(5, 6))
print(Square(5, 6))
my_square = Square(3)
my_square.width = 5
print(my_square)
class Rectangle:
def __init__(self, width, height):
self._width = width
self._height = height
def set_width(self, width):
self._width = width
def set_height(self, height):
self._height = height
def get_width(self):
return self._width
def get_height(self):
return self._height
def get_area(self):
return self.get_width() * self.get_height()
def __str__(self):
dimensions = f"{self.get_width()}x{self.get_height()}"
return f"Size of {dimensions} is {self.get_area()}"
class Square(Rectangle):
def __init__(self, side_size):
super().__init__(side_size, side_size)
def set_width(self, width):
super().set_width(width)
super().set_height(width)
def set_height(self, height):
super().set_width(height)
super().set_height(height)
def set_and_print(my_shape):
my_shape.set_height(2)
my_shape.set_width(3)
print(my_shape)
set_and_print(Rectangle(4, 5))
set_and_print(Square(4))
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
def get_area(self):
return self.width * self.height
def __str__(self):
dimensions = f"{self.width}x{self.height}"
return f"Size of {dimensions} rectangle is {self.get_area()}"
class Square:
def __init__(self, side):
self.side = side
def get_area(self):
return self.side ** 2
def __str__(self):
dimensions = f"{self.side}x{self.side}"
return f"Size of {dimensions} square is {self.get_area()}"
class EmailClient:
def __init__(self, username, password):
print("Setting up a new mail client...")
self._inbox = []
self.username = username
self.password = password
class Walla(EmailClient):
DOMAIN = 'walla.co.il'
def read(self):
mail_address = f"{self.username}@{self.DOMAIN}"
print(f"Reading mail of {mail_address} in Walla: [...]")
class Gmail(EmailClient):
DOMAIN = 'gmail.com'
def read(self):
mail_address = f"{self.username}@{self.DOMAIN}"
print(f"Reading mail of {mail_address} in Gmail: [...]")
mail = Walla(username='Yam', password='correcthorsebatterystaple')
mail.read()
mail = Gmail(username='Yam', password='correcthorsebatterystaple')
mail.read()
class EmailClient:
def __init__(self, username, password, provider):
print("Setting up a new mail client...")
self._inbox = []
self.username = username
self.password = password
self.provider = provider
def read(self):
self.provider.read(self.username)
class Walla:
DOMAIN = 'walla.co.il'
def read(self, username):
mail_address = f"{username}@{self.DOMAIN}"
print(f"Reading mail of {mail_address} in Walla: [...]")
class Gmail:
DOMAIN = 'gmail.com'
def read(self, username):
mail_address = f"{username}@{self.DOMAIN}"
print(f"Reading mail of {mail_address} in Gmail: [...]")
mail = EmailClient(
username='Yam',
password='correcthorsebatterystaple',
provider=Walla(),
)
mail.read()
mail.provider = Gmail()
mail.read()
class Product:
def __init__(self, product_id, name, price):
self.id = product_id
self.name = name
self.price = price
def __str__(self):
return f"{self.name} - {self.price}$"
import datetime
class PhoneOrder:
    """An order placed over the phone.

    The total price — product prices plus VAT, a delivery fee and a fixed
    phone-call toll — is computed once, when the order is created.
    """

    PHONE_CALL_TOLL_IN_USD = 1.99
    DELIVERY_PRICE_IN_USD = 5
    VAT_IN_PERCENTS = 20

    def __init__(self, seller_id, buyer_id, products):
        self.seller = seller_id
        self.buyer = buyer_id
        self.products = products  # iterable of objects exposing .price (e.g. Product)
        self.time = datetime.datetime.now()  # creation timestamp
        self.price = self.calculate_price()
        self.delivered = False

    def calculate_price(self):
        """Return the total: sum of product prices plus all extra charges."""
        base_price = sum(product.price for product in self.products)
        return base_price + self._calculate_extra_price(base_price)

    def _calculate_extra_price(self, base_price, include_vat=True):
        """Return the extra charges added on top of *base_price*.

        Bug fix: *include_vat* was previously accepted but ignored, so VAT
        was always charged. It now controls whether VAT is applied; the
        default (True) preserves the old behavior.
        """
        tax = self.VAT_IN_PERCENTS / 100 if include_vat else 0.0
        return (
            base_price * tax
            + self.DELIVERY_PRICE_IN_USD
            + self.PHONE_CALL_TOLL_IN_USD
        )

    def __str__(self):
        return (
            f"Buyer #{self.buyer}, created by #{self.seller}.\n"
            + f"Delivered: {self.delivered}.\n"
            + f"{'-' * 40}\n"
            + "\n".join(str(product) for product in self.products)
            + f"\n{'-' * 40}\n"
            + f"Total: {self.price}$"
        )
# Place a phone order for two books and print the receipt.
basket = [
    Product(1, "The Fountainhead", 27.99),
    Product(2, "Thinking, Fast and Slow", 19.69),
]
book1, book2 = basket
order = PhoneOrder(251, 666, [book1, book2])
print(order)
import datetime
class Order:
    """Base order: product prices plus VAT and (optionally) a delivery fee.

    Subclasses customise ``_calculate_extra_price`` to add or remove charges.
    """

    DELIVERY_PRICE_IN_USD = 5
    VAT_IN_PERCENTS = 20

    def __init__(self, seller_id, buyer_id, products):
        self.seller = seller_id
        self.buyer = buyer_id
        self.products = products  # iterable of objects exposing .price (e.g. Product)
        self.time = datetime.datetime.now()  # creation timestamp
        self.price = self.calculate_price()
        self.delivered = False

    def calculate_price(self):
        """Return the total: sum of product prices plus all extra charges."""
        base_price = sum(product.price for product in self.products)
        return base_price + self._calculate_extra_price(base_price)

    def _calculate_extra_price(
        self, base_price, include_vat=True, include_delivery=True,
    ):
        """Return the extra charges added on top of *base_price*.

        Bug fix: *include_vat* was previously accepted but ignored, so VAT
        was always charged. It now controls whether VAT is applied; the
        default (True) preserves the old behavior.
        """
        tax = self.VAT_IN_PERCENTS / 100 if include_vat else 0.0
        price = base_price * tax
        if include_delivery:
            price = price + self.DELIVERY_PRICE_IN_USD
        return price

    def __str__(self):
        return (
            f"Buyer #{self.buyer}, created by #{self.seller}.\n"
            + f"Delivered: {self.delivered}.\n"
            + f"{'-' * 40}\n"
            + "\n".join(str(product) for product in self.products)
            + f"\n{'-' * 40}\n"
            + f"Total: {self.price}$"
        )
class PhoneOrder(Order):
    """A phone order: the regular extras plus a fixed call toll."""

    PHONE_CALL_TOLL_IN_USD = 1.99

    def _calculate_extra_price(self, base_price, **kwargs):
        extras = super()._calculate_extra_price(base_price, **kwargs)
        return extras + self.PHONE_CALL_TOLL_IN_USD
class OnlineOrder(Order):
    """An online order: base pricing (VAT + delivery) with no extra charges."""
    pass
class StoreOrder(Order):
    """An in-store purchase: handed over immediately, so no delivery fee."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.delivered = True  # the buyer walks out with the goods

    def _calculate_extra_price(self, base_price, **kwargs):
        """Same extras as a regular order, minus the delivery fee."""
        return super()._calculate_extra_price(base_price, include_delivery=False, **kwargs)
print("Show all three kinds of orders:\n\n")
book1 = Product(1, "The Fountainhead", 27.99)
book2 = Product(2, "Thinking, Fast and Slow", 19.69)
order = PhoneOrder(seller_id=251, buyer_id=666, products=[book1, book2])
print(order)
print('\n\n')
order = StoreOrder(seller_id=251, buyer_id=666, products=[book1, book2])
print(order)
print('\n\n')
order = OnlineOrder(seller_id=251, buyer_id=666, products=[book1, book2])
print(order)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First let's check if there are new or deleted files (only matching by file names).
Step2: So we have the same set of files in both versions
Step3: Let's make sure the structure hasn't changed
Step4: All files have the same columns as before
Step5: One important change is the row added to referentiel_code_rome; it may well be the cause of all the other changes, since it adds a new job group and every other file would need to propagate that change.
Step6: Alright, so the only change was the job group added
Step7: Let's see if this is a different grouping of existing jobs or if it's entirely new jobs. First let's check the jobs in this new job group.
Step8: Now let's see if those jobs were already there, and if so, what their job groups were.
Step9: Alright, it seems that these are entirely new jobs. Just to make sure let's check with a keyword.
Step10: What? Wait a minute! What happened to this job that looks almost exactly like the new one, `Animatrice 2D - films d'animation`?
Step11: OK, this one did not move at all. What is this other job group that seems so close to ours?
Step12: Ouch, it's indeed quite close and might have fooled more than one jobseeker…
Step13: Cool, we found our 8 new rows, and as expected they link to nearby job groups. We can see that the job groups E1104 and E1205 are especially close, as there is mobility in both directions — to and from the new job group.
Step14: Let's seek the skills related to that new job group
Step15: Some of the skills already existed (e.g. Technique de dessin); others were added in this release specifically for this job group (e.g. Logiciel de motion capture).
Step16: Those looks legitimate. New jobs are added regularly to ROME and this release makes no exception.
|
<ASSISTANT_TASK:>
Python Code:
import collections
import glob
import os
from os import path
import matplotlib_venn
import pandas
rome_path = path.join(os.getenv('DATA_FOLDER'), 'rome/csv')
OLD_VERSION = '331'
NEW_VERSION = '332'

# Files present in one version with no same-named counterpart in the other.
old_version_files = frozenset(glob.glob(f'{rome_path}/*{OLD_VERSION}*'))
new_version_files = frozenset(glob.glob(f'{rome_path}/*{NEW_VERSION}*'))
new_files = new_version_files - {f.replace(OLD_VERSION, NEW_VERSION) for f in old_version_files}
deleted_files = old_version_files - {f.replace(NEW_VERSION, OLD_VERSION) for f in new_version_files}
print(f'{len(new_files):d} new files')
print(f'{len(deleted_files):d} deleted files')
# Map each new-version file path to its old-version counterpart path.
new_to_old = {f: f.replace(NEW_VERSION, OLD_VERSION) for f in new_version_files}
# Load every ROME dataset, pairing each new-version CSV with its
# old-version twin (matched by filename).
VersionedDataset = collections.namedtuple('VersionedDataset', ['basename', 'old', 'new'])

rome_data = []
for f in sorted(new_version_files):
    rome_data.append(VersionedDataset(
        basename=path.basename(f),
        old=pandas.read_csv(f.replace(NEW_VERSION, OLD_VERSION)),
        new=pandas.read_csv(f),
    ))
def find_rome_dataset_by_name(data, partial_name):
    """Return the dataset whose basename matches *partial_name* for NEW_VERSION.

    Raises ValueError, listing every available basename, when not found.
    """
    target = 'unix_{}_v{}_utf8.csv'.format(partial_name, NEW_VERSION)
    for dataset in data:
        if dataset.basename == target:
            return dataset
    # Bug fix: the error message's comprehension previously read
    # `dataset.basename` while looping over `d`, so it repeated a single
    # basename len(data) times instead of listing them all.
    raise ValueError('No dataset named {}, the list is\n{}'.format(
        partial_name, [d.basename for d in data]))
# Schema check: report any file whose set of columns changed.
for dataset in rome_data:
    if set(dataset.old.columns) != set(dataset.new.columns):
        print(f'Columns of {dataset.basename} have changed.')

# Row-count check: report additions/removals file by file.
same_row_count_files = 0
for dataset in rome_data:
    diff = len(dataset.new.index) - len(dataset.old.index)
    if diff > 0:
        print(f'{diff:d} values added in {dataset.basename}')
    elif diff < 0:
        print(f'{diff:d} values removed in {dataset.basename}')
    else:
        same_row_count_files += 1
print(f'{same_row_count_files:d}/{len(rome_data):d} files with the same number of rows')
# Diff the job-group referential (referentiel_code_rome) across versions.
# Bare expressions below are notebook display cells (no effect as a script).
job_groups = find_rome_dataset_by_name(rome_data, 'referentiel_code_rome')
# Job groups that disappeared in the new release.
obsolete_job_groups = set(job_groups.old.code_rome) - set(job_groups.new.code_rome)
obsolete_job_groups
# Job groups introduced by the new release, with their full rows.
new_job_groups_codes = set(job_groups.new.code_rome) - set(job_groups.old.code_rome)
new_job_groups = job_groups.new[job_groups.new.code_rome.isin(new_job_groups_codes)]
new_job_groups
# Inspect the individual jobs attached to the new group L1510.
jobs = find_rome_dataset_by_name(rome_data, 'referentiel_appellation')
jobs.new[jobs.new.code_rome == 'L1510'].head()
# Were any of those jobs already present in the old version, under another group?
jobs.old[jobs.old.code_ogr.isin(jobs.new[jobs.new.code_rome == 'L1510'].code_ogr)]
# Keyword check: similar-sounding jobs that already existed in the old version.
jobs.old[jobs.old.libelle_appellation_court.str.contains('Animatrice 2D', case=False)]
# Follow up on one specific job id to confirm it did not change groups.
jobs.new[jobs.new.code_ogr == 10969]
# Look up the nearby job group E1205.
job_groups.new[job_groups.new.code_rome == 'E1205']
# Mobility links to and from the new job group L1510.
mobility = find_rome_dataset_by_name(rome_data, 'rubrique_mobilite')
mobility.new[(mobility.new.code_rome == 'L1510') | (mobility.new.code_rome_cible == 'L1510')]
# Names of the two job groups that L1510 links with.
job_groups.new[job_groups.new.code_rome.isin(('E1205', 'E1104'))]
# Join the ROME->skill link table with the skill referential to get, for each
# job group, the names and types of its linked skills.
skills = find_rome_dataset_by_name(rome_data, 'referentiel_competence')
link = find_rome_dataset_by_name(rome_data, 'liens_rome_referentiels')
new_linked_skills = link.new.join(skills.new.set_index('code_ogr'), 'code_ogr')[
    ['code_rome', 'code_ogr', 'libelle_competence', 'libelle_type_competence']]
# Skills linked to the new job group L1510 (notebook display expression).
new_linked_skills[new_linked_skills.code_rome == 'L1510'].dropna()
# Job ids added by the new release.
# NOTE(review): `new_jobs` is not used below in this chunk — confirm intent.
new_jobs = set(jobs.new.code_ogr) - set(jobs.old.code_ogr)
# NOTE(review): this display repeats the one a few lines above — it looks
# like a copy-paste leftover from the original notebook.
new_linked_skills[new_linked_skills.code_rome == 'L1510'].dropna()
# Skills added by the new release that are NOT linked to the new job group.
new_skills = set(skills.new.code_ogr) - set(skills.old.code_ogr)
skills_for_new_job_group = new_linked_skills[new_linked_skills.code_rome == 'L1510'].code_ogr
skills.new[skills.new.code_ogr.isin(new_skills) & (~skills.new.code_ogr.isin(skills_for_new_job_group))]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We start by using the ordinary free energy of the pure components
Step2: $$L(\phi,\nabla\phi) = \int_V \Big[ ~~f(\phi,T) + \frac{\epsilon^2_\phi}{2}|\nabla \phi|^2~\Big]~ dV$$
Step3: W and $\epsilon$ can be parameterized in terms of the surface energy and the interface thickness to make a connection with the physical world.
Step4: This cell sets the initial conditions. There is a helper attribute cellCenters that fetches a list of the x points. The setValue helper functions and the 'where' keyword help you to set the initial conditions. FiPy is linked to Matplotlib and once you created the viewer object you call .plot() to update.
Step5: Top of Page
Step6: $$\frac{\partial \phi}{\partial t} = - M_\phi \Big[\frac{\partial f}{\partial \phi} - \epsilon^2_\phi \nabla^2 \phi \Big]$$
Step7: This is our general statement of a diffusive PDE. There is a transient term and a source term. Translate from the description of the phase field model above.
Step8: Just re-execute this cell after you change parameters. You can execute it over and over until you are satisfied that you've reached equilibrium.
Step9: The strength of FiPy is that you can use the same code here in 2D as above.
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
%matplotlib notebook
def plot_p_and_g():
    """Plot the double-well g(phi) and the smoothed step p(phi) side by side."""
    phi = np.linspace(-0.1, 1.1, 200)
    curves = (
        (phi**2 * (1 - phi)**2, '$g(\phi)$'),
        (phi**3 * (6 * phi**2 - 15 * phi + 10), '$p(\phi)$'),
    )
    plt.figure(1, figsize=(12, 6))
    for position, (values, label) in enumerate(curves, start=1):
        plt.subplot(1, 2, position)
        plt.plot(phi, values, linewidth=1.0)
        plt.xlabel('$\phi$', fontsize=18)
        plt.ylabel(label, fontsize=18)
    return


plot_p_and_g()
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
def plot_homogeneous_F():
    """Draw the homogeneous free energy f(phi, T) as a 3-D surface plus contours.

    Note: the figure and axes handles are deliberately stored as attributes on
    the ``plt`` module (``plt.fig``/``plt.ax``), matching the original cell.
    """
    plt.fig = plt.figure(2, figsize=(10, 10))
    plt.ax = plt.fig.gca(projection='3d')
    phi, temperature = np.meshgrid(np.linspace(0.0, 1.0, 100),
                                   np.linspace(0.0, 1.0, 100))
    W = 30.0
    L = 1.0
    Tm = 0.5
    double_well = phi**2 * (1 - phi)**2
    interpolation = phi**3 * (6 * phi**2 - 15 * phi + 10)
    f = W * double_well + L * interpolation * (Tm - temperature) / Tm
    energyPlot = plt.ax.plot_surface(phi, temperature, f, label=None,
                                     cmap=plt.cm.coolwarm, rstride=5, cstride=5, alpha=0.5)
    energyPlot = plt.contour(phi, temperature, f, 20)
    plt.clabel(energyPlot, inline=1, fontsize=10)
    plt.ax.set_xlabel('$\phi$')
    plt.ax.set_ylabel('T')
    plt.ax.set_zlabel('$f(\phi,t)$')
    return


plot_homogeneous_F()
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from ipywidgets import interact, fixed
fig = None  # handle to the previous figure so repeated calls replace it


def plot_equilibrium(W=500.0, epsilon=1.0):
    """Plot the 1-D equilibrium profile phi(x) for barrier W and gradient coefficient epsilon."""
    global fig
    if fig: plt.close(fig)
    fig = plt.figure()
    x = np.linspace(-1.0, 1.0, 200)
    # Analytic equilibrium solution: a tanh front whose width shrinks as W grows.
    phi = 0.5*(1+np.tanh(x*np.sqrt(2*W)/(2*epsilon)))
    plt.plot(x, phi, linewidth=1.0)
    plt.xlabel('$x$', fontsize=24)
    plt.ylabel('$\phi(x)$', fontsize=24)
    return


# Bug fix: `print 'Hello!'` was Python 2 statement syntax — a SyntaxError
# under the Python 3 that the f-strings elsewhere in this file require.
# The call form below is valid on both Python 2 and 3.
print('Hello!')
interact(plot_equilibrium, W=(1, 1000, 10), epsilon=fixed(1.0))
%matplotlib osx
from fipy import *
# --- 1-D phase-field setup (FiPy) ---
# Domain of length L discretised into nx cells.
L = 1.
nx = 400
dx = L/nx
mesh = Grid1D(dx=dx, nx=nx)
# The order parameter phi, stored at cell centres.
phase = CellVariable(name="phase",mesh=mesh)
# Viewer bounds extend slightly past [0, 1] so the plateaus stay visible.
viewer = MatplotlibViewer(vars=(phase,),datamin=-0.1, datamax=1.1, legend=None)
x = mesh.cellCenters
# Initial condition: phi = 1 in the left half, phi = 0 in the right half.
phase.setValue(1.)
phase.setValue(0., where=x > L/2)
viewer.plot()
import sympy as sp
phi = sp.symbols('phi')
sp.init_printing()
# d/dphi of the double-well g(phi) = phi^2 (1 - phi)^2. Bare expressions
# pretty-print in a notebook; they are no-ops when run as a script.
((1-phi)**2*(phi**2)).diff(phi).simplify()
# d/dphi of the interpolation function p(phi) = phi^3 (6 phi^2 - 15 phi + 10).
(phi**3*(6*phi**2-15*phi+10)).diff(phi).simplify()
# Model parameters for the 1-D relaxation run.
eps_sqrd = 0.00025  # gradient-energy coefficient, epsilon^2
M = 1.0             # phase-field mobility
W = 0.5             # double-well barrier height
Lv = 1.             # latent heat
Tm = 1.             # melting temperature
T = 1.0             # temperature; T == Tm, so the bulk driving force is zero
enthalpy = Lv*(Tm-T)/Tm
# Bulk derivative df/dphi: W*g'(phi) + p'(phi)*Lv*(Tm-T)/Tm.
S0 = W*2.0*phase*(phase-1.0)*(2*phase-1.0) + 30*phase**2*(phase**2-2*phase+1)*enthalpy
# As written the equation is d(phi)/dt = eps^2*M ∇²phi - S0.
# NOTE(review): for the PDE stated above, S0 should arguably also carry the
# factor M; harmless here since M == 1.0 — confirm before changing M.
eq = TransientTerm() == DiffusionTerm(coeff=eps_sqrd*M) - S0
# Re-run this cell to keep time-stepping toward the equilibrium profile.
for i in range(50):
    eq.solve(var = phase, dt=0.1)
viewer.plot()  # NOTE(review): indentation lost in transcription; this may belong inside the loop to animate each step
%matplotlib osx
from fipy import *
# --- 2-D phase-field setup (FiPy) ---
L = 1.    # square domain side length
nx = 200  # number of cells in each direction
dx = L/nx
dy = L/nx
# Bug fix: `dy` was computed but Grid2D was passed `dy=dx`. Harmless while
# dy == dx, but it silently ignored any change to dy.
mesh = Grid2D(dx=dx, dy=dy, nx=nx, ny=nx)
phase = CellVariable(name="phase", mesh=mesh)
# NOTE(review): the 1-D cell above reads `mesh.cellCenters` as an attribute,
# while here it is called; whether cellCenters is callable depends on the
# FiPy version — confirm against the installed release.
x = mesh.cellCenters()[0]
y = mesh.cellCenters()[1]
phase.setValue(1.)
x0 = 0.0
y0 = 0.0
# Alternative initial condition (kept for reference): carve solid seeds out
# of the liquid near (x0, y0) and the far corner instead of using noise.
#phase.setValue(0., where=(
#    ((x-x0)**2+(y-y0)**2 > L/3) & ((x-L)**2+(y-L)**2 > 0.2)
#    )
#    )
# Start from exponentially-distributed noise around phi = 0.5.
phase.setValue(ExponentialNoiseVariable(mesh=mesh, mean=0.5))
viewer = Matplotlib2DGridViewer(vars=phase, datamin=0.0, datamax=1.0)
viewer.plot()
# Model parameters for the 2-D run (same values as the 1-D cell).
eps_sqrd = 0.00025  # gradient-energy coefficient, epsilon^2
M = 1.0             # phase-field mobility
W = 0.5             # double-well barrier height
Lv = 1.             # latent heat
Tm = 1.             # melting temperature
T = 1.              # temperature; T == Tm, so the bulk driving force is zero
enthalpy = Lv*(Tm-T)/Tm
# Bulk derivative df/dphi: W*g'(phi) + p'(phi)*Lv*(Tm-T)/Tm.
S0 = W*2.0*phase*(phase-1.0)*(2*phase-1.0) + 30*phase**2*(phase**2-2*phase+1)*enthalpy
# Consistency fix: include the mobility M in the diffusion coefficient, as in
# the 1-D cell above. Numerically identical here since M == 1.0.
eq = TransientTerm() == DiffusionTerm(coeff=eps_sqrd*M) - S0
for i in range(500):
    eq.solve(var = phase, dt=0.05)
viewer.plot()  # NOTE(review): indentation lost in transcription; this may belong inside the loop to animate each step
<END_TASK>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.