Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
3,500
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
%load_ext Cython
%%cython
cimport cython
cimport numpy as np
@cython.wraparound(False)
@cython.boundscheck(False)
def cython_diff2d(np.ndarray[double, ndim=2] u, np.ndarray[double, ndim=2] v, double dx2, double dy2, double c):
    """One explicit finite-difference diffusion step over the interior of ``u``.

    Reads ``u`` and writes the updated field into ``v`` (boundary rows/columns
    of ``v`` are left untouched).  ``dx2``/``dy2`` are the squared grid
    spacings and ``c`` is ``D*dt``.

    Fix: ``xrange`` is Python-2-only; ``range`` compiles to the same C loop
    under Cython and also works if the body is ever interpreted as Python 3.
    """
    cdef unsigned int i, j
    for i in range(1, u.shape[0] - 1):
        for j in range(1, u.shape[1] - 1):
            # 5-point Laplacian stencil, scaled per axis
            v[i, j] = u[i, j] + c * ((u[i + 1, j] + u[i - 1, j] - 2.0 * u[i, j]) / dy2 +
                                     (u[i, j + 1] + u[i, j - 1] - 2.0 * u[i, j]) / dx2)
def numpy_diff2d(u, v, dx2, dy2, c):
    """Vectorized explicit diffusion step: update the interior of ``v`` from ``u``.

    Boundary cells of ``v`` are not modified.  ``dx2``/``dy2`` are squared
    grid spacings, ``c`` is the diffusion coefficient times the time step.
    """
    core = u[1:-1, 1:-1]
    lap_y = (u[2:, 1:-1] + u[:-2, 1:-1] - 2.0 * core) / dy2
    lap_x = (u[1:-1, 2:] + u[1:-1, :-2] - 2.0 * core) / dx2
    v[1:-1, 1:-1] = core + c * (lap_y + lap_x)
def numpy_diff2d_a(u, v, dx2, dy2, c):
    """Diffusion step algebraically refactored: precomputed center coefficient.

    Equivalent to numpy_diff2d with the -2*u terms folded into a single
    multiplier on the center cell.
    """
    cx = c / dx2
    cy = c / dy2
    center_coeff = 1.0 - 2.0 * (cx + cy)
    v[1:-1, 1:-1] = (center_coeff * u[1:-1, 1:-1]
                     + cy * (u[2:, 1:-1] + u[:-2, 1:-1])
                     + cx * (u[1:-1, 2:] + u[1:-1, :-2]))
def numpy_diff2d_b(u, v, dx2, dy2, c):
    """Diffusion step using np.diff second differences.

    Bug fix: the axis-0 (y-direction) second difference was divided by
    ``dx2`` instead of ``dy2``, so the result was only correct on grids
    with dx2 == dy2.  Now consistent with numpy_diff2d / numpy_diff2d_a.
    """
    # np.diff(u, 2, axis=k) has two fewer entries along axis k, so the
    # complementary axis is trimmed with [:, 1:-1] / [1:-1, :] to match
    # the interior of u.
    v[1:-1, 1:-1] = (u[1:-1, 1:-1]
                     + c / dy2 * np.diff(u, 2, axis=0)[:, 1:-1]
                     + c / dx2 * np.diff(u, 2, axis=1)[1:-1, :])
def calc(N, Niter, func, dx2, dy2, c):
    """Run ``Niter`` diffusion steps of ``func`` from a point source.

    The source is a single cell at the grid center with value
    1/sqrt(dx2*dy2) (unit integral).  ``func(src, dst, ...)`` writes one
    step; steps are run in ping-pong pairs so the result lands back in ``u``.
    Returns the final field.
    """
    u = np.zeros((N, N))
    v = np.zeros_like(u)
    u[N // 2, N // 2] = 1.0 / np.sqrt(dx2 * dy2)
    for _ in range(Niter // 2):
        func(u, v, dx2, dy2, c)
        func(v, u, dx2, dy2, c)
    return u
N = 100
dx = 0.1
dy = 0.1
dx2 = dx*dx
dy2 = dy*dy
dt = 0.01
D = 0.1
c = D*dt
print("CLF = ",c/dx2,c/dy2)
u = calc(N,125,numpy_diff2d_b,dx2,dy2,c)
plt.imshow(u)
u = calc(N,125,cython_diff2d,dx2,dy2,c)
plt.imshow(u)
Lx,Ly = N/2*dx,N/2*dy
x = np.linspace(-Lx,Lx,N)
y = np.linspace(-Ly,Ly,N)
X,Y = np.meshgrid(x,y )
Niter = 125
t = dt*Niter
P = 1/(4*np.pi*D*t)*np.exp(-(X**2+Y**2)/(4*D*t) )
plt.contourf(X,Y,P)
plt.axes().set_aspect('equal')
u = calc(N,Niter,cython_diff2d,dx2,dy2,c)
np.sum(P)*dx2,np.sum(u)*dx2
plt.plot(X[X.shape[0]//2,:],P[X.shape[0]//2,:],'b')
plt.plot(X[X.shape[0]//2,:],u[X.shape[0]//2,:],'r')
%%time
u = calc(1000,200,cython_diff2d,dx2,dy2,c)
%%time
u = calc(1000,200,numpy_diff2d,dx2,dy2,c)
%%time
u = calc(1000,200,numpy_diff2d_a,dx2,dy2,c)
%%time
u = calc(1000,200,numpy_diff2d_b,dx2,dy2,c)
N = 1000
fortran_source =
subroutine fortran_diff2d(u, v, dx2, dy2, c)
real(8), intent(inout) :: u({0}, {1})
real(8), intent(inout) :: v({0}, {1})
real(8), intent(in) :: dx2, dy2, c
v(2:{0}-1,2:{1}-1) = u(2:{0}-1,2:{1}-1)+ c*( (u(3:,2:{1}-1)+u(:{0}-2,2:{1}-1))/dy2 + &
(u(2:{0}-1,3:) + u(2:{0}-1,:{1}-2))/dx2)
end subroutine
.format(N,N)
fp = open("myfile.f90", "w")
fp.write(fortran_source)
fp.close()
!cat myfile.f90
%%capture f2py.log
!f2py -c -m my_fortran_module myfile.f90
!ls -l
from my_fortran_module import fortran_diff2d
def calcF(N, Niter, func, dx2, dy2, c):
    """Fortran-order variant of calc: same point-source diffusion driver.

    Allocates the work arrays column-major (order='F') so they can be
    passed to an f2py-wrapped kernel without copies.  Returns the final
    field after Niter ping-pong steps of ``func``.
    """
    u = np.zeros((N, N), order='F')
    v = np.zeros_like(u)
    u[N // 2, N // 2] = 1.0 / np.sqrt(dx2 * dy2)
    for _ in range(Niter // 2):
        func(u, v, dx2, dy2, c)
        func(v, u, dx2, dy2, c)
    return u
%%time
u = calcF(1000,200,fortran_diff2d,dx2,dy2,c)
import subprocess
subprocess.check_output(["pwd"])
import subprocess
import importlib
counter = 12
def prepare_fortran_module(N=100):
# Build a Fortran source file containing the two diffusion kernels for a
# fixed grid size N, compile it with f2py under a fresh unique module name,
# and return the imported module.
# NOTE(review): the triple-quote markers around the Fortran template string
# appear to have been stripped during dataset cleaning -- `fortran_source =`
# is followed by bare Fortran text, so this cell is not valid Python as-is.
global counter
fortran_source =
subroutine fortran_diff2d(u, v, dx2, dy2, c)
real(8), intent(in) :: u({0}, {1})
real(8), intent(inout) :: v({0}, {1})
real(8), intent(in) :: dx2, dy2, c
v(2:{0}-1,2:{1}-1) = u(2:{0}-1,2:{1}-1)+ c*( (u(3:,2:{1}-1)+u(:{0}-2,2:{1}-1))/dy2 + &
(u(2:{0}-1,3:) + u(2:{0}-1,:{1}-2))/dx2)
end subroutine
subroutine fortran_diff2d_a(u, dx2, dy2, c)
real(8), intent(inout) :: u({0}, {1})
real(8), intent(in) :: dx2, dy2, c
u(2:{0}-1,2:{1}-1) = u(2:{0}-1,2:{1}-1)+ c*( (u(3:,2:{1}-1)+u(:{0}-2,2:{1}-1))/dy2 + &
(u(2:{0}-1,3:) + u(2:{0}-1,:{1}-2))/dx2)
end subroutine
.format(N,N)
fp = open("myfile.f90", "w")
fp.write(fortran_source)
fp.close()
# `counter` gives every build a unique module name; importlib caches modules
# by name, so recompiling under a reused name would import the stale build.
counter=counter+1
try:
output = subprocess.check_output(["f2py", "-c","-m", "fortran_module%05d"%counter, "myfile.f90"])
m = importlib.import_module("fortran_module%05d"%counter)
except:
# NOTE(review): bare `except:` swallows everything (even KeyboardInterrupt),
# and if check_output itself raised, `output` is unbound here, so the
# `return output` below would raise NameError.
print ("problem z kompilacja!")
return output
return m
fortran_module = prepare_fortran_module(N=1000)
fortran_diff2d, fortran_diff2d_a = fortran_module.fortran_diff2d, fortran_module.fortran_diff2d_a
# NOTE(review): this is a byte-identical redefinition of calcF from earlier
# in the notebook (likely re-run after recompiling the Fortran module);
# consider deleting one copy.
def calcF(N, Niter, func, dx2,dy2,c):
# Fortran-order (column-major) point-source diffusion driver: seed the grid
# center with 1/sqrt(dx2*dy2), then run Niter steps as ping-pong pairs so
# the result ends up back in u.
u = np.zeros([N, N],order='F')
v = np.zeros_like(u)
u[u.shape[0]//2,u.shape[1]//2] = 1.0/np.sqrt(dx2*dy2)
for i in range(Niter//2):
func(u,v,dx2,dy2,c)
func(v,u,dx2,dy2,c)
return u
N = 1000
fortran_module = prepare_fortran_module(N=N)
fortran_diff2d, fortran_diff2d_a = fortran_module.fortran_diff2d, fortran_module.fortran_diff2d_a
u = calcF(N,1225,fortran_diff2d,dx2,dy2,c)
plt.imshow(u)
def calcF_a(N, Niter, func, dx2, dy2, c):
    """Driver for in-place kernels: ``func`` updates ``u`` directly each step.

    Unlike calcF, no ping-pong buffer is needed because the kernel writes
    back into its input.  Fix: dropped the unused ``v`` scratch array (an
    N x N allocation that was never read or written).
    """
    u = np.zeros([N, N], order='F')
    u[N // 2, N // 2] = 1.0 / np.sqrt(dx2 * dy2)
    for _ in range(Niter):
        func(u, dx2, dy2, c)
    return u
u = calcF_a(1000,1225,fortran_diff2d_a,dx2,dy2,c)
plt.imshow(u)
%%time
u = calc(1000,200,numpy_diff2d_a,dx2,dy2,c)
%%time
u = calc(1000,200,cython_diff2d,dx2,dy2,c)
fortran_diff2d = prepare_fortran_module(N=1000).fortran_diff2d
%%time
u = calcF(1000,200,fortran_diff2d,dx2,dy2,c)
%%time
u = calcF_a(1000,200,fortran_diff2d_a,dx2,dy2,c)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parametry symulacji
Step2: Walidacja wyników
Step3: znamy rozwiązanie równania dyfuzji na nieskończonym obszarze startujące z punktu
Step5: Benchmarks
Step7: Najszybsza wersja - wektorowy Fortran
Step8: teraz możemy skompilować wielokrotnie ten sam kod i załadować modluł
|
3,501
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
x=pd.DataFrame() #Mejor hasta ahora
for m in range(1995,2018):
if m < 2016:
o='.xlsx'
else:
o='.xls'
if m < 2000:
sK=3
else:
sK=2
n='Precio_Bolsa_Nacional_($kwh)_' + str(m) + o
y=pd.read_excel(n, skiprows=sK, parse_cols=24)
x= x.append(y)
print(x.head())
print(x.tail())
print(str(len(x.index)) + ' Filas y ' + str(len(x.columns)) + ' Columnas')
print('Hay un total de ' + str(len(x)-len(x.dropna())) + ' registros con datos faltantes.')
print('Hay un total de ' + str(len(x)-len(x.drop_duplicates())) + ' registros con datos duplicados.')
print('Hay un total de ' + str(len(x))+' Registros.')
z=x.dropna().drop_duplicates()
print('Al eliminar los registros duplicados o con datos faltantes quedan ' + str(len(z))+' registros completos.')
print('Finalmente quedan ' + str((len(z)-len(z.dropna()))+(len(z)-len(z.drop_duplicates()))) +
' registros duplicados o con datos faltantes, es decir, ninguno.')
import matplotlib
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
z['Prom']=z.mean(axis=1)
z.groupby('Fecha').mean()['Prom'].plot(kind='line', title='Precio Promedio Diario Erroneamente comenzando en 1996')
w=[]
for n in range(len(z['Fecha'])):
w.append(str(z.iloc[n,0])[0:10])
z['Fecha']=w
z['Prom']=z.mean(axis=1)
z.groupby('Fecha').mean()['Prom'].plot(kind='line', title='Precio Promedio Diario Correcto comenzando en 1995', y='FFF')
z['Max']=z.max(axis=1)
w=[]
for n in range(len(z['Fecha'])):
w.append(str(z.iloc[n,0])[0:7])
z['Ano-Mes']=w
z.groupby('Ano-Mes').max()['Max'].plot(kind='line', title='Precio Máximo Mensual')
z['Min']=z.min(axis=1)
z.groupby('Ano-Mes').min()['Min'].plot(kind='line', title='Precio Mínimo Mensual')
z.groupby('Ano-Mes').max()['Max'].plot(kind='line', legend='true')
z.groupby('Ano-Mes').mean()['Prom'].plot(kind='line', legend='true', title='Comparación Precio Promedio y Precio Máximo Mensual')
import datetime
w=[]
v=[]
for n in range(len(z['Fecha'])):
temp=str(z.iloc[n,0])
ano, mes, dia = temp.split('-')
dia=str(dia)[0:3]
year=int(ano)
month=int(mes)
day=int(dia)
semnum=datetime.date(year, month, day).weekday()
if semnum>4:
Labor=0
else:
Labor=1
w.append(semnum)
v.append(Labor)
z['Semana']=w
z[['Semana']]=z[['Semana']].apply(pd.to_numeric)
z['Labor']=v
z[['Labor']]=z[['Labor']].apply(pd.to_numeric)
w=[]
w=z[z['Labor']==1]
v=[]
for n in range(len(w['Fecha'])):
for m in range(1,25):
if w.iloc[n,m]==w.iloc[n,26]:
v.append(m-1)
continue
ParaHist=pd.DataFrame()
ParaHist['Maximo para Laborales']=v
ParaHist.plot.hist(alpha=0.5, title='Histograma con el Precio Maximo Diario para dias Laborales')
s=[]
zZ=[]
tT=[]
for n in range(len(w['Fecha'])):
s=w.iloc[n].values[1:25]
tT=[i for i, e in enumerate(s) if e == max(s)]
zZ.append(tT[0])
tT=[]
continue
ParaHist=pd.DataFrame()
ParaHist['Maximo para Laborales']=zZ
ParaHist.plot.hist(alpha=0.5, title='Histograma con el Precio Maximo Diario para dias Laborales')
w=z[z['Semana']==5]
v=[]
for n in range(len(w['Fecha'])):
for m in range(1,25):
if w.iloc[n,m]==w.iloc[n,26]:
v.append(m-1)
continue
ParaHist=pd.DataFrame()
ParaHist['Maximo para Sabados']=v
ParaHist.plot.hist(alpha=0.5, title='Histograma con el Precio Maximo Diario para los Sabados')
s=[]
zZ=[]
tT=[]
for n in range(len(w['Fecha'])):
s=w.iloc[n].values[1:25]
tT=[i for i, e in enumerate(s) if e == max(s)]
zZ.append(tT[0])
tT=[]
continue
ParaHist=pd.DataFrame()
ParaHist['Maximo para Sabados']=zZ
ParaHist.plot.hist(alpha=0.5, title='Histograma con el Precio Maximo Diario para los Sabados')
w=z[z['Semana']==6]
v=[]
for n in range(len(w['Fecha'])):
for m in range(1,25):
if w.iloc[n,m]==w.iloc[n,26]:
v.append(m-1)
continue
ParaHist=pd.DataFrame()
ParaHist['Maximo para Domingos']=v
ParaHist.plot.hist(alpha=0.5, title='Histograma con el Precio Maximo Diario para los Domingos')
s=[]
zZ=[]
tT=[]
for n in range(len(w['Fecha'])):
s=w.iloc[n].values[1:25]
tT=[i for i, e in enumerate(s) if e == max(s)]
zZ.append(tT[0])
tT=[]
continue
ParaHist=pd.DataFrame()
ParaHist['Maximo para Domingos']=zZ
ParaHist.plot.hist(alpha=0.5, title='Histograma con el Precio Maximo Diario para los Domingos')
w=[]
for n in range(len(z['Fecha'])):
w.append(str(z.iloc[n,0])[0:4])
z['Ano']=w
w=pd.DataFrame()
w=z.groupby(['Ano']).min()['Min']
w
Lt=pd.DataFrame()
Lt['Fecha']=z['Fecha']
Lt['Ano-Mes']=z['Ano-Mes']
Lt['Prom']=z['Prom']
print(Lt.groupby(Lt['Ano-Mes']).mean().plot(kind='line',legend='false'))
print(Lt.groupby(Lt['Fecha']).mean().plot(kind='line',legend='false'))
z.groupby('Fecha').mean()['Prom'].plot(kind='line', legend='false')
z.groupby('Ano-Mes').mean()['Prom'].plot(kind='line', legend='false')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.-- Compute e imprima el número de registros con datos faltantes.
Step2: 3.-- Compute e imprima el número de registros duplicados.
Step3: 4.-- Elimine los registros con datos duplicados o datos faltantes, e imprima la cantidad de registros que quedan (registros completos).
Step4: 5.-- Compute y grafique el precio primedio diario.
Step5: Ya que la grafica comienza en 1996, se corrigen los valores de la fecha para verla completa
Step6: 6.-- Compute y grafique el precio máximo por mes.
Step7: 7.-- Compute y grafique el precio mínimo mensual.
Step8: 8.-- Haga un gráfico para comparar el precio máximo del mes (para cada mes) y el precio promedio mensual.
Step9: 9.-- Haga un histograma que muestre a que horas se produce el máximo precio diario para los días laborales.
Step10: Este metodo tiene en cuenta el valor maximo diario, y todas las horas a las cuales se presenta.
Step11: Este metodo tiene en cuenta unicamente la primera hora a la cual se presenta el valor maximo en el dia
Step12: Este metodo tiene en cuenta el valor maximo diario, y todas las horas a las cuales se presenta.
Step13: Este metodo tiene en cuenta unicamente la primera hora a la cual se presenta el valor maximo en el dia
Step14: Este metodo tiene en cuenta el valor maximo diario, y todas las horas a las cuales se presenta.
Step15: Este metodo tiene en cuenta unicamente la primera hora a la cual se presenta el valor maximo en el dia
Step16: 13.-- Haga una gráfica en que se muestre el precio promedio diario y el precio promedio mensual.
|
3,502
|
<ASSISTANT_TASK:>
Python Code:
from pymldb import Connection
mldb = Connection()
%%bash
mkdir -p /mldb_data/data
curl "file://mldb/mldb_test_data/ml-20m.zip" 2>/dev/null > /mldb_data/data/ml-20m.zip
unzip /mldb_data/data/ml-20m.zip -d /mldb_data/data
%%bash
head /mldb_data/data/ml-20m/README.txt
%%bash
head /mldb_data/data/ml-20m/ratings.csv
%%time
print mldb.put('/v1/procedures/import_mvlns', {
"type": "import.text",
"params": {
"dataFileUrl":"file:///mldb_data/data/ml-20m/ratings.csv",
"outputDataset": "mvlns_ratings_csv",
"runOnCreation": True
}
})
print mldb.put('/v1/procedures/process_mvlns', {
"type": "transform",
"params": {
"inputData":
select pivot(movieId, rating) as *
named userId
from mvlns_ratings_csv
group by userId
,
"outputDataset": "mvlns_ratings",
"runOnCreation": True
}
})
mldb.query("select * from mvlns_ratings limit 3")
print mldb.put('/v1/procedures/mvlns_svd', {
"type" : "svd.train",
"params" : {
"trainingData" : "select COLUMN EXPR (where rowCount() > 3) from mvlns_ratings",
"columnOutputDataset" : "mvlns_svd_embedding",
"modelFileUrl": "file://models/mvlns.svd",
"functionName": "mvlns_svd_embedder",
"runOnCreation": True
}
})
from ipywidgets import interact, interact_manual
from uuid import uuid4
print mldb.put('/v1/procedures/import_movies', {
"type": "import.text",
"params": {
"dataFileUrl":"file:///mldb_data/data/ml-20m/movies.csv",
"outputDataset": "movies",
"select": "title, movieId",
"named": "movieId",
"runOnCreation": True
}
})
@interact
def movie_search(x = "toy story"):
# Case-insensitive substring search over movie titles: returns the matching
# rows (title, indexed by movieId) from the `movies` dataset through the
# module-level `mldb` connection.  @interact renders a live search box.
# NOTE(review): `x` is interpolated into the regex unescaped -- a search
# term containing regex metacharacters would change the match; confirm
# whether that is acceptable for this demo.
return mldb.query("select title from movies where regex_match(lower(title), '.*%s.*')" % x.strip().lower())
print mldb.put("/v1/datasets/mvlns_user_prefs", {"type": "sparse.mutable"})
print mldb.put("/v1/functions/preferences", {
"type": "sql.query",
"params": {
"query": "select {*} as p from mvlns_user_prefs where rowName()=$user"
}
})
def save_prefs(user_id, likes, dislikes):
# Record simulated ratings for `user_id`: every movie whose title matches a
# comma-separated term in `likes` gets rating 5, every match from
# `dislikes` gets rating 1, posted to the mvlns_user_prefs dataset.
for rating, search_terms in zip([5,1],[likes, dislikes]):
for x in search_terms.split(","):
# skip very short fragments that would match too many titles
if len(x) > 3:
mldb.post("/v1/datasets/mvlns_user_prefs/rows", {
"rowName":user_id,
"columns": [[str(m), rating, 0] for m in movie_search(x).index]
})
# commit makes the posted rows queryable
# NOTE(review): the notebook's original indentation was lost in this dump,
# so whether the commit sits inside or after the loops cannot be confirmed.
mldb.post("/v1/datasets/mvlns_user_prefs/commit", {})
save_prefs("janedoe", "Toy Story", "Terminator")
mldb.query("select preferences({ user: 'janedoe' })[p] as *")
print mldb.put("/v1/functions/nearest_movies", {
"type": "embedding.neighbors",
"params": {
"dataset": "mvlns_svd_embedding",
"defaultNumNeighbors": 25,
"columnName": "embedding"
}
})
print mldb.put("/v1/functions/recommendations", {
"type": "sql.query",
"params": {
"query":
select nearest_movies({
coords: mvlns_svd_embedder({
row: preferences({ user: $user })[p]
})[embedding]
})[distances] as r
}
})
def recommend(likes="Toy Story, Terminator", dislikes="Star Trek"):
# Simulate a brand-new user rating movies, then return their ranked
# recommendations as a table of titles.
# NOTE(review): the triple-quote markers around the SQL string passed to
# mldb.query appear to have been stripped during dataset cleaning, so the
# bare SQL text below is not valid Python as-is.
# here we simulate a new user saving these preferences
user_id = str(uuid4())
save_prefs(user_id, likes, dislikes)
# we can then run an SQL query to:
# - retrieve recommendations
# - transpose and join them to movies to get titles
# - exclude the already-rated movies from the result
return mldb.query(
select m.title
named m.movieId
from
transpose(( select recommendations({ user: '%(user)s' }) )) as r
join movies as m on r.rowPathElement(2) = m.rowPathElement(0)
where m.movieId not in (keys of preferences({ user: '%(user)s' })[p])
order by r.result
% dict(user=user_id))
recommend(likes="Toy Story, Terminator", dislikes="Star Trek")
interact_manual(recommend)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download the MovieLens 20M data
Step3: Load the data into MLDB
Step4: Take a peek at the dataset
Step5: Singular Value Decomposition (SVD)
Step6: Explore the results!
Step7: A simple search function to find all movies (and corresponding movieIds) whose names contain a string.
Step8: Now let's create a dataset to hold user preferences, and a simple function to simulate a user rating movies they like and movies they dislike, based on the movie_search function above.
Step10: With all that done, we can now build a recommendation engine out of a simple SQL query by mapping a user's preferences into the same space as the movie embeddings (i.e. embedding the user's preferences) and looking for the nearest movies.
Step12: Here's a simple function which lets you simulate the results of liking and disliking certain movies and getting back the resulting recommendations.
Step13: Here's an interactive form that lets you play with this function to see if you agree with the recommendations!
|
3,503
|
<ASSISTANT_TASK:>
Python Code:
N = 6000
known_labels_ratio = 0.1
X, y = make_moons(n_samples=N, noise=0.1, shuffle=True)
rp = np.random.permutation(int(N/2))
data_P = X[y==1][rp[:int(len(rp)*known_labels_ratio)]]
data_U = np.concatenate((X[y==1][rp[int(len(rp)*known_labels_ratio):]], X[y==0]), axis=0)
print("Amount of labeled samples: %d" % (data_P.shape[0]))
plt.figure(figsize=(8, 4.5))
plt.scatter(data_U[:, 0], data_U[:, 1], c='k', marker='.', linewidth=1, s=1, alpha=0.5, label='Unlabeled')
plt.scatter(data_P[:, 0], data_P[:, 1], c='b', marker='o', linewidth=0, s=20, alpha=0.5, label='Positive')
plt.grid()
plt.legend()
# Transductive PU (positive-unlabeled) learning by bagging: repeatedly
# bootstrap the unlabeled set, train a positive-vs-bootstrap classifier, and
# score each unlabeled point only when it is out-of-bag for that round.
NP = data_P.shape[0]
NU = data_U.shape[0]
# T bootstrap rounds; each round draws K = |P| unlabeled points so the
# positive class and the assumed-negative bootstrap are balanced.
T = 1000
K = NP
train_label = np.zeros(shape=(NP+K,))
train_label[:NP] = 1.0
# accumulators over the unlabeled set: out-of-bag hit counts and summed
# class-probability votes
n_oob = np.zeros(shape=(NU,))
f_oob = np.zeros(shape=(NU, 2))
for i in range(T):
# Bootstrap resample
bootstrap_sample = np.random.choice(np.arange(NU), replace=True, size=K)
# Positive set + bootstrapped unlabeled set
data_bootstrap = np.concatenate((data_P, data_U[bootstrap_sample, :]), axis=0)
# Train model
model = DecisionTreeClassifier(max_depth=None, max_features=None,
criterion='gini', class_weight='balanced')
model.fit(data_bootstrap, train_label)
# Index for the out of the bag (oob) samples
idx_oob = sorted(set(range(NU)) - set(np.unique(bootstrap_sample)))
# Transductive learning of oob samples
f_oob[idx_oob] += model.predict_proba(data_U[idx_oob])
n_oob[idx_oob] += 1
# average the votes: estimated positive-class probability per unlabeled
# sample (NaN if a sample was never out-of-bag, which is unlikely for T=1000)
predict_proba = f_oob[:, 1]/n_oob
# Plot the class probabilities for the unlabeled samples
fig = plt.figure(figsize=(12, 4))
ax1 = fig.add_subplot(1, 2, 1)
sp= ax1.scatter(data_U[:, 0], data_U[:, 1], c=predict_proba,
linewidth=0, s=5, alpha=0.5, cmap=plt.cm.plasma, label='unlabeled')
plt.grid()
plt.colorbar(sp, label='Class probability on Unlabeled set')
true_labels = np.zeros(shape=(data_U.shape[0]))
true_labels[:int(len(rp)*(1.0-known_labels_ratio))] = 1.0
precision, recall, th = precision_recall_curve(true_labels, predict_proba)
ax2 = fig.add_subplot(1, 2, 2)
f1s = precision[:-1]*recall[:-1]
ax2.plot(th, f1s, linewidth=2, alpha=0.5)
best_th = np.argmax(f1s)
ax2.plot(th[best_th], f1s[best_th], c='r', marker='o')
ax2.plot([th[best_th], th[best_th]], [0.0,f1s[best_th] ], 'r--')
ax2.plot([0.0, th[best_th]], [f1s[best_th], f1s[best_th] ], 'r--')
ax2.annotate('Pre: %0.3f, Rec: %0.3f' %(precision[best_th], recall[best_th]),
xy=(th[best_th]+ 0.01, f1s[best_th]-0.05))
ax2.set_ylabel('F1 score')
ax2.set_xlabel('Probability threshold')
plt.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Transductive PU learning
|
3,504
|
<ASSISTANT_TASK:>
Python Code:
DISPLAY_ROWS = 6 # screen is 6 pixels tall
DISPLAY_COLS = 50 # screen is 50 pixels wide
display = [ # set display pixels to False
[False for i in range(0, DISPLAY_COLS)]
for i in range(0, DISPLAY_ROWS)]
def rect(display, a, b):
    """Turn on every pixel in an ``a``-wide by ``b``-tall rectangle anchored
    at the top-left corner of the screen (``a`` counts columns, ``b`` rows)."""
    for r in range(b):
        row = display[r]
        for col in range(a):
            row[col] = True
def rotate_row(display, a, b):
    """Rotate row ``a`` (0 = top) right by ``b`` pixels, wrapping around.

    Fix: ``b`` is reduced modulo the row width, so a shift equal to (or
    larger than) the width now wraps correctly -- previously
    ``row[-b:]`` returned the whole row once ``b >= len(row)``, destroying
    the rotation.
    """
    b %= len(display[a])
    if b:
        display[a] = display[a][-b:] + display[a][:-b]
def rotate_col(display, a, b):
    """Rotate column ``a`` (0 = left) down by ``b`` pixels, wrapping around.

    Generalized: the screen height is taken from ``len(display)`` instead of
    the module-level DISPLAY_ROWS constant (same value at existing call
    sites), and ``b`` is reduced modulo the height so oversized shifts wrap
    correctly.
    """
    rows = len(display)
    b %= rows
    column = [display[i][a] for i in range(rows)]
    shifted = column[rows - b:] + column[:rows - b]
    for i, value in enumerate(shifted):
        display[i][a] = value
import re
instructions = {
re.compile(r'rect (\d+)x(\d+)'): rect,
re.compile(r'rotate row y=(\d+) by (\d+)'): rotate_row,
re.compile(r'rotate column x=(\d+) by (\d+)'): rotate_col,
}
with open('../inputs/day08.txt', 'r') as f:
data = [line.strip() for line in f.readlines()]
# TEST DATA from problem description
# data = [
# 'rect 3x2',
# 'rotate column x=1 by 1',
# 'rotate row y=0 by 4',
# 'rotate column x=1 by 1'
# ]
for line in data:
for pattern, handler in instructions.items():
match = pattern.match(line)
if match:
inputs = [int(i) for i in match.groups()]
handler(display, *inputs)
break
else:
print('ERROR', line)
def display_pixels_lit(display):
    """Return how many pixels on the screen are ON (truthy)."""
    return sum(map(sum, display))
display_pixels_lit(display)
def print_display(display):
    """Render the screen to stdout: '#' marks an ON pixel, '.' an OFF one."""
    for row in display:
        line = ''
        for pixel in row:
            line += '#' if pixel else '.'
        print(line)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Instructions patterns and handlers
Step2: Retrieving the input
Step3: Counting number of pixels that are ON
Step4: Part Two
|
3,505
|
<ASSISTANT_TASK:>
Python Code:
import skrf as rf
ring_slot = rf.Network('data/ring slot.s2p')
from skrf.data import ring_slot
ring_slot
short = rf.data.wr2p2_short
delayshort = rf.data.wr2p2_delayshort
short - delayshort
short/delayshort
short = rf.data.wr2p2_short
line = rf.data.wr2p2_line
delayshort = line ** short
short = line.inv ** delayshort
# display plots in notebook
%matplotlib inline
from pylab import *
rf.stylely()
ring_slot.plot_s_db()
ring_slot.plot_s_deg(m=0,n=1)
ring_slot.plot_s_smith(lw=2)
title('Big ole Smith Chart')
rf.read_all('data/', contains='ro')
from skrf import NetworkSet
ro_dict = rf.read_all('data/', contains='ro')
ro_ns = NetworkSet(ro_dict, name='ro set') # name is optional
ro_ns
ro_ns.mean_s
ro_ns.std_s
ro_ns.std_s.plot_s_mag(label='S11')
ylabel('Standard Deviation')
title('Standard Deviation of RO');
ro_ns.plot_uncertainty_bounds_s_db(label='S11');
from skrf import Frequency
from skrf.media import CPW, Coaxial
freq = Frequency(75,110,101,'ghz')
cpw = CPW(freq, w=10e-6, s=5e-6, ep_r=10.6)
cpw
cpw.line(d=90,unit='deg', name='line')
freq = Frequency(1,10,101,'ghz')
coax = Coaxial(frequency=freq, Dint=1e-3, Dout=2e-3)
coax
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If this produces an error, please see the installation tutorial.
Step2: If you cant find ring slot.s2p, then just import it from the skrf.data module.
Step3: A short description of the network will be printed out if entered onto the command line
Step4: The basic attributes of a microwave Network are provided by the
Step5: The complex difference between their s-parameters is computed with
Step6: This returns a new Network. Other arrimetic operators are overloaded as well,
Step7: Cascading and De-embedding
Step8: De-embedding can be accomplished by cascading the inverse of a network. The inverse of a network is accessed through the property Network.inv.
Step9: For more information on the functionality provided by the Network object, such as interpolation, stitching, n-port connections, and IO support see the Networks tutorial.
Step10: The methods of the Network class provide convenient ways to plot components of the network parameters,
Step11: Or plot the phase of $S_{12}$
Step12: For more detailed information about plotting see the Plotting tutorial
Step13: This dictionary can be passed directly to the NeworkSet constructor,
Step14: NeworkSet's are list-like.
Step15: The returned results are stored in a Networks s-parameters, regardless of the type of the output. Similarly, to calculate the complex standard deviation of the set,
Step16: Because these methods return a Network object the results can be
Step17: Plotting Uncertainty Bounds
Step18: See the networkset tutorial for more information.
Step19: Coax
|
3,506
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.model_selection import train_test_split, KFold
from sklearn.linear_model import LinearRegression, Ridge, SGDRegressor, ElasticNet
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from yellowbrick.features import Rank2D, JointPlotVisualizer
from yellowbrick.regressor import ResidualsPlot, PredictionError, ManualAlphaSelection, CooksDistance
from yellowbrick.model_selection import cv_scores, LearningCurve, FeatureImportances, ValidationCurve
import pandas as pd
import numpy as np
# GET THE CURRENT WORKING DIRECTORY SO YOU CAN LOAD THE PATH TO THE WO_MEN.XLSX FILE
import os
os.getcwd()
# YOU WILL HAVE TO INSTALL OPENPYXL - pip install openpyxl - TO BE ABLE TO OPEN EXCEL FILES WITH PANDAS
df = pd.read_excel('data/wo_men.xlsx', sheet_name='wo_men')
df.head(2)
ds = df.drop(['time', 'sex', 'height', 'shoe_size - German', 'height in feet - String', 'height in inches'], axis=1)
ds.shape
ds.columns
X = ds.drop(['shoe_size-american'], axis=1)
y = ds['shoe_size-american']
viz = Rank2D(algorithm='pearson')
viz.fit_transform(ds)
viz.show()
viz = JointPlotVisualizer(columns=['Height in Feet', 'shoe_size-american'])
viz.fit_transform(ds)
viz.show()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=42)
models = [
LinearRegression(),
Ridge(alpha=2),
SGDRegressor(max_iter=100),
KernelRidge(alpha=2),
SVR(),
RandomForestRegressor(n_estimators=5),
GradientBoostingRegressor(n_estimators=5)
]
def visualize_model(X, y, estimator, **kwargs):
    """Fit ``estimator`` and show its residuals and prediction-error plots.

    Bug fix: the PredictionError section previously used the *global*
    ``model`` variable instead of the ``estimator`` parameter -- it only
    worked because the caller's loop variable happened to be named
    ``model``.
    """
    viz = ResidualsPlot(estimator, **kwargs)
    viz.fit(X.values, y)
    viz.score(X.values, y)
    viz.show()
    # NOTE(review): this half still reads X_train/X_test/y_train/y_test from
    # module scope rather than the X/y arguments -- confirm that mixing the
    # full data (residuals) with the split data (prediction error) is intended.
    viz = PredictionError(estimator)
    viz.fit(X_train.values, y_train)
    viz.score(X_test.values, y_test)
    viz.show()
for model in models:
visualize_model(X, y, model)
model = RandomForestRegressor(n_estimators=5)
cv = KFold(n_splits=5, shuffle=True, random_state=42)
viz = cv_scores(model, X, y, cv=cv, scoring='r2')
viz = LearningCurve(model, cv=cv, scoring='r2', )
viz.fit(X, y)
viz.show()
viz = FeatureImportances(model, stack=True, relative=False)
viz.fit(X, y)
viz.show()
viz = ValidationCurve(RandomForestRegressor(), param_name='n_estimators', param_range=range(1, 10), cv=cv, scoring='r2')
viz.fit(X, y)
viz.show()
alphas = np.logspace(1, 2, 20)
viz = ManualAlphaSelection(Ridge(), alphas=alphas, cv=cv, scoring='r2')
viz.fit(X, y)
viz.show()
viz = CooksDistance()
viz.fit(X, y)
viz.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: **DATASET WAS TAKEN FROM https
Step2: Women is coded as 1 vs Man being 0 so that's why there is negative correlation between sex and shoe size
Step3: SPECIFIC MODEL TUNING
Step4: RANDOM FOREST REGRESSOR HYPERPARAMETER TUNING
Step5: USE MODEL THAT HAS ALPHA AS PARAMETERS - RIDGE
|
3,507
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# read data
df = pd.read_csv('HR_comma_sep.csv')
# print first rows
df.head()
# print info, we have no nulls
df.info()
# describe numeric columns
# satisfaction_level and last_evaluation seems percentages
# work_accident, left and promotion are booleans
df.describe()
# describe object columns and print unique values
print(df[['sales', 'salary']].describe())
print(df.sales.unique())
print(df.salary.unique())
n_employees = len(df)
left = df.left.sum()
accident = df.Work_accident.sum()
accident_left = len(df[(df['Work_accident'] == 1) & (df['left'] == 1)])
# probability that a randomly selected employee left the company
print(left/n_employees)
# probability that experienced a work accident
print(accident/n_employees)
# probability that a randomly selected employee left the company and experienced a work accident
print(accident_left/n_employees)
# Creating two dataframes, one for employees who left and one for those who stayed
df_left = df[df['left'] == 1]
df_stayed = df[df['left'] == 0]
# Compute the 25th, 50th, and 90th percentiles for the satisfaction level score for all employees that left the company.
print('Employees who left 25th, 50th and 90th percentile: {}, {}, {}'
.format(df_left.satisfaction_level.quantile(0.25),
df_left.satisfaction_level.quantile(0.5),
df_left.satisfaction_level.quantile(0.9)))
# Compare these results to the same percentiles for those that did not leave. What can you say about the results?
print('Employees who stayed 25th, 50th and 90th percentile: {}, {}, {}'
.format(df_stayed.satisfaction_level.quantile(0.25),
df_stayed.satisfaction_level.quantile(0.5),
df_stayed.satisfaction_level.quantile(0.9)))
# Compute the variance and standard deviation of hours worked.
print(df.average_montly_hours.var())
print(df.average_montly_hours.std())
# Compare the variance between the satisfaction levels of employees who left versus those who stayed.
# Which is larger? What does this mean?
print(df_left.satisfaction_level.var())
print(df_stayed.satisfaction_level.var())
# Compute the mean satisfaction level for each salary category. Comment on your results.
df.groupby('salary').satisfaction_level.mean()
# Given an employees salary level (low, medium, or high), calculate the probability that
# they worked more than two standard deviations of the average monthly hours across all groups.
# In other words, compute P(hours > 2*sigma|salary) = P(salary|hours > 2*\sigma)*P(hours > 2*sigma)/P(salary)
# Creating a dataset for each salary level
df_low = df[df['salary'] == 'low']
df_medium = df[df['salary'] == 'medium']
df_high = df[df['salary'] == 'high']
# And one for employees who have worked more than two std above
sigma_hours = df.average_montly_hours.mean() + 2*df.average_montly_hours.std()
df_above = df[df['average_montly_hours'] > sigma_hours]
P_hours = len(df_above) / n_employees
for d, level in zip([df_low, df_medium, df_high], ['low', 'medium', 'high']):
P_salary = len(d) / n_employees
P_salary_hours = len(df_above[df_above['salary'] == level]) / len(df_above)
P = (P_salary_hours * P_hours) / P_salary
P_hours_salary = len(d[d['average_montly_hours'] > sigma_hours]) / len(d)
print('{} salary level probability: {:.5f}, {:.5f}'.format(level, P, P_hours_salary))
# Repeat parts 6 and 7 for P(left|salary) = P(salary|left)*P(left)/P(salary)
P_left = len(df_left) / n_employees
for d, level in zip([df_low, df_medium, df_high], ['low', 'medium', 'high']):
P_salary = len(d) / n_employees
P_salary_left = len(df_left[df_left['salary'] == level]) / len(df_left)
P = (P_salary_left * P_left) / P_salary
P_left_salary = len(d[d['left'] == 1]) / len(d)
print('{} salary level probability: {:.5f}, {:.5f}'.format(level, P, P_left_salary))
# What is the odds ratio of an employee with a high salary getting a promotion
# within the past five years versus a low salary employee? Comment on your results.
p_high = df_high.promotion_last_5years.value_counts() / len(df_high)
p_low = df_low.promotion_last_5years.value_counts() / len(df_low)
print(p_high)
print(p_low)
print((p_high[1] / p_high[0]) / (p_low[1] / p_low[0]))
import random
# Demonstrate your assertions by writing some python code to do just that.
random.seed(7)
size = 50
s = random.sample(range(0, n_employees), size)
print('Dataset mean: {}\nSample mean: {}'.format(df.satisfaction_level.mean(), df.iloc[s].satisfaction_level.mean()))
random.seed(7)
size = 50
n_samples = 10
mean = 0
for i in range(n_samples):
s = random.sample(range(0, n_employees), size)
mean += df.iloc[s].satisfaction_level.mean()
mean = mean / n_samples
print('Dataset mean: {}\nMean of sample means: {}'.format(df.satisfaction_level.mean(), mean))
from scipy import stats
# For the variables you identified in part 1, compute the probabilities p_k, of each having a positive (x = 1) result,
# where k is a placeholder for each variable.
bernoulli = ['Work_accident', 'left', 'promotion_last_5years']
for var in bernoulli:
    # these columns are 0/1, so the column sum counts positives
    print('probability for {}: {:.5f}'.format(var, df[var].sum() / n_employees))
# Compute the variance of each of the variables in part 2 using p_k as described above.
for var in bernoulli:
    p = df[var].sum() / n_employees
    # Bernoulli variance is p*(1-p)
    print('variance for {}: {:.5f}'.format(var, p*(1-p)))
# For each of the k variables, compute the probability of randomly selecting 3500 employees with a positive result.
# Comment on your answer.
x = 3500
for var in bernoulli:
    p = df[var].sum() / n_employees
    print('PMF for {}: {:.5f}'.format(var, stats.binom.pmf(x, n_employees, p)))
# For each of the k variables, compute the probability of randomly selecting 3500 or less with a positive result.
# Comment on your answer.
x = 3500
for var in bernoulli:
    p = df[var].sum() / n_employees
    print('CDF for {}: {:.5f}'.format(var, stats.binom.cdf(x, n_employees, p)))
# Now plot both the PMF and CDF as a function of the number of drawn samples for each of the k variables.
# Comment on your results.
x = np.arange(0, n_employees)
fig, ax = plt.subplots(3, 2, figsize=(16, 15))
i = j = 0  # j is never used below; only row index i advances
for var in bernoulli:
    p = df[var].sum() / n_employees
    ax[i, 0].plot(x, stats.binom.pmf(x, n_employees, p))
    ax[i, 0].set_title(var +' PMF')
    ax[i, 1].plot(x, stats.binom.cdf(x, n_employees, p))
    ax[i, 1].set_title(var + ' CDF')
    i += 1
# For the variables in part 1, plot some histograms.
normal = ['satisfaction_level', 'last_evaluation', 'number_project', 'average_montly_hours']
fig, ax = plt.subplots(2, 2, figsize=(16, 10))
i = j = 0  # i/j walk the 2x2 grid of axes row by row
for var in normal:
    if j == 2:
        i += 1
        j = 0
    ax[i, j].hist(df[var], bins=50)
    ax[i, j].set_title(var)
    j += 1
# Compute the mean and variance for each of the variables used in parts 1 and 2.
for var in normal:
    print('{}\n\tmean = {:.5f}\n\tvariance = {:.5f}'.format(var, df[var].mean(), df[var].var()))
# Using the mean and variance in part 3, construct normal distributions for each
# and overlay them on top of the histograms you made in part one.
# Are they well approximated by normals?
fig, ax = plt.subplots(2, 2, figsize=(16, 10))
i = j = 0
for var in normal:
    x = np.linspace(min(df[var]), max(df[var]), 1000)
    Z = stats.norm.pdf(x, loc=df[var].mean(), scale=df[var].std())
    if j == 2:
        i += 1
        j = 0
    # NOTE(review): `normed=True` is the legacy matplotlib argument (now
    # `density=True`); assumes an older matplotlib — confirm version.
    ax[i, j].hist(df[var], bins=50, normed=True)
    ax[i, j].set_title(var)
    ax[i, j].plot(x, Z)
    j += 1
# For each variable in part 1, divide each by salary and fit a Poisson distribution to each.
poisson = ['time_spend_company']
for var in poisson:
    for level in ['low', 'medium', 'high']:
        # Poisson MLE for the rate is simply the sample mean of the group
        mu = df[df['salary'] == level][var].mean()
        fit = stats.poisson(mu)
        print('{} {} mean and Poisson fit mean: {:.5f}, {:.5f}'.format(var, level, mu, fit.mean()))
x = np.arange(0, 20)
fig, ax = plt.subplots(3, 1, figsize=(16, 15))
i = 0
for var in poisson:
    for level in ['low', 'medium', 'high']:
        mu = df[df['salary'] == level][var].mean()
        ax[i].set_title(var + ' ' + level + ' salary')
        ax[i].plot(x, stats.poisson.pmf(x, mu))
        ax[i].hist(df[df['salary'] == level][var], bins=20, normed=True)
        i += 1
# For each salary level, compute the probability of obtaining at least the mean of each variable
# regardless of salary level by using the Poisson distributions you constructed in part 2.
# Comment on your results.
for var in poisson:
    global_mean = df[var].mean()
    # print(global_mean)
    for level in ['low', 'medium', 'high']:
        mu = df[df['salary'] == level][var].mean()
        # print(mu, df[df['salary'] == level][var].median())
        # sf = 1 - cdf, i.e. P(X > global_mean) under the group's Poisson fit
        print('{} {} salary level probability is {:.5f}'.format(var, level, stats.poisson.sf(global_mean, mu)))
# Choose two variables which may be good candidates to test this theorem.
central = ['average_montly_hours', 'last_evaluation']
# Using the variables chosen in part 1, randomly select a set of n = 10 samples and take the mean.
# Repeat this 1000 times for each variable.
def make_samples(n_samples, size, seed):
    """Collect `n_samples` means of random subsets of `size` rows for each
    column listed in the notebook-global `central`.

    The RNG is re-seeded before each column, so every column is sampled
    with the same sequence of row-index draws.

    Returns a dict mapping column name -> list of sample means.
    """
    sample_means = {}
    for column in central:
        random.seed(seed)
        # from solution using a list comprehension:
        # mean = [df[var].sample(size).mean() for i in range(n_samples)]
        sample_means[column] = [
            df.iloc[random.sample(range(0, n_employees), size)][column].mean()
            for _ in range(n_samples)
        ]
    return sample_means
# 1000 repetitions at sample size 10, seeded for reproducibility.
sampsize10 = make_samples(1000, 10, 7)
# Plot a histogram for each variable used in part 2. Comment on your results.
def plot_samples(samples):
    """Draw side-by-side histograms of the sample means for each column in
    the notebook-global `central`.

    samples: dict mapping column name -> list of sample means.
    """
    fig, axes = plt.subplots(1, 2, figsize=(16, 5))
    for idx, column in enumerate(central):
        axes[idx].set_title(column)
        axes[idx].hist(samples[column], bins=50)
plot_samples(sampsize10)
# Repeat parts 2-3 for n = 100, n = 500, and n = 1000. Comment on your results.
# As n grows, the distribution of sample means narrows (CLT demonstration).
sampsize100 = make_samples(1000, 100, 7)
plot_samples(sampsize100)
sampsize500 = make_samples(1000, 500, 7)
plot_samples(sampsize500)
sampsize1000 = make_samples(1000, 1000, 7)
plot_samples(sampsize1000)
# Overlay an normal curve on your n = 1000 plots, using the mean and variance computed from the data.
# Comment on your results.
fig, ax = plt.subplots(1, 2, figsize=(16, 5))
i = 0
for var in central:
    x = np.linspace(min(sampsize1000[var]), max(sampsize1000[var]), 1000)
    ax[i].set_title(var)
    ax[i].hist(sampsize1000[var], bins=50, normed=True)
    # from solutions: divide by sqrt(1000) to find the std of a sampled distribution!
    # but if I do it results are not so good!
    # (here the fit uses the empirical mean/std of the sample means themselves)
    ax[i].plot(x, stats.norm.pdf(x, loc=pd.Series(sampsize1000[var]).mean(), scale=pd.Series(sampsize1000[var]).std()), color='red')
    i += 1
# Compute a confidence interval for satisfaction levels, at the 95% confidence level,
# of employees who left the company and those who didn't.
# Do this using both a t distribution and a normal. Comment on your results.
import statsmodels.stats.api as sm
# checking mean and variance
print('Employees who left mean and variance are {:.5f} and {:.5f}'.
      format(df_left.satisfaction_level.mean(),
             df_left.satisfaction_level.var()))
print('Employees who stayed mean and variance are {:.5f} and {:.5f}'.
      format(df_stayed.satisfaction_level.mean(),
             df_stayed.satisfaction_level.var()))
# using normal distribution
print('\nNormal Distribution\n')
# The commented-out alternatives below compute the same intervals by hand;
# statsmodels' DescrStatsW is kept as the canonical version.
# df_left_norm_confidence = df_left.satisfaction_level.mean() + df_left.satisfaction_level.std() / np.sqrt(len(df_left)) * np.array(stats.norm.ppf([0.025, 0.975]))
# print('Employees who left 95% confidence interval: {}'.format(df_left_norm_confidence))
# print('Employees who left 95% confidence interval: {}'.
#       format(stats.norm.interval(0.95,
#              df_left.satisfaction_level.mean(),
#              df_left.satisfaction_level.std()/np.sqrt(len(df_left)))))
print('Employees who left 95% confidence interval: {}'.format(sm.DescrStatsW(df_left.satisfaction_level).zconfint_mean(alpha=0.05)))
# df_stayed_norm_confidence = df_stayed.satisfaction_level.mean() + df_stayed.satisfaction_level.std() / np.sqrt(len(df_stayed)) * np.array(stats.norm.interval(0.95))
# print('Employees who stayed 95% confidence interval: {}'.format(df_stayed_norm_confidence))
# print('Employees who stayed 95% confidence interval: {}'.
#       format(stats.norm.interval(0.95,
#              df_stayed.satisfaction_level.mean(),
#              df_stayed.satisfaction_level.std()/np.sqrt(len(df_stayed)))))
print('Employees who stayed 95% confidence interval: {}'.format(sm.DescrStatsW(df_stayed.satisfaction_level).zconfint_mean(alpha=0.05)))
# using t distribution
print('\nT Distribution with n={}\n'.format(n_employees))
# df_left_t_confidence = df_left.satisfaction_level.mean() + df_left.satisfaction_level.std() / np.sqrt(len(df_left)) * np.array(stats.t.interval(0.95, n_employees))
# print('Employees who left 95% confidence interval: {}'.format(df_left_t_confidence))
# print('Employees who left 95% confidence interval: {}'.
#       format(stats.t.interval(0.95,
#              n_employees,
#              df_left.satisfaction_level.mean(),
#              df_left.satisfaction_level.std()/np.sqrt(len(df_left)))))
print('Employees who left 95% confidence interval: {}'.format(sm.DescrStatsW(df_left.satisfaction_level).tconfint_mean(alpha=0.05)))
# df_stayed_t_confidence = df_stayed.satisfaction_level.mean() + df_stayed.satisfaction_level.std() / np.sqrt(len(df_stayed)) * np.array(stats.t.interval(0.95, n_employees))
# print('Employees who stayed 95% confidence interval: {}'.format(df_stayed_t_confidence))
# print('Employees who stayed 95% confidence interval: {}'.
#       format(stats.t.interval(0.95,
#              n_employees,
#              df_stayed.satisfaction_level.mean(),
#              df_stayed.satisfaction_level.std()/np.sqrt(len(df_stayed)))))
print('Employees who stayed 95% confidence interval: {}'.format(sm.DescrStatsW(df_stayed.satisfaction_level).tconfint_mean(alpha=0.05)))
# Use a t-test to test the hypothesis that employees who left the company,
# had lower satisfaction levels than those who did not. If significant, is the mean difference?
# Comment on your results. (Hint: Do the two populations have equal variance?)
print('Test assuming equal variance: {}'.format(stats.ttest_ind(df_left.satisfaction_level,
                                                                df_stayed.satisfaction_level,
                                                                equal_var=True)))
# Welch's t-test (unequal variances) is the safer variant here
print('Test assuming different variance: {}'.format(stats.ttest_ind(df_left.satisfaction_level,
                                                                    df_stayed.satisfaction_level,
                                                                    equal_var=False)))
# Fit a normal curve to each group in part 2 and put them on the same plot next to each other. Comment on your results.
# The scale is the standard error of the mean (std / sqrt(n)), i.e. the
# sampling distribution of each group mean.
fig = plt.figure(figsize=(15, 10))
ax = plt.axes()
x1 = np.linspace(df_left.satisfaction_level.min(), df_left.satisfaction_level.max(), 1000)
ax.plot(x1, stats.norm.pdf(x1, df_left.satisfaction_level.mean(), df_left.satisfaction_level.std() / np.sqrt(len(df_left))), label='left')
x2 = np.linspace(df_stayed.satisfaction_level.min(), df_stayed.satisfaction_level.max(), 1000)
ax.plot(x2, stats.norm.pdf(x2, df_stayed.satisfaction_level.mean(), df_stayed.satisfaction_level.std() / np.sqrt(len(df_stayed))), label='stayed')
ax.legend();
# Test the hypothesis that the satisfaction level between each salary group, denoted k,
# differs significantly from the mean. Namely
#   H0: mu - mu_k = 0
#   Ha: mu - mu_k != 0
# print(df.satisfaction_level.mean())
# print(df_high.satisfaction_level.mean())
# print(df_medium.satisfaction_level.mean())
# print(df_low.satisfaction_level.mean())
# print(df.satisfaction_level.var())
# print(df_high.satisfaction_level.var())
# print(df_medium.satisfaction_level.var())
# print(df_low.satisfaction_level.var())
print('High level test: {}'.format(stats.ttest_1samp(df_high.satisfaction_level, df.satisfaction_level.mean())))
print('Medium level test: {}'.format(stats.ttest_1samp(df_medium.satisfaction_level, df.satisfaction_level.mean())))
print('Low level test: {}'.format(stats.ttest_1samp(df_low.satisfaction_level, df.satisfaction_level.mean())))
# Generate plots for part 4 as you did in part 3. What conclusions can you draw from the plot?
fig = plt.figure(figsize=(15, 10))
ax = plt.axes()
# x1 = np.linspace(df.satisfaction_level.min(), df.satisfaction_level.max(), 1000)
# ax.plot(x1, stats.norm.pdf(x1, df.satisfaction_level.mean(), df.satisfaction_level.std() / np.sqrt(len(df))), label='all')
x2 = np.linspace(df_low.satisfaction_level.min(), df_low.satisfaction_level.max(), 1000)
ax.plot(x2, stats.norm.pdf(x2, df_low.satisfaction_level.mean(), df_low.satisfaction_level.std() / np.sqrt(len(df_low))), label='low')
x3 = np.linspace(df_medium.satisfaction_level.min(), df_medium.satisfaction_level.max(), 1000)
ax.plot(x3, stats.norm.pdf(x3, df_medium.satisfaction_level.mean(), df_medium.satisfaction_level.std() / np.sqrt(len(df_medium))), label='medium')
x4 = np.linspace(df_high.satisfaction_level.min(), df_high.satisfaction_level.max(), 1000)
ax.plot(x4, stats.norm.pdf(x4, df_high.satisfaction_level.mean(), df_high.satisfaction_level.std() / np.sqrt(len(df_high))), label='high')
ax.legend();
# Repeat parts 4-6 on a hypothesis of your choosing.
# Last evaluation mean differs between people who left and people who stayed
print('Last evaluation mean and variance for employees who left: {}, {}'.format(df_left.last_evaluation.mean(),
                                                                                df_left.last_evaluation.var()))
print('Last evaluation mean and variance for employees who stayed: {}, {}'.format(df_stayed.last_evaluation.mean(),
                                                                                  df_stayed.last_evaluation.var()))
print('Test assuming different variance: {}'.format(stats.ttest_ind(df_left.last_evaluation,
                                                                    df_stayed.last_evaluation,
                                                                    equal_var=False)))
# Sampling distributions of the two group means, as in the earlier plots.
fig = plt.figure(figsize=(15, 10))
ax = plt.axes()
x1 = np.linspace(df_left.last_evaluation.min(), df_left.last_evaluation.max(), 1000)
ax.plot(x1, stats.norm.pdf(x1, df_left.last_evaluation.mean(), df_left.last_evaluation.std() / np.sqrt(len(df_left))), label='left')
x2 = np.linspace(df_stayed.last_evaluation.min(), df_stayed.last_evaluation.max(), 1000)
ax.plot(x2, stats.norm.pdf(x2, df_stayed.last_evaluation.mean(), df_stayed.last_evaluation.std() / np.sqrt(len(df_stayed))), label='stayed')
ax.legend();
# when it is false (thus more power is good). Compute the power for the
# hypothesis that the satisfaction level of high paid employees is different than
# that of medium paid employees using a t distribution.
import statsmodels.stats.power as smp
# From solution the size effect to use is high-medium divided by std of all the data:
effect_size = (df_high.satisfaction_level.mean() - df_medium.satisfaction_level.mean()) / df.satisfaction_level.std()
print(smp.TTestIndPower().solve_power(effect_size,
                                      nobs1=len(df_high),
                                      ratio=len(df_high)/len(df_medium),
                                      alpha=0.05,
                                      alternative='two-sided'))
# This is used in the solution but I don't understand the use of the number of employees who stayed as nobs1...
# NOTE(review): `sm` here is statsmodels.stats.api, which re-exports
# TTestIndPower — presumably intentional; verify the statsmodels version exposes it.
print(sm.TTestIndPower().power(effect_size, nobs1=len(df_stayed), ratio=len(df_high)/len(df_medium), alpha=0.05))
def bootstrap(n, b, var):
    """Bootstrap the median of column `var` of the notebook-global `df`.

    Draws `b` resamples of size `n` with replacement and returns the `b`
    resample medians as a pandas Series.
    """
    medians = [
        pd.Series(np.random.choice(np.array(df[var]), size=n)).median()
        for _ in range(b)
    ]
    return pd.Series(medians)
# Bootstrap the median satisfaction level and compare the resampled statistic
# with the point estimate on the full dataset.
n = 100    # resample size
b = 100    # number of bootstrap resamples
var = 'satisfaction_level'
sat_lvl_bootstrapped = bootstrap(n, b, var)
# BUG FIX: the label said "variance" but .std() is the standard deviation.
print('Bootstrapped samples median mean and std: {:.5f}, {:.5f}'.format(sat_lvl_bootstrapped.mean(), sat_lvl_bootstrapped.std()))
print('True median: {:.5f}'.format(df[var].median()))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Probability, Expectation Values, and Variance
Step2: There seems to be a difference but before we draw any conclusion we would need to perform a hypothesis test
Step3: The variance in the satisfaction levels is larger for employees who left, so the satisfaction level for this employees is more spread out around the mean. This may indicate that the employees leaving the company have a level of satisfaction more variable than those who stay.
Step4: The satisfaction level increases with the salary, as expected. It seems though to be a more sensible difference between low and medium than medium and high salaries, but again we would need to test to see if the difference is significant.
Step5: What can you say about your results in part 6?
Step6: As above the probability that an employee left the company given a certain salary level is higher for low and medium salary levels than for high ones.
Step7: The probability is sensibly higher for high salary level than low.
Step8: Distributions and The Central Limit Theorem
Step9: Which variables in the HR data can be said to be Bernoulli random variables?
Step10: The Normal Distribution
Step11: The Poisson Distribution
Step12: The Central Limit Theorem
Step13: Hypothesis Testing
Step14: The results are almost the same because for $n \rightarrow \infty$ the t distribution tends to the normal.
Step15: The difference is significant, and since the mean of employees who left is lower we can say that the hypothesis that they had lower satisfaction level is statistically relevant (we have a two-tailed test but the p-value is very small in either case).
Step16: From the plots we can see that the peaks of the normal are far apart, thus backing up our test results.
Step17: How would you interpret your results in part 4?
Step18: In this case the curves are not that far apart as in the previous case but they still seem reasonably distant.
Step19: We can't reject the null hypothesis, let's plot the data
Step20: Indeed the means seems very close!
Step21: Bootstrapping
|
3,508
|
<ASSISTANT_TASK:>
Python Code:
from dynamite.operators import sigmax, sigmaz, index_sum, op_sum
# the None default argument will be important later
def build_hamiltonian(L):
    """All-range XX couplings plus a uniform transverse Z field on L spins."""
    coupling_terms = op_sum(index_sum(sigmax(0)*sigmax(i), size=L) for i in range(1, L))
    field_term = 0.5*index_sum(sigmaz(), size=L)
    return coupling_terms + field_term
# look at an example
build_hamiltonian(20)
%matplotlib inline
# visualize the sparsity structure of the matrix
build_hamiltonian(8).spy()
H = build_hamiltonian(20)
print('full space dimension: ', H.dim)
# Restricting to the even-parity sector halves the matrix dimension.
from dynamite.subspaces import Parity
H.subspace = Parity('even')
print('parity subspace dimension:', H.dim)
from dynamite.states import State
ket = State(L=20, subspace=Parity('even'))
print('vector length:', ket.vec.size)
# Set the spin-chain length and subspace globally so they need not be
# repeated on every operator/state construction.
from dynamite import config
config.L = 20
config.subspace = Parity('even')
# now we never have to specify the subspace! and we only need to give
# build_hamiltonian the value of L so it knows the longest long-range interaction
H = build_hamiltonian(config.L)
ket = State()
print('H size:', H.L)
print('H subspace:', H.subspace)
print('ket subspace:', ket.subspace)
from dynamite.operators import sigmay
def build_XXYY(L=None):
    """Nearest-neighbour XX + YY chain; with L=None the length comes from
    the global dynamite config."""
    hop = sigmax(0)*sigmax(1) + sigmay(0)*sigmay(1)
    return index_sum(hop, size=L)
# our operator size is still set from config
build_XXYY()
config.L = 8
build_XXYY().spy()
# Auto discovers the symmetry sector conserved by H that contains a given state.
from dynamite.subspaces import Auto
H = build_XXYY()
# we want the subspace conserved by Hamiltonian H, that contains
# the state with four up spins followed by four down spins
subspace = Auto(H, 'UUUUDDDD')
H.subspace = subspace
H.spy()
from math import factorial
def choose(n, k):
    """Binomial coefficient C(n, k) = n! / (k! * (n - k)!)."""
    # Sequential floor divisions are exact here: n!/k! is an integer,
    # and dividing that by (n-k)! yields the (integer) binomial coefficient.
    return factorial(n) // factorial(k) // factorial(n - k)
# Total magnetization conservation: the sector dimension should be "L choose n_down".
print('subspace dimension:', subspace.get_dimension())
print('8 choose 4: ', choose(8, 4))
# only three down spins
subspace = Auto(H, 'UUUUUDDD')
print('subspace dimension:', subspace.get_dimension())
print('8 choose 3: ', choose(8, 3))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If we look at the nonzero structure of the matrix, it's not at all clear that it's block diagonal
Step2: This is a graphical representation of the matrix, where each black dot represents a nonzero element.
Step3: As expected, the dimension was cut in half! The same subspace can be applied to states, and even globally
Step4: Let's set everything globally so we don't have to keep writing lengths and subspaces everywhere.
Step5: The Auto subspace
Step6: How can we take advantage of conservation of total magnetization? With the Auto subspace
Step7: As expected, the dimension has been reduced significantly! In fact, it has been reduced to 8 choose 4, which is what we would expect for total spin conservation
Step8: Or we can do a different total spin sector
|
3,509
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import yahoo_finance
from yahoo_finance import Share
import numpy as np
import pandas
import matplotlib.pyplot as plt
import datetime
import cvxopt as opt
from cvxopt import blas, solvers
# We will do a lot of optimizations,
# and don't want to see each step.
solvers.options['show_progress'] = False
def getTimeSeries( ticker, start_date='2012-01-01', end_date='2012-02-01'):
    """Fetch adjusted close prices for `ticker` from Yahoo Finance.

    Returns a one-column DataFrame (column name = ticker) indexed by date,
    in chronological order (oldest first).
    """
    records = Share(ticker).get_historical(start_date, end_date)
    # yahoo_finance returns most-recent-first; flip to chronological order
    records = list(reversed(records))
    dates = [datetime.datetime.strptime(rec['Date'], '%Y-%m-%d').date() for rec in records]
    prices = [float(rec['Adj_Close']) for rec in records]
    return pandas.DataFrame(prices, index=dates, columns=[ticker])
def getMultTimeSeries( tickers = ('XLY','XLP','XLE','XLF','XLV','XLI','XLB','XLK','XLU'),
                       start_date = '2012-01-01', end_date = '2012-02-01'):
    """Fetch adjusted close series for several tickers and join them column-wise.

    tickers: iterable of ticker symbols (default: the nine SPDR sector ETFs;
        a tuple rather than a list, to avoid the shared-mutable-default pitfall).
    Returns a DataFrame with one column per ticker, indexed by date.
    """
    # Collect all frames first and concatenate once: repeated
    # pandas.concat inside a loop is quadratic in the number of tickers.
    frames = [getTimeSeries(ticker, start_date, end_date) for ticker in tickers]
    if not frames:
        return pandas.DataFrame()
    return pandas.concat(frames, axis=1)
def markowitzReturns( returns, tickers, explain = False):
    """Mean-variance (Markowitz) portfolio optimization.

    returns: per-asset return history, assets as rows (e.g. a transposed
        returns DataFrame); tickers: asset labels matching the rows.
    Traces the efficient frontier over a sweep of risk-aversion parameters,
    fits a parabola to locate the optimal point, and re-solves the QP for
    long-only, fully-invested weights.

    Returns (optimal_weights ndarray, frontier returns list, frontier risks list).
    """
    n = len(returns)
    returns_df = returns  # keep the original frame for axis labels below
    returns = np.asmatrix(returns)
    # Log-spaced risk-aversion multipliers for the frontier sweep
    mus = [10**(5.0 * t/50 - 1.0) for t in range(50)]
    # Convert to cvxopt matrices
    Sigma = opt.matrix(np.cov(returns))
    q = opt.matrix(np.mean(returns, axis=1))
    # Create constraint matrices
    G = -opt.matrix(np.eye(n))   # negative n x n identity matrix
    h = opt.matrix(0.0, (n ,1))  # -I*w < 0 i.e. no shorts
    A = opt.matrix(1.0, (1, n))  # A is all ones so A*w = w
    b = opt.matrix(1.0)          # Dot product sums to 1
    # Calculate efficient frontier weights using quadratic programming
    ports = [solvers.qp(mu*Sigma, -q, G, h, A, b)['x'] for mu in mus]
    # Calculate risks and returns of frontier
    returns = [blas.dot(q, x) for x in ports]
    risks = [np.sqrt(blas.dot(x, Sigma*x)) for x in ports]
    # Fit polynomial to frontier curve
    m = np.polyfit(returns, risks, 2)
    x = np.sqrt(m[2]/m[0])
    # Calculate optimal portfolio weights
    optimal_weights = solvers.qp(opt.matrix(x * Sigma), -q, G, h, A, b)['x']
    optimal_return = blas.dot(q, optimal_weights)
    optimal_risk = np.sqrt(blas.dot(optimal_weights, Sigma*optimal_weights))
    # Method to justify this portfolio distribution if asked for
    if( explain ):
        # BUG FIX: the report templates below had lost their triple quotes,
        # which made this function a syntax error; restored as string literals.
        date_text = """
--------------------------------------------------------------------------------------------------
Using returns data from {0} to {1} a careful mean - variance analysis was performed.
The analysis found a number of portfolios lying on the markowitz efficient frontier and they are
found below. The analysis indicates that the optimal portfolio for the next trading day will have
the following distribution:
"""
        print(date_text.format(returns_df.columns[0],returns_df.columns[len(returns_df.columns)-1]))
        # Print optimal weights
        weights = np.asarray(optimal_weights)
        weights = [float(weights[i]) for i in range(len(weights))]
        wts = dict(zip(tickers,weights))
        for k in wts:
            weight_text = "\t{0} : {1:.4f}%"
            print(weight_text.format(str(k),float(wts[k])*100))
        returns_text = """
This portfolio distribution has an expected return of:
    {0:.4f}%
"""
        print(returns_text.format(float(optimal_return)*100))
        risk_text = """
And the associated risk (standard deviation) is:
    {0:.4f}
"""
        print(risk_text.format(float(optimal_risk)))
        break_text = """
--------------------------------------------------------------------------------------------------
"""
        print(break_text)
    plt.plot(risks, returns, 'b-o')
    plt.title('Efficient Portfolios on {}'.format(returns_df.columns[len(returns_df.columns)-1]))
    plt.ylabel('Returns (%)')
    plt.xlabel('Risk (STD)')
    return np.asarray(optimal_weights), returns, risks
def backtest( tickers = ['XLY','XLP','XLE','XLF','XLV','XLI','XLB','XLK','XLU'],
              start_date = '2012-01-01', end_date = '2012-01-20', start = 10, max_lookback = 100,
              explain = False):
    # Walk forward one day at a time: re-solve the Markowitz weights on a
    # trailing window of past returns, record the weights, then apply them
    # to the realized daily returns.
    timeseries = getMultTimeSeries( tickers, start_date, end_date)
    returns = timeseries.pct_change().dropna()
    weights_df = pandas.DataFrame()
    for i in range(len(returns)):
        if ( i > start ):  # warm-up: require at least `start` days of history
            if( i < max_lookback ):
                returns_window = returns[0:i]
            else:
                returns_window = returns[(i-max_lookback):i]
            try:
                if( explain ):
                    weights, returns_window, risks = markowitzReturns(returns_window.T, tickers, explain = True)
                else:
                    weights, returns_window, risks = markowitzReturns(returns_window.T, tickers, explain = False)
            except ValueError as e:
                # Sometimes CVXOPT fails (infrequently)
                # "ValueError: Rank(A) < p or Rank([P; A; G]) < n"
                # In this case just do nothing (keep current weights)
                # NOTE(review): if the very first optimization fails, the
                # *_prev names are unbound and this raises NameError — confirm
                # that is acceptable for this notebook.
                weights, returns_window, risks = weights_prev, returns_window_prev, risks_prev
            # comprehension-local `i` shadows the loop index but does not leak in Py3
            weights = [float(weights[i]) for i in range(len(weights))]
            wts = dict(zip(tickers,weights))
            df = pandas.DataFrame(wts, index = [returns.index[i]])
            weights_df = pandas.concat([weights_df, df])
            weights_prev, returns_window_prev, risks_prev = weights, returns_window, risks
    # Element-wise product of daily weights and realized returns; rows before
    # the warm-up have no weights and are dropped by dropna() below.
    total_returns = pandas.DataFrame(weights_df.values*returns[(start+1)::],
                                     columns = returns.columns, index = returns.index)
    naive_returns = [np.sum(total_returns[[i]]) for i in range(len(total_returns.columns))]
    naive_return = np.sum(naive_returns)
    return weights_df, total_returns.dropna(), naive_return
# Short demo run with verbose frontier explanations, then a full-year 2012 run
# (which overwrites the first run's results).
weights, returns, naive_return = backtest(explain = True)
weights, returns, naive_return = backtest(start_date='2012-01-01',end_date='2012-12-31')
def analyzeResults( weights_df, total_returns, naive_return, commission = .0004):
    """Summarize a backtest: total return with and without commission costs.

    weights_df: daily portfolio weights; total_returns: daily weighted
    per-asset returns; naive_return: commission-free total return;
    commission: per-unit-of-turnover fee. Prints a report and plots the
    cumulative naive vs. commission-adjusted return curves.
    """
    start_date = weights_df.index[0]
    end_date = weights_df.index[len(weights_df.index)-1]
    # Get cummulative sum of returns for plotting
    return_sums = total_returns.cumsum()
    return_sums['total_return'] = return_sums.sum(axis=1)
    # Analyze data with commission costs
    weights_diff = weights_df.diff()
    weights_diff['total_delta'] = weights_diff.abs().sum(axis = 1)
    # Half the summed absolute weight change = fraction of the book traded
    portfolio_movement = pandas.DataFrame(weights_diff['total_delta']/2)
    portfolio_movement['commissions'] = portfolio_movement['total_delta']*commission
    portfolio_movement['naive_return'] = total_returns.sum(axis=1)
    portfolio_movement['real_return'] = (portfolio_movement['naive_return'] - portfolio_movement['commissions'])
    real_sums = portfolio_movement.cumsum()
    real_return = portfolio_movement['real_return'].sum()
    # Print naive_return and real_return + analysis
    # BUG FIX: this report template had lost its triple quotes, which made
    # the function a syntax error; restored as a string literal.
    naive_return_text = """
--------------------------------------------------------------------------------------------------
In trading from {0} to {1} the total return ignoring commission fees was:
    {2:.4f}%
After factoring in commission fees of {3} the total return was:
    {4:.4f}%
--------------------------------------------------------------------------------------------------
"""
    print(naive_return_text.format( start_date, end_date, naive_return*100, commission ,real_return*100) )
    # Get plot of naive_returns and real returns over time
    plt.figure(figsize=(12,6))
    plt.plot(return_sums.index,return_sums['total_return'],label='Naive Returns')
    plt.plot(real_sums.index,real_sums['real_return'],label='Real Returns')
    plt.title('Returns over Time')
    plt.xlabel('Time')
    plt.ylabel('Returns (%)')
    plt.xticks(rotation=70)
    plt.legend()
    plt.legend(bbox_to_anchor=(1.01, .5), loc=2, borderaxespad=0.)
    return
analyzeResults( weights, returns, naive_return, commission = .0004)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: getTimeSeries( ticker, start_date, end_date)
Step2: getMultTimeSeries( tickers, start_date, end_date)
Step7: markowitzReturns( returns)
Step8: backtest( tickers, start_date, end_date, start, max_lookback, explain)
Step10: analyzeResults( weights_df, total_returns, naive_return, commission)
|
3,510
|
<ASSISTANT_TASK:>
Python Code:
# restart your notebook if prompted on Colab
try:
    import verta
except ImportError:
    !pip install verta
# Verta project configuration; WORKSPACE and the credentials below are
# placeholders to be filled in by the user.
HOST = "app.verta.ai"
PROJECT_NAME = "Census Income Classification"
EXPERIMENT_NAME = "Logistic Regression"
WORKSPACE = "XXXXX"
import os
os.environ['VERTA_EMAIL'] = 'XXXXXXXXXX'
os.environ['VERTA_DEV_KEY'] = 'XXXXXXXXXXXXXXXXXXXX'
from __future__ import print_function
# Silence noisy sklearn convergence/deprecation warnings for the demo.
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings("ignore", category=ConvergenceWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
import itertools
import os
import time
import six
import numpy as np
import pandas as pd
import sklearn
from sklearn import model_selection
from sklearn import linear_model
from sklearn import metrics
try:
    import wget
except ImportError:
    !pip install wget # you may need pip3
    import wget
from verta import Client
from verta.utils import ModelAPI
# Connect to Verta and set up the project/experiment hierarchy.
client = Client(HOST)
proj = client.set_project(PROJECT_NAME, workspace=WORKSPACE, public_within_org=True)
expt = client.set_experiment(EXPERIMENT_NAME)
# Download the census train/test CSVs once (skipped if already on disk).
train_data_url = "http://s3.amazonaws.com/verta-starter/census-train.csv"
train_data_filename = wget.detect_filename(train_data_url)
if not os.path.isfile(train_data_filename):
    wget.download(train_data_url)
test_data_url = "http://s3.amazonaws.com/verta-starter/census-test.csv"
test_data_filename = wget.detect_filename(test_data_url)
if not os.path.isfile(test_data_filename):
    wget.download(test_data_url)
# Register the local training file as a versioned Verta dataset.
from verta.dataset import Path
dataset = client.set_dataset(name="Census Income Local")
dataset_version = dataset.create_version(Path(train_data_filename))
df_train = pd.read_csv(train_data_filename)
# last column is the label, the rest are features
X_train = df_train.iloc[:,:-1]
y_train = df_train.iloc[:, -1]
df_train.head()
# Cartesian-product hyperparameter grid (2 x 1 x 2 = 4 candidates).
hyperparam_candidates = {
    'C': [1e-6, 1e-4],
    'solver': ['lbfgs'],
    'max_iter': [15, 28],
}
hyperparam_sets = [dict(zip(hyperparam_candidates.keys(), values))
                   for values
                   in itertools.product(*hyperparam_candidates.values())]
def run_experiment(hyperparams):
    """Train one logistic-regression candidate and log everything to Verta.

    hyperparams: dict of LogisticRegression keyword arguments.
    Side effects: creates a Verta experiment run and logs attributes,
    hyperparameters, validation accuracy, the fitted model, dataset
    metadata, and git information. Relies on notebook globals:
    client, X_train, y_train, dataset_version.
    """
    # create object to track experiment run
    run = client.set_experiment_run()
    # log attributes
    run.log_attributes({
        'library': "scikit-learn",
        'model_type': "logistic regression",
    })
    # create validation split
    (X_val_train, X_val_test,
     y_val_train, y_val_test) = model_selection.train_test_split(X_train, y_train,
                                                                 test_size=0.2,
                                                                 shuffle=True)
    # log hyperparameters
    run.log_hyperparameters(hyperparams)
    print(hyperparams, end=' ')
    # create and train model
    # BUG FIX: fit on the training split only. The original fit on the full
    # X_train — which contains X_val_test — so the logged validation accuracy
    # was contaminated (X_val_train/y_val_train were created but never used).
    model = linear_model.LogisticRegression(**hyperparams)
    model.fit(X_val_train, y_val_train)
    # calculate and log validation accuracy on the held-out rows
    val_acc = model.score(X_val_test, y_val_test)
    run.log_metric("val_acc", val_acc)
    print("Validation accuracy: {:.4f}".format(val_acc))
    # create deployment artifacts
    model_api = ModelAPI(X_train, model.predict(X_train))
    requirements = ["scikit-learn"]
    # save and log model
    run.log_model(model, model_api=model_api, custom_modules=[])
    run.log_requirements(requirements)
    # log training data
    run.log_dataset_version("census_data", dataset_version) # log dataset metadata
    # log git information
    run.log_code(
        repo_url="git@github.com:VertaAI/modeldb.git",
        commit_hash="d412a0d9",
        autocapture=False,
    )
# NOTE: run_experiment() could also be defined in a module, and executed in parallel
for hyperparams in hyperparam_sets:
    run_experiment(hyperparams)
# Pick the run with the highest logged validation accuracy.
best_run = expt.expt_runs.sort("metrics.val_acc", descending=True)[0]
print("Validation Accuracy: {:.4f}".format(best_run.get_metric("val_acc")))
best_hyperparams = best_run.get_hyperparameters()
print("Hyperparameters: {}".format(best_hyperparams))
# Retrain the winning configuration on the full training set.
model = linear_model.LogisticRegression(multi_class='auto', **best_hyperparams)
model.fit(X_train, y_train)
train_acc = model.score(X_train, y_train)
print("Training accuracy: {:.4f}".format(train_acc))
# Register the best run as version v0 of the "census" model.
registered_model = client.get_or_create_registered_model(name="census", workspace=WORKSPACE, public_within_org=True)
registered_model.create_version_from_run(best_run.id, name="v0")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This example features
Step2: Phase 1
Step3: Prepare data
Step4: Prepare hyperparameters
Step5: Train models
Step6: Revisit Workflow
Step7: Train on full dataset
Step8: Calculate accuracy on full training set
Step9: Phase 2
|
3,511
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# Download one hour of Wikipedia pageview counts (2018-02-01 00:00).
from mlstatpy.data.wikipedia import download_pageviews
import os
from datetime import datetime
download_pageviews(datetime(2018,2,1), folder=".")
# Keep only the French-wiki lines ("fr " prefix) in a smaller file.
with open("pageviews-20180201-000000", "r", encoding="utf-8") as f:
    fr = filter(lambda line: line.startswith("fr "), f)
    with open("pageviews-20180201-000000.fr.txt", "w", encoding="utf-8") as g:
        for line in fr:
            g.write(line)
import pandas
df = pandas.read_csv("pageviews-20180201-000000.fr.txt", encoding="utf-8", sep=" ", header=None)
df.columns="country page impressions _".split()
df = df[["page", "impressions"]]
df = df.sort_values("impressions", ascending=False)
print(df.shape)
df.head()
# First character of each page title, used to study the key distribution.
df["ch1"] = df["page"].apply(lambda r: r[0] if isinstance(r, str) else r)
df.head()
co = df.copy()
co["volume"] = 1  # one row per page, so summing gives page counts per bucket
gr = co.groupby("ch1", as_index=False).sum().sort_values("ch1")
gr.head()
# Impressions and page counts per first letter are very unevenly distributed.
gr[(gr["ch1"] >= "A") & (gr["ch1"] <= "Z")].plot(x="ch1", y=["impressions", "volume"], kind="bar", figsize=(14,4))
import hashlib
def hash(text):
    """Return the hex MD5 digest of *text* (a str, UTF-8 encoded first).

    NOTE(review): this shadows the builtin ``hash``; the name is kept
    because the notebook cells below call it as ``hash(...)``.
    """
    return hashlib.md5(text.encode('utf-8')).hexdigest()
hash("France")
ha = co.copy()
ha["hash"] = ha["page"].apply(lambda r: hash(r) if isinstance(r, str) else r)
ha.head()
ha["ch2"] = ha["hash"].apply(lambda r: r[0] if isinstance(r, str) else r)
ha.head()
gr = ha.groupby("ch2", as_index=False).sum().sort_values("ch2")
gr.head()
gr.plot(x="ch2", y=["impressions", "volume"], kind="bar", figsize=(14,4))
def substr(s, size=2):
    """Return the first *size* characters of *s*.

    Non-string values (e.g. NaN coming from pandas) are passed through
    unchanged. Slicing already copes with strings shorter than *size*,
    so the explicit length check in the original was redundant.
    """
    return s[:size] if isinstance(s, str) else s
ha["ch12"] = ha["hash"].apply(lambda r: substr(r))
gr = ha.groupby("ch12", as_index=False).sum().sort_values("ch12")
gr.plot(x="ch12", y=["impressions", "volume"], figsize=(14,4))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Récupérer un fichier wikipédia
Step2: On ne garde que les pages françaises.
Step3: Les données sont biaisées car les pages non demandées par les utilisateurs à cette date ne sont pas répertoriées, mais cela ne nuit pas à la démonstration faite ci-dessous.
Step4: Il est possible de distribuer les impressions et les volumes sur plusieurs machines, mais ce serait beaucoup plus simple si les volumes (volume de données) et les impressions (usage de ces données) suivaient des distributions identiques.
Step5: Après avoir appliqué une fonction de hashage, les deux distributions volumes et impressions sont presque uniforme par rapport au premier caractère du hash. Il reste un pic
|
3,512
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
! pip install -q tensorflow-model-optimization
import tensorflow as tf
import numpy as np
import tempfile
import zipfile
import os
# Load MNIST dataset
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28)),
tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3),
activation=tf.nn.relu),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
validation_split=0.1,
epochs=10
)
_, baseline_model_accuracy = model.evaluate(
test_images, test_labels, verbose=0)
print('Baseline test accuracy:', baseline_model_accuracy)
_, keras_file = tempfile.mkstemp('.h5')
print('Saving model to: ', keras_file)
tf.keras.models.save_model(model, keras_file, include_optimizer=False)
import tensorflow_model_optimization as tfmot
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
pruning_params = {
'pruning_schedule': tfmot.sparsity.keras.ConstantSparsity(0.5, begin_step=0, frequency=100)
}
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep()
]
pruned_model = prune_low_magnitude(model, **pruning_params)
# Use smaller learning rate for fine-tuning
opt = tf.keras.optimizers.Adam(learning_rate=1e-5)
pruned_model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=opt,
metrics=['accuracy'])
pruned_model.summary()
# Fine-tune model
pruned_model.fit(
train_images,
train_labels,
epochs=3,
validation_split=0.1,
callbacks=callbacks)
def print_model_weights_sparsity(model):
    """Print the fraction of zero-valued entries in every kernel of *model*."""
    for layer in model.layers:
        # Pruning/clustering wrappers expose the wrapped weights via
        # trainable_weights; plain layers via .weights.
        if isinstance(layer, tf.keras.layers.Wrapper):
            layer_weights = layer.trainable_weights
        else:
            layer_weights = layer.weights
        for w in layer_weights:
            # Only kernels are relevant for sparsity; skip biases and
            # clustering centroid variables.
            if "kernel" not in w.name or "centroid" in w.name:
                continue
            total = w.numpy().size
            zeros = np.count_nonzero(w == 0)
            print(
                f"{w.name}: {zeros/total:.2%} sparsity ",
                f"({zeros}/{total})",
            )
stripped_pruned_model = tfmot.sparsity.keras.strip_pruning(pruned_model)
print_model_weights_sparsity(stripped_pruned_model)
stripped_pruned_model_copy = tf.keras.models.clone_model(stripped_pruned_model)
stripped_pruned_model_copy.set_weights(stripped_pruned_model.get_weights())
# Clustering
cluster_weights = tfmot.clustering.keras.cluster_weights
CentroidInitialization = tfmot.clustering.keras.CentroidInitialization
clustering_params = {
'number_of_clusters': 8,
'cluster_centroids_init': CentroidInitialization.KMEANS_PLUS_PLUS
}
clustered_model = cluster_weights(stripped_pruned_model, **clustering_params)
clustered_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
print('Train clustering model:')
clustered_model.fit(train_images, train_labels,epochs=3, validation_split=0.1)
stripped_pruned_model.save("stripped_pruned_model_clustered.h5")
# Sparsity preserving clustering
from tensorflow_model_optimization.python.core.clustering.keras.experimental import (
cluster,
)
cluster_weights = cluster.cluster_weights
clustering_params = {
'number_of_clusters': 8,
'cluster_centroids_init': CentroidInitialization.KMEANS_PLUS_PLUS,
'preserve_sparsity': True
}
sparsity_clustered_model = cluster_weights(stripped_pruned_model_copy, **clustering_params)
sparsity_clustered_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
print('Train sparsity preserving clustering model:')
sparsity_clustered_model.fit(train_images, train_labels,epochs=3, validation_split=0.1)
print("Clustered Model sparsity:\n")
print_model_weights_sparsity(clustered_model)
print("\nSparsity preserved clustered Model sparsity:\n")
print_model_weights_sparsity(sparsity_clustered_model)
def get_gzipped_model_size(file):
    """Return the size in kilobytes of *file* after DEFLATE (zip) compression.

    Fix: tempfile.mkstemp returns an OPEN OS-level file descriptor; the
    original discarded it, leaking one fd per call. Close it explicitly.
    """
    fd, zipped_file = tempfile.mkstemp('.zip')
    os.close(fd)
    with zipfile.ZipFile(zipped_file, 'w', compression=zipfile.ZIP_DEFLATED) as f:
        f.write(file)
    return os.path.getsize(zipped_file)/1000
# Clustered model
clustered_model_file = 'clustered_model.h5'
# Save the model.
clustered_model.save(clustered_model_file)
#Sparsity Preserve Clustered model
sparsity_clustered_model_file = 'sparsity_clustered_model.h5'
# Save the model.
sparsity_clustered_model.save(sparsity_clustered_model_file)
print("Clustered Model size: ", get_gzipped_model_size(clustered_model_file), ' KB')
print("Sparsity preserved clustered Model size: ", get_gzipped_model_size(sparsity_clustered_model_file), ' KB')
stripped_sparsity_clustered_model = tfmot.clustering.keras.strip_clustering(sparsity_clustered_model)
converter = tf.lite.TFLiteConverter.from_keras_model(stripped_sparsity_clustered_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
sparsity_clustered_quant_model = converter.convert()
_, pruned_and_clustered_tflite_file = tempfile.mkstemp('.tflite')
with open(pruned_and_clustered_tflite_file, 'wb') as f:
f.write(sparsity_clustered_quant_model)
print("Sparsity preserved clustered Model size: ", get_gzipped_model_size(sparsity_clustered_model_file), ' KB')
print("Sparsity preserved clustered and quantized TFLite model size:",
get_gzipped_model_size(pruned_and_clustered_tflite_file), ' KB')
def eval_model(interpreter):
    """Run the TFLite *interpreter* over the MNIST test set and return accuracy.

    Relies on the module-level globals ``test_images`` / ``test_labels``
    and assumes ``interpreter.allocate_tensors()`` was already called.
    Returns the fraction of correctly classified digits as a float.
    """
    input_index = interpreter.get_input_details()[0]["index"]
    output_index = interpreter.get_output_details()[0]["index"]
    # Run predictions on every image in the "test" dataset.
    prediction_digits = []
    for i, test_image in enumerate(test_images):
        if i % 1000 == 0:
            print(f"Evaluated on {i} results so far.")
        # Pre-processing: add batch dimension and convert to float32 to match with
        # the model's input data format.
        test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
        interpreter.set_tensor(input_index, test_image)
        # Run inference.
        interpreter.invoke()
        # Post-processing: remove batch dimension and find the digit with highest
        # probability.
        output = interpreter.tensor(output_index)
        digit = np.argmax(output()[0])
        prediction_digits.append(digit)
    print('\n')
    # Compare prediction results with ground truth labels to calculate accuracy.
    prediction_digits = np.array(prediction_digits)
    accuracy = (prediction_digits == test_labels).mean()
    return accuracy
# Keras model evaluation
stripped_sparsity_clustered_model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
_, sparsity_clustered_keras_accuracy = stripped_sparsity_clustered_model.evaluate(
test_images, test_labels, verbose=0)
# TFLite model evaluation
interpreter = tf.lite.Interpreter(pruned_and_clustered_tflite_file)
interpreter.allocate_tensors()
sparsity_clustered_tflite_accuracy = eval_model(interpreter)
print('Pruned, clustered and quantized Keras model accuracy:', sparsity_clustered_keras_accuracy)
print('Pruned, clustered and quantized TFLite model accuracy:', sparsity_clustered_tflite_accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sparsity preserving clustering Keras example
Step2: Train a tf.keras model for MNIST to be pruned and clustered
Step3: Evaluate the baseline model and save it for later usage
Step4: Prune and fine-tune the model to 50% sparsity
Step5: Fine-tune the model, check sparsity, and evaluate the accuracy against baseline
Step6: Define helper functions to calculate and print the sparsity of the model.
Step7: Check that the model kernels was correctly pruned. We need to strip the pruning wrapper first. We also create a deep copy of the model to be used in the next step.
Step8: Apply clustering and sparsity preserving clustering and check its effect on model sparsity in both cases
Step9: Check sparsity for both models.
Step10: Create 1.6x smaller models from clustering
Step11: Create a TFLite model from combining sparsity preserving weight clustering and post-training quantization
Step12: See the persistence of accuracy from TF to TFLite
Step13: You evaluate the model, which has been pruned, clustered and quantized, and then see that the accuracy from TensorFlow persists in the TFLite backend.
|
3,513
|
<ASSISTANT_TASK:>
Python Code:
from databaker.framework import *
# put your input-output files here
inputfile = "example1.xls"
outputfile = "example1.csv"
previewfile = "preview.html"
from databaker.framework import *
tab = loadxlstabs("example1.xls", sheetids="stones", verbose=True)[0]
print(tab)
cellbag = tab
print("Numbered cells only:", cellbag.is_number())
print()
print("Not numbers:", cellbag.is_not_number())
print()
print("Not numbers and not whitespace:", cellbag.is_not_number().is_not_whitespace())
print()
print("Cells that seem to be a date:", cellbag.is_date())
from databaker.framework import * # restated import so you can run from this cell
cellbag = tab = loadxlstabs("example1.xls", sheetids="stones", verbose=True)[0]
print("Get some matching cells", cellbag.one_of(["Rocks", "ice", "mud"]))
print("A3 is", cellbag.excel_ref("A3"))
print("A3:B4 is", cellbag.excel_ref("A2:B4"))
print()
print("The second cell in the whole table is", tab.by_index(2))
ngreater20 = cellbag.is_number().filter(lambda c: c.value>20)
nlessthan20 = cellbag.is_number().filter(lambda c: c.value<20)
print("Numbers greater than 20", ngreater20)
print("Numbers less than 20", nlessthan20)
# Uncomment this line to see these selections in contents
# savepreviewhtml([ngreater20, nlessthan20])
colC = tab.excel_ref("D3:D5")
rowC = tab.excel_ref("A4:D4")
print("colC", colC)
print("rowC", rowC)
print()
print("Union is", colC.union(rowC))
print("Difference is", colC.difference(rowC))
print("Intersection is", colC.intersection(rowC))
print()
print("Union is", (colC | rowC))
print("Difference is", (colC - rowC))
print("Intersection is", (colC & rowC))
c = tab.excel_ref("D3") | tab.excel_ref("E4")
d = tab.excel_ref("A6:A7")
print("Waffle:")
savepreviewhtml([c,d, c.waffle(d)])
print("Junction output:")
for s in c.junction(d):
print(" ", s)
print("Cells column A that are in same row as", c, "are", tab.excel_ref("A").same_row(c))
print("Cells column 7 that are in same column as", c, "are", tab.excel_ref("7").same_col(c))
c = tab.excel_ref("B4")
print("Shift RIGHT from", c, "is", c.shift(RIGHT))
print("Shift (-1,-2) from", c, "is", c.shift((-1, -2)))
print("Fill UP from", c, "is", c.fill(UP))
print("Expand UP from", c, "is", c.expand(UP))
print()
print("How it works: UP=", UP, " DOWN=", DOWN, " LEFT=", LEFT, " RIGHT=", RIGHT)
print()
print("Extrude two cells rightwards", c.extrude(2,0))
from databaker.framework import *
tab = loadxlstabs("example1.xls", sheetids="stones", verbose=False)[0]
rocks = tab.filter("Rocks").fill(DOWN)
years = tab.filter("Year").fill(DOWN).is_not_whitespace()
cost = tab.filter("cost").fill(DOWN)
print(rocks)
# savepreviewhtml([rocks, years, cost]) # <-- uncomment this line to see the table
hrocks = HDim(rocks, "ROCKS!", DIRECTLY, LEFT)
hrocks.AddCellValueOverride("granite", "gneiss")
hyears = HDim(years, "yyyy", CLOSEST, UP)
for ob in cost:
print(ob, "\t", hyears.cellvalobs(ob), "\t", hrocks.cellvalobs(ob))
# savepreviewhtml([hrocks, hyears, cost]) # <-- uncomment to see as a coloured table
from databaker.framework import *
times = [2017.0, "Q32017", "Mar 2017"]
for t in times:
print(t, "is\t", Ldatetimeunitloose(t), "corrected to\t", Ldatetimeunitforce(t, Ldatetimeunitloose(t)))
from databaker.framework import *
tab = loadxlstabs("example1.xls", sheetids="stones", verbose=False)[0]
cs = ConversionSegment(tab.filter("cost").fill(DOWN), [
HDim(tab.filter("Year").fill(DOWN).is_not_whitespace(), "year", CLOSEST, UP),
HDim(tab.filter("Month").fill(DOWN).is_not_whitespace(), "month", DIRECTLY, LEFT)
])
###################
# savepreviewhtml(cs) # <-- uncomment this to see the interactive table
dcs = cs.topandas()
# print(dcs) # uncomment to see the table
# concatenate the month and year into a time
dcs["TIME"] = dcs.month + " " + dcs.year
pdguessforceTIMEUNIT(dcs) # <-- fixes the date format (removing the '.0's on the years)
# print(dcs) # uncomment to see the table at this point
# delete the now redundant columns
dcs.drop(['year', "month"], axis=1, inplace=True)
#print(dcs) # uncomment to see pandas table
# Output the finished WDA file where the dates should all work!
print(writetechnicalCSV(None, dcs))
import urllib, re, os
# url containing the index of a set of spreadsheets
ddurl = "https://www.ons.gov.uk/businessindustryandtrade/constructionindustry/datasets/outputintheconstructionindustry/current"
req1 = urllib.request.Request(ddurl, headers={'User-Agent' : "Sensible code"})
dhtml = urllib.request.urlopen(req1).read().decode("utf8")
print("Downloaded a webpage with", len(dhtml), "bytes")
# make the download directory
dfiles = "downloaddir"
if not os.path.isdir(dfiles):
print("making directory", dfiles)
os.mkdir(dfiles)
# quick and dirty regular expression for pullint out the links to relevant xls spreadsheets
xllinklist = re.findall('href="(/file\?uri=/businessindustryandtrade.*?/([^/"]*\.xls))"', dhtml)
for xl, xln in xllinklist:
lxln = os.path.join(dfiles, xln)
if os.path.exists(lxln):
continue # <-- we avoid downloading the same file a second time, in this case
furl = urllib.parse.urljoin(ddurl, xl)
req = urllib.request.Request(furl, headers={'User-Agent' : "Sensible code"})
xp = urllib.request.urlopen(req).read()
print("Downloading", xln, len(xp), "bytes")
fout = open(lxln, "wb")
fout.write(xp)
fout.close()
fnames = [ os.path.join(dfiles, f) for f in os.listdir(dfiles) if f[-4:] == '.xls' ]
print("Your list of xls files is:\n", "\n ".join(fnames))
import urllib, re
# fetch the front page and find the link to the zip file we want
iurl = "https://www.ons.gov.uk/employmentandlabourmarket/peopleinwork/workplacepensions/datasets/annualsurveyofhoursandearningspensiontablespensiontypebyagegroupandbygrossweeklyearningsbandsp1"
req = urllib.request.Request(iurl, headers={'User-Agent' : "Sensible Code"})
ipage = urllib.request.urlopen(req).read()
# search the link to the zip file and "join" against the baseurl to get the full url (there's a space -> %20 bug problem)
zyears = [ urllib.parse.urljoin(iurl, z.replace(" ", "%20")) for z in re.findall('<a href="([^"]*?\.zip)"', str(ipage)) ]
zurl = zyears[0]
print("We are about to download the file:\n", zurl)
zfilename = "downloaded.zip"
zurl = zurl.replace(" ", "%20") # spaces in the url get escaped in the browser
req = urllib.request.Request(zurl, headers={'User-Agent' : "Sensible Code"})
zbytes = urllib.request.urlopen(req).read()
fout = open(zfilename, "wb")
fout.write(zbytes)
fout.close()
print(zfilename, "is", len(zbytes), "bytes long.")
import zipfile
zfilename = "downloaded.zip"
# open the zipfile
zdir = zipfile.ZipFile(zfilename)
print("The files in", zfilename, "are:\n", "\n ".join(zdir.namelist()))
zmember0 = zdir.namelist()[0]
xlsfilename = "downloaded0.xls"
fout = open(xlsfilename, "wb")
xlsbindata = zdir.read(zmember0)
fout.write(xlsbindata)
fout.close()
print()
print("We have unzipped:\n", zmember0, "\nand saved it as", xlsfilename, "with", len(xlsbindata), "bytes")
# now we can load this file into databaker and continue with our work
from databaker.framework import *
tabs = loadxlstabs(xlsfilename)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cell bag selection
Step2: cellbag.is_XXX()
Step3: cellbag.filter(word)
Step4: cellbag1.union(cellbag2)
Step5: cellbag1.waffle(cellbag2)
Step6: cellbag.shift(direction)
Step and include this many cells between 0 and dx and dy.
Step7: Dimensions
Step8: Conversion Segments
Step9: Downloading excel and unzipping files
Step10: What to do when you have zip files
|
3,514
|
<ASSISTANT_TASK:>
Python Code:
import math
def euclidean_distance(x1, y1, x2, y2):
    """Return the Euclidean distance between points (x1, y1) and (x2, y2).

    Uses math.hypot, which is numerically more robust than taking the
    square root of a sum of squares (avoids intermediate overflow/underflow).
    """
    return math.hypot(x1 - x2, y1 - y2)
euclidean_distance(0,0,1,1)
values_list = [0,0,1,1]
euclidean_distance(*values_list)
values_tuple = (0,0,1,1)
euclidean_distance(*values_tuple)
values_dict = { 'x1': 0, 'y1': 0, 'x2': 1, 'y2': 1 }
euclidean_distance(**values_dict)
list(zip([1,2,3,4,5,6]))
# List comprehension
[num ** 2 for num in range(-10, 11)]
[num ** 2 for num in range(-10, 11) if num > 0]
# Set comprehension
names = [ 'Bob', 'JOHN', 'alice', 'bob', 'ALICE', 'J', 'Bob' ]
{ name[0].upper() + name[1:].lower() for name in names if len(name) > 1 }
s = "Action Is Eloquence"
counts = dict()
for char in s:
counts[char] = counts.get(char, 0) + 1
counts
freq = {
k.lower() : counts.get(k.lower(), 0) + counts.get(k.upper(), 0)
for k in counts.keys() if k.isalpha()
}
freq
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can unpack a list or tuple into positional arguments using a star *
Step2: Similarly, we can use double star ** to unpack a dictionary into keyword arguments.
Step3: Comprehensions
Step4: Dictionary comprehension
|
3,515
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from ray import tune
def train_function(config, checkpoint_dir=None):
    """Toy Ray Tune trainable: report 30 noisy loss samples.

    Each reported loss is config["mean"] plus Gaussian noise scaled by
    config["sd"], so Tune's search converges toward the smallest mean.
    """
    mean, sd = config["mean"], config["sd"]
    for _ in range(30):
        tune.report(loss=mean + sd * np.random.randn())
api_key = "YOUR_COMET_API_KEY"
project_name = "YOUR_COMET_PROJECT_NAME"
# This cell is hidden from the rendered notebook. It makes the
from unittest.mock import MagicMock
from ray.tune.integration.comet import CometLoggerCallback
CometLoggerCallback._logger_process_cls = MagicMock
api_key = "abc"
project_name = "test"
from ray.tune.integration.comet import CometLoggerCallback
analysis = tune.run(
train_function,
name="comet",
metric="loss",
mode="min",
callbacks=[
CometLoggerCallback(
api_key=api_key, project_name=project_name, tags=["comet_example"]
)
],
config={"mean": tune.grid_search([1, 2, 3]), "sd": tune.uniform(0.2, 0.8)},
)
print(analysis.best_config)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now, given that you provide your Comet API key and your project name like so
Step2: You can add a Comet logger by specifying the callbacks argument in your tune.run accordingly
|
3,516
|
<ASSISTANT_TASK:>
Python Code:
# Login information (Edit here or be prompted by the next cell)
email = None
mcurl = "https://materialscommons.org/api"
# Construct a Materials Commons client
from materials_commons.cli.user_config import make_client_and_login_if_necessary
if email is None:
print("Account (email):")
email = input()
client = make_client_and_login_if_necessary(email=email, mcurl=mcurl)
import materials_commons.api as mcapi
# Project name
name = "ExampleProjectFromJupyter"
# Projct summary - short description to show in tables
summary = "Example project created via Jupyter notebook"
# Project description - describes the project, may be more detailed
description = "This project was created as an example of how to create "\
"and use Materials Commons projects from within a Jupyter notebook"
# Create a new project (or return existing one with same name)
request = mcapi.CreateProjectRequest(description=description, summary=summary)
remote_mc_proj = client.create_project(name, request)
print(str(remote_mc_proj))
print("URL:", client.base_url)
print("Project ID:", remote_mc_proj.id)
print("Project name:", remote_mc_proj.name)
import os
import pathlib
import shutil
from materials_commons.cli.cloned_project import ClonedProject
cloned_mc_proj = ClonedProject(email=email, mcurl=mcurl, proj_id=remote_mc_proj.id)
print(str(cloned_mc_proj))
print("Cloned project local path:", cloned_mc_proj.local_path)
parent_path = pathlib.Path.home() / "mc_projects"
os.makedirs(parent_path, exist_ok=True)
cloned_mc_proj = ClonedProject(email=email,
mcurl=mcurl,
proj_id=remote_mc_proj.id,
parent_path=parent_path, # must exist
name=None) # default uses project name
print(str(cloned_mc_proj))
print("Cloned project local path:", cloned_mc_proj.local_path)
cloned_mc_proj = ClonedProject(email=email,
mcurl=mcurl,
proj_id=remote_mc_proj.id,
path=pathlib.Path.home() / "mc_projects" / "ExampleProjectFromJupyter")
print(str(cloned_mc_proj))
print("Cloned project local path:", cloned_mc_proj.local_path)
print(str(cloned_mc_proj.proj))
print(str(cloned_mc_proj.proj.remote))
print(type(cloned_mc_proj.local_path), str(cloned_mc_proj.local_path))
example_file1 = cloned_mc_proj.local_path / "example_file1.txt"
with open(example_file1, 'w') as f:
f.write("Hello World!\n")
example_file2 = cloned_mc_proj.local_path / "example_file2.txt"
with open(example_file2, 'w') as f:
f.write("Hello World, again!\n")
example_dir = cloned_mc_proj.local_path / "dir"
os.makedirs(example_dir, exist_ok=True)
example_file3 = example_dir / "example_file3.txt"
with open(example_file3, 'w') as f:
f.write("Got some data here!\n")
example_file4 = example_dir / "example_file4.txt"
with open(example_file4, 'w') as f:
f.write("So much data!\n")
cloned_mc_proj.upload(example_file1)
cloned_mc_proj.upload(example_file1)
cloned_mc_proj.upload(example_file1, no_compare=True)
cloned_mc_proj.upload(example_file1, example_file2)
cloned_mc_proj.upload(example_dir, recursive=True)
nb_name = "MaterialsCommons-Project-Example.ipynb"
notebook_local_abspath = os.path.join(os.getcwd(), nb_name)
notebook_upload_as = cloned_mc_proj.local_path / "notebooks" / nb_name
cloned_mc_proj.upload(notebook_local_abspath, upload_as=notebook_upload_as)
for file in [example_file1, example_file2]:
if os.path.exists(file):
os.remove(file)
if os.path.exists(example_dir):
shutil.rmtree(example_dir)
print("Local project directory contents:", os.listdir(cloned_mc_proj.local_path))
cloned_mc_proj.download(example_file1)
cloned_mc_proj.download(example_file1, no_compare=True)
cloned_mc_proj.download(example_file2)
shutil.copyfile(example_file2, example_file1)
cloned_mc_proj.download(example_file1, force=True)
cloned_mc_proj.download(example_file1, example_file2, force=True)
cloned_mc_proj.download(example_dir, recursive=True)
cloned_mc_proj.download(example_file1, output=cloned_mc_proj.local_path / "example_file3.txt")
cloned_mc_proj.upload(example_file1, globus=True)
cloned_mc_proj.download(example_file2, globus=True, force=True)
! cd {cloned_mc_proj.local_path} && mc globus upload && mc globus download
! globus task list
from materials_commons.cli.functions import read_project_config
project_config = read_project_config(cloned_mc_proj.local_path)
! cd {cloned_mc_proj.local_path} && mc globus upload --id {project_config.globus_upload_id} --finish --force
! cd {cloned_mc_proj.local_path} && mc globus download --id {project_config.globus_download_id} --delete --force
# Delete the remote project
projs = client.get_all_projects()
for proj in projs:
if proj.name == "ExampleProjectFromJupyter":
client.delete_project(proj.id)
# Delete the local project
local_project_path = pathlib.Path.home() / "mc_projects" / "ExampleProjectFromJupyter"
if os.path.exists(local_project_path):
shutil.rmtree(local_project_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cloning a project
Step2: Example 1
Step3: Example 2
Step4: Example 3
Step5: Using the ClonedProject
Step6: File transfer
Step7: Upload one file
Step8: By default, files that already exist will be skipped
Step9: Upload multiple files
Step10: Upload files and directories, recursively
Step11: Uploading the notebook itself / use of "upload_as"
Step12: Setup for download examples
Step13: Download one file
Step14: By default, files that already exist will be skipped
Step15: Download multiple files
Step16: Download files and directories, recursively
Step17: Download with different name
Step18: Using Globus file transfer
Step19: Monitor transfer status
Step20: Finish the transfer
Step21: Example cleanup
|
3,517
|
<ASSISTANT_TASK:>
Python Code:
[x**2 for x in range(0,10)]
[x for x in range(1,20) if x%2==0 ]
[x for x in 'MATHEMATICS' if x in ['A','E','I','O','U']]
for i in range(1,101):
if int(i**0.5)==i**0.5:
print i
[i for i in range(1,101) if int(i**0.5)==i**0.5]
import numpy as np
# matrix = [ range(0,5), range(5,10), range(10,15) ]
# print matrix
def eg1_for(matrix):
    """Flatten a 2-D iterable into a single list using an explicit loop."""
    flat = []
    for row in matrix:
        flat.extend(row)
    return flat
def eg1_lc(matrix):
    """Flatten a 2-D iterable into a single list via a nested comprehension."""
    return [cell for row in matrix for cell in row]
matrix = [ range(0,5), range(5,10), range(10,15) ]
print "Original Matrix: " + str(matrix)
print "FOR-loop result: " + str(eg1_for(matrix))
print "LC result : " + str(eg1_lc(matrix))
%timeit eg1_for(matrix)
%timeit eg1_lc(matrix)
def eg2_for(sentence):
    """Return *sentence* with all lowercase vowels removed (loop version)."""
    vowels = 'aeiou'
    kept = []
    for ch in sentence:
        # Note: only lowercase vowels are filtered; uppercase pass through.
        if ch not in vowels:
            kept.append(ch)
    return ''.join(kept)
eg2_for('My name is Aarshay Jain!')
def eg2_lc(sentence):
    """Return *sentence* with all lowercase vowels removed (comprehension version)."""
    return ''.join(ch for ch in sentence if ch not in 'aeiou')
eg2_for('My name is Aarshay Jain!')
sentence = 'My name is Aarshay Jain!'
print "FOR-loop result: " + eg2_for(sentence)
print "LC result : " + eg2_lc(sentence)
%timeit eg2_for('My name is Aarshay Jain!')
%timeit eg2_lc('My name is Aarshay Jain!')
country = ['India', 'Pakistan', 'Nepal', 'Bhutan', 'China', 'Bangladesh']
capital = ['New Delhi', 'Islamabad','Kathmandu', 'Thimphu', 'Beijing', 'Dhaka']
def eg3_for(keys, values):
    """Build a dict mapping keys[i] -> values[i] with an explicit loop.

    Raises IndexError if *values* is shorter than *keys* (same as the
    original index-based version).
    """
    result = {}
    for idx, key in enumerate(keys):
        result[key] = values[idx]
    return result
eg3_for(country,capital)
def eg3_lc(keys, values):
    """Build a dict mapping keys[i] -> values[i] via a dict comprehension."""
    return {key: values[idx] for idx, key in enumerate(keys)}
eg3_lc(country,capital)
country = ['India', 'Pakistan', 'Nepal', 'Bhutan', 'China', 'Bangladesh']
capital = ['New Delhi', 'Islamabad','Kathmandu', 'Thimphu', 'Beijing', 'Dhaka']
print "FOR-loop result: " + str(eg3_for(country, capital))
print "LC result : " + str(eg3_lc(country, capital))
%timeit eg3_for(country,capital)
%timeit eg3_lc(country,capital)
#FOR:
def eg4_for(N):
    """Return the list of primes strictly below N (sieve with for-loops).

    Bug fix: the original marked each candidate factor i itself as
    composite (the inner range started at i), so the primes <= sqrt(N)
    (e.g. 2, 3, 5, 7 for N=100) were wrongly dropped. Start marking at
    i*i instead. Also collect composites into a set: membership tests
    are O(1) rather than O(len(list)).
    """
    non_primes = set()
    for i in range(2, int(N**0.5) + 1):
        for j in range(i * i, N, i):
            non_primes.add(j)
    primes = []
    for i in range(2, N):
        if i not in non_primes:
            primes.append(i)
    return primes
print eg4_for(100)
%timeit eg4_for(100)
#LC:
def eg4_lc(N):
    """Return the list of primes strictly below N (sieve via comprehensions).

    Bug fix: start marking multiples at i*i (not i) so the base primes
    <= sqrt(N) are not themselves marked composite; use a set
    comprehension for O(1) membership tests.
    """
    non_primes = {j for i in range(2, int(N**0.5) + 1) for j in range(i * i, N, i)}
    return [i for i in range(2, N) if i not in non_primes]
print eg4_lc(100)
%timeit eg4_lc(100)
mat1 = [ range(0,5), range(5,10) ]
mat2 = [ range(0,2), range(2,4), range(4,6), range(6,8), range(8,10) ]
print mat1 , mat2
def eg2_for(mat1, mat2):
    """Multiply two matrices (lists of row sequences) with explicit triple loops.

    Returns a new (rows of mat1) x (cols of mat2) nested list.
    """
    n_rows = len(mat1)
    inner = len(mat2)        # columns of mat1 == rows of mat2
    n_cols = len(mat2[0])
    product = [[0] * n_cols for _ in range(n_rows)]
    for r in range(n_rows):
        for c in range(n_cols):
            total = 0
            for k in range(inner):
                total += mat1[r][k] * mat2[k][c]
            product[r][c] = total
    return product
print eg2_for(mat1,mat2)
%timeit eg2_for(mat1,mat2)
def eg2_lc(mat1, mat2):
    """Multiply two matrices via a comprehension.

    NOTE: by design this returns the product FLATTENED row-major (a plain
    list), not a nested list — matching the original behaviour.
    """
    rows1 = len(mat1)
    rows2 = len(mat2)      # == columns of mat1
    cols2 = len(mat2[0])
    return [
        sum(mat1[r][k] * mat2[k][c] for k in range(rows2))
        for r in range(rows1)
        for c in range(cols2)
    ]
print eg2_lc(mat1,mat2)
%timeit eg2_lc(mat1,mat2)
%timeit eg2_for(mat1,mat2)
%timeit eg2_lc(mat1,mat2)
def tri_for(N):
    """List ordered triples (i, j, k), i<j<k<N, that CANNOT form a triangle.

    A triple fails the triangle inequality when one side is strictly
    longer than the sum of the other two.
    """
    bad = []
    for i in range(1, N - 2):
        for j in range(i + 1, N - 1):
            for k in range(j + 1, N):
                if i + j < k or i + k < j or j + k < i:
                    bad.append((i, j, k))
    return bad
def tri_lc(N):
    """Same as tri_for, expressed as one list comprehension."""
    return [
        (i, j, k)
        for i in range(1, N - 2)
        for j in range(i + 1, N - 1)
        for k in range(j + 1, N)
        if i + j < k or i + k < j or j + k < i
    ]
# [ (i,j,k) for i in range(1,N-2) for j in range(i+1,N-1) for k in range(j+1,N) ]
print tri_for(10)
%timeit tri_for(10)
print tri_lc(10)
%timeit tri_lc(10)
arr = range(10) #contains [0,1,...,9]
map(lambda x: x*(x+1), arr)
#Method 1: For-Loop
def square_for(arr):
    """Square every element of *arr*, building the result with an explicit loop."""
    result = []
    for value in arr:
        result.append(value ** 2)
    return result
print square_for(range(1,11))
#Method 2: Map Function
def square_map(arr):
    """Square every element of *arr* using the built-in map.

    Returns whatever ``map`` returns (a lazy map iterator on Python 3).
    """
    return map(lambda value: value ** 2, arr)
print square_map(range(1,11))
#Method 3: List comprehension:
def square_lc(arr):
    """Square every element of *arr* via a list comprehension."""
    return [value ** 2 for value in arr]
print square_lc(range(1,11))
#Method 1: For-Loop
def square_even_for(arr):
    """Square only the even elements of *arr* (explicit loop version)."""
    squares = []
    for value in arr:
        if value % 2 == 0:
            squares.append(value ** 2)
    return squares
print square_even_for(range(1,11))
#Method 2: Map Function
def square_even_map(arr):
    """Square only the even elements using map + filter.

    Odd elements are mapped to None and then filtered out; the result is
    whatever ``filter`` returns (a lazy iterator on Python 3).
    """
    squared_or_none = map(lambda x: x ** 2 if x % 2 == 0 else None, arr)
    return filter(lambda x: x is not None, squared_or_none)
print square_even_map(range(1,11))
#Method 3: List comprehension:
def square_even_lc(arr):
return [i**2 for i in arr if i%2==0]
print square_even_lc(range(1,11))
%timeit square_for(range(1,11))
%timeit square_map(range(1,11))
%timeit square_lc(range(1,11))
%timeit square_even_for(range(1,11))
%timeit square_even_map(range(1,11))
%timeit square_even_lc(range(1,11))
#Method 1: For-loop:
def empty_for(arr):
for i in arr:
pass
%timeit empty_for(range(1,11))
#Method 2: Map
def empty_map(arr):
map(lambda x: None,arr)
%timeit empty_map(range(1,11))
#Method 3: LC
def empty_lc(arr):
[None for i in arr]
%timeit empty_lc(range(1,11))
#Method 1: For-loop:
def x2_for(arr):
for i in arr:
i*2
%timeit x2_for(range(1,11))
#Method 2: Map
def x2_map(arr):
map(lambda x: x*2,arr)
%timeit x2_map(range(1,11))
#Method 3: LC
def x2_lc(arr):
[i*2 for i in arr]
%timeit x2_lc(range(1,11))
def store_for(arr):
result=[]
for i in arr:
result.append(i*2)
return result
%timeit store_for(range(1,11))
def x2_lc(arr):
def mul(x):
return x*2
[mul(i) for i in arr]
%timeit x2_lc(range(1,11))
def my_first_gen(n):
for i in range(n):
yield i
print my_first_gen(10)
gen = my_first_gen(3)
print gen.next()
def flow_of_info_gen(N):
print 'function runs for first time'
for i in range(N):
print 'execution before yielding value %d' % i
yield i
print 'execution after yielding value %d' % i
print 'function runs for last time'
gen2 = flow_of_info_gen(3)
gen2.next()
gen2.next()
gen2.next()
gen2.next()
gen3 = my_first_gen(10)
gen3.next()
gen3.next()
gen3.next()
gen3.next()
sum(gen3)
#LC returning a list
[x for x in range(10)]
#LC working as a generator
(x for x in range(10))
sum(x for x in range(10))
def sum_list(N):
return sum([x for x in range(N)])
def sum_gen(N):
return sum((x for x in range(N)))
N=1000
print 'Time for LC : ',
%timeit sum_list(N)
print '\nTime for Generator : ',
%timeit sum_gen(N)
N=100000 #100K
print 'Time for LC : ',
%timeit sum_list(N)
print '\nTime for Generator : ',
%timeit sum_gen(N)
N=10000000 #10Mn
print 'Time for LC : ',
%timeit sum_list(N)
print '\nTime for Generator : ',
%timeit sum_gen(N)
N=100000000 #100Mn
print '\nTime for Generator : ',
%timeit sum_gen(N)
print 'Time for LC : ',
%timeit sum_list(N)
import pandas as pd
data = pd.read_csv("skills.csv")
print data
#Split text with the separator ';'
data['skills_list'] = data['skills'].apply(lambda x: x.split(';'))
print data['skills_list']
#Initialize the set
skills_unq = set()
#Update each entry into set. Since it takes only unique value, duplicates will be ignored automatically.
skills_unq.update( (sport for l in data['skills_list'] for sport in l) )
print skills_unq
#Convert set to list:
skills_unq = list(skills_unq)
sport_matrix = [ [1 if skill in row else 0 for skill in skills_unq] for row in data['skills_list'] ]
sport_matrix
data = pd.concat([data, pd.DataFrame(sport_matrix,columns=skills_unq)],axis=1)
print data
data2 = pd.DataFrame([1,2,3,4,5], columns=['number'])
print data2
deg = 6
cols = ['power_%d'%i for i in range(2,deg+1)]
print cols
power_matrix = [ [i**p for p in range(2,deg+1) ] for i in data2['number'] ]
power_matrix
data2 = pd.concat([data2, pd.DataFrame(power_matrix,columns=cols)],axis=1)
print data2
cols = ['a','b','c','d','a_transform','b_transform','c_transform','d_power2','d_power3','d_power4','d_power5','temp1','temp2']
#Here a,b,c,d are original variables; transform are transformation, power are for polynomial reg, temp are intermediate
#Select only variables with 'transform':
col_set1 = [x for x in cols if x.endswith('transform')]
col_set2 = [x for x in cols if 'power' in x]
col_set3 = [x for x in cols if (x.endswith('transform')) | ('power' in x)]
col_set4 = [x for x in cols if x not in ['temp1','temp2']]
print 'Set1: ', col_set1
print 'Set2: ', col_set2
print 'Set3: ', col_set3
print 'Set4: ', col_set4
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Eg1
Step2: Eg2
Step3: Eg3
Step4: Additional examples (mentioned as exercise for users)
Step5: Eg
Step6: Eg
Step7: The Time Advantage
Step8: Here we have used the Python temporary function lambda. This can be replaced with a standard Python function or a user-defined function declared earlier.
Step9: Though the three techniques produce the same result, we can see that LC is the most elegant and readable technique. You might argue that even the map function is not bad in this case. But map has its own limitations which are not evident in this example.
Step10: It is clearly evident that with the slight increase in complexity, both for and map routines became bulkier and less readable. However, the LC routine is still concise and required a minor modification.
Step11: Here we can see that in this case LC is ~30% faster than for-loop and ~45% faster than map function.
Step12: In this case, LC is ~20% faster than for-loop and ~65% faster than map function.
Step13: Here we see that for-loop is fasters. This is because in a for-loop, we need not return an element and just move onto next iteration using "pass".
Step14: Here we see a similar trend as before. So till the point of iterating and making slight modifications, for-loop is clear winner.
Step15: This is interesting! So the runtime jumps to almost twice just because of storing the information. The reason being that we have to define an empty list and append the result to each in each iteration.
Step16: Aha! So the guess was right. When we force LC to make function calls, it ends up being more expensive than map function.
Step17: Data Science Examples
Step18: Eg5
Step19: Eg6
|
3,518
|
<ASSISTANT_TASK:>
Python Code:
import collections
print(collections.Counter(['a', 'b', 'c', 'a', 'b', 'b']))
print(collections.Counter({'a': 2, 'b': 3, 'c': 1}))
print(collections.Counter(a=2, b=3, c=1))
import collections
c = collections.Counter()
print('Initial :', c)
c.update('abcdaab')
print('Sequence:', c)
c.update({'a': 1, 'd': 5})
print('Dict :', c)
import collections
c = collections.Counter('abcdaab')
for letter in 'abcde':
print('{} : {}'.format(letter, c[letter]))
import collections
c = collections.Counter('extremely')
c['z'] = 0
print(c)
print(list(c.elements()))
import collections
c1 = collections.Counter(['a', 'b', 'c', 'a', 'b', 'b'])
c2 = collections.Counter('alphabet')
print('C1:', c1)
print('C2:', c2)
print('\nCombined counts:')
print(c1 + c2)
print('\nSubtraction:')
print(c1 - c2)
print('\nIntersection (taking positive minimums):')
print(c1 & c2)
print('\nUnion (taking maximums):')
print(c1 | c2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: An empty Counter can be constructed with no arguments and populated via the update() method
Step2: Accessing Counts
Step3: The elements() method returns an iterator that produces all of the items known to the Counter.
Step4: Arithmetic
|
3,519
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
from numpy import nonzero
import matplotlib.pyplot as plt # to generate plots
from mpl_toolkits.basemap import Basemap # plot on map projections
import matplotlib.dates as mdates
import datetime
from netCDF4 import Dataset # http://unidata.github.io/netcdf4-python/
from netCDF4 import netcdftime
from netcdftime import utime
ncfile = 'data\skt.mon.mean.nc'
fh = Dataset(ncfile, mode='r') # file handle, open in read only mode
lon = fh.variables['lon'][:]
lat = fh.variables['lat'][:]
nctime = fh.variables['time'][:]
t_unit = fh.variables['time'].units
skt = fh.variables['skt'][:]
try :
t_cal = fh.variables['time'].calendar
except AttributeError : # Attribute doesn't exist
t_cal = u"gregorian" # or standard
fh.close() # close the file
utime = netcdftime.utime(t_unit, calendar = t_cal)
datevar = utime.num2date(nctime)
print(datevar.shape)
datevar[0:5]
idx_lat_n3 = (lat>=-5.0) * (lat<=5.0)
idx_lon_n3 = (lon>=210.0) * (lon<=270.0)
years = np.array([idx.year for idx in datevar])
idx_tim_n3 = (years>=1970) * (years<=1999)
idxtim = nonzero(idx_tim_n3)[0]
#idxlat = nonzero(idx_lat_n3)[0]
idxlon = nonzero(idx_lon_n3)[0]
idxlon
lat_n3 = lat[idx_lat_n3]
lon_n3 = lon[idx_lon_n3]
dates_n3 = datevar[idx_tim_n3]
skt_n3 = skt[idx_tim_n3, :, :][:,idx_lat_n3,:][:,:,idx_lon_n3]
print(skt_n3.shape)
print(dates_n3.shape)
skt_n3 = np.reshape(skt_n3, (12,30,6,33), order='F')
skt_n3 = np.transpose(skt_n3, (1, 0, 2, 3))
skt_n3.shape
clima_skt_n3 = np.mean(skt_n3, axis=0)
clima_skt_n3.shape
num_repeats = 30 # 30 years
clima_skt_n3 = np.vstack([clima_skt_n3]*num_repeats)
clima_skt_n3.shape
clima_skt_n3 = np.reshape(clima_skt_n3, (12,30,6,33),order='F')
clima_skt_n3 = np.transpose(clima_skt_n3, (1, 0, 2, 3))
clima_skt_n3.shape
ssta = skt_n3-clima_skt_n3
ssta2 = np.reshape(ssta,(30,12,6*33), order='F') # 30x12x198
ssta3 = np.mean(ssta2, axis=2); # 30x12
ssta3.shape
ssta_series = np.reshape(ssta3.T,(12*30,1), order='F'); # 1x360
ssta_series.shape
import matplotlib.dates as mdates
from matplotlib.dates import MonthLocator, WeekdayLocator, DateFormatter
import matplotlib.ticker as ticker
fig, ax = plt.subplots(1, 1 , figsize=(15,5))
ax.plot(dates_n3, ssta_series)
ax.set_ylim((-4,4))
#horiz_line_data = np.array([0 for i in np.arange(len(dates_n3))])
#ax.plot(dates_n3, horiz_line_data, 'r--')
ax.axhline(0, color='r')
ax.set_title('NINO3 SSTA 1970-1999')
ax.set_ylabel(['$^oC$'])
ax.set_xlabel('Date')
# rotate and align the tick labels so they look better
fig.autofmt_xdate()
# use a more precise date string for the x axis locations in the toolbar
ax.fmt_xdata = mdates.DateFormatter('%Y')
np.savez('data/ssta.nino3.30y.npz', ssta_series=ssta_series)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Set and read input NetCDF file info
Step2: 2.2 Parse time
Step3: 3. Subregion for nino3 area
Step4: time
Step5: Get Index using np.nonzero
Step6: 3.2 Extract data over nino3 area
Step7: 4. Calculate region means
Step8: 4.2 Calculate monthly climatology
Step9: 4.3 Calculate anomaly of SST over nino3 area
Step10: 5. Have a beautiful look
Step11: 6. Save data
|
3,520
|
<ASSISTANT_TASK:>
Python Code:
#import modules
import numpy as np
from matplotlib import pyplot as plt
# Help function
def is_zero_vector(v):
Check whether vector v is a zero vector
Arguments:
- v : vector
Return:
- True if v is a nonzero vector. Otherwise, false
Exceptions:
- TypeError:
- v is not a vector
if v.ndim != 1 and not np.isscalar(v):
raise TypeError('v is not a vector')
return not np.any(v)
def power_iteration(A, x, k=10):
Compute the dominant eigenvalue and eigenvector of a matrix A
Arguments:
- A : A matrix
- x : initial and nonzero vector
- k : number of steps (default:10)
Returns:
- eigval : dominant eigenvalue
- eigvec : dominant eigenvector
Exceptions:
- ValueError:
- x is a zero vector
if is_zero_vector(x):
raise ValueError('x is a zero vector')
for _ in range(k):
eigvec = x / np.linalg.norm(x)
x = np.matmul(A, eigvec)
eigval = np.matmul(np.matmul(eigvec.T, A), eigvec)
eigvec = x / np.linalg.norm(x)
return eigval, eigvec
A = np.array([
[1, 3],
[2, 2]
])
x = np.random.rand(2)
power_iteration(A, x, 25)
def inverse_power_iteration(A, x, s, k=10):
Compute eigenvalue and eigenvector of a matrix A nearest to input s
Arguments:
- A : A matrix
- x : nonzero initial vector
- s : shift
- k : number of steps (default:10)
Returns:
- eigval : dominant eigenvalue
- eigvec : dominant eigenvector
As = A - s * np.eye(A.shape[0])
for _ in range(k):
eigvec = x / np.linalg.norm(x)
x = np.linalg.solve(As, eigvec)
eigval = np.matmul(eigvec.T, x)
u = x / np.linalg.norm(x)
eigval = 1 / eigval + s
return eigval, eigvec
A = np.array([
[3, 2, 4],
[2, 1, 2],
[4, 2, 3]
])
x = np.random.rand(3)
print(inverse_power_iteration(A, x, -1.1))
print(inverse_power_iteration(A, x, 0))
print(inverse_power_iteration(A, x, 8))
def rayleigh_quotient_iteration(A, x, k=10):
Compute the dominant eigenvalue and eigenvector of a matrix A
Arguments:
- A : A matrix
- x : nonzero initial vector
- k : number of steps (default:10)
Returns:
- eigval : dominant eigenvalue
- eigvec : dominant eigenvector
for _ in range(k):
eigvec = x / np.linalg.norm(x)
eigval = np.matmul(np.matmul(eigvec.T, A), eigvec)
x = np.linalg.solve(A - eigval * np.eye(A.shape[0]), eigvec)
eigvec = x / np.linalg.norm(x)
eigval = np.matmul(np.matmul(eigvec.T, A), eigvec)
return eigval, eigvec
A = np.array([
[3, 2, 4],
[2, 1, 2],
[4, 2, 3]
])
x = np.random.rand(3)
print(rayleigh_quotient_iteration(A, x))
def normalized_simultaneous_iteration(A, k=10):
Compute the eigenvalue and eigenvector of a symmetric matrix A
Arguments:
- A : A matrix
- k : number of steps (default:10)
Returns:
- eigval : dominant eigenvalue
- eigvec : dominant eigenvector
m, n = A.shape
Q = np.eye(m)
for _ in range(k):
Q, R = np.linalg.qr(np.matmul(A, Q))
eigval = np.diag(np.matmul(np.matmul(Q.T, A), Q))
eigvec = Q
return eigval, eigvec
A = np.array([
[3, 2, 4],
[2, 1, 2],
[4, 2, 3]
])
eigval, eigvec = normalized_simultaneous_iteration(A)
print('eigenvalues : ')
print(eigval)
print()
print('eigenvectors : ')
print(eigvec)
print()
eigval, eigvec = np.linalg.eig(A)
print('eigenvalues (np.linalg.eig) : ')
print(eigval)
print()
print('eigenvectors (np.linalg.eig) : ')
print(eigvec)
def unshifted_qr(A, k=10):
Compute the eigenvalue and eigenvector of a symmetric matrix A
Arguments:
- A : A matrix
- k : number of steps (default:10)
Returns:
- eigval : dominant eigenvalue
- eigvec : dominant eigenvector
m, n = A.shape
Q = np.eye(m)
Qbar = Q
R = A
for _ in range(k):
Q, R = np.linalg.qr(np.matmul(R, Q))
Qbar = np.matmul(Qbar, Q)
eigval = np.diag(np.matmul(R, Q))
eigvec = Qbar
return eigval, eigvec
A = np.array([
[3, 2, 4],
[2, 1, 2],
[4, 2, 3]
])
eigval, eigvec = unshifted_qr(A)
print('eigenvalues : ')
print(eigval)
print()
print('eigenvectors : ')
print(eigvec)
print()
eigval, eigvec = np.linalg.eig(A)
print('eigenvalues (np.linalg.eig) : ')
print(eigval)
print()
print('eigenvectors (np.linalg.eig) : ')
print(eigvec)
def shifted_qr(A, tol=1e-14, max_count=1000):
m = A.shape[0] # row size
eigval = np.zeros(m)
n = m
while n > 1:
count = 0
while np.max(A[n-1, 0:n-1]) > tol and count < max_count:
count += 1
shift = A[n-1, n-1]
Q, R = np.linalg.qr(A - shift * np.eye(n))
A = np.matmul(R, Q) + shift * np.eye(n)
if count < max_count:
eigval[n-1] = A[n-1, n-1]
n -= 1
A = A[0:n, 0:n]
else:
disc = (A[n-2, n-2] - A[n-1, n-1])^2 + 4 * A[n-1, n-2] * A[n-2, n-1]
eigval[n-1] = (A[n-2, n-2] + A[n-1,n-1] + np.sqrt(disc)) / 2
eigval[n-2] = (A[n-2, n-2] + A[n-1,n-1] - np.sqrt(disc)) / 2
n -= 2
A = A[0:n, 0:n]
if n > 0:
eigval[0] = A[0, 0]
return eigval
A = np.array([
[3, 2, 4],
[2, 1, 2],
[4, 2, 3]
])
print(shifted_qr(A))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: ★ Eigenvalues And Singular Values ★
Step3: 12.1 power Iteration methods
Step4: Example
Step6: Theorem
Step7: Example
Step9: Rayleigh Quotient Iteration
Step10: Example
Step12: 12.2 QR Algorithm
Step13: Example
Step15: Unshifted QR Algorithm
Step16: Theorem
Step18: Real Schur form and the QR algorithm
Step19: Example
|
3,521
|
<ASSISTANT_TASK:>
Python Code:
import magma as m
import mantle
@m.circuit.combinational
def full_adder(A: m.Bit, B: m.Bit, C: m.Bit) -> (m.Bit, m.Bit):
return A ^ B ^ C, A & B | B & C | C & A # sum, carry
import fault
tester = fault.PythonTester(full_adder)
assert tester(1, 0, 0) == (1, 0), "Failed"
assert tester(0, 1, 0) == (1, 0), "Failed"
assert tester(1, 1, 0) == (0, 1), "Failed"
assert tester(1, 0, 1) == (0, 1), "Failed"
assert tester(1, 1, 1) == (1, 1), "Failed"
print("Success!")
assert tester(1, 0, 0) == full_adder(1, 0, 0), "Failed"
assert tester(0, 1, 0) == full_adder(0, 1, 0), "Failed"
assert tester(1, 1, 0) == full_adder(1, 1, 0), "Failed"
assert tester(1, 0, 1) == full_adder(1, 0, 1), "Failed"
assert tester(1, 1, 1) == full_adder(1, 1, 1), "Failed"
print("Success!")
class FullAdder(m.Circuit):
io = m.IO(I0=m.In(m.Bit),
I1=m.In(m.Bit),
CIN=m.In(m.Bit),
O=m.Out(m.Bit),
COUT=m.Out(m.Bit))
O, COUT = full_adder(io.I0, io.I1, io.CIN)
io.O @= O
io.COUT @= COUT
print(repr(FullAdder))
print(repr(full_adder.circuit_definition))
with open(".magma/full_adder.py") as f:
print(f.read())
import logging
logging.basicConfig(level=logging.INFO)
import fault
tester = fault.Tester(FullAdder)
tester.circuit.I0 = 1
tester.circuit.I1 = 1
tester.circuit.CIN = 1
tester.eval()
tester.circuit.O.expect(1)
tester.circuit.COUT.expect(1)
# compile_and_run throws an exception if the test fails
tester.compile_and_run("verilator")
O, COUT = tester(1, 0, 0)
tester.expect(O, 1)
tester.expect(COUT, 0)
tester.compile_and_run("verilator")
m.compile("build/FullAdder", FullAdder, inline=True)
%cat build/FullAdder.v
%cat build/FullAdder.json
!coreir -i build/FullAdder.json -p instancecount
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A full adder has three single bit inputs, and returns the sum and the carry. The sum is the exclusive or of the 3 bits, the carry is 1 if any two of the inputs bits are 1. Here is a schematic of a full adder circuit (from logisim).
Step2: We can test our combinational function to verify that our implementation behaves as expected fault.
Step3: combinational functions are polymorphic over Python and magma types. If the function is called with magma values, it will produce a circuit instance, wire up the inputs, and return references to the outputs. Otherwise, it will invoke the function in Python. For example, we can use the Python function to verify the circuit simulation.
Step4: Circuits
Step5: First, notice that the FullAdder is a subclass of Circuit. All magma circuits are classes in python.
Step6: We see that it has created an instance of the full_adder combinational function and wired up the interface.
Step7: We can also inspect the code generated by the m.circuit.combinational decorator by looking in the .magma directory for a file named .magma/full_adder.py. When using m.circuit.combinational, magma will generate a file matching the name of the decorated function. You'll notice that the generated code introduces an extra temporary variable (this is an artifact of the SSA pass that magma runs to handle if/else statements).
Step8: In the code above, a mux is imported and named phi. If the combinational circuit contains any if-then-else constructs, they will be transformed into muxes.
Step9: Earlier in the notebook, we showed an example using fault.PythonTester to simulate a circuit. This uses an interactive programming model where test actions are immediately dispatched to the underlying simulator (which is why we can perform assertions on the simulation values in Python.
Step10: An instance of a Tester has an attribute .circuit that enables the user to record test actions. For example, inputs to a circuit can be poked by setting the attribute corresponding to the input port name.
Step11: fault's default Tester provides the semantics of a cycle accurate simulator, so, unlike verilog, pokes do not create events that trigger computation. Instead, these poke values are staged, and the propogation of their effect occurs when the user calls the eval action.
Step12: To assert that the output of the circuit is equal to a value, we use the expect method that are defined on the attributes corresponding to circuit output ports
Step13: Because fault is a staged programming environment, the above actions are not executed until we have advanced to the next stage. In the first stage, the user records test actions (e.g. poke, eval, expect). In the second stage, the test is compiled and run using a target runtime. Here's examples of running the test using magma's python simulator, the coreir c++ simulator, and verilator.
Step14: The tester also provides the same convenient __call__ interface we saw before.
Step15: Generate Verilog
Step16: Generate CoreIR
Step17: Here's an example of running a CoreIR pass on the intermediate representation.
|
3,522
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import openpathsampling as paths
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from openpathsampling.visualize import PathTreeBuilder, PathTreeBuilder
from IPython.display import SVG, HTML
def ipynb_visualize(movevis):
Default settings to show a movevis in an ipynb.
view = movevis.renderer
view.zoom = 1.5
view.scale_y = 18
view.scale_th = 20
view.font_size = 0.4
return view
old_store = paths.AnalysisStorage("mstis.nc")
#old_store = paths.Storage("mstis.nc") # if not actually doing analysis, but loading network, etc
network = old_store.networks[0]
engine = old_store.engines[0]
template = old_store.snapshots[0]
# this is how we would get it out of a simulation (although the actual simulation here has bad stats)
# first, we need the crossing probabilities, which we get when we calculate the rate
network.hist_args['max_lambda'] = { 'bin_width' : 0.02, 'bin_range' : (0.0, 0.5) }
network.hist_args['pathlength'] = { 'bin_width' : 5, 'bin_range' : (0, 150) }
rates = network.rate_matrix(old_store.steps)
# just use the analyzed network to make the bias
bias = paths.SRTISBiasFromNetwork(network)
bias.df
# For better stats, use the results that I got from a 20k MC step run
# We can create fake TCPs and force them on the network.
tcp_A = paths.numerics.LookupFunction.from_dict(
{0.2: 1.0,
0.3: 0.13293104100673198,
0.4: 0.044370838092911397,
0.5: 0.021975696374764188}
)
tcp_B = paths.numerics.LookupFunction.from_dict(
{0.2: 1.0,
0.3: 0.13293104100673198,
0.4: 0.044370838092911397,
0.5: 0.021975696374764188}
)
tcp_C = paths.numerics.LookupFunction.from_dict(
{0.2: 1.0,
0.3: 0.19485705066078274,
0.4: 0.053373003923696649,
0.5: 0.029175949467020165}
)
# load states for identification purposes
stateA = old_store.volumes['A']
stateB = old_store.volumes['B']
stateC = old_store.volumes['C']
# use the sampling transitions; in MSTIS, these are also stored in from_state
network.from_state[stateA].tcp = tcp_A
network.from_state[stateB].tcp = tcp_B
network.from_state[stateC].tcp = tcp_C
bias = paths.SRTISBiasFromNetwork(network)
bias.df
scheme = paths.SRTISScheme(network, bias=bias, engine=engine)
movevis = paths.visualize.MoveTreeBuilder()
#movevis.mover(scheme.move_decision_tree(), network.all_ensembles)
#SVG(ipynb_visualize(movevis).to_svg())
final_samp0 = old_store.steps[len(old_store.steps)-1].active[network.sampling_ensembles[-1]]
sset = paths.SampleSet([final_samp0])
storage = paths.Storage("srtis.nc", "w")
storage.save(template)
srtis = paths.PathSampling(
storage=storage,
sample_set=sset,
move_scheme=scheme
)
n_steps_to_run = int(scheme.n_steps_for_trials(
mover=scheme.movers['minus'][0],
n_attempts=1
))
print(n_steps_to_run)
# logging creates ops_output.log file with details of what the calculation is doing
#import logging.config
#logging.config.fileConfig("logging.conf", disable_existing_loggers=False)
%%time
multiplier = 2
srtis.run_until(multiplier*n_steps_to_run)
#storage.close()
%%time
#storage = paths.AnalysisStorage("srtis.nc")
#scheme = storage.schemes[0]
scheme.move_summary(storage.steps)
scheme.move_summary(storage.steps, 'shooting')
scheme.move_summary(storage.steps, 'minus')
scheme.move_summary(storage.steps, 'repex')
scheme.move_summary(storage.steps, 'pathreversal')
replica = storage.samplesets[0].samples[0].replica
ensemble_trace = paths.trace_ensembles_for_replica(replica, storage.steps)
print len(ensemble_trace)
srtis_ensembles = scheme.network.sampling_ensembles+scheme.network.special_ensembles['ms_outer'].keys()
srtis_ensemble_numbers = {e : srtis_ensembles.index(e) for e in srtis_ensembles}
# this next is just for pretty printing
srtis_numbers_ensemble = {srtis_ensemble_numbers[e] : e for e in srtis_ensemble_numbers}
for k in sorted(srtis_numbers_ensemble.keys()):
print k, ":", srtis_numbers_ensemble[k].name
plt.plot([srtis_ensemble_numbers[e] for e in ensemble_trace])
count = 0
for i in range(len(ensemble_trace)-1):
[this_val, next_val] = [srtis_ensemble_numbers[ensemble_trace[k]] for k in [i,i+1]]
if this_val == 1 and next_val == 0:
count += 1
count
hist_numbers = [srtis_ensemble_numbers[e] for e in ensemble_trace]
bins = [i-0.5 for i in range(len(srtis_ensembles)+1)]
plt.hist(hist_numbers, bins=bins);
import pandas as pd
hist = paths.analysis.Histogram(bin_width=1.0, bin_range=[-0.5,9.5])
colnames = {i : srtis_numbers_ensemble[i].name for i in range(len(srtis_ensembles))}
df = pd.DataFrame(columns=[colnames[i] for i in colnames])
for i in range(len(hist_numbers)):
hist.add_data_to_histogram([hist_numbers[i]])
if i % 100 == 0:
normalized = hist.normalized()
local_df = pd.DataFrame([normalized.values()], index=[i], columns=[colnames[k] for k in normalized.keys()])
df = df.append(local_df)
plt.pcolormesh(df.fillna(0.0), cmap="bwr", vmin=0.0, vmax=0.2);
plt.gca().invert_yaxis()
plt.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Single Replica TIS
Step2: Open the storage and load things from it.
Step3: One of the points of SRTIS is that we use a bias (which comes from an estimate of the crossing probability) in order to improve our sampling.
Step4: Here we actually set up the SRTIS move scheme for the given network. It only requires one line
Step5: Now we'll visualize the SRTIS move scheme.
Step6: Next we need to set up an appropriate single-replica initial sampleset. We'll take the last version of from one of the outer TIS ensembles.
Step7: Finally, we set up the new storage file and the new simulation.
Step8: From here, we'll be doing the analysis of the SRTIS run.
|
3,523
|
<ASSISTANT_TASK:>
Python Code:
from os import path as p, chdir
if 'examples' in p.abspath('.'):
chdir('..')
from riko.modules.fetchpage import pipe
url = 'https://news.ycombinator.com/'
next(pipe(conf={'url': url, 'start': '<title>', 'end': '</title>'}))
from riko.modules.xpathfetchpage import pipe
xpath = '/html/body/center/table/tr[3]/td/table/tr[1]/td[3]/a'
next(pipe(conf={'url': 'https://news.ycombinator.com/', 'xpath': xpath}))
### Create a SyncPipe flow ###
#
# `SyncPipe` is a convenience class that creates chainable flows
# and allows for parallel processing.
from riko.collections import SyncPipe
### Set the pipe configurations ###
#
# Notes:
# 1. the `detag` option will strip all html tags from the result
# 2. fetch the text contained inside the 'body' tag of the hackernews homepage
# 3. replace newlines with spaces and assign the result to 'content'
# 4. tokenize the resulting text using whitespace as the delimeter
# 5. count the number of times each token appears
# 6. obtain the raw stream
# 7. extract the first word and its count
url = 'https://news.ycombinator.com/'
fetch_conf = {'url': url, 'start': '<body>', 'end': '</body>', 'detag': True} # 1
replace_conf = {'rule': [{'find': '\r\n', 'replace': ' '}, {'find': '\n', 'replace': ' '}]}
flow = (
SyncPipe('fetchpage', conf=fetch_conf) # 2
.strreplace(conf=replace_conf, assign='content') # 3
.tokenizer(conf={'delimiter': ' '}, emit=True) # 4
.count(conf={'count_key': 'content'})) # 5
stream = flow.output # 6
next(stream) # 7
from riko.modules.fetch import pipe
### Fetch an RSS feed ###
stream = pipe(conf={'url': 'https://news.ycombinator.com/rss'})
item = next(stream)
item['title'], item['link'], item['comments']
from riko.modules.fetchsitefeed import pipe
### Fetch the first RSS feed found ###
#
# Note: regardless of how you fetch an RSS feed, it will have the same
# structure
stream = pipe(conf={'url': 'http://arstechnica.com/rss-feeds/'})
item = next(stream)
item.keys()
item['title'], item['author'], item['id']
from riko.collections import SyncPipe
### Set the pipe configurations ###
fetch_conf = {'url': 'https://news.ycombinator.com/rss'}
filter_rule = {'field': 'link', 'op': 'contains', 'value': '.com'}
xpath = '/html/body/center/table/tr[3]/td/table[2]/tr[1]/td/table/tr/td[3]/span/span'
xpath_conf = {'url': {'subkey': 'comments'}, 'xpath': xpath}
### Create a SyncPipe flow ###
#
# `SyncPipe` is a convenience class that creates chainable flows
# and allows for parallel processing.
#
# The following flow will:
# 1. fetch the hackernews RSS feed
# 2. filter for items with '.com' in the link
# 3. sort the items ascending by title
# 4. fetch the first comment from each item
# 5. flatten the result into one raw stream
# 6. extract the first item's content
#
# Note: sorting is not lazy so take caution when using this pipe
flow = (
SyncPipe('fetch', conf=fetch_conf) # 1
.filter(conf={'rule': filter_rule}) # 2
.sort(conf={'rule': {'sort_key': 'title'}}) # 3
.xpathfetchpage(conf=xpath_conf)) # 4
stream = flow.output # 5
next(stream)['content'] # 6
from riko.collections import SyncPipe
### Set the pipe configurations ###
fetch_conf = {'url': 'https://news.ycombinator.com/rss'}
filter_rule = {'field': 'link', 'op': 'contains', 'value': '.com'}
xpath = '/html/body/center/table/tr[3]/td/table[2]/tr[1]/td/table/tr/td[3]/span/span'
xpath_conf = {'url': {'subkey': 'comments'}, 'xpath': xpath}
### Create a parallel SyncPipe flow ###
#
# The following flow will:
# 1. fetch the hackernews RSS feed
# 2. filter for items with '.com' in the article link
# 3. fetch the first comment from all items in parallel (using 4 workers)
# 4. flatten the result into one raw stream
# 5. extract the first item's content
#
# Note: no point in sorting after the filter since parallel fetching doesn't guarantee
# order
flow = (
SyncPipe('fetch', conf=fetch_conf, parallel=True, workers=4) # 1
.filter(conf={'rule': filter_rule}) # 2
.xpathfetchpage(conf=xpath_conf)) # 3
stream = flow.output # 4
next(stream)['content'] # 5
from riko.bado import coroutine, react
from riko.collections import AsyncPipe
### Set the pipe configurations ###
fetch_conf = {'url': 'https://news.ycombinator.com/rss'}
filter_rule = {'field': 'link', 'op': 'contains', 'value': '.com'}
xpath = '/html/body/center/table/tr[3]/td/table[2]/tr[1]/td/table/tr/td[3]/span/span'
xpath_conf = {'url': {'subkey': 'comments'}, 'xpath': xpath}
### Create an AsyncPipe flow ###
#
# The following flow will:
# 1. fetch the hackernews RSS feed
# 2. filter for items with '.com' in the article link
# 3. asynchronously fetch the first comment from each item (using 4 connections)
# 4. flatten the result into one raw stream
# 5. extract the first item's content
#
# Note: no point in sorting after the filter since async fetching doesn't guarantee
# order
@coroutine
def run(reactor):
stream = yield (
AsyncPipe('fetch', conf=fetch_conf, connections=4) # 1
.filter(conf={'rule': filter_rule}) # 2
.xpathfetchpage(conf=xpath_conf) # 3
.output) # 4
print(next(stream)['content']) # 5
try:
react(run)
except SystemExit:
pass
from riko.modules.reverse import pipe
stream = [{'title': 'riko pt. 1'}, {'title': 'riko pt. 2'}]
next(pipe(stream))
from riko.modules.hash import pipe
item = {'title': 'riko pt. 1'}
stream = pipe(item, field='title')
next(stream)
from riko.modules.tokenizer import pipe
item = {'title': 'riko pt. 1'}
tokenizer_conf = {'delimiter': ' '}
stream = pipe(item, conf=tokenizer_conf, field='title')
next(stream)
# In this case, if we just want the result, we can `emit` it instead
stream = pipe(item, conf=tokenizer_conf, field='title', emit=True)
next(stream)
from riko.modules.count import pipe
stream = [{'title': 'riko pt. 1'}, {'title': 'riko pt. 2'}]
next(pipe(stream))
from riko.modules.itembuilder import pipe
attrs = {'key': 'title', 'value': 'riko pt. 1'}
next(pipe(conf={'attrs': attrs}))
from riko.modules import fetchpage, count
fetchpage.async_pipe.__dict__
count.pipe.__dict__
from riko.collections import SyncPipe
attrs = [
{'key': 'title', 'value': 'riko pt. 1'},
{'key': 'content', 'value': "Let's talk about riko!"}]
flow = SyncPipe('itembuilder', conf={'attrs': attrs}).hash()
flow.list[0]
from __future__ import print_function
from riko.collections import SyncPipe
conf1 = {'attrs': [{'value': 'https://google.com', 'key': 'content'}]}
conf2 = {'rule': [{'find': 'com', 'replace': 'co.uk'}]}
def pipe(test=False):
    """Build an itembuilder -> strreplace flow (module-level conf1/conf2)
    and print every item it emits."""
    flow = SyncPipe('itembuilder', conf=conf1, test=test).strreplace(conf=conf2)
    for item in flow.output:
        print(item)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: Fetch a webpage using an xpath
Step3: Word Count
Step4: Fetching feeds
Step5: Please see the FAQ for a complete list of supported file types and
Step6: Please see alternate workflow creation for an alternative (function based) method for
Step7: Asynchronous processing
Step8: Design Principles
Step9: processors process individual items and can be parallelized across
Step10: Some processors, e.g., pipetokenizer, return multiple results.
Step11: operators are split into sub-types of aggregators
Step12: processors are split into sub-types of source and transformer.
Step13: The following table summaries these observations
Step14: The SyncPipe and AsyncPipe classes (among other things) perform this
Step15: Please see the cookbook for advanced examples including how to wire in
|
3,524
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
import socnet as sn
sn.node_size = 3
sn.node_color = (0, 0, 0)
sn.edge_width = 1
sn.edge_color = (192, 192, 192)
sn.node_label_position = 'top center'
g = sn.load_graph('twitter.gml')
sn.show_graph(g, nlab=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Carregando e visualizando o grafo
|
3,525
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
from sklearn.cross_validation import cross_val_score
from sklearn.svm import LinearSVC
cross_val_score(LinearSVC(), X, y, cv=5)
cross_val_score(LinearSVC(), X, y, cv=5, scoring="f1_macro")
y % 2
cross_val_score(LinearSVC(), X, y % 2)
cross_val_score(LinearSVC(), X, y % 2, scoring="average_precision")
cross_val_score(LinearSVC(), X, y % 2, scoring="roc_auc")
from sklearn.metrics.scorer import SCORERS
print(SCORERS.keys())
def my_accuracy_scoring(est, X, y):
    """Custom scorer: fraction of samples in (X, y) that est predicts correctly."""
    predictions = est.predict(X)
    return np.mean(predictions == y)
cross_val_score(LinearSVC(), X, y, scoring=my_accuracy_scoring)
def my_super_scoring(est, X, y):
    """Scorer rewarding accuracy and penalizing the fraction of nonzero coefficients."""
    accuracy = np.mean(est.predict(X) == y)
    density = np.mean(est.coef_ != 0)
    return accuracy - density
from sklearn.grid_search import GridSearchCV
y = iris.target
grid = GridSearchCV(LinearSVC(C=.01, dual=False),
param_grid={'penalty' : ['l1', 'l2']},
scoring=my_super_scoring)
grid.fit(X, y)
print(grid.best_params_)
from sklearn.cross_validation import ShuffleSplit
shuffle_split = ShuffleSplit(len(X), 10, test_size=.4)
cross_val_score(LinearSVC(), X, y, cv=shuffle_split)
from sklearn.cross_validation import StratifiedKFold, KFold, ShuffleSplit
def plot_cv(cv, n_samples):
    """Visualize a cross-validation scheme: one row per split, True marks test samples."""
    rows = []
    for _, test_idx in cv:
        row = np.zeros(n_samples, dtype=bool)
        row[test_idx] = True
        rows.append(row)
    plt.matshow(rows)
plot_cv(StratifiedKFold(y, n_folds=5), len(y))
plot_cv(KFold(len(iris.target), n_folds=5), len(iris.target))
plot_cv(ShuffleSplit(len(iris.target), n_iter=20, test_size=.2),
len(iris.target))
# %load solutions/cross_validation_iris.py
kf = KFold(len(X), n_folds=5)
print( cross_val_score(LinearSVC(), X, y, cv=kf) )
print( cross_val_score(LinearSVC(), X, y, cv=KFold(len(X), n_folds=3)) )
skf = StratifiedKFold(y, n_folds=5)
cross_val_score(LinearSVC(), X, y, cv=skf)
plot_cv(KFold(len(X), n_folds=3), len(y))
y
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's go to a binary task for a moment
Step2: Implementing your own scoring metric
Step3: There are other ways to do cross-valiation
Step4: Exercises
|
3,526
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.5
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
print(tf.__version__)
x = tf.constant([2, 3, 4])
x
x = tf.Variable(2.0, dtype=tf.float32, name='my_variable')
x.assign(45.8) # TODO 1
x
x.assign_add(4) # TODO 2
x
x.assign_sub(3) # TODO 3
x
a = tf.constant([5, 3, 8]) # TODO 1
b = tf.constant([3, -1, 2])
c = tf.add(a, b)
d = a + b
print("c:", c)
print("d:", d)
a = tf.constant([5, 3, 8]) # TODO 2
b = tf.constant([3, -1, 2])
c = tf.multiply(a, b)
d = a * b
print("c:", c)
print("d:", d)
# tf.math.exp expects floats so we need to explicitly give the type
a = tf.constant([5, 3, 8], dtype=tf.float32)
b = tf.math.exp(a)
print("b:", b)
# native python list
a_py = [1, 2]
b_py = [3, 4]
tf.add(a_py, b_py) # TODO 1
# numpy arrays
a_np = np.array([1, 2])
b_np = np.array([3, 4])
tf.add(a_np, b_np) # TODO 2
# native TF tensor
a_tf = tf.constant([1, 2])
b_tf = tf.constant([3, 4])
tf.add(a_tf, b_tf) # TODO 3
a_tf.numpy()
X = tf.constant(range(10), dtype=tf.float32)
Y = 2 * X + 10
print("X:{}".format(X))
print("Y:{}".format(Y))
X_test = tf.constant(range(10, 20), dtype=tf.float32)
Y_test = 2 * X_test + 10
print("X_test:{}".format(X_test))
print("Y_test:{}".format(Y_test))
y_mean = Y.numpy().mean()
def predict_mean(X):
    """Baseline predictor: return the training-set mean (global y_mean) for every input."""
    return [y_mean for _ in range(len(X))]
Y_hat = predict_mean(X_test)
errors = (Y_hat - Y)**2
loss = tf.reduce_mean(errors)
loss.numpy()
def loss_mse(X, Y, w0, w1):
    """Mean squared error of the linear model w0 * X + w1 against targets Y."""
    residuals = w0 * X + w1 - Y
    return tf.reduce_mean(residuals ** 2)
# TODO 1
def compute_gradients(X, Y, w0, w1):
    """Return [dLoss/dw0, dLoss/dw1] for the scalar MSE loss on (X, Y).

    The GradientTape records the loss computation so both gradients come
    from a single reverse-mode pass.
    """
    with tf.GradientTape() as tape:
        loss = loss_mse(X, Y, w0, w1)
    return tape.gradient(loss, [w0, w1])
w0 = tf.Variable(0.0)
w1 = tf.Variable(0.0)
dw0, dw1 = compute_gradients(X, Y, w0, w1)
print("dw0:", dw0.numpy())
print("dw1", dw1.numpy())
STEPS = 1000
LEARNING_RATE = .02
MSG = "STEP {step} - loss: {loss}, w0: {w0}, w1: {w1}\n"
w0 = tf.Variable(0.0)
w1 = tf.Variable(0.0)
for step in range(0, STEPS + 1):
dw0, dw1 = compute_gradients(X, Y, w0, w1)
w0.assign_sub(dw0 * LEARNING_RATE)
w1.assign_sub(dw1 * LEARNING_RATE)
if step % 100 == 0:
loss = loss_mse(X, Y, w0, w1)
print(MSG.format(step=step, loss=loss, w0=w0.numpy(), w1=w1.numpy()))
loss = loss_mse(X_test, Y_test, w0, w1)
loss.numpy()
X = tf.constant(np.linspace(0, 2, 1000), dtype=tf.float32)
Y = X * tf.exp(-X**2)
%matplotlib inline
plt.plot(X, Y)
def make_features(X):
    """Expand scalar inputs X into a (len(X), 5) feature matrix.

    Columns: constant bias, x, x**2, sqrt(x), exp(x). tf.stack(..., axis=1)
    turns the five 1-D feature vectors into per-sample rows.
    """
    f1 = tf.ones_like(X) # Bias.
    f2 = X
    f3 = tf.square(X)
    f4 = tf.sqrt(X)
    f5 = tf.exp(X)
    return tf.stack([f1, f2, f3, f4, f5], axis=1)
def predict(X, W):
    """Linear prediction X @ W, squeezed from shape (n, 1) to a length-n vector."""
    return tf.squeeze(X @ W, -1)
def loss_mse(X, Y, W):
    """Mean squared error of predict(X, W) against targets Y.

    Note: this redefines the earlier two-parameter loss_mse for the
    multi-feature weight-matrix model.
    """
    Y_hat = predict(X, W)
    errors = (Y_hat - Y)**2
    return tf.reduce_mean(errors)
def compute_gradients(X, Y, W):
    """Return dLoss/dW of the MSE loss for weight matrix W on features X, targets Y.

    Bug fix: the original body ignored its X argument and silently closed
    over the global feature matrix Xf; it now uses the parameter, and the
    training loop below passes Xf explicitly.
    """
    with tf.GradientTape() as tape:
        loss = loss_mse(X, Y, W)
    return tape.gradient(loss, W)
# TODO 2
STEPS = 2000
LEARNING_RATE = .02
Xf = make_features(X)
n_weights = Xf.shape[1]
W = tf.Variable(np.zeros((n_weights, 1)), dtype=tf.float32)
# For plotting
steps, losses = [], []
plt.figure()
for step in range(1, STEPS + 1):
    # Pass the feature matrix (not raw X) now that the function uses its argument.
    dW = compute_gradients(Xf, Y, W)
    W.assign_sub(dW * LEARNING_RATE)
    if step % 100 == 0:
        loss = loss_mse(Xf, Y, W)
        steps.append(step)
        losses.append(loss)
        plt.clf()
        plt.plot(steps, losses)
print("STEP: {} MSE: {}".format(STEPS, loss_mse(Xf, Y, W)))
plt.figure()
plt.plot(X, Y, label='actual')
plt.plot(X, predict(Xf, W), label='predicted')
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Operations on Tensors
Step2: Point-wise operations
Step3: NumPy Interoperability
Step4: You can convert a native TF tensor to a NumPy array using .numpy()
Step5: Linear Regression
Step6: Let's also create a test dataset to evaluate our models
Step7: Loss Function
Step8: Using mean squared error, our loss is
Step9: This values for the MSE loss above will give us a baseline to compare how a more complex model is doing.
Step10: Gradient Function
Step11: Training Loop
Step12: Now let's compare the test loss for this linear regression to the test loss from the baseline model that outputs always the mean of the training set
Step13: This is indeed much better!
|
3,527
|
<ASSISTANT_TASK:>
Python Code:
# Inicializacao
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
# Load the biometrics dataset (this notebook targets Python 2: see the
# `print` statements and `xrange` below, hence the 'rb' mode for csv).
import csv
with open("biometria.csv", 'rb') as f:
    dados = list(csv.reader(f))
# Bug fix: string comparison must use '==', not 'is' (identity). The
# original only worked because CPython happens to intern short literals.
# Rows: label ('V' volleyball / 'F' football), height, weight; the first
# and last rows are skipped by the [1:-1] slice (header/trailing line).
rotulos_volei = [d[0] for d in dados[1:-1] if d[0] == 'V']
rotulos_futebol = [d[0] for d in dados[1:-1] if d[0] == 'F']
altura_volei = [[float(d[1])] for d in dados[1:-1] if d[0] == 'V']
altura_futebol = [[float(d[1])] for d in dados[1:-1] if d[0] == 'F']
peso_volei = [[float(d[2])] for d in dados[1:-1] if d[0] == 'V']
peso_futebol = [[float(d[2])] for d in dados[1:-1] if d[0] == 'F']
from sklearn import mixture
from sklearn.cross_validation import train_test_split
def treinamento_GMM_ML(train_size=0.3, n_components=2):
    """Train one GMM per class on player heights and classify test samples
    by maximum likelihood (no class prior).

    Args:
        train_size: fraction of the data used for training.
        n_components: number of Gaussian components per class model.

    Returns:
        Percentage of correctly classified test samples.
    """
    # Split into train/test and separate the training samples by class.
    dados_treino, dados_teste, rotulos_treino, rotulos_teste =\
    train_test_split(altura_volei + altura_futebol, rotulos_volei + rotulos_futebol, train_size=train_size)
    treino_futebol = [dados_treino[i] for i in xrange(len(dados_treino)) if rotulos_treino[i] == 'F']
    treino_volei = [dados_treino[i] for i in xrange(len(dados_treino)) if rotulos_treino[i] == 'V']
    # Configure one mixture model per class.
    g1 = mixture.GMM(n_components=n_components)
    g2 = mixture.GMM(n_components=n_components)
    # Fit each GMM on its own class's training samples.
    g1.fit(treino_futebol)
    g2.fit(treino_volei)
    # Per-sample log-likelihood of the test data under each class model.
    p_futebol = g1.score(dados_teste)
    p_volei = g2.score(dados_teste)
    # Assign each test sample to the class whose model is more likely.
    x = []
    for i in xrange(len(dados_teste)):
        if p_futebol[i] > p_volei[i]:
            x.append('F')
        else:
            x.append('V')
    # Count correct predictions and convert to a percentage.
    acertos = 0.0
    for i in xrange(len(x)):
        if x[i] == rotulos_teste[i]:
            acertos += 1
    acertos *= 100.0/float(len(x))
    return acertos
print "Acertos:", treinamento_GMM_ML(), "%"
# Exhaustive-search parameters: range of training-set fractions to scan.
train_size_min = 0.35
train_size_max = 0.95
train_size_step = 0.05
# Number of Monte Carlo iterations per training-set size.
n_iter = 100
# Accumulators: scanned sizes, mean accuracy, accuracy spread.
steps = []
medias = []
variancias = []
train_size_atual = train_size_min
while train_size_atual <= train_size_max:  # for each training-set size
    acertos = []
    for k in xrange(n_iter):  # for each Monte Carlo iteration
        # Fix: dropped a dead train_test_split call whose results were
        # never used -- treinamento_GMM_ML performs its own split.
        score = treinamento_GMM_ML(train_size=train_size_atual, n_components=2)
        acertos.append(score)
    steps.append(train_size_atual)
    medias.append(np.mean(np.array(acertos)))
    variancias.append(np.std(np.array(acertos)))
    train_size_atual += train_size_step
plt.figure();
plt.errorbar(steps, medias, yerr=variancias);
plt.ylabel('Indice de acertos');
plt.xlabel('Tamanho do conjunto de treino');
import math
def treinamento_GMM_MAP(train_size=0.3, n_components=2):
    """Train one GMM per class and classify test samples by maximum
    a-posteriori score (log-likelihood plus log class prior).

    Args:
        train_size: fraction of the data used for training.
        n_components: number of Gaussian components per class model.

    Returns:
        Percentage of correctly classified test samples.
    """
    # Split into train/test and separate the training samples by class.
    dados_treino, dados_teste, rotulos_treino, rotulos_teste =\
    train_test_split(altura_volei + altura_futebol, rotulos_volei + rotulos_futebol, train_size=train_size)
    treino_futebol = [dados_treino[i] for i in xrange(len(dados_treino)) if rotulos_treino[i] == 'F']
    treino_volei = [dados_treino[i] for i in xrange(len(dados_treino)) if rotulos_treino[i] == 'V']
    # Configure one mixture model per class.
    g1 = mixture.GMM(n_components=n_components)
    g2 = mixture.GMM(n_components=n_components)
    # Fit each GMM on its own class's training samples.
    g1.fit(treino_futebol)
    g2.fit(treino_volei)
    # Estimate class priors from the training labels.
    # NOTE(review): math.log below raises ValueError if a class is absent
    # from the training split (prior == 0) -- confirm splits keep both classes.
    prior_futebol = len([rotulo for rotulo in rotulos_treino if rotulo == 'F']) / float(len(rotulos_treino))
    prior_volei = len([rotulo for rotulo in rotulos_treino if rotulo == 'V']) / float(len(rotulos_treino))
    # Posterior score (up to a constant): log-likelihood + log prior.
    p_futebol = g1.score(dados_teste) + math.log(prior_futebol)
    p_volei = g2.score(dados_teste) + math.log(prior_volei)
    # Assign each test sample to the class with the higher posterior score.
    x = []
    for i in xrange(len(dados_teste)):
        if p_futebol[i] > p_volei[i]:
            x.append('F')
        else:
            x.append('V')
    # Count correct predictions and convert to a percentage.
    acertos = 0.0
    for i in xrange(len(x)):
        if x[i] == rotulos_teste[i]:
            acertos += 1
    acertos *= 100.0/float(len(x))
    return acertos
print "Acertos:", treinamento_GMM_MAP(), "%"
# Exhaustive-search parameters: range of training-set fractions to scan.
train_size_min = 0.35
train_size_max = 0.95
train_size_step = 0.05
# Number of Monte Carlo iterations per training-set size.
n_iter = 100
# Accumulators for the MAP classifier results (the ML results live in
# steps/medias/variancias from the previous section).
steps1 = []
medias1 = []
variancias1 = []
train_size_atual = train_size_min
while train_size_atual <= train_size_max:  # for each training-set size
    acertos = []
    for k in xrange(n_iter):  # for each Monte Carlo iteration
        # Bug fix: this section evaluates the MAP classifier (red curve
        # below), so it must call treinamento_GMM_MAP -- the original
        # called treinamento_GMM_ML, plotting ML results twice. Also
        # dropped a dead train_test_split call whose results were unused.
        score = treinamento_GMM_MAP(train_size=train_size_atual, n_components=2)
        acertos.append(score)
    steps1.append(train_size_atual)
    medias1.append(np.mean(np.array(acertos)))
    variancias1.append(np.std(np.array(acertos)))
    train_size_atual += train_size_step
plt.figure();
plt.errorbar(steps, medias, yerr=variancias);
plt.errorbar(steps1, medias1, yerr=variancias1, color='red');
plt.ylabel('Indice de acertos');
plt.xlabel('Tamanho do conjunto de treino');
def treinamento_GMM_nao_supervisionado():
    """Fit a single 2-component GMM to ALL heights (no labels), then map
    each Gaussian to a class by its mean and score the clustering against
    the true labels.

    Returns:
        Percentage of samples whose cluster matches their true label.
    """
    # Configure the mixture: two components, one per expected class.
    g = mixture.GMM(n_components=2)
    # Fit on the whole dataset -- unsupervised, so train == test here.
    g.fit(altura_volei + altura_futebol)
    # Map components to labels: the taller-mean Gaussian is volleyball ('V').
    if g.means_[0][0] > g.means_[1][0]:
        rotulos = ('V', 'F')
    else:
        rotulos = ('F', 'V')
    # Per-sample component membership probabilities.
    p = g.predict_proba(altura_volei + altura_futebol)
    # Assign each sample to its most probable component's label.
    x = []
    for i in xrange(len(altura_volei + altura_futebol)):
        if p[i][0] > p[i][1]:
            x.append(rotulos[0])
        else:
            x.append(rotulos[1])
    # Count agreements with the true labels and convert to a percentage.
    # NOTE(review): the list concatenation below is rebuilt on every loop
    # iteration -- hoisting it out would avoid the repeated O(n) copy.
    acertos = 0.0
    for i in xrange(len(x)):
        if x[i] == (rotulos_volei + rotulos_futebol)[i]:
            acertos += 1
    acertos *= 100.0/float(len(x))
    return acertos
acertos_nao_supervisionados = treinamento_GMM_nao_supervisionado()
print "Acertos:", acertos_nao_supervisionados, "%"
plt.figure();
plt.errorbar(steps, medias, yerr=variancias);
plt.errorbar(steps1, medias1, yerr=variancias1, color='red');
plt.plot(steps, [acertos_nao_supervisionados] * len(steps), ':', color='green')
plt.ylabel('Indice de acertos');
plt.xlabel('Tamanho do conjunto de treino');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Teorema de Bayes
Step2: Podemos verificar a estabilidade do modelo para diferentes tamanhos de conjunto de treino de forma semelhante a que fizemos no caso de KNN
Step3: Classificador de Máxima Probabilidade À Posteriori (MAP)
Step4: O critério MAP minimiza a probabilidade de erro teórico do estimador. Embora esse seja um resultado relevante, também é importante considerar que a estimativa das probabilidades envolvidas pode não ser sempre ótima.
Step5: Veja que, embora MAP tenha uma possibilidade teórica de conseguir um erro menor que ML, seu erro médio é bastante semelhante. A variância do erro também apresenta um comportamento semelhante, aumentando à medida que o conjunto de treino aumenta. Essa variância não decorre de uma degradação do modelo, mas sim da diminuição do conjunto de testes
Step6: Podemos verificar que o treinamento não-supervisionado, por utilizar todo o conjunto de dados para treino/teste, não apresenta flutuações de desempenho. Ao mesmo tempo, não é possível dizer que esse modelo generaliza para outros pontos, já que é um modelo que foi treinado e testado no mesmo conjunto de dados. A não-generalização, neste caso, não é um grande problema, já que o problema está restrito à base de dados que temos. Neste caso, realizamos o agrupamento dos dados do nosso conjunto através do modelo GMM, e então interpretamos manualmente os resultados de acordo com nosso conhecimento prévio.
|
3,528
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
# Load the hourly bike-sharing dataset and preview it.
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
# Plot ridership ('cnt') for the first 10 days (24 hours each).
rides[:24*10].plot(x='dteday', y='cnt')
# One-hot encode the categorical fields, then drop the originals plus
# fields not used as inputs.
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
    dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
    rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
                  'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# Standardize the continuous variables to zero mean / unit std.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
    mean, std = data[each].mean(), data[each].std()
    scaled_features[each] = [mean, std]
    data.loc[:, each] = (data[each] - mean)/std
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
    """Two-layer feed-forward network trained one sample at a time by
    plain gradient descent: sigmoid hidden layer, linear (identity)
    output layer for regression.
    """
    def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Initialize weights from N(0, n**-0.5); rows index the destination
        # layer, columns the source layer (so forward passes are W @ x).
        # NOTE(review): the hidden->output scale uses output_nodes**-0.5;
        # the fan-in would be hidden_nodes -- confirm this is intended.
        self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
                                       (self.hidden_nodes, self.input_nodes))
        self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
                                       (self.output_nodes, self.hidden_nodes))
        self.lr = learning_rate
    def train(self, inputs_list, targets_list):
        """Run one forward/backward pass on a single sample and update weights in place."""
        # Convert inputs list to 2d array (column vectors).
        inputs = np.array(inputs_list, ndmin=2).T
        targets = np.array(targets_list, ndmin=2).T
        ### Forward pass ###
        hidden_inputs = np.dot(self.weights_input_to_hidden,inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)
        # Identity activation on the output layer (regression).
        final_outputs = final_inputs
        ### Backward pass ###
        output_errors = targets-final_outputs
        # d(identity)/dx == 1, so the output gradient is just the error.
        output_grad = output_errors
        # Propagate the error back through the hidden->output weights.
        hidden_errors = np.dot(self.weights_hidden_to_output.T,output_errors)
        # Sigmoid derivative evaluated at the hidden pre-activations.
        hidden_grad = self.activation_function_derivative(hidden_inputs)
        self.weights_hidden_to_output += self.lr*np.dot(output_grad,hidden_outputs.T)
        self.weights_input_to_hidden += self.lr*np.dot(hidden_errors* hidden_grad,inputs.T)
    def activation_function(self,x):
        """Element-wise sigmoid."""
        return 1/(1 + np.exp(-x))
    def activation_function_derivative(self,x):
        """Derivative of the sigmoid, sigma(x) * (1 - sigma(x))."""
        return self.activation_function(x)*(1-self.activation_function(x))
    def run(self, inputs_list):
        # Run a forward pass through the network (no weight updates).
        inputs = np.array(inputs_list, ndmin=2).T
        hidden_inputs = np.dot(self.weights_input_to_hidden,inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = np.dot(self.weights_hidden_to_output,hidden_outputs)
        final_outputs = final_inputs
        return final_outputs
def MSE(y, Y):
    """Mean squared error between predictions y and targets Y."""
    diff = y - Y
    return np.mean(diff * diff)
import sys
### Set the hyperparameters here ###
epochs = 2000
learning_rate = 0.008
hidden_nodes = 10
output_nodes = 1
# Input size is the number of feature columns.
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
    # Go through a random batch of 128 records from the training data set
    # (np.random.choice samples indices with replacement here).
    batch = np.random.choice(train_features.index, size=128)
    # NOTE(review): DataFrame.ix is deprecated in modern pandas -- migrate
    # to .loc when upgrading.
    for record, target in zip(train_features.ix[batch].values,
                              train_targets.ix[batch]['cnt']):
        network.train(record, target)
    # Printing out the training progress
    train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
    val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
    if e%(epochs/10) == 0:
        sys.stdout.write("\nProgress: " + str(100 * e/float(epochs))[:4] \
                         + "% ... Training loss: " + str(train_loss)[:5] \
                         + " ... Validation loss: " + str(val_loss)[:5])
    losses['train'].append(train_loss)
    losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
# NOTE(review): plt.ylim(ymax=...) is deprecated in newer matplotlib; use top=0.5.
plt.ylim(ymax=0.5)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], 'r',label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, 'g', label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
def runTest(self):
pass
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner(verbosity=1).run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
|
3,529
|
<ASSISTANT_TASK:>
Python Code:
%%sh
# ls -l ~/Downloads/G20*csv
# mv ~/Downloads/G20*csv G20.csv
data = pd.read_csv('G20.csv')
cols = ['Area', 'Population_2010', 'Population_2011',
'Population_2012', 'Population_2013', 'Population_2014',
'Population_2015', 'GDP_2010', 'GDP_2011', 'GDP_2012', 'GDP_2013',
'GDP_2014', 'GDP_2015', 'GDP_PCI_2010', 'GDP_PCI_2011', 'GDP_PCI_2012',
'GDP_PCI_2013', 'GDP_PCI_2014', 'GDP_PCI_2015', 'GDP_PPP_2010',
'GDP_PPP_2011', 'GDP_PPP_2012', 'GDP_PPP_2013', 'GDP_PPP_2014',
'GDP_PPP_2015']
data[cols] = data[cols].applymap(lambda x: float(str(x).replace(',', '')))
all_countries = sorted(data.Country.unique())
country_labler = all_countries.index
# country_labler('India')
# data.Country = data.Country.map(country_labler)
sorted(data.columns.tolist())
cols1 = ['GDP_2010',
'GDP_2011',
'GDP_2012',
'GDP_2013',
'GDP_2014',
'GDP_2015',]
cols2 = [
'GDP_PPP_2010',
'GDP_PPP_2011',
'GDP_PPP_2012',
'GDP_PPP_2013',
'GDP_PPP_2014',
'GDP_PPP_2015']
cols3 = []
data1 = data[['Area',
'Country',
'GDP_2010',
'GDP_2011',
'GDP_2012',
'GDP_2013',
'GDP_2014',
'GDP_2015',]].copy()
data2 = data[['Area',
'Country',
'GDP_PPP_2010',
'GDP_PPP_2011',
'GDP_PPP_2012',
'GDP_PPP_2013',
'GDP_PPP_2014',
'GDP_PPP_2015',]].copy()
data3 = data[['Area',
'Country',
'GDP_PCI_2010',
'GDP_PCI_2011',
'GDP_PCI_2012',
'GDP_PCI_2013',
'GDP_PCI_2014',
'GDP_PCI_2015',]].copy()
data4 = data[['Area',
'Country',
'Population_2010',
'Population_2011',
'Population_2012',
'Population_2013',
'Population_2014',
'Population_2015']].copy()
import sklearn.cluster
clf = sklearn.cluster.AgglomerativeClustering(5)
pred = clf.fit_predict(data1['GDP_2010 GDP_2011 GDP_2012 GDP_2013 GDP_2014 GDP_2015'.split()])
pred
new_data.metric.unique()
new_data.head(20).copy(deep=True)
# segregating year & param
new_data['year'] = new_data.metric.map(lambda x: int(x.rsplit('_')[-1]))
new_data['param'] = new_data.metric.map(lambda x: ''.join(x.rsplit('_')[:-1]))
# drop metric column
new_data.drop('metric', axis=1, inplace=True)
# converting data into integers
# Key values to check how the world
print('Country', new_data.country.unique())
print('Country', new_data.param.unique())
temp = new_data[(new_data.country == 'USA') & (new_data.param == 'GDP')].copy(deep=True)
temp
X_Label = 'USA'
Y_Label = 'GDP'
plt.figure(figsize=(15, 5))
temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True)
_x, _y = temp.year.values, temp.value.values
plt.plot(_x, _y)
plt.xticks(_x, map(str, _x))
X_Label = 'European Union'
Y_Label = 'GDP'
plt.figure(figsize=(15, 5))
temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True)
_x, _y = temp.year.values, temp.value.values
plt.plot(_x, _y)
plt.xticks(_x, map(str, _x))
X_Label = 'USA'
Y_Label = 'GDP'
plt.figure(figsize=(15, 5))
temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True)
_x, _y = temp.year.values, temp.value.values
plt.plot(_x, _y)
plt.xticks(_x, map(str, _x))
_y
_y - _y.min()
Y_Label = 'Population'
plt.figure(figsize=(15, 8))
all_countries = new_data.country.unique()[:5]
for X_Label in all_countries:
temp = new_data[(new_data.country == X_Label) & (new_data.param == Y_Label)].copy(deep=True)
_x, _y = temp.year.values, temp.value.values
_y = _y - _y.min()
plt.plot(_x, _y)
plt.xticks(_x, map(str, _x))
plt.legend(all_countries)
# ISO 3166-1 alpha-3 codes used by the plotly choropleth below.
# Bug fix: 'Republic of Korea' was mapped to 'USA'; its ISO code is 'KOR'.
# NOTE(review): the European Union has no ISO country code; the original
# 'USA' placeholder is kept but draws the EU row on top of the USA --
# confirm how the EU should be rendered.
country_codes = {'Argentina': 'ARG',
 'Australia': 'AUS',
 'Brazil': 'BRA',
 'Canada': 'CAN',
 'China': 'CHN',
 'European Union': 'USA',
 'France': 'FRA',
 'Germany': 'DEU',
 'India': 'IND',
 'Indonesia': 'IDN',
 'Italy': 'ITA',
 'Japan': 'JPN',
 'Mexico': 'MEX',
 'Republic of Korea': 'KOR',
 'Russia': 'RUS',
 'Saudi Arabia': 'SAU',
 'South Africa': 'ZAF',
 'Turkey': 'TUR',
 'USA': 'USA',
 'United Kingdom': 'GBR'}
chart_colors = ["rgb(0,0,0)",
"rgb(255,255,255)",
"rgb(255,0,0)",
"rgb(0,255,0)",
"rgb(0,0,255)",
"rgb(255,255,0)",
"rgb(0,255,255)",
"rgb(255,0,255)",
"rgb(192,192,192)",
"rgb(128,128,128)",
"rgb(128,0,0)",
"rgb(128,128,0)",
"rgb(0,128,0)",
"rgb(128,0,128)",
"rgb(0,128,128)",
"rgb(0,0,128)",]
chart_colors += chart_colors
chart_colors = chart_colors[:len(country_codes)]
data1['Country_Codes'] = data1['Country'].map(lambda x: country_codes[x])
import sklearn.cluster
clf = sklearn.cluster.AgglomerativeClustering(5)
pred = clf.fit_predict(data1['GDP_2010 GDP_2011 GDP_2012 GDP_2013 GDP_2014 GDP_2015'.split()])
pred
data1['cluster'] = pred
# Bug fix: the column is named 'cluster' (the original referenced the
# misspelled attribute 'cluser'), and the integer labels must be converted
# to strings before concatenating with text.
data1['text'] = 'Cluster ID ' + data1['cluster'].astype(str)
data1.head()
import plotly.plotly as py
import pandas as pd
# Choropleth of the G-20 countries colored by their GDP-based cluster id.
# Bug fix: the DataFrame column is named 'cluster'; the original read the
# misspelled 'cluser', which raises a KeyError.
data = [ dict(
        type = 'choropleth',
        locations = data1['Country_Codes'],
        z = data1['cluster'],
        text = data1['Country_Codes'],
        # NOTE(review): z holds cluster ids, not dollar amounts, so the
        # '$' tick prefix and 'GDP' colorbar title are misleading --
        # confirm the intended quantity.
        colorbar = dict(
            autotick = False,
            tickprefix = '$',
            title = 'GDP<br>Billions US$'),
      ) ]
layout = dict(
    title = 'G-20"s GDP',
    geo = dict(
        showframe = False,
        showcoastlines = False,
        projection = dict(
            type = 'Mercator'
        )
    )
)
fig = dict(data=data, layout=layout)
# py.iplot( fig, validate=False, filename='d3-world-map' )
# NOTE(review): plot() is presumably plotly.offline.plot imported earlier
# in the notebook -- confirm.
plot( fig, validate=False, filename='d3-world-map')
fig = {
'data': [
{
'x': df2007.gdpPercap,
'y': df2007.lifeExp,
'text': df2007.country,
'mode': 'markers',
'name': '2007'},
{
'x': df1952.gdpPercap,
'y': df1952.lifeExp,
'text': df1952.country,
'mode': 'markers',
'name': '1952'}
],
'layout': {
'xaxis': {'title': 'GDP per Capita', 'type': 'log'},
'yaxis': {'title': "Life Expectancy"}
}
}
data = []
year = 'GDP_2015'
data.append({
'x': data1[year],
'y': data1['cluster'],
'mode': 'markers',
'text': data1['Country'],
'name': year,
'colors': chart_colors
})
fig = dict(data=data, layout=layout)
# py.iplot( fig, validate=False, filename='d3-world-map' )
plot( fig, validate=False, filename='d3-world-map')
from sklearn import datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data # [:, :2] # we only take the first two features.
Y = iris.target
X[:5]
# Hold out 25% of the iris data for testing; random_state pins the split.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.25, random_state=0)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
# Random forest baseline.
clf = RandomForestClassifier()
clf = clf.fit(X_train, y_train)
# NOTE: the training-set score (and the full-set score below, which
# includes the training samples) overestimates generalization; only the
# test-set score is meaningful.
accuracy_score(clf.predict(X_train), y_train)
accuracy_score(clf.predict(X_test), y_test)
accuracy_score(clf.predict(X), Y)
# Linear-kernel SVM for comparison, evaluated the same way.
from sklearn import svm
clf = svm.SVC(kernel='linear', C=2)
clf = clf.fit(X_train, y_train)
accuracy_score(clf.predict(X_train), y_train)
accuracy_score(clf.predict(X_test), y_test)
accuracy_score(clf.predict(X), Y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Cleanup
Step2: Experiments
Step3: Ideas
Step4: IRIS Dataset
Step5: Random Forest
Step6: SVM
|
3,530
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import astropy.table
import astropy.cosmology
import astropy.io.fits as fits
import astropy.units as u
import os.path
assert 'DESIMODEL' in os.environ
assert os.path.exists(os.path.join(os.getenv('DESIMODEL'), 'data', 'spectra', 'spec-sky.dat'))
import desimodel
import specsim
print(f'Using desimodel {desimodel.__version__}, specsim {specsim.__version__}')
elg_spec = astropy.table.Table.read(
os.path.join(os.environ['DESIMODEL'], 'data', 'spectra', 'spec-elg-o2flux-8e-17-average-line-ratios.dat'),
format='ascii')
elg_wlen0 = elg_spec['col1'].data
elg_flux0 = 1e-17 * elg_spec['col2'].data
def get_elg_nz():
# Read the nz file from $DESIMODEL.
full_name = os.path.join(os.environ['DESIMODEL'], 'data', 'targets', 'nz_elg.dat')
table = astropy.table.Table.read(full_name, format='ascii')
# Extract the n(z) histogram into numpy arrays.
z_lo, z_hi = table['col1'], table['col2']
assert np.all(z_hi[:-1] == z_lo[1:])
z_edge = np.hstack((z_lo, [z_hi[-1]]))
nz = table['col3']
# Trim to bins where n(z) > 0.
non_zero = np.where(nz > 0)[0]
lo, hi = non_zero[0], non_zero[-1] + 1
nz = nz[lo: hi]
z_edge = z_edge[lo: hi + 1]
return nz, z_edge
elg_nz, elg_z_edge = get_elg_nz()
def get_nz_weight(z):
Calculate n(z) weights corresponding to input z values.
nz = np.zeros_like(z)
idx = np.digitize(z, elg_z_edge)
sel = (idx > 0) & (idx <= len(elg_nz))
nz[sel] = elg_nz[idx[sel] - 1]
return nz
def generate_elg_z(n=100, seed=123):
cdf = np.cumsum(elg_nz)
cdf = np.hstack(([0], cdf / cdf[-1]))
gen = np.random.RandomState(seed)
return np.interp(gen.rand(n), cdf, elg_z_edge)
z=generate_elg_z(n=20000)
plt.hist(z, bins=elg_z_edge, histtype='stepfilled')
plt.xlim(elg_z_edge[0], elg_z_edge[-1])
print(f'Mean ELG redshift is {np.mean(z):.3f}')
LCDM = astropy.cosmology.Planck15
def generate_elg_profiles(z, seed=123, verbose=False):
ELG profiles are assumed to be disk (Sersic n=1) only.
gen = np.random.RandomState(seed)
nsrc = len(z)
source_fraction = np.zeros((nsrc, 2))
source_half_light_radius = np.zeros((nsrc, 2))
source_minor_major_axis_ratio = np.zeros((nsrc, 2))
source_position_angle = 360. * gen.normal(size=(nsrc, 2))
# Precompute cosmology scale factors.
angscale = (
LCDM.angular_diameter_distance(1.0) /
LCDM.angular_diameter_distance(z)).to(1).value
if verbose:
print(f'mean n(z) DA(1.0)/DA(z) = {np.mean(angscale):.3f}')
# Disk only with random size and ellipticity.
source_fraction[:, 0] = 1.
source_half_light_radius[:, 0] = 0.427 * np.exp(0.25 * gen.normal(size=nsrc)) * angscale
source_minor_major_axis_ratio[:, 0] = np.minimum(0.99, 0.50 * np.exp(0.15 * gen.normal(size=nsrc)))
if verbose:
print(f'mean HLR = {np.mean(source_half_light_radius[:, 0]):.3f}"')
return dict(
source_fraction=source_fraction,
source_half_light_radius=source_half_light_radius,
source_minor_major_axis_ratio=source_minor_major_axis_ratio,
source_position_angle=source_position_angle)
def plot_elg_profiles(save=None):
z = generate_elg_z(50000)
sources = generate_elg_profiles(z, verbose=True)
fig, ax = plt.subplots(2, 2, figsize=(8, 6))
ax = ax.flatten()
ax[0].hist(sources['source_minor_major_axis_ratio'][:, 0], range=(0,1), bins=25)
ax[0].set_xlabel('ELG minor/major axis ratio')
ax[0].set_xlim(0, 1)
ax[1].hist(z, bins=np.arange(0.6, 1.8, 0.1))
ax[1].set_xlim(0.6, 1.7)
ax[1].set_xlabel('ELG redshift')
ax[2].hist(sources['source_half_light_radius'][:, 0], bins=25)
ax[2].set_xlabel('ELG half-light radius [arcsec]')
ax[2].set_xlim(0.1, 1.1)
ax[3].scatter(z, sources['source_half_light_radius'][:, 0], s=0.5, alpha=0.5)
ax[3].set_xlabel('ELG redshift')
ax[3].set_ylabel('ELG half-light radius [arcsec]')
ax[3].set_xlim(0.6, 1.7)
ax[3].set_ylim(0.1, 1.1)
plt.tight_layout()
if save:
plt.savefig(save)
plot_elg_profiles(save='elg-sample.png')
def calculate_elg_snr(simulator, save, description,
z1=0.6, z2=1.65, dz=0.002, zref=1.20,
seed=123, wlen=elg_wlen0, flux=elg_flux0):
Calculate the ELG [OII] SNR as a function of redshift.
Parameters
----------
simulator : specsim.simulator.Simulator
Instance of an initialized Simulator object to use. Each fiber will
be simulated independently to study variations across the focal plane.
save : str
Filename to use for saving FITS results.
description : str
Short description for the saved file header, also used for plots later.
z1 : float
Minimum ELG redshift to calculate.
z2 : float
Maximum ELG redshift to calculate.
dz : float
Spacing of equally spaced grid to cover [z1, z2]. z2 will be increased
by up to dz if necessary.
zref : float
Reference redshift used to save signal, noise and fiberloss. Must be
on the grid specified by (z1, z2, dz).
seed : int or None
Random seed used to generate fiber positions and galaxy profiles.
wlen : array
1D array of N rest wavelengths in Angstroms.
flux : array
1D array of N corresponding rest fluxes in erg / (s cm2 Angstrom).
zooms = (3715., 3742.), (4850., 4875.), (4950., 5020.)
gen = np.random.RandomState(seed=seed)
# Generate random focal plane (x,y) positions for each fiber in mm units.
nfibers = simulator.num_fibers
focal_r = np.sqrt(gen.uniform(size=nfibers)) * simulator.instrument.field_radius
phi = 2 * np.pi * gen.uniform(size=nfibers)
xy = (np.vstack([np.cos(phi), np.sin(phi)]) * focal_r).T
# Build the grid of redshifts to simulate.
nz = int(np.ceil((z2 - z1) / dz)) + 1
z2 = z1 + (nz - 1) * dz
z_grid = np.linspace(z1, z2, nz)
iref = np.argmin(np.abs(z_grid - zref))
assert np.abs(zref - z_grid[iref]) < 1e-5, 'zref not in z_grid'
snr2 = np.zeros((4, nz, simulator.num_fibers))
# Initialize the results.
hdus = fits.HDUList()
hdus.append(fits.PrimaryHDU(
header=fits.Header({'SEED': seed, 'NFIBERS': nfibers, 'DESCRIBE': description})))
# Zero-pad the input spectrum if necessary.
wlo = 0.99 * desi.simulated['wavelength'][0] / (1 + z2)
if wlen[0] > wlo:
wlen = np.hstack([[wlo], wlen])
flux = np.hstack([[0.], flux])
# Simulate the specified rest-frame flux.
simulator.source.update_in(
'ELG [OII] doublet', 'elg',
wlen * u.Angstrom, flux * u.erg/(u.s * u.cm**2 * u.Angstrom), z_in=0.)
# Simulate each redshift.
for i, z in enumerate(z_grid):
# Redshift the ELG spectrum.
simulator.source.update_out(z_out=z)
source_flux = np.tile(simulator.source.flux_out, [nfibers, 1])
# Generate source profiles for each target at this redshift. Since the seed is
# fixed, only the redshift scaling of the HLR will change.
sources = generate_elg_profiles(np.full(nfibers, z), seed=seed)
# Simulate each source.
simulator.simulate(source_fluxes=source_flux, focal_positions=xy, **sources)
# Calculate the quadrature sum of SNR in each camera, by fiber.
for output in simulator.camera_output:
rest_wlen = output['wavelength'] / (1 + z)
# Loop over emission lines.
for j, (lo, hi) in enumerate(zooms):
sel = (rest_wlen >= lo) & (rest_wlen < hi)
if not np.any(sel):
continue
# Sum SNR2 over pixels.
pixel_snr2 = output['num_source_electrons'][sel] ** 2 / output['variance_electrons'][sel]
snr2[j, i] += pixel_snr2.sum(axis=0)
if i == iref:
# Save the fiberloss fraction and total variance tabulated on the simulation grid.
table = astropy.table.Table(meta={'ZREF': zref})
sim = simulator.simulated
table['WLEN'] = sim['wavelength'].data
table['FLUX'] = sim['source_flux'].data
table['FIBERLOSS'] = sim['fiberloss'].data
table['NSRC'] = sim['num_source_electrons_b'] + sim['num_source_electrons_r'] + sim['num_source_electrons_z']
table['SKYVAR'] = sim['num_sky_electrons_b'] + sim['num_sky_electrons_r'] + sim['num_sky_electrons_z']
table['NOISEVAR'] = (
sim['read_noise_electrons_b'] ** 2 + sim['read_noise_electrons_r'] ** 2 + sim['read_noise_electrons_z'] ** 2 +
sim['num_dark_electrons_b'] + sim['num_dark_electrons_r'] + sim['num_dark_electrons_z'])
hdus.append(fits.table_to_hdu(table))
hdus[-1].name = 'REF'
# Calculate the n(z) weighted mean SNR for [OII], using the median over fibers at each redshift.
snr_oii = np.median(np.sqrt(snr2[0]), axis=-1)
wgt = get_nz_weight(z_grid)
snr_oii_eff = np.sum(snr_oii * wgt) / np.sum(wgt)
print(f'n(z)-weighted effective [OII] SNR = {snr_oii_eff:.3f}')
# Save the SNR vs redshift arrays for each emission line.
table = astropy.table.Table(meta={'SNREFF': snr_oii_eff})
table['Z'] = z_grid
table['ZWGT'] = wgt
table['SNR_OII'] = np.sqrt(snr2[0])
table['SNR_HBETA'] = np.sqrt(snr2[1])
table['SNR_OIII'] = np.sqrt(snr2[2])
hdus.append(fits.table_to_hdu(table))
hdus[-1].name = 'SNR'
hdus.writeto(save, overwrite=True)
def get_flux_limits(z, snr, nominal_flux=8., nominal_snr=7., ax=None):
fluxlim = np.zeros_like(snr)
nonzero = snr > 0
fluxlim[nonzero] = nominal_flux * (nominal_snr / snr[nonzero])
bins = np.linspace(0.6, 1.6, 6)
nlim = len(bins) - 1
medians = np.empty(nlim)
for i in range(nlim):
sel = (z >= bins[i]) & (z < bins[i + 1])
medians[i] = np.median(fluxlim[sel])
if ax is not None:
zmid = 0.5 * (bins[1:] + bins[:-1])
dz = 0.5 * (bins[1] - bins[0])
ax.errorbar(zmid, medians, xerr=dz, color='b', fmt='o', zorder=10, capsize=3)
return fluxlim, medians
def plot_elg_snr(name, save=True):
Plot a summary of results saved by calculate_elg_snr().
Parameters
----------
name : str
Name of the FITS file saved by calculate_elg_snr().
hdus = fits.open(name)
hdr = hdus[0].header
nfibers = hdr['NFIBERS']
description = hdr['DESCRIBE']
fig, axes = plt.subplots(2, 1, figsize=(8, 6))
plt.suptitle(description, fontsize=14)
snr_table = astropy.table.Table.read(hdus['SNR'])
snr_oii_eff = snr_table.meta['SNREFF']
ref_table = astropy.table.Table.read(hdus['REF'])
zref = ref_table.meta['ZREF']
ax = axes[0]
color = 'rgb'
labels = '[OII]', 'H$\\beta$', '[OIII]'
z_grid = snr_table['Z'].data
for i, tag in enumerate(('SNR_OII', 'SNR_HBETA', 'SNR_OIII')):
snr = snr_table[tag].data
snr_q = np.percentile(snr, (5, 50, 95), axis=-1)
ax.fill_between(z_grid, snr_q[0], snr_q[2], color=color[i], alpha=0.25, lw=0)
ax.plot(z_grid, snr_q[1], c=color[i], ls='-', label=labels[i])
ax.plot([], [], 'k:', label='n(z)')
ax.legend(ncol=4)
ax.set_xlabel('ELG redshift')
ax.set_ylabel(f'Total signal-to-noise ratio')
ax.axhline(7, c='k', ls='--')
rhs = ax.twinx()
rhs.plot(z_grid, snr_table['ZWGT'], 'k:')
rhs.set_yticks([])
ax.set_xlim(z_grid[0], z_grid[-1])
ax.set_ylim(0, 12)
rhs.set_ylim(0, None)
ax.text(0.02, 0.03, f'n(z)-wgtd [OII] SNR={snr_oii_eff:.3f}',
fontsize=12, transform=ax.transAxes)
# Calculate the median [OII] flux limits.
_, fluxlim = get_flux_limits(z_grid, np.median(snr_table['SNR_OII'], axis=-1))
# Print latex-format results for DESI-3977 Table 2.
print(f'&{snr_oii_eff:7.3f}', end='')
for m in fluxlim:
print(f' &{m:5.1f}', end='')
print(' \\\\')
ax = axes[1]
wlen = ref_table['WLEN'].data
dwlen = wlen[1] - wlen[0]
sky_q = np.percentile(ref_table['SKYVAR'].data, (5, 50, 95), axis=-1)
sky_q[sky_q > 0] = 1 / sky_q[sky_q > 0]
ax.fill_between(wlen, sky_q[0], sky_q[2], color='b', alpha=0.5, lw=0)
ax.plot([], [], 'b-', label='sky ivar')
ax.plot(wlen, sky_q[1], 'b.', ms=0.25, alpha=0.5)
noise_q = np.percentile(ref_table['NOISEVAR'].data, (5, 50, 95), axis=-1)
noise_q[noise_q > 0] = 1 / noise_q[noise_q > 0]
ax.fill_between(wlen, noise_q[0], noise_q[2], color='r', alpha=0.25, lw=0)
ax.plot(wlen, noise_q[1], c='r', ls='-', label='noise ivar')
floss_q = np.percentile(ref_table['FIBERLOSS'].data, (5, 50, 95), axis=-1)
ax.plot([], [], 'k-', label='fiberloss')
rhs = ax.twinx()
rhs.fill_between(wlen, floss_q[0], floss_q[2], color='k', alpha=0.25, lw=0)
rhs.plot(wlen, floss_q[1], 'k-')
rhs.set_ylim(0.2, 0.6)
rhs.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.1))
rhs.set_ylabel('Fiberloss')
ax.set_xlabel('Wavelength [A]')
ax.set_ylabel(f'Inverse Variance / {dwlen:.1f}A')
ax.set_xlim(wlen[0], wlen[-1])
ax.set_ylim(0, 0.25)
ax.legend(ncol=3)
plt.subplots_adjust(wspace=0.1, top=0.95, bottom=0.08, left=0.10, right=0.92)
if save:
base, _ = os.path.splitext(name)
plot_name = base + '.png'
plt.savefig(plot_name)
print(f'Saved {plot_name}')
import specsim.simulator
desi = specsim.simulator.Simulator('desi', num_fibers=100)
%time calculate_elg_snr(desi, save='desimodel-0.9.6.fits', description='desimodel 0.9.6')
plot_elg_snr('desimodel-0.9.6.fits')
desi.instrument.fiberloss_method = 'galsim'
%time calculate_elg_snr(desi, save='desimodel-0.9.6-galsim.fits', description='desimodel 0.9.6 (galsim)')
plot_elg_snr('desimodel-0.9.6-galsim.fits')
desi867 = astropy.table.Table.read('elg_snr2_desimodel-0-3-1.fits', hdu=1)
def desi_867_fig1():
z = desi867['Z']
snr_all = np.sqrt(desi867['SNR2'])
snr_oii = np.sqrt(desi867['SNR2_OII'])
fig = plt.figure(figsize=(6, 5))
plt.plot(z, snr_all, 'k-', lw=1, label='all lines')
plt.plot(z, snr_oii, 'r-', lw=1, label='[OII] only')
plt.legend(fontsize='large')
plt.axhline(7, c='b', ls='--')
plt.ylim(0, 22)
plt.xlim(z[0], z[-1])
plt.xticks([0.5, 1.0, 1.5])
plt.xlabel('Redshift')
plt.ylabel('S/N')
desi_867_fig1()
def desi_867_fig2():
z = desi867['Z']
snr_all = np.sqrt(desi867['SNR2'])
snr_oii = np.sqrt(desi867['SNR2_OII'])
flux_limit_all, _ = get_flux_limits(z, snr_all)
flux_limit_oii, medians = get_flux_limits(z, snr_oii)
fig = plt.figure(figsize=(6, 5))
plt.plot(z, flux_limit_all, 'k-', lw=1, label='all lines')
plt.plot(z, flux_limit_oii, 'r-', lw=1, label='[OII] only')
plt.legend(loc='upper right', fontsize='large')
_, _ = get_flux_limits(z, snr_oii, ax=plt.gca())
plt.ylim(0, 40)
plt.xlim(z[0], z[-1])
plt.xticks([0.5, 1.0, 1.5])
plt.xlabel('Redshift')
plt.ylabel('[OII] Flux limit ($10^{-17}$ ergs cm$^{-2}$ s$^{-1}$)')
desi_867_fig2()
def cdr_summary():
z = desi867['Z']
snr_oii = np.sqrt(desi867['SNR2_OII'])
wgt = get_nz_weight(z)
snreff = np.sum(wgt * snr_oii) / wgt.sum()
_, medians = get_flux_limits(z, snr_oii)
print(f'0.3.1 (CDR) & {snreff:6.3f}', end='')
for m in medians:
print(f' &{m:5.1f}', end='')
print(' \\\\')
cdr_summary()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parts of this notebook assume that the desimodel package is installed (both its git and svn components) and its data/ directory is accessible via the $DESIMODEL environment variable
Step2: Document relevant version numbers
Step3: ELG Spectrum
Step4: DESI ELG Sample
Step6: Calculate n(z) weights corresponding to an array of ELG redshifts
Step7: Sample random redshifts from n(z)
Step8: Define a background cosmology for the angular-diameter distance used to scale galaxy angular sizes
Step10: Generate random ELG profiles for each target. The mean half-light radius is 0.45" and scales with redshift.
Step11: Diagnostic plot showing the assumed ELG population (Figure 1 of DESI-3977)
Step13: Simulated SNR
Step14: Calculate flux limits in bins of redshift, to compare with SRD L3.1.3
Step16: Plot a summary of the results saved by calculate_elg_snr(). Shaded bands show the 5-95 percentile range, with the median drawn as a solid curve. The fiberloss in the lower plot is calculated at the redshift zref specified in calculate_elg_snr() (since the ELG size distribution is redshift dependent).
Step17: Examples
Step18: NOTE
Step19: Plot the results (Figure 2 of DESI-3977)
Step20: Check that the results with GalSim are compatible with those using the (default) fastsim mode of fiberloss calculations
Step21: NOTE
Step22: This comparison shows that the "fastsim" fiberloss fractions are about 1% (absolute) higher than "galsim", leading to a slight increase in signal and therefore SNR. The reason for this increase is that "fastsim" assumes a fixed minor / major axis ratio of 0.7 while our ELG population has a distribution of ratios with a median of 0.5. The weighted [OII] SNR values are 6.764 (fastsim) and 6.572 (galsim), which agree at the few percent level.
Step23: Check that we can reproduce the figures from DESI-867
Step24: Print a summary for Table 2 of DESI-3977
|
3,531
|
<ASSISTANT_TASK:>
Python Code:
import os
import zipfile
from math import log, sqrt
import numpy as np
import pandas as pd
from sklearn import linear_model
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
%matplotlib inline
# Put files in current direction into a list
files_list = [f for f in os.listdir('.') if os.path.isfile(f)]
# Filenames of unzipped files
unzip_files = ['kc_house_data.csv','wk3_kc_house_train_data.csv',
'wk3_kc_house_test_data.csv', 'wk3_kc_house_train_data.csv',
'wk3_kc_house_valid_data.csv']
# If upzipped file not in files_list, unzip the file
for filename in unzip_files:
if filename not in files_list:
zip_file = filename + '.zip'
unzipping = zipfile.ZipFile(zip_file)
unzipping.extractall()
unzipping.close
# Dictionary with the correct dtypes for the DataFrame columns
dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int,
'sqft_living15':float, 'grade':int, 'yr_renovated':int,
'price':float, 'bedrooms':float, 'zipcode':str,
'long':float, 'sqft_lot15':float, 'sqft_living':float,
'floors':float, 'condition':int, 'lat':float, 'date':str,
'sqft_basement':int, 'yr_built':int, 'id':str,
'sqft_lot':int, 'view':int}
sales = pd.read_csv('kc_house_data.csv', dtype=dtype_dict)
sales['sqft_living_sqrt'] = sales['sqft_living'].apply(sqrt)
sales['sqft_lot_sqrt'] = sales['sqft_lot'].apply(sqrt)
sales['bedrooms_square'] = sales['bedrooms']*sales['bedrooms']
sales['floors_square'] = sales['floors']*sales['floors']
all_features = ['bedrooms', 'bedrooms_square',
'bathrooms',
'sqft_living', 'sqft_living_sqrt',
'sqft_lot', 'sqft_lot_sqrt',
'floors', 'floors_square',
'waterfront', 'view', 'condition', 'grade',
'sqft_above',
'sqft_basement',
'yr_built', 'yr_renovated']
model_all = linear_model.Lasso(alpha=5e2, normalize=True) # set parameters
model_all.fit(sales[all_features], sales['price']) # learn weights
print model_all.coef_
for feat, weight in zip(all_features, model_all.coef_):
if weight != 0.0:
print feat + ':', weight
testing = pd.read_csv('wk3_kc_house_test_data.csv', dtype=dtype_dict)
training = pd.read_csv('wk3_kc_house_train_data.csv', dtype=dtype_dict)
validation = pd.read_csv('wk3_kc_house_valid_data.csv', dtype=dtype_dict)
testing['sqft_living_sqrt'] = testing['sqft_living'].apply(sqrt)
testing['sqft_lot_sqrt'] = testing['sqft_lot'].apply(sqrt)
testing['bedrooms_square'] = testing['bedrooms']*testing['bedrooms']
testing['floors_square'] = testing['floors']*testing['floors']
training['sqft_living_sqrt'] = training['sqft_living'].apply(sqrt)
training['sqft_lot_sqrt'] = training['sqft_lot'].apply(sqrt)
training['bedrooms_square'] = training['bedrooms']*training['bedrooms']
training['floors_square'] = training['floors']*training['floors']
validation['sqft_living_sqrt'] = validation['sqft_living'].apply(sqrt)
validation['sqft_lot_sqrt'] = validation['sqft_lot'].apply(sqrt)
validation['bedrooms_square'] = validation['bedrooms']*validation['bedrooms']
validation['floors_square'] = validation['floors']*validation['floors']
l1_pen_val = np.logspace(1, 7, num=13)
models_diff_l1 = {}
for i in range(len(l1_pen_val)):
key_val = str(i)
models_diff_l1[key_val] = linear_model.Lasso(alpha=l1_pen_val[i], normalize=True) # set parameters
models_diff_l1[key_val].fit(training[all_features], training['price']) # learn weights
def RSS_val(output_vals, predictions):
RSS_error = sum( (output_vals - predictions)**2.0 )
return RSS_error
RSS_L1_vals = []
for i in range(len(l1_pen_val)):
key_val = str(i)
pred_vals = models_diff_l1[key_val].predict(validation[all_features])
RSS = RSS_val(validation['price'], pred_vals)
RSS_L1_vals.append( (RSS, i) )
print l1_pen_val[ min(RSS_L1_vals)[1] ]
print '%.2e' % ( min(RSS_L1_vals)[0] )
print ( np.count_nonzero(models_diff_l1[ str(min(RSS_L1_vals)[1]) ].coef_) +
np.count_nonzero(models_diff_l1[ str(min(RSS_L1_vals)[1]) ].intercept_) )
max_nonzeros = 7
l1_penalty_values = np.logspace(1, 4, num=20)
list_l1_pen_n_less_nmax = []
list_l1_pen_n_larger_nmax = []
for i in range(len(l1_penalty_values)):
mod_diff_l1_n7 = linear_model.Lasso(alpha=l1_penalty_values[i], normalize=True) # set parameters
mod_diff_l1_n7.fit(training[all_features], training['price']) # learn weights
non_0_weights = ( np.count_nonzero(mod_diff_l1_n7.coef_) +
np.count_nonzero(mod_diff_l1_n7.intercept_) )
if non_0_weights<max_nonzeros:
list_l1_pen_n_less_nmax.append(l1_penalty_values[i])
if non_0_weights>max_nonzeros:
list_l1_pen_n_larger_nmax.append(l1_penalty_values[i])
l1_penalty_min = max(list_l1_pen_n_larger_nmax)
l1_penalty_max = min(list_l1_pen_n_less_nmax)
print 'l1_penalty_min: ', round(l1_penalty_min,0)
print 'l1_penalty_max: ', round(l1_penalty_max,0)
l1_penalty_values = np.linspace(l1_penalty_min,l1_penalty_max,20)
RSS_L1_vals_ref = []
for i in range(len(l1_penalty_values)):
mod_diff_l1_ref = linear_model.Lasso(alpha=l1_penalty_values[i], normalize=True) # set parameters
mod_diff_l1_ref.fit(training[all_features], training['price']) # learn weights
non_0_weights = ( np.count_nonzero(mod_diff_l1_ref.coef_) +
np.count_nonzero(mod_diff_l1_ref.intercept_) )
if non_0_weights==max_nonzeros:
pred_vals = mod_diff_l1_ref.predict(validation[all_features])
RSS = RSS_val(validation['price'], pred_vals)
RSS_L1_vals_ref.append( (RSS, i) )
print round( l1_penalty_values[ min(RSS_L1_vals_ref)[1] ] , 0 )
best_L1_index = min(RSS_L1_vals_ref)[1]
mod_diff_l1_ref = linear_model.Lasso(alpha=l1_penalty_values[ best_L1_index ], normalize=True) # set parameters
mod_diff_l1_ref.fit(training[all_features], training['price']) # learn weights
if mod_diff_l1_ref.intercept_ != 0:
print 'intercept: %.2e' % (mod_diff_l1_ref.intercept_)
for feat, weight in zip(all_features, mod_diff_l1_ref.coef_):
if weight != 0.0:
print feat + ':', weight
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unzipping files with house sales data
Step2: Load in house sales data
Step3: Create new features
Step4: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this variable will mostly affect houses with many bedrooms.
Step5: Using the entire house dataset, learn regression weights using an L1 penalty of 5e2. Make sure to add "normalize=True" when creating the Lasso object.
Step6: Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection.
Step7: Note that a majority of the weights have been set to zero. So by setting an L1 penalty that's large enough, we are performing a subset selection.
Step8: Selecting an L1 penalty
Step9: Make sure to create the 4 features as we did above
Step10: Next, we write a loop that does the following
Step11: Creating a dictionary to store the regression models for each L1 penalty. The key of the dictionary will be the index of the l1_pen_val array, passed as a string
Step12: Creating a regression model for each L1 penalty
Step13: Making a function to compute the RSS on the validation data
Step14: Making a list to store tuples of the form (RSS value for a L1 penalty, index of L1 penalty array)
Step15: In this loop, we use the repression model to calculate the predicted output values. We then use the predicted values and observed output value to calculate the RSS error. We then fill in values for the RSS_L1_vals.
Step16: QUIZ QUESTIONS
Step17: QUIZ QUESTION
Step18: Limit the number of nonzero weights
Step19: Exploring the larger range of values to find a narrow range with the desired sparsity
Step20: Now, implement a loop that search through this space of possible l1_penalty values
Step21: Creating a regression model for each L1 penalty. Then, finding the non-zero entries for the regression models. If number of non-zero weights are larger or smaller than max_nonzeros, store the number of non_zero weights
Step22: Out of this large range, we want to find the two ends of our desired narrow range of l1_penalty. At one end, we will have l1_penalty values that have too few non-zeros, and at the other end, we will have an l1_penalty that has too many non-zeros.
Step23: Exploring the narrow range of values to find the solution with the right number of non-zeros that has lowest RSS on the validation set
Step24: For l1_penalty in np.linspace(l1_penalty_min,l1_penalty_max,20)
Step25: Creating a regression model for each L1 penalty. If the the number of non-zero weights is equal to max_nonzeros, storing the RSS on the validation set and the index for this L1 penalty in the l1_penalty_values list
Step26: QUIZ QUESTIONS
Step27: Q2. What features in this model have non-zero coefficients?
Step28: Printing the features with non-zero weights and the values of the weights.
|
3,532
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_style('white')
from scipy.interpolate import griddata
xb = np.array([-5,-4,-3,-2,-1,0,1,2,3,4,5])
yb = np.array([-5,-5,-5,-5,-5,-5,-5,-5,-5,-5,-5])
yt = np.array([5]*11)
yc = np.array(0)
x = np.hstack((xb,xb,yb[1:10],yt[1:10],yc))
y = np.hstack((yb,yt,xb[1:10],xb[1:10],yc))
f1 = np.array([0]*40)
f2 = [1]
f = np.hstack((f1,f2))
plt.scatter(x, y);
assert x.shape==(41,)
assert y.shape==(41,)
assert f.shape==(41,)
assert np.count_nonzero(f)==1
xnew = np.linspace(-5,5,100)
ynew = np.linspace(-5,5,100)
Xnew,Ynew = np.meshgrid(xnew,ynew)
Fnew = griddata((x,y),f,(Xnew,Ynew), method = 'cubic')
assert xnew.shape==(100,)
assert ynew.shape==(100,)
assert Xnew.shape==(100,100)
assert Ynew.shape==(100,100)
assert Fnew.shape==(100,100)
plt.contourf(Fnew)
ax = plt.gca()
plt.title('Interpolated Scaler Field')
plt.xlabel('X')
plt.ylabel('Y')
assert True # leave this to grade the plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Sparse 2d interpolation
Step2: The following plot should show the points on the boundary and the single point in the interior
Step3: Use meshgrid and griddata to interpolate the function $f(x,y)$ on the entire square domain
Step4: Plot the values of the interpolated scalar field using a contour plot. Customize your plot to make it effective and beautiful.
|
3,533
|
<ASSISTANT_TASK:>
Python Code:
import random
num = [random.randint(0,10) for i in range(1000)]
hist = {}
for i in num:
hist[i] = hist.get(i, 0) + 1
hist
def count1(num):
hist = {}
for i in num:
hist[i] = hist.get(i, 0) + 1
return hist
%timeit count1(num)
def count2(num):
hist = {}
for i in num:
if i in hist:
hist[i] += 1
else:
hist[i] = 1
return hist
%timeit count2(num)
from collections import Counter
def count3(num):
return Counter(num)
%timeit count3(num)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Mesurer le temps que cela prend
Step2: Comparons avec une autre implémentation
Step3: Et une dernière version, la plus rapide
|
3,534
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'dwd', 'sandbox-3', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
3,535
|
<ASSISTANT_TASK:>
Python Code:
year = arange(1955,2005,5)
y = array([ -0.0480, -0.0180, -0.0360, -0.0120, -0.0040,
0.1180, 0.2100, 0.3320, 0.3340, 0.4560 ])
fig,ax = subplots()
ax.scatter(year,y,color="k",label="data")
xlabel("year")
ylabel("anomaly (degrees C)")
title("World temperature anomaly");
t = (year-1950)/10
V = vander(t)
c = solve(V,y)
p = poly1d(c)
tt = linspace(1955,2000,500)
ax.plot(tt,p((tt-1950)/10),label="interpolant")
ax.legend();
fig
year = arange(1955,2005,5)
y = array([ -0.0480, -0.0180, -0.0360, -0.0120, -0.0040,
0.1180, 0.2100, 0.3320, 0.3340, 0.4560 ])
t = (year-1950)/10
V = [ [t[i],1] for i in range(t.size) ] # Vandermonde-ish matrix
c,res,rank,sv = lstsq(V,y)
fig,ax = subplots()
ax.scatter(year,y,color="k",label="data")
p = poly1d(c)
tt = linspace(1955,2000,500)
ax.plot(tt,p((tt-1950)/10),label="linear fit")
xlabel("year")
ylabel("anomaly (degrees C)")
title("World temperature anomaly");
ax.legend();
V = [ [t[i]**3,t[i]**2,t[i],1] for i in range(t.size) ] # Vandermonde-ish matrix
c,res,rank,sv = lstsq(V,y,rcond=None)
p = poly1d(c)
ax.plot(tt,p((tt-1950)/10),label="cubic fit")
fig
a = array([1/(k+1)**2 for k in range(100)])
s = cumsum(a) # cumulative summation
p = sqrt(6*s)
plot(range(100),p,"o")
xlabel("$k$")
ylabel("$p_k$")
title("Sequence convergence");
ep = abs(pi-p) # error sequence
loglog(range(100),ep,"o")
xlabel("$k$")
ylabel("error")
title("Sequence convergence");
# Least-squares fit of log(eps_k) ~ log(a) + b*log(k+1): a straight line in
# log-log space corresponds to a power law eps_k ~ a*k^b.
V = [ [1,log(k+1)] for k in range(100) ] # fitting matrix
c = lstsq(V,log(ep),rcond=None)[0] # coefficients of linear fit
print(c)
a,b = exp(c[0]),c[1]  # a = exp(intercept); b = slope (the power-law exponent)
print("b:",b)
loglog(range(100),ep,"o",label="sequence")
k = arange(1,100)
plot(k,a*k**b,"--",label="power fit")
xlabel("$k$"); ylabel("error");
legend(); title("Sequence convergence");
t = linspace(0,3,400)
A = array([ [ sin(t)**2,cos((1+1e-7)*t)**2,1 ] for t in t ])
kappa = cond(A)
print(kappa)
x = array([1.,2,1])
b = A@x
x_BS = lstsq(A,b,rcond=None)[0]
print("observed error:",norm(x_BS-x)/norm(x))
print("max error:",kappa/2**52)
N = A.T@A
x_NE = solve(N,A.T@b)
print("observed error:",norm(x_NE-x)/norm(x))
print("digits:",-log10(norm(x_NE-x)/norm(x)))
A = 1.0 + floor(9*rand(6,4))
A.shape
Q,R = qr(A)
print("Q:",Q)
print("R:",R)
norm(Q.T@Q - eye(4))
Q,R = qr(A,"complete")
print(Q.shape)
norm(Q.T@Q - eye(6))
A = 1.0 + floor(9*rand(6,4))
m,n = A.shape
z = A[:,0]
v = z - norm(z)*hstack([1,zeros(m-1)])
P = eye(m) - (2/dot(v,v))*outer(v,v) # reflector
P@z
A = P@A
print(A)
A[1:,1:]
z = A[1:,1]
v = z - norm(z)*hstack([1,zeros(m-2)])
P = eye(m-1) - (2/dot(v,v))*outer(v,v)
A[1:,1:] = P@A[1:,1:]
print(A)
# Continue the QR reduction: apply Householder reflectors to columns 2 and 3,
# acting only on the trailing submatrix so earlier zeros are preserved.
for j in [2,3]:
    z = A[j:,j]  # subdiagonal part of column j
    v = z - norm(z)*hstack([1,zeros(m-j-1)])  # Householder vector sending z to norm(z)*e_1
    P = eye(m-j) - (2/dot(v,v))*outer(v,v)  # orthogonal reflector on the trailing block
    A[j:,j:] = P@A[j:,j:]  # update rows/cols >= j only
R = A  # A has been reduced to upper triangular form: this is R in A = QR
print(R)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A polynomial interpolant can be used to fit the data. Here we build one using a Vandermonde matrix. First, though, we express time as decades since 1950, as it improves the condition number of the matrix.
Step2: As you can see, the interpolant does represent the data, in a sense. However it's a crazy-looking curve for the application. Trying too hard to reproduce all the data exactly is known as overfitting.
Step3: The standard best-fit line results from using a linear polynomial that meets the least squares criterion.
Step4: If we use a global cubic polynomial, the points are fit more closely.
Step5: If we were to continue increasing the degree of the polynomial, the residual at the data points would get smaller, but overfitting would increase.
Step6: This graph suggests that $p_k\to \pi$ but doesn't give much information about the rate of convergence. Let $\epsilon_k=|\pi-p_k|$ be the sequence of errors. By plotting the error sequence on a log-log scale, we can see a nearly linear relationship.
Step7: This suggests a power-law relationship where $\epsilon_k\approx a k^b$, or $\log \epsilon_k \approx b (\log k) + \log a$.
Step8: In terms of the parameters $a$ and $b$ used above, we have
Step9: It's tempting to conjecture that $b\to -1$ asymptotically. Here is how the numerical fit compares to the original convergence curve.
Step10: Example 3.2.1
Step11: Now we set up an artificial linear least squares problem with a known exact solution that actually makes the residual zero.
Step12: Using a least-squares solve (NumPy's `lstsq`) to find the solution, we get a relative error that is about $\kappa$ times machine epsilon.
Step13: If we formulate and solve via the normal equations, we get a much larger relative error. With $\kappa^2\approx 10^{14}$, we may not be left with more than about 2 accurate digits.
Step14: Example 3.3.1
Step15: Here is a standard call
Step16: We can test that $\mathbf{Q}$ has orthonormal columns.
Step17: Here's the full or "complete" factorization.
Step18: Example 3.4.1
Step19: Our first step is to introduce zeros below the diagonal in column 1. Define the vector
Step20: Applying the Householder definitions gives us
Step21: By design we can use the reflector to get the zero structure we seek
Step22: Now we let
Step23: We are set to put zeros into column 2. We must not use row 1 in any way, lest it destroy the zeros we just introduced. To put it another way, we can repeat the process we just did on the smaller submatrix
Step24: We now apply the reflector to the submatrix.
Step25: We need two more iterations of this process.
Step26: We have now reduced the original to an upper triangular matrix using four orthogonal Householder reflections
|
3,536
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
module_path = 'https://tfhub.dev/deepmind/bigbigan-resnet50/1' # ResNet-50
# module_path = 'https://tfhub.dev/deepmind/bigbigan-revnet50x4/1' # RevNet-50 x4
import io
import IPython.display
import PIL.Image
from pprint import pformat
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_hub as hub
def imgrid(imarray, cols=4, pad=1, padval=255, row_major=True):
  """Lays out a [N, H, W, C] image array as a single image grid.

  Args:
    imarray: batch of images, shape [N, H, W, C].
    cols: number of grid columns; rows are derived from N.
    pad: pixel width of the padding strip between tiles (non-negative).
    padval: fill value for the padding (255 = white for uint8 images).
    row_major: unused; kept for interface compatibility.

  Returns:
    A single [rows*(H+pad)-pad, cols*(W+pad)-pad, C] image grid array.

  Raises:
    ValueError: if pad is negative.
  """
  pad = int(pad)
  if pad < 0:
    raise ValueError('pad must be non-negative')
  cols = int(cols)
  assert cols >= 1
  N, H, W, C = imarray.shape
  rows = N // cols + int(N % cols != 0)  # ceil(N / cols)
  batch_pad = rows * cols - N  # blank tiles needed to complete the grid
  assert batch_pad >= 0
  # Pad the batch out to a full grid, and pad each tile on its bottom/right
  # edge so neighboring tiles are separated by a `pad`-pixel strip.
  post_pad = [batch_pad, pad, pad, 0]
  pad_arg = [[0, p] for p in post_pad]
  imarray = np.pad(imarray, pad_arg, 'constant', constant_values=padval)
  H += pad
  W += pad
  grid = (imarray
          .reshape(rows, cols, H, W, C)
          .transpose(0, 2, 1, 3, 4)
          .reshape(rows*H, cols*W, C))
  if pad:
    grid = grid[:-pad, :-pad]  # trim the trailing padding strip
  return grid
def interleave(*args):
  """Interleaves input arrays of the same shape along the batch axis.

  Given arrays a, b, ... each of shape [N, ...], returns an array of shape
  [len(args)*N, ...] ordered a[0], b[0], ..., a[1], b[1], ...

  Raises:
    ValueError: if no arrays are given, shapes differ, or inputs are scalars.
  """
  if not args:
    raise ValueError('At least one argument is required.')
  a0 = args[0]
  if any(a.shape != a0.shape for a in args):
    raise ValueError('All inputs must have the same shape.')
  if not a0.shape:
    raise ValueError('Inputs must have at least one axis.')
  # Stack the inputs along a new leading axis, swap it with the batch axis,
  # then merge the two leading axes to interleave the batches.
  out = np.transpose(args, [1, 0] + list(range(2, len(a0.shape) + 1)))
  out = out.reshape(-1, *a0.shape[1:])
  return out
def imshow(a, format='png', jpeg_fallback=True):
  """Displays an image in the given format.

  Args:
    a: image array with values in [0, 255] (cast to uint8 before encoding).
    format: encoding format understood by PIL ('png', 'jpeg', ...).
    jpeg_fallback: if True, retry as jpeg when the image is too large to
      display in the requested format.

  Returns:
    The IPython display handle.
  """
  a = a.astype(np.uint8)
  data = io.BytesIO()
  PIL.Image.fromarray(a).save(data, format)
  im_data = data.getvalue()
  try:
    disp = IPython.display.display(IPython.display.Image(im_data))
  except IOError:
    if jpeg_fallback and format != 'jpeg':
      # Bug fix: the original Python-2-style `print (...).format(...)` calls
      # .format() on print()'s None return value, raising AttributeError.
      # Format the message first, then print it.
      print('Warning: image was too large to display in format "{}"; '
            'trying jpeg instead.'.format(format))
      return imshow(a, format='jpeg')
    else:
      raise
  return disp
def image_to_uint8(x):
  """Converts [-1, 1] float array to [0, 255] uint8."""
  x = np.asarray(x)
  x = (256. / 2.) * (x + 1.)  # affine map [-1, 1] -> [0, 256]
  x = np.clip(x, 0, 255)  # 1.0 maps to 256; clamp into the uint8 range
  x = x.astype(np.uint8)
  return x
# module = hub.Module(module_path, trainable=True, tags={'train'}) # training
module = hub.Module(module_path) # inference
for signature in module.get_signature_names():
print('Signature:', signature)
print('Inputs:', pformat(module.get_input_info_dict(signature)))
print('Outputs:', pformat(module.get_output_info_dict(signature)))
print()
class BigBiGAN(object):
  """Convenience wrapper around a BigBiGAN TF Hub module.

  Exposes the module's generator, encoder, and joint discriminator, plus
  helpers for reconstruction and the per-module BigBiGAN hinge losses.
  (Docstring quotes below were restored; the cleaned source had them
  stripped, leaving syntactically invalid bare expressions.)
  """

  def __init__(self, module):
    """Initialize a BigBiGAN from the given TF Hub module."""
    self._module = module

  def generate(self, z, upsample=False):
    """Run a batch of latents z through the generator to generate images.

    Args:
      z: A batch of 120D Gaussian latents, shape [N, 120].

    Returns: a batch of generated RGB images, shape [N, 128, 128, 3], range
      [-1, 1].
    """
    outputs = self._module(z, signature='generate', as_dict=True)
    return outputs['upsampled' if upsample else 'default']

  def make_generator_ph(self):
    """Creates a tf.placeholder with the dtype & shape of generator inputs."""
    info = self._module.get_input_info_dict('generate')['z']
    return tf.placeholder(dtype=info.dtype, shape=info.get_shape())

  def gen_pairs_for_disc(self, z):
    """Compute generator input pairs (G(z), z) for discriminator, given z.

    Args:
      z: A batch of latents (120D standard Gaussians), shape [N, 120].

    Returns: a tuple (G(z), z) of discriminator inputs.
    """
    # The generator's default (non-upsampled) output is already 128x128,
    # matching the discriminator's expected input resolution, so no
    # downsampling is needed here (unlike enc_pairs_for_disc).
    x = self.generate(z)
    return x, z

  def encode(self, x, return_all_features=False):
    """Run a batch of images x through the encoder.

    Args:
      x: A batch of data (256x256 RGB images), shape [N, 256, 256, 3], range
        [-1, 1].
      return_all_features: If True, return all features computed by the
        encoder. Otherwise (default) just return a sample z_hat.

    Returns: the sample z_hat of shape [N, 120] (or a dict of all features if
      return_all_features).
    """
    outputs = self._module(x, signature='encode', as_dict=True)
    return outputs if return_all_features else outputs['z_sample']

  def make_encoder_ph(self):
    """Creates a tf.placeholder with the dtype & shape of encoder inputs."""
    info = self._module.get_input_info_dict('encode')['x']
    return tf.placeholder(dtype=info.dtype, shape=info.get_shape())

  def enc_pairs_for_disc(self, x):
    """Compute encoder input pairs (x, E(x)) for discriminator, given x.

    Args:
      x: A batch of data (256x256 RGB images), shape [N, 256, 256, 3], range
        [-1, 1].

    Returns: a tuple (downsample(x), E(x)) of discriminator inputs.
    """
    # Downsample 256x256 image x for 128x128 discriminator input.
    x_down = tf.nn.avg_pool(x, ksize=2, strides=2, padding='SAME')
    z = self.encode(x)
    return x_down, z

  def discriminate(self, x, z):
    """Compute the discriminator scores for pairs of data (x, z).

    (x, z) must be batches with the same leading batch dimension, and joint
    scores are computed on corresponding pairs x[i] and z[i].

    Args:
      x: A batch of data (128x128 RGB images), shape [N, 128, 128, 3], range
        [-1, 1].
      z: A batch of latents (120D standard Gaussians), shape [N, 120].

    Returns:
      A dict of scores:
        score_xz: the joint scores for the (x, z) pairs.
        score_x: the unary scores for x only.
        score_z: the unary scores for z only.
    """
    inputs = dict(x=x, z=z)
    return self._module(inputs, signature='discriminate', as_dict=True)

  def reconstruct_x(self, x, use_sample=True, upsample=False):
    """Compute BigBiGAN reconstructions of images x via G(E(x)).

    Args:
      x: A batch of data (256x256 RGB images), shape [N, 256, 256, 3], range
        [-1, 1].
      use_sample: takes a sample z_hat ~ E(x). Otherwise, deterministically
        use the mean. (Though a sample z_hat may be far from the mean z,
        typically the resulting recons G(z_hat) and G(z) are very similar.)
      upsample: if set, upsample the reconstruction to the input resolution
        (256x256). Otherwise return the raw lower resolution generator output
        (128x128).

    Returns: a batch of recons G(E(x)), shape [N, 256, 256, 3] if
      `upsample`, otherwise [N, 128, 128, 3].
    """
    if use_sample:
      z = self.encode(x)
    else:
      z = self.encode(x, return_all_features=True)['z_mean']
    recons = self.generate(z, upsample=upsample)
    return recons

  def losses(self, x, z):
    """Compute per-module BigBiGAN losses given data & latent sample batches.

    Args:
      x: A batch of data (256x256 RGB images), shape [N, 256, 256, 3], range
        [-1, 1].
      z: A batch of latents (120D standard Gaussians), shape [M, 120].

    For the original BigBiGAN losses, pass batches of size N=M=2048, with z's
    sampled from a 120D standard Gaussian (e.g., np.random.randn(2048, 120)),
    and x's sampled from the ImageNet (ILSVRC2012) training set with the
    "ResNet-style" preprocessing from:
        https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_preprocessing.py

    Returns:
      A dict of per-module losses:
        disc: loss for the discriminator.
        enc: loss for the encoder.
        gen: loss for the generator.
    """
    # Compute discriminator scores on (x, E(x)) pairs.
    # Downsample 256x256 image x for 128x128 discriminator input.
    scores_enc_x_dict = self.discriminate(*self.enc_pairs_for_disc(x))
    scores_enc_x = tf.concat([scores_enc_x_dict['score_xz'],
                              scores_enc_x_dict['score_x'],
                              scores_enc_x_dict['score_z']], axis=0)

    # Compute discriminator scores on (G(z), z) pairs.
    scores_gen_z_dict = self.discriminate(*self.gen_pairs_for_disc(z))
    scores_gen_z = tf.concat([scores_gen_z_dict['score_xz'],
                              scores_gen_z_dict['score_x'],
                              scores_gen_z_dict['score_z']], axis=0)

    # Hinge losses: the discriminator pushes real pairs (x, E(x)) above +1
    # and generated pairs (G(z), z) below -1; encoder and generator optimize
    # the raw scores in opposite directions.
    disc_loss_enc_x = tf.reduce_mean(tf.nn.relu(1. - scores_enc_x))
    disc_loss_gen_z = tf.reduce_mean(tf.nn.relu(1. + scores_gen_z))
    disc_loss = disc_loss_enc_x + disc_loss_gen_z

    enc_loss = tf.reduce_mean(scores_enc_x)
    gen_loss = tf.reduce_mean(-scores_gen_z)

    return dict(disc=disc_loss, enc=enc_loss, gen=gen_loss)
bigbigan = BigBiGAN(module)
# Make input placeholders for x (`enc_ph`) and z (`gen_ph`).
enc_ph = bigbigan.make_encoder_ph()
gen_ph = bigbigan.make_generator_ph()
# Compute samples G(z) from encoder input z (`gen_ph`).
gen_samples = bigbigan.generate(gen_ph)
# Compute reconstructions G(E(x)) of encoder input x (`enc_ph`).
recon_x = bigbigan.reconstruct_x(enc_ph, upsample=True)
# Compute encoder features used for representation learning evaluations given
# encoder input x (`enc_ph`).
enc_features = bigbigan.encode(enc_ph, return_all_features=True)
# Compute discriminator scores for encoder pairs (x, E(x)) given x (`enc_ph`)
# and generator pairs (G(z), z) given z (`gen_ph`).
disc_scores_enc = bigbigan.discriminate(*bigbigan.enc_pairs_for_disc(enc_ph))
disc_scores_gen = bigbigan.discriminate(*bigbigan.gen_pairs_for_disc(gen_ph))
# Compute losses.
losses = bigbigan.losses(enc_ph, gen_ph)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
feed_dict = {gen_ph: np.random.randn(32, 120)}
_out_samples = sess.run(gen_samples, feed_dict=feed_dict)
print('samples shape:', _out_samples.shape)
imshow(imgrid(image_to_uint8(_out_samples), cols=4))
def get_flowers_data():
Returns a [32, 256, 256, 3] np.array of preprocessed TF-Flowers samples.
import tensorflow_datasets as tfds
ds, info = tfds.load('tf_flowers', split='train', with_info=True)
# Just get the images themselves as we don't need labels for this demo.
ds = ds.map(lambda x: x['image'])
# Filter out small images (with minor edge length <256).
ds = ds.filter(lambda x: tf.reduce_min(tf.shape(x)[:2]) >= 256)
# Take the center square crop of the image and resize to 256x256.
def crop_and_resize(image):
imsize = tf.shape(image)[:2]
minor_edge = tf.reduce_min(imsize)
start = (imsize - minor_edge) // 2
stop = start + minor_edge
cropped_image = image[start[0] : stop[0], start[1] : stop[1]]
resized_image = tf.image.resize_bicubic([cropped_image], [256, 256])[0]
return resized_image
ds = ds.map(crop_and_resize)
# Convert images from [0, 255] uint8 to [-1, 1] float32.
ds = ds.map(lambda image: tf.cast(image, tf.float32) / (255. / 2.) - 1)
# Take the first 32 samples.
ds = ds.take(32)
return np.array(list(tfds.as_numpy(ds)))
test_images = get_flowers_data()
test_images_batch = test_images[:16]
_out_recons = sess.run(recon_x, feed_dict={enc_ph: test_images_batch})
print('reconstructions shape:', _out_recons.shape)
inputs_and_recons = interleave(test_images_batch, _out_recons)
print('inputs_and_recons shape:', inputs_and_recons.shape)
imshow(imgrid(image_to_uint8(inputs_and_recons), cols=2))
_out_features = sess.run(enc_features, feed_dict={enc_ph: test_images_batch})
print('AvePool features shape:', _out_features['avepool_feat'].shape)
print('BN+CReLU features shape:', _out_features['bn_crelu_feat'].shape)
feed_dict = {enc_ph: test_images, gen_ph: np.random.randn(32, 120)}
_out_scores_enc, _out_scores_gen, _out_losses = sess.run(
[disc_scores_enc, disc_scores_gen, losses], feed_dict=feed_dict)
print('Encoder scores:', {k: v.mean() for k, v in _out_scores_enc.items()})
print('Generator scores:', {k: v.mean() for k, v in _out_scores_gen.items()})
print('Losses:', _out_losses)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: BigBiGAN으로 이미지 생성하기
Step2: 설정
Step7: 이미지를 표시하는 일부 함수 정의하기
Step8: BigBiGAN TF Hub 모듈을 로드하고 사용 가능한 기능 표시하기
Step19: 다양한 함수에 편리하게 액세스할 수 있도록 래퍼 클래스 정의하기
Step20: 나중에 샘플, 재구성, 판별자 점수 및 손실 계산에 사용할 텐서 만들기
Step21: TensorFlow 세션을 만들고 변수 초기화하기
Step22: 생성기 샘플
Step24: TF-Flowers 데이터세트에서 test_images 로드하기
Step25: 재구성
Step26: 인코더 특성
Step27: 판별자 점수 및 손실
|
3,537
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (10, 20)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
def to_id(text, vocab, add_eos=False):
ids = []
for sentence in text.split('\n'):
sw_ids = [] # sentence words ids
for word in sentence.split():
sw_ids.append(vocab[word])
if add_eos:
sw_ids.append(vocab['<EOS>'])
ids.append(sw_ids)
return ids
source_id_text = to_id(source_text, source_vocab_to_int)
target_id_text = to_id(target_text, target_vocab_to_int, True)
return source_id_text, target_id_text
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
import problem_unittests as tests
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
max target sequence length, source sequence length)
inputs = tf.placeholder(tf.int32, (None, None), name='input')
targets = tf.placeholder(tf.int32, (None, None), name='targets')
lr = tf.placeholder(tf.float32, name='learning_rate')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
tsl = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
mtl = tf.reduce_max(tsl, name='max_target_len')
ssl = tf.placeholder(tf.int32, (None,), name='source_sequence_length')
return inputs, targets, lr, keep_prob, tsl, mtl, ssl
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
go_id = target_vocab_to_int['<GO>']
# Ref: udacity/deep-learning.git:seq2seq/sequence_to_sequence_implementation.ipynb
target_data = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
return tf.concat([tf.fill([batch_size, 1], go_id), target_data], 1)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_encoding_input(process_decoder_input)
from imp import reload
reload(tests)
# RNN cell
def make_cell(rnn_size, seed=42):
initializer = tf.random_uniform_initializer(-0.1, 0.1, seed=seed)
cell = tf.contrib.rnn.LSTMCell(rnn_size, initializer=initializer)
return cell
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
:return: tuple (RNN output, RNN state)
inputs = tf.contrib.layers.embed_sequence(rnn_inputs,
source_vocab_size,
encoding_embedding_size)
cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
return tf.nn.dynamic_rnn(cell, inputs,
sequence_length=source_sequence_length,
dtype=tf.float32)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing training logits and sample_id
s2s = tf.contrib.seq2seq
# Apply dropout
drop_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, keep_prob)
# Create the decoder
helper = s2s.TrainingHelper(dec_embed_input, target_sequence_length)
decoder = s2s.BasicDecoder(drop_cell, helper, encoder_state, output_layer)
# Perform dynamic decoding
return s2s.dynamic_decode(decoder, impute_finished=True,
maximum_iterations=max_summary_length)[0]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing inference logits and sample_id
s2s = tf.contrib.seq2seq
# vocab_size is not in use?
start_tokens = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32),
[batch_size], name='start_tokens')
helper = s2s.GreedyEmbeddingHelper(dec_embeddings, start_tokens,
end_of_sequence_id)
drop_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, keep_prob)
decoder = s2s.BasicDecoder(drop_cell, helper, encoder_state, output_layer)
return s2s.dynamic_decode(decoder, impute_finished=True,
maximum_iterations=max_target_sequence_length)[0]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:param decoding_embedding_size: Decoding embedding size
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
out_kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1)
output_layer = Dense(target_vocab_size, kernel_initializer=out_kernel_initializer)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
with tf.variable_scope("decoding") as decoding_scope:
train_logits = decoding_layer_train(encoder_state,
dec_cell,
dec_embed_input,
target_sequence_length,
max_target_sequence_length,
output_layer,
keep_prob)
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
inference_logits = decoding_layer_infer(encoder_state,
dec_cell,
dec_embeddings,
start_of_sequence_id,
end_of_sequence_id,
max_target_sequence_length,
target_vocab_size,
output_layer,
batch_size,
keep_prob)
return train_logits, inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
_, encoder_state = encoding_layer(input_data, rnn_size,
num_layers, keep_prob,
source_sequence_length,
source_vocab_size,
enc_embedding_size)
dec_input = process_decoder_input(target_data,
target_vocab_to_int,
batch_size)
return decoding_layer(dec_input, encoder_state,
target_sequence_length,
max_target_sentence_length,
rnn_size,
num_layers,
target_vocab_to_int,
target_vocab_size,
batch_size,
keep_prob,
dec_embedding_size)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 4
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 512
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 50
decoding_embedding_size = 50
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.5
display_step = 80
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
def pad_sentence_batch(sentence_batch, pad_int):
Pad sentences with <PAD> so that each sentence of a batch has the same length
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
Batch targets, sources, and the lengths of their sentences together
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
DON'T MODIFY ANYTHING IN THIS CELL
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
return [
vocab_to_int[x] if x in vocab_to_int else vocab_to_int['<UNK>']
for x in sentence.lower().split()
]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoder Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step40: Batch and pad the source and target sequences
Step43: Train
Step45: Save Parameters
Step47: Checkpoint
Step50: Sentence to Sequence
Step52: Translate
|
3,538
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sampling_rate = 20 # This quantity is on Hertz
step = 1.0 / sampling_rate
Tmax = 20.0
time = np.arange(0, Tmax, step)
N_to_use = 1024 # Should be a power of two.
print("The smalles frequency that the FFT will discern: ", sampling_rate / N_to_use)
print("Nyquist Frequency: ", sampling_rate / 2)
f1 = 1.0
f2 = 2.0
f3 = 4.0 # All of this on Hertz
y1 = np.sin(2 * np.pi * f1 * time)
y2 = np.sin(2 * np.pi * f2 * time)
y3 = np.sin(2 * np.pi * f3 * time)
y = y1 + y2 + y3
transform = np.fft.fft(y, N_to_use)
# We get the proper frequencies for the FFT
frequencies = np.fft.fftfreq(N_to_use, d=step)
%matplotlib inline
plt.plot(frequencies, np.abs(transform))
plt.title('Fast Fourier Transform')
plt.xlabel('Frequencies (Hz)')
plt.ylabel('Power Spectrum')
plt.xlim([-6, 6])
aux = int(N_to_use / 2)
freq_aux = frequencies[0: aux]
plt.plot(freq_aux, np.abs(transform[:aux]))
plt.title('Fast Fourier Transform')
plt.xlabel('Frequencies (Hz)')
plt.ylabel('Power Spectrum')
plt.xlim([0, 6])
sampling_rate = 100 # This quantity is on Hertz
step = 1.0 / sampling_rate
Tmax = 20.0
time = np.arange(0, Tmax, step)
N_to_use = 1024 * 2 # Should be a power of two.
T = 10.0 # Period
f = 1.0 / T # Frequency relationship
y = np.sin(2 * np.pi * f * time)
transform = np.fft.fft(y, N_to_use)
inverse = np.fft.ifft(transform, N_to_use)
time_inverse = np.arange(0, N_to_use * step, step)
# Now we plot this.
plt.subplot(1, 2, 1)
plt.title('Original Signal')
plt.plot(time, y)
plt.subplot(1, 2, 2)
plt.title('Recovered Signal')
plt.plot(time_inverse, inverse.real)
sampling_rate * T
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Size of the FFT
Step2: Analysis of the sampling rate on the limits of what the FFT can tell us.
Step3: A word about frequencies units and the pi value
Step4: Final Comments
Step5: Implementation Number 2
Step6: Here we will give the frequency in terms of the period for ease of interpretation.
Step7: About the Period of the Recovered Signal
|
3,539
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet, psd_multitaper, psd_welch
from mne.datasets import somato
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
# crop and resample just to reduce computation time
raw.crop(120, 360).load_data().resample(200)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
epochs.plot_psd(fmin=2., fmax=40., average=True, spatial_colors=False)
epochs.plot_psd_topomap(ch_type='grad', normalize=False)
f, ax = plt.subplots()
psds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=1)
psds = 10 * np.log10(psds) # convert to dB
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
ylabel='Power Spectral Density (dB)')
plt.show()
# Estimate PSDs based on "mean" and "median" averaging for comparison.
kwargs = dict(fmin=2, fmax=40, n_jobs=1)
psds_welch_mean, freqs_mean = psd_welch(epochs, average='mean', **kwargs)
psds_welch_median, freqs_median = psd_welch(epochs, average='median', **kwargs)
# Convert power to dB scale.
psds_welch_mean = 10 * np.log10(psds_welch_mean)
psds_welch_median = 10 * np.log10(psds_welch_median)
# We will only plot the PSD for a single sensor in the first epoch.
ch_name = 'MEG 0122'
ch_idx = epochs.info['ch_names'].index(ch_name)
epo_idx = 0
_, ax = plt.subplots()
ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
ls='-', label='mean of segments')
ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
ls='--', label='median of segments')
ax.set(title='Welch PSD ({}, Epoch {})'.format(ch_name, epo_idx),
xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
ax.legend(loc='upper right')
plt.show()
psds_welch_unagg, freqs_unagg = psd_welch(epochs, average=None, **kwargs)
print(psds_welch_unagg.shape)
freqs = np.logspace(*np.log10([6, 35]), num=8)
n_cycles = freqs / 2. # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=1)
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', show=False)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', show=False)
mne.viz.tight_layout()
plt.show()
power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,
timefreqs=[(.5, 10), (1.3, 8)])
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Frequency analysis
Step3: Now, let's take a look at the spatial distributions of the PSD, averaged
Step4: Alternatively, you can also create PSDs from ~mne.Epochs with functions
Step5: Notably,
Step6: Lastly, we can also retrieve the unaggregated segments by passing
Step7: Time-frequency analysis
Step8: Inspect power
Step9: Joint Plot
Step10: Inspect ITC
|
3,540
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
#Load libraries for data processing
import pandas as pd #data processing, CSV file I/O (e.g. pd.read_csv)
import numpy as np
from scipy.stats import norm
## Supervised learning.
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.metrics import confusion_matrix
from sklearn import metrics, preprocessing
from sklearn.metrics import classification_report
# visualization
import seaborn as sns
plt.style.use('fivethirtyeight')
sns.set_style("white")
plt.rcParams['figure.figsize'] = (8,4)
#plt.rcParams['axes.titlesize'] = 'large'
data = pd.read_csv('data/clean-data.csv', index_col=False)
data.drop('Unnamed: 0',axis=1, inplace=True)
#data.head()
#Assign predictors to a variable of ndarray (matrix) type
array = data.values
X = array[:,1:31] # features
y = array[:,0]
#transform the class labels from their original string representation (M and B) into integers
le = LabelEncoder()
y = le.fit_transform(y)
# Normalize the data (center around 0 and scale to remove the variance).
scaler =StandardScaler()
Xs = scaler.fit_transform(X)
# 5. Divide records in training and testing sets.
X_train, X_test, y_train, y_test = train_test_split(Xs, y, test_size=0.3, random_state=2, stratify=y)
# 6. Create an SVM classifier and train it on 70% of the data set.
clf = SVC(probability=True)
clf.fit(X_train, y_train)
#7. Analyze accuracy of predictions on 30% of the holdout test sample.
classifier_score = clf.score(X_test, y_test)
print '\nThe classifier accuracy score is {:03.2f}\n'.format(classifier_score)
# Get average of 3-fold cross-validation score using an SVC estimator.
n_folds = 3
cv_error = np.average(cross_val_score(SVC(), Xs, y, cv=n_folds))
print '\nThe {}-fold cross-validation accuracy score for this classifier is {:.2f}\n'.format(n_folds, cv_error)
from sklearn.feature_selection import SelectKBest, f_regression
clf2 = make_pipeline(SelectKBest(f_regression, k=3),SVC(probability=True))
scores = cross_val_score(clf2, Xs, y, cv=3)
# Get average of 3-fold cross-validation score using an SVC estimator.
n_folds = 3
cv_error = np.average(cross_val_score(SVC(), Xs, y, cv=n_folds))
print '\nThe {}-fold cross-validation accuracy score for this classifier is {:.2f}\n'.format(n_folds, cv_error)
print scores
avg = (100*np.mean(scores), 100*np.std(scores)/np.sqrt(scores.shape[0]))
print "Average score and uncertainty: (%.2f +- %.3f)%%"%avg
# The confusion matrix helps visualize the performance of the algorithm.
y_pred = clf.fit(X_train, y_train).predict(X_test)
cm = metrics.confusion_matrix(y_test, y_pred)
#print(cm)
%matplotlib inline
import matplotlib.pyplot as plt
from IPython.display import Image, display
fig, ax = plt.subplots(figsize=(5, 5))
ax.matshow(cm, cmap=plt.cm.Reds, alpha=0.3)
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(x=j, y=i,
s=cm[i, j],
va='center', ha='center')
plt.xlabel('Predicted Values', )
plt.ylabel('Actual Values')
plt.show()
print(classification_report(y_test, y_pred ))
from sklearn.metrics import roc_curve, auc
# Plot the receiver operating characteristic curve (ROC).
plt.figure(figsize=(10,8))
probas_ = clf.predict_proba(X_test)
fpr, tpr, thresholds = roc_curve(y_test, probas_[:, 1])
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold (area = %0.2f)' % (roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Random')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.axes().set_aspect(1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classification with cross-validation
Step2: To get a better measure of prediction accuracy (which you can use as a proxy for “goodness of fit” of the model), you can successively split the data into folds that you will use for training and testing
Step3: The above evaluations were based on using the entire set of features. You will now employ the correlation-based feature selection strategy to assess the effect of using 3 features which have the best correlation with the class labels.
Step4: From the above results, you can see that only a fraction of the features are required to build a model that performs similarly to models based on using the entire set of features.
Step5: Observation
|
3,541
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
def bike_to_wellesley():
    # Move one bike from Olin to Wellesley.
    # NOTE: relies on the global `bikeshare` State defined in the
    # surrounding notebook cell; a later cell replaces this with a
    # version that takes the state as an explicit parameter.
    bikeshare.olin -= 1
    bikeshare.wellesley += 1
def bike_to_wellesley(state):
    """Move one bike from Olin to Wellesley.

    state: bikeshare State object
    """
    state.wellesley += 1
    state.olin -= 1
bikeshare = State(olin=10, wellesley=2)
bike_to_wellesley(bikeshare)
bikeshare1 = State(olin=10, wellesley=2)
bikeshare2 = State(olin=2, wellesley=10)
bike_to_wellesley(bikeshare1)
bike_to_wellesley(bikeshare2)
def bike_to_olin(state):
    """Move one bike from Wellesley to Olin.

    state: bikeshare State object
    """
    # We decrease one state variable and increase the
    # other, so the total number of bikes is unchanged.
    state.wellesley -= 1
    state.olin += 1
def bike_to_olin(state):
    """Move one bike from Wellesley to Olin, if one is available.

    Does nothing when Wellesley has no bikes.
    state: bikeshare State object
    """
    if state.wellesley != 0:
        state.wellesley -= 1
        state.olin += 1
bikeshare = State(olin=12, wellesley=0)
bike_to_olin(bikeshare)
bikeshare
x = 5
x == 5
if x == 5:
print('yes, x is 5')
def bike_to_olin(state):
    """Move one bike from Wellesley to Olin, tracking unmet demand.

    When Wellesley is empty, the unhappy customer is recorded in
    state.wellesley_empty and the bike counts are left unchanged.
    state: bikeshare State object
    """
    if state.wellesley != 0:
        state.wellesley -= 1
        state.olin += 1
    else:
        state.wellesley_empty += 1
bikeshare = State(olin=12, wellesley=0,
wellesley_empty=0)
bike_to_olin(bikeshare)
bikeshare
def run_simulation(state, p1, p2, num_steps):
    """Simulate the given number of time steps and plot the result.

    state: State object
    p1: probability of an Olin->Wellesley customer arrival
    p2: probability of a Wellesley->Olin customer arrival
    num_steps: number of time steps
    """
    # Fix: the docstring lost its triple quotes in this copy, which made
    # the function body a syntax error; restored as a proper docstring.
    results = TimeSeries()
    results[0] = state.olin
    for i in range(num_steps):
        step(state, p1, p2)
        results[i+1] = state.olin
    results.plot(label='Olin')
    decorate(title='Olin-Wellesley Bikeshare',
             xlabel='Time step (min)',
             ylabel='Number of bikes')
def step(state, p1, p2):
    """Simulate one time step.

    state: bikeshare State object
    p1: probability of an Olin->Wellesley ride
    p2: probability of a Wellesley->Olin ride
    """
    # Fix: restored the triple quotes around the docstring (they were
    # stripped in this copy, making the body invalid Python).
    if flip(p1):
        bike_to_wellesley(state)
    if flip(p2):
        bike_to_olin(state)
def bike_to_olin(state):
    """Move one bike from Wellesley to Olin, counting unmet demand.

    If Wellesley is empty, increment state.wellesley_empty instead.
    state: bikeshare State object
    """
    # Fix: restored the triple quotes around the docstring.
    if state.wellesley == 0:
        state.wellesley_empty += 1
        return
    state.wellesley -= 1
    state.olin += 1
def bike_to_wellesley(state):
    """Move one bike from Olin to Wellesley.

    state: bikeshare State object
    """
    # Fix: restored the triple quotes around the docstring.
    state.olin -= 1
    state.wellesley += 1
# Solution
def bike_to_wellesley(state):
    """Move one bike from Olin to Wellesley, counting unmet demand.

    If Olin is empty, increment state.olin_empty instead.
    state: bikeshare State object
    """
    # Fix: restored the triple quotes around the docstring.
    if state.olin == 0:
        state.olin_empty += 1
        return
    state.olin -= 1
    state.wellesley += 1
# Solution
bikeshare = State(olin=0, wellesley=12,
olin_empty=0, wellesley_empty=0)
# Solution
bike_to_wellesley(bikeshare)
# Solution
bikeshare
# Solution
bikeshare = State(olin=10, wellesley=2,
olin_empty=0, wellesley_empty=0)
run_simulation(bikeshare, 0.3, 0.2, 60)
bikeshare
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To paraphrase two Georges, "All models are wrong, but some models are
Step2: When this function is called, it modifies bikeshare. As long as there
Step3: The name of the parameter is state rather than bikeshare as a
Step4: Again, the argument we provide gets assigned to the parameter, so this
Step5: And update them independently
Step7: Changes in bikeshare1 do not affect bikeshare2, and vice versa. So
Step8: Docstrings follow a conventional format
Step9: The first line checks whether the number of bikes at Wellesley is zero. If so, it uses a return statement, which causes the function to end immediately, without running the rest of the statements. So if there are no bikes at Wellesley, we "return" from bike_to_olin without changing the state.
Step10: The state of the system should be unchanged.
Step11: No more negative bikes (at least at Wellesley).
Step12: On the other hand, the following statement checks whether x is 5 and
Step13: You can use the equals operator in an if statement, like this
Step14: If you make a mistake and use = in an if statement, like this
Step15: If a customer arrives at the Wellesley station and finds no bike
Step16: We can test it by calling bike_to_olin
Step17: There should be 12 bikes at Olin, no bikes at Wellesley, and one unhappy customer.
Step22: Looks good!
Step24: Exercise
Step25: Exercise
|
3,542
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data.gl/')
# In the dataset, 'floors' was defined with type string,
# so we'll convert them to int, before using it below
sales['floors'] = sales['floors'].astype(int)
import numpy as np # note this allows us to refer to numpy as np instead
def get_numpy_data(data_sframe, features, output):
    """Convert an SFrame into a (feature matrix, output array) pair.

    A 'constant' column of ones is added to data_sframe (side effect),
    so the first column of the returned matrix is the intercept term.

    data_sframe: graphlab SFrame
    features: list of feature column names
    output: name of the target column
    Returns (feature_matrix, output_array) as numpy arrays.
    """
    data_sframe['constant'] = 1  # intercept column of ones
    all_columns = ['constant'] + features
    feature_matrix = data_sframe[all_columns].to_numpy()
    output_array = data_sframe[output].to_numpy()
    return (feature_matrix, output_array)
def predict_output(feature_matrix, weights):
    """Return the predictions: dot product of each row of
    feature_matrix with the weight vector."""
    return np.dot(feature_matrix, weights)
X = np.array([[3.,5.,8.],[4.,12.,15.]])
print X
norms = np.linalg.norm(X, axis=0) # gives [norm(X[:,0]), norm(X[:,1]), norm(X[:,2])]
print norms
print X / norms # gives [X[:,0]/norm(X[:,0]), X[:,1]/norm(X[:,1]), X[:,2]/norm(X[:,2])]
def normalize_features(feature_matrix):
    """Scale each column of feature_matrix to unit 2-norm.

    Returns (normalized_features, norms), where norms holds the original
    column 2-norms (used later to rescale the learned weights).
    """
    norms = np.linalg.norm(feature_matrix, axis=0)
    return (feature_matrix / norms, norms)
features, norms = normalize_features(np.array([[3.,6.,9.],[4.,8.,12.]]))
print features
# should print
# [[ 0.6 0.6 0.6]
# [ 0.8 0.8 0.8]]
print norms
# should print
# [5. 10. 15.]
simple_features = ['sqft_living', 'bedrooms']
my_output = 'price'
(simple_feature_matrix, output) = get_numpy_data(sales, simple_features, my_output)
simple_feature_matrix, norms = normalize_features(simple_feature_matrix)
weights = np.array([1., 4., 1.])
prediction = predict_output(simple_feature_matrix, weights)
ro = {}
for i in range(len(weights)):
feature = simple_feature_matrix[:, i]
ro[i] = (feature * (output - prediction) + weights[i] * feature).sum()
print ro
def check(weight, l1_penalty):
    """Return 0 when weight lies strictly inside the soft-thresholding
    dead zone (-l1_penalty/2, l1_penalty/2); otherwise return weight."""
    half = l1_penalty / 2
    if -half < weight < half:
        return 0
    return weight
for i in [1.4e8, 1.64e8, 1.73e8, 1.9e8, 2.3e8]:
w1 = check(ro[1], i)
w2 = check(ro[2], i)
if w1 != 0 and w2 ==0:
print "%e" % i
for i in [1.4e8, 1.64e8, 1.73e8, 1.9e8, 2.3e8]:
w1 = check(ro[1], i)
w2 = check(ro[2], i)
if w1 == w2 == 0:
print "%e %d" % (i, i / ro[1])
def lasso_coordinate_descent_step(i, feature_matrix, output, weights, l1_penalty):
    """One coordinate-descent step: optimize weight i with all other
    weights held fixed (soft-thresholding).

    Assumes the columns of feature_matrix are normalized to unit 2-norm.
    The intercept (i == 0) is not regularized.
    """
    # prediction under the current weights (inlined dot product)
    prediction = np.dot(feature_matrix, weights)
    feature_i = feature_matrix[:, i]
    # ro[i] = SUM[ feature_i * (output - prediction + weight[i]*feature_i) ]
    ro_i = (feature_i * (output - prediction + weights[i] * feature_i)).sum()
    half_penalty = l1_penalty / 2.
    if i == 0:
        # intercept -- do not regularize
        return ro_i
    if ro_i < -half_penalty:
        return ro_i + half_penalty
    if ro_i > half_penalty:
        return ro_i - half_penalty
    return 0.
# should print 0.425558846691
import math
print lasso_coordinate_descent_step(1, np.array([[3./math.sqrt(13),1./math.sqrt(10)],[2./math.sqrt(13),3./math.sqrt(10)]]),
np.array([1., 1.]), np.array([1., 4.]), 0.1)
def lasso_cyclical_coordinate_descent(feature_matrix, output, initial_weights, l1_penalty, tolerance):
    """Run cyclical coordinate descent until a full sweep moves no weight
    by more than `tolerance`.

    initial_weights is copied, not modified.
    Returns the converged weight vector as a numpy array.
    """
    weights = np.array(initial_weights.copy())
    converged = False
    while not converged:
        step_sizes = list()
        for i in range(len(weights)):
            previous = weights[i]
            weights[i] = lasso_coordinate_descent_step(i, feature_matrix, output, weights, l1_penalty)
            step_sizes.append(abs(previous - weights[i]))
        converged = max(step_sizes) < tolerance
    return weights
simple_features = ['sqft_living', 'bedrooms']
my_output = 'price'
initial_weights = np.zeros(3)
l1_penalty = 1e7
tolerance = 1.0
(simple_feature_matrix, output) = get_numpy_data(sales, simple_features, my_output)
(normalized_simple_feature_matrix, simple_norms) = normalize_features(simple_feature_matrix) # normalize features
weights = lasso_cyclical_coordinate_descent(normalized_simple_feature_matrix, output,
initial_weights, l1_penalty, tolerance)
predictions = predict_output(normalized_simple_feature_matrix, weights)
residuals = predictions - output
RSS = (residuals**2).sum()
print "RSS", RSS
print weights
for i in range(len(weights)):
if i == 0:
pass
n = check(weights[i], l1_penalty)
if n == 0:
print simple_features[i - 1]
train_data,test_data = sales.random_split(.8,seed=0)
all_features = ['bedrooms',
'bathrooms',
'sqft_living',
'sqft_lot',
'floors',
'waterfront',
'view',
'condition',
'grade',
'sqft_above',
'sqft_basement',
'yr_built',
'yr_renovated']
my_output = 'price'
initial_weights = np.zeros(len(all_features) + 1)
l1_penalty = 1e7
tolerance = 1.0
(feature_matrix, output) = get_numpy_data(train_data, all_features, my_output)
(normalized_feature_matrix, norms) = normalize_features(feature_matrix) # normalize features
def show_nnz_features(w, f):
    """Print the name of each feature with a nonzero learned weight.

    w: weight vector; w[0] is the intercept and is skipped
    f: feature names aligned with w[1:]
    """
    for i in range(len(w)):
        if i == 0:
            continue
        if w[i] != 0.:
            # Fix: the original used a Python-2-only `print` statement;
            # the call form works under both Python 2 and 3.
            print(f[i - 1])
weights1e7 = lasso_cyclical_coordinate_descent(normalized_feature_matrix, output, initial_weights, l1_penalty, tolerance)
print weights1e7
show_nnz_features(weights1e7, all_features)
l1_penalty = 1e8
initial_weights = np.zeros(len(all_features) + 1)
weights1e8 = lasso_cyclical_coordinate_descent(normalized_feature_matrix, output, initial_weights, l1_penalty, tolerance)
print weights1e8
l1_penalty=1e4
initial_weights = np.zeros(len(all_features) + 1)
weights1e4 = lasso_cyclical_coordinate_descent(normalized_feature_matrix, output, initial_weights, l1_penalty, tolerance)
print weights1e4
normalized_weights1e4 = weights1e4 / norms
normalized_weights1e7 = weights1e7 / norms
normalized_weights1e8 = weights1e8 / norms
print normalized_weights1e7[3]
(test_feature_matrix, test_output) = get_numpy_data(test_data, all_features, 'price')
predictions = predict_output(test_feature_matrix, normalized_weights1e4)
residuals = predictions - test_output
RSS = (residuals**2).sum()
print "RSS", RSS
predictions = predict_output(test_feature_matrix, normalized_weights1e7)
residuals = predictions - test_output
RSS = (residuals**2).sum()
print "RSS", RSS
predictions = predict_output(test_feature_matrix, normalized_weights1e8)
residuals = predictions - test_output
RSS = (residuals**2).sum()
print "RSS", RSS
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features.
Step3: Also, copy and paste the predict_output() function to compute the predictions for an entire matrix of features given the matrix and the weights
Step4: Normalize features
Step5: Numpy provides a shorthand for computing 2-norms of each column
Step6: To normalize, apply element-wise division
Step7: Using the shorthand we just covered, write a short function called normalize_features(feature_matrix), which normalizes columns of a given feature matrix. The function should return a pair (normalized_features, norms), where the second item contains the norms of original features. As discussed in the lectures, we will use these norms to normalize the test data in the same way as we normalized the training data.
Step8: To test the function, run the following
Step9: Implementing Coordinate Descent with normalized features
Step10: Don't forget to normalize features
Step11: We assign some random set of initial weights and inspect the values of ro[i]
Step12: Use predict_output() to make predictions on this data.
Step13: Compute the values of ro[i] for each feature in this simple model, using the formula given above, using the formula
Step14: QUIZ QUESTION
Step15: QUIZ QUESTION
Step16: So we can say that ro[i] quantifies the significance of the i-th feature
Step17: To test the function, run the following cell
Step18: Cyclical coordinate descent
Step19: Using the following parameters, learn the weights on the sales dataset.
Step20: First create a normalized version of the feature matrix, normalized_simple_feature_matrix
Step21: Then, run your implementation of LASSO coordinate descent
Step22: QUIZ QUESTIONS
Step23: Let us consider the following set of features.
Step24: First, create a normalized feature matrix from the TRAINING data with these features. (Make sure you store the norms for the normalization, since we'll use them later.)
Step25: First, learn the weights with l1_penalty=1e7, on the training data. Initialize weights to all zeros, and set the tolerance=1. Call resulting weights weights1e7, you will need them later.
Step26: QUIZ QUESTION
Step27: QUIZ QUESTION
Step28: QUIZ QUESTION
Step29: To check your results, if you call normalized_weights1e7 the normalized version of weights1e7, then
Step30: Compute the RSS of each of the three normalized weights on the (unnormalized) test_feature_matrix
|
3,543
|
<ASSISTANT_TASK:>
Python Code:
def arb(M):
    """Return an arbitrary element of the non-empty set M.

    Raises AssertionError when M is empty.
    """
    assert M, 'Error: arb called with empty set!'
    return next(iter(M))
def cart_prod(A, B):
    """Return the Cartesian product A x B as a set of pairs (x, y)."""
    result = set()
    for x in A:
        for y in B:
            result.add((x, y))
    return result
def separate(Pairs, States, Σ, 𝛿):
    """Return every pair (q1, q2) of states that some character c in Σ
    maps into a pair already known to be separable (i.e. in Pairs)."""
    return {(q1, q2)
            for c in Σ
            for q1 in States
            for q2 in States
            if (𝛿[q1, c], 𝛿[q2, c]) in Pairs}
def find_equivalence_class(p, Partition):
    """Return the block of Partition that contains the state p."""
    containing = {C for C in Partition if p in C}
    # same contract as arb(): an arbitrary element, asserting non-emptiness
    assert containing, 'Error: arb called with empty set!'
    return next(iter(containing))
def reachable(q0, Σ, 𝛿):
    """Return all states reachable from q0 by following transitions in 𝛿.

    Computes the least fixed point of one-step successor expansion.
    """
    states = {q0}
    done = False
    while not done:
        successors = {𝛿[q, c] for q in states for c in Σ}
        done = successors <= states
        states |= successors
    return states
def all_separable(Q, A, Σ, 𝛿):
    """Return every separable pair of states in Q: pairs that some input
    string drives into exactly one accepting state.

    Starts from pairs distinguished by acceptance (one state in A, the
    other not) and closes under one-step predecessor separation.
    """
    # pairs distinguished immediately: one accepting, one not
    Separable = {(p, q) for p in Q - A for q in A} | \
                {(p, q) for p in A for q in Q - A}
    while True:
        NewPairs = {(q1, q2)
                    for q1 in Q
                    for q2 in Q
                    for c in Σ
                    if (𝛿[q1, c], 𝛿[q2, c]) in Separable}
        if NewPairs <= Separable:
            return Separable
        Separable |= NewPairs
def minimize(F):
    """Minimize the DFA F = (Q, Σ, 𝛿, q0, A).

    Returns an equivalent DFA whose states are the equivalence classes
    (frozensets) of mutually non-separable reachable states.
    """
    Q, Σ, 𝛿, q0, A = F
    Q = reachable(q0, Σ, 𝛿)  # drop unreachable states first
    Separable = all_separable(Q, A, Σ, 𝛿)
    Equivalent = cart_prod(Q, Q) - Separable
    EquivClasses = { frozenset({ p for p in Q if (p, q) in Equivalent })
                     for q in Q
                   }
    newQ0 = arb({ M for M in EquivClasses if q0 in M })
    newAccept = { M for M in EquivClasses if arb(M) in A }
    newDelta = {}
    for q in Q:
        # Fix: compute the class of q up front. Previously it was only
        # assigned inside the `if` branch, so the `else` branch below
        # read an unbound (or stale) classOfQ when a transition was
        # missing for the first (q, c) examined.
        classOfQ = find_equivalence_class(q, EquivClasses)
        for c in Σ:
            p = 𝛿.get((q, c))
            if p is not None:
                newDelta[(classOfQ, c)] = find_equivalence_class(p, EquivClasses)
            else:
                newDelta[(classOfQ, c)] = frozenset()
    return EquivClasses, Σ, newDelta, newQ0, newAccept
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function cart_prod(A, B) computes the Cartesian product $A \times B$ of the sets $A$ and $B$ where $A \times B$ is defined as follows
Step2: The function separate takes four arguments
Step3: Given a state p and a Partition of the set of all states, the function find_equivalence_class(p, Partition) returns the equivalence class of p, i.e. it returns the set from Partition that contains p.
Step4: The function reachable(q0, Σ, 𝛿) takes three arguments
Step5: The function all_separable(Q, A, Σ, 𝛿) takes four arguments
Step6: The function minimize(A) takes a deterministic
|
3,544
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
np.random.seed(0)
p = [3.2, 5.6, 9.2]
x = np.arange(-8., 5., 0.1)
y = np.polyval(p, x) + np.random.randn(x.shape[0])*1.
plt.plot(x, y);
# STEP 1 - define your model
def my_model(p, x):
    """Evaluate the polynomial with coefficients p (highest degree
    first) at the points x; thin wrapper around np.polyval."""
    return np.polyval(p, x)
# STEP 2 - define your cost function
def my_costfun(p, x, y):
    """Sum-of-squares cost between the polynomial model with
    coefficients p evaluated at x and the observations y."""
    residuals = np.polyval(p, x) - y
    return np.sum(residuals ** 2)
# STEP 3 - minimize cost function
from scipy.optimize import minimize
result = minimize(my_costfun, np.array([2., 3., 5.]), args=(x,y) )
print result
print 'RESULT:\n', result
print ''
print 'RELATIVE ERROR:\n', (result.x - p)/p*100., '%'
print ''
print 'Hessian ERROR:' #err = sqrt(diag(inv(Hessian)))
hess_err = np.sqrt(np.diag(result['hess_inv']))
print hess_err
from emcee import EnsembleSampler
def lnprob(theta):
    """Log-probability of a uniform prior on the open box (-3, 3)^ndim:
    0 inside, -inf outside."""
    theta = np.array(theta)
    inside = np.all(np.abs(theta) < 3.)
    return 0 if inside else -np.inf
nwalkers = 10
ndim = 3
p0 = [np.random.rand(ndim) for i in range(nwalkers)]
sampler = EnsembleSampler(nwalkers, ndim, lnprob)
pos = sampler.run_mcmc(p0, 2000)
np.corrcoef(sampler.flatchain[0:2000, 0], sampler.flatchain[2000:4000, 0])
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(311)
ax.plot(sampler.chain[:,:,0].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(312)
ax.plot(sampler.chain[:,:,1].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(313)
ax.plot(sampler.chain[:,:,2].T, '-', color='k', alpha=0.3);
import corner
fig = corner.corner(sampler.flatchain, labels=["p0", "p1", "p2"],
truths=[0., 0., 0.])
# fig.savefig("triangle.png")
def lnprob(x, mu, ivar):
    """Unnormalized log-density of a multivariate Gaussian:
    -(x - mu)^T ivar (x - mu), returned as a 1x1 array."""
    diff = x.reshape(-1, 1) - mu.reshape(-1, 1)
    return -diff.T @ ivar @ diff
mu = np.array([0.1, 0.2, 0.5])
cov = np.array([[1.0, 0.0, 0.0],
[0.0, 10, 9],
[0.0, 9, 10]])
ivar = np.linalg.inv(cov)
print 'ivar: \n', ivar
print 'det(cov): \n', np.linalg.det(cov)
print 'det(ivar): \n', np.linalg.det(ivar)
nwalkers = 10
ndim = 3
p0 = [np.random.rand(ndim) for i in range(nwalkers)]
sampler = EnsembleSampler(nwalkers, ndim, lnprob, args=(mu, ivar), threads=10)
pos,prob,state = sampler.run_mcmc(p0, 2000)
p0
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(311)
ax.plot(sampler.chain[:,:,0].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(312)
ax.plot(sampler.chain[:,:,1].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(313)
ax.plot(sampler.chain[:,:,2].T, '-', color='k', alpha=0.3);
fig = corner.corner(sampler.flatchain, labels=["mu1", "mu2", "mu3"],
truths=mu)
print mu
print ivar
def lnprior(theta):
    """Flat prior: 0 when every |theta_i| < 10000, -inf otherwise."""
    within_bounds = np.all(np.abs(theta) < 10000.)
    return 0 if within_bounds else -np.inf
def lnlike(theta, x, y):
    """Log-likelihood up to a constant: negative sum of squared
    residuals of the polynomial model theta evaluated at x against y."""
    residuals = y - np.polyval(theta, x)
    return -np.sum(residuals ** 2)
def lnprob(theta, x, y):
    """Log-posterior: flat prior on |theta_i| < 10000 plus the
    negative-sum-of-squares log-likelihood (inlined lnprior + lnlike)."""
    prior = 0 if np.all(np.abs(theta) < 10000.) else -np.inf
    likelihood = -np.sum((np.polyval(theta, x) - y) ** 2)
    return prior + likelihood
nwalkers = 10
ndim = 3
p0 = [np.random.rand(ndim) for i in range(nwalkers)]
sampler = EnsembleSampler(nwalkers, ndim, lnprob, args=(x, y), threads=10)
pos,prob,state = sampler.run_mcmc(p0, 500)
np.corrcoef(sampler.flatchain[0:500, 0], sampler.flatchain[500:1000, 0])
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(311)
ax.plot(sampler.chain[:,:,0].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(312)
ax.plot(sampler.chain[:,:,1].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(313)
ax.plot(sampler.chain[:,:,2].T, '-', color='k', alpha=0.3);
fig = corner.corner(sampler.flatchain, labels=["p0", "p1", "p2"],
truths=p)
sampler.reset()
pos,prob,state = sampler.run_mcmc(pos, 2000)
np.corrcoef(sampler.flatchain[0:2000, 0], sampler.flatchain[4000:6000, 0])
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(311)
ax.plot(sampler.chain[:,:,0].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(312)
ax.plot(sampler.chain[:,:,1].T, '-', color='k', alpha=0.3)
ax = fig.add_subplot(313)
ax.plot(sampler.chain[:,:,2].T, '-', color='k', alpha=0.3);
fig = corner.corner(sampler.flatchain, labels=["p0", "p1", "p2"],
truths=p)
fig = corner.corner(sampler.flatchain, labels=["p0", "p1", "p2"],
truths=result.x)
# truth
p
# MCMC results
np.percentile(sampler.flatchain, [15., 50., 85.], axis=0)
print result.x - hess_err
print result.x
print result.x + hess_err
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MCMC (emcee)
Step2: a simple example - draw sample from uniformly distribution
Step3: how about Gaussian distribution?
Step4: how to use MCMC to estimate model parameters?
Step5: comparison with the results from optimization
|
3,545
|
<ASSISTANT_TASK:>
Python Code:
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data_utils
import numpy as np
word_pair = [['고양이', '흰'],
['고양이', '동물'],
['국화', '흰'],
['국화', '식물'],
['선인장', '초록'],
['선인장', '식물'],
['강아지', '검은'],
['강아지', '동물'],
['타조', '회색'],
['타조', '동물'],
['코끼리', '회색'],
['코끼리', '동물'],
['장미', '빨간'],
['장미', '식물'],
['자동차', '빨간'],
['그릇', '빨간'],
['민들레', '식물'],
['민들레', '흰']]
word_list = set(np.array(word_pair).flatten())
word_dict = {w: i for i, w in enumerate(word_list)}
skip_grams = [[word_dict[word[0]], word_dict[word[1]]] for word in word_pair]
label = torch.LongTensor(skip_grams)[:, 0].contiguous()
context = torch.LongTensor(skip_grams)[:, 1].contiguous()
skip_grams_dataset = data_utils.TensorDataset(label, context)
train_loader = torch.utils.data.DataLoader(skip_grams_dataset, batch_size=8, shuffle=True)
test_loader = torch.utils.data.DataLoader(skip_grams_dataset, batch_size=1, shuffle=False)
class _model(nn.Module) :
def __init__(self):
super(_model, self).__init__()
self.embedding = nn.Embedding(len(word_list), 2)
self.linear = nn.Linear(2, len(word_list), bias=True)
def forward(self, x):
x = self.embedding(x)
x = self.linear(x)
return F.log_softmax(x)
model = _model()
loss_fn = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
model.train()
for epoch in range(100):
for data, target in train_loader:
data, target = Variable(data), Variable(target) #(입력 생성)
output = model(data) # model 생성
loss = F.nll_loss(output, target) #loss 생성
optimizer.zero_grad() # zeroGrad
loss.backward() # calc backward gradients
optimizer.step() # update parameters
model.eval()
invDic = { i : w for w, i in word_dict.items()}
print('Input : true : pred')
for x, y in test_loader :
x, y = Variable(x.squeeze()), y.squeeze()
y_pred = model(x).max(1)[1].data[0][0]
print('{:s} : {:s} : {:s}'.format(invDic[x.data[0]], invDic[y[0]], invDic[y_pred]))
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
matplotlib.rc('font', family="NanumGothic")
for i in label :
x = Variable(torch.LongTensor([i]))
fx, fy = model.embedding(x).squeeze().data
plt.scatter(fx, fy)
plt.annotate(invDic[i], xy=(fx, fy), xytext=(5, 2),
textcoords='offset points', ha='right', va='bottom')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Dataset 준비
Step2: Dataset Loader 설정
Step3: 2. 사전 설정
Step4: 3. Trainning loop
Step5: 4. Predict & Evaluate
Step6: 5. plot embedding space
|
3,546
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
%matplotlib inline
import cartopy
import cartopy.crs as ccrs
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
print('axes type:', type(ax))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
ax.set_global()
plt.plot([-100, 50], [25, 25], linewidth=4, color='r', transform=ccrs.PlateCarree())
plt.plot([-100, 50], [25, 25], linewidth=4, color='b', transform=ccrs.Geodetic())
ax = plt.axes(projection=ccrs.Mercator())
ax.coastlines()
gl = ax.gridlines(draw_labels=True)
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LATITUDE_FORMATTER
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
gl = ax.gridlines(draw_labels=True)
gl.xlocator = mticker.FixedLocator([-180, -45, 0, 45, 180])
gl.yformatter = LATITUDE_FORMATTER
fig = plt.figure(figsize=(9, 6))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree())
ax.set_global()
lons = -75, 77.2, 151.2, -75
lats = 43, 28.6, -33.9, 43
ax.plot(lons, lats,
color='green', linewidth=2, marker='o', ms=10,
transform=ccrs.Geodetic())
# feature = cartopy.feature.LAND
feature = cartopy.feature.NaturalEarthFeature(name='land', category='physical',
scale='110m',
edgecolor='red', facecolor='black')
ax.add_feature(feature)
_ = ax.add_feature(cartopy.feature.LAKES, facecolor='b')
states = cartopy.feature.NaturalEarthFeature(category='cultural', scale='50m', facecolor='none',
name='admin_1_states_provinces_lines')
_ = ax.add_feature(states, edgecolor='gray')
url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_wmts(url, 'VIIRS_CityLights_2012')
import numpy as np
x = np.linspace(310, 390, 25)
y = np.linspace(-24, 25, 35)
x2d, y2d = np.meshgrid(x, y)
data = np.cos(np.deg2rad(y2d) * 4) + np.sin(np.deg2rad(x2d) * 4)
rot_crs = ccrs.RotatedPole(177.5, 37.5)
ax = plt.axes(projection=rot_crs)
ax.coastlines()
fig = plt.figure()
ax = fig.add_subplot(111, projection=ccrs.PlateCarree())
ax.contourf(x2d, y2d, data, transform=rot_crs)
ax.coastlines()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then let's import the cartopy
Step2: In addition, we import cartopy's coordinate reference system submodule
Step3: Creating GeoAxes
Step4: Here we are using a Plate Carrée projection, which is one of equidistant cylindrical projections.
Step5: Notice that unless we specify a map extent (we did so via the set_global method in this case) the map will zoom into the range of the plotted data.
Step6: Unfortunately, gridline labels work only in PlateCarree and Mercator projections.
Step7: Plotting layers directly from Web Map Service (WMS) and Web Map Tile Service (WMTS)
Step8: Exercise
Step9: Idea 1
|
3,547
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import make_blobs
#create data
data = make_blobs(n_samples=200,n_features=2,centers=4,cluster_std=1.8,random_state=101)
plt.scatter(data[0][:,0],data[0][:,1],c=data[1],cmap='rainbow')
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=4)
kmeans.fit(data[0])
kmeans.cluster_centers_
kmeans.labels_
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True,figsize=(10,6))
ax1.set_title('K Means')
ax1.scatter(data[0][:,0],data[0][:,1],c=kmeans.labels_,cmap='rainbow')
ax2.set_title("Original")
ax2.scatter(data[0][:,0],data[0][:,1],c=data[1],cmap='rainbow')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create some data
Step2: Visualize data
Step3: Creating Clusters
|
3,548
|
<ASSISTANT_TASK:>
Python Code:
from pytrends.request import TrendReq
google_username = "mm.trends.api@gmail.com"
google_password = ""
path = ""
# connect to Google
pytrend = TrendReq(google_username, google_password, custom_useragent='Pytrends')
trend_payload = {'q': 'Pizza, Italian, Spaghetti, Breadsticks, Sausage', 'cat': '0-71'}
# trend
trend = pytrend.trend(trend_payload)
print(trend)
df = pytrend.trend(trend_payload, return_type='dataframe')
df
# toprelated
toprelated = pytrend.related(trend_payload, related_type='top')
print(toprelated)
risingrelated = pytrend.related(trend_payload, related_type='rising')
print(risingrelated)
# top30in30
top30in30 = pytrend.top30in30()
print(top30in30)
country_payload = {'geo': 'US'}
# hottrends
hottrends = pytrend.hottrends(country_payload)
print(hottrends)
# hottrendsdetail
# returns XML data
hottrendsdetail = pytrend.hottrendsdetail(country_payload)
print(hottrendsdetail)
payload = {'date': '201601', 'geo': 'US'}
# alltopcharts
topcharts = pytrend.topcharts(payload)
print(topcharts)
keyword = 'pizza'
# suggestions
suggestions = pytrend.suggestions(keyword)
print(suggestions)
gt = pd.read_table?
gt = pd.read_clipboard(index_col)
gt
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: GOOGLEINDEX_US
|
3,549
|
<ASSISTANT_TASK:>
Python Code:
%run db2.ipynb
%%sql -q
DROP TABLE CENTRAL_LINE;
CREATE TABLE CENTRAL_LINE
(
STATION_NO INTEGER GENERATED ALWAYS AS IDENTITY,
STATION VARCHAR(31),
UPPER_STATION VARCHAR(31) GENERATED ALWAYS AS (UCASE(STATION))
)
;
INSERT INTO CENTRAL_LINE(STATION)
VALUES 'West Ruislip','Ruislip Gardens','South Ruislip','Northolt','Greenford',
'Perivale','Hanger Lane','Ealing Broadway','West Acton','North Acton',
'East Acton','White City','Shepherd''s Bush','Holland Park','Notting Hill Gate',
'Queensway','Lancaster Gate','Marble Arch','Bond Street','Oxford Circus',
'Tottenham Court Road','Holborn','Chancery Lane','St. Paul''s','Bank',
'Liverpool Street','Bethnal Green','Mile End','Stratford','Leyton',
'Leytonstone','Wanstead','Redbridge','Gants Hill','Newbury Park',
'Barkingside','Fairlop','Hainault','Grange Hill','Chigwell',
'Roding Valley','Snaresbrook','South Woodford','Woodford','Buckhurst Hill',
'Loughton','Debden','Theydon Bois','Epping'
;
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'Ruislip')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE STATION LIKE '%Ruislip%'
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE STATION LIKE 'Ruislip%'
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'RUISLIP','i')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'^Ruislip')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'Ruislip$')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'^Leyton$')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'^Leyton');
%%sql -q
DROP TABLE LONGLINE;
CREATE TABLE LONGLINE (NAME VARCHAR(255));
INSERT INTO LONGLINE
VALUES 'George' || CHR(10) || 'Katrina';
%%sql
SELECT COUNT(*) FROM LONGLINE
WHERE REGEXP_LIKE(NAME,'^Katrina$')
%%sql
SELECT COUNT(*) FROM LONGLINE
WHERE REGEXP_LIKE(NAME,'^Katrina$','m')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'way|ing')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'way| ing')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'way| ing','x')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(way)|(ing)')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(ing)*.(way)')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(ing).*(way)')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(an)+')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(an){1,}')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(an){2}')
%%sql
SELECT STATION,
REGEXP_INSTR(STATION,'(an)') AS LOCATION,
REGEXP_EXTRACT(STATION,'(an)') AS EXTRACT
FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(an)')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'((an).*){2}')
%%sql
SELECT STATION,
REGEXP_INSTR(STATION,'((an).*){2}') AS LOCATION,
REGEXP_EXTRACT(STATION,'((an).*){2}') AS EXTRACT
FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'((an).*){2}')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'^[P-R]')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'[p-rP-R]')
%%sql
VALUES
CASE
WHEN REGEXP_LIKE('123-34-1422','[0-9]{3}-[0-9]{2}-[0-9]{4}') THEN 'Valid'
ELSE 'Invalid'
END
%%sql
WITH SSNS(SSN) AS (
VALUES
'123-34-1322',
'ABC-34-9999',
'X123-44-0001',
'123X-Y44-Z0001',
'111-222-111'
)
SELECT SSN,
CASE
WHEN REGEXP_LIKE(SSN,'[0-9]{3}-[0-9]{2}-[0-9]{4}') THEN 'Valid'
ELSE 'Invalid'
END
FROM SSNS
%%sql
WITH SSNS(SSN) AS (
VALUES
'123-34-1322',
'ABC-34-9999',
'X123-44-0001',
'123X-Y44-Z0001',
'111-222-111'
)
SELECT SSN,
CASE
WHEN REGEXP_LIKE(SSN,'^[0-9]{3}-[0-9]{2}-[0-9]{4}$') THEN 'Valid'
ELSE 'Invalid'
END
FROM SSNS
%%sql -a
WITH SSNS(SSN) AS (
VALUES
'123-34-1322',
'ABC-34-9999',
'X123-44-0001',
'123X-Y44-Z0001',
'111-222-111'
)
SELECT 'Original', SSN,
CASE
WHEN REGEXP_LIKE(SSN,'^[0-9]{3}-[0-9]{2}-[0-9]{4}$') THEN 'Valid'
ELSE 'Invalid'
END
FROM SSNS
UNION ALL
SELECT 'Posix', SSN,
CASE
WHEN REGEXP_LIKE(SSN,'^[:digit:]{3}-[:digit:]{2}-[:digit:]{4}$') THEN 'Valid'
ELSE 'Invalid'
END
FROM SSNS
UNION ALL
SELECT 'Escape', SSN,
CASE
WHEN REGEXP_LIKE(SSN,'^\d{3}-\d{2}-\d{4}$') THEN 'Valid'
ELSE 'Invalid'
END
FROM SSNS
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'^West')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE NOT REGEXP_LIKE(STATION,'^West')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'^(?!West)')
%%sql
WITH SSNS(SSN) AS (
VALUES
'123-456-789',
'123-555-123',
'890-533-098',
'123-456-456'
)
SELECT SSN,
CASE
WHEN REGEXP_LIKE(SSN,'^([0-9]{3})-([0-9]{3})-([0-9]{3})$') THEN 'Valid'
ELSE 'Invalid'
END
FROM SSNS
%%sql
WITH SSNS(SSN) AS (
VALUES
'123-456-789',
'123-555-123',
'890-533-098',
'123-456-456'
)
SELECT SSN,
CASE
WHEN REGEXP_LIKE(SSN,'^([0-9]{3})-([0-9]{3})-(?!\1)([0-9]{3})$') THEN 'Valid'
ELSE 'Invalid'
END
FROM SSNS
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(West)')
%%sql
SELECT STATION FROM CENTRAL_LINE
WHERE REGEXP_LIKE(STATION,'(?:West)')
%%sql -d -q
DROP TABLE TEMP_LINE
@
CREATE TABLE TEMP_LINE AS (SELECT * FROM CENTRAL_LINE) DEFINITION ONLY NOT LOGGED INITIALLY
@
BEGIN
DECLARE I INTEGER DEFAULT 0;
WHILE I <= 1000 DO
INSERT INTO TEMP_LINE SELECT * FROM CENTRAL_LINE;
SET I = I + 1;
END WHILE;
END
@
SELECT COUNT(*) FROM TEMP_LINE
@
results_like = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE STATION LIKE '%West%'
results_string = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'West')
results_capturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(West)')
results_noncapturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(?:West)')
%sql -q DROP TABLE RESULTS
%sql CREATE TABLE RESULTS(TYPE VARCHAR(16), RESULT DEC(9,2))
%sql INSERT INTO RESULTS VALUES ('LIKE', {results_like} ), \
('REGX STRING', {results_string} ), \
('REGX CAPTURE', {results_capturing} ), \
('REGX NONCAPTURE', {results_noncapturing} )
%sql SELECT * FROM RESULTS
%sql -pb SELECT * FROM RESULTS
%%sql -q
DROP INDEX TEMP_STATION_INDEX;
CREATE INDEX TEMP_STATION_INDEX ON TEMP_LINE(STATION);
results_like = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE STATION LIKE '%West%'
results_string = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'West')
results_capturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(West)')
results_noncapturing = %sql -t SELECT COUNT(*) FROM TEMP_LINE WHERE REGEXP_LIKE(STATION,'(?:West)')
%sql -q DROP TABLE RESULTS
%sql -q CREATE TABLE RESULTS(TYPE VARCHAR(16), RESULT DEC(9,2))
%sql -q INSERT INTO RESULTS VALUES ('LIKE', {results_like} ), \
('REGX STRING', {results_string} ), \
('REGX CAPTURE', {results_capturing} ), \
('REGX NONCAPTURE', {results_noncapturing} )
%sql SELECT * FROM RESULTS
%sql -pb SELECT * FROM RESULTS
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table of Contents
Step2: Back to Top
Step3: The pattern 'Ruislip' will look for a match of Ruislip
Step4: If you didn't place the % at the beginning of the LIKE
Step5: If you want to match Ruislip with upper or lower case being
Step6: Back to Top
Step7: To match a pattern at the end of the string, the dollar sign
Step8: To force an exact match with a string you would use both the
Step9: Note that if we didn't use the end anchor, we are going to
Step10: Back to Top
Step11: Searching for Katrina at the beginning and end of string
Step12: We can override the regular expression search by telling it
Step13: Back to Top
Step14: Some things to be aware of when creating the search pattern.
Step15: Using the "x" flag will ignore blanks in your pattern, so
Step16: Brackets can be used to make it clear what the pattern is
Step17: Back to Top
Step18: The previous answer gave you two results (Ealing Broadway
Step19: Finding at least one occurrence of a pattern requires the
Step20: If we want to find an exact number of occurrences, we need
Step21: If we want to match exactly 2 'an' patterns in a string, we
Step22: Sadly, we get no results! This would appear to be the wrong
Step23: What you should see in the previous result is the location
Step24: You should find that two stations match the pattern. The
Step25: Back to Top
Step26: If you wanted to include all stations that have the letter
Step27: Back to our SSN question. Can a regular expression pattern
Step28: The SSN is valid in the example above. Here are some other
Step29: If you check closely, one of the strings was marked as
Step30: Back to Top
Step31: Back to Top
Step32: Adding the NOT modifier in front of the REGEXP function
Step33: You can also negate some of the searches in a pattern by
Step34: Back to Top
Step35: All of these numbers fit the pattern and should be valid.
Step36: In many cases it may be easier to find the patterns that
Step37: The following SQL is equivalent, except that the matched
Step38: Back to Top
Step39: We will run four queries in the following SQL.
Step40: The results are placed into a temporary table for easier formatting.
Step41: The results are clearer when we plot them all on one graph!
Step42: Every system will have different performance
Step43: We will try the 4 queries again and plot the result.
|
3,550
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 2
sample_id = 7
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
# TODO: Implement Function
return x/255;
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
one_hot_map = np.eye(10)
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
# TODO: Implement Function
return one_hot_map[x]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
# TODO: Implement Function
print( len(image_shape) )
x = tf.placeholder(tf.float32,(None,)+image_shape, name="x")
return x
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
# TODO: Implement Function
y = tf.placeholder(tf.float32,[None,n_classes], name="y")
return y
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
# TODO: Implement Function
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
return keep_prob
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernal size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
# find # of input channels and create weight tensor
channels = x_tensor.get_shape().as_list()[3]
weight_dimension = conv_ksize + (channels,) + (conv_num_outputs,)
weight = tf.Variable( tf.truncated_normal( weight_dimension, mean=0.0, stddev=0.1 ) )
# conv layer
bias = tf.Variable(tf.zeros(conv_num_outputs))
conv_layer = tf.nn.conv2d(x_tensor, weight, (1,) + conv_strides + (1,), padding='SAME')
conv_layer = tf.nn.bias_add(conv_layer, bias)
conv_layer = tf.nn.relu(conv_layer)
# max pooling
conv_layer = tf.nn.max_pool( conv_layer, (1,) + pool_ksize + (1,), (1,) + pool_strides + (1,), padding='SAME')
return conv_layer
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# TODO: Implement Function
return tf.contrib.layers.flatten(x_tensor)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
return tf.contrib.layers.fully_connected(
inputs=x_tensor,
num_outputs=num_outputs,
activation_fn=tf.nn.relu,
biases_initializer=tf.zeros_initializer,
weights_initializer=lambda size, dtype, partition_info: tf.truncated_normal(shape=size,dtype=dtype,mean=0.0,stddev=0.1)
)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
# TODO: Implement Function
return tf.contrib.layers.fully_connected(
inputs=x_tensor,
num_outputs=num_outputs,
weights_initializer=lambda size, dtype, partition_info: tf.truncated_normal(shape=size,dtype=dtype,mean=0.0,stddev=0.1)
)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
x = conv2d_maxpool(x, 16, (4,4), (1,1), (2,2), (1,1))
x = conv2d_maxpool(x, 32, (4,4), (1,1), (2,2), (1,1))
x = conv2d_maxpool(x, 64, (4,4), (1,1), (2,2), (1,1))
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
x = flatten(x)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
x = fully_conn(x, 512)
x = tf.nn.dropout(x, keep_prob)
x = fully_conn(x, 256)
x = tf.nn.dropout(x, keep_prob)
x = fully_conn(x, 64)
x = tf.nn.dropout(x, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
logits = output(x,10)
# TODO: return output
return logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# TODO: Implement Function
session.run( optimizer, feed_dict={
x: feature_batch,
y: label_batch,
keep_prob: keep_probability
})
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
cost = session.run( cost, feed_dict={
x: feature_batch,
y: label_batch,
keep_prob: 1.0
})
validation = session.run( accuracy, feed_dict={
x: valid_features,
y: valid_labels,
keep_prob: 1.0
})
print( "cost: {}, accuracy: {}".format(cost, validation))
# TODO: Tune Parameters
epochs = 20
batch_size = 128
keep_probability = 0.5
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
3,551
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import torch
a, b = load_data()
ab = torch.cat((a, b), 0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
3,552
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import math
import cmath
from scipy.optimize import root
import matplotlib.pyplot as plt
%matplotlib inline
a = ("Table1.txt")
a
class InterfazPolimero:
def __init__ (self,a):
self.a=a
def Lire(self):
self.tab = pd.read_csv(self.a,sep=" ")
coef =self.tab.values
self.Experiment = coef[:,0]
self.Thickness = coef[:,1]
self.FoodSimulant = coef[:,2]
self.Cpo = coef[:,3]
self.K = coef [:,4]
self.Dp = coef[:,5]
self.RMSE = coef[:,6]
self.k = coef[:,7]
self.c4 = coef[:,8]
# self.c1 =coef[:,9]
self.c2 = np.zeros(10)
return self.tab
def inicializarC2(self):
self.c2 = np.zeros(10)
self.dimension = np.shape(self.c2)
print(self.dimension)
return self.c2
def calcul(self):
self.tab["j1"] = (self.tab["Dp"] / (self.tab["Thickness"] / 2)) * (self.tab["Cpo"] - self.c2)
print(self.tab["j1"])
self.c3 = self.c2 / self.K
self.j2 = self.k * (self.c3 - self.tab["c4"])
return (self.tab["j1"] - self.j2) / self.tab["j1"]
def calcul2(self):
i = 0
for self.tab["Thickness"], self.tab["Dp"], self.tab["K"], self.tab["k"], self.tab["c"] in enumerate(tab):
self.sol = root(calcul,15,args=(float(self.tab["Dp"]),float(self.tab["k"]),float(self.tab["K"]),float(self.tab["c4"]),float(self.tab["Cpo"]),float(self.tab["Thickness"])))
c2[i]= self.sol.x
i = i + 1
print(self.c2)
return self.c2
def Garder(self):
raw_data ={"résultat" : [1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793]}
df = pd.DataFrame(raw_data,index=["1","2","3","4","5","6","7","8","9","10"])
df.to_csv("c2rep")
return df
def Graphique(self):
plt.plot(self.tab["Dp"],self.Cpo,"^")
plt.title("f(Dp)=Cpo")
plt.xlabel("Dp")
plt.ylabel("Cpo")
def Graphique2(self):
plt.plot(self.tab["Dp"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
plt.title("f(Dp)=c2")
plt.xlabel("Dp")
plt.ylabel("c2")
def Graphique3(self):
plt.plot(self.tab["Cpo"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
plt.title("f(Cpo)=c2")
plt.xlabel("Cpo")
plt.ylabel("c2")
def Graphique4(self):
plt.plot(self.tab["Thickness"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
plt.title("f(Epaisseur)=c2")
plt.xlabel("Epaisseur")
plt.ylabel("c2")
def Graphique5(self):
fig,axes=plt.subplots(2,2)
axes[0,0].plot(self.tab["Dp"],self.Cpo,"^")
axes[1,1].plot(self.tab["Dp"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
axes[0,1].plot(self.tab["Cpo"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
axes[1,0].plot(self.tab["Thickness"],[1.115510936772821, 1.0542169426645587, 1.041340418781726, 1.0219,1.4353658536585368, 1.0542169426645587, 1.058921125781793,1.0217682926829268, 1.05340368852459, 1.058921125781793],"^")
p = InterfazPolimero("Table1.txt")
p
p.Lire()
p.calcul()
p.Graphique()
p.Graphique2()
p.Graphique3()
p.Graphique4()
p.Graphique5()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Polymère
Step2: Calcul de la concentration finale
Step3: Table des valeurs
Step4: Calcul de c2
Step5: Graphique
Step6: Graphique
Step7: Graphique
Step8: Grapgique
Step9: Nous remarquons que réunir tous les graphiques n'est pas spécialement adéquat car les résultats sont totalement illisibles.
|
3,553
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import colab
!pip install --upgrade pip
except:
pass
!pip install tensorflow==2.2.0
import pprint
import tempfile
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
raw_data = [
{'x': 1, 'y': 1, 's': 'hello'},
{'x': 2, 'y': 2, 's': 'world'},
{'x': 3, 'y': 3, 's': 'hello'}
]
raw_data_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec({
'y': tf.io.FixedLenFeature([], tf.float32),
'x': tf.io.FixedLenFeature([], tf.float32),
's': tf.io.FixedLenFeature([], tf.string),
}))
def preprocessing_fn(inputs):
Preprocess input columns into transformed columns.
x = inputs['x']
y = inputs['y']
s = inputs['s']
x_centered = x - tft.mean(x)
y_normalized = tft.scale_to_0_1(y)
s_integerized = tft.compute_and_apply_vocabulary(s)
x_centered_times_y_normalized = (x_centered * y_normalized)
return {
'x_centered': x_centered,
'y_normalized': y_normalized,
's_integerized': s_integerized,
'x_centered_times_y_normalized': x_centered_times_y_normalized,
}
def main():
# Ignore the warnings
with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
transformed_dataset, transform_fn = ( # pylint: disable=unused-variable
(raw_data, raw_data_metadata) | tft_beam.AnalyzeAndTransformDataset(
preprocessing_fn))
transformed_data, transformed_metadata = transformed_dataset # pylint: disable=unused-variable
print('\nRaw data:\n{}\n'.format(pprint.pformat(raw_data)))
print('Transformed data:\n{}'.format(pprint.pformat(transformed_data)))
if __name__ == '__main__':
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 使用 TensorFlow Transform 预处理数据
Step2: 安装 TensorFlow Transform
Step3: 是否已重新启动运行时?
Step4: 数据:创建一些虚拟数据
Step6: Transform:创建一个预处理函数
Step7: 总结
|
3,554
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
# Code here
from sklearn.datasets import load_iris
iris_dataset = load_iris()
features = iris_dataset.feature_names
data = iris_dataset.data
targets = iris_dataset.target
df = pd.DataFrame(data, columns=features)
pd.plotting.scatter_matrix(df, c=targets, figsize=(15,15),
marker='o', hist_kwds={'bins': 20}, s=60,
alpha=.8);
# Code here
# Code here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Question 1
Step2: Question 2
Step3: Create a pair-plot of the iris dataset similar to this figure using only numpy and
Step4: Question 3
|
3,555
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# Configurable test settings
channel_count = 3 # Simulate sampling from multiple channels.
sample_count = 8 # Number of samples (each sample -> one value per channel).
N = channel_count * sample_count
src_data = np.arange(1, N + 1, dtype='uint8')
src_chunks = [src_data[i * channel_count:(i + 1) * channel_count]
for i in xrange(sample_count)]
dst_data = np.column_stack(src_chunks).ravel()
for i, chunk in enumerate(src_chunks):
print 'SOURCE%d:' % i, chunk
# Show interleaved result
print 'TARGET:', dst_data
from teensy_minimal_rpc import SerialProxy
import teensy_minimal_rpc.DMA as dma
# Disconnect from existing proxy (if available)
try:
del proxy
except NameError:
pass
proxy = SerialProxy()
proxy.free_all()
# Allocate source array
src_addr = proxy.mem_alloc(N)
# Allocate destination array
dst_addr = proxy.mem_alloc(N)
src_addrs = [src_addr + i * channel_count for i in xrange(sample_count)]
tcds_addr = proxy.mem_aligned_alloc(32, sample_count * 32)
hw_tcds_addr = 0x40009000
tcd_addrs = [tcds_addr + 32 * i for i in xrange(sample_count)]
hw_tcd_addrs = [hw_tcds_addr + 32 * i for i in xrange(sample_count)]
# Fill first 16 bytes of source array with the numbers 1-N
proxy.mem_cpy_host_to_device(src_addr, src_data)
for i in xrange(sample_count):
print 'SOURCE%d: ' % i, proxy.mem_cpy_device_to_host(src_addrs[i], channel_count)
# Create Transfer Control Descriptor configuration for first chunk, encoded
# as a Protocol Buffer message.
tcd0_msg = dma.TCD(CITER_ELINKNO=dma.R_TCD_ITER_ELINKNO(ITER=1),
BITER_ELINKNO=dma.R_TCD_ITER_ELINKNO(ITER=1),
ATTR=dma.R_TCD_ATTR(SSIZE=dma.R_TCD_ATTR._8_BIT,
DSIZE=dma.R_TCD_ATTR._8_BIT),
NBYTES_MLNO=channel_count,
SADDR=int(src_addrs[0]),
SOFF=1,
SLAST=-channel_count,
DADDR=int(dst_addr),
DOFF=sample_count,
DLASTSGA=int(tcd_addrs[1]),
CSR=dma.R_TCD_CSR(START=0, DONE=False, ESG=True))
# Convert Protocol Buffer encoded TCD to bytes structure.
tcd0 = proxy.tcd_msg_to_struct(tcd0_msg)
# Create binary TCD struct for each TCD protobuf message and copy to device
# memory.
for i, src_addr_i in enumerate(src_addrs):
tcd_i = tcd0.copy()
tcd_i['SADDR'] = src_addr_i
tcd_i['DADDR'] = dst_addr + i
tcd_i['DLASTSGA'] = tcd_addrs[(i + 1) % len(tcd_addrs)]
tcd_i['CSR'] |= (1 << 4) # | 0x1 # Automatically start transfers 2-n
# __N.B.,__ Setting `START` bit causes destination bus error.
# if i > 0:
# tcd_i['CSR'] |= 0x1 # Automatically start transfers 2-n
proxy.mem_cpy_host_to_device(tcd_addrs[i], tcd_i.tostring())
# Fill the destination array with all zeros (to show transfer progress below).
proxy.mem_fill_uint32(dst_addr, 0, N / 4)
for i in xrange(sample_count):
print 'SOURCE%d: ' % i, proxy.mem_cpy_device_to_host(src_addrs[i], channel_count)
# Load initial TCD to DMA channel 0.
proxy.mem_cpy_host_to_device(hw_tcd_addrs[0], tcd0.tostring())
print 'DEST:'
# Trigger once per chunk
for i in xrange(sample_count):
proxy.update_dma_registers(dma.Registers(SSRT=0))
device_dst_data = proxy.mem_cpy_device_to_host(dst_addr, N)
print ' Trigger %d:' % i, device_dst_data
# Verify device result matches expected result computed on host.
assert((device_dst_data == dst_data).all())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulate concatenate behaviour on host (i.e., using numpy)
Step2: Device
Step3: Allocate arrays
Step4: Create Transfer Control Descriptor (TCD) configurations
Step5: Load first TCD in scatter chain and enable scatter/gather
|
3,556
|
<ASSISTANT_TASK:>
Python Code:
from notebook_preamble import J, V, define
define('pair_up == dup uncons swap unit concat zip')
J('[1 2 3] pair_up')
J('[1 2 2 3] pair_up')
define('total_matches == 0 swap [i [=] [pop +] [popop] ifte] step')
J('[1 2 3] pair_up total_matches')
J('[1 2 2 3] pair_up total_matches')
define('AoC2017.1 == pair_up total_matches')
J('[1 1 2 2] AoC2017.1')
J('[1 1 1 1] AoC2017.1')
J('[1 2 3 4] AoC2017.1')
J('[9 1 2 1 2 1 2 9] AoC2017.1')
J('[9 1 2 1 2 1 2 9] AoC2017.1')
J('[1 2 3 4] dup size 2 / [drop] [take reverse] cleave concat zip')
J('[1 2 3 4] dup size 2 / [drop] [take reverse] cleave zip')
define('AoC2017.1.extra == dup size 2 / [drop] [take reverse] cleave zip swap pop total_matches 2 *')
J('[1 2 1 2] AoC2017.1.extra')
J('[1 2 2 1] AoC2017.1.extra')
J('[1 2 3 4 2 5] AoC2017.1.extra')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I'll assume the input is a Joy sequence of integers (as opposed to a string or something else.)
Step2: Now we need to derive total_matches. It will be a step function
Step3: Now we can define our main program and evaluate it on the examples.
Step4: pair_up == dup uncons swap unit concat zip
Step5: I realized that each pair is repeated...
|
3,557
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import shutil
print(tf.__version__)
tf.enable_eager_execution()
CSV_COLUMN_NAMES = ["fare_amount","dayofweek","hourofday","pickuplon","pickuplat","dropofflon","dropofflat"]
CSV_DEFAULTS = [[0.0],[1],[0],[-74.0], [40.0], [-74.0], [40.7]]
def parse_row(row):
fields = # TODO: Your code goes here
features = # TODO: Your code goes here
labels = # TODO: Your code goes here
return features, label
a_row = "0.0,1,0,-74.0,40.0,-74.0,40.7"
features, labels = parse_row(a_row)
assert labels.numpy() == 0.0
assert features["pickuplon"].numpy() == -74.0
print("You rock!")
def read_dataset(csv_path):
    """Build a dataset of (features, labels) pairs from CSV file(s).

    csv_path: path (or glob pattern) of the CSV file(s) to read.
    """
    # TODO(exercise): create a line-based dataset from csv_path, then map
    # parse_row over each line to produce (features, labels) pairs.
    dataset = # TODO: Your code goes here
    dataset = # TODO: Your code goes here
    return dataset
%%writefile test.csv
fare_amount,dayofweek,hourofday,pickuplon,pickuplat,dropofflon,dropofflat
28,1,0,-73.0,41.0,-74.0,20.7
12.3,1,0,-72.0,44.0,-75.0,40.6
10,1,0,-71.0,41.0,-71.0,42.9
for feature, label in read_dataset("./test.csv"):
print("dropofflat:", feature["dropofflat"].numpy())
print("fare_amount:", label.numpy())
dataset= read_dataset("./test.csv")
dataset_iterator = dataset.make_one_shot_iterator()
features, labels = dataset_iterator.get_next()
assert features['dayofweek'].numpy() == 1
assert labels.numpy() == 28
print("You rock!")
def train_input_fn(csv_path, batch_size = 128):
    """Training input function: read CSV, then shuffle/repeat/batch.

    csv_path: path (or glob pattern) of the CSV file(s) to read.
    batch_size: number of examples per batch (default 128).
    """
    dataset = # TODO: Your code goes here
    dataset = # TODO: Your code goes here
    return dataset
def eval_input_fn(csv_path, batch_size = 128):
    """Evaluation input function: read CSV and batch (no shuffle/repeat).

    csv_path: path (or glob pattern) of the CSV file(s) to read.
    batch_size: number of examples per batch (default 128).
    """
    dataset = # TODO: Your code goes here
    dataset = # TODO: Your code goes here
    return dataset
FEATURE_NAMES = CSV_COLUMN_NAMES[1:] # all but first column
print(FEATURE_NAMES)
feature_cols = # TODO: Your code goes here
print(feature_cols)
OUTDIR = "taxi_trained"
model = # TODO: Your code goes here
%%time
tf.logging.set_verbosity(tf.logging.INFO) # so loss is printed during training
shutil.rmtree(path = OUTDIR, ignore_errors = True) # start fresh each time
model.train(
input_fn = # TODO: Your code goes here,
steps = # TODO: Your code goes here
)
metrics = # TODO: Your code goes here
print("RMSE on dataset = {}".format(# TODO: Your code goes here))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input function reading from CSV
Step2: Run the following test to make sure your implementation is correct
Step3: Exercise 2
Step4: Tests
Step5: You should be able to iterate over what's returned by read_dataset. We'll print the dropofflat and fare_amount for each entry in ./test.csv
Step6: Run the following test cell to make sure your function works properly
Step7: Exercise 3
Step8: Exercise 4
Step9: Create feature columns
Step10: Exercise 5
Step11: Choose Estimator
Step12: Train
Step13: Evaluate
|
3,558
|
<ASSISTANT_TASK:>
Python Code:
print(conf.toDebugString()) #Instance of SparkConf with options set by the extension
conf.setAppName('ExtensionTestingApp')
#conf.setMaster('spark://dell-inspiron:7077') # if master is started using command line
conf.setMaster('local[*]')
from pyspark import SparkContext
sc=SparkContext.getOrCreate(conf=conf) #Start the spark context
import time
b=sc.broadcast([3,5]) #Creating a broadcast variable available on all executors
a=sc.accumulator(0) #Creating an accumulator for adding values across executors
RDD0=sc.parallelize([y for y in range(0,5)]) #RDD from input python collection
RDD2=sc.parallelize([z for z in range(10,15)])
RDD1=RDD0.cartesian(RDD2)
cached=RDD2.cache() #Testing cached RDD
RDD22=RDD1.map(lambda x:x[0]+x[1]+b.value[0])
RDD3=RDD22.repartition(5) # To trigger a new stage.
RDD4=RDD2.map(lambda x: 3*x-b.value[0])
RDD5=RDD3.filter(lambda x:x%2==0)
RDD6=RDD4.filter(lambda x:x%2!=0)
RDD7=RDD5.cartesian(RDD6)
RDD8=RDD7.flatMap(lambda x: [x[i] for i in range(0,2)])
RDD9=RDD8.union(cached)
ans=RDD9.reduce(lambda x,y: x+y) # Doing a simple sum on the random data.
print(ans)
def f(x):
    """Add x into the Spark accumulator `a` (applied via RDD9.foreach below)."""
    global a
    time.sleep(0.5) #Making the job run a little longer
    a+=x
RDD9.foreach(f)
print(a.value)
#Display should appear automatically
sc.parallelize(range(0,100)).count()
sc.parallelize(range(0,100)).count()
sc.parallelize(range(0,100)).count()
sc.parallelize(range(0,100)).count()
sc.parallelize(range(0,100)).count()
sc.parallelize(range(0,100)).map(lambda x:x*x).filter(lambda x:x%2==0).count()
sc.parallelize(range(0,100)).map(lambda x:x*x).filter(lambda x:x%2==0).count()
sc.parallelize(range(0,100)).map(lambda x:x*x).filter(lambda x:x%2==0).count()
sc.parallelize(range(0,100)).map(lambda x:x*x).filter(lambda x:x%2==0).count()
sc.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: User adds other options and starts the spark context
Step2: Example spark job
|
3,559
|
<ASSISTANT_TASK:>
Python Code:
import sys # system module
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import numpy as np
%matplotlib inline
plt.style.use("ggplot")
# quandl package
import quandl
# check versions (overkill, but why not?)
print('Python version:', sys.version)
print('Pandas version: ', pd.__version__)
print('quandl version: ', quandl.version.VERSION)
print('Today: ', dt.date.today())
# helper function to print info about dataframe
def df_info(df):
    """Print shape, dtypes, and index dtype of df; return a head+tail preview.

    The returned frame is the first three and last three rows concatenated.
    """
    for caption, value in (
        ("Shape: ", df.shape),
        ("dtypes: ", df.dtypes.to_dict()),
        ("index dtype: ", df.index.dtype),
    ):
        print(caption, value)
    preview = pd.concat([df.head(3), df.tail(3)])
    return preview
us_tax = quandl.get("OECD/REV_NES_TOTALTAX_TAXUSD_USA")
df_info(us_tax)
us_tax_recent = quandl.get("OECD/REV_NES_TOTALTAX_TAXUSD_USA", start_date="2000-01-01")
df_info(us_tax_recent)
# For multiple data sources, it's useful to define a list/dict
my_data = {"FRED/DFF": "risk_free_rate",
"NVCA/VENTURE_3_09C": "vc_investments"}
dfs = []
for k in my_data.keys():
dfs.append(quandl.get(k))
df_info(dfs[0])
quandl.get(['NVCA/VENTURE_3_09C.2', 'NVCA/VENTURE_3_09C.5']).head()
quandl.get(['NSE/OIL.1', 'WIKI/AAPL.4'])
mix = quandl.get(['NSE/OIL.1', 'WIKI/AAPL.4'])
mix.plot(subplots=True)
df_info(dfs[1])
dfs[1].rename(columns={"DFF": my_data["FRED/DFF"]}, inplace=True)
df_info(dfs[1])
quart = quandl.get("FRED/DFF", collapse='quarterly')
print(quart.head())
quart.plot()
diff = quandl.get("FRED/DFF", transformation='rdiff')
diff.plot()
ffr = dfs[1]
vc = dfs[0]
today = dt.date.today()
print("the type of today is ", type(today))
print("the day of the month is: ", today.day)
print("we are curretly in month number", today.month)
print("The year is", today.year)
# construct a date by hand
new_years_eve = dt.date(2017, 12, 31)
until_nye = new_years_eve - today
type(until_nye)
until_nye.days
def days_until(date):
    """Return the whole number of days from today until *date*.

    date: a datetime.date. The result is negative when *date* is in the past
    and zero when it is today.
    """
    remaining = date - dt.date.today()
    return remaining.days
project_due = dt.date(2017, 5, 5)
days_until(project_due)
spencer_bday = dt.date(1989, 4, 25)
# NOTE: add 7 for the 7 leap years between 1989 and 2019
thirty_years = dt.timedelta(days=365*30 + 7)
# check to make sure it is still April 25th
spencer_bday + thirty_years
days_to_30 = (spencer_bday + thirty_years - today).days
print("Spencer will be 30 in {} days".format(days_to_30))
now = dt.datetime.now()
print("type of now:", type(now))
now
print("the day of the month is: ", now.day)
print("we are curretly in month number", now.month)
print("The year is", now.year)
now.weekday()
# NOTE: we can only do arithmetic between many date objects or datetime obejcts
# we cannot add or subtract a datetime to/from a date. So, we need to
# re-create spencer_bday as a datetime object.
# NOTE: The timedelta object is already compatible with date and datetime objects
spencer_bday_time = dt.datetime(1989, 4, 25, 16, 33, 5)
seconds_to_30 = (spencer_bday_time + thirty_years - now).seconds
print("Spencer will be 30 in {} seconds".format(seconds_to_30))
print(today.strftime("Today is %Y-%m-%d"))
spencer_bday_time.strftime("Spencer was born on %Y-%m-%d")
spencer_bday_time.strftime("Spencer was born on a %A")
spencer_bday_time.strftime("Spencer was born on %A, %b %dth")
spencer_bday_time.strftime("Spencer was born on %A, %B %dth at %I:%M %p")
type(ffr.index)
ffr2008 = ffr["2008"]
print("ffr2008 is a", type(ffr2008))
df_info(ffr2008)
ffr2008.plot()
ffr_sep2008 = ffr["2008-09"]
df_info(ffr_sep2008)
ffr_sep2008.plot()
ffr2 = ffr["2007-06":"2011-03"]
df_info(ffr2)
ffr2.plot()
vc['2013':'2016'].plot()
# MS means "month start"
ffrM_resample = ffr.resample("MS")
type(ffrM_resample)
ffrM = ffrM_resample.first()
df_info(ffrM)
ffr.resample("2w")
ffr.resample("2w").mean()
ffr.resample('QS').mean().head()
ffr.resample('A').mean().head()
ffr.resample("M").first().head()
ffr.resample("M").last().head()
ffr.resample("MS").first().head()
ffr.resample("MS").last().head()
ffr.index.year
ffr.index.day
ffr.index.month
fig, ax = plt.subplots()
ffr.rolling(window=7).max().plot(ax=ax)
ffr.rolling(window=7).min().plot(ax=ax)
ax.legend(["max", "min"])
ffr.rolling(window=7).max().head(10)
ffr.resample("7D").max().head(10)
# do a left merge on the index (date info)
df = pd.merge(ffr, vc, left_index=True, right_index=True, how="left")
df_info(df)
vc.head()
ffr_recent = ffr["1985":]
ffr_recentM = ffr_recent.resample("M").first()
vc_M = vc.resample("M").pad()
vc_M.head()
df = pd.merge(ffr_recentM, vc_M, left_index=True, right_index=True, how="left")
print(df.head(6))
print("\n\n", df.tail(8))
# subset the data, then remove datetime index as we don't need it again
post_dotcom = df["1995":].reset_index(drop=True)
post_housing = df["2004":].reset_index(drop=True)
# take logs so we can do growth rates as log(x_{t+N}) - log(x_t)
post_dotcom = np.log(post_dotcom)
post_housing = np.log(post_housing)
dotcom_growth = post_dotcom - post_dotcom.iloc[0, :]
housing_growth = post_housing - post_housing.iloc[0, :]
fig, axs = plt.subplots(3, 1, figsize=(10, 5))
variables = ["risk_free_rate", "Early Stage", "Total"]
for i in range(len(variables)):
var = variables[i]
# add dotcom line
dotcom_growth[var].plot(ax=axs[i])
# add housing line
housing_growth[var].plot(ax=axs[i])
# set title
axs[i].set_title(var)
# set legend and xlabel on last plot only
axs[-1].legend(["dotcom", "housing"])
axs[-1].set_xlabel("Quarters since boom")
# make subplots not overlap
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quandl <a id=data></a>
Step2: We can also pass start_date and end_date parameters to control the dates for the downloaded data
Step3: Now, let's read in the data sets we found were interesting. Feel free to use the codes you looked up, or the ones I'm using here.
Step4: To request specific columns use column indices (NOT 0-based)
Step5: To combine variables from different datasets
Step6: What happened to the first column?
Step7: So, "FRED/DFF" is the federal funds rate, or the interest rate at which banks can trade federal assets with each other overnight. This is often used as a proxy for the risk free rate in economic analysis.
Step8: We can change the sampling frequency
Step9: Or we can perform elementary calculations on the data
Step10: The other dataframe we dowloaded (using code NVCA/VENTURE_3_09C) contains quarterly data on total investment by venture capital firms in the US, broken down by the stage of the project.
Step11: Usage Limits
Step12: Given that we have an object of type datetime.date we can do things like ask for the day, month, and year
Step13: timedelta
Step14: We can get the number of days until new years eve by looking at until_nye.days
Step15: Exercise
Step16: We could also construct a datetime.timedelta by hand and add it to an existing date. Here's an example to see how many days until Spencer turns 30
Step17: datetime
Step18: The numbers in the printout above are year, month, day, hour, minute, second, millisecond.
Step19: Exercise
Step20: Time deltas work the same way with datetime objects as they did with date objects.
Step21: strftime
Step22: Notice that the argument to strftime is a python string that can contain normal text (e.g. Today is) and a special formatters (the stuff starting with %). We haven't talked much about how to do string formatting, but in Python and many other languages using % inside strings has special meaning.
Step23: "Spencer was born on a Tuesday"
Step24: "Spencer was born on Tuesday, Apr 25th"
Step25: (bonus) "Spencer was born on Tuesday, April 25th at 04
Step26: Dates in Pandas <a id=pandas_dates></a>
Step27: Here we have a DatetimeIndex, which means pandas recogizes this DataFrame as containing time series data.
Step28: Suppose we want to restrict to September 2008
Step29: We can use this same functionality to extract ranges of dates. To get the data starting in june 2007 and going until march 2011 we would do
Step30: Exercise Using one of your datasets from quandl, plot one or more variables for the last 3 years (2013 through 2016)
Step31: resampling
Step32: Notice that when we call resample we don't get back a DataFrame at that frequency. This is because there is some ambiguity regarding just how the frequency should be converted
Step33: Note that we can also combine numbers with the specification of the resampling frequency. As an example, we can resample to a bi-weekly frequency using
Step34: Exercise
Step35: An annual frequency -- use the end of the year
Step36: more than you need
Step37: Notice that the index is the same on both, but the data is clearly different.
Step38: Notice how the data associated with "M" and first is the same as the data for "MS" and first. The same holds for last.
Step39: Rolling computations
Step40: Note that this is different from just resampling because we will have an observation for every date in the original dataframe (except the number of dates at the front needed to construct the initial window).
Step41: Merging with dates
Step42: Notice that we ended up with a lot of missing data. This happened for two reasons
Step43: To resolve the second issue we will do two-steps
Step44: Notice that using pad here just copied data forwards to fill in missing months (e.g. the data for March 1985 was applied to April and May)
Step45: That looks much better -- we have missing data at the top and the bottom for months that aren't available in the venture capital dataset, but nothing else should be missing.
|
3,560
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Arrays
import numpy as np
# Plotting
import matplotlib.pyplot as plt
# pairinteraction :-)
from pairinteraction import pireal as pi
qd = pi.QuantumDefect("Rb", 50, 0, 0.5)
print("Core polarizability: ac =",qd.ac)
print("Effective coulomb potential")
print(" Z =",qd.Z,"(core charge)")
print(" a1 =",qd.a1)
print(" a2 =",qd.a2)
print(" a3 =",qd.a3)
print(" a4 =",qd.a4)
print("Effective core radius: rc =",qd.rc)
print("Effective quantum number: n* =",qd.nstar)
print("State energy: E(n*) =",qd.energy)
n = pi.Numerov(qd).integrate()
w = pi.Whittaker(qd).integrate()
plt.xlabel("$r$ ($a_0$)")
plt.ylabel("$r^2|\Psi(r)|^2$ [a.u.]")
plt.plot(n[:,0]**2,np.abs(np.sqrt(n[:,0])*n[:,1])**2,'-',label="numeric WF")
plt.plot(w[:,0]**2,np.abs(w[:,1])**2,'--',label="Coulomb WF")
plt.legend();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Our code starts with loading the required modules for the computation. It is irrelevant whether we use the pireal or picomplex modules here, because we do not calculate any matrix elements.
Step2: Keep in mind that pairinteraction is a Rydberg interaction calculator. All techniques presented below work well for the high-$n$ states of Rydberg atoms, but fail miserably for low-lying states.
Step3: The parameters of the model potentials can be accessed like member variables of the qd object. They have mnemonic names to mirror their meaning in the model potentials presented above.
Step4: The effective principal quantum number in quantum defect theory is defined as series expansion
Step5: Even though these parameters can be accessed like member variables, they are read-only values and attempting to change them will result in an error.
Step6: The wavefunctions which we obtain from these methods are unscaled, i.e. they have their original scaling as defined by the calculation. The $x$-axes are square root scaled. The result returned by the Coulomb wavefunction method is $r \;\Psi^{\text{rad}}(r)$. The result calculated by Numerov's method is $X(x)$, as defined above, and must be multiplied by $\sqrt{x}$ to get $r \;\Psi^{\text{rad}}(r)$.
|
3,561
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
import copy
import os
import pandas as pd
import matplotlib.pyplot as plt
import tsam.timeseriesaggregation as tsam
%matplotlib inline
raw = pd.read_csv('testdata.csv', index_col = 0)
def plotTS(data, periodlength, vmin, vmax, label = 'T [°C]'):
    """Show a time series as a 2D heatmap: one column per period, one row per hour.

    data: pandas Series of time-series values (presumably hourly — TODO confirm).
    periodlength: number of values per period (e.g. 24 -> one column per day).
    vmin, vmax: colour-scale limits passed to imshow.
    label: caption for the colourbar.
    """
    fig, axes = plt.subplots(figsize = [6, 2], dpi = 100, nrows = 1, ncols = 1)
    # Reshape the flat series into a (period x hour) table; deepcopy so the
    # caller's data is not modified by unstackToPeriods.
    stacked, timeindex = tsam.unstackToPeriods(copy.deepcopy(data), periodlength)
    # Transpose so periods run along the x-axis and hours along the y-axis.
    cax = axes.imshow(stacked.values.T, interpolation = 'nearest', vmin = vmin, vmax = vmax)
    axes.set_aspect('auto')
    axes.set_ylabel('Hour')
    plt.xlabel('Day')
    fig.subplots_adjust(right = 1.2)
    cbar=plt.colorbar(cax)
    cbar.set_label(label)
aggregation = tsam.TimeSeriesAggregation(raw, noTypicalPeriods = 10, hoursPerPeriod = 24,
clusterMethod = 'hierarchical')
typPeriods = aggregation.createTypicalPeriods()
predictedPeriods = aggregation.predictOriginalData()
aggregation.accuracyIndicators()
aggregationSeg = tsam.TimeSeriesAggregation(raw, noTypicalPeriods = 20, hoursPerPeriod = 24,
clusterMethod = 'hierarchical', segmentation=True, noSegments=12)
typPeriodsSeg = aggregationSeg.createTypicalPeriods()
predictedPeriodsSeg = aggregationSeg.predictOriginalData()
aggregationSeg.accuracyIndicators()
fig, axes = plt.subplots(figsize = [6, 2], dpi = 100, nrows = 1, ncols = 1)
raw['Load'].sort_values(ascending=False).reset_index(drop=True).plot(label = 'Original')
predictedPeriods['Load'].sort_values(ascending=False).reset_index(drop=True).plot(label = '10 with 24 hours')
predictedPeriodsSeg['Load'].sort_values(
ascending=False).reset_index(drop=True).plot(label = '20 with 12 Seg')
plt.legend()
plt.xlabel('Hours [h]')
plt.ylabel('Duration Load [MW]')
param = 'GHI'
plotTS(raw[param], 24, vmin = raw[param].min(), vmax = raw[param].max(), label = param)
plotTS(predictedPeriods[param], 24, vmin = raw[param].min(), vmax = raw[param].max(), label = param)
plotTS(predictedPeriodsSeg[param], 24, vmin = raw[param].min(), vmax = raw[param].max(), label = param)
fig, axes = plt.subplots(figsize = [6, 2], dpi = 100, nrows = 1, ncols = 1)
raw['Load']['20100210':'20100218'].plot(label = 'Original')
predictedPeriods['Load']['20100210':'20100218'].plot(label = '10 with 24 hours')
predictedPeriodsSeg['Load']['20100210':'20100218'].plot(label = '20 with 12 seg')
plt.legend()
plt.ylabel('Load [MW]')
raw.mean()
predictedPeriods.mean()
predictedPeriodsSeg.mean()
aggregation.createTypicalPeriods().loc[0,:].mean()
aggregationSegTest = tsam.TimeSeriesAggregation(raw, noTypicalPeriods = 10, hoursPerPeriod = 24,
clusterMethod = 'hierarchical', segmentation=True, noSegments=12)
segmentDurations=aggregationSegTest.createTypicalPeriods().loc[0,:].reset_index(0, drop=True).index.values
aggregationSegTest.createTypicalPeriods().loc[0,:].mul(segmentDurations, axis=0).sum()/segmentDurations.sum()
aggregationSeg.createTypicalPeriods()
aggregation.createTypicalPeriods()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input data
Step2: Create a plot function for the temperature for a visual comparison of the time series
Step3: Hierarchical aggregation with medoid representation and 10 typical days with 24 hourly segments
Step4: Create the typical periods
Step5: Predict original data
Step6: Get accuracy indicators
Step7: Hierarchical aggregation with medoid representation and 20 typical days with 12 irregular segments
Step8: Create the typical periods
Step9: Predict original data
Step10: Get accuracy indicators
Step11: Comparison of the aggregations
Step12: Validation
Step13: Check that a segmented period has the same column-wise means as a non-segmented period for if the periods are the same.
Step14: Print out the (segmented) typical periods.
|
3,562
|
<ASSISTANT_TASK:>
Python Code:
from os import path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.simulation import simulate_evoked
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()
evoked.crop(0.07, 0.08)
# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4])
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False, time_unit='s')
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, -pred_evoked], weights='equal')
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2], **plot_params)
plt.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot(time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's localize the N100m (using MEG only)
Step2: Calculate and visualise magnetic field predicted by dipole with maximum GOF
Step3: Estimate the time course of a single dipole with fixed position and
|
3,563
|
<ASSISTANT_TASK:>
Python Code:
from os.path import basename, exists
def download(url):
    """Fetch *url* into the working directory unless the file already exists.

    The local filename is the last path component of the URL. Prints a
    confirmation line only when a download actually happens.
    """
    filename = basename(url)
    if exists(filename):
        return
    from urllib.request import urlretrieve
    local, _ = urlretrieve(url, filename)
    print("Downloaded " + local)
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkstats2.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/thinkplot.py")
import numpy as np
import thinkstats2
import thinkplot
thinkplot.PrePlot(3)
for lam in [2.0, 1, 0.5]:
xs, ps = thinkstats2.RenderExpoCdf(lam, 0, 3.0, 50)
label = r"$\lambda=%g$" % lam
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title="Exponential CDF", xlabel="x", ylabel="CDF", loc="lower right")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/nsfg.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/analytic.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/babyboom.dat")
import analytic
df = analytic.ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label="actual")
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel="Time between births (minutes)", ylabel="CDF")
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(
xlabel="Time between births (minutes)",
ylabel="CCDF",
yscale="log",
loc="upper right",
)
thinkplot.PrePlot(3)
mus = [1.0, 2.0, 3.0]
sigmas = [0.5, 0.4, 0.3]
for mu, sigma in zip(mus, sigmas):
xs, ps = thinkstats2.RenderNormalCdf(mu=mu, sigma=sigma, low=-1.0, high=4.0)
label = r"$\mu=%g$, $\sigma=%g$" % (mu, sigma)
thinkplot.Plot(xs, ps, label=label)
thinkplot.Config(title="Normal CDF", xlabel="x", ylabel="CDF", loc="upper left")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/nsfg.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/first.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dct")
download(
"https://github.com/AllenDowney/ThinkStats2/raw/master/code/2002FemPreg.dat.gz"
)
import nsfg
import first
preg = nsfg.ReadFemPreg()
weights = preg.totalwgt_lb.dropna()
# estimate parameters: trimming outliers yields a better fit
mu, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
print("Mean, Var", mu, var)
# plot the model
sigma = np.sqrt(var)
print("Sigma", sigma)
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=12.5)
thinkplot.Plot(xs, ps, label="model", color="0.6")
# plot the data
cdf = thinkstats2.Cdf(weights, label="data")
thinkplot.PrePlot(1)
thinkplot.Cdf(cdf)
thinkplot.Config(title="Birth weights", xlabel="Birth weight (pounds)", ylabel="CDF")
n = 1000
thinkplot.PrePlot(3)
mus = [0, 1, 5]
sigmas = [1, 1, 2]
for mu, sigma in zip(mus, sigmas):
sample = np.random.normal(mu, sigma, n)
xs, ys = thinkstats2.NormalProbability(sample)
label = "$\mu=%d$, $\sigma=%d$" % (mu, sigma)
thinkplot.Plot(xs, ys, label=label)
thinkplot.Config(
title="Normal probability plot",
xlabel="standard normal sample",
ylabel="sample values",
)
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = np.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color="0.8")
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label="all live")
thinkplot.Config(
title="Normal probability plot",
xlabel="Standard deviations from mean",
ylabel="Birth weight (lbs)",
)
full_term = preg[preg.prglngth >= 37]
term_weights = full_term.totalwgt_lb.dropna()
mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
std = np.sqrt(var)
xs = [-4, 4]
fxs, fys = thinkstats2.FitLine(xs, mean, std)
thinkplot.Plot(fxs, fys, linewidth=4, color="0.8")
thinkplot.PrePlot(2)
xs, ys = thinkstats2.NormalProbability(weights)
thinkplot.Plot(xs, ys, label="all live")
xs, ys = thinkstats2.NormalProbability(term_weights)
thinkplot.Plot(xs, ys, label="full term")
thinkplot.Config(
title="Normal probability plot",
xlabel="Standard deviations from mean",
ylabel="Birth weight (lbs)",
)
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/brfss.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/CDBRFS08.ASC.gz")
import brfss
df = brfss.ReadBrfss()
weights = df.wtkg2.dropna()
def MakeNormalModel(weights):
    """Plot a CDF of *weights* together with a fitted normal model.

    weights: sequence of numbers
    """
    cdf = thinkstats2.Cdf(weights, label="weights")

    # Trimmed estimates are robust to outliers in the tails.
    mean, var = thinkstats2.TrimmedMeanVar(weights)
    std = np.sqrt(var)
    print("n, mean, std", len(weights), mean, std)

    # Render the model over mean +/- 4 standard deviations.
    xmin = mean - 4 * std
    xmax = mean + 4 * std

    xs, ps = thinkstats2.RenderNormalCdf(mean, std, xmin, xmax)
    thinkplot.Plot(xs, ps, label="model", linewidth=4, color="0.8")
    thinkplot.Cdf(cdf)
MakeNormalModel(weights)
thinkplot.Config(
title="Adult weight, linear scale",
xlabel="Weight (kg)",
ylabel="CDF",
loc="upper right",
)
log_weights = np.log10(weights)
MakeNormalModel(log_weights)
thinkplot.Config(
title="Adult weight, log scale",
xlabel="Weight (log10 kg)",
ylabel="CDF",
loc="upper right",
)
def MakeNormalPlot(weights):
    """Generate a normal probability plot of *weights*.

    weights: sequence of numbers
    """
    # p=0.01 trims 1% from each tail before estimating the moments.
    mean, var = thinkstats2.TrimmedMeanVar(weights, p=0.01)
    std = np.sqrt(var)

    # Reference line for a perfect normal with the estimated parameters.
    xs = [-5, 5]
    xs, ys = thinkstats2.FitLine(xs, mean, std)
    thinkplot.Plot(xs, ys, color="0.8", label="model")

    xs, ys = thinkstats2.NormalProbability(weights)
    thinkplot.Plot(xs, ys, label="weights")
MakeNormalPlot(weights)
thinkplot.Config(
title="Adult weight, normal plot",
xlabel="Weight (kg)",
ylabel="CDF",
loc="upper left",
)
MakeNormalPlot(log_weights)
thinkplot.Config(
title="Adult weight, lognormal plot",
xlabel="Weight (log10 kg)",
ylabel="CDF",
loc="upper left",
)
xmin = 0.5
thinkplot.PrePlot(3)
for alpha in [2.0, 1.0, 0.5]:
xs, ps = thinkstats2.RenderParetoCdf(xmin, alpha, 0, 10.0, n=100)
thinkplot.Plot(xs, ps, label=r"$\alpha=%g$" % alpha)
thinkplot.Config(title="Pareto CDF", xlabel="x", ylabel="CDF", loc="lower right")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/populations.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/PEP_2012_PEPANNRES_with_ann.csv")
import populations
pops = populations.ReadData()
print("Number of cities/towns", len(pops))
log_pops = np.log10(pops)
cdf = thinkstats2.Cdf(pops, label="data")
cdf_log = thinkstats2.Cdf(log_pops, label="data")
# pareto plot
xs, ys = thinkstats2.RenderParetoCdf(xmin=5000, alpha=1.4, low=0, high=1e7)
thinkplot.Plot(np.log10(xs), 1 - ys, label="model", color="0.8")
thinkplot.Cdf(cdf_log, complement=True)
thinkplot.Config(
xlabel="log10 population", ylabel="CCDF", yscale="log", loc="lower left"
)
thinkplot.PrePlot(cols=2)
mu, sigma = log_pops.mean(), log_pops.std()
xs, ps = thinkstats2.RenderNormalCdf(mu, sigma, low=0, high=8)
thinkplot.Plot(xs, ps, label="model", color="0.8")
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel="log10 population", ylabel="CDF", loc="lower right")
thinkstats2.NormalProbabilityPlot(log_pops, label="data")
thinkplot.Config(xlabel="Random variate", ylabel="log10 population", xlim=[-5, 5])
import random
def expovariate(lam):
    """Draw one exponential variate with rate *lam* by inverse-CDF sampling."""
    # Invert the exponential CDF F(x) = 1 - exp(-lam * x) at a uniform draw.
    u = random.random()
    return -np.log(1 - u) / lam
t = [expovariate(lam=2) for _ in range(1000)]
cdf = thinkstats2.Cdf(t)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel="Exponential variate", ylabel="CCDF", yscale="log")
# Solution
def sample_pareto(alpha, x_m, size):
    """Draw *size* Pareto(alpha) variates with scale *x_m* via the inverse CDF."""
    draws = np.random.random(size)
    # Inverse CDF: F^-1(u) = x_m * (1 - u)**(-1/alpha)
    return x_m * (1 - draws) ** (-1 / alpha)
# Solution
sample = sample_pareto(1, 2, 1000)
cdf = thinkstats2.Cdf(sample)
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel="Random values", ylabel="CCDF", xscale="log", yscale="log")
import scipy.stats
mu = 178
sigma = 7.7
dist = scipy.stats.norm(loc=mu, scale=sigma)
type(dist)
dist.mean(), dist.std()
dist.cdf(mu - sigma)
# Solution
low = dist.cdf(177.8) # 5'10"
high = dist.cdf(185.4) # 6'1"
low, high, high - low
alpha = 1.7
xmin = 1 # meter
dist = scipy.stats.pareto(b=alpha, scale=xmin)
dist.median()
# Solution
dist.mean()
# Solution
dist.cdf(dist.mean())
# Solution
(1 - dist.cdf(1000)) * 7e9, dist.sf(1000) * 7e9
# Solution
# One way to solve this is to search for a height that we
# expect one person out of 7 billion to exceed.
# It comes in at roughly 600 kilometers.
dist.sf(600000) * 7e9
# Solution
# Another way is to use `ppf`, which evaluates the "percent point function", which
# is the inverse CDF. So we can compute the height in meters that corresponds to
# the probability (1 - 1/7e9).
dist.ppf(1 - 1 / 7e9)
sample = [random.weibullvariate(2, 1) for _ in range(1000)]
cdf = thinkstats2.Cdf(sample)
thinkplot.Cdf(cdf, transform="weibull")
thinkplot.Config(xlabel="Weibull variate", ylabel="CCDF")
import analytic
df = analytic.ReadBabyBoom()
diffs = df.minutes.diff()
cdf = thinkstats2.Cdf(diffs, label="actual")
n = len(diffs)
lam = 44.0 / 24 / 60
sample = [random.expovariate(lam) for _ in range(n)]
1 / lam, np.mean(sample)
# Solution
model = thinkstats2.Cdf(sample, label='model')
thinkplot.PrePlot(2)
thinkplot.Cdfs([cdf, model], complement=True)
thinkplot.Config(xlabel='Time between births (minutes)',
ylabel='CCDF',
yscale='log')
# Solution
# If you plot distributions for a large number of samples, you get a sense
# of how much random variation to expect. In this case, the data fall within
# the range we expect, so there is no compelling reason to think it is
# not exponential.
for i in range(100):
sample = [random.expovariate(lam) for _ in range(n)]
thinkplot.Cdf(thinkstats2.Cdf(sample), complement=True, color="0.9")
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(xlabel="Time between births (minutes)", ylabel="CCDF", yscale="log")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/hinc.py")
download("https://github.com/AllenDowney/ThinkStats2/raw/master/code/hinc06.csv")
import hinc
df = hinc.ReadData()
df
xs, ps = df.income.values, df.ps.values
cdf = thinkstats2.Cdf(xs, ps, label="data")
cdf_log = thinkstats2.Cdf(np.log10(xs), ps, label="data")
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel="household income", ylabel="CDF")
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5, low=0, high=250000)
thinkplot.Plot(xs, 1 - ys, label="model", color="0.8")
thinkplot.Cdf(cdf, complement=True)
thinkplot.Config(
xlabel="log10 household income",
ylabel="CCDF",
xscale="log",
yscale="log",
loc="lower left",
)
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label="model", color="0.8")
thinkplot.Cdf(cdf_log)
thinkplot.Config(xlabel="log10 household income", ylabel="CDF")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exponential distribution
Step2: Here's the distribution of interarrival times from a dataset of birth times.
Step3: Here's what the CCDF looks like on a log-y scale. A straight line is consistent with an exponential distribution.
Step4: Normal distribution
Step5: I'll use a normal model to fit the distribution of birth weights from the NSFG.
Step6: Here's the observed CDF and the model. The model fits the data well except in the left tail.
Step7: A normal probability plot is a visual test for normality. The following example shows that if the data are actually from a normal distribution, the plot is approximately straight.
Step8: Here's the normal probability plot for birth weights, showing that the lightest babies are lighter than we expect from the normal mode, and the heaviest babies are heavier.
Step9: If we suspect that the deviation in the left tail is due to preterm babies, we can check by selecting only full term births.
Step10: Now the deviation in the left tail is almost gone, but the heaviest babies are still heavy.
Step11: Lognormal model
Step13: The following function estimates the parameters of a normal distribution and plots the data and a normal model.
Step14: Here's the distribution of adult weights and a normal model, which is not a very good fit.
Step15: Here's the distribution of adult weight and a lognormal model, plotted on a log-x scale. The model is a better fit for the data, although the heaviest people are heavier than the model expects.
Step17: The following function generates a normal probability plot.
Step18: When we generate a normal probability plot with adult weights, we can see clearly that the data deviate from the model systematically.
Step19: If we make a normal probability plot with log weights, the model fit the data well except in the tails, where the heaviest people exceed expectations.
Step20: Pareto distribution
Step21: The distribution of populations for cities and towns is sometimes said to be Pareto-like.
Step22: Here's the distribution of population for cities and towns in the U.S., along with a Pareto model. The model fits the data well in the tail.
Step23: The lognormal model might be a better fit for this data (as is often the case for things that are supposed to be Pareto).
Step24: Here's a normal probability plot for the log-populations. The model fits the data well except in the right tail, where the biggest cities are bigger than expected.
Step25: Random variates
Step26: We can test it by generating a sample.
Step27: And plotting the CCDF on a log-y scale.
Step28: A straight line is consistent with an exponential distribution.
Step29: Exercise
Step30: For example <tt>scipy.stats.norm</tt> represents a normal distribution.
Step31: A "frozen random variable" can compute its mean and standard deviation.
Step32: It can also evaluate its CDF. How many people are below the mean by more than one standard deviation? About 16%
Step33: How many people are between 5'10" and 6'1"?
Step34: Exercise
Step35: What is the mean height in Pareto world?
Step36: What fraction of people are shorter than the mean?
Step37: Out of 7 billion people, how many do we expect to be taller than 1 km? You could use <tt>dist.cdf</tt> or <tt>dist.sf</tt>.
Step38: How tall do we expect the tallest person to be?
Step39: Exercise
Step40: Exercise
Step41: Bonus Example
Step42: Here's what the CDF looks like on a linear scale.
Step43: To check whether a Pareto model describes the data well, I plot the CCDF on a log-log scale.
Step44: For the lognormal model I estimate mu and sigma using percentile-based statistics (median and IQR).
Step45: Here's what the distribution, and fitted model, look like on a log-x scale.
|
3,564
|
<ASSISTANT_TASK:>
Python Code:
config = configparser.ConfigParser()
config.sections()
config.read('example.ini')
config.sections()
'bitbucket.org' in config
'bytebong.com' in config
config['bitbucket.org']['User']
config['DEFAULT']['Compression']
topsecret = config['topsecret.server.com']
topsecret['ForwardX11']
topsecret['Port']
for key in config['bitbucket.org']:
print(key)
config['bitbucket.org']['ForwardX11']
type(topsecret['Port'])
type(int(topsecret['Port']))
type(topsecret.getint('Port'))
type(topsecret.getfloat('Port'))
int(topsecret['Port']) - 22.0
int(topsecret['Port']) - 22
try:
topsecret.getint('ForwardX11')
except ValueError:
print(True)
topsecret.getboolean('ForwardX11')
config['bitbucket.org'].getboolean('ForwardX11')
config.getboolean('bitbucket.org', 'Compression')
topsecret.get('Port')
topsecret.get('CompressionLevel')
topsecret.get('Cipher')
topsecret.get('Cipher', '3des-cbc')
topsecret.get('CompressionLevel', '3')
'BatchMode' in topsecret
topsecret.getboolean('BatchMode', fallback=True)
config['DEFAULT']['BatchMode'] = 'no'
topsecret.getboolean('BatchMode', fallback=True)
import yaml
with open("config.yml", 'r') as ymlfile:
cfg = yaml.load(ymlfile)
for section in cfg:
print(section)
print(cfg['mysql'])
print(cfg['other'])
# Load the configuration file
with open("config.yml") as f:
sample_config = f.read()
config = configparser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(sample_config))
# List all contents
print("List all contents")
for section in config.sections():
print("Section: %s" % section)
for options in config.options(section):
print("x %s:::%s:::%s" % (options,
config.get(section, options),
str(type(options))))
# Print some contents
print("\nPrint some contents")
print(config.get('other', 'use_anonymous')) # Just get the value
print(config.getboolean('other', 'use_anonymous')) # You know the datatype?
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Please note that default values have precedence over fallback values. For instance, in our example the 'CompressionLevel' key was specified only in the 'DEFAULT' section. If we try to get it from the section 'topsecret.server.com', we will always get the default, even if we specify a fallback
Step2: The same fallback argument can be used with the getint(), getfloat() and getboolean() methods, for example
|
3,565
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
url = 'https://raw.githubusercontent.com/henriquepgomide/caRtola/master/data/2019/2019-medias-jogadores.csv'
medias = pd.read_csv(url)
medias.head()
medias.shape
medias.columns
qtd_atletas = len(medias['player_id'].unique())
print(qtd_atletas)
posicoes = medias['player_position'].unique()
medias['Rank'] = None
for posicao in posicoes:
rank = medias[medias['player_position'] == posicao].player_id.rank(method='min')
rank = rank - 1
medias.iloc[rank.index,-1] = rank
colunas_unicos = ['Rank','player_id','player_position']
atletas = medias[colunas_unicos].drop_duplicates()
atletas.head()
atletas.shape
partidas = pd.read_csv(r'https://raw.githubusercontent.com/henriquepgomide/caRtola/master/data/2019/2019_partidas.csv')
partidas['home_score_norm'] = partidas['home_score'] / max(partidas['home_score'])
partidas['away_score_norm'] = partidas['away_score'] / max(partidas['away_score'])
partidas.head()
partidas.shape
df_partidas = pd.DataFrame()
for rodada in range(1,39):
df_rodada = pd.read_csv(f'https://raw.githubusercontent.com/henriquepgomide/caRtola/master/data/2019/rodada-{rodada}.csv')
df_rodada['round'] = rodada
df_partidas =df_partidas.append(df_rodada,sort=False)
df_partidas.shape
df_partidas = df_partidas[df_partidas['atletas.posicao_id'] != 'tec']
df_partidas = df_partidas.set_index('atletas.atleta_id').join(atletas.set_index('player_id'))
df_partidas.head()
df_partidas['Rank']
df_partidas.drop(df_partidas[df_partidas['Rank'].isnull()].index, inplace=True)
df_partidas['Rank'] = df_partidas['Rank'].astype(int)
import numpy as np
posicao = 'ata'
qtd_atletas = len(atletas[atletas.player_position == posicao])
M = np.zeros((qtd_atletas,qtd_atletas))
M
M.shape
df_partidas_posicao = df_partidas[df_partidas['atletas.posicao_id'] == posicao].copy()
for partida in range(len(partidas)-1): #Vamos deixar a última partida de fora para testes
df_rodada = df_partidas_posicao[df_partidas_posicao['round'] == partidas['round'][partida]]
jogadores_casa = df_rodada[df_rodada['atletas.clube_id'] == partidas['home_team'][partida]]
jogadores_visitantes = df_rodada[df_rodada['atletas.clube_id'] == partidas['away_team'][partida]]
for j_casa in range(len(jogadores_casa)):
for j_visitante in range(len(jogadores_visitantes)):
score_casa = 0
score_visitante = 0
pontos_j_casa = jogadores_casa['atletas.pontos_num'].iloc[j_casa]
pontos_j_visitante = jogadores_visitantes['atletas.pontos_num'].iloc[j_visitante]
soma = pontos_j_casa + pontos_j_visitante
if soma != 0:
score_casa = pontos_j_casa / soma
score_visitante = pontos_j_visitante / soma
j1 = jogadores_casa['Rank'].iloc[j_casa]
j2 = jogadores_visitantes['Rank'].iloc[j_visitante]
M[j1,j1] = M[j1,j1] + partidas['home_score_norm'][partida] + score_casa
M[j1,j2] = M[j1,j2] + partidas['away_score_norm'][partida] + score_visitante
M[j2,j1] = M[j2,j1] + partidas['home_score_norm'][partida] + score_casa
M[j2,j2] = M[j2,j2] + partidas['away_score_norm'][partida] + score_visitante
M
M = M / np.sum(M,axis=1)
evals, evecs = np.linalg.eig(M.T)
evec1 = evecs[:,np.isclose(evals, 1)]
evec1 = evec1[:,0]
stationary = evec1 / evec1.sum()
stationary = stationary.real
stationary
medias[medias.player_position == posicao][list(stationary > 0.015)]
stationaries = {}
for posicao in posicoes:
qtd_atletas = len(atletas[atletas.player_position == posicao])
M = np.zeros((qtd_atletas,qtd_atletas))
df_partidas_posicao = df_partidas[df_partidas['atletas.posicao_id'] == posicao].copy()
for partida in range(len(partidas)-1): #Vamos deixar a última partida de fora para testes
df_rodada = df_partidas_posicao[df_partidas_posicao['round'] == partidas['round'][partida]]
jogadores_casa = df_rodada[df_rodada['atletas.clube_id'] == partidas['home_team'][partida]]
jogadores_visitantes = df_rodada[df_rodada['atletas.clube_id'] == partidas['away_team'][partida]]
for j_casa in range(len(jogadores_casa)):
for j_visitante in range(len(jogadores_visitantes)):
score_casa = 0
score_visitante = 0
pontos_j_casa = jogadores_casa['atletas.pontos_num'].iloc[j_casa]
pontos_j_visitante = jogadores_visitantes['atletas.pontos_num'].iloc[j_visitante]
soma = pontos_j_casa + pontos_j_visitante
if soma != 0:
score_casa = pontos_j_casa / soma
score_visitante = pontos_j_visitante / soma
def_n_vazada_casa = 0 if partidas['away_score_norm'][partida] > 0 else 1
def_n_vazada_visitante = 0 if partidas['home_score_norm'][partida] > 0 else 1
if posicao == 'ata':
pontos_casa = partidas['home_score_norm'][partida] + score_casa
pontos_visitante = partidas['away_score_norm'][partida] + score_visitante
elif posicao == 'mei':
pontos_casa = partidas['home_score_norm'][partida] + def_n_vazada_casa + score_casa
pontos_visitante = partidas['away_score_norm'][partida] + def_n_vazada_visitante + score_visitante
else:
pontos_casa = def_n_vazada_casa + score_casa
pontos_visitante = def_n_vazada_visitante + score_visitante
j1 = jogadores_casa['Rank'].iloc[j_casa]
j2 = jogadores_visitantes['Rank'].iloc[j_visitante]
M[j1,j1] = M[j1,j1] + pontos_casa
M[j1,j2] = M[j1,j2] + pontos_visitante
M[j2,j1] = M[j2,j1] + pontos_casa
M[j2,j2] = M[j2,j2] + pontos_visitante
M = M / np.sum(M,axis=1)
evals, evecs = np.linalg.eig(M.T)
evec1 = evecs[:,np.isclose(evals, 1)]
evec1 = evec1[:,0]
stationary = evec1 / evec1.sum()
stationary = stationary.real
stationaries[posicao] = stationary
rodada = 38
df_rodada = df_partidas[df_partidas['round'] == rodada].copy()
df_rodada['Rank'] = df_rodada['Rank'].astype(int)
df_rodada['probs'] = 0
for jogador in range(len(df_rodada)):
posicao = df_rodada.iloc[jogador]['player_position']
rank = df_rodada.iloc[jogador]['Rank']
if rank:
df_rodada.iloc[jogador,-1] = stationaries[posicao][rank]
df_rodada = df_rodada[df_rodada['atletas.status_id'] == 'Provável'].copy()
df_rodada.head()
formacao = {
'ata': 3,
'mei': 3,
'lat': 2,
'zag': 2,
'gol':1
}
cartoletas = 140
df_rodada.set_index('atletas.slug',inplace=True)
z = df_rodada['probs'].to_dict()
c = df_rodada['atletas.preco_num'].to_dict()
dummies_posicao = pd.get_dummies(df_rodada['atletas.posicao_id'])
dummies_posicao = dummies_posicao.to_dict()
!pip install pulp
from pulp import LpMaximize, LpProblem, lpSum, LpVariable
prob = LpProblem("Melhor_Escalacao", LpMaximize)
y = LpVariable.dicts("Atl",df_rodada.index,0,1,cat='Binary')
prob += lpSum([z[i] * y[i] for i in y])
prob += lpSum([c[i] * y[i] for i in y]) <= cartoletas, "Limite de Cartoletas"
prob += lpSum([dummies_posicao['ata'][i] * y[i] for i in y]) == formacao['ata'], "Quantidade Atacantes"
prob += lpSum([dummies_posicao['lat'][i] * y[i] for i in y]) == formacao['lat'], "Quantidade Laterais"
prob += lpSum([dummies_posicao['mei'][i] * y[i] for i in y]) == formacao['mei'], "Quantidade Meio"
prob += lpSum([dummies_posicao['zag'][i] * y[i] for i in y]) == formacao['zag'], "Quantidade Zagueiros"
prob += lpSum([dummies_posicao['gol'][i] * y[i] for i in y]) == formacao['gol'], "Quantidade Goleiro"
prob.solve()
escalados = []
for v in prob.variables():
if v.varValue == 1:
atleta = v.name.replace('Atl_','').replace('_','-')
escalados.append(atleta)
print(atleta, "=", v.varValue)
colunas = ['atletas.posicao_id','atletas.clube.id.full.name','atletas.pontos_num','atletas.preco_num']
df_rodada.loc[escalados][colunas]
df_rodada.loc[escalados]['atletas.pontos_num'].sum()
df_rodada.loc[escalados]['atletas.preco_num'].sum()
jogar_em_casa = 5
times = {
'Internacional':3,
'Fortaleza':2
}
times_casa = partidas[partidas['round'] == rodada]['home_team']
df_rodada.loc[df_rodada['atletas.clube_id'].isin(times_casa),'probs'] = df_rodada.loc[
df_rodada['atletas.clube_id'].isin(times_casa),'probs'] * (jogar_em_casa / 10 + 1)
for time in times:
df_rodada.loc[df_rodada['atletas.clube.id.full.name'] == time,'probs'] = df_rodada.loc[
df_rodada['atletas.clube.id.full.name'] == time,'probs'] * (times[time] / 10 + 1)
z = df_rodada['probs'].to_dict()
prob = LpProblem("Melhor_Escalacao", LpMaximize)
y = LpVariable.dicts("Atl",df_rodada.index,0,1,cat='Binary')
prob += lpSum([z[i] * y[i] for i in y])
prob += lpSum([c[i] * y[i] for i in y]) <= cartoletas, "Limite de Cartoletas"
prob += lpSum([dummies_posicao['ata'][i] * y[i] for i in y]) == formacao['ata'], "Quantidade Atacantes"
prob += lpSum([dummies_posicao['lat'][i] * y[i] for i in y]) == formacao['lat'], "Quantidade Laterais"
prob += lpSum([dummies_posicao['mei'][i] * y[i] for i in y]) == formacao['mei'], "Quantidade Meio"
prob += lpSum([dummies_posicao['zag'][i] * y[i] for i in y]) == formacao['zag'], "Quantidade Zagueiros"
prob += lpSum([dummies_posicao['gol'][i] * y[i] for i in y]) == formacao['gol'], "Quantidade Goleiro"
prob.solve()
escalados = []
for v in prob.variables():
if v.varValue == 1:
atleta = v.name.replace('Atl_','').replace('_','-')
escalados.append(atleta)
print(atleta, "=", v.varValue)
colunas = ['atletas.posicao_id','atletas.clube.id.full.name','atletas.pontos_num','atletas.preco_num']
df_rodada.loc[escalados][colunas]
df_rodada.loc[escalados]['atletas.pontos_num'].sum()
df_rodada.loc[escalados]['atletas.preco_num'].sum()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quantidade única de jogadores é do mesmo tamanho do Dataframe.
Step2: Para o contexto desse estudo, vamos analisar cada posição utilizada no Cartola separadamente. Sendo assim criamos uma lista com todas as posições existentes no arquivo médias.
Step3: Para facilitar a localização de cada jogador nas matrizes que construirmos, vamos criar um índice baseado no rankeamento do "player_id". Como teremos matrizes para cada posição, criamos um ranking para cada posição.
Step4: Partidas
Step5: Uma das hipóteses testadas nesse estudo, é o impacto da quantidade de gols do time na performance do jogador. Para facilitar a utilização desses dados, vamos normalizar as colunas de quantidade de gols dos time de casa e visitante.
Step6: Dados das rodadas
Step7: Para o contexto desse estudo não vamos analisar a performance de técnicos.
Step8: Para colocar cada jogador em uma posição específica na matriz, vamos trazer a informação de ranking que criamos para o dataframe de partidas.
Step9: Removendo jogadores não cadastrados
Step10: Matriz M de estados
Step11: Atualizando a matriz
Step12: Depois de processar todas as partidas de todos os jogadores, vamos normalizar M para que todas as colunas somem 1.
Step13: Distribuição estacionária
Step14: Por final geramos uma array de tamanho d, lembrando que uma posição i aqui está relacionada a posição i no ranking de ids que criamos no começo do estudo.
Step15: Podemos verificar por exemplo quem teve probabilidade maior que 1.5%.
Step16: Calculando a distribuição para todas posições
Step17: Escalando para a rodada
Step18: Primeiro vamos criar um DataFrame somente com as informações da rodada e colocar as probabilidades que encontramos referente a cada jogador.
Step19: Vamos utilizar também do recurso de status e só trabalhar com jogadores em status Provável.
Step20: Otimizando a escalação
Step21: Programação Linear
Step22: Primeiro, iniciamos o problema de otimização e definimos uma função objetivo.
Step23: Agora adicionamos todas as restrições e calculamos.
Step24: Os jogadores escalados que maximizam as probabilidades dentro das restrições, ficam com o valor 1 para a variável de atletas.
Step25: Podemos verificar qual foi o total de pontos que essa escalação somaria na última rodada.
Step26: Também o custo total.
Step27: Incluindo Palpites
Step28: Agora, aumentamos as probabilidades dos jogadores que se enquadram nessa regra, multiplicando o seu valor atual, pela porcentagem de pontos que demos a ele, por exemplo
Step29: Programação Linear
Step30: Por fim geramos uma nova escalação, que levou em consideração os pesos que colocamos acima.
Step31: Ao avaliar a escalação abaixo, geramos dessa vez uma pontuação de 81 pontos, 60% a mais que o resultado anterior.
|
3,566
|
<ASSISTANT_TASK:>
Python Code:
print(" pi ~= 3.14 (two first digits).")
print(" pi ~= 22/7 = {} (two first digits).".format(22.0 / 7.0))
print(" pi ~= 355/113 = {} (six first digits).".format(355.0 / 113.0))
def mathpi():
    """Return the double-precision value of pi from the standard math module."""
    import math
    return math.pi
print("First method: using math.pi gives pi ~= {:.17f} (17 digits are displayed here).".format(mathpi()))
from decimal import Decimal
bigpi = Decimal('3.1415926535897932384626433832795028841971693993751058209749445923078164062862089986280348253421170679')
print("The first 100 digits of pi are {}.".format(bigpi))
from random import uniform
def montecarlo_pi(nbPoints=10000):
    """Return a probabilistic estimate of pi, as a float number.

    Samples nbPoints uniform points in the unit square and counts the
    fraction that falls inside the quarter disk of radius 1, whose area
    ratio to the square is pi/4.
    """
    nbInside = 0
    # we pick a certain number of points (nbPoints)
    for _ in range(nbPoints):
        x = uniform(0, 1)
        y = uniform(0, 1)
        # (x, y) is now a random point in the square [0, 1] x [0, 1]
        if (x**2 + y**2) < 1:
            # This point (x, y) is inside the circle C(0, 1)
            nbInside += 1
    # NOTE: the original divided by floor(nbPoints), but `floor` was never
    # imported (NameError); plain true division is correct here.
    return 4.0 * nbInside / nbPoints
print("The simple Monte-Carlo method with {} random points gave pi = {}".format(10000, montecarlo_pi(10000)))
import mpmath
# from sympy import mpmath # on older sympy versions
mp = mpmath.mp
mp.dps = 1000 # number of digits
my_pi = mp.pi # Gives pi to a thousand places
print("A lazy method using the mpmath module:\npi is approximatly {} (with {} digits).".format(my_pi, mp.dps))
mp.dps = 100000 # number of digits
len(str(mp.pi))
mpmath_pi = Decimal(str(mp.pi))
mp.dps = 140330
print(str(mp.pi)[2:][140317:140317+10])
%timeit mp.dps=140330;print(str(mp.pi)[2:][140317:140317+10])
%timeit mp.dps=1403230;print(str(mp.pi)[2:][1403217:1403217+10])
import math
def gauss_legendre_1(max_step):
    """Float implementation of the Gauss-Legendre algorithm, for max_step steps.

    Convergence is quadratic, but accuracy is capped by 64-bit floats,
    so a handful of steps already saturates double precision.
    """
    a = 1.
    b = 1./math.sqrt(2)
    t = 1./4.0
    p = 1.
    for i in range(max_step):
        # Arithmetic-geometric-mean iteration.
        at = (a + b) / 2.0
        bt = math.sqrt(a*b)
        tt = t - p*(a-at)**2
        pt = 2.0 * p
        a, b, t, p = at, bt, tt, pt
    my_pi = ((a+b)**2)/(4.0*t)
    return my_pi
my_pi = gauss_legendre_1(4)
my_pi
print("pi is approximately: {:.15f} (as a float number, precision is limited).".format(my_pi))
accuracy = 100*abs(math.pi - my_pi)/math.pi
print("Accuracy % with math.pi: {:.4g}".format(accuracy))
accuracy = 100*abs(float(mpmath_pi) - my_pi)/float(mpmath_pi)
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
my_pi = gauss_legendre_1(40)
my_pi
print("pi is approximately: {:.15f} (as a float number, precision is limited).".format(my_pi))
accuracy = 100*abs(math.pi - my_pi)/math.pi
print("Accuracy % with math.pi: {:.4g}".format(accuracy))
accuracy = 100*abs(float(mpmath_pi) - my_pi)/float(mpmath_pi)
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
from decimal import Decimal, getcontext
def gauss_legendre_2(max_step):
    """Decimal implementation of the Gauss-Legendre algorithm, for max_step steps.

    The number of exact digits roughly doubles at each step, so the
    Decimal context is sized to 2**(max_step + 2) digits plus a margin.
    """
    # trick to improve precision
    getcontext().prec = 3 + 2**(max_step + 2)
    cst_2 = Decimal(2.0)
    cst_4 = Decimal(4.0)
    a = Decimal(1.0)
    b = Decimal(0.5).sqrt()
    t = Decimal(0.25)
    p = Decimal(1.0)
    for i in range(max_step):
        # Arithmetic-geometric-mean iteration, in arbitrary precision.
        new_a = (a+b)/cst_2
        new_b = (a*b).sqrt()
        new_t = Decimal(t - p*(a - new_a)**2)
        new_p = cst_2*p
        a, b, t, p = new_a, new_b, new_t, new_p
    my_pi = Decimal(((a+b)**2)/(cst_4*t))
    return my_pi
my_pi = gauss_legendre_2(5)
print("pi is approximately: {}.".format(my_pi.to_eng_string()[:2**(5+1)]))
accuracy = 100*abs(Decimal(math.pi) - my_pi)/Decimal(math.pi)
print("Accuracy % with math.pi: {:.4g}".format(accuracy))
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
%timeit gauss_legendre_1(8)
%timeit gauss_legendre_2(8)
def leibniz(max_step):
    """Computing an approximation of pi with Leibniz' series.

    pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...; convergence is very slow
    (error ~ 1/max_step).
    """
    my_pi = Decimal(0)
    for k in range(max_step):
        my_pi += Decimal((-1)**k) / Decimal(2*k+1)
    return Decimal(4) * my_pi
getcontext().prec = 20 # trick to improve precision
my_pi = leibniz(1000)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 20 # trick to improve precision
my_pi = leibniz(10000)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
def bbp(max_step):
    """Computing an approximation of pi with the Bailey-Borwein-Plouffe series.

    Each term of the 1/16**k series contributes roughly one more
    hexadecimal digit of pi.
    """
    my_pi = Decimal(0)
    for k in range(max_step):
        my_pi += (Decimal(1)/(16**k))*((Decimal(4)/(8*k+1))-(Decimal(2)/(8*k+4))-(Decimal(1)/(8*k+5))-(Decimal(1)/(8*k+6)))
    return my_pi
getcontext().prec = 20 # trick to improve precision
my_pi = bbp(10)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 200 # trick to improve precision
my_pi = bbp(200)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 500 # trick to improve precision
my_pi = bbp(500)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 10 + 1000 # trick to improve precision
%timeit bbp(1000)
getcontext().prec = 10 + 2000 # trick to improve precision
%timeit bbp(2000)
def bellard(max_step):
    """Computing an approximation of pi with Bellard's series.

    A faster 1/1024**k variant of the BBP formula; each term adds
    roughly three decimal digits.
    """
    my_pi = Decimal(0)
    for k in range(max_step):
        my_pi += (Decimal(-1)**k/(1024**k))*( Decimal(256)/(10*k+1) + Decimal(1)/(10*k+9) - Decimal(64)/(10*k+3) - Decimal(32)/(4*k+1) - Decimal(4)/(10*k+5) - Decimal(4)/(10*k+7) -Decimal(1)/(4*k+3))
    # The whole sum carries a constant prefactor of 1/2**6.
    return my_pi * Decimal(1.0/(2**6))
getcontext().prec = 40 # trick to improve precision
my_pi = bellard(10)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 800 # trick to improve precision
my_pi = bellard(200)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 10 + 1000 # trick to improve precision
%timeit bellard(1000)
getcontext().prec = 10 + 2000 # trick to improve precision
%timeit bellard(2000)
from math import factorial
def ramanujan(max_step):
    """Computing an approximation of pi with one of Ramanujan's formulas.

    1/pi = (2*sqrt(2)/9801) * sum_k (4k)! (1103 + 26390 k) / ((k!)^4 396^(4k));
    each term adds roughly eight decimal digits.
    """
    my_pi = Decimal(0)
    # Hoist the constants out of the loop as Decimals.
    d_1103 = Decimal(1103)
    d_26390 = Decimal(26390)
    d_396 = Decimal(396)
    for k in range(max_step):
        my_pi += ((Decimal(factorial(4 * k))) * (d_1103 + d_26390 * Decimal(k))) / ( (Decimal(factorial(k)))**4 * (d_396**(4*k)))
    my_pi = my_pi * 2 * Decimal(2).sqrt() / Decimal(9801)
    # The series computes 1/pi, so invert at the end.
    my_pi = my_pi**(-1)
    return my_pi
getcontext().prec = 40 # trick to improve precision
my_pi = ramanujan(4)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 400 # trick to improve precision
my_pi = ramanujan(40)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 2000 # trick to improve precision
my_pi = ramanujan(200)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 10 + 2000 # trick to improve precision
%timeit ramanujan(200)
getcontext().prec = 10 + 5000 # trick to improve precision
%timeit ramanujan(400)
%%time
getcontext().prec = 140350 # trick to improve precision
i = 140317
my_pi = ramanujan(10000)
print(str(my_pi)[2:][i:i+10])
mp.dps=140330
print(str(mp.pi)[2:][i:i+10])
from math import factorial
def chudnovsky(max_step):
    """Approximate pi with the Chudnovsky series (~14 digits per term).

    The caller must set getcontext().prec high enough.  (The original
    docstring had lost its quotes and wrongly said "Bellard series".)
    """
    my_pi = Decimal(0)
    for k in range(max_step):
        # k-th term of the Chudnovsky series for 1/pi.
        my_pi += (Decimal(-1)**k) * (Decimal(factorial(6*k)) / ((factorial(k)**3) * (factorial(3*k))) * (13591409 + 545140134*k) / (640320**(3*k)))
    # 4270934400 = 426880 * 10005, so inverting below yields
    # pi = 426880*sqrt(10005) / sum.
    my_pi = my_pi * Decimal(10005).sqrt() / 4270934400
    my_pi = my_pi**(-1)
    return my_pi
getcontext().prec = 3000 # trick to improve precision
my_pi = chudnovsky(200)
my_pi
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 6000 # trick to improve precision
my_pi = chudnovsky(400)
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 3000 # trick to improve precision
%timeit chudnovsky(200)
getcontext().prec = 6000 # trick to improve precision
%timeit chudnovsky(400)
def arccot(x, unity):
    """Return a Decimal approximately equal to unity * arccot(x).

    Uses the Taylor series of arctan(1/x) in fixed-point form (scaled by
    `unity`).  The series is truncated once a term can no longer affect the
    running sum at the current Decimal context precision.  The original
    version only stopped when a term underflowed to exactly zero, which in
    Decimal arithmetic happens near the context's Emin (default -999999) and
    cost hundreds of thousands of needless iterations.
    """
    x = Decimal(x)
    unity = Decimal(unity)
    mysum = xpower = unity / x
    prec = getcontext().prec
    n = 3
    sign = -1
    while True:
        xpower = xpower / (x*x)
        term = xpower / n
        # Stop when the term underflows, or is below one unit in the last
        # place of the accumulated sum (it can't change the result).
        if not term or term.adjusted() < mysum.adjusted() - prec:
            break
        mysum += sign * term
        sign = -sign  # the series alternates in sign
        n += 2
    return mysum
def machin(digits):
    """Compute pi with Machin's formula, with precision at least `digits`.

    pi/4 = 4*arccot(5) - arccot(239).  Uses fixed-point arithmetic:
    `unity` is 10**(digits + 10), i.e. ten guard digits beyond the
    requested precision.  The caller must also set getcontext().prec
    high enough for the final Decimal division.
    """
    unity = 10**(digits + 10)
    my_pi = Decimal(4) * (Decimal(4)*arccot(5, unity) - arccot(239, unity))
    # Undo the fixed-point scaling.
    return my_pi / Decimal(unity)
getcontext().prec = 10000 # trick to improve precision
my_pi = machin(100)
accuracy = 100*abs(mpmath_pi - my_pi)/mpmath_pi
print("Accuracy % with mpmath_pi: {:.4g}".format(accuracy))
getcontext().prec = 5000 # trick to improve precision
%timeit machin(50)
getcontext().prec = 10000 # trick to improve precision
%timeit machin(100)
%%time
i = 14031
getcontext().prec = i + 20 # trick to improve precision
mp.dps = i + 20
print(str(mp.pi)[2:][i:i+10])
my_pi = machin(11)
print(str(my_pi)[2:][i:i+10])
%%time
i = 140317
getcontext().prec = i + 20 # trick to improve precision
mp.dps = i + 20
print(str(mp.pi)[2:][i:i+10])
my_pi = machin(50)
print(str(my_pi)[2:][i:i+10])
def next_pi_digit(max_step):
    """Yield decimal digits of pi using Gibbons' unbounded spigot algorithm.

    Runs `max_step` iterations of the spigot state machine; each iteration
    either emits one digit or refines the internal state, so at most
    `max_step` digits are produced (roughly max_step / 3.3 in practice).
    """
    num, rem, den, step, digit, scale = 1, 0, 1, 1, 3, 3
    for _ in range(max_step):
        if 4 * num + rem - den < digit * den:
            # The next digit is settled: emit it and shift the state by 10.
            yield digit
            next_digit = (10 * (3 * num + rem)) // den - 10 * digit
            num, rem, digit = 10 * num, 10 * (rem - digit * den), next_digit
        else:
            # Not enough information yet: absorb one more series term.
            num, rem, den, step, digit, scale = (
                num * step,
                (2 * num + rem) * scale,
                den * scale,
                step + 1,
                (num * (7 * step + 2) + rem * scale) // (den * scale),
                scale + 2,
            )
def generator_pi(max_step):
    """Assemble the spigot-generated digits into a Decimal approximation of pi."""
    digits = [str(d) for d in next_pi_digit(max_step)]
    # First digit is the integer part ("3"); the rest go after the point.
    return Decimal(digits[0] + '.' + ''.join(digits[1:]))
getcontext().prec = 50 # trick to improve precision
generator_pi(1000)
getcontext().prec = 5000 # trick to improve precision
generator_pi(1000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This method is extremely limited, and will not give you more than 13 correct digits, as math.pi is stored as a float number (limited precision).
Step2: If we know the digits, we can directly print them
Step4: A simple Monte-Carlo method
Step5: It is an interesting method, but it is just too limited for approximating digits of $\pi$.
Step6: We can arbitrarily set the precision, with the constant mp.dps (digit numbers).
Step7: Let save it for further comparison of simpler methods.
Step8: We can solve the initial challenge easily
Step9: And it will most probably be the quickest method presented here
Step10: Asking for $10$ times more digits take about $100$ more of time (that's a bad news).
Step12: The Gauss–Legendre iterative algorithm
Step14: This first implementation of the Gauss-Legendre algorithm is limited to a precision of 13 or 14 digits. But it converges quickly ! (4 steps here).
Step15: The second implementation of the Gauss-Legendre algorithm now works better (when we adapt the precision), and it converges quickly: 8 steps give precision up to the 697th digit.
Step17: Methods based on a convergent series
Step19: This first formula is very inefficient!
Step20: That's pretty impressive, in only $10$ steps!
Step21: It is, of course, slower than the optimized algorithm from mpmath.
Step23: Bellard's formula (hard)
Step24: That's pretty impressive, in only $10$ steps!
Step25: It is, of course, slower than the optimized algorithm from mpmath.
Step27: It is also slower than BBP formula.
Step28: $1595$ correct digits with $200$ terms, that's quite good!!
Step29: Let's try to answer my initial question, using this naive implementation.
Step31: ... It was too slow!
Step32: It is very efficient, as Ramanujan's formula.
Step33: It gets $2834$ correct digits in $200$ steps!
Step35: About $2$ seconds to find correctly the first $5671$ digits? That's slow! But hey, it's Python (dynamic typing etc).
Step37: Applying Machin's formula
Step38: So we got the first $9995$ digits correctly... in $45$ seconds.
Step39: The program can be used to compute tens of thousands of digits in just a few seconds on a modern computer.
Step40: It was too slow too! But at least it worked!
Step41: It does not use Decimal numbers.
|
3,567
|
<ASSISTANT_TASK:>
Python Code:
# You want to be able to rotate scatterplots in 3D, so don't show them inline
%matplotlib tk
# 'pip install bunch' if you do not have 'bunch' package
import bunch
# Our utility code resides in module dim_reduce.py, which we import here:
import dim_reduce
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
data = load_iris() # a JavaScript-like object ('Bunch'), holding the training data for the built-in 'iris' dataset
y = data.target # labels
X = data.data # features (4 features for each sample)
pca = PCA(n_components=3) # reduce feature set to 3 dimensions
reduced_X = pca.fit_transform(X) # reduced 3D feature set
# Repackage labels + reduced features into a Bunch for the plotting helpers.
visData = bunch.Bunch()
visData.target = data.target
visData.data = reduced_X
# NOTE(review): vis3D/vis2D signatures assumed from usage; they are defined
# in the project-local dim_reduce.py module — confirm there.
dim_reduce.vis3D(visData, title="3D PCA", dotsize=30)
pca = PCA(n_components=2)
visData.data = pca.fit_transform(X)
dim_reduce.vis2D(visData, title="2D PCA", dotsize=30)
# t-SNE: non-linear embedding of the same 4-D features into 3-D.
from sklearn.manifold import TSNE
tsne = TSNE(n_components=3, random_state=0, learning_rate = 100)
visData.data = tsne.fit_transform(X)
dim_reduce.vis3D(visData, title="3D t-SNE", dotsize=30)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let us apply a PCA dimensionality reduction method to the "iris" dataset (which is 4D).
Step2: A 3D scatterplot should display in a separate window. Rotating it by clicking and dragging you can observe that 2D is enough to separate the classes in this case! Indeed, let's transform directly to 2D
Step3: Similarly for t-SNE
|
3,568
|
<ASSISTANT_TASK:>
Python Code:
from auxi.tools.chemistry import thermochemistry as thermo
#TODO: The following line of code is not working, and must be fixed.
#thermo.convert_fact_file_to_auxi_thermo_file("path/to/factsage_file", "path/to/new_auxi_thermo_file")
thermo.list_compounds()
thermo.load_data_auxi('data')
thermo.list_compounds()
thermo.load_data_auxi()
Cp_H2O = thermo.Cp("H2O[L]", 70.0)
print("The Cp of 1 kg of water at 70 °C is", Cp_H2O, "kWh/K.")
Cp_H2O = thermo.Cp("H2O[G]", 70.0)
print("The Cp of 1 kg of water vapour at 70 °C is", Cp_H2O, "kWh/K.")
m_ZrO2 = 2.34
Cp_ZrO2 = thermo.Cp("ZrO2[S1]", 893.5, m_ZrO2)
print("The Cp of 2.34 kg of ZrO2[S1] at 893.5 °C is", Cp_ZrO2, "kWh/K.")
H_H2O = thermo.H("H2O[L]", 70.0)
print("The enthalpy of 1 kg of water at 70 °C is", H_H2O, "kWh.")
H_H2O = thermo.H("H2O[G]", 70.0)
print("The enthalpy of 1 kg of water vapour at 70 °C is", H_H2O, "kWh.")
m_ZrO2 = 2.34
H_ZrO2 = thermo.H("ZrO2[S1]", 893.5, m_ZrO2)
print("The enthalpy of 2.34 kg of ZrO2[S1] at 893.5 °C is", H_ZrO2, "kWh.")
S_H2O = thermo.S("H2O[L]", 70.0)
print("The entropy of 1 kg of water at 70 °C is", S_H2O, "kWh/K.")
S_H2O = thermo.S("H2O[G]", 70.0)
print("The entropy of 1 kg of water vapour at 70 °C is", S_H2O, "kWh/K.")
m_ZrO2 = 2.34
S_ZrO2 = thermo.S("ZrO2[S1]", 893.5, m_ZrO2)
print("The entropy of 2.34 kg of ZrO2[S1] at 893.5 °C is", S_ZrO2, "kWh/K.")
G_H2O = thermo.G("H2O[L]", 70.0)
print("The Gibbs free energy of 1 kg of water at 70 °C is", G_H2O,
"kWh.")
G_H2O = thermo.G("H2O[G]", 70.0)
print("The Gibbs free energy of 1 kg of water vapour at 70 °C is", G_H2O,
"kWh.")
m_ZrO2 = 2.34
G_ZrO2 = thermo.G("ZrO2[S1]", 893.5, m_ZrO2)
print("The Gibbs free energy of 2.34 kg of ZrO2[S1] at 893.5 °C is", G_ZrO2,
"kWh.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading Thermochemical Data
Step2: The result lists all the compounds with the phases for which data are available. Taking the compound SiO2 as an example, data are available for eight solid phases (S1 to S8), for the liquid phase and for the gas phase.
Step3: Calculating Heat Capacity
Step4: The first parameter to the function must specify both the compound's formula and phase. If the phase is not specified it is impossible to calculate a result. The heat capacity of water is clearly significantly different from that of water vapour.
Step5: The parameters to the auxi.tools.chemistry.thermochemistry.H function works the same as that of the auxi.tools.chemistry.thermochemistry.Cp function. Both formula and phase are required in the first parameter, the second is temperature in °C and the third is mass, which is optional with a default value of 1 kg.
Step6: The parameters to the
|
3,569
|
<ASSISTANT_TASK:>
Python Code:
!pip install -q -U apache-beam[gcp]
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
from datetime import datetime
import apache_beam as beam
from apache_beam.io.gcp.datastore.v1new.datastoreio import WriteToDatastore
PROJECT_ID = 'yourProject' # Change to your project.
BUCKET = 'yourBucketName' # Change to the bucket you created.
BQ_REGION = 'yourBigQueryRegion' # Change to your BigQuery region.
DF_REGION = 'yourDataflowRegion' # Change to your Dataflow region.
BQ_DATASET_NAME = 'recommendations'
BQ_TABLE_NAME = 'playlist'
DS_KIND = 'song'
!gcloud config set project $PROJECT_ID
try:
from google.colab import auth
auth.authenticate_user()
print("Colab user is authenticated.")
except: pass
!bq mk --dataset \
--location={BQ_REGION} \
--project_id={PROJECT_ID} \
--headless=True \
{PROJECT_ID}:{BQ_DATASET_NAME}
def run_copy_bq_data_pipeline(args):
    """Copy the public playlist sample into this project's BigQuery table.

    Reads distinct (playlist, track) rows from the public
    `bigquery-samples.playlists.playlist` table and writes them to
    BQ_DATASET_NAME.BQ_TABLE_NAME, truncating any previous contents.

    Args:
        args: dict of Beam PipelineOptions kwargs (job_name, runner,
            project, temp_location, region, ...).
    """
    # Schema of the destination table.
    schema = 'list_Id:INT64, track_Id:INT64, track_title:STRING, track_artist:STRING'
    # GROUP BY deduplicates repeated (playlist, track) rows; NULL titles and
    # non-positive track ids are dropped.
    query = '''
    SELECT
        id list_Id,
        tracks_data_id track_Id,
        tracks_data_title track_title,
        tracks_data_artist_name track_artist
    FROM `bigquery-samples.playlists.playlist`
    WHERE tracks_data_title IS NOT NULL AND tracks_data_id > 0
    GROUP BY list_Id, track_Id, track_title, track_artist;
    '''
    pipeline_options = beam.options.pipeline_options.PipelineOptions(**args)
    with beam.Pipeline(options=pipeline_options) as pipeline:
        _ = (
            pipeline
            | 'ReadFromBigQuery' >> beam.io.Read(beam.io.BigQuerySource(
                project=PROJECT_ID, query=query, use_standard_sql=True))
            | 'WriteToBigQuery' >> beam.io.WriteToBigQuery(
                table=BQ_TABLE_NAME, dataset=BQ_DATASET_NAME, project=PROJECT_ID,
                schema=schema,
                create_disposition='CREATE_IF_NEEDED',
                write_disposition='WRITE_TRUNCATE'
            )
        )
DATASET = 'playlist'
RUNNER = 'DataflowRunner'
job_name = f'copy-bigquery-{datetime.utcnow().strftime("%y%m%d%H%M%S")}'
args = {
'job_name': job_name,
'runner': RUNNER,
'project': PROJECT_ID,
'temp_location': f'gs://{BUCKET}/dataflow_tmp',
'region': DF_REGION
}
print("Pipeline args are set.")
print("Running pipeline...")
%time run_copy_bq_data_pipeline(args)
print("Pipeline is done.")
%%bigquery --project $PROJECT_ID
CREATE OR REPLACE VIEW `recommendations.vw_item_groups`
AS
SELECT
list_Id AS group_Id,
track_Id AS item_Id
FROM
`recommendations.playlist`
def create_entity(song_info, kind):
    """Convert a BigQuery row dict into a Datastore Entity of the given kind.

    The entity is keyed on the row's "track_Id" value; the remaining fields
    become entity properties.

    Args:
        song_info: dict with at least a "track_Id" key.
        kind: Datastore kind name for the new entity.
    """
    from apache_beam.io.gcp.datastore.v1new.types import Entity
    from apache_beam.io.gcp.datastore.v1new.types import Key
    # Copy before popping: Beam transforms must not mutate their input
    # elements, and the original code popped from the caller's dict.
    properties = dict(song_info)
    track_Id = properties.pop("track_Id")
    key = Key([kind, track_Id])
    song_entity = Entity(key)
    song_entity.set_properties(properties)
    return song_entity
def run_export_to_datatore_pipeline(args):
    """Export one Datastore entity per distinct track from BigQuery.

    Aggregates the playlist table down to one row per track_Id (keeping an
    arbitrary title/artist spelling via MAX) and writes each row to
    Datastore as a DS_KIND entity.

    Note: "datatore" in the function name is a typo for "datastore"; kept
    as-is so existing callers don't break.

    Args:
        args: dict of Beam PipelineOptions kwargs.
    """
    # One row per track: MAX() collapses duplicate title/artist values.
    query = f'''
    SELECT
        track_Id,
        MAX(track_title) track_title,
        MAX(track_artist) artist
    FROM
        `{BQ_DATASET_NAME}.{BQ_TABLE_NAME}`
    GROUP BY track_Id
    '''
    pipeline_options = beam.options.pipeline_options.PipelineOptions(**args)
    with beam.Pipeline(options=pipeline_options) as pipeline:
        _ = (
            pipeline
            | 'ReadFromBigQuery' >> beam.io.Read(beam.io.BigQuerySource(
                project=PROJECT_ID, query=query, use_standard_sql=True))
            | 'ConvertToDatastoreEntity' >> beam.Map(create_entity, DS_KIND)
            | 'WriteToDatastore' >> WriteToDatastore(project=PROJECT_ID)
        )
import os
from datetime import datetime
DATASET = 'playlist'
RUNNER = 'DataflowRunner'
job_name = f'load-datastore-{datetime.utcnow().strftime("%y%m%d%H%M%S")}'
args = {
'job_name': job_name,
'runner': RUNNER,
'project': PROJECT_ID,
'temp_location': f'gs://{BUCKET}/dataflow_tmp',
'region': DF_REGION
}
print("Pipeline args are set.")
print("Running pipeline...")
%time run_export_to_datatore_pipeline(args)
print("Pipeline is done.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import libraries
Step2: Configure GCP environment settings
Step3: Authenticate your GCP account
Step4: Copy the public playlist data into your BigQuery dataset
Step5: Define the Dataflow pipeline
Step6: Run the Dataflow pipeline
Step7: Create the vw_item_groups view
Step8: Export song information to Datastore
Step9: Run the Dataflow pipeline
|
3,570
|
<ASSISTANT_TASK:>
Python Code:
# TFlearn libraries
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist
# General purpose libraries
import matplotlib.pyplot as plt
import numpy as np
import math
# Extract data from mnist.load_data()
x, y, x_test, y_test = mnist.load_data(one_hot = True)
# View number of samples in data sets
print 'x has shape {}'.format(x.shape)
print 'y has shape {}'.format(y.shape)
print 'x_test has shape {}'.format(x_test.shape)
print 'y_test has shape {}'.format(y_test.shape)
# Get first image of training set
x_first = x[0]
print 'x_first has shape {}'.format(x_first.shape)
# Reshape the array into 28 x 28 array (2-dimensional array)
x_first_reshaped = x_first.reshape((28, 28))
print 'x_first_reshaped has the shape {}'.format(x_first_reshaped.shape)
# Plot
plt.imshow(x_first_reshaped, cmap='gray')
plt.show()
# Reshape x
x_reshaped = x.reshape([-1, 28, 28, 1])
print 'x_reshaped has the shape {}'.format(x_reshaped.shape)
# Get first reshaped image of x
x_reshaped_first = x_reshaped[0]
# View x_reshaped_first
x_reshaped_first
# Reshape x_test
x_test_reshaped = x_test.reshape([-1, 28, 28, 1])
print 'x_test_reshaped has the shape {}'.format(x_test_reshaped.shape)
print 'x_reshaped has the shape {}'.format(x_reshaped.shape)
print 'x_test_reshaped has the shape {}'.format(x_test_reshaped.shape)
print 'y has the shape {}'.format(y.shape)
print 'y_test has the shape {}'.format(y_test.shape)
# sentdex's code to build the neural net using tflearn
# Input layer --> conv layer w/ max pooling --> conv layer w/ max pooling --> fully connected layer --> output layer
convnet = input_data(shape = [None, 28, 28, 1], name = 'input')
convnet = conv_2d(convnet, 32, 2, activation = 'relu')
convnet = max_pool_2d(convnet, 2)
convnet = conv_2d(convnet, 64, 2, activation = 'relu')
convnet = max_pool_2d(convnet, 2)
convnet = fully_connected(convnet, 1024, activation = 'relu')
# convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 10, activation = 'softmax')
convnet = regression(convnet, optimizer = 'sgd', learning_rate = 0.01, loss = 'categorical_crossentropy', name = 'targets')
model = tflearn.DNN(convnet)
model.fit(
{'input': x_reshaped},
{'targets': y},
n_epoch = 1,
validation_set = ({'input': x_test_reshaped}, {'targets': y_test}),
snapshot_step = 500,
show_metric = True
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Gathering Data
Step2: It looks like each sample (55k samples in the training set and 10k samples in the test set). Let's just try to output 1 image if we can.
Step3: Okay, so it's a 7. Here, we reshaped the data from being a single array of 784 elements to 28 arrays of 28 elements each.
Step4: Here, I'm a bit confused as to exactly what the 4 elements in reshape are. Through a bit of research, I've learned that the -1 input means "unknown / undisclosed parameter" which will be figured out by the rest of the parameters.
Step5: I think I'm right...
Step6: Build Model
Step7: That's all sentdex does to build his 2D CNN. I've obviously have yet to play around with any of the parameters yet, but let's just try to understand his code for a second.
|
3,571
|
<ASSISTANT_TASK:>
Python Code:
%%file sq.py
def square(n):
    """Return the square of n."""
    return n * n
import firefly
remote_sq = firefly.Client("http://127.0.0.1:8000")
remote_sq.square(n=4)
%%file add.py
# your code here
%%file credit_grade.py
Program to find the credit grade of a person.
import zlib
import random
def find_credit_grade(email):
    """Return the credit grade of the person identified by `email`.

    The grade is one of A, B, C, D, E, F or G.  The adler32 checksum of the
    email seeds the RNG, so the same address always maps to the same grade.
    (The original docstring had lost its triple quotes — a syntax error —
    restored here.)
    """
    seed = zlib.adler32(email.encode("utf-8"))
    r = random.Random(seed)
    return r.choice(["A", "B", "C", "D", "E", "F", "G"])
credit_grade_api = firefly.Client("http://127.0.0.1:8000/")
credit_grade_api.find_credit_grade(email="alice@example.com")
%%file credit_risk_service.py
Service to expose the credit risk model as an API.
from sklearn.externals import joblib
# read the encoders and the model
grade_encoder = joblib.load("../notebooks/le_grade.pkl")
ownership_encoder = joblib.load("../notebooks/le_ownership.pkl")
model = joblib.load("../notebooks/model.pkl")
def predict(amount, years, age, ownership, income, grade):
    """Return the probability of default for the given loan features.

    Uses the module-level `ownership_encoder`, `grade_encoder` and `model`
    loaded from disk.  (The original docstring had lost its triple quotes,
    which was a syntax error; restored here.)
    """
    # Encoders operate on vectors, so wrap each scalar in a one-element list.
    ownership_code = ownership_encoder.transform([ownership])[0]
    grade_code = grade_encoder.transform([grade])[0]
    # Feature order must match the order used when the model was trained.
    features = [amount, grade_code, years, ownership_code, income, age]
    # predict_proba returns [P(no default), P(default)] for the single row.
    p0, p1 = model.predict_proba([features])[0]
    return p1
import firefly
credit_risk_api = firefly.Client("http://127.0.0.1:9000")
credit_risk_api.predict(amount=10000,
years=2,
age=35,
ownership='RENT',
income=12345,
grade='A')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let us run it as a service using firefly by running the following command in your terminal.
Step2: The function will be available with the same name in the client. Please note that the client functions takes parameters only by name.
Step5: Credit Grade Service
Step6: Deploy it as a servive using Firefly.
Step9: Deploying the ML model
Step10: Run it as a service using firefly, again from your terminal. Let us use port 9000 now as port 8000 is used by the credit grade service.
|
3,572
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Use seaborn for pairplot
!pip install -q seaborn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Make numpy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
print(tf.__version__)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(url, names=column_names,
na_values='?', comment='\t',
sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
dataset.isna().sum()
dataset = dataset.dropna()
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')
train_dataset.describe().transpose()[['mean', 'std']]
normalizer = preprocessing.Normalization(axis=-1)
normalizer.adapt(np.array(train_features))
print(normalizer.mean.numpy())
first = np.array(train_features[:1])
with np.printoptions(precision=2, suppress=True):
print('First example:', first)
print()
print('Normalized:', normalizer(first).numpy())
horsepower = np.array(train_features['Horsepower'])
horsepower_normalizer = preprocessing.Normalization(input_shape=[1,], axis=None)
horsepower_normalizer.adapt(horsepower)
horsepower_model = tf.keras.Sequential([
horsepower_normalizer,
layers.Dense(units=1)
])
horsepower_model.summary()
horsepower_model.predict(horsepower[:10])
horsepower_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
%%time
history = horsepower_model.fit(
train_features['Horsepower'], train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_loss(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    for key in ('loss', 'val_loss'):
        plt.plot(history.history[key], label=key)
    plt.ylim([0, 10])
    plt.xlabel('Epoch')
    plt.ylabel('Error [MPG]')
    plt.legend()
    plt.grid(True)
plot_loss(history)
test_results = {}
test_results['horsepower_model'] = horsepower_model.evaluate(
test_features['Horsepower'],
test_labels, verbose=0)
x = tf.linspace(0.0, 250, 251)
y = horsepower_model.predict(x)
def plot_horsepower(x, y):
    """Overlay a prediction curve on the raw Horsepower/MPG scatter.

    Plots the module-level training data (train_features / train_labels)
    as points, then the (x, y) prediction curve as a black line on top.
    """
    plt.scatter(train_features['Horsepower'], train_labels, label='Data')
    plt.plot(x, y, color='k', label='Predictions')
    plt.xlabel('Horsepower')
    plt.ylabel('MPG')
    plt.legend()
plot_horsepower(x,y)
linear_model = tf.keras.Sequential([
normalizer,
layers.Dense(units=1)
])
linear_model.predict(train_features[:10])
linear_model.layers[1].kernel
linear_model.compile(
optimizer=tf.optimizers.Adam(learning_rate=0.1),
loss='mean_absolute_error')
%%time
history = linear_model.fit(
train_features, train_labels,
epochs=100,
# suppress logging
verbose=0,
# Calculate validation results on 20% of the training data
validation_split = 0.2)
plot_loss(history)
test_results['linear_model'] = linear_model.evaluate(
test_features, test_labels, verbose=0)
def build_and_compile_model(norm):
    """Build and compile a small regression DNN.

    Stacks the given normalization layer, two 64-unit ReLU hidden layers
    and a single linear output unit, compiled with Adam (lr=0.001) and
    mean-absolute-error loss.
    """
    hidden = [layers.Dense(64, activation='relu') for _ in range(2)]
    dnn = keras.Sequential([norm, *hidden, layers.Dense(1)])
    dnn.compile(optimizer=tf.keras.optimizers.Adam(0.001),
                loss='mean_absolute_error')
    return dnn
# Fix: there is no build_model() in this file.  The cells that follow train
# and evaluate `dnn_horsepower_model` on the single Horsepower feature, so
# build the single-input DNN here from the horsepower normalizer defined above.
dnn_horsepower_model = build_and_compile_model(horsepower_normalizer)
dnn_horsepower_model.summary()
%%time
history = dnn_horsepower_model.fit(
train_features['Horsepower'], train_labels,
validation_split=0.2,
verbose=0, epochs=100)
plot_loss(history)
x = tf.linspace(0.0, 250, 251)
y = dnn_horsepower_model.predict(x)
plot_horsepower(x, y)
test_results['dnn_horsepower_model'] = dnn_horsepower_model.evaluate(
test_features['Horsepower'], test_labels,
verbose=0)
dnn_model = build_and_compile_model(normalizer)
dnn_model.summary()
%%time
history = dnn_model.fit(
train_features, train_labels,
validation_split=0.2,
verbose=0, epochs=100)
plot_loss(history)
test_results['dnn_model'] = dnn_model.evaluate(test_features, test_labels, verbose=0)
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
test_predictions = dnn_model.predict(test_features).flatten()
a = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
lims = [0, 50]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
error = test_predictions - test_labels
plt.hist(error, bins=25)
plt.xlabel('Prediction Error [MPG]')
_ = plt.ylabel('Count')
dnn_model.save('dnn_model')
reloaded = tf.keras.models.load_model('dnn_model')
test_results['reloaded'] = reloaded.evaluate(
test_features, test_labels, verbose=0)
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic regression
Step2: Auto MPG 数据集
Step3: 数据清洗
Step4: 为了保证这个初始示例的简单性,删除这些行。
Step5: "Origin" 列实际上是分类的,而不是数字。因此,使用 pd.get_dummies 将其转换为独热码:
Step6: 拆分训练数据集和测试数据集
Step7: 数据检查
Step8: 也可以查看总体的数据统计
Step9: 从标签中分离特征
Step10: 归一化
Step11: 使用不同的尺度和范围对特征归一化是好的实践。尽管模型可能 在没有特征归一化的情况下收敛,它会使得模型训练更加复杂,并会造成生成的模型依赖输入所使用的单位选择。
Step12: 然后对其执行 .adapt() 以适应数据:
Step13: 这将计算均值和方差,并将它们存储在层中。
Step14: 当层被调用时,它会返回输入数据,每个特征独立归一化:
Step15: 线性回归
Step16: 构建序贯模型:
Step17: 此模型将根据 Horsepower 预测 MPG。
Step18: 构建模型后,使用 Model.compile() 方法配置训练过程。要编译的最重要参数是 loss 和 optimizer,因为它们定义了将要优化的内容 (mean_absolute_error) 以及优化的方法(使用 optimizers.Adam)。
Step19: 训练配置完成后,使用 Model.fit() 执行训练:
Step20: 使用 history 对象中存储的统计信息可视化模型的训练进度。
Step21: 收集测试集上的结果,以便稍后使用:
Step22: 由于这是一个单变量回归,因此很容易将模型的预测视为输入的函数:
Step23: 多个输入
Step24: 当您对一批输入调用此模型时,它会为每个样本生成 units=1 输出。
Step25: 当您调用模型时,将构建其权重矩阵。现在,您可以看到 kernel($y=mx+b$ 中的 $m$)的形状为 (9,1)。
Step26: 使用与单输入 horsepower 模型相同的 compile 和 fit 调用:
Step27: 使用所有输入可以实现比 horsepower 模型低得多的训练和验证误差:
Step28: 收集测试集上的结果,以便稍后使用:
Step29: 模型
Step30: 一个变量
Step31: 使用 .summary 方法来打印该模型的简单描述。
Step32: 对模型进行1000个周期的训练,并在 history 对象中记录训练和验证的准确性。
Step33: 此模型略优于线性马力模型。
Step34: 如果您将预测值绘制为 Horsepower 的函数,则将看到此模型如何利用隐藏层提供的非线性:
Step35: 收集测试集上的结果,以便稍后使用:
Step36: 完整模型
Step37: 收集测试集上的结果:
Step38: 性能
Step39: 这些结果与训练期间看到的验证误差相匹配。
Step40: 看起来模型预测得相当出色。
Step41: 如果您对模型感到满意,请将其保存以备以后使用:
Step42: 如果您重新加载模型,它会给出相同的输出:
|
3,573
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification
# Generate features matrix and target vector
X, y = make_classification(n_samples = 10000,
n_features = 3,
n_informative = 3,
n_redundant = 0,
n_classes = 2,
random_state = 1)
# Create logistic regression
logit = LogisticRegression()
# Cross-validate model using precision
cross_val_score(logit, X, y, scoring="f1")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate Features And Target Data
Step2: Create Logistic Regression
Step3: Cross-Validate Model Using F1
|
3,574
|
<ASSISTANT_TASK:>
Python Code:
from ggplot import *
import pandas as pd
from sklearn import datasets
# import iris data
iris = datasets.load_iris()
df1 = pd.DataFrame(iris.data, columns = iris.feature_names)
df2 = pd.DataFrame(iris.target_names[iris.target])
df = pd.concat([df1, df2], axis = 1)
df.head()
df.columns = ['sl', 'sw', 'pl', 'pw', 'species']
p1 = ggplot(aes(x = 'sl', y = 'sw', color = 'species'), data = df) + geom_point()
p1
p2 = ggplot(aes(x = 'sl', y = 'sw', group = 'species', color = 'species'), data = df) + \
geom_point() + geom_smooth(alpha = 0.5) + theme_bw()
p2
p3 = ggplot(aes(x = 'sl', y = 'sw', color = 'species'), data = df[df.species != 'setosa']) + \
geom_point() + theme_538()
p3
p3 = ggplot(aes(x = 'sl'), data = df) + geom_histogram() + facet_wrap('species', ncol = 1)
p3
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: geom_point (scatter plot)
Step3: The plot shows that setosa class can be linearly separated from other two classes.
Step4: geom_points with subsetting
Step5: geom_histogram with facetting
|
3,575
|
<ASSISTANT_TASK:>
Python Code:
import opsimsummary as oss
from opsimsummary import Tiling, HealpixTiles
# import snsims
import healpy as hp
%matplotlib inline
import matplotlib.pyplot as plt
class NoTile(Tiling):
pass
noTile = NoTile()
class MyTile(Tiling):
    """Minimal concrete implementation of the abstract `Tiling` interface.

    The tiling scheme itself is a toy (100 tiles, arithmetic ID mapping);
    it exists only to show which methods a real tiling must provide.
    """
    def __init__(self):
        pass
    @property
    def tileIDSequence(self):
        # The full sequence of valid tile IDs: 0..99.
        return np.arange(100)
    def tileIDsForSN(self, ra, dec):
        # Toy mapping from a sky position to a tile ID: floor((ra + dec) mod 100).
        # NOTE(review): not a real tessellation -- demonstration only.
        x = ra + dec
        y = np.remainder(x, 100.)
        return np.floor(y)
    def area(self, tileID):
        # Every tile reports unit area (units unspecified in this demo).
        return 1.
    def pointingSequenceForTile(self, tileID, pointings):
        # No pointings are associated with any tile in this toy example.
        return None
    def positions(self):
        # Not implemented for the toy tiling.
        pass
myTile = MyTile()
issubclass(HealpixTiles, Tiling)
help(HealpixTiles)
datadir = os.path.join(oss.__path__[0], 'example_data')
opsimdb = os.path.join(datadir, 'enigma_1189_micro.db')
NSIDE = 4
hpOpSim = oss.HealPixelizedOpSim.fromOpSimDB(opsimdb, NSIDE=NSIDE)
NSIDE
hpTileshpOpSim = HealpixTiles(healpixelizedOpSim=hpOpSim, nside=NSIDE)
hpTileshpOpSim.pointingSequenceForTile(1, allPointings=None)
phi, theta = hpTileshpOpSim.positions(1, 10000)
mapvals = np.ones(hp.nside2npix(NSIDE)) * hp.UNSEEN
mapvals[1] = 100
hp.ang2pix(NSIDE, np.radians(theta), np.radians(phi), nest=True)
theta_c, phi_c = hp.pix2ang(4, 1, nest=True)
hp.mollview(mapvals, nest=True)
hp.projscatter(np.radians(theta), np.radians(phi), **dict(s=0.0002))
hp.projscatter(theta_c, phi_c, **dict(s=8., c='r'))
%timeit hpTileshpOpSim.pointingSequenceForTile(33, allPointings=None)
preCompMap = os.path.join(oss.__path__[0], 'example_data', 'healpixels_micro.db')
hpTilesMap = HealpixTiles(nside=1, preComputedMap=preCompMap)
hpTilesMap.pointingSequenceForTile(10, allPointings=None)
%timeit hpOpSim.obsHistIdsForTile(34)
hpTiles = HealpixTiles(healpixelizedOpSim=hpOpSim)
hpTiles.pointingSequenceForTile(34, allPointings=None)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This section pertains to how to write a new Tiling class
Step2: ```
Step4: Using the class HealpixTiles
|
3,576
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns;
sns.set(color_codes=True)
tips = sns.load_dataset("tips")
ax = sns.barplot(x="day", y="total_bill", data=tips)
ax = sns.barplot(x="day", y="total_bill", hue="sex", data=tips)
from echarts import Echart, Legend, Bar, Axis, Line
from IPython.display import HTML
chart = Echart('GDP', 'This is a fake chart')
chart.use(Bar('China', [2, 3, 4, 5]))
chart.use(Legend(['GDP']))
chart.use(Axis('category', 'bottom', data=['Nov', 'Dec', 'Jan', 'Feb']))
chart = Echart('GDP', 'This is a fake chart')
chart.use(Line('China', [2, 5, 4, 7]))
chart.use(Legend(['GDP']))
chart.use(Axis('category', 'bottom', data=['Nov', 'Dec', 'Jan', 'Feb']))
chart.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: https
|
3,577
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import itertools
pop_size = 60
seq_length = 100
alphabet = ['A', 'T', 'G', 'C']
base_haplotype = "AAAAAAAAAA"
pop = {}
pop["AAAAAAAAAA"] = 40
pop["AAATAAAAAA"] = 30
pop["AATTTAAAAA"] = 30
pop["AAATAAAAAA"]
mutation_rate = 0.0001 # per gen per individual per site
def get_mutation_count():
    """Draw this generation's mutation count: Poisson-distributed with
    mean = per-site rate * population size * sequence length."""
    expected = mutation_rate * pop_size * seq_length
    return np.random.poisson(expected)
get_mutation_count()
pop.keys()
[x/float(pop_size) for x in pop.values()]
def get_random_haplotype():
    """Sample one haplotype from the population, weighted by frequency.

    Returns the haplotype string.  Frequencies are renormalized so they
    sum exactly to 1, as `np.random.choice` requires of its `p` argument.
    """
    # Snapshot the keys once and index counts through them so keys and
    # weights stay aligned, and make the keys an indexable list
    # (np.random.choice rejects dict views under Python 3).
    haplotypes = list(pop.keys())
    frequencies = [pop[h] / float(pop_size) for h in haplotypes]
    total = sum(frequencies)
    frequencies = [f / total for f in frequencies]
    return np.random.choice(haplotypes, p=frequencies)
get_random_haplotype()
def get_mutant(haplotype):
    """Return a copy of *haplotype* with one uniformly random site
    changed to a different base from the global `alphabet`."""
    site = np.random.randint(seq_length)
    choices = list(alphabet)
    choices.remove(haplotype[site])  # never mutate a base to itself
    replacement = np.random.choice(choices)
    return haplotype[:site] + replacement + haplotype[site + 1:]
get_mutant("AAAAAAAAAA")
def mutation_event():
    """Mutate one randomly chosen individual in place.

    A haplotype is sampled by frequency and one copy is converted into a
    mutant.  Singleton haplotypes (count 1) are left untouched so no
    count ever drops to zero here.
    """
    haplotype = get_random_haplotype()
    if pop[haplotype] <= 1:
        return
    pop[haplotype] -= 1
    new_haplotype = get_mutant(haplotype)
    pop[new_haplotype] = pop.get(new_haplotype, 0) + 1
mutation_event()
pop
def mutation_step():
    """Apply a Poisson-distributed number of single-mutation events."""
    for _ in range(get_mutation_count()):
        mutation_event()
mutation_step()
pop
def get_offspring_counts():
    """Resample the next generation: a multinomial draw of `pop_size`
    offspring with probabilities equal to current haplotype frequencies.

    Returns a list of counts aligned with `pop`'s key iteration order.
    """
    probs = [count / float(pop_size) for count in pop.values()]
    return list(np.random.multinomial(pop_size, probs))
get_offspring_counts()
def offspring_step():
    """Replace the population with a multinomial resample (genetic drift).

    Haplotypes drawn zero times are removed entirely to keep `pop`
    sparse.
    """
    counts = get_offspring_counts()
    # Snapshot the keys before pairing: we delete entries of `pop` while
    # iterating, which raises RuntimeError on a live dict view under
    # Python 3 (Python 2's .keys() returned a list, hiding the issue).
    for haplotype, count in zip(list(pop.keys()), counts):
        if count > 0:
            pop[haplotype] = count
        else:
            del pop[haplotype]
offspring_step()
pop
def time_step():
    """Advance the simulation one generation: mutation, then drift."""
    mutation_step()
    offspring_step()
generations = 500
def simulate():
    """Run `generations` time steps without recording any history."""
    for i in range(generations):
        time_step()
simulate()
pop
pop = {"AAAAAAAAAA": pop_size}
history = []
def simulate():
    """Run `generations` time steps, appending a snapshot of `pop`
    (a copied dict) to `history` before the first step and after every
    generation, so `history` ends with generations + 1 entries."""
    clone_pop = dict(pop)
    history.append(clone_pop)
    for i in range(generations):
        time_step()
        clone_pop = dict(pop)
        history.append(clone_pop)
simulate()
pop
history[0]
history[1]
history[2]
pop
def get_distance(seq_a, seq_b):
    """Per-site Hamming distance between two equal-length sequences:
    the fraction of positions at which they differ."""
    length = len(seq_a)
    assert len(seq_a) == len(seq_b)
    mismatches = sum(1 for ch_a, ch_b in zip(seq_a, seq_b) if ch_a != ch_b)
    return mismatches / float(length)
get_distance("AAAAAAAAAA", "AAAAAAAAAB")
def get_diversity(population):
    """Mean pairwise distance (nucleotide diversity, pi) of a population.

    Sums freq_a * freq_b * distance(a, b) over all ordered haplotype
    pairs; frequencies use the module-level `pop_size` as denominator.
    """
    # list() so the haplotypes are indexable under Python 3 as well
    # (dict .keys() views do not support integer indexing).
    haplotypes = list(population.keys())
    # Precompute frequencies once instead of recomputing per pair.
    frequencies = [population[h] / float(pop_size) for h in haplotypes]
    diversity = 0
    for i, haplotype_a in enumerate(haplotypes):
        for j, haplotype_b in enumerate(haplotypes):
            diversity += (frequencies[i] * frequencies[j]
                          * get_distance(haplotype_a, haplotype_b))
    return diversity
get_diversity(pop)
def get_diversity_trajectory():
    """Diversity (mean pairwise distance) of each recorded generation."""
    trajectory = [get_diversity(generation) for generation in history]
    return trajectory
get_diversity_trajectory()
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib as mpl
plt.plot(get_diversity_trajectory())
def diversity_plot():
    """Plot the diversity trajectory over generations.

    Draws into the current matplotlib figure and mutates the global
    rcParams font size; relies on the module-level `history`.
    """
    mpl.rcParams['font.size']=14
    trajectory = get_diversity_trajectory()
    plt.plot(trajectory, "#447CCD")
    plt.ylabel("diversity")
    plt.xlabel("generation")
diversity_plot()
def get_divergence(population):
    """Frequency-weighted mean distance of a population from the
    founding haplotype (`base_haplotype`)."""
    divergence = 0
    for haplotype, count in population.items():
        frequency = count / float(pop_size)
        divergence += frequency * get_distance(base_haplotype, haplotype)
    return divergence
def get_divergence_trajectory():
    """Divergence from the founding haplotype for each recorded generation."""
    trajectory = [get_divergence(generation) for generation in history]
    return trajectory
get_divergence_trajectory()
def divergence_plot():
    """Plot the divergence trajectory over generations.

    Draws into the current matplotlib figure and mutates the global
    rcParams font size; relies on the module-level `history`.
    """
    mpl.rcParams['font.size']=14
    trajectory = get_divergence_trajectory()
    plt.plot(trajectory, "#447CCD")
    plt.ylabel("divergence")
    plt.xlabel("generation")
divergence_plot()
def get_frequency(haplotype, generation):
    """Frequency of *haplotype* in the recorded generation, or 0 if it
    was absent then."""
    snapshot = history[generation]
    if haplotype not in snapshot:
        return 0
    return snapshot[haplotype] / float(pop_size)
get_frequency("AAAAAAAAAA", 4)
def get_trajectory(haplotype):
    """Frequency of *haplotype* across all simulated generations."""
    trajectory = [get_frequency(haplotype, gen) for gen in range(generations)]
    return trajectory
get_trajectory("AAAAAAAAAA")
def get_all_haplotypes():
    """Collect every haplotype observed in any recorded generation."""
    haplotypes = set()
    for generation in history:
        haplotypes.update(generation)  # iterating a dict yields its keys
    return haplotypes
get_all_haplotypes()
haplotypes = get_all_haplotypes()
for haplotype in haplotypes:
plt.plot(get_trajectory(haplotype))
plt.show()
colors = ["#781C86", "#571EA2", "#462EB9", "#3F47C9", "#3F63CF", "#447CCD", "#4C90C0", "#56A0AE", "#63AC9A", "#72B485", "#83BA70", "#96BD60", "#AABD52", "#BDBB48", "#CEB541", "#DCAB3C", "#E49938", "#E68133", "#E4632E", "#DF4327", "#DB2122"]
colors_lighter = ["#A567AF", "#8F69C1", "#8474D1", "#7F85DB", "#7F97DF", "#82A8DD", "#88B5D5", "#8FC0C9", "#97C8BC", "#A1CDAD", "#ACD1A0", "#B9D395", "#C6D38C", "#D3D285", "#DECE81", "#E8C77D", "#EDBB7A", "#EEAB77", "#ED9773", "#EA816F", "#E76B6B"]
def stacked_trajectory_plot(xlabel="generation"):
    """Stacked-area plot of every haplotype's frequency trajectory.

    Per-generation frequencies sum to 1, so the stack fills [0, 1].
    Mutates global rcParams and the current matplotlib figure.
    """
    mpl.rcParams['font.size']=18
    haplotypes = get_all_haplotypes()
    trajectories = [get_trajectory(haplotype) for haplotype in haplotypes]
    plt.stackplot(range(generations), trajectories, colors=colors_lighter)
    plt.ylim(0, 1)
    plt.ylabel("frequency")
    plt.xlabel(xlabel)
stacked_trajectory_plot()
def get_snp_frequency(site, generation):
    """Total frequency of non-reference ('A') alleles at *site* in the
    recorded generation *generation*."""
    minor_allele_frequency = 0.0
    snapshot = history[generation]
    for haplotype, count in snapshot.items():
        if haplotype[site] != "A":
            minor_allele_frequency += count / float(pop_size)
    return minor_allele_frequency
get_snp_frequency(3, 5)
def get_snp_trajectory(site):
    """Minor-allele frequency at *site* across all generations."""
    trajectory = [get_snp_frequency(site, gen) for gen in range(generations)]
    return trajectory
get_snp_trajectory(3)
def get_all_snps():
    """Sites carrying a non-reference ('A') allele in any recorded
    generation."""
    return {site
            for generation in history
            for haplotype in generation
            for site in range(seq_length)
            if haplotype[site] != "A"}
def snp_trajectory_plot(xlabel="generation"):
    """Line plot of each SNP's minor-allele frequency trajectory.

    NOTE(review): itertools.izip exists only under Python 2; a Python 3
    port would replace it with the builtin zip.
    """
    mpl.rcParams['font.size']=18
    snps = get_all_snps()
    trajectories = [get_snp_trajectory(snp) for snp in snps]
    # Build the flattened (x, y, color, x, y, color, ...) argument list
    # that a single plt.plot(*data) call accepts.
    data = []
    for trajectory, color in itertools.izip(trajectories, itertools.cycle(colors)):
        data.append(range(generations))
        data.append(trajectory)
        data.append(color)
    plt.plot(*data)
    plt.ylim(0, 1)
    plt.ylabel("frequency")
    plt.xlabel(xlabel)
snp_trajectory_plot()
pop_size = 50
seq_length = 100
generations = 500
mutation_rate = 0.0001 # per gen per individual per site
seq_length * mutation_rate
2 * pop_size * seq_length * mutation_rate
base_haplotype = ''.join(["A" for i in range(seq_length)])
pop.clear()
del history[:]
pop[base_haplotype] = pop_size
simulate()
plt.figure(num=None, figsize=(14, 14), dpi=80, facecolor='w', edgecolor='k')
plt.subplot2grid((3,2), (0,0), colspan=2)
stacked_trajectory_plot(xlabel="")
plt.subplot2grid((3,2), (1,0), colspan=2)
snp_trajectory_plot(xlabel="")
plt.subplot2grid((3,2), (2,0))
diversity_plot()
plt.subplot2grid((3,2), (2,1))
divergence_plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make population dynamic model
Step2: Setup a population of sequences
Step3: Add mutation
Step4: Walk through population and mutate basepairs. Use Poisson splitting to speed this up (you may be familiar with Poisson splitting from its use in the Gillespie algorithm).
Step5: Here we use Numpy's Poisson random number.
Step6: We need to get random haplotype from the population.
Step7: Here we use Numpy's weighted random choice.
Step8: Here, we take a supplied haplotype and mutate a site at random.
Step9: Putting things together, in a single mutation event, we grab a random haplotype from the population, mutate it, decrement its count, and then check if the mutant already exists in the population. If it does, increment this mutant haplotype; if it doesn't create a new haplotype of count 1.
Step10: To create all the mutations that occur in a single generation, we draw the total count of mutations and then iteratively add mutation events.
Step11: Add genetic drift
Step12: Here we use Numpy's multinomial random sample.
Step13: We then need to assign this new list of haplotype counts to the pop dictionary. To save memory and computation, if a haplotype goes to 0, we remove it entirely from the pop dictionary.
Step14: Combine and iterate
Step15: Can iterate this over a number of generations.
Step16: Record
Step17: Analyze trajectories
Step18: First, we need to calculate the number of differences per site between two arbitrary sequences.
Step19: We calculate diversity as a weighted average between all pairs of haplotypes, weighted by pairwise haplotype frequency.
Step20: Plot diversity
Step21: Here, we make a simple line plot using matplotlib's plot function.
Step22: Here, we style the plot a bit with x and y axes labels.
Step23: Analyze and plot divergence
Step24: Plot haplotype trajectories
Step25: We want to plot all haplotypes seen during the simulation.
Step26: Here is a simple plot of their overall frequencies.
Step27: We can use stackplot to stack these trajectoies on top of each other to get a better picture of what's going on.
Step28: Plot SNP trajectories
Step29: Find all variable sites.
Step30: Scale up
Step31: In this case there are $\mu$ = 0.01 mutations entering the population every generation.
Step32: And the population genetic parameter $\theta$, which equals $2N\mu$, is 1.
|
3,578
|
<ASSISTANT_TASK:>
Python Code:
from collatex import *
collation = Collation()
witness_1707 = open( "../data/sonnet/Lope_soneto_FR_1707.txt", encoding='utf-8' ).read()
witness_1822 = open( "../data/sonnet/Lope_soneto_FR_1822.txt", encoding='utf-8' ).read()
collation.add_plain_witness( "wit 1707", witness_1707 )
collation.add_plain_witness( "wit 1822", witness_1822 )
alignment_table = collate(collation, output='html2')
import glob, re, os
path = '../data/sonnet/' # put the path into a variable
os.makedirs(path + 'norm', exist_ok=True) # create a new folder, if does not exist
files = [os.path.basename(x) for x in glob.glob(path+'*.txt')] # take all txt files in the directory
for file in files:  # normalize every .txt file found in the source folder
    # Read the raw witness text; 'with' guarantees the handle is closed
    # even if reading raises (the original leaked handles on error).
    with open(path + file, 'r', encoding='utf-8') as file_opened:
        content = file_opened.read()
    ### ALL TO LOWER CASE
    lowerContent = content.lower()
    ### REMOVE PUNCTUATION
    # Replace everything that is neither alphanumeric (\w) nor
    # whitespace (\s) with nothing.
    noPunct_lowerContent = re.sub(r'[^\w\s]', '', lowerContent)
    ### REMOVE MULTIPLE WHITESPACES
    regularSpaces_noPunct_lowerContent = " ".join(noPunct_lowerContent.split())
    ### WRITE THE NORMALIZED TEXT TO norm/<name>_norm.txt
    filename = file.split('.')[0]
    with open(path + 'norm/' + filename + '_norm.txt', 'w', encoding='utf-8') as new_file:
        new_file.write(regularSpaces_noPunct_lowerContent)
print('Finished! All normalized!')
from collatex import *
collation = Collation()
witness_1707 = open( "../data/sonnet/norm/Lope_soneto_FR_1707_norm.txt", encoding='utf-8' ).read()
witness_1822 = open( "../data/sonnet/norm/Lope_soneto_FR_1822_norm.txt", encoding='utf-8' ).read()
collation.add_plain_witness( "wit 1707", witness_1707 )
collation.add_plain_witness( "wit 1822", witness_1822 )
alignment_table = collate(collation, output='html2')
# first tercet only
from collatex import *
collation = Collation()
collation.add_plain_witness( "wit 1707", "Je commence au hasard; et si je ne m'abuse,")
collation.add_plain_witness( "wit 1822", "Je commence au hasard, et, si je ne m'abuse,")
alignment_table = collate(collation, output='html2', segmentation=False)
print( alignment_table )
# first tercet only
# normalize with nothing will give errors in the svg output
from collatex import *
import json
collation = Collation()
json_input = {
"witnesses": [
{
"id": "wit1707",
"tokens": [
{
"t": "Je",
"n": "je"
},
{
"t": "commence",
"n": "commence"
},
{
"t": "au",
"n": "au"
},
{
"t": "hasard;",
"n": "hasard"
},
{
"t": "et",
"n": "et"
},
{
"t": "si",
"n": "si"
},
{
"t": "je",
"n": "je"
},
{
"t": "ne",
"n": "ne"
},
{
"t": "m'abuse,",
"n": "m'abuse"
}
]
},
{
"id": "wit1822",
"tokens": [
{
"t": "Je",
"n": "je"
},
{
"t": "commence",
"n": "commence"
},
{
"t": "au",
"n": "au"
},
{
"t": "hasard,",
"n": "hasard"
},
{
"t": "et,",
"n": "et"
},
{
"t": "si",
"n": "si"
},
{
"t": "je",
"n": "je"
},
{
"t": "ne",
"n": "ne"
},
{
"t": "m'abuse,",
"n": "m'abuse"
}
]
}
]
}
collate(json.loads(json_input), segmentation=False, output="html2")
import re
from collatex import *
import json
witness_1707 = open( "../data/sonnet/Lope_soneto_FR_1707.txt", encoding='utf-8' ).read()
witness_1822 = open( "../data/sonnet/Lope_soneto_FR_1822.txt", encoding='utf-8' ).read()
A = ["wit 1707", witness_1707]
B = ["wit 1822", witness_1822]
listWitnesses = [A,B] # create a list of witnesses
data = {}
data["witnesses"] = []
for witness in listWitnesses: # for each witness in the list
tokens = [] # create empty list for tokens
data["witnesses"].append({
"id": witness[0], # give as id the first item in A or B
"tokens" : tokens # and as tokens the empty list
})
for w in witness[1].split(): # for each word in witness (second item in A or B)
t = w # t is the original word
# N is w with no upper-case and no punctuation
# Replace everything that is not alphanumeric character (\w) or space (\s) with nothing.
# Attention: if replaced with whitespace, it will create differences --> avoid.
# This does not happen in the previous method (Normalization 1),
# because the tokenization happens afterwards and strip whitespaces.
n = re.sub(r'[^\w\s]','',w.lower())
tokens.append({ # populate the empty token list with values for t and n
"t" : t,
"n" : n
})
json_input = json.dumps(data) # data created turned into json string with double quotes
print(json_input)
collation = Collation()
# if segmentation=True there are no whitespaces between words, because input is given with single tokens without whitespaces
collate(json.loads(json_input), segmentation=False, output="html2")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imagine that we are not interested in punctuation and capitalization
Step2: Now, let's collate the normalized copies.
Step3: Normalization 2. Annotate
Step5: Now we want to arrive at the same results that we reached in Normalization 1, but using the 't' and 'n' properties. They become visible if we input the data for collation as json (an open-standard file format for storing and exchanging data widely used in web development and beyond).
Step6: This is wonderful, but very time consuming!
|
3,579
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('../../metal')
import metal
%load_ext autoreload
%autoreload 2
%matplotlib inline
import pickle
with open("data/basics_tutorial.pkl", 'rb') as f:
X, Y, L, D = pickle.load(f)
X.shape
Y.shape
L.shape
from metal.utils import split_data
Xs, Ys, Ls, Ds = split_data(X, Y, L, D, splits=[0.8, 0.1, 0.1], stratify_by=Y, seed=123)
from metal.analysis import lf_summary
lf_summary(Ls[1],Y=Ys[1])
from metal.label_model import LabelModel
label_model = LabelModel(k=2, seed=123)
%%time
label_model.train_model(Ls[0], Y_dev=Ys[1], n_epochs=500, log_train_every=50)
score = label_model.score((Ls[1], Ys[1]))
print('Trained Label Model Metrics:')
scores = label_model.score((Ls[1], Ys[1]), metric=['accuracy','precision', 'recall', 'f1'])
from metal.label_model.baselines import MajorityLabelVoter
mv = MajorityLabelVoter(seed=123)
print('Majority Label Voter Metrics:')
scores = mv.score((Ls[1], Ys[1]), metric=['accuracy','precision', 'recall', 'f1'])
# Y_train_ps stands for "Y[labels]_train[split]_p[redicted]s[oft]"
Y_train_ps = label_model.predict_proba(Ls[0])
Y_train_ps
from metal.analysis import confusion_matrix
Y_dev_p = label_model.predict(Ls[1])
cm = confusion_matrix(Ys[1], Y_dev_p)
try:
from metal.contrib.visualization.analysis import (
plot_predictions_histogram,
plot_probabilities_histogram,
)
plot_predictions_histogram(Y_dev_p, Ys[1], title="Label Distribution")
Y_dev_ps = label_model.predict_proba(Ls[1])
plot_probabilities_histogram(Y_dev_ps[:,0], title="Probablistic Label Distribution")
except ModuleNotFoundError:
print("The tools in contrib/visualization/ require matplotlib. Try `conda/pip install matplotlib`.")
from metal.end_model import EndModel
import torch
if torch.cuda.is_available():
device = 'cuda'
else:
device='cpu'
end_model = EndModel([1000,10,2], seed=123, device=device)
end_model.train_model((Xs[0], Y_train_ps), valid_data=(Xs[1], Ys[1]), lr=0.01, l2=0.01, batch_size=256,
n_epochs=5, checkpoint_metric='accuracy', checkpoint_metric_mode='max')
print("Label Model:")
score = label_model.score((Ls[2], Ys[2]), metric=['accuracy','precision', 'recall', 'f1'])
print()
print("End Model:")
score = end_model.score((Xs[2], Ys[2]), metric=['accuracy','precision', 'recall', 'f1'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: If you need to divide your data into splits, you can do so with the provided utility function. We split our data 80/10/10 into train/dev/test, stratifying by the labels in Y to ensure a similar class balance in each split.
Step3: Investigate Label Matrices
Step4: If you're interested in more graphical investigative tools, you can take a look at the Visualization tutorial.
Step 2
Step5: The only required argument to LabelModel.train_model() is a label matrix. All other keyword arguments are optional.
Step6: You can test the quality of our label model on our dev set as a sanity check, but we'll see if we can do better in Step 3 by using the predictions of the label model to train a discriminative model over a larger feature set than just the outputs of these ten labeling functions.
Step7: We can see that our trained LabelModel outperforms the baseline of taking the majority vote label by approximately 4 accuracy points.
Step8: We can see that our trained LabelModel outperforms the baseline of taking the majority vote label by approximately 0.04 in accuracy and 0.03 in F1 on the dev set. However, it has lower recall. Which of these metrics matters most will vary by application.
Step9: Analysis tools
Step10: If matplotlib is installed, we can also use some of the visualization tools provided in the contrib/ directory to plot the label distributions.
Step11: Step 3
Step12: Once initiated, the network structure is printed so you can confirm that it captured the architecture you want.
Step13: Step 4
|
3,580
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import os
import matplotlib.pyplot as plt
%matplotlib inline
from cycler import cycler
from pylab import rcParams
rcParams['figure.figsize'] = 8, 6
rcParams.update({'font.size': 15})
# color and linestyle cycle
#colors = [x['color'] for x in list(rcParams['axes.prop_cycle'])]
colors_base = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '0.3', '0.5', '0.75', 'chartreuse']
print 'colors_base', colors_base
colors = [item for sublist in [colors_base]*len(colors_base) for item in sublist] # replicate and flatten
print 'colors', colors, len(list(rcParams['axes.prop_cycle']))
lnstyl = [[l] * len(colors_base) for l in ['-', '--', ':', '.', '-.', '*', 'x']] # replicate per color
print 'lnstyl', lnstyl
lnstyl = [item for sublist in lnstyl for item in sublist] # flatten
plt.rc('axes', prop_cycle=(cycler('color', colors) + cycler('linestyle', lnstyl))) # define cycler
from nideep.eval.learning_curve import LearningCurve
from nideep.eval.eval_utils import Phase
import nideep.eval.log_utils as lu
def moving_avg(x, window_size):
    """Moving average of `x` with a flat window of `window_size` samples.

    Uses 'valid' convolution, so the output has
    len(x) - window_size + 1 points.
    """
    width = int(window_size)
    kernel = np.full(width, 1.0 / float(window_size))
    return np.convolve(x, kernel, 'valid')
classnames = ['alarm', 'baby', 'crash', 'dog', 'engine', 'femaleSpeech', 'fire', 'footsteps',\
'knock', 'phone', 'piano']
classnames_scalar = ['alarm', 'baby', 'crash', 'dog', 'engine', 'femaleSpeech', 'fire', 'footsteps', 'general'\
'knock', 'phone', 'piano']
print("Done importing")
from nideep.proto.proto_utils import Parser
from nideep.nets.net_merge import merge_indep_net_spec
# select network definitions to merge into a single prototxt
# You can also just repeat the same network over and over if you want to train the same network with different random initializations
p0 = './train_val_00.prototxt'
p1 = './train_val_01.prototxt'
p2 = './train_val_02.prototxt'
# load each network definition from file
nets = [Parser().from_net_params_file(p) for p in [p0,p1,p2]]
# merge and save merged prototxt to file
p_dst = './train_val_00_01_02.prototxt'
with open(p_dst, 'w') as f:
f.write(merge_indep_net_spec(nets))
# use p_dst file in your solver and train this 'network ensemble' like you would any single network.
logs = [\
'./xD/caffe.eltanin.kashefy.log.INFO.20160818-105955.20804',
'./xE_03/caffe.eltanin.kashefy.log.INFO.20160818-145600.31621',
'./xE_04/caffe.eltanin.kashefy.log.INFO.20160818-150354.710',
]
print("Found %d logs" % (len(logs),))
for phase in [Phase.TRAIN, Phase.TEST]:
print phase
plt.figure()
for p in logs:
e = LearningCurve(p)
lc_keys = e.parse()[phase == Phase.TEST]
num_iter = e.list('NumIters', phase)
print('%s: %d %s iterations' % (os.path.basename(os.path.dirname(p)), num_iter.size, phase))
for lck_idx, lck in enumerate(lc_keys):
if 'nidx' in lck or ('NumIters' not in lck and 'rate' not in lck.lower() and 'seconds' not in lck.lower()):
try:
loss = e.list(lck, phase)
plt.plot(num_iter, loss, label='%s %s' % (os.path.basename(os.path.dirname(p)), lck))
except KeyError as kerr:
print("Inavlid values for %s %s" % (phase, lck))
ticks, _ = plt.xticks()
plt.xticks(ticks, ["%dK" % int(t/1000) for t in ticks])
plt.title(phase)
plt.xlabel('iterations')
plt.ylabel(' '.join([phase, 'cross entropy loss']))
#plt.xlim([0,20e3])
#plt.xlim([0,300e3])
plt.ylim([1,20])
plt.title('on %s set' % phase)
plt.legend(loc='upper right')
plt.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Merge multiple network definitions that share the same data layers into a single definition to train within the same single process
Step2: After training, we look at the learning curves of the individual sub-networks
|
3,581
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
import pandas as pd
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'data/train.p'
validation_file= 'data/valid.p'
testing_file = 'data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_test, y_test = test['features'], test['labels']
from sklearn.model_selection import train_test_split
X_train, X_validation, y_train, y_validation = train_test_split(X_train, y_train, test_size=0.2, random_state=0)
print("Updated Image Shape: {}".format(X_train[0].shape))
import random
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
plt.figure(figsize=(1,1))
plt.imshow(image)
print(y_train[index])
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
import tensorflow as tf
EPOCHS = 10
BATCH_SIZE = 128
from tensorflow.contrib.layers import flatten
def LeNet(x):
    """LeNet-5-style convnet mapping 32x32x3 images to 43-class logits.

    Args:
        x: input tensor of shape (batch, 32, 32, 3) -- the conv1 kernel
           below is declared with 3 input channels.
    Returns:
        logits tensor of shape (batch, 43).
    """
    # Hyperparameters for tf.truncated_normal weight initialization.
    mu = 0
    sigma = 0.1
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6
    # (5x5 kernel, stride 1, VALID padding).
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    # Activation.
    conv1 = tf.nn.relu(conv1)
    # Pooling. Input = 28x28x6. Output = 14x14x6 (2x2 max-pool, stride 2).
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Layer 2: Convolutional. Input = 14x14x6. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # Activation.
    conv2 = tf.nn.relu(conv2)
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    # Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)
    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    # Activation.
    fc1 = tf.nn.relu(fc1)
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    # Activation.
    fc2 = tf.nn.relu(fc2)
    # Layer 5: Fully Connected. Input = 84. Output = 43 class logits
    # (the weight shape is (84, 43), one column per traffic-sign class).
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
rate = 0.001
logits = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_y)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Batched accuracy of the current model over (X_data, y_data).

    Uses the default TF session plus the module-level placeholders
    `x`, `y`, the `accuracy_operation` graph node and `BATCH_SIZE`.
    Returns the example-weighted mean accuracy.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        # Weight by actual batch length so a short final batch does not
        # skew the average.
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
validation_accuracy = evaluate(X_validation, y_validation)
print("EPOCH {} ...".format(i+1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
saver.save(sess, './lenet')
print("Model saved")
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('.'))
test_accuracy = evaluate(X_test, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The MNIST data that TensorFlow pre-loads comes as 28x28x1 images.
Step2: Visualize Data
Step3: Preprocess Data
Step4: Setup TensorFlow
Step5: SOLUTION
Step6: Features and Labels
Step7: Training Pipeline
Step8: Model Evaluation
Step9: Train the Model
Step10: Evaluate the Model
|
3,582
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
def tokenize(s, stop_words='', punctuation='`~!@#$%^&*()_-+={[}]|\:;"<,>.?/}\t'):
    """Split a string into a list of words, removing punctuation and stop words.

    Parameters
    ----------
    s : str
        Text to tokenize.
    stop_words : str or iterable of str, optional
        Words to drop after lower-casing and punctuation removal.  A
        string is split on whitespace; any other iterable is used as-is
        (the original silently returned [] for non-list/str inputs).
        The default (empty string) means no stop words.
    punctuation : str, optional
        Characters stripped from every token.

    Returns
    -------
    list of str
        Lower-cased tokens with punctuation removed; stop words and
        empty tokens are dropped.
    """
    # str.split() with no argument splits on any whitespace run
    # (spaces, tabs, newlines) in one pass; the original split only on
    # single spaces, so tab-separated words were merged into one token.
    if isinstance(stop_words, str):
        stop_words = stop_words.split()
    stop_set = set(stop_words)
    strip_set = set(punctuation)
    tokens = []
    for raw in s.split():
        word = ''.join(ch for ch in raw.lower() if ch not in strip_set)
        if word and word not in stop_set:
            tokens.append(word)
    return tokens
assert tokenize("This, is the way; that things will end", stop_words=['the', 'is']) == \
['this', 'way', 'that', 'things', 'will', 'end']
wasteland =
APRIL is the cruellest month, breeding
Lilacs out of the dead land, mixing
Memory and desire, stirring
Dull roots with spring rain.
assert tokenize(wasteland, stop_words='is the of and') == \
['april','cruellest','month','breeding','lilacs','out','dead','land',
'mixing','memory','desire','stirring','dull','roots','with','spring',
'rain']
def count_words(data):
    """Return a word count dictionary from the list of words in data.

    Each distinct word maps to the number of times it appears; an empty
    input yields an empty dict.  Single O(n) pass instead of the
    original list.count() call per element (O(n^2) overall).
    """
    counts = {}
    for word in data:
        counts[word] = counts.get(word, 0) + 1
    return counts
assert count_words(tokenize('this and the this from and a a a')) == \
{'a': 3, 'and': 2, 'from': 1, 'the': 1, 'this': 2}
def sort_word_counts(wc):
    """Return a list of (word, count) tuples sorted by count, descending.

    Ties keep their original dict-insertion order because sorted() is
    stable. The quote-stripped docstring line in the original made the
    function body a syntax error.
    """
    return sorted(wc.items(), key=lambda item: item[1], reverse=True)
assert sort_word_counts(count_words(tokenize('this and a the this this and a a a'))) == \
[('a', 4), ('this', 3), ('and', 2), ('the', 1)]
# Word-count analysis of Chapter 1 of Moby Dick: tokenize the text,
# count word frequencies, and dot-plot the most common words.
f = open('mobydick_chapter1.txt','r')
r = f.read()
# Em-dashes join words in the text; turn them into separators first.
# NOTE(review): the name `re` shadows the stdlib regex module if imported.
re = r.replace('--',' ')
tok = tokenize(re, stop_words = ['the', 'of', 'and', 'a', 'to', 'in', 'is', 'it', 'that', 'as'])
wcount = count_words(tok)
swc = sort_word_counts(wcount)
f.close()
print(len(swc))
# Expected values for this particular chapter text.
assert swc[0]==('i',43)
assert len(swc)==848
# Take the head of the sorted (word, count) list.
# NOTE(review): range(0,51) collects 51 entries, not 50 — confirm intent.
top50 = []
for n in range(0,51):
    top50.append(swc[n])
# Split the pairs into parallel word/count lists for plotting.
lett = []
num = []
for n in top50:
    lett.append(n[0])
    num.append(n[1])
# Cleveland-style dot plot: counts on x, one labeled row per word on y.
yax = range(0,51)
f = plt.figure(figsize=(7,12))
plt.scatter(num,yax)
plt.ylim(-1,51)
plt.yticks(range(0,51),lett)
plt.tick_params(right=False,left=False)
plt.title('Top 50 Words')
plt.xlabel('Count')
plt.ylabel('Word')
plt.grid(True);
assert True # use this for grading the dotplot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Word counting
Step5: Write a function count_words that takes a list of words and returns a dictionary where the keys in the dictionary are the unique words in the list and the values are the word counts.
Step7: Write a function sort_word_counts that returns a list of sorted word counts
Step8: Perform a word count analysis on Chapter 1 of Moby Dick, whose text can be found in the file mobydick_chapter1.txt
Step9: Create a "Cleveland Style" dotplot of the counts of the top 50 words using Matplotlib. If you don't know what a dotplot is, you will have to do some research...
|
3,583
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
import freqopttest.util as util
import freqopttest.data as data
import freqopttest.kernel as kernel
import freqopttest.tst as tst
import freqopttest.glo as glo
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import sys
# sample source
m = 800
dim = 2
n = m
ss = data.SSGaussMeanDiff(dim, my=0.5)
#ss = data.SSBlobs()
tst_data = ss.sample(m, seed=2)
tr, te = tst_data.split_tr_te(tr_proportion=0.5, seed=10)
# plot test data
xte, yte = te.xy()
plt.plot(xte[:, 0], xte[:, 1], 'xr', label='X te')
plt.plot(yte[:, 0], yte[:, 1], 'xb', label='Y te')
plt.legend(loc='best')
plt.title('Test set')
print(te)
# test locations
T = np.array([[0, 0], [1, 0]])
gwidth = 1.0
alpha = 0.01
met = tst.MeanEmbeddingTest(T, gwidth, alpha)
met.perform_test(te)
t1 = np.array([0, 0])
t2x_list = np.linspace(-7, 7, 200)
# add an x very close to 0
t2x_list = np.append(t2x_list, [1e-9])
t2x_list.sort()
stats = np.zeros(len(t2x_list))
for i, t2x in enumerate(t2x_list):
t2 = np.array([t2x, 0])
T = np.vstack((t1, t2))
met_i = tst.MeanEmbeddingTest(T, gwidth, alpha)
test_i = met_i.perform_test(te)
stats[i] = test_i['test_stat']
# plot location shift vs. test stat
plt.plot(t2x_list, stats)
plt.title('t1 = %s, t2 = [x, 0]'%(str(t1)) )
plt.xlabel('x in $1^{st}$ dim. of t2')
plt.ylabel('Test statistic')
t1 = np.array([0, 0])
t3 = np.array([1, 0])
t2x_list = np.linspace(-7, 8, 200)
# add an x very close to 0
t2x_list = np.append(t2x_list, [1e-12, 1+1e-9])
t2x_list.sort()
stats = np.zeros(len(t2x_list))
for i, t2x in enumerate(t2x_list):
t2 = np.array([t2x, 0])
T = np.vstack((t1, t2, t3))
met_i = tst.MeanEmbeddingTest(T, gwidth, alpha)
test_i = met_i.perform_test(te)
stats[i] = test_i['test_stat']
# plot location shift vs. test stat
plt.plot(t2x_list, stats)
plt.title('t1 = %s, t2 = [x, 0], t3 = %s'%(str(t1), str(t3)) )
plt.xlabel('x in $1^{st}$ dim. of t2')
plt.ylabel('Test statistic')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: mean embedding test. J=2 locations
Step2: This showed that if both the test locations are the same at [0, 0], then the covariance matrix is singular, and the test statistic cannot be computed. If $t_1 = [0, 0], t_2 = [x, 0]$ where $x$ approaches 0, then test statistic drops significantly as shown.
|
3,584
|
<ASSISTANT_TASK:>
Python Code:
import vcsn
%%automaton a
context = "lal_char(abc), b"
$ -> 0
0 -> 1 a
1 -> $
2 -> 0 a
1 -> 3 a
a.is_accessible()
a.accessible()
a.accessible().is_accessible()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following automaton has states that cannot be reached from the initial(s) states
Step2: Calling accessible returns a copy of the automaton without non-accessible states
|
3,585
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from sklearn.model_selection import train_test_split
# Let X be our input data consisting of
# 5 samples and 2 features
X = np.arange(10).reshape(5, 2)
# Let y be the target feature
y = [0, 1, 2, 3, 4]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
from sklearn.linear_model import LinearRegression
lr = LinearRegression(normalize=True)
print(lr) # outputs the name of the estimator and its hyperparameters
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Validation Data
Step2: Estimators
|
3,586
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
A = np.array([1,1,2,3,3,3,4,5,6,7,8,8])
B = np.array([1,2,8])
C = A[~np.in1d(A,B)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
3,587
|
<ASSISTANT_TASK:>
Python Code:
# Installs the vit_jax package from Github.
!pip install -q git+https://github.com/google-research/vision_transformer
import jax
import jax.numpy as jnp
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
import tqdm
from vit_jax import models
# Currently available LiT models
[name for name in models.model_configs.MODEL_CONFIGS if name.startswith('LiT')]
model_name = 'LiT-B16B'
lit_model = models.get_model(model_name)
# Loading the variables from cloud can take a while the first time...
lit_variables = lit_model.load_variables()
# Creating tokens from freeform text (see next section).
tokenizer = lit_model.get_tokenizer()
# Resizing images & converting value range to -1..1 (see next section).
image_preprocessing = lit_model.get_image_preprocessing()
# Preprocessing op for use in tfds pipeline (see last section).
pp = lit_model.get_pp()
# Let's load some sample images from tfds.
# Alternatively you can also load these images from the internet / your Drive.
ds = tfds.load('imagenette', split='train')
images_list = [
example['image'].numpy()
for _, example in zip(range(5), ds)
]
# Note that this is a list of images with different shapes, not a four
# dimensional tensor.
[image.shape for image in images_list]
# Note that our preprocessing converts to floats ranging from -1..1 !
images = image_preprocessing(images_list)
images.shape, images.min(), images.max()
plt.figure(figsize=(15, 4))
plt.imshow(np.hstack(images) * .5 + .5)
plt.axis('off');
texts = [
'itap of a cd player',
'a photo of a truck',
'gas station',
'chainsaw',
'a bad photo of colorful houses',
]
tokens = tokenizer(texts)
tokens.shape
# Embed both texts and images with a single model call.
# See next section for embedding images/texts separately.
zimg, ztxt, out = lit_model.apply(lit_variables, images=images, tokens=tokens)
plt.imshow(ztxt @ zimg.T)
probs = np.array(jax.nn.softmax(out['t'] * ztxt @ zimg.T, axis=1))
pd.DataFrame(probs, index=texts).style.background_gradient('Greens', vmin=0, vmax=1).format('{:.2%}')
# Load dataset and create array of class names.
builder = tfds.builder('cifar100')
builder.download_and_prepare()
ds_test = builder.as_dataset('test')
info = builder.info
classnames = [
info.features['label'].int2str(id_)
for id_ in range(info.features['label'].num_classes)
]
classnames[:10]
# "best prompts" from CLIP paper (https://arxiv.org/abs/2103.00020)
PROMPTS = [
'itap of a {}.',
'a bad photo of the {}.',
'a origami {}.',
'a photo of the large {}.',
'a {} in a video game.',
'art of the {}.',
'a photo of the small {}.',
'{}',
]
texts = [
prompt.format(classname)
for classname in classnames
for prompt in PROMPTS
]
len(texts)
# Tokenize the texts using numpy like before.
tokens = tokenizer(texts)
tokens.shape
_, ztxt, _ = lit_model.apply(lit_variables, tokens=tokens)
ztxt.shape
# `pp` from above (section "Load model") is a TensorFlow graph that can
# efficiently be added to the input pre-processing.
imgs = next(iter(ds_test.map(pp).batch(4)))['image']
# Note that `pp` would also tokenize "texts" to "tokens", if such a feature was
# present in the dataset (which is not the case for cifar).
plt.figure(figsize=(15, 4))
plt.imshow(np.hstack(imgs) * .5 + .5)
plt.axis('off');
# JIT-compile image embedding function because there are lots of images.
@jax.jit
def embed_images(variables, images):
zimg, _, _ = lit_model.apply(variables, images=images)
return zimg
# Compute all images embeddings & collect correct labels.
zimgs = []
labels = []
for batch in tqdm.tqdm(ds_test.map(lit_model.get_pp()).batch(500)):
labels += list(batch['label'].numpy())
zimg = embed_images(lit_variables, batch['image'].numpy())
zimgs.append(np.array(zimg))
zimgs = np.concatenate(zimgs)
zimgs.shape
# Compute similarities ...
sims = zimgs @ ztxt.reshape([len(classnames), len(PROMPTS), -1]).mean(axis=1).T
sims.shape
# ... and use most similar embedding to predict label.
(sims.argmax(axis=1) == np.array(labels)).mean()
# Expected accuracy for model "LiT-B16B" : 79.19
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use model
Step2: tfds zero-shot evaluation
|
3,588
|
<ASSISTANT_TASK:>
Python Code:
# Build a Hungarian example sentence by string concatenation
# (roughly: "The sheep grazing in the meadow are deeply silent.").
mondat="A "
mondat+="mezőn legelésző "
mondat+="bárányok "
mondat+="mélyen "
mondat+="hallgatnak."
print(mondat)
# Character sets in Hungarian QWERTZ keyboard order, plus extra symbols.
kisbetuk='qwertzuiopasdfghjklyxcvbnm'
nagybetuk='QWERTZUIOPASDFGHJKLYXCVBNM'
extra='+- %=.~'
# Heterogeneous list: strings, ints, a float and a complex number.
kicsi=['al',9,'+',42.137,'szoveg',69,1j]
# Example phone book mapping name -> phone number.
telefon_konyv={'Alonzo Hinton': '(855) 278-2590',
               'Cleo Hennings': '(844) 832-0585',
               'Daine Ventura': '(833) 832-5081',
               'Esther Leeson': '(855) 485-0624',
               'Gene Connell': '(811) 973-2926',
               'Lashaun Bottorff': '(822) 687-1735',
               'Marx Hermann': '(844) 164-8116',
               'Nicky Duprey': '(811) 032-6328',
               'Piper Subia': '(844) 373-4228',
               'Zackary Palomares': '(822) 647-3686'}
# Values for truthiness/logic exercises (includes 0 and a negative).
tesztelendo=[7,5,0,-2]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 02-egyszerű számolás
Step2: 04-lista manipulálás
Step3: 05-szótár kezelés
Step4: 06-logikai
|
3,589
|
<ASSISTANT_TASK:>
Python Code:
# pandas quick tour: creating Series/DataFrame/Panel objects, inspecting,
# selecting, appending, filtering, aggregating, and timing access methods.
# (Notebook cell dump: bare expressions display their value; %timeit is
# IPython magic.)
import pandas as pd
import numpy as np
# Series: 1-D labeled array; np.nan marks missing data.
s = pd.Series([1,3,5,np.nan,6,8])
s
# DatetimeIndex of six consecutive days, used as row labels below.
dates = pd.date_range('20130101', periods=6)
dates
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
df
# DataFrame from a dict of column -> scalar/array-like; scalars broadcast.
df2 = pd.DataFrame({ 'A' : 1.,
                     'B' : pd.Timestamp('20130102'),
                     'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
                     'D' : np.array([3] * 4,dtype='int32'),
                     'E' : pd.Categorical(["test","train","test","train"]),
                     'F' : 'foo' })
df2
df2.dtypes
# NOTE(review): pd.Panel was deprecated in pandas 0.20 and removed in
# 0.25; this cell only runs on older pandas versions.
wp = pd.Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
              major_axis=pd.date_range('1/1/2000', periods=5),
              minor_axis=['A', 'B', 'C', 'D'])
wp
# First/last rows of the frame.
df.head()
df.tail(3)
# Underlying index, columns, and raw NumPy values.
df.index
df.columns
df.values
# Summary statistics per column.
df.describe()
# Transpose; sort columns in descending label order.
df.T
df.sort_index(axis=1, ascending=False)
# Single-column selection by name.
df['A']
# By index
df[0:3]
#By Value
df['20130102':'20130104']
# Label-based selection with .loc: one row, then restricted columns.
df.loc[dates[0]]
# Limit columns
df.loc[:,['A','B']]
# Small ticker/price table to demonstrate appending and boolean filtering.
df_stock = pd.DataFrame({'Stocks': ["AAPL","CA","CTXS","FIS","MA"],
                         'Values': [126.17,31.85,65.38,64.08,88.72]})
df_stock
# NOTE(review): DataFrame.append was removed in pandas 2.0 (use pd.concat).
df_stock = df_stock.append({"Stocks":"GOOG", "Values":523.53}, ignore_index=True)
df_stock
# Boolean indexing: rows whose value exceeds 65.
df_stock[df_stock["Values"]>65]
df_stock.mean()
# Per column
df.mean()
# Per row
df.mean(1)
# A much larger frame to compare access speeds.
big_dates = pd.date_range('20130101', periods=60000)
big_dates
big_df = pd.DataFrame(np.random.randn(60000,4), index=big_dates, columns=list('ABCD'))
big_df
# Plain [] label slicing vs the optimized .loc accessor.
big_df['20200102':'20200104']
big_df.loc['20130102':'20130104']
%timeit big_df['20200102':'20200104']
%timeit big_df.loc['20200102':'20200104']
# Positional slicing vs the optimized .iloc accessor.
big_df[30000:30003]
big_df.iloc[30000:30003]
%timeit big_df[30000:30003]
%timeit big_df.iloc[30000:30003]
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Series
Step2: DataFrame
Step3: Creating a DataFrame by passing a dict of objects that can be converted to series-like.
Step4: Panel
Step5: Viewing data
Step6: See NumPy data
Step7: Statistic Summary
Step8: Transposing data
Step9: Sorting
Step10: Selection
Step11: Get rows
Step12: Selection by Label
Step13: Adding data
Step14: Boolean indexing
Step15: Stats operations
Step16: Optimized pandas data access
|
3,590
|
<ASSISTANT_TASK:>
Python Code:
# Jupyter setup to expand cell display to 100% width on your screen (optional)
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# Import relevant modules and setup for calling glmnet
%reset -f
%matplotlib inline
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy, importlib, pprint, matplotlib.pyplot as plt, warnings
from glmnet import glmnet; from glmnetPlot import glmnetPlot
from glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict
from cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef
from cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'QuickStartExampleX.dat', dtype = scipy.float64)
y = scipy.loadtxt(baseDataDir + 'QuickStartExampleY.dat', dtype = scipy.float64)
# create weights
t = scipy.ones((50, 1), dtype = scipy.float64)
wts = scipy.row_stack((t, 2*t))
# call glmnet
fit = glmnet(x = x.copy(), y = y.copy(), family = 'gaussian', \
weights = wts, \
alpha = 0.2, nlambda = 20
)
glmnetPrint(fit)
glmnetPlot(fit, xvar = 'lambda', label = True);
glmnetPlot(fit, xvar = 'dev', label = True);
any(fit['lambdau'] == 0.5)
glmnetCoef(fit, s = scipy.float64([0.5]), exact = False)
fc = glmnetPredict(fit, x[0:5,:], ptype = 'response', \
s = scipy.float64([0.05]))
print(fc)
warnings.filterwarnings('ignore')
cvfit = cvglmnet(x = x.copy(), y = y.copy(), ptype = 'mse', nfolds = 20)
warnings.filterwarnings('default')
cvfit['lambda_min']
cvglmnetCoef(cvfit, s = 'lambda_min')
cvglmnetPredict(cvfit, newx = x[0:5,], s='lambda_min')
foldid = scipy.random.choice(10, size = y.shape[0], replace = True)
cv1=cvglmnet(x = x.copy(),y = y.copy(),foldid=foldid,alpha=1)
cv0p5=cvglmnet(x = x.copy(),y = y.copy(),foldid=foldid,alpha=0.5)
cv0=cvglmnet(x = x.copy(),y = y.copy(),foldid=foldid,alpha=0)
f = plt.figure()
f.add_subplot(2,2,1)
cvglmnetPlot(cv1)
f.add_subplot(2,2,2)
cvglmnetPlot(cv0p5)
f.add_subplot(2,2,3)
cvglmnetPlot(cv0)
f.add_subplot(2,2,4)
plt.plot( scipy.log(cv1['lambdau']), cv1['cvm'], 'r.')
plt.hold(True)
plt.plot( scipy.log(cv0p5['lambdau']), cv0p5['cvm'], 'g.')
plt.plot( scipy.log(cv0['lambdau']), cv0['cvm'], 'b.')
plt.xlabel('log(Lambda)')
plt.ylabel(cv1['name'])
plt.xlim(-6, 4)
plt.ylim(0, 9)
plt.legend( ('alpha = 1', 'alpha = 0.5', 'alpha = 0'), loc = 'upper left', prop={'size':6});
cl = scipy.array([[-0.7], [0.5]], dtype = scipy.float64)
tfit=glmnet(x = x.copy(),y= y.copy(), cl = cl)
glmnetPlot(tfit);
pfac = scipy.ones([1, 20])
pfac[0, 4] = 0; pfac[0, 9] = 0; pfac[0, 14] = 0
pfit = glmnet(x = x.copy(), y = y.copy(), penalty_factor = pfac)
glmnetPlot(pfit, label = True);
scipy.random.seed(101)
x = scipy.random.rand(100,10)
y = scipy.random.rand(100,1)
fit = glmnet(x = x, y = y)
glmnetPlot(fit);
%%capture
# Output from this sample code has been suppressed due to (possible) Jupyter limitations
# The code works just fine from ipython (tested on spyder)
c = glmnetCoef(fit)
c = c[1:, -1] # remove intercept and get the coefficients at the end of the path
h = glmnetPlot(fit)
ax1 = h['ax1']
xloc = plt.xlim()
xloc = xloc[1]
for i in range(len(c)):
ax1.text(xloc, c[i], 'var' + str(i));
# Import relevant modules and setup for calling glmnet
%reset -f
%matplotlib inline
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy, importlib, pprint, matplotlib.pyplot as plt, warnings
from glmnet import glmnet; from glmnetPlot import glmnetPlot
from glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict
from cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef
from cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleX.dat', dtype = scipy.float64, delimiter = ',')
y = scipy.loadtxt(baseDataDir + 'MultiGaussianExampleY.dat', dtype = scipy.float64, delimiter = ',')
mfit = glmnet(x = x.copy(), y = y.copy(), family = 'mgaussian')
glmnetPlot(mfit, xvar = 'lambda', label = True, ptype = '2norm');
f = glmnetPredict(mfit, x[0:5,:], s = scipy.float64([0.1, 0.01]))
print(f[:,:,0], '\n')
print(f[:,:,1])
warnings.filterwarnings('ignore')
cvmfit = cvglmnet(x = x.copy(), y = y.copy(), family = "mgaussian")
warnings.filterwarnings('default')
cvglmnetPlot(cvmfit)
cvmfit['lambda_min']
cvmfit['lambda_1se']
# Import relevant modules and setup for calling glmnet
%reset -f
%matplotlib inline
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy, importlib, pprint, matplotlib.pyplot as plt, warnings
from glmnet import glmnet; from glmnetPlot import glmnetPlot
from glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict
from cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef
from cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'BinomialExampleX.dat', dtype = scipy.float64, delimiter = ',')
y = scipy.loadtxt(baseDataDir + 'BinomialExampleY.dat', dtype = scipy.float64)
fit = glmnet(x = x.copy(), y = y.copy(), family = 'binomial')
glmnetPlot(fit, xvar = 'dev', label = True);
glmnetPredict(fit, newx = x[0:5,], ptype='class', s = scipy.array([0.05, 0.01]))
warnings.filterwarnings('ignore')
cvfit = cvglmnet(x = x.copy(), y = y.copy(), family = 'binomial', ptype = 'class')
warnings.filterwarnings('default')
cvglmnetPlot(cvfit)
cvfit['lambda_min']
cvfit['lambda_1se']
cvglmnetCoef(cvfit, s = 'lambda_min')
cvglmnetPredict(cvfit, newx = x[0:10, ], s = 'lambda_min', ptype = 'class')
# Import relevant modules and setup for calling glmnet
%reset -f
%matplotlib inline
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy, importlib, pprint, matplotlib.pyplot as plt, warnings
from glmnet import glmnet; from glmnetPlot import glmnetPlot
from glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict
from cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef
from cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'MultinomialExampleX.dat', dtype = scipy.float64, delimiter = ',')
y = scipy.loadtxt(baseDataDir + 'MultinomialExampleY.dat', dtype = scipy.float64)
fit = glmnet(x = x.copy(), y = y.copy(), family = 'multinomial', mtype = 'grouped')
glmnetPlot(fit, xvar = 'lambda', label = True, ptype = '2norm');
warnings.filterwarnings('ignore')
cvfit=cvglmnet(x = x.copy(), y = y.copy(), family='multinomial', mtype = 'grouped');
warnings.filterwarnings('default')
cvglmnetPlot(cvfit)
cvglmnetPredict(cvfit, newx = x[0:10, :], s = 'lambda_min', ptype = 'class')
# Import relevant modules and setup for calling glmnet
%reset -f
%matplotlib inline
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy, importlib, pprint, matplotlib.pyplot as plt, warnings
from glmnet import glmnet; from glmnetPlot import glmnetPlot
from glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict
from cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef
from cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'PoissonExampleX.dat', dtype = scipy.float64, delimiter = ',')
y = scipy.loadtxt(baseDataDir + 'PoissonExampleY.dat', dtype = scipy.float64, delimiter = ',')
fit = glmnet(x = x.copy(), y = y.copy(), family = 'poisson')
glmnetPlot(fit);
glmnetCoef(fit, s = scipy.float64([1.0]))
glmnetPredict(fit, x[0:5,:], ptype = 'response', s = scipy.float64([0.1, 0.01]))
warnings.filterwarnings('ignore')
cvfit = cvglmnet(x.copy(), y.copy(), family = 'poisson')
warnings.filterwarnings('default')
cvglmnetPlot(cvfit)
optlam = scipy.array([cvfit['lambda_min'], cvfit['lambda_1se']]).reshape([2,])
cvglmnetCoef(cvfit, s = optlam)
# Import relevant modules and setup for calling glmnet
%reset -f
%matplotlib inline
import sys
sys.path.append('../test')
sys.path.append('../lib')
import scipy, importlib, pprint, matplotlib.pyplot as plt, warnings
from glmnet import glmnet; from glmnetPlot import glmnetPlot
from glmnetPrint import glmnetPrint; from glmnetCoef import glmnetCoef; from glmnetPredict import glmnetPredict
from cvglmnet import cvglmnet; from cvglmnetCoef import cvglmnetCoef
from cvglmnetPlot import cvglmnetPlot; from cvglmnetPredict import cvglmnetPredict
# parameters
baseDataDir= '../data/'
# load data
x = scipy.loadtxt(baseDataDir + 'CoxExampleX.dat', dtype = scipy.float64, delimiter = ',')
y = scipy.loadtxt(baseDataDir + 'CoxExampleY.dat', dtype = scipy.float64, delimiter = ',')
fit = glmnet(x = x.copy(), y = y.copy(), family = 'cox')
glmnetPlot(fit);
glmnetCoef(fit, s = scipy.float64([0.05]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As an example, we set $\alpha = 0.2$ (more like a ridge regression), and give double weights to the latter half of the observations. To avoid too long a display here, we set nlambda to 20. In practice, however, the number of values of $\lambda$ is recommended to be 100 (default) or more. In most cases, it does not come with extra cost because of the warm-starts used in the algorithm, and for nonlinear models leads to better convergence properties.
Step2: We can then print the glmnet object.
Step3: This displays the call that produced the object fit and a three-column matrix with columns Df (the number of nonzero coefficients), %dev (the percent deviance explained) and Lambda (the corresponding value of $\lambda$).
Step4: Now when we plot against %deviance we get a very different picture. This is percent deviance explained on the training data. What we see here is that toward the end of the path this value are not changing much, but the coefficients are "blowing up" a bit. This lets us focus attention on the parts of the fit that matter. This will especially be true for other models, such as logistic regression.
Step5: We can extract the coefficients and make predictions at certain values of $\lambda$. Two commonly used options are
Step6: The output is for False.(TBD) The exact = 'True' option is not yet implemented.
Step7: gives the fitted values for the first 5 observations at $\lambda = 0.05$. If multiple values of s are supplied, a matrix of predictions is produced.
Step8: does 20-fold cross-validation, based on mean squared error criterion (default though).
Step9: Users can control the folds used. Here we use the same folds so we can also select a value for $\alpha$.
Step10: There are no built-in plot functions to put them all on the same plot, so we are on our own here
Step11: We see that lasso (alpha=1) does about the best here. We also see that the range of lambdas used differs with alpha.
Step12: These are rather arbitrary limits; often we want the coefficients to be positive, so we can set only lower.limit to be 0.
Step13: We see from the labels that the three variables with 0 penalty factors always stay in the model, while the others follow typical regularization paths and shrunken to 0 eventually.
Step14: We wish to label the curves with the variable names. Here's a simple way to do this, using the matplotlib library in python (and a little research into how to customize it). We need to have the positions of the coefficients at the end of the path.
Step15: We have done nothing here to avoid overwriting of labels, in the event that they are close together. This would be a bit more work, but perhaps best left alone, anyway.
Step16: We fit the data, with an object "mfit" returned.
Step17: For multiresponse Gaussian, the options in glmnet are almost the same as the single-response case, such as alpha, weights, nlambda, standardize. A exception to be noticed is that standardize.response is only for mgaussian family. The default value is FALSE. If standardize.response = TRUE, it standardizes the response variables.
Step18: Note that we set type.coef = "2norm". Under this setting, a single curve is plotted per variable, with value equal to the $\ell_2$ norm. The default setting is type.coef = "coef", where a coefficient plot is created for each response (multiple figures).
Step19: The prediction result is saved in a three-dimensional array with the first two dimensions being the prediction matrix for each response variable and the third indicating the response variables.
Step20: We plot the resulting cv.glmnet object "cvmfit".
Step21: To show explicitly the selected optimal values of $\lambda$, type
Step22: As before, the first one is the value at which the minimal mean squared error is achieved and the second is for the most regularized model whose mean squared error is within one standard error of the minimal.
Step23: The input matrix $x$ is the same as other families. For binomial logistic regression, the response variable $y$ should be either a factor with two levels, or a two-column matrix of counts or proportions.
Step24: Like before, we can print and plot the fitted object, extract the coefficients at specific $\lambda$'s and also make predictions. For plotting, the optional arguments such as xvar and label are similar to the Gaussian. We plot against the deviance explained and show the labels.
Step25: Prediction is a little different for logistic from Gaussian, mainly in the option type. "link" and "response" are never equivalent and "class" is only available for logistic regression. In summary,
Step26: For logistic regression, cvglmnet has similar arguments and usage as Gaussian. nfolds, weights, lambda, parallel are all available to users. There are some differences in ptype
Step27: It uses misclassification error as the criterion for 10-fold cross-validation.
Step28: coef and predict are simliar to the Gaussian case and we omit the details. We review by some examples.
Step29: As mentioned previously, the results returned here are only for the second level of the factor response.
Step30: Like other GLMs, glmnet allows for an "offset". This is a fixed vector of N numbers that is added into the linear predictor.
Step31: The optional arguments in glmnet for multinomial logistic regression are mostly similar to binomial regression except for a few cases.
Step32: We plot the resulting object "fit".
Step33: The options are xvar, label and ptype, in addition to other ordinary graphical parameters.
Step34: Note that although mtype is not a typical argument in cvglmnet, in fact any argument that can be passed to glmnet is valid in the argument list of cvglmnet. We also use parallel computing to accelerate the calculation.
Step35: [Back to <a href='#toc'>Table of Contents</a>]
Step36: We apply the function glmnet with the "poisson" option.
Step37: The optional input arguments of glmnet for "poisson" family are similar to those for others.
Step38: Like before, we can extract the coefficients and make predictions at certain $\lambda$'s by using coef and predict respectively. The optional input arguments are similar to those for other families. In function predict, the option type, which is the type of prediction required, has its own specialties for Poisson family. That is,
Step39: We may also use cross-validation to find the optimal $\lambda$'s and thus make inferences.
Step40: Options are almost the same as the Gaussian family except that for type.measure,
Step41: We can also show the optimal $\lambda$'s and the corresponding coefficients.
Step42: The predict method is similar and we do not repeat it here.
Step43: The Surv function in the package survival can create such a matrix. Note, however, that the coxph and related linear models can handle interval and other fors of censoring, while glmnet can only handle right censoring in its present form.
Step44: All the standard options are available such as alpha, weights, nlambda and standardize. Their usage is similar as in the Gaussian case and we omit the details here. Users can also refer to the help file help(glmnet).
Step45: As before, we can extract the coefficients at certain values of $\lambda$.
|
3,591
|
<ASSISTANT_TASK:>
Python Code:
# from qiita_db.study import Study
# from shutil import copy
# from os import mkdir
# ffp = '/home/qiita/emp-sample-info-files'
# study_ids = [ 550, 632, 638, 659, 662, 678, 713, 714, 722, 723,
# 755, 776, 804, 805, 807, 808, 809, 810, 829, 846,
# 861, 864, 865, 889, 894, 895, 905, 910, 925, 933,
# 940, 945, 958, 963, 990, 1001, 1024, 1030, 1031, 1033,
# 1034, 1035, 1036, 1037, 1038, 1039, 1041, 1043, 1056, 1064,
# 1098, 1197, 1198, 1222, 1235, 1240, 1242, 1288, 1289, 1453,
# 1481, 1521, 1526, 1578, 1579, 1580, 1621, 1622, 1627, 1632,
# 1642, 1665, 1673, 1674, 1692, 1694, 1696, 1702, 1711, 1713,
# 1714, 1715, 1716, 1717, 1721, 1734, 1736, 1747, 1748, 1773,
# 1774, 1795, 1799, 1883, 1889, 2080, 2182, 2192, 2229, 2300,
# 2318, 2338, 2382, 10145, 10146, 10156, 10171, 10172, 10180, 10245,
# 10246, 10247, 10248, 10273, 10278, 10308, 10323, 10346, 10363, 10522,
# 10533, 10581]
# studies = [Study(s) for s in study_ids]
# mkdir(ffp)
# [copy(s.sample_template.get_filepaths()[0][1], ffp)
# for s in studies if s.sample_template is not None]
import pandas as pd
import numpy as np
pd.set_option("display.max_rows", 200)
pd.set_option("display.max_columns", 80)
path_refined = '../../data/metadata-refine/emp_qiime_mapping_refined_YYYYMMDD.tsv'
path_ids = '../../data/metadata-refine/refine_emp_studies_ct112.txt'
path_plan = '../../data/metadata-refine/qiita_add_replace_columns.xlsx'
path_sample_info = '../../data/metadata-refine/metadata-sample-info'
# Build the working set of study IDs from the refine list, then prune it.
studies = set([line.rstrip('\n') for line in open(path_ids)])
# remove studies where mapping file has fewer samples than sample info file -- ignore
studies = studies - {'10246', '10278', '10346'}
# NOTE(review): same rationale as the line above but a different action --
# one of these two comments is likely inaccurate; confirm which condition
# actually applies to each group.
# remove studies where mapping file has fewer samples than sample info file -- these will be fixed manually
studies = studies - {'1033', '1696', '2229'}
# remove studies where sample names don't match (10146 is prepended twice!)
studies = studies - {'10146'}
# NOW: DON'T remove these studies because sample info files ARE in Qiita (studies not in EMP paper)
#studies = studies - {'1889'}
# convert to a numerically sorted list of string IDs
studies = list(studies)
studies = [int(x) for x in studies]
studies.sort()
studies = [str(x) for x in studies]
# Studies excluded above; their refined columns are exported separately below.
problem_studies = ['1033', '1696', '2229', '10146', '10246', '10278', '10346']
df_refined = pd.read_csv(path_refined, sep='\t', index_col=0, dtype=object, low_memory=False)
df_plan = pd.read_excel(path_plan)
# For each study: read its Qiita sample-info file, apply every add/replace
# rule from the plan, and write both the refined file and a per-study diff
# containing only the columns that were (re)written.
for study_id in studies:
    df = pd.read_csv('%s/%s_sample_info.tsv' % (path_sample_info, study_id), sep='\t', index_col=0)
    df_new = df.copy(deep=True)
    df_diff = pd.DataFrame(index=df.index)
    for index, row in df_plan.iterrows():
        # old_column may list several comma-separated legacy column names
        old_cols = row.old_column.split(',')
        newcol = ''
        if row.action == 'replace always':
            # drop all old columns if they exist
            for old_col in old_cols:
                if old_col in df_new.columns:
                    df_new.drop(old_col, axis=1, inplace=True)
            # add new column always (values pulled from the refined mapping)
            newcol = [df_refined.loc[i, row.new_column] for i in df_new.index]
            df_new[row.new_column] = newcol
            df_diff[row.new_column] = newcol
        elif row.action == 'replace if':
            # check if any old columns exist
            if np.any([x in df_new.columns for x in old_cols]):
                # drop old column if it exists
                for old_col in old_cols:
                    if old_col in df_new.columns:
                        df_new.drop(old_col, axis=1, inplace=True)
                # add new column if old column exists
                newcol = [df_refined.loc[i, row.new_column] for i in df_new.index]
                df_new[row.new_column] = newcol
                df_diff[row.new_column] = newcol
        elif row.action == 'add always':
            # add new column always
            newcol = [df_refined.loc[i, row.new_column] for i in df_new.index]
            df_new[row.new_column] = newcol
            df_diff[row.new_column] = newcol
        elif row.action == 'add if':
            # add new column if old column exists
            if np.any([x in df_new.columns for x in old_cols]):
                newcol = [df_refined.loc[i, row.new_column] for i in df_new.index]
                df_new[row.new_column] = newcol
                df_diff[row.new_column] = newcol
    # fill NaNs with 'Not applicable' (Qiita terminology)
    df_new.fillna('Not applicable', inplace=True)
    df_diff.fillna('Not applicable', inplace=True)
    # reorder columns alphabetically (Qiita style)
    df_new = df_new[df_new.columns.sort_values()]
    df_diff = df_diff[df_diff.columns.sort_values()]
    # write to tsv
    df_new.to_csv('../../data/metadata-refine/metadata-sample-info-refined/%s_sample_info.tsv' % str(study_id), sep='\t', index=True)
    df_diff.to_csv('../../data/metadata-refine/metadata-sample-info-diff/%s_sample_info_diff.tsv' % str(study_id), sep='\t', index=True)
# Export the planned columns for the manually handled problem studies.
new_cols = list(df_plan.new_column)
df_refined[df_refined.study_id.isin(problem_studies)][new_cols].to_csv(
    '../../data/metadata-refine/qiita_metadata_for_problem_studies.tsv', sep='\t')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Porting refined EMP metadata to Qiita sample info files
Step2: Replace or add columns
Step3: Export action columns for problem studies
|
3,592
|
<ASSISTANT_TASK:>
Python Code:
import math
def funcion(x):
return (math.pow(math.e,6*x))+(1.44*math.pow(math.e,2*x))-(2.079*math.pow(math.e,4*x))-(0.333)
def biseccion(intA, intB, errorA, noMaxIter):
    """Bisection root search on the module-level `funcion` over [intA, intB].

    Prints the estimated root, its residual, the final approximate relative
    error (percent) and the iteration count; prints a notice instead when the
    interval shows no sign change.
    """
    # Guard clause: bisection needs a sign change across the interval.
    if not (funcion(intA) * funcion(intB) < 0):
        print('En el intervalo dado la función no presenta cambio de signo')
        print('No hay raices que encontrar')
        return
    noIter, errorTmp = 0, 1
    intTmp, oldInt = 0, intA
    while noIter < noMaxIter and errorTmp > errorA and funcion(intTmp) != 0:
        intTmp = (intB + intA) / 2  # midpoint of the current bracket
        # Keep whichever half-interval still brackets the sign change.
        if funcion(intA) * funcion(intTmp) < 0:
            intB = intTmp
        else:
            intA = intTmp
        noIter += 1
        errorTmp = abs((intTmp - oldInt) / intTmp) * 100  # approx. relative error (%)
        oldInt = intTmp
    print('La raíz es: ', intTmp)
    print('F(raiz) es:', funcion(intTmp))
    print('Error: ', errorTmp)
    print('No. de iteraciones realizadas: ', noIter)
biseccion(-1,0,math.pow(10,-7),1000)
import math
def funcion(x):
return (math.pow(math.e,6*x))+(1.44*math.pow(math.e,2*x))-(2.079*math.pow(math.e,4*x))-(0.333)
def reglaFalsa(intA, intB, errorA, noMaxIter):
    """Regula falsi (false position) root search on `funcion` over [intA, intB].

    intA, intB : interval endpoints; funcion must change sign between them.
    errorA     : stop once the approximate relative error (%) drops below this.
    noMaxIter  : iteration cap.
    Prints the estimated root, its residual, the final error and the iteration count.
    """
    if(funcion(intA)*funcion(intB)<0):
        noIter = 0
        errorTmp = 1
        intTmp = 0
        oldInt = intA
        while(noIter<noMaxIter and errorTmp>errorA and funcion(intTmp)!=0):
            # False-position step: x = b - f(b)*(a - b) / (f(a) - f(b)).
            # BUG FIX: the original used (intA + intB) in place of
            # (intA - intB), which is not the regula falsi formula (compare
            # the correct secant step in niutonConSecante below).
            intTmp = intB-((funcion(intB)*(intA-intB))/(funcion(intA)-funcion(intB)))
            # Retain the endpoint that keeps the root bracketed.
            if(funcion(intA)*funcion(intTmp)<0):
                intB = intTmp
            else:
                intA = intTmp
            noIter+=1
            errorTmp=abs((intTmp-oldInt)/intTmp)*100  # approx. relative error (%)
            oldInt = intTmp
        print('La raíz es: ',intTmp)
        print('F(raiz) es:' ,funcion(intTmp))
        print('Error: ',errorTmp)
        print('No. de iteraciones realizadas: ',noIter)
    else:
        print('En el intervalo dado la función no presenta cambio de signo')
        print('No hay raices que encontrar')
reglaFalsa(-1,0,math.pow(10,-6),1000)
import math
def funcion(x):
return math.pow(x,10)-1
def funcionDeriv(x):
return 10*math.pow(x,9)
def niuton(valorIn, errorA, noMaxIter):
    """Newton-Raphson iteration on `funcion`/`funcionDeriv` starting at valorIn.

    Stops on the iteration cap, on an approximate relative error (%) below
    errorA, or on an exact zero of the last iterate; prints progress and the
    final result.
    """
    oldInt, intTmp = valorIn, 0
    errorTmp, noIter = 1, 0
    while noIter < noMaxIter and errorTmp > errorA and funcion(intTmp) != 0:
        intTmp = oldInt - funcion(oldInt) / funcionDeriv(oldInt)  # Newton step
        errorTmp = abs((intTmp - oldInt) / intTmp) * 100
        print('Noiter: ', noIter, ' oldval:', oldInt, ' error:', errorTmp)
        oldInt = intTmp
        noIter += 1
    print('La raíz es: ', intTmp)
    print('F(raiz) es:', funcion(intTmp))
    print('Error: ', errorTmp)
    print('No. de iteraciones realizadas: ', noIter)
niuton(0.5,math.pow(10,-10),1000)
import math
def funcion(x):
return math.pow(x,10)-1
def niutonConSecante(primerVal, segundoVal, errorA, noMaxIter):
    """Secant-method root search on `funcion`, seeded with two initial guesses.

    Iterates until the cap, until the approximate relative error (%) drops
    below errorA, or until the current iterate is an exact root; prints each
    step and the final result.
    """
    noIter = 0
    errorTmp = 1
    while noIter < noMaxIter and errorTmp > errorA and funcion(segundoVal) != 0:
        # Secant step through the last two iterates.
        fPrev, fCurr = funcion(primerVal), funcion(segundoVal)
        valTmp = segundoVal - fCurr * (primerVal - segundoVal) / (fPrev - fCurr)
        primerVal, segundoVal = segundoVal, valTmp
        errorTmp = abs((segundoVal - primerVal) / segundoVal) * 100
        print('Noiter: ', noIter, ' primVal:', primerVal, ' segunVal:', segundoVal, ' error:', errorTmp)
        noIter += 1
    print('La raíz es: ', valTmp)
    print('F(raiz) es:', funcion(valTmp))
    print('Error: ', errorTmp)
    print('No. de iteraciones realizadas: ', noIter)
niutonConSecante(0.5,-0.9990234375,math.pow(10,-10),1000)
import math
def funcion(x):
return (math.pow(math.e,6*x))+(1.44*math.pow(math.e,2*x))-(2.079*math.pow(math.e,4*x))-(0.333)
def funcionDeriv(x):
return (6*math.pow(math.e,6*x))-(8.316*math.pow(math.e,4*x))+(2.88*math.pow(math.e,2*x))
def niuton(valorIn, errorA, noMaxIter):
noIter = 0
errorTmp = 1
intTmp = 0
oldInt = valorIn
while(noIter<noMaxIter and errorTmp>errorA and funcion(intTmp)!=0):
intTmp = oldInt-((funcion(oldInt))/(funcionDeriv(oldInt)))
errorTmp=abs((intTmp-oldInt)/intTmp)*100
print('Noiter: ',noIter,' oldval:',oldInt,' error:',errorTmp)
oldInt = intTmp
noIter+=1
print('La raíz es: ',intTmp)
print('F(raiz) es:' ,funcion(intTmp))
print('Error: ',errorTmp)
print('No. de iteraciones realizadas: ',noIter)
niuton(-1, math.pow(10,-10),1000)
import math
def funcion(x):
return (math.pow(math.e,6*x))+(1.44*math.pow(math.e,2*x))-(2.079*math.pow(math.e,4*x))-(0.333)
def funcionDeriv(x):
return (6*math.pow(math.e,6*x))-(8.316*math.pow(math.e,4*x))+(2.88*math.pow(math.e,2*x))
def niuton(valorIn, errorA, noMaxIter):
noIter = 0
errorTmp = 1
intTmp = 0
oldInt = valorIn
while(noIter<noMaxIter and errorTmp>errorA and funcion(intTmp)!=0):
intTmp = oldInt-((funcion(oldInt))/(funcionDeriv(oldInt)))
errorTmp=abs((intTmp-oldInt)/intTmp)*100
print('Noiter: ',noIter,' oldval:',oldInt,' error:',errorTmp)
oldInt = intTmp
noIter+=1
print('La raíz es: ',intTmp)
print('F(raiz) es:' ,funcion(intTmp))
print('Error: ',errorTmp)
print('No. de iteraciones realizadas: ',noIter)
niuton(-3, math.pow(10,-10),1000)
import math
def funcion(x):
return (math.pow(math.e,6*x))+(1.44*math.pow(math.e,2*x))-(2.079*math.pow(math.e,4*x))-(0.333)
def niutonConSecante(primerVal, segundoVal, errorA, noMaxIter):
noIter = 0
errorTmp = 1
intTmp = 0
while(noIter<noMaxIter and errorTmp>errorA and funcion(segundoVal)!=0):
valTmp = segundoVal-((funcion(segundoVal)*(primerVal-segundoVal))/(funcion(primerVal)-funcion(segundoVal)))
primerVal = segundoVal
segundoVal = valTmp
errorTmp=abs((segundoVal-primerVal)/segundoVal)*100
print('Noiter: ',noIter, ' primVal:', primerVal,' segunVal:',segundoVal,' error:',errorTmp)
noIter+=1
print('La raíz es: ',valTmp)
print('F(raiz) es:' ,funcion(valTmp))
print('Error: ',errorTmp)
print('No. de iteraciones realizadas: ',noIter)
niutonConSecante(-3,-1,math.pow(10,-6),100000)
import math
def funcion(x):
return (math.pow(math.e,6*x))+(1.44*math.pow(math.e,2*x))-(2.079*math.pow(math.e,4*x))-(0.333)
def niutonConSecante(primerVal, segundoVal, errorA, noMaxIter):
noIter = 0
errorTmp = 1
intTmp = 0
while(noIter<noMaxIter and errorTmp>errorA and funcion(segundoVal)!=0):
valTmp = segundoVal-((funcion(segundoVal)*(primerVal-segundoVal))/(funcion(primerVal)-funcion(segundoVal)))
primerVal = segundoVal
segundoVal = valTmp
errorTmp=abs((segundoVal-primerVal)/segundoVal)*100
#print('Noiter: ',noIter, ' primVal:', primerVal,' segunVal:',segundoVal,' error:',errorTmp)
noIter+=1
print('La raíz es: ',valTmp)
print('F(raiz) es:' ,funcion(valTmp))
print('Error: ',errorTmp)
print('No. de iteraciones realizadas: ',noIter)
niutonConSecante(-3,-0.32944335545310327,math.pow(10,-10),1000)
def funcion(x):
return math.pow(x,10)-1
funcion(0.5)
def funcion(x):
return (math.pow(math.e,6*x))+(1.44*math.pow(math.e,2*x))-(2.079*math.pow(math.e,4*x))-(0.333)
funcion(-1)
math.pow(math.e,-1)-1
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import math
def funcion(x):
return (10*(math.sin(x+3))*(math.exp((x/2)+1)))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.spines['left'].set_position('zero')
# ax.spines['right'].set_color('none')
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
x = np.linspace(-10,10,30000)
plt.plot(x, np.exp(x))
y = [funcion(punto) for punto in x]
plt.plot(x, y)
import math
def funcion(x):
return (10*(math.sin(x+3))*(math.exp((x/2)+1)))
a = -4
b = -2
max = 1000
epsilon = 0
iter = 0
xold = b
while iter<=max:
x = a-((funcion(a)*(b-a))/(funcion(b)-funcion(a)))
iter += 1
if abs(x-xold)<epsilon*abs(x):
print("ajá",x)
else:
print(iter,a,b,x)
xold = x
if funcion(a)*funcion(x) > 0:
a = x
else:
b = x
import math
def funcion(x):
return (10*(math.sin(x+3))*(math.exp((x/2)+1)))
def funcionDeriv(x):
return (5*(math.exp((x/2)+1))*(math.sin(x+3)+(2*math.cos(x+3))))
def newtonRaphson(val, errorA, noMaxIter):
    """Newton-Raphson on `funcion`/`funcionDeriv`.

    Prints the root once the absolute step size falls below errorA; gives up
    silently after noMaxIter iterations.
    """
    for _ in range(noMaxIter):
        valTmp = val - funcion(val) / funcionDeriv(val)  # Newton step
        if abs(valTmp - val) < errorA:
            print('es >:V : ', valTmp)
            break
        val = valTmp
newtonRaphson(-5, math.pow(10,-15),10000)
newtonRaphson(0, math.pow(10,-15),10000)
import math
import numpy as np
def cuadratica(r, s):
    """Roots of the quadratic factor x^2 - r*x - s (Bairstow convention).

    Returns a two-element list: floats for real roots, or strings of the
    form 'a+b' / 'a-b' for a complex-conjugate pair (b = imaginary part).
    """
    discrim = math.pow(r, 2) + (4 * s)
    raices = []
    # BUG FIX: use >= so a zero discriminant yields the real double root
    # r/2 twice; the original sent it to the complex branch and returned
    # strings like '1.0+0.0'.
    if discrim >= 0:  # raiz real
        raices.append((r + math.sqrt(discrim)) / (2))
        raices.append((r - math.sqrt(discrim)) / (2))
    else:  # raiz compleja, rendered as text
        # NOTE(review): no imaginary unit is appended to the strings; the
        # caller only prints them, so the format is left unchanged.
        raices.append(str(r / 2) + '+' + str(math.sqrt(abs(discrim)) / 2))
        raices.append(str(r / 2) + '-' + str(math.sqrt(abs(discrim)) / 2))
    return raices
def generateb(a, r, s):
    """Bairstow synthetic division: b-coefficients of a(x) by x^2 - r*x - s.

    a : polynomial coefficients, highest degree first.
    Returns the list [b_n, b_{n-1}, ..., b_0] of the division recursion.
    """
    b = [a[0], a[1] + r * a[0]]  # b_n = a_n, b_{n-1} = a_{n-1} + r*b_n
    for coef in a[2:]:
        b.append(coef + r * b[-1] + s * b[-2])
    return b
def generatec(b, r, s):
    """Bairstow second synthetic division: c-coefficients from the b array.

    The final b entry is excluded from the recursion (b[2:-1]), matching the
    partial-derivative coefficients used in Bairstow's Newton update.
    """
    c = [b[0], b[1] + r * b[0]]  # c_n = b_n, c_{n-1} = b_{n-1} + r*c_n
    for coef in b[2:-1]:
        c.append(coef + r * c[-1] + s * c[-2])
    return c
# --- Bairstow's method driver: peel quadratic factors x^2 - r*x - s off a(x) ---
r = 0
s = 0
a = [1,-5,10,-10,4] #a4, a3, a2, a1, a0
grado = len(a)
errorA = 0.000001
noMaxIter = 1000
listaRaices = []
rerror = 100
serror = 100
noIter = 1
# NOTE(review): a degree-4 polynomial has 4 roots, yet this loop runs until
# len(listaRaices) >= len(a) == 5 -- confirm the intended stopping count.
while len(listaRaices) < len(a):
    print('=-='+str(len(listaRaices)))
    # NOTE(review): rerror/serror are never reset here, so once the first
    # factor converges this inner loop is skipped on subsequent passes and
    # the same (r, s) pair gets reused -- verify this is intentional.
    while rerror>errorA or serror>errorA:
        b = generateb(a,r,s) #b4, b3, b2, b1, b0 ...
        c = generatec(b,r,s) #c4, c3, c2, c1
        # 2x2 Newton system for the corrections (delta r, delta s)
        eq1 = np.array([[c[-2],c[-3]],[c[-1],c[-2]]])
        eq2 = np.array([-b[-2],-b[-1]])
        solEq = np.linalg.solve(eq1,eq2)
        rdelta = solEq[0]
        sdelta = solEq[1]
        r += rdelta
        s += sdelta
        rerror = abs(rdelta/r)*100  # approx. relative error in r (%)
        serror = abs(sdelta/s)*100  # approx. relative error in s (%)
        noIter+=1
    listaRaices.extend(cuadratica(r,s))  # roots of the converged quadratic factor
    b = b[:-2]  # coefficients of the deflated polynomial
    if len(a)-len(listaRaices) == 2:
        # NOTE(review): cuadratica expects (r, s) of x^2 - r*x - s, but b[0]
        # and b[1] here are the leading coefficients of the deflated
        # quadratic -- confirm this mapping is what was intended.
        listaRaices.extend(cuadratica(b[0],b[1]))
        print('==2')
    elif len(a)-len(listaRaices) == 1:
        listaRaices.append(-s/r)  # single remaining linear root
        print('==1')
    print(noIter)
    print(r)
    print(s)
    print(noIter)
print('---------------')
for i in listaRaices:
    print(i)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h1>Intento de algoritmo de regla falsa</h1>
Step2: <h1>(testing) Intento de algoritmo de Newton - Raphson (en x^10 -1)</h1>
Step3: <h1>Intento de algoritmo de secante (en x^10 -1)</h1>
Step4: <h1>Intento de algoritmo de Newton - Raphson (en exp(6x)+1.44 exp(2x)- 2.079 exp(4x)- 0.333) con punto inicial de -1</h1>
Step5: <h1>Intento de algoritmo de Newton - Raphson (en exp(6x)+1.44 exp(2x)- 2.079 exp(4x)- 0.333) con punto inicial de -3</h1>
Step6: <h1>Intento de algoritmo de secante (en exp(6x)+1.44 exp(2x)- 2.079 exp(4x)- 0.333) con punto inicial de -1</h1>
Step7: <h1>Intento de algoritmo de secante (en exp(6x)+1.44 exp(2x)- 2.079 exp(4x)- 0.333) con punto inicial de -3</h1>
|
3,593
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import google.datalab.bigquery as bq
import matplotlib.pyplot as plot
import numpy as np
%bq tables list --project cloud-datalab-samples --dataset httplogs
%bq tables describe -n cloud-datalab-samples.httplogs.logs_20140615
%%bq query -n logs
SELECT timestamp, latency, status, method, endpoint
FROM `cloud-datalab-samples.httplogs.logs_20140615`
ORDER by timestamp
%%bq sample --query logs --count 7
%%bq query -n timeseries
SELECT DIV(UNIX_SECONDS(timestamp), 300) * 300 AS five_minute_window,
APPROX_QUANTILES(latency, 99)[SAFE_ORDINAL(99)] as latency
FROM `cloud-datalab-samples.httplogs.logs_20140615`
WHERE endpoint = 'Recent'
GROUP BY five_minute_window
ORDER by five_minute_window
%%bq sample --query timeseries --count 10
# Execute and convert the results to a Pandas dataframe
timeseries_df = timeseries.execute(output_options=bq.QueryOutput.dataframe()).result()
timeseries_values = timeseries_df['latency'].values
timeseries_len = len(timeseries_values)
plot.plot(np.array(range(timeseries_len)), timeseries_values)
plot.yscale('log')
plot.grid()
class AnomalyDetector(object):
    """Rolling-window anomaly detector.

    Keeps ring buffers of the last `window` observations (and their squares)
    to maintain a rolling mean and variance. After the warm-up period, any
    observation farther than three standard deviations from the rolling mean
    is clipped to the 3-sigma band and flagged as an anomaly.
    """

    def __init__(self, window = 10):
        self._window = window
        self._index = 0                     # next ring-buffer slot to overwrite
        self._values = np.zeros(window)     # raw values in the window
        self._valuesSq = np.zeros(window)   # squared values in the window
        self._mean = 0
        self._variance = 0
        self._count = 0                     # warm-up counter (stops growing after window+1)

    def observation(self, value):
        """Record one observation; return (is_anomaly, rolling_mean)."""
        threshold = 3 * np.sqrt(self._variance)
        anomaly = False
        if self._count > self._window:
            # Warm-up complete: clip outliers to the 3-sigma band.
            if value > self._mean + threshold:
                value, anomaly = self._mean + threshold, True
            elif value < self._mean - threshold:
                value, anomaly = self._mean - threshold, True
        else:
            self._count += 1
        evicted = self._values[self._index]
        self._values[self._index] = value
        self._valuesSq[self._index] = value ** 2
        self._index = (self._index + 1) % self._window
        # Incremental mean update; variance via E[x^2] - E[x]^2 over the window.
        self._mean = self._mean - evicted / self._window + value / self._window
        self._variance = sum(self._valuesSq) / self._window - (self._mean ** 2)
        return anomaly, self._mean
anomalies = np.zeros(timeseries_len)
means = np.zeros(timeseries_len)
anomaly_detector = AnomalyDetector(36)
for i, value in enumerate(timeseries_values):
anomaly, mean = anomaly_detector.observation(value)
anomalies[i] = anomaly
means[i] = mean
ticks = np.array(range(timeseries_len))
plot.plot(ticks, timeseries_values)
plot.plot(ticks[anomalies == 1], timeseries_values[anomalies == 1], 'ro')
plot.plot(ticks, means, 'g', linewidth = 1)
plot.yscale('log')
plot.grid()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Understanding the Logs Data
Step2: Transforming Logs into a Time Series
Step3: Visualizing the Time Series Data
Step4: Anomaly Detection
Step5: With the anomaly detector implemented, let's run the timeseries through it to collect any anomalies and the expected mean along each point.
Step6: Then, plot the same time series, but overlay the anomalies and the mean values as well.
|
3,594
|
<ASSISTANT_TASK:>
Python Code:
from dx import *
import numpy as np
import pandas as pd
from pylab import plt
plt.style.use('seaborn')
h5 = pd.HDFStore('./data/vstoxx_march_2014.h5', 'r')
vstoxx_index = h5['vstoxx_index']
vstoxx_futures = h5['vstoxx_futures']
vstoxx_options = h5['vstoxx_options']
h5.close()
%matplotlib inline
vstoxx_index['V2TX'].plot(figsize=(10, 6))
vstoxx_futures.info()
vstoxx_futures.tail()
vstoxx_options.info()
vstoxx_options.tail()
import datetime as dt
import calendar
def third_friday(date):
    """Return the third Friday of the month containing `date` as a datetime."""
    first_weekday = calendar.weekday(date.year, date.month, 1)
    # (first_weekday + 2) % 7 counts how many days before the 21st the
    # third Friday falls; it always lands in the 15..21 window.
    third = 21 - (first_weekday + 2) % 7
    return dt.datetime(date.year, date.month, third)
third_fridays = {}
for month in set(vstoxx_futures['EXP_MONTH']):
third_fridays[month] = third_friday(dt.datetime(2014, month, 1))
third_fridays
V0 = 17.6639 # VSTOXX level on 31.03.2014
futures_data = vstoxx_futures[vstoxx_futures.DATE == '2014/3/31'].copy()
options_data = vstoxx_options[(vstoxx_options.DATE == '2014/3/31')
& (vstoxx_options.TYPE == 'C')].copy()
me = market_environment('me', dt.datetime(2014, 3, 31))
me.add_constant('initial_value', 17.6639) # index on 31.03.2014
me.add_constant('volatility', 2.0) # for initialization
me.add_curve('discount_curve', constant_short_rate('r', 0.01)) # assumption
options_data['IMP_VOL'] = 0.0 # initialization new iv column
%%time
tol = 0.3 # tolerance level for moneyness
for option in options_data.index:
# iterating over all option quotes
forward = futures_data[futures_data['MATURITY'] == \
options_data.loc[option]['MATURITY']]['PRICE'].values
# picking the right futures value
if (forward * (1 - tol) < options_data.loc[option]['STRIKE']
< forward * (1 + tol)):
# only for options with moneyness within tolerance
call = options_data.loc[option]
me.add_constant('strike', call['STRIKE'])
me.add_constant('maturity', call['MATURITY'])
call_option = BSM_european_option('call', me)
options_data.loc[option, 'IMP_VOL'] = \
call_option.imp_vol(call['PRICE'], 'call', volatility_est=0.6)
options_data[60:70]
import matplotlib.pyplot as plt
%matplotlib inline
plot_data = options_data[options_data.IMP_VOL > 0]
plt.figure(figsize=(10, 6))
for maturity in sorted(set(options_data['MATURITY'])):
data = plot_data.isin({'MATURITY': [maturity,]})
data = plot_data[plot_data.MATURITY == maturity]
# select data for this maturity
plt.plot(data['STRIKE'], data['IMP_VOL'],
label=maturity.date(), lw=1.5)
plt.plot(data['STRIKE'], data['IMP_VOL'], 'r.')
plt.xlabel('strike')
plt.ylabel('implied volatility of volatility')
plt.legend()
plt.show()
tol = 0.2
def get_option_selection(pricing_date, maturity, tol=tol):
    """Return (near-the-money call quotes, futures price) for a date/maturity.

    Call quotes are kept only when the strike lies within +/- tol of the
    matching VSTOXX futures price (the forward).
    """
    date_match = vstoxx_futures.DATE == pricing_date
    mat_match = vstoxx_futures.MATURITY == maturity
    forward = vstoxx_futures[date_match & mat_match]['PRICE'].values[0]
    lower, upper = (1 - tol) * forward, (1 + tol) * forward
    mask = ((vstoxx_options.DATE == pricing_date)
            & (vstoxx_options.MATURITY == maturity)
            & (vstoxx_options.TYPE == 'C')
            & (vstoxx_options.STRIKE > lower)
            & (vstoxx_options.STRIKE < upper))
    return vstoxx_options[mask], forward
def get_option_models(pricing_date, maturity, option_selection):
    """Build the VSTOXX risk-factor model and one valuation object per option.

    pricing_date     : valuation date; also keys the VSTOXX index level.
    maturity         : option/futures maturity (model final_date as well).
    option_selection : DataFrame of traded call quotes (needs a STRIKE column).
    Returns (vstoxx_model, option_models) with option_models mapping each
    DataFrame index to its valuation_mcs_european_single object.
    """
    me_vstoxx = market_environment('me_vstoxx', pricing_date)
    initial_value = vstoxx_index['V2TX'][pricing_date]
    me_vstoxx.add_constant('initial_value', initial_value)
    me_vstoxx.add_constant('final_date', maturity)
    me_vstoxx.add_constant('currency', 'EUR')
    me_vstoxx.add_constant('frequency', 'W')
    me_vstoxx.add_constant('paths', 10000)
    csr = constant_short_rate('csr', 0.01)
    # somewhat arbitrarily chosen here
    me_vstoxx.add_curve('discount_curve', csr)
    # placeholder parameters; calibrated later via mean_squared_error
    me_vstoxx.add_constant('kappa', 1.0)
    me_vstoxx.add_constant('theta', 1.2 * initial_value)
    me_vstoxx.add_constant('volatility', 1.0)
    # square-root diffusion for volatility modeling:
    # mean-reverting, positive process
    vstoxx_model = square_root_diffusion('vstoxx_model', me_vstoxx)
    # option parameters and payoff
    me_vstoxx.add_constant('maturity', maturity)
    payoff_func = 'np.maximum(maturity_value - strike, 0)'
    option_models = {}
    for option in option_selection.index:
        # .loc replaces the deprecated (and since-removed) pandas .ix indexer
        strike = option_selection['STRIKE'].loc[option]
        me_vstoxx.add_constant('strike', strike)
        option_models[option] = \
            valuation_mcs_european_single(
                'eur_call_%d' % strike,
                vstoxx_model,
                me_vstoxx,
                payoff_func)
    return vstoxx_model, option_models
def calculate_model_values(p0):
    ''' Returns all relevant option values.
    Parameters
    ===========
    p0 : tuple/list
        tuple of kappa, theta, volatility
    Returns
    =======
    model_values : dict
        dictionary with model values
    '''
    kappa, theta, volatility = p0
    # Push the candidate parameters into the shared risk-factor model,
    # then revalue every modeled option with a fixed seed for comparability.
    vstoxx_model.update(kappa=kappa, theta=theta, volatility=volatility)
    return {option: model.present_value(fixed_seed=True)
            for option, model in option_models.items()}
i = 0  # global iteration counter shared with the optimizer callbacks
def mean_squared_error(p0):
    ''' Returns the mean-squared error given
    the model and market values.
    Parameters
    ===========
    p0 : tuple/list
        tuple of kappa, theta, volatility
    Returns
    =======
    MSE : float
        mean-squared error
    '''
    # Penalty wall: reject parameter triples outside the admissible region
    # (negative kappa/vola, theta below 5, vola above 10).
    if p0[0] < 0 or p0[1] < 5. or p0[2] < 0 or p0[2] > 10.:
        return 100
    global i, option_selection, vstoxx_model, option_models, first, last
    # NOTE(review): the local name `pd` shadows the module-level pandas
    # alias inside this function -- harmless here, but easy to misread.
    pd = dt.datetime.strftime(
        option_selection['DATE'].iloc[0].to_pydatetime(),
        '%d-%b-%Y')
    mat = dt.datetime.strftime(
        option_selection['MATURITY'].iloc[0].to_pydatetime(),
        '%d-%b-%Y')
    model_values = calculate_model_values(p0)
    # Per-option difference between model value and market quote.
    option_diffs = {}
    for option in model_values:
        option_diffs[option] = (model_values[option]
                            - option_selection['PRICE'].loc[option])
    MSE = np.sum(np.array(list(option_diffs.values())) ** 2) / len(option_diffs)
    if i % 150 == 0:
        # progress output on every 150th call (header on the very first)
        if i == 0:
            print('%12s %13s %4s %6s %6s %6s --> %6s' % \
                 ('pricing_date', 'maturity_date', 'i', 'kappa',
                  'theta', 'vola', 'MSE'))
        print('%12s %13s %4d %6.3f %6.3f %6.3f --> %6.3f' % \
             (pd, mat, i, p0[0], p0[1], p0[2], MSE))
    i += 1
    return MSE
import scipy.optimize as spo
def get_parameter_series(pricing_date_list, maturity_list):
    """Calibrate (kappa, theta, sigma) per pricing date and maturity.

    For the first date of each maturity a coarse brute-force grid search
    seeds the local fmin optimization; subsequent dates start from the
    previous optimum. Returns a DataFrame with one row per
    (pricing_date, maturity) holding the calibrated parameters and MSE.
    """
    global i, option_selection, vstoxx_model, option_models, first, last
    # collects optimization results for later use (e.g. visualization)
    results = []
    for maturity in maturity_list:
        first = True
        for pricing_date in pricing_date_list:
            option_selection, forward = \
                get_option_selection(pricing_date, maturity)
            vstoxx_model, option_models = \
                get_option_models(pricing_date, maturity, option_selection)
            if first is True:
                # use brute force for the first run of each maturity
                i = 0
                opt = spo.brute(mean_squared_error,
                                ((0.5, 2.51, 1.),    # range for kappa
                                 (10., 20.1, 5.),    # range for theta
                                 (0.5, 10.51, 5.0)), # range for volatility
                                finish=None)
            i = 0
            # local refinement from the brute-force seed / previous optimum
            opt = spo.fmin(mean_squared_error, opt,
                           maxiter=200, maxfun=350, xtol=0.0000001, ftol=0.0000001)
            results.append(
                pd.DataFrame(
                    {'pricing_date' : pricing_date,
                     'maturity' : maturity,
                     'initial_value' : vstoxx_model.initial_value,
                     'kappa' : opt[0],
                     'theta' : opt[1],
                     'sigma' : opt[2],
                     'MSE' : mean_squared_error(opt)}, index=[0,]))
            first = False
            last = opt
    # DataFrame.append was removed in pandas 2.0; concat is the supported path
    parameters = pd.concat(results, ignore_index=True) if results \
        else pd.DataFrame()
    return parameters
%%time
pricing_date_list = pd.date_range('2014/3/1', '2014/3/31', freq='B')
maturity_list = [third_fridays[7]]
parameters = get_parameter_series(pricing_date_list, maturity_list)
paramet = parameters.set_index('pricing_date')
paramet.tail()
%matplotlib inline
paramet[['kappa', 'theta', 'sigma', 'MSE']].plot(subplots=True, color='b', figsize=(10, 12))
plt.tight_layout()
index = paramet.index[-1]
opt = np.array(paramet[['kappa', 'theta', 'sigma']].loc[index])
option_selection = get_option_selection(index, maturity_list[0], tol=tol)[0]
model_values = np.sort(np.array(list(calculate_model_values(opt).values())))[::-1]
import matplotlib.pyplot as plt
%matplotlib inline
fix, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(10, 8))
strikes = option_selection['STRIKE'].values
ax1.plot(strikes, option_selection['PRICE'], label='market quotes')
ax1.plot(strikes, model_values, 'ro', label='model values')
ax1.set_ylabel('option values')
ax1.grid(True)
ax1.legend(loc=0)
wi = 0.25
ax2.bar(strikes - wi / 2., model_values - option_selection['PRICE'],
label='market quotes', width=wi)
ax2.grid(True)
ax2.set_ylabel('differences')
ax2.set_xlabel('strikes')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: VSTOXX Futures & Options Data
Step2: VSTOXX index for the first quarter of 2014.
Step3: The VSTOXX futures data (8 futures maturities/quotes per day).
Step4: The VSTOXX options data. This data set is quite large due to the large number of European put and call options on the VSTOXX.
Step5: As a helper function we need a function to calculate all relevant third Fridays for all relevant maturity months of the data sets.
Step6: Implied Volatilities from Market Quotes
Step7: The following loop now calculates the implied volatilities for all those options whose strike lies within the defined tolerance level.
Step8: A selection of the results.
Step9: And the complete results visualized.
Step10: Market Modeling
Step11: Options Modeling
Step12: The function calculate_model_values returns Monte Carlo value estimates for all relevant options, given a parameter set for the square_root_diffusion risk factor model.
Step13: Calibration Functions
Step14: Implementing the Calibration Procedure
Step15: The Calibration Itself
Step16: Calibration Results
Step17: This is also illustrated by the visualization of the time series data for the calibrated/optimal parameter values. The MSE is below 0.01 throughout.
Step18: The following generates a plot of the calibration results for the last calibration day. The absolute price differences are below 0.10 EUR for all options.
|
3,595
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from gatspy.periodic import LombScargle
import sys
%matplotlib inline
from toy_simulator import simulate_LSST
from trilegal_models import random_stars
import simple_gyro as sg
import pandas as pd
fname = "output574523944248.dat"
N = 100
logAges, bvs, logTeff, rmag = random_stars(fname, N)
teff = 10**logTeff
m = bvs > .4 # select only cool stars
cool_ages = 10**logAges[m] * 1e-9
cool_ps = sg.period(cool_ages, bvs[m])
cool_teffs = teff[m]
cool_rmags = rmag[m]
hot_ages = 10**logAges[~m] * 1e-9 # select hot stars
hot_teffs = teff[~m]
hot_rmags = rmag[~m]
# copy parameters for two Gaussians from hot_stars ipython notebook
A1, A2, mu1, mu2, sig1, sig2 = 254.11651209, 49.8149765, 3.00751724, 3.73399554, 2.26525979, 8.31739725
# Split the hot stars between the two period modes in proportion A2/A1.
hot_ps = np.zeros_like(hot_ages)
hot_ps1 = np.random.randn(int(len(hot_ages)*(1 - A2/A1)))*sig1 + mu1 # mode 1
hot_ps2 = np.random.randn(int(len(hot_ages)*(A2/A1)))*sig2 + mu2 # mode 2
hot_ps[:len(hot_ps1)] = hot_ps1
# BUG FIX: the mode-2 slice must start after mode 1 and span len(hot_ps2)
# entries; the original `hot_ps[len(hot_ps1):len(hot_ps2)]` addresses an
# empty (mis-sized) range whenever mode 1 outnumbers mode 2, which the
# fitted A2/A1 ratio guarantees.
hot_ps[len(hot_ps1):len(hot_ps1) + len(hot_ps2)] = hot_ps2
tot = len(hot_ps1) + len(hot_ps2)
hot_ps[tot:] = np.random.randn(len(hot_ps)-tot)*sig2 + mu2 # make up the total number of Ps
# combine the modes
age = np.concatenate((cool_ages, hot_ages))
ps = np.concatenate((cool_ps, hot_ps))
teff = np.concatenate((cool_teffs, hot_teffs))
rmag = np.concatenate((cool_rmags, hot_rmags))
plt.hist(age)
plt.xlabel("Age (Gyr)")
plt.hist(ps)
plt.xlabel("Period (days)")
plt.hist(rmag)
plt.xlabel("r mag")
## Arrays of random (log-normal) periods and (uniform) amplitudes.
#min_period, max_period = 1, 100 # days
#ps = np.exp(np.random.uniform(np.log(min_period), np.log(max_period), N)) # periods
#amps = np.random.uniform(10, 300, N) # ppm
# Column headings: log10P, log10R, stdR, Nbin
teff_bins = [3500, 4000, 4500, 5000, 5500, 6000]
d35 = pd.read_csv("data/rot_v_act3500.txt")
d40 = pd.read_csv("data/rot_v_act4000.txt")
d45 = pd.read_csv("data/rot_v_act4500.txt")
d50 = pd.read_csv("data/rot_v_act5000.txt")
d55 = pd.read_csv("data/rot_v_act5500.txt")
d60 = pd.read_csv("data/rot_v_act6000.txt")
plt.step(d35["log10P"], d35["log10R"], label="T=3500")
plt.step(d40["log10P"], d40["log10R"], label="T=4000")
plt.step(d45["log10P"], d45["log10R"], label="T=4500")
plt.step(d50["log10P"], d50["log10R"], label="T=5000")
plt.step(d55["log10P"], d55["log10R"], label="T=5500")
plt.step(d60["log10P"], d60["log10R"], label="T=6000")
plt.legend()
plt.xlabel("log Period")
plt.ylabel("log Range")
def find_nearest(array, value):
    """Match a period to a bin.

    array: array of bin values.
    value: the period of the star.
    Returns the nearest bin value(s) and the boolean mask selecting them.
    (The original docstring lost its triple quotes during extraction,
    leaving bare text that is a syntax error; restored here.)
    """
    diff = np.abs(array - value)          # compute the distances once
    m = diff == diff.min()                # mask of the closest bin(s)
    return array[m], m
def assign_amps(ps, log10P, log10R, stdR):
    """Take periods and bin values and return an array of log-amplitudes.

    Each period in `ps` is matched to its nearest bin in 10**log10P; the
    amplitude is drawn from a Gaussian with that bin's mean log-range
    (log10R) and spread (stdR). (Docstring quotes restored; they were
    stripped during extraction, leaving invalid bare text.)
    """
    # match each period to its bin mask; use a plain list because the
    # (values, mask) tuples from find_nearest are ragged, and building a
    # 2-D np.array from them raises on modern numpy
    matches = [find_nearest(10 ** log10P, p) for p in ps]
    inds = [m for _, m in matches]
    log_ranges = np.array([log10R[i] for i in inds])[:, 0]  # mean log-range per star
    std_ranges = np.array([stdR[i] for i in inds])[:, 0]    # stdev per star
    return np.random.randn(len(ps)) * std_ranges + log_ranges  # Gaussian draws
def make_arrays(data, temp_bin):
    """Amplitude arrays for the stars falling in one temperature bin.

    data     : table with log10P / log10R / stdR columns for this bin.
    temp_bin : bin center in Kelvin (3500 and 6000 act as open-ended edges).
    Returns (periods, amplitudes, teffs, rmags) for the selected stars,
    taken from the module-level ps / teff / rmag arrays. (Docstring quotes
    restored; they were stripped during extraction.)
    """
    P, R, std = np.array(data["log10P"]), np.array(data["log10R"]), np.array(data["stdR"])
    if temp_bin == 3500:
        m = teff < 3750
    elif temp_bin == 6000:
        # NOTE(review): `teff > 6000` leaves (5750, 6000] unassigned to any
        # bin while interior bins span center +/- 250 K -- confirm whether
        # this should be `teff > 5750`.
        m = teff > 6000
    else:
        m = (temp_bin - 250 < teff) * (teff < temp_bin + 250)
    periods, teffs, rmags = ps[m], teff[m], rmag[m]
    amplitudes = assign_amps(periods, P, R, std)
    return periods, amplitudes, teffs, rmags
def LSST_sig(m):
    """Approximate the photometric noise (in magnitudes) for apparent r-mag `m`,
    following figure 2 of arXiv:1603.06638.

    Magnitudes brighter than 19 get the floor value of 0.005 mag; otherwise
    the tabulated noise of the nearest listed magnitude is returned.
    (Docstring quotes restored -- the extracted bare text was a syntax
    error -- and the old "and ppm" claim dropped: only magnitudes are
    returned here; the ppm conversion happens at the call site.)
    """
    if m < 19:
        return .005
    mags = np.array([19, 20, 21, 22, 23, 24, 25])
    sigs = np.array([.005, .007, .01, .02, .03, .1, .2])
    return sigs[np.abs(mags - m) == np.abs(mags - m).min()][0]
pers, logamps, teffs, rmags = np.concatenate((make_arrays(d35, 3500), make_arrays(d40, 4000),
make_arrays(d45, 4500), make_arrays(d50, 5000),
make_arrays(d55, 5500), make_arrays(d60, 6000)), axis=1)
amps = 10**logamps # parts per million
noise = LSST_sig(rmag[0])
noises_mag = np.array([LSST_sig(mag) for mag in rmags])
noises_ppm = (1 - 10**(-noises_mag/2.5)) * 1e6
%%capture
# amps = np.random.uniform(10, 300, N) # ppm
path = "simulations" # where to save the lcs
[simulate_LSST(i, pers[i], amps[i], path, noises_ppm[i]) for i in range(len(pers))] # simulations
# save the true values
ids = np.arange(len(pers))
data = np.vstack((ids, pers, amps))
np.savetxt("{0}/truth.txt".format(path), data.T)
id = 0
sid = str(int(id)).zfill(4)
path = "results" # where to save results
x, y, yerr = np.genfromtxt("simulations/{0}.txt".format(sid)).T # load a fake light curve
plt.errorbar(x, y, yerr=yerr, fmt="k.", capsize=0)
ps = np.linspace(2, 100, 1000) # the period array (in days)
model = LombScargle().fit(x, y, yerr)
pgram = model.periodogram(ps)
# find peaks
peaks = np.array([i for i in range(1, len(ps)-1) if pgram[i-1] < pgram[i] and pgram[i+1] < pgram[i]])
if len(peaks):
period = ps[pgram==max(pgram[peaks])][0]
else: period = 0
plt.plot(ps, pgram) # plot the pgram
plt.axvline(period, color="r") # plot the position of the highest peak
# load and plot the truth
ids, true_ps, true_as = np.genfromtxt("simulations/truth.txt").T
plt.axvline(true_ps[id], color="g") # plot the position of the highest peak
print(period, true_ps[id])
ids = np.arange(len(pers))
periods = np.zeros_like(ids)
for i, id in enumerate(ids):
sid = str(int(id)).zfill(4)
x, y, yerr = np.genfromtxt("simulations/{0}.txt".format(sid)).T # load a fake light curve
model = LombScargle().fit(x, y, yerr) # compute pgram
pgram = model.periodogram(ps)
# find peaks
peaks = np.array([i for i in range(1, len(ps)-1) if pgram[i-1] < pgram[i] and pgram[i+1] < pgram[i]])
if len(peaks):
period = ps[pgram==max(pgram[peaks])][0]
else: period = 0
periods[i] = period
data = np.vstack((true_ps, periods, teffs, rmags, true_as, noises_ppm))
np.savetxt("rotation_results{0}.txt".format(fname), data.T)
plt.plot(true_ps, periods, "k.")
xs = np.linspace(min(true_ps), max(true_ps), 100)
plt.plot(xs, xs, "r")
tau = .1 # the recovery must be within a factor of *threshold* of the truth
plt.plot(xs, xs-tau*xs, "r--")
plt.plot(xs, xs+tau*xs, "r--")
m = (true_ps - tau*true_ps < periods) * (periods < true_ps + tau*true_ps)
plt.hist(true_ps, 15, color="b", label="all")
plt.hist(true_ps[m], 15, color="r", alpha=.5, label="recovered")
plt.legend(loc="best")
print(len(true_ps), "injected", len(true_ps[m]), "recovered")
print(len(true_ps[m])/len(true_ps)*100, "percent success")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Randomly select targets from a TRILEGAL output.
Step2: Calculate periods from ages and colours for cool stars
Step3: Draw from a sum of two Gaussians (modelled in another notebook) that describes the period distribution for hot stars. Approximations
Step4: Make histograms of the ages and periods
Step5: Use Derek's results to calculate amplitudes
Step10: Assign amplitudes
Step11: Simulate light curves
Step12: Load and plot an example light curve
Step13: Compute a periodogram
Step14: Now compute LS pgrams for a set of LSST light curves and save the highest peak .
Step15: Save the data
Step16: Plot the recovered periods vs the true periods.
Step17: Decide whether the recovery was successful or not
|
3,596
|
<ASSISTANT_TASK:>
Python Code:
import pysal.lib
import numpy as np
from pysal.explore.giddy.directional import Rose
%matplotlib inline
# Read the state per-capita income series from the bundled example CSV.
f = open(pysal.lib.examples.get_path('spi_download.csv'), 'r')
lines = f.readlines()
f.close()
lines = [line.strip().split(",") for line in lines]
# Column 2 holds the region name; columns 3+ hold the yearly values.
# Row 0 is a header and the last five rows are footnotes, so both are dropped.
names = [line[2] for line in lines[1:-5]]
data = np.array([list(map(int, line[3:])) for line in lines[1:-5]])
sids = list(range(60))
# Aggregate regions and non-contiguous entries to exclude from the analysis.
out = ['"United States 3/"',
       '"Alaska 3/"',
       '"District of Columbia"',
       '"Hawaii 3/"',
       '"New England"','"Mideast"',
       '"Great Lakes"',
       '"Plains"',
       '"Southeast"',
       '"Southwest"',
       '"Rocky Mountain"',
       '"Far West 3/"']
snames = [name for name in names if name not in out]
sids = [names.index(name) for name in snames]
states = data[sids,:]
us = data[0]
years = np.arange(1969, 2009)
# State incomes relative to the national (row 0) series.
rel = states/(us*1.)
# Row-standardized spatial weights for the 48 contiguous states.
gal = pysal.lib.io.open(pysal.lib.examples.get_path('states48.gal'))
w = gal.read()
w.transform = 'r'
# First and last years of relative income define the movement vectors.
Y = rel[:, [0, -1]]
Y.shape
Y
np.random.seed(100)
r4 = Rose(Y, w, k=4)
r4.plot()
r4.plot(Y[:,0]) # condition on starting relative income
r4.plot(attribute=r4.lag[:,0]) # condition on the spatial lag of starting relative income
r4.plot_vectors() # lisa vectors
r4.plot_vectors(arrows=False)
r4.plot_origin() # origin standardized
r4.cuts
r4.counts
# Inference: random spatial permutations (two-sided by default, then
# one-sided positive / negative alternatives).
np.random.seed(1234)
r4.permute(permutations=999)
r4.p
r4.permute(alternative='positive', permutations=999)
r4.p
r4.expected_perm
r4.permute(alternative='negative', permutations=999)
r4.p
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualization
Step2: Inference
Step3: Here all the four sector counts are signficantly different from their expectation under the null.
Step4: Finally, a directional alternative reflecting negative association between the movement of the focal unit and its lag has the complimentary interpretation to the positive alternative
|
3,597
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
from qutip import *
from qutip.piqs import *
# Dicke system of N two-level atoms (qutip.piqs) with collective operators.
N = 10
system = Dicke(N = N)
[jx, jy, jz] = jspin(N)
jp = jspin(N,"+")
jm = jp.dag()
# Hamiltonian H = w0 * Jz, with collective emission rate gCE and pumping gP.
w0 = 1
h0 = w0 * jz
gCE = 1
gP = N * gCE
system.hamiltonian = h0
system.collective_emission = gCE
system.pumping = gP
L = system.liouvillian()
# Steady state and its light emission <J+ J->.
rhoss = steadystate(L, method="direct")
jpjm_ss = expect(jp*jm, rhoss)
# time evolution parameters
nt = 1000
td = np.log(N)/(N*gCE)  # characteristic delay time used to scale the time axis
tmax = 5 * td
t = np.linspace(0, tmax, nt)
# initial state
rho0= dicke(N, N/2, -N/2)
# calculate g2(tau)
A = jp*jm
rhoA = jm*rhoss*jp
#g2(tau)
result1 = mesolve(L, rhoA, t, [], e_ops = [A], options = Options(store_states=True))
g2t = result1.expect[0]
#rho(t)
result2 = mesolve(L, rho0, t, [], e_ops = A, options = Options(store_states=True))
rhot = result2.states
jpjmt = result2.expect[0]
j2max = (0.5 * N + 1) * (0.5 * N)
# Plot g2(tau), normalized by the squared steady-state emission.
plt.rc('text', usetex = True)
label_size = 20
plt.rc('xtick', labelsize=label_size)
plt.rc('ytick', labelsize=label_size)
fig1 = plt.figure()
plt.plot(t/td, g2t/jpjm_ss**2, '-')
plt.plot(t/td, 1+0*g2t, '--')
plt.xlabel(r'$\tau/t_\mathrm{D}$', fontsize = label_size)
plt.ylabel(r'$g^{(2)}(\tau)$', fontsize = label_size)
plt.xticks([0,(tmax/2)/td,tmax/td])
plt.show()
plt.close()
# Plot the time-dependent emission <J+ J->(t), scaled by j2max.
fig2 = plt.figure()
plt.plot(t/td, jpjmt/j2max, '-')
plt.xlabel(r'$t/t_\mathrm{D}$', fontsize = label_size)
plt.ylabel(r'$\langle J_{+}J_{-}\rangle (t)$', fontsize = label_size)
plt.xticks([0,(tmax/2)/td,tmax/td])
plt.title(r'Light emission', fontsize = label_size)
plt.show()
plt.close()
# Cycle on Coefficients
# Sweep the pumping rate over a log grid 10^-2 .. 10^2 and record the
# steady-state emission for each value.
gCE = 1
gP0 = 1
gP_min_exp = -20
gP_max_exp = 20
gP_stepsize = 0.5
gP_list = np.arange(gP_min_exp, gP_max_exp+1, gP_stepsize)*0.1
gP_list_log = 10**(gP_list)
jpjmss_max_list = []
for i in gP_list_log:
    gP = i*gP0
    system = Dicke(hamiltonian = jz, N = N, pumping = gP, collective_emission = gCE)
    liouv = system.liouvillian()
    #steadystate
    rho_ss = steadystate(liouv,method="direct")
    jpjm_ss = expect(jp*jm, rho_ss)
    jpjmss_max_list.append(jpjm_ss)
intensity_max = float(N)*gCE/2*(float(N)*gCE/2+1)
normalized_intensity = np.array(jpjmss_max_list)/intensity_max
plt.semilogx(gP_list_log/(gCE*N), normalized_intensity, '-')
label_size = 20
plt.xlabel(r'${\gamma_\mathrm{P}}/\left({N\gamma_\mathrm{CE}}\right)$', fontsize = label_size)
plt.ylabel(r'$\langle J_{+}J_{-}\rangle_\mathrm{ss}$', fontsize = label_size)
plt.title(r'Steady-state light emission', fontsize = label_size)
plt.show()
plt.close()
qutip.about()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) Time evolution
Step2: Liouvillian and steady state $\rho_\text{ss}$
Step3: Time integration for $g^{(2)}(\tau)$ and $\langle J_{+}J_{-}\rangle (t)$
Step4: Visualization
Step5: 2) Maximum of light emission as a function of $\frac{\gamma_\text{P}}{N\gamma_\text{CE}}$
Step6: Visualization
Step7: References
|
3,598
|
<ASSISTANT_TASK:>
Python Code:
%load_ext watermark
%watermark -a '' -u -d -v -p numpy,pandas,matplotlib,scipy,sklearn
%matplotlib inline
# Added version check for recent scikit-learn 0.18 checks
from distutils.version import LooseVersion as Version
from sklearn import __version__ as sklearn_version
import numpy as np
from sklearn.datasets import load_digits
# Load scikit-learn's 8x8 handwritten-digit dataset.
digits = load_digits()
X = digits.data # data in pixels
y = digits.target # digit labels
print(X.shape)
print(y.shape)
print(np.unique(y))
import matplotlib.pyplot as plt
import pylab as pl
# Show the first 20 digit images in a 4x5 grid.
num_rows = 4
num_cols = 5
fig, ax = plt.subplots(nrows=num_rows, ncols=num_cols, sharex=True, sharey=True)
ax = ax.flatten()
for index in range(num_rows*num_cols):
    img = digits.images[index]
    label = digits.target[index]
    ax[index].imshow(img, cmap='Greys', interpolation='nearest')
    ax[index].set_title('digit ' + str(label))
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
# train_test_split moved modules at scikit-learn 0.18; import accordingly.
if Version(sklearn_version) < '0.18':
    from sklearn.cross_validation import train_test_split
else:
    from sklearn.model_selection import train_test_split
# Hold out 30% of the samples as a test set (fixed seed for repeatability).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1)
num_training = y_train.shape[0]
num_test = y_test.shape[0]
print('training: ' + str(num_training) + ', test: ' + str(num_test))
import numpy as np
# check to see if the data are well distributed among digits
for y in [y_train, y_test]:
    print(np.bincount(y))
## Note: We do not guarantee that there is a one-to-one correspondence, and therefore the toy result is different.
## See Explanation for more information
def clustering_accuracy_score(y_true, y_pred):
    """Weighted F-measure agreement between a clustering and the true labels.

    For each cluster, the best-matching class label is found by maximizing
    the F-score (harmonic mean of precision and recall) of their overlap;
    the per-cluster best F-scores are then averaged, weighted by cluster
    size relative to the total number of samples.
    """
    n_labels = len(set(y_true))
    n_clusters = len(set(y_pred))
    n_samples = len(y_pred)
    # Per-label member counts, indexed by label id.
    label_sizes = [np.sum(y_true == lab) for lab in range(n_labels)]
    score = 0.0
    for clu in range(n_clusters):
        in_cluster = (y_pred == clu)
        cluster_size = np.sum(in_cluster)
        best_f = 0.0
        for lab in range(n_labels):
            overlap = np.sum(in_cluster & (y_true == lab))
            precision = overlap / cluster_size if cluster_size else 0.0
            recall = overlap / label_sizes[lab] if label_sizes[lab] else 0.0
            if precision + recall:
                best_f = max(best_f, 2 * precision * recall / (precision + recall))
        # Each cluster contributes its best F-score, weighted by relative size.
        score += best_f * (cluster_size / n_samples)
    return score
# toy case demonstrating the clustering accuracy
# this is just a reference to illustrate what this score function is trying to achieve
# feel free to design your own as long as you can justify
# ground truth class label for samples
toy_y_true = np.array([0, 0, 0, 1, 1, 2])
# clustering id for samples
toy_y_pred_true = np.array([1, 1, 1, 2, 2, 0])
toy_y_pred_bad1 = np.array([0, 0, 1, 1, 1, 2])
toy_y_pred_bad2 = np.array([2, 2, 1, 0, 0, 0])
toy_accuracy = clustering_accuracy_score(toy_y_true, toy_y_pred_true)
print('accuracy', toy_accuracy, ', should be 1')
toy_accuracy = clustering_accuracy_score(toy_y_true, toy_y_pred_bad1)
print('accuracy', toy_accuracy, ', should be', 5.0/6.0)
toy_accuracy = clustering_accuracy_score(toy_y_true, toy_y_pred_bad2)
print('accuracy', toy_accuracy, ', should be', 4.0/6.0, ', this will be explained in the following content')
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import KernelPCA
from sklearn.cluster import KMeans
from sklearn.metrics import make_scorer
from scipy.stats import mode
# Clustering pipeline: standardize the features, project with kernel PCA,
# then cluster the projection with k-means (fixed seed for repeatability).
pipe = Pipeline([('scl', StandardScaler()),
                 ('pca', KernelPCA()),
                 ('km', KMeans(random_state=1))])
# Map each cluster id to the most plausible class label.
def cluster_mapping(y_true, y_pred):
    """Return a dict mapping each cluster id to its best-matching class label.

    "Best" means the label maximizing the F-score (harmonic mean of
    precision and recall) between the cluster's members and the label's
    members; ties keep the lowest such label (default 0).
    """
    n_labels = len(set(y_true))
    n_clusters = len(set(y_pred))
    # Per-label member counts, indexed by label id.
    label_sizes = [np.sum(y_true == lab) for lab in range(n_labels)]
    mapping = {}
    for clu in range(n_clusters):
        in_cluster = (y_pred == clu)
        cluster_size = np.sum(in_cluster)
        best_f, best_lab = 0.0, 0
        for lab in range(n_labels):
            overlap = np.sum(in_cluster & (y_true == lab))
            precision = overlap / cluster_size if cluster_size else 0.0
            recall = overlap / label_sizes[lab] if label_sizes[lab] else 0.0
            f = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
            # Strict comparison: on a tie the earlier (lower) label wins.
            if best_f < f:
                best_f, best_lab = f, lab
        mapping[clu] = best_lab
    return mapping
# GridSearchCV moved modules at scikit-learn 0.18; import accordingly.
if Version(sklearn_version) < '0.18':
    from sklearn.grid_search import GridSearchCV
else:
    from sklearn.model_selection import GridSearchCV
# Hyper-parameter grid for the scaler / kernel-PCA / k-means pipeline.
pcs = list(range(1, 60))
kernels = ['linear', 'rbf', 'cosine']
initTypes = ['random', 'k-means++']
clusters = list(range(10, 20))
tfs = [True, False]
param_grid = [{'pca__n_components': pcs,
               'pca__kernel': kernels,
               'km__init' : initTypes,
               'km__n_clusters' : clusters,
               'scl__with_std' : tfs,
               'scl__with_mean' : tfs}]
# 10-fold grid search scored with the custom clustering accuracy, using
# all available cores.
gs = GridSearchCV(estimator=pipe,
                  param_grid=param_grid,
                  scoring=make_scorer(clustering_accuracy_score),
                  cv=10,
                  n_jobs=-1,
                  verbose=False)
gs = gs.fit(X_train, y_train)
print(gs.best_score_)
print(gs.best_params_)
best_model = gs.best_estimator_
print('Test accuracy: %.3f' % clustering_accuracy_score(y_test, best_model.predict(X_test)))
# Translate cluster ids into digit labels using the training set, then
# collect up to 25 test samples whose mapped cluster disagrees with truth.
mapping = cluster_mapping(y_train, best_model.predict(X_train))
y_test_pred = np.array(list(map(lambda x: mapping[x], best_model.predict(X_test))))
miscl_img = X_test[y_test != y_test_pred][:25]
correct_lab = y_test[y_test != y_test_pred][:25]
miscl_lab = y_test_pred[y_test != y_test_pred][:25]
# Show the mis-clustered digits with true (t) and predicted (p) labels.
fig, ax = plt.subplots(nrows=5, ncols=5, sharex=True, sharey=True)
ax = ax.flatten()
for i in range(25):
    img = miscl_img[i].reshape(8, 8)
    ax[i].imshow(img, cmap='Greys', interpolation='nearest')
    ax[i].set_title('%d) t: %d p: %d' % (i+1, correct_lab[i], miscl_lab[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
# Functions to build a user-defined wide resnet for cifa-10
# Author: Somshubra Majumdar https://github.com/titu1994/Wide-Residual-Networks
# Modified By: Gao Chang, HKU
from keras.models import Model
from keras.layers import Input, merge, Activation, Dropout, Flatten, Dense
from keras.layers.convolutional import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras import backend as K
def initial_conv(input):
    """Stem of the WRN: a 16-filter 3x3 conv followed by BN and ReLU."""
    bn_axis = 1 if K.image_dim_ordering() == "th" else -1
    out = Convolution2D(16, 3, 3, border_mode='same')(input)
    out = BatchNormalization(axis=bn_axis)(out)
    return Activation('relu')(out)
def conv1_block(input, k=1, dropout=0.0):
    """Residual block at width 16*k: two 3x3 convs plus a shortcut.

    The shortcut is the identity, or a linear 1x1 conv when the input
    channel count differs from 16 * k. Optional dropout sits between
    the two conv layers.
    """
    bn_axis = 1 if K.image_dim_ordering() == "th" else -1
    shortcut = input
    # Project the shortcut when its channel count does not match 16 * k.
    if shortcut._keras_shape[bn_axis] != 16 * k:
        shortcut = Convolution2D(16 * k, 1, 1, activation='linear', border_mode='same')(shortcut)
    out = Convolution2D(16 * k, 3, 3, border_mode='same')(input)
    out = BatchNormalization(axis=bn_axis)(out)
    out = Activation('relu')(out)
    if dropout > 0.0:
        out = Dropout(dropout)(out)
    out = Convolution2D(16 * k, 3, 3, border_mode='same')(out)
    out = BatchNormalization(axis=bn_axis)(out)
    out = Activation('relu')(out)
    return merge([shortcut, out], mode='sum')
def conv2_block(input, k=1, dropout=0.0):
    """Residual block at width 32*k: two 3x3 convs plus a shortcut.

    The shortcut is the identity, or a linear 1x1 conv when the input
    channel count differs from 32 * k. Optional dropout sits between
    the two conv layers.
    """
    bn_axis = 1 if K.image_dim_ordering() == "th" else -1
    shortcut = input
    # Project the shortcut when its channel count does not match 32 * k.
    if shortcut._keras_shape[bn_axis] != 32 * k:
        shortcut = Convolution2D(32 * k, 1, 1, activation='linear', border_mode='same')(shortcut)
    out = Convolution2D(32 * k, 3, 3, border_mode='same')(input)
    out = BatchNormalization(axis=bn_axis)(out)
    out = Activation('relu')(out)
    if dropout > 0.0:
        out = Dropout(dropout)(out)
    out = Convolution2D(32 * k, 3, 3, border_mode='same')(out)
    out = BatchNormalization(axis=bn_axis)(out)
    out = Activation('relu')(out)
    return merge([shortcut, out], mode='sum')
def conv3_block(input, k=1, dropout=0.0):
    """Residual block at width 64*k: two 3x3 convs plus a shortcut.

    The shortcut is the identity, or a linear 1x1 conv when the input
    channel count differs from 64 * k. Optional dropout sits between
    the two conv layers.
    """
    bn_axis = 1 if K.image_dim_ordering() == "th" else -1
    shortcut = input
    # Project the shortcut when its channel count does not match 64 * k.
    if shortcut._keras_shape[bn_axis] != 64 * k:
        shortcut = Convolution2D(64 * k, 1, 1, activation='linear', border_mode='same')(shortcut)
    out = Convolution2D(64 * k, 3, 3, border_mode='same')(input)
    out = BatchNormalization(axis=bn_axis)(out)
    out = Activation('relu')(out)
    if dropout > 0.0:
        out = Dropout(dropout)(out)
    out = Convolution2D(64 * k, 3, 3, border_mode='same')(out)
    out = BatchNormalization(axis=bn_axis)(out)
    out = Activation('relu')(out)
    return merge([shortcut, out], mode='sum')
def WRN(nb_classes, N, k, dropout):
    """
    Creates a Wide Residual Network with specified parameters

    :param nb_classes: Number of output classes
    :param N: Depth of the network. Compute N = (n - 4) / 6.
              Example : For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
              Example2: For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
              Example3: For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
    :param k: Width of the network.
    :param dropout: Adds dropout if value is greater than 0.0
    """
    # Channels-first 32x32 RGB input (CIFAR-style).
    init = Input(shape=(3, 32, 32))
    x = initial_conv(init)
    # N residual blocks at each width, with 2x2 max-pooling between stages.
    for i in range(N):
        x = conv1_block(x, k, dropout)
    x = MaxPooling2D((2,2))(x)
    for i in range(N):
        x = conv2_block(x, k, dropout)
    x = MaxPooling2D((2,2))(x)
    for i in range(N):
        x = conv3_block(x, k, dropout)
    # Global 8x8 average pooling, then a softmax classification head.
    x = AveragePooling2D((8,8))(x)
    x = Flatten()(x)
    x = Dense(nb_classes, activation='softmax')(x)
    model = Model(init, x)
    return model
import numpy as np
import sklearn.metrics as metrics
from keras.datasets import cifar10
from keras.models import Model
from keras.layers import Input
from keras.optimizers import SGD
import keras.callbacks as callbacks
import keras.utils.np_utils as kutils
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import accuracy_score
# Training configuration (5 epochs is a short demonstration run).
batch_size = 64
nb_epoch = 5
# Load CIFAR-10, scale pixels to [0, 1] and one-hot encode the labels.
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32')
X_train /= 255.0
X_test = X_test.astype('float32')
X_test /= 255.0
y_train = kutils.to_categorical(y_train)
y_test = kutils.to_categorical(y_test)
# Training-time augmentation: featurewise normalization plus small
# rotations, shifts and horizontal flips.
generator = ImageDataGenerator(featurewise_center=True,
                               featurewise_std_normalization=True,
                               rotation_range=10,
                               width_shift_range=5./32,
                               height_shift_range=5./32,
                               horizontal_flip=True)
generator.fit(X_train, seed=0, augment=True)
# The test generator only applies the featurewise normalization.
test_generator = ImageDataGenerator(featurewise_center=True,
                                    featurewise_std_normalization=True)
test_generator.fit(X_test, seed=0, augment=True)
# WRN-28-10: N=4 blocks per stage (depth 28 via N=(n-4)/6), width k=10.
model = WRN(nb_classes=10, N=4, k=10, dropout=0.0)
model.summary()
print ("Start Training:")
sgd = SGD(lr = 0.001, decay = 0.1, momentum = 0.9, nesterov = True)
model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["acc"])
# model.load_weights("WRN-28-10.h5")
model.fit_generator(generator.flow(X_train, y_train, batch_size=batch_size),
                    samples_per_epoch=len(X_train),
                    nb_epoch=nb_epoch,
                    # callbacks=[callbacks.ModelCheckpoint("WRN-28-10-Best.h5", monitor="val_acc", save_best_only=True)],
                    validation_data=test_generator.flow(X_test, y_test, batch_size=batch_size),
                    nb_val_samples=X_test.shape[0],
                    verbose = True)
print ("Start Testing:")
# model.load_weights("WRN-28-10.h5")
results = model.evaluate_generator(test_generator.flow(X_test, y_test, batch_size=batch_size), X_test.shape[0])
print ("Results:")
print ("Test loss: {0}".format(results[0]))
print ("Test accuracy: {0}".format(results[1]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Visualize data
Step3: Data sets
Step4: Answer
Step5: Explanation
Step6: Use GridSearchCV to tune hyper-parameters.
Step7: Visualize mis-clustered samples, and provide your explanation.
Step9: Explanation
|
3,599
|
<ASSISTANT_TASK:>
Python Code:
# Setup, just putting together what we will need to do the analysis
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.spines import Spine
from matplotlib.projections.polar import PolarAxes
from matplotlib.projections import register_projection
%matplotlib inline
# Load the pokemon stats table and the type-vs-type damage multipliers.
pokemon_data = pd.read_csv('../datasets/pokemon-stats.csv')
damage_mul = pd.read_csv('./damage_multipliers.csv')
# The six base-stat columns used throughout the analysis.
status = ['HP','Attack','Defense','Sp. Atk','Sp. Def','Speed']
pokemon_data.head()
damage_mul.head()
# Setup your team! Choose 6 pokemon in the list below
team = ["Milotic", "Aggron", "Vigoroth", "Raichu", "Breloom", "Alakazam"]
# Let's gather only your team's data
team_data = pokemon_data.query("Name in @team")
team_data
# First, let's see your types matchups
# Primary types
team_types = team_data["Type1"].unique().tolist()
# Secondary types may be NaN, let's deal with it
team_types.extend(team_data["Type2"].dropna().unique().tolist())
all_types = damage_mul["Attacking"].unique()
# Accumulate, per opposing type, the team's total attack multiplier against
# it and the total damage multiplier the team takes from it.
attack_matchups = {}
defense_matchups = {}
for p in team:
    t1 = pokemon_data[pokemon_data["Name"] == p]["Type1"].item()
    t2 = False
    if pokemon_data[pokemon_data["Name"] == p]["Type2"].any():
        t2 = pokemon_data[pokemon_data["Name"] == p]["Type2"].item()
    for t in all_types:
        att_mul =damage_mul[damage_mul["Attacking"] == t1][t].item()
        def_mul = damage_mul[damage_mul["Attacking"] == t][t1].item()
        if t2:
            # In case you have a dual type poke, we get the max multiplier for the attack
            # and the product for the defense
            att_mul = max(att_mul,damage_mul[damage_mul["Attacking"] == t2][t].item())
            def_mul *=damage_mul[damage_mul["Attacking"] == t][t2].item()
        attack_matchups[t] = attack_matchups.get(t, 0) + att_mul
        defense_matchups[t] = defense_matchups.get(t,0) + def_mul
# Single-row DataFrames with the type columns ordered by descending score.
attack_matchups = pd.DataFrame(attack_matchups, index=['Score']).T.sort_values('Score', ascending=False).T
defense_matchups = pd.DataFrame(defense_matchups, index=['Score']).T.sort_values('Score', ascending=False).T
# Reshape the team's stats to long form for the swarm plot below.
status_data = pd.melt(team_data, id_vars=['#','Name','Type1', 'Type2','Legendary','Generation', 'Total'], var_name="Stat")
# Bar charts of the type matchups; the two lambdas bucket scores into three
# color bands.
plt.figure(figsize=(10,3))
plt.subplot(121)
plt.suptitle("Types Matchups")
plt.title("Attack Matchups")
plt.xlabel("Defending Type")
plt.ylabel("Score")
plt.xticks(range(18), attack_matchups.columns,rotation=65)
att_color = lambda x:(1,0.3,0.3) if x <= 5 else (0.2,0.3,1) if x <= 7 else (0.3, 0.8, 0.1)
def_color = lambda x:(1,0.3,0.3) if x > 8 else (0.2,0.3,1) if x >= 6 else (0.3, 0.8, 0.1)
for x, v in zip(range(18), attack_matchups.values[0]):
    plt.bar(x,v, color=att_color(v))
plt.subplot(122)
plt.title("Defense Matchups")
plt.xlabel("Attacking Type")
plt.xticks(range(18), defense_matchups.columns[::-1],rotation=65)
# Defense bars plot 24 - score, so lower damage taken shows as a taller bar.
for x, v in zip(range(18), defense_matchups.values[0][::-1]):
    plt.bar(x,24-v, color=def_color(v))
plt.show()
# Swarm plot of each stat per team member, with dotted team averages.
plt.figure(figsize=(10,5))
plt.title("Status values for your team")
sns.swarmplot(x="Stat", y="value", data=status_data, hue="Name")
plt.ylabel("Value")
plt.xlabel("Status")
colors = ['r','g','b','y','c','k']
for stat,c in zip(status,colors):
    plt.hlines(team_data[stat].mean(),-1,6, linestyles='dotted', color=c, label="%s Avg"%stat)
plt.legend(loc=(1.05,0.05))
plt.show()
print(pokemon_data[status].mean())
print(team_data[status].mean())
# Heatmap of the raw stat values per team member.
fig = plt.figure( figsize=(4,4))
ax = plt.subplot(111)
sns.heatmap(team_data[status], cmap='gist_heat_r')
plt.yticks(range(6), team_data["Name"][::-1], rotation=0)
plt.xticks(rotation=90)
plt.show()
# Let's get you 3 worst attack and defense matchups
att_w = attack_matchups.columns[-3:].tolist()
def_w = defense_matchups.columns[-3:].tolist()
# Pokemon whose typing exploits the team's weakest matchups on both sides.
archenemy = pokemon_data.query("Type1 in @att_w or Type2 in @att_w")
archenemy.query("Type1 in @def_w or Type2 in @def_w")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Types Matchups
Step2: Status Analysis
Step3: Archenemies
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.