| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, 67–124k chars, nullable) | cleaned_prompt (string, 168–30.3k chars, nullable) |
|---|---|---|
2,600
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# Fixes an incompatibility between scipy 1.0 and statsmodels 0.8.
from pymyinstall.fix import fix_scipy10_for_statsmodels08
fix_scipy10_for_statsmodels08()
import pyensae.datasource
f = pyensae.datasource.download_data("dads2011_gf_salaries11_dbase.zip",
website="https://www.insee.fr/fr/statistiques/fichier/2011542/")
import pandas
try:
from dbfread import DBF
use_dbfread = True
except ImportError as e:
use_dbfread = False
if use_dbfread:
import os
from pyensae.sql.database_exception import ExceptionSQL
from pyensae.datasource import dBase2sqllite
print("convert dbase into sqllite")
try:
dBase2sqllite("salaries2011.db3", "varlist_salaries11.dbf", overwrite_table="varlist")
dBase2sqllite("salaries2011.db3", "varmod_salaries11.dbf", overwrite_table="varmod")
dBase2sqllite("salaries2011.db3", 'salaries11.dbf', overwrite_table="salaries", fLOG = print)
except ExceptionSQL:
print("La base de données est déjà renseignée.")
else :
print("use of zipped version")
import pyensae.datasource
db3 = pyensae.datasource.download_data("salaries2011.zip")
# to speed things up, the data can be downloaded from
# http://www.xavierdupre.fr/enseignement/complements/salaries2011.zip
import pandas, numpy, matplotlib.pyplot as plt
from sklearn.decomposition import PCA
import pyensae.datasource
pyensae.datasource.download_data("eleve_region.txt")
df = pandas.read_csv("eleve_region.txt", sep="\t", encoding="utf8", index_col=0)
print(df.shape)
df.head(n=5)
for c in df.columns:
if c != "1993":
df[c] /= df["1993"]
df["1993"] /= df["1993"]
df.head()
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(linkage='ward', compute_full_tree=True).fit(df)
ward
from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
dendro = []
for a,b in ward.children_:
dendro.append([a, b, float(len(dendro)+1), len(dendro)+1])
# the last coefficient should contain the number of leaves under this node,
# not the last index
# likewise, the level (3rd column) should not be the node count
# but the Ward distance
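# A hedged alternative sketch (assuming scipy's linkage-matrix format): let
# scipy compute the Ward distances and leaf counts directly instead of the
# placeholder values above:
# from scipy.cluster.hierarchy import linkage
# dendro = linkage(df, method='ward')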
fig = plt.figure( figsize=(8,8) )
ax = fig.add_subplot(1,1,1)
r = dendrogram(dendro, color_threshold=1, labels=list(df.index),
show_leaf_counts=True, ax=ax, orientation="left")
from scipy.spatial.distance import pdist, squareform
data_dist = pdist(df)
fig = plt.figure(figsize=(8,8))
# x ywidth height
ax1 = fig.add_axes([0.05,0.1,0.2,0.6])
Z1 = dendrogram(dendro, orientation='right',labels=list(df.index))
ax1.set_xticks([])
# Compute and plot second dendrogram.
ax2 = fig.add_axes([0.3,0.71,0.6,0.2])
Z2 = dendrogram(dendro)
ax2.set_xticks([])
ax2.set_yticks([])
# Compute and plot the heatmap
axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = squareform(data_dist)
D = D[idx1,:]
D = D[:,idx2]
im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=plt.cm.YlGnBu)
axmatrix.set_xticks([])
axmatrix.set_yticks([])
# Plot colorbar.
axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
plt.colorbar(im, cax=axcolor)
plt.title("Matrice de corrélation et dendogramme.");
import sqlite3, pandas
con = sqlite3.connect("salaries2011.db3")
df = pandas.io.sql.read_sql("select * from varmod", con)
con.close()
values = df[ df.VARIABLE == "TRNNETO"].copy()
def process_intervalle(s):
# [14 000 ; 16 000[ euros
acc = "0123456789;+"
s0 = "".join(c for c in s if c in acc)
spl = s0.split(';')
if len(spl) != 2:
raise ValueError("Unable to process '{0}'".format(s0))
try:
a = float(spl[0])
except Exception as e:
raise ValueError("Cannot interpret '{0}' - {1}".format(s, spl))
b = float(spl[1]) if "+" not in spl[1] else None
if b is None:
return a
return (a+b) / 2.0
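# Quick sanity check on hypothetical labels: a closed interval maps to its
# midpoint, an open-ended interval maps to its lower bound.
print(process_intervalle("[14 000 ; 16 000[ euros"))  # 15000.0
print(process_intervalle("[50 000 ; +[ euros"))       # 50000.0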
values["montant"] = values.apply(lambda r : process_intervalle(r["MODLIBELLE"]), axis=1)
values.head()
import sqlite3, pandas
con = sqlite3.connect("salaries2011.db3")
data = pandas.io.sql.read_sql("select TRNNETO,AGE,SEXE from salaries", con)
con.close()
salaires = data.merge(values, left_on="TRNNETO", right_on="MODALITE")
salaires["M"] = salaires.apply(lambda r: 1 if r["SEXE"] == "1" else 0, axis=1)
salaires["F"] = salaires.apply(lambda r: 1 if r["SEXE"] == "2" else 0, axis=1)
data = salaires[["AGE","M","F","montant"]]
data = data[data.M + data.F > 0]
data.head()
nonull = data.dropna().copy()
nonull.shape
nonull[["AGE","M"]].dropna().shape
from sklearn import linear_model
clf = linear_model.LinearRegression()
clf.fit(nonull[["AGE","M"]].values, nonull.montant.values)
clf.coef_, clf.intercept_, "R^2=", clf.score(
nonull[["AGE","M"]], nonull.montant)
import random
val = nonull.copy()
val["rnd"] = val.apply(lambda r: random.randint(0, 1000), axis=1)
ech = val[val["rnd"] == 1]
ech.shape
homme = ech[ech.M == 1]
femme = ech[ech.M == 0]
predh = clf.predict(homme[["AGE","M"]])
predf = clf.predict(femme[["AGE","M"]])
import matplotlib.pyplot as plt
plt.figure(figsize=(16,6))
plt.plot(homme.AGE, homme.montant, "r.")
plt.plot(femme.AGE + 0.2, femme.montant, "b.")
plt.plot(homme.AGE, predh, "ro-", label="hommes")
plt.plot(femme.AGE, predf, "bo-", label="femmes")
plt.legend()
plt.title("Montant moyen par âge et genre");
import statsmodels.api as sm
nonull["one"] = 1.0 # on ajoute la constante
model = sm.OLS(nonull.montant, nonull [["AGE","M", "one"]])
results = model.fit()
print("coefficients",results.params)
results.summary()
import random
val = nonull.copy()
val["rnd"] = val.apply(lambda r: random.randint(0,1000), axis=1)
ech = val[val["rnd"] == 1]
homme = ech[ech.M == 1]
femme = ech[ech.M == 0]
predh = results.predict(homme[["AGE","M","one"]])
predf = results.predict(femme[["AGE","M","one"]])
import matplotlib.pyplot as plt
def graph(homme, femme, predh, predf):
fig, ax = plt.subplots(1, 1, figsize=(16, 6))
ax.plot(homme.AGE, homme.montant, "r.")
ax.plot(femme.AGE + 0.2, femme.montant, "b.")
ax.plot(homme.AGE, predh, "ro-", label="hommes")
ax.plot(femme.AGE, predf, "bo-", label="femmes")
ax.legend()
ax.set_title("Montant moyen par âge et genre");
return ax
graph(homme, femme, predh, predf);
from statsmodels.sandbox.regression.predstd import wls_prediction_std
prstd, iv_l, iv_u = wls_prediction_std(results)
val = nonull.copy()
val["rnd"] = val.apply(lambda r: random.randint(0, 1000), axis=1)
val["pred"] = prstd
val["up"] = iv_u
val["down"] = iv_l
ech = val[val["rnd"] == 1]
ech.head()
homme = ech[ech.M == 1]
femme = ech[ech.M == 0]
predh = results.predict(homme[["AGE","M","one"]])
predf = results.predict(femme[["AGE","M","one"]])
ax = graph(homme, femme, predh, predf)
ax.plot(homme.AGE, homme.up, 'r-')
ax.plot(homme.AGE, homme.down, 'r-')
ax.plot(femme.AGE, femme.up, 'b-')
ax.plot(femme.AGE, femme.down, 'b-')
ax.set_title("Montant moyen par âge et genre avec écart-type");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prerequisites from the assignment
Step2: Exercise 1
Step3: I also reproduce the graph showing the correlation matrix, which can also be obtained with seaborn
Step4: Exercise 2
Step5: Remove the missing values
Step6: scikit-learn version
Step7: Take a random sample
Step8: Separate men and women
Step9: statsmodels version
Step10: Reproduce the same plot
Step11: Add the confidence interval on a sample
Step12: Then add it to the previous graph
|
2,601
|
<ASSISTANT_TASK:>
Python Code:
import os
import re
import time
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
BASE_PATH = "/Volumes/LaCie/from_macHD/Github/crossing_paper2017"
# BASE_PATH = ".."
def offspring_empirical(Dmnk, levels, laplace=False):
# Get pooled frequencies
Djk = Dmnk[:, levels].sum(axis=1, keepdims=False, dtype=np.float)
Dj = Djk.sum(axis=1, keepdims=True)
# Compute the empirical probabilities
Pjk = Djk / Dj if not laplace else (Djk + 1.0) / (Dj + Djk.shape[1])
levels = np.arange(Dmnk.shape[1], dtype=np.int)[levels]
return levels + 1, np.nanmean(Pjk, axis=0), np.nanstd(Pjk, axis=0)
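# Note: laplace=True applies add-one (Laplace) smoothing, which avoids
# zero-probability estimates for offspring counts never observed at a level.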
from math import log
def offspring_prob(Z_max, hurst):
Z = np.arange(2, Z_max, 2)
theta = 2.0 ** (1.0 - 1.0 / hurst)
return Z, theta * np.exp((Z // 2 - 1) * log(1 - theta))
def offspring_hurst(Dmnk, levels, laplace=False):
# Get pooled frequencies
Dmj = Dmnk[:, levels].sum(axis=2, dtype=np.float)
# Compute the sum of the left-closed tails sums,
# and divide by the total number of offspring.
Mmj = 2 * Dmnk[:, levels, ::-1].cumsum(axis=-1).sum(axis=-1) / Dmj
Hmj = np.log(2) / np.log(Mmj)
levels = np.arange(Dmnk.shape[1], dtype=np.int)[levels]
return levels + 1, np.nanmean(Hmj, axis=0), np.nanstd(Hmj, axis=0)
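# Hedged consistency check: under the conjectured geometric law the mean
# offspring count is 2/theta = 2**(1/H), so log(2)/log(mean) recovers H.
_z_chk, _p_chk = offspring_prob(2000, hurst=0.7)
print("probability mass:", _p_chk.sum())  # ~ 1.0
print("recovered Hurst:", np.log(2) / np.log((_z_chk * _p_chk).sum()))  # ~ 0.7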
output_path = os.path.join("../plots", time.strftime("%Y%m%d_%H%M%S"))
if not os.path.exists(output_path):
os.mkdir(output_path)
print(output_path)
from crossing_tree.manager import ExperimentManager
experiment = ExperimentManager(name_format=re.compile(
r"^(?P<class>[^-]+)"+
r"-(?P<size>\d+)" +
r"-(?P<hurst>(\d*\.)?\d+)" +
r"-(?P<replications>\d+x\d+)" + # r"-(?P<n_batch>\d+)x(?P<n_jobs>\d+)" +
r"_(?:[\d-]+)" + # r"_(?P<dttm>[\d-]+)" +
r".gz$", flags=re.I | re.U))
experiment.update(os.path.join(BASE_PATH, "results/version_2"))
print(experiment.keys_)
method = "med" # needs bytes encoding
experiments = [# (8388608, "125x8", "FBM", method),
(33554432, "334x3", "FBM", method),
(8388608, "125x8", "HRP2_1", method),
(8388608, "125x8", "HRP3_1", method),
(8388608, "125x8", "HRP4_1", method),
# (524288, "125x8", "HRP2_16", method),
# (524288, "125x8", "HRP3_16", method),
# (524288, "125x8", "HRP4_16", method),
(8388608, "125x8", "WEI_1.2", method),
]
exponents = [0.500, 0.550, 0.600, 0.650, 0.700, 0.750, 0.800, 0.850, 0.900,
0.910, 0.915, 0.920, 0.925, 0.930, 0.935, 0.940, 0.945, 0.950,
0.990]
def figure_01(fig, generator, size, replications, method, p=6, q=7, bars=True, legend=True):
ax = fig.add_subplot(111)
results = experiment[generator, size, :, replications]
data = {float(info_[2]): data_[method] for info_, start_, finish_, seeds_, data_ in results}
color_ = plt.cm.rainbow(np.linspace(0, 1, num=len(exponents)))[::-1]
for col_, hurst_ in zip(color_, exponents):
try:
try:
scale_m, Nmn, Dmnk, Cmnkk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except ValueError:
scale_m, Nmn, Dmnk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except KeyError:
continue
levels, Pk_avg, Pk_std = offspring_empirical(Dmnk, slice(p, q), laplace=False)
k, Pk = offspring_prob(2*(Pk_avg.shape[0] + 1), hurst=hurst_)
ax.plot(k, Pk, linestyle='-', color='black', alpha=0.5, zorder=-99)
if bars:
ax.errorbar(k, Pk_avg, yerr=Pk_std, fmt='-s',
color=col_, markersize=3, alpha=1.0,
label="%s %0.3f"%(generator, hurst_))
else:
ax.plot(k, Pk_avg, "-s", color=col_, markersize=3,
alpha=1.0, label="%s %0.3f"%(generator, hurst_))
ax.set_xticks(np.arange(2, 43, 2))
ax.grid(alpha=0.5, linestyle=":", color="grey")
ax.set_xlim(1.9, 12.1)
ax.set_yscale("log", basey=2)
ax.set_ylim(.5e-4, 1.1)
ax.set_ylabel("probability")
ax.set_xlabel("number of offspring")
if legend:
legend_ = ax.legend(loc="lower left", frameon=True,
ncol=2, fontsize=7)
legend_.get_frame() #.set_facecolor("whitesmoke")
p, q = 6, 10 # 5, 8
for experiment_ in experiments:
size, replications, generator, method_ = experiment_
name_ = "fig_01-%d_%s-%s-%d-%s-%s.pdf"%(p, str(q) if isinstance(q, int) else "X",
generator, size, replications, method_,)
fig = plt.figure(figsize=(6, 5))
figure_01(fig, str(generator), str(size), str(replications), method_,
p, q, bars=False, legend=True)
fig.savefig(os.path.join(output_path, name_), format="pdf")
plt.close()
# exponents = [0.5, 0.6, 0.7, 0.8, 0.9]
# exponents = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
def figure_04(fig, generator, size, replications, method, p=6, q=7, bars=False, legend=True):
ax = fig.add_subplot(111)
results = experiment[generator, size, :, replications]
data = {float(info_[2]): data_[method] for info_, start_, finish_, seeds_, data_ in results}
first_, last_ = np.inf, -np.inf
color_ = plt.cm.rainbow(np.linspace(0, 1, num=len(exponents)))[::-1]
for col_, hurst_ in zip(color_, exponents):
try:
try:
scale_m, Nmn, Dmnk, Cmnkk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except ValueError:
scale_m, Nmn, Dmnk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except KeyError:
continue
levels, Hj_avg, Hj_std = offspring_hurst(Dmnk, slice(p, q))
ax.axhline(y=hurst_, color='black', linestyle='-', alpha=0.25, zorder=-99)
mask = Hj_avg < hurst_ * 1.35
if bars:
ax.errorbar(levels[mask], Hj_avg[mask], yerr=Hj_std[mask],
fmt="-s", color=col_, markersize=3, alpha=1.0,
label="%s %0.3f"%(generator, hurst_))
else:
ax.plot(levels[mask], Hj_avg[mask], "-s",
color=col_, markersize=3, alpha=1.0,
label="%s %0.3f"%(generator, hurst_))
first_ = min(levels[mask][0], first_)
last_ = max(levels[mask][-1], last_)
last_ = 20 # min(last_, 20)
ax.set_xticks(np.arange(first_, last_ + 1))
ax.grid(color="grey", linestyle=":", alpha=0.5)
ax.set_xlim(first_ - 0.1, last_ + 1.1)
ax.set_ylim(0.45, 1.01)
## Add a legend with white opaque background.
# ax.set_title( 'Crossing tree estimates of the Hurst exponent' )
ax.set_xlabel("level $\\delta 2^k$")
ax.set_ylabel("$H$")
if legend:
legend_ = ax.legend(loc="lower right", frameon=1,
ncol=2, fontsize=7)
legend_.get_frame() #.set_facecolor("whitesmoke")
p, q = 0, None
for experiment_ in experiments:
size, replications, generator, method_ = experiment_
name_ = "fig_04-%d_%s-%s-%d-%s-%s.pdf"%(p, str(q) if isinstance(q, int) else "X",
generator, size, replications, method_,)
fig = plt.figure(figsize=(6, 5))
figure_04(fig, str(generator), str(size), str(replications), method_,
p, q, bars=False, legend=True)
fig.savefig(os.path.join(output_path, name_), format="pdf")
plt.close()
# exponents = [0.5, 0.6, 0.7, 0.8, 0.9]
# exponents = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]
def figure_08(fig, generator, size, replications, method, bars=False, legend=True):
ax = fig.add_subplot(111)
results = experiment[generator, size, :, replications]
data = {float(info_[2]): data_[method] for info_, start_, finish_, seeds_, data_ in results}
color_ = plt.cm.rainbow(np.linspace(0, 1, num=len(exponents)))[::-1]
for col_, hurst_ in zip(color_, exponents):
try:
try:
scale_m, Nmn, Dmnk, Cmnkk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except ValueError:
scale_m, Nmn, Dmnk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except KeyError:
continue
level = np.arange(Wavgmn.shape[-1], dtype=np.float128)
scale_ = (2 ** (-level / hurst_))
# scale_ *= (2 * hurst_ - 1) * 2 * hurst_
Wavgn_ = np.nanmean(Wavgmn / (scale_m[:, np.newaxis] ** (1 / hurst_)), axis=0) * scale_
if bars:
Wstdn_ = np.nanstd(Wavgmn / (scale_m[:, np.newaxis] ** (1 / hurst_)), axis=0) * scale_
ax.errorbar(1+level, Wavgn_, yerr=Wstdn_, fmt="-s", color=col_,
markersize=3, alpha=1.0, label="%s %0.3f"%(generator, hurst_))
else:
ax.plot(1+level, Wavgn_, "-s", color=col_, markersize=3,
alpha=1.0, label="%s %0.3f"%(generator, hurst_))
ax.set_xticks(range(1, 21))
ax.grid(color="grey", linestyle=":", alpha=0.5)
ax.set_yscale("log", basey=2)
ax.set_xlim(0.9, 20.1)
ax.set_xlabel("level")
ax.set_ylabel("$\\left(\\delta 2^n \\right)^{-H^{-1}} {\\mathbb{E}W^n}$")
if legend:
legend_ = ax.legend(loc="lower left", frameon=1,
ncol=3, fontsize=7)
legend_.get_frame() #.set_facecolor("whitesmoke")
for experiment_ in experiments:
size, replications, generator, method_ = experiment_
name_ = "fig_08-%s-%d-%s-%s.pdf"%(generator, size, replications, method_,)
fig = plt.figure(figsize=(6, 5))
figure_08(fig, str(generator), str(size), str(replications), method_,
bars=False, legend=True)
fig.savefig(os.path.join(output_path, name_), format="pdf")
plt.close()
from math import floor
full_table = list()
for experiment_ in experiments:
size, replications, generator, method = experiment_
results = experiment[str(generator), str(size), :, str(replications)]
data = {float(info_[2]): data_[method] for info_, start_, finish_, seeds_, data_ in results}
table = list()
for hurst_ in exponents:
try:
try:
scale_m, Nmn, Dmnk, Cmnkk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except ValueError:
scale_m, Nmn, Dmnk, Vmnde, Wmnp, Wavgmn, Wstdmn = data[hurst_]
except KeyError:
continue
# Compute the average number of offspring and the standard deviation
# df_ = pd.DataFrame(dict(average=Nmn.mean(axis=0), std=Nmn.std(axis=0)),
# index=pd.RangeIndex(stop=Nmn.shape[1],name='Level'))
df_ = pd.Series(["$%1.1f\pm%0.2f\\%%$"%(m/1000, 100*s/m) if floor(m/100) > 0 else "--"
for m, s in zip(Nmn.mean(axis=0), Nmn.std(axis=0))],
index=pd.RangeIndex(stop=Nmn.shape[1],name='Level'), name=hurst_)
table.append((hurst_, df_))
table = pd.concat([tab_ for hurst_, tab_ in table], axis=1,
keys=[hurst_ for hurst_, tab_ in table], names=["hurst"])
full_table.append((experiment_, table))
table = pd.concat([tab_ for hurst_, tab_ in full_table], axis=1, join="inner",
keys=[hurst_ for hurst_, tab_ in full_table],
names=["size", "replications", "generator", "method"])
for hurst_ in exponents:
name_ = "tab_01-%s-%0.3f.tex"%(method_, hurst_,)
out_ = table.xs(method, axis=1, level=3).xs(hurst_, axis=1, level=-1)
out_.columns = out_.columns.droplevel(0).droplevel(0)
# .style.format({"average":"{:1.0f}", "std":"±{:1.0f}"})
body_ = out_.to_latex(escape=False, na_rep="--", bold_rows=True)\
.replace("_", "\\_")
body_ += """\\caption{The average number of offspring at each level (in
thousands; $\\pm$1 std. dev. in percent) for processes
with $H=%0.3f$.} \n""" % (hurst_,)
body_ += "\\label{tab:avg_offspring_%0.3f}\n" % (hurst_,)
with open(os.path.join(output_path, name_), "w") as fout_:
fout_.write(body_)
selector = np.s_[:12]
levels_ = np.r_[selector].astype(float)
log2ed_list = []
check_list_ = [
(33554432, "334x3", "FBM", 1.0),
(8388608, "125x8", "HRP2_1", 2.0),
(8388608, "125x8", "HRP3_1", 3.0),
(8388608, "125x8", "HRP4_1", 4.0),
]
for size, replications, name, degree in check_list_:
results = experiment[name, str(size), :, str(replications)]
data = {float(info_[2]): data_[method]
for info_, start_, finish_, seeds_, data_ in results
if float(info_[2]) > 0.5}
slices_ = {hurst_: (res_[0], res_[-2][:, selector]) for hurst_, res_ in data.items()}
log2ed_ = np.stack([(np.log2(dur_) - (np.log2(delta_[:, np.newaxis]) + levels_) / hurst_).mean(axis=0)
for hurst_, (delta_, dur_) in slices_.items()], axis=0)
hursts_ = np.array([*slices_.keys()])[:, np.newaxis]
order_ = hursts_.argsort(axis=0)[:, 0]
hursts_ = hursts_[order_]
log2ed_ = log2ed_[order_]
log2ed_ /= (1.5 - hursts_)
# h0_ = (hursts_ - 1) / degree + 1
# log2ed_ /= (1.5 - h0_)
log2ed_list.append(log2ed_)
# log2ed_ /= ((hursts_ - 1) / degree + 0.5) ** (-2 / float(degree))
log2ed_ = np.stack(log2ed_list, axis=0).mean(axis=-1)
plt.plot(hursts_, log2ed_[0], "r") # d - 1
plt.plot(hursts_, log2ed_[1], "g") # d - 1 - 0
plt.plot(hursts_, log2ed_[2], "b") # d - 1 - 0.75
plt.plot(hursts_, log2ed_[3], "k")# d - 1 -
dlog2ed_ = np.diff(log2ed_, axis=-1) / np.diff(hursts_.T, axis=-1)
dlog2ed_[:, :-2].mean(axis=-1)
plt.plot(hursts_[1:], dlog2ed_[0], "r")
plt.plot(hursts_[1:], dlog2ed_[1], "g")
plt.plot(hursts_[1:], dlog2ed_[2], "b")
plt.plot(hursts_[1:], dlog2ed_[3], "k")
plt.plot(hursts_[1:], dlog2ed_[0] + 0, "r") # d - 1
plt.plot(hursts_[1:], dlog2ed_[1] + 18.5, "g") # d - 1 - 0
plt.plot(hursts_[1:], dlog2ed_[2] + 25, "b") # d - 1 - 0.75
plt.plot(hursts_[1:], dlog2ed_[3] + 28.25, "k") # d - 1 -
fig = plt.figure(figsize=(16, 9))
ax = fig.add_subplot(111)
color_ = plt.cm.rainbow(np.linspace(0, 1, num=5))[::-1]
for hurst_, Wavgmn in slices_.items():
ax.hist(np.log2(Wavgmn[:, 0]),
bins=200, alpha=0.5, lw=0, normed=True, color="red")
# for level, (Wavgn, col_) in enumerate(zip(Wavgmn.T, color_), 7):
# ax.hist(np.log2(Wavgn) - (float(level) / hurst_)**(1-hurst_),
# bins=200, alpha=0.5, lw=0, normed=True, color=col_)
log_Wavghn = np.stack([np.nanmean(np.diff(np.log2(Wavgmn), axis=1), axis=0)
for hurst_, Wavgmn in slices_.items()])
hursts_ = np.array(slices_.keys())
log_Wavghn.shape
plt.plot(hursts_[np.newaxis, :] * log_Wavghn.T)
colors_ = plt.cm.rainbow_r(np.linspace(0, 1, num=log_Wavghn.shape[1]))
for col_, log_Wavgh in zip(colors_, log_Wavghn.T):
plt.scatter(hursts_, log_Wavgh * hursts_, lw=0, color=col_, alpha=0.5);
log_Wavgh * hursts_ - 1
y = np.log2(np.diff(log_Wavghn, axis=1).mean(axis=1))
X = hursts_
1.0 / (np.diff(y) / np.diff(X))
1.0 / np.diff(log_Wavghn, axis=1).mean(axis=1) - hursts_
plt.scatter(hursts_, np.diff(log_Wavghn, axis=1).mean(axis=1) - 1.0 / hursts_)
plt.scatter(hursts_, np.log2(np.diff(log_Wavghn, axis=1).mean(axis=1)))
plt.plot(np.diff(log_Wavghn, axis=1).T)
# / hursts_[np.newaxis]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute the empirical probabilities by averaging across all replications
Step2: Get theoretical values of the probability according to the conjectured distribution
Step3: Use the geometric distribution's mean value to estimate the hurst exponent
Step4: Experiments
Step5: Load the experiment manager
Step6: Print the keys of the experiment
Step7: Choose a particular instance
Step8: FIGURE 01
Step9: Generate a figure-01 for different sizes and numbers of replications.
Step10: FIGURE 04
Step11: Create a figure-04 plot of mean-based hurst estimates
Step12: FIGURE 08
Step13: Create a figure-08 plot of scaled average crossing durations.
Step14: TABLE 01
Step19: Might want to use \usepackage{booktabs} or \usepackage{lscape}
Step20: FIGURE
|
2,602
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
import math
3 + math.sqrt(3)
expr = 3 * sqrt(3)
expr
init_printing(use_latex='mathjax')
expr
expr = sqrt(8)
expr
x, y = symbols("x y")
expr = x**2 + y**2
expr
expr = (x+y)**3
expr
a = Symbol("a")
a.is_imaginary
b = Symbol("b", integer=True)
b.is_imaginary
c = Symbol("c", positive=True)
c.is_positive
c.is_imaginary
I
I ** 2
Rational(1,3)
Rational(1,3) + Rational(1,2)
expr = Rational(1,3) + Rational(1,2)
N(expr)
N(pi, 100)
pi.evalf(100)
expr = x**2 + 2*x + 1
expr
expr.subs(x, 1)
expr = pi * x**2
expr
expr.subs(x, 3)
N(_)
expr = (x + y) ** 2
expr
expand(expr)
factor(_)
expr = (2*x + Rational(1,3)*x + 4) / x
expr
simplify(expr)
expr = "(2*x + 1/3*x + 4)/x"
simplify(expr)
expr = sin(x)/cos(x)
expr
simplify(expr)
expr = 1/(x**2 + 2*x)
expr
apart(expr)
together(_)
diff(sin(x), x)
diff(log(x**2 + 1) + 2*x, x)
integrate(cos(x), x)
Integral(sin(x), (x,0,pi))
N(_)
expr = Sum(1/(x**2 + 2*x), (x, 1, 10))
expr
expr.doit()
expr = Product(1/(x**2 + 2*x), (x, 1, 10))
expr
expr.doit()
expr = 2*x + 1
solve(expr)
expr = x**2 - 1
solve(expr)
expr_1 = 2*x + y + 3
expr_2 = 2*y - x
solve([expr_1, expr_2],(x,y))
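# Hedged quick check: substituting the solution back into both expressions
# should reduce each one to zero.
sol = solve([expr_1, expr_2], (x, y))
expr_1.subs(sol), expr_2.subs(sol)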
from sympy.physics import units as u
5. * u.milligram
1./2 * u.inch
1. * u.nano
u.watt
u.ohm
kmph = u.km / u.hour
mph = u.mile / u.hour
N(mph / kmph)
80 * N(mph / kmph)
def sympy_expr(x_val):
expr = x**2 + sqrt(3)*x - Rational(1,3)
return expr.subs(x, x_val)
sympy_expr(3)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
list1 = np.arange(1,1000)
list2 = pd.Series(list1)
%timeit [sympy_expr(item) for item in list1]
%timeit [sympy_expr(item) for item in list2]
%timeit np.vectorize(sympy_expr)(list1)
%timeit list2.apply(sympy_expr)
expr = x**2 + sqrt(3)*x - Rational(1,3)
lf = lambdify(x, expr)
%timeit lf(list1)
%timeit lf(list2)
fig = plt.figure()
axes = fig.add_subplot(111)
x_vals = np.linspace(-5.,5.)
y_vals = lf(x_vals)
axes.grid()
axes.plot(x_vals, y_vals)
plt.show();
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: symbols() & Symbol()
Step2: Assumptions for symbols
Step3: Imaginary Numbers
Step4: Rational()
Step5: Numerical evaluation
Step6: subs()
Step7: factor() and expand()
Step8: simplify()
Step9: apart() and together()
Step10: Calculus
Step11: Sum()
Step12: Product()
Step13: Solve()
Step14: Units
Step15: Converting from Kilometers/hours to Miles/hours
Step16: Working with NumPy / Pandas and Matplotlib
|
2,603
|
<ASSISTANT_TASK:>
Python Code:
'''
:input n: int, ENTERED at the keyboard
:pre-cond: n ≥ 0
:output f: int, DISPLAYED on screen
:post-cond: f = n! = 1×2×3×...×n
'''
n = int(input("Value of n (non-negative integer)? "))
f = 1
i = 2
while i <= n:
f = f*i
i = i+1
print(f)
'''
:input n: int, ASSIGNED previously
:pre-cond: n ≥ 0
:output f: int, ASSIGNED for later use
:post-cond: f = n! = 1×2×3×...×n
'''
f = 1
i = 2
while i <= n:
f = f*i
i = i+1
def fact(n):
'''
:input n: int, PASSED as a parameter
:pre-cond: n ≥ 0
:output f: int, RETURNED
:post-cond: f = n! = 1×2×3×...×n
'''
f = 1
i = 2
while i <= n:
f = f*i
i = i+1
return f
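# Quick sanity check of the contract: fact(0) == 1 and fact(5) == 120
print(fact(0), fact(5))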
# the input function provided by Python (parameterless version)
def input():
'''
:input txt1: str, ENTERED at the keyboard
:output txt2: str
:post-cond: txt2 = txt1
'''
# the print function provided by Python
def print(val1):
'''
:input val1: any value
:output val2: any value, DISPLAYED on screen
:post-cond: val2 = val1
'''
def rechercheTab(tab, nb):
'''
:input tab: an array of integers
:input nb: the number to search for
:output present: a boolean indicating whether the number is present in the array
:pre-condition: the array may be empty
'''
present = False
i = 0
while (i < len(tab)) and (present == False):
if tab[i] == nb:
present = True
i = i + 1
return present
print(rechercheTab([3, 6, 4, 5, 9], 6))
print(rechercheTab([3, 6, 4, 5, 9], 1))
print(rechercheTab([], 2))
# Same solution (and same contract), but with a for loop instead of a while
# Careful: the whole array is traversed even when it is not necessary
def rechercheTabBis(tab, nb):
present = False
for i in range(len(tab)):
if tab[i] == nb:
present = True
return present
print(rechercheTabBis([3, 6, 4, 5, 9], 6))
print(rechercheTabBis([3, 6, 4, 5, 9], 1))
print(rechercheTabBis([], 2))
def doublerTableau(tab):
'''
:in-out tab: an array of integers or floats
:pre-condition: none
:post-condition: the array is modified; each cell contains twice the value it previously held
'''
for i in range(len(tab)):
tab[i] = 2*tab[i]
return tab
print(doublerTableau([]))
print(doublerTableau([2,4,7,9]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: We will assume that, in a function, when the transmission mode of the inputs/outputs is not specified, it is necessarily "PASSED as a parameter" for inputs and "RETURNED" for outputs.
Step4: Note, however, that a function's inputs/outputs cannot have "ASSIGNED previously" or "ASSIGNED for later use" as their transmission mode, since a function's variables only exist inside that function.
Step5: Exercise 2: Write an algorithm that replaces every element of the array with its double. Also write the contract.
|
2,604
|
<ASSISTANT_TASK:>
Python Code:
theta = 0.1
lam = 18000
grid_size = int(theta * lam)
def kernel_oversample(ff, Qpx, s=None, P = 1):
"""
Takes a farfield pattern and creates an oversampled convolution
function.
If the far field size is smaller than N*Qpx, we will pad it. This
essentially means we apply a sinc anti-aliasing kernel by default.
:param ff: Far field pattern
:param Qpx: Factor to oversample by -- there will be Qpx x Qpx convolution functions
:param s: Size of convolution function to extract
:returns: Numpy array of shape [ov, ou, v, u], e.g. with sub-pixel
offsets as the outer coordinates.
"""
# Pad the far field to the required pixel size
N = ff.shape[0]
if s is None: s = N
padff = pad_mid(ff, N*Qpx*P)
# Obtain oversampled uv-grid
af = fft(padff)
# Extract kernels
return extract_oversampled(extract_mid(af, N*Qpx), Qpx, s)
grid_size = 2047
aa_over = 256
aa_support = 10
aa_x0 = 0.375
aa_mode = 0
aa_szetan = False
aa_nifty = True
aa_parameter = numpy.pi*aa_support/2
if aa_support == 1:
print("Using trivial gridder")
aa_gcf = numpy.ones((aa_over, aa_support))
def aa(x): return numpy.ones_like(x)
elif aa_nifty:
print("Using exponential of semi-circle with beta=%d" % (aa_support))
aa = numpy.exp(aa_parameter*(numpy.sqrt(1-(2*coordinates(grid_size))**2)-1))
aa_gcf = kernel_oversample(aa, aa_over, aa_support) / grid_size
def aa(x):
return numpy.exp(aa_parameter*(numpy.sqrt(1-(2*x)**2)-1))
elif aa_szetan:
print("Using Sze-Tan's gridder with R=%d, x_0=%g" % (aa_support//2, aa_x0))
aa_gcf = sze_tan_gridder(aa_support//2, aa_x0, aa_over)
def aa(x):
return sze_tan_grid_correction_gen(aa_support//2, aa_x0, x)
print("Mean error:", sze_tan_mean_error(aa_support//2, aa_x0))
else:
print("Using PSWF with mode %d and parameter %g" % (aa_mode, aa_parameter))
aa = scipy.special.pro_ang1(aa_mode, aa_mode, aa_parameter, 2*coordinates(grid_size))[0]
aa_gcf = kernel_oversample(aa, aa_over, aa_support) / grid_size
def aa(x):
return scipy.special.pro_ang1(aa_mode, aa_mode, aa_parameter, 2*x)[0]
# Calculate appropriate step length to give us full accuracy for a field of view of size theta
du = du_opt = aa_x0/(theta/2)
print("Optimal du =", du)
# Plot gridding function
plt.rcParams['figure.figsize'] = 10, 5
r = numpy.arange(-aa_over*(aa_support//2), aa_over*((aa_support+1)//2)) / aa_over
plt.semilogy(du_opt*r, numpy.abs(numpy.transpose(aa_gcf).flatten()));
#plt.semilogy(du_opt*r, numpy.transpose(aa2_gcf).flatten().real);
plt.xticks(du_opt*numpy.arange(-(aa_support//2), ((aa_support+1)//2)+1))
plt.grid(True);plt.xlabel('u/v [$\lambda$]');plt.title('$u/v$ Gridder');plt.show()
# Plot grid correction function
theta_x0 = theta/aa_x0/2
x = coordinates(101)
plt.semilogy(theta*x/aa_x0/2, aa(x));
plt.title('$u/v$ Grid correction');plt.grid(True);plt.xlabel('l [1]')
plt.axvspan(theta/2, theta_x0/2, color='lightgray', hatch='x', alpha=0.5)
plt.axvspan(-theta/2, -theta_x0/2, color='lightgray', hatch='x', alpha=0.5)
plt.annotate('(unused)', xy=((theta+theta_x0)/4,0.9), ha='center', color='gray')
plt.annotate('(unused)', xy=(-(theta+theta_x0)/4,0.9), ha='center', color='gray');
#plt.semilogy(theta*coordinates(grid_size)/aa_x0/2, anti_aliasing_function(grid_size, aa_mode, aa_parameter));
aa_support_w = 8
aa_x0_w = 0.125
aa_szetan_w = False
aa_nifty_w = False
aa_parameter_w = numpy.pi*aa_support_w/2
if aa_support_w == 1:
print("Using trivial gridder")
aa_gcf_w = numpy.ones((aa_over, aa_support_w))
def aa_w(x): return numpy.ones_like(x)
elif aa_nifty_w:
print("Using exponential of semi-circle with beta=%d" % (aa_support))
aa_gcf_w = kernel_oversample(
numpy.exp(aa_support*(numpy.sqrt(1-(2*coordinates(grid_size))**2)-1)),
aa_over, aa_support) / grid_size
def aa_w(x):
return numpy.exp(aa_support*(numpy.sqrt(1-(2*x)**2)-1))
elif aa_szetan_w:
print("Using Sze-Tan's gridder with R=%d, x_0=%g" % (aa_support_w//2, aa_x0_w))
aa_gcf_w = sze_tan_gridder(aa_support_w//2, aa_x0_w, aa_over)
def aa_w(x):
return sze_tan_grid_correction_gen(aa_support_w//2, aa_x0_w, x)
print("Mean error:", sze_tan_mean_error(aa_support_w//2, aa_x0_w))
else:
aa_w = anti_aliasing_function(grid_size, 0, aa_parameter_w)
aa_gcf_w = kernel_oversample(aa_w, aa_over, aa_support_w) / grid_size
def aa_w(x):
return scipy.special.pro_ang1(aa_mode, aa_mode, aa_parameter_w, 2*x)[0]
# Calculate appropriate step length to give us full accuracy for a field of view of size theta
max_n = 1.0 - numpy.sqrt(1.0 - 2*(theta/2)**2)
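# (max_n bounds |n| = |sqrt(1 - l^2 - m^2) - 1| over the field of view;
# the bound is attained at the image corner l = m = theta/2.)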
print("max_n =", max_n)
dw = dw_opt = aa_x0_w / max_n
print("Optimal dw =", dw)
# Plot gridding function
plt.rcParams['figure.figsize'] = 10, 5
r = numpy.arange(-aa_over*(aa_support_w//2), aa_over*((aa_support_w+1)//2)) / aa_over
plt.semilogy(dw_opt*r, numpy.transpose(aa_gcf_w).flatten().real);
plt.xticks(dw_opt*numpy.arange(-(aa_support_w//2), ((aa_support_w+1)//2)+1))
plt.grid(True); plt.xlabel('w [$\lambda$]'); plt.title('$w$ Gridder'); plt.show()
x = coordinates(101)
plt.semilogy(max_n*x/aa_x0_w, aa_w(x));
plt.title('$w$ Grid correction'); plt.grid(True); plt.xlabel('$n$ [1]');
max_n_x0 = max_n/aa_x0_w/2
plt.axvspan(max_n, max_n_x0, color='lightgray', hatch='x', alpha=0.5)
plt.axvspan(-max_n, -max_n_x0, color='lightgray', hatch='x', alpha=0.5)
plt.annotate('(unused)', xy=((max_n+max_n_x0)/2,0.9), ha='center', color='gray')
plt.annotate('(unused)', xy=(-(max_n+max_n_x0)/2,0.9), ha='center', color='gray');
Npt = 500
points = theta * (numpy.random.rand(Npt,2)-0.5)
#points = list(theta/10 * numpy.array(list(itertools.product(range(-5, 6), range(-5, 6)))))
#points.append((theta/3,0))
#points = numpy.array(points)
plt.rcParams['figure.figsize'] = 8, 8
plt.scatter(points[:,0], points[:,1]);
def predict(dist_uvw, du=du_opt, dw=dw_opt, apply_aa = False, apply_aa_w = False):
# Get image coordinates
ls, ms = numpy.transpose(points)
ns = numpy.sqrt(1.0 - ls**2 - ms**2) - 1
# Evaluate grid correction functions in uv & w
aas = numpy.ones(len(ls))
if apply_aa:
aas *= aa(du*ls) * aa(du*ms)
if apply_aa_w:
aas *= aa_w(dw*ns)
# Now simulate points, dividing out grid correction
vis = 0
for l,m, a in zip(ls, ms, aas):
vis += simulate_point(dist_uvw, l, m) / a
return vis
def predict_grid(u,v,w,ov_u,ov_v,ov_w,du=du_opt, dw=dw_opt, visualise=False):
# Generate offsets that we are going to sample at
ius, ivs, iws = numpy.meshgrid(numpy.arange(aa_support), numpy.arange(aa_support), numpy.arange(aa_support_w))
dus = du*(ius.flatten()-(aa_support//2)+ov_u/aa_over)
dvs = du*(ivs.flatten()-(aa_support//2)+ov_v/aa_over)
dws = dw*(iws.flatten()-(aa_support_w//2)+ov_w/aa_over)
# Get grid convolution function for offsets
aas = aa_gcf[ov_u,ius.flatten()] * aa_gcf[ov_v,ivs.flatten()] * aa_gcf_w[ov_w,iws.flatten()]
# Add offsets to all uvw coordinates
us = numpy.array(u)[:,numpy.newaxis] + dus[numpy.newaxis,:]
vs = numpy.array(v)[:,numpy.newaxis] + dvs[numpy.newaxis,:]
ws = numpy.array(w)[:,numpy.newaxis] + dws[numpy.newaxis,:]
# Visualise sampling pattern?
if visualise:
ax = plt.subplot(111, projection='3d')
ax.scatter(us,vs,ws, color='red');
ax.set_xlabel('u'); ax.set_ylabel('v'); ax.set_zlabel('w')
# Predict visibilities
vis = predict(numpy.transpose([us.flatten(),vs.flatten(),ws.flatten()]),
du=du, dw=dw, apply_aa=True, apply_aa_w=True).reshape(us.shape)
# Convolve with gridder, sum up
return numpy.sum(vis * aas[numpy.newaxis,:], axis=1)
@interact(u=(-lam/2,lam/2,0.1),v=(-lam/2,lam/2,0.1),w=(-lam/2,lam/2,0.1),
ov_u=(0,aa_over-1), ov_v=(0,aa_over-1), ov_w=(0,aa_over-1),
du=(du_opt/10,du_opt*2,du_opt/10), dw=(dw_opt/10,dw_opt*2,dw_opt/10))
def test(u=0,v=0,w=0, ov_u=0,ov_v=0,ov_w=0, du=du_opt, dw=dw_opt):
vis = predict(numpy.transpose([[u],[v],[w]]))
print("Direct: ", vis[0])
vis_sum = predict_grid([u],[v],[w],ov_u,ov_v,ov_w,du,dw)
print("Grid: ", vis_sum[0])
print("Error: ", numpy.abs(vis[0]-vis_sum[0]) / numpy.sqrt(Npt))
N = 500
us = lam * (numpy.random.rand(N)-0.5)
vs = lam * (numpy.random.rand(N)-0.5)
ws = lam * (numpy.random.rand(N)-0.5)
ov_u = random.randint(0, aa_over-1)
ov_v = random.randint(0, aa_over-1)
ov_w = random.randint(0, aa_over-1)
vis = predict(numpy.transpose([us,vs,ws]))
grid_vis = predict_grid(us,vs,ws,ov_u,ov_v,ov_w)
diff = numpy.abs(vis-grid_vis)
mean_err = numpy.sqrt(numpy.mean(diff**2)) / numpy.mean(numpy.abs(vis))
print("Mean error:", mean_err)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, some grid characteristics. Only theta is actually important here, the rest is just decides the range of the example $u/v$ values.
Step2: Determine $u/v$ gridding function to use. Three choices here - trivial, Sze-Tan's version and PSWF. x0 decides how much of the image coordinate space we can actually use without errors rising.
Step3: Now generate some sources on the sky. We use a random pattern to make reasonably sure that we are not hand-picking a good sky pattern.
Step4: Set up code to predict visibilities - either directly or by visibilities weighted by the grid correction and offset in a grid-like fashion.
Step5: Now we can test the performance of the sampling over a wide variety of parameters. Note that u,v and w do not actually matter too much, but we get into trouble quickly by increasing du or dw -- that is when we start using our gridder for inaccurate image coordinates!
Step6: We can make a quick statistic by feeding in a good couple of points
|
2,605
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out:
'''
with tf.variable_scope('generator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
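# Note: tanh bounds the generator output to (-1, 1), which is why the real
# MNIST images are rescaled to the same range (batch_images*2 - 1) before
# being passed to the discriminator in the training loop below.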
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('discriminator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(h1 * alpha, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(input_real, d_hidden_size, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, d_hidden_size, reuse=True, alpha=alpha)
# Calculate losses
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_real, labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))
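# Note: the generator loss labels the fake logits as ones -- the generator
# is rewarded when the discriminator classifies its output as real.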
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
2,606
|
<ASSISTANT_TASK:>
Python Code:
### BEGIN SOLUTION
import sympy as sym
x = sym.Symbol("x")
y = 2 * x * (x - 3) * (x - 5)
sym.diff(y, x)
### END SOLUTION
q1_a_answer = _
feedback_text = """Your output is not a symbolic expression.
You are expected to use sympy for this question."""
try:
assert q1_a_answer.is_algebraic_expr(), feedback_text
except AttributeError:
print(feedback_text)
x = sym.Symbol("x")
expected_answer = 2 * x * (x - 5) + 2 * x * (x - 3) + (x - 3) * (2 * x - 10)
feedback_text = f"""Your answer is not correct.
The expected answer is {expected_answer}."""
assert sym.simplify(q1_a_answer - expected_answer) == 0, feedback_text
### BEGIN SOLUTION
y = (3 * x ** 3 + 6 * sym.sqrt(x) + 3) / (3 * x ** (sym.S(1) / 4))
sym.diff(y, x)
### END SOLUTION
q1_b_answer = _
feedback_text = """Your output is not a symbolic expression.
You are expected to use sympy for this question."""
try:
assert q1_b_answer.is_algebraic_expr(), feedback_text
except AttributeError:
print(feedback_text)
x = sym.Symbol("x")
expected_answer = sym.diff((3 * x ** 3 + 6 * sym.sqrt(x) + 3) / (3 * x ** (sym.S(1) / 4)), x)
feedback_text = f"""Your answer is not correct.
The expected answer is {expected_answer}."""
assert sym.simplify(q1_b_answer - expected_answer) == 0, feedback_text
### BEGIN SOLUTION
y = 2 * x * (x - 3) * (x - 5)
sym.diff(y, x)
### END SOLUTION
q1_c_answer = _
feedback_text = """Your output is not a symbolic expression.
You are expected to use sympy for this question."""
try:
assert q1_c_answer.is_algebraic_expr(), feedback_text
except AttributeError:
print(feedback_text)
x = sym.Symbol("x")
expected_answer = sym.diff(2 * x * (x - 3) * (x - 5), x)
feedback_text = f"""Your answer is not correct.
The expected answer is {expected_answer}."""
assert sym.simplify(q1_c_answer - expected_answer) == 0, feedback_text
### BEGIN SOLUTION
f = - x ** 3 + 2 * x ** 2 + 3 * x
f_dash = sym.diff(f, x)
turning_points = sym.solveset(f_dash, x)
### END SOLUTION
feedback_text = """
Your output is not a sympy Finite Set which is what is expected here.
"""
try:
assert type(turning_points) is sym.FiniteSet
except NameError:
print("You did not create a variable called `turning_points`")
expected_answer = sym.solveset(- 3 * x ** 2 + 4 * x + 3, x)
feedback_text = f"""Your answer is not correct.
The expected answer is obtained by equating the derivative of f to 0 which gives:
{expected_answer}"""
assert set(turning_points) == set(expected_answer), feedback_text
### BEGIN SOLUTION
g = - x ** 3 + 3 * x ** 2 - x + 3
intersection_points = sym.solveset(sym.Eq(f, g), x)
### END SOLUTION
feedback_text = """
Your output is not a sympy Finite Set which is what is expected here.
"""
try:
assert type(intersection_points) is sym.FiniteSet
except NameError:
print("You did not create a variable called `turning_points`")
expected_answer = sym.FiniteSet(1, 3)
feedback_text = f"""Your answer is not correct.
The expected answer is obtained by equating f and g:
{expected_answer}"""
assert set(intersection_points) == set(expected_answer), feedback_text
### BEGIN SOLUTION
area_of_shaded_region = abs(sym.integrate(f, (x, 1, 3)) - sym.integrate(g, (x, 1, 3)))
area_of_shaded_region
### END SOLUTION
feedback_text = "Your output is not a sympy rational which is expected here"
try:
assert type(area_of_shaded_region) is sym.Rational, feedback_text
except NameError:
print("You did not create a variable called `area_of_shaded_region`")
expected_answer = sym.S(4) / 3
feedback_text = f"""The expected answer is {expected_answer} which is obtained by
calculating the definite integral of |f - g| between 1 and 3."""
assert float(area_of_shaded_region) == float(expected_answer), feedback_text
import itertools
import numpy as np
### BEGIN SOLUTION
letters = "MONGOLIA"
words = list(itertools.combinations(letters, 4))
number_of_selections = len(words)
### END SOLUTION
expected_answer = 70
feedback_text = f"""The expected number of selections is {expected_answer}.
This is obtained using the `itertools.combinations` function."""
try:
assert number_of_selections == expected_answer, feedback_text
except NameError:
print("You did not create a variable `number_of_selections`")
### BEGIN SOLUTION
probability_of_selecting_N = sum('N' in word for word in words) / len(words)
### END SOLUTION
expected_answer = 35 / 70
feedback_text = f"""The expected probability is {expected_answer}.
This is obtained using a conditional summation over the generated words."""
try:
assert float(probability_of_selecting_N) == float(expected_answer), feedback_text
except NameError:
print("You did not create a variable `probability_of_selecting_P`")
### BEGIN SOLUTION
words = list(itertools.permutations(letters, 4))
probability_of_selecting_GOAL = sum((["A", "G", "L", "O"] == sorted(word)) for word in words) / len(words)
### END SOLUTION
expected_answer = 48 / 1680
feedback_text = f"""The expected probability is {expected_answer}.
This is obtained using a conditional summation over the words generated using `itertools.permutations`."""
try:
assert np.isclose(probability_of_selecting_GOAL, expected_answer), feedback_text
except NameError:
print("You did not create a variable `probability_of_selecting_TOP`")
def generate_x(n, p):
### BEGIN SOLUTION
"""Gives the nth term of the sequence for a given value of p"""
if n == 1:
return 1
previous_x = generate_x(n - 1, p)
return previous_x * (p + previous_x)
### END SOLUTION
feedback_text = """You did not include a docstring. This is important to help document your code.
It is done using triple quotation marks. For example:
def get_remainder(m, n):
\"\"\"
This function returns the remainder of m when dividing by n
\"\"\"
...
Using that it's possible to access the docstring,
one way to do this is to type: `get_remainder?`
(which only works in Jupyter) or help(get_remainder).
We can also comment code using `#` but this is completely
ignored by Python so cannot be accessed in the same way."""
try:
assert generate_x.__doc__ is not None, feedback_text
except NameError:
print("You did not create a variable called `area_of_shaded_region`")
try:
assert generate_x(n=1, p=2) == 1, f"Your function did not give the expected answer for n=1, p=2"
assert generate_x(n=1, p=1) == 1, f"Your function did not give the expected answer for n=1, p=2"
assert generate_x(n=5, p=1) == 1806, f"Your function did not give the expected answer for n=5, p=1"
assert generate_x(n=5, p=2) == 65535, f"Your function did not give the expected answer for n=5, p=2"
except NameError:
print("You did not create a variable called `area_of_shaded_region`")
### BEGIN SOLUTION
p = sym.Symbol("p")
generate_x(n=2, p=p)
### END SOLUTION
q4_a_answer = _
feedback_text = """Your output is not a symbolic expression.
You are expected to use sympy for this question."""
try:
assert q4_a_answer.is_algebraic_expr(), feedback_text
except AttributeError:
print(feedback_text)
p = sym.Symbol("p")
expected_answer = p + 1
feedback_text = f"""Your answer is not correct.
The expected answer is {expected_answer}."""
assert sym.simplify(q4_a_answer - expected_answer) == 0, feedback_text
### BEGIN SOLUTION
generate_x(3, p=p)
### END SOLUTION
q4_b_answer = _
feedback_text = """Your output is not a symbolic expression.
You are expected to use sympy for this question."""
try:
assert q4_b_answer.is_algebraic_expr(), feedback_text
except AttributeError:
print(feedback_text)
p = sym.Symbol("p")
expected_answer = 2 * p ** 2 + 3 * p + 1
feedback_text = f"""Your answer is not correct.
The expected answer is {expected_answer}."""
assert sym.simplify(q4_b_answer - expected_answer) == 0, feedback_text
### BEGIN SOLUTION
sym.solveset(sym.Eq(generate_x(3, p=p), 1), p)
### END SOLUTION
q4_c_answer = _
feedback_text = """
Your output is not a sympy Finite Set which is what is expected here.
"""
assert type(q4_c_answer) is sym.FiniteSet, feedback_text
values_of_n = [1, 2, 3, 4, 5, 6, 7, 8, 9, 100, 200, 300]
### BEGIN SOLUTION
values = [generate_x(n=n, p=-sym.S(3) / 2) for n in values_of_n]
range_of_values = min(values), max(values)
### END SOLUTION
import numpy as np
expected_answer = (- sym.S(1) / 2, 1)
expected_answer = np.array(sorted(expected_answer), dtype=np.float64)
range_of_values = np.array(sorted(range_of_values), dtype=np.float64)
feedback_text = f"""Your answer is not correct, the expected answer is:
{expected_answer}"""
try:
assert np.allclose(expected_answer, range_of_values), feedback_text
except NameError:
print("You did not create a variable called `all_values_of_n`")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Computing for Mathematics - Example individual coursework
Step5: b. \(y = \frac{3x^3 + 6\sqrt{x} + 3}{3x^{1/4}}\)
Step8: \(y=2 x (x - 3) (x - 5)\)
Step11: Question 2
Step14: b. Create a variable intersection_points which has value the points where \(f(x)\) and \(g(x)\) intersect.
Step16: c. Using your answers to parts b., calculate the area of the region between \(f\) and \(g\). Assign this value to a variable area_of_shaded_region.
Step18: Question 3
Step20: b. Create a variable probability_of_selecting_N with value the probability that the letter N is included in the selection.
Step22: c. Letters are now selected at random, one at a time, from the 8 letters of the word MONGOLIA, and are placed in order in a line. Create a variable probability_of_selecting_GOAL with value the probability that the 4 letters can form the word GOAL.
Step25: Question 4
Step28: b. Output an expression for \(x_2\) in terms of \(p\).
Step31: b. Output an expression for \(x_3\) in terms of \(p\)
Step33: c. Output the values of \(p\) for which \(x_3=1\).
Step35: d. Using the non zero value of \(p\) calculated in the previous question calculate \(x_n\) for \(n\in{1, 2, 3, 4, 5, 6, 7, 8, 9, 100, 200, 300}\) and create a variable range_of_values with minimum and maximum value or \(x_n\).
|
2,607
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
# GRADED FUNCTION: identity_block
def identity_block(X, f, filters, stage, block):
"""
Implementation of the identity block as defined in Figure 3
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
Returns:
X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
"""
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value. You'll need this later to add back to the main path.
X_shortcut = X
# First component of main path
X = Conv2D(filters = F1, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
### START CODE HERE ###
# Second component of main path (≈3 lines)
X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
X = Activation('relu')(X)
# Third component of main path (≈2 lines)
X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
# Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
X = Add()([X, X_shortcut])
X = Activation('relu')(X)
### END CODE HERE ###
return X
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(1)
A_prev = tf.placeholder("float", [3, 4, 4, 6])
X = np.random.randn(3, 4, 4, 6)
A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
test.run(tf.global_variables_initializer())
out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
print("out = " + str(out[0][1][1][0]))
# GRADED FUNCTION: convolutional_block
def convolutional_block(X, f, filters, stage, block, s = 2):
"""Implementation of the convolutional block as defined in Figure 4
Arguments:
X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
f -- integer, specifying the shape of the middle CONV's window for the main path
filters -- python list of integers, defining the number of filters in the CONV layers of the main path
stage -- integer, used to name the layers, depending on their position in the network
block -- string/character, used to name the layers, depending on their position in the network
s -- Integer, specifying the stride to be used
Returns:
X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
"""
# defining name basis
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# Retrieve Filters
F1, F2, F3 = filters
# Save the input value
X_shortcut = X
##### MAIN PATH #####
# First component of main path
X = Conv2D(F1, (1, 1), strides = (s,s), padding = 'valid', name = conv_name_base + '2a', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2a')(X)
X = Activation('relu')(X)
### START CODE HERE ###
# Second component of main path (≈3 lines)
X = Conv2D(F2, (f, f), strides = (1,1), padding = 'same', name = conv_name_base + '2b', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2b')(X)
X = Activation('relu')(X)
# Third component of main path (≈2 lines)
X = Conv2D(F3, (1, 1), strides = (1,1), padding = 'valid', name = conv_name_base + '2c', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = bn_name_base + '2c')(X)
##### SHORTCUT PATH #### (≈2 lines)
X_shortcut = Conv2D(F3, (1, 1), strides = (s,s), padding = 'valid', name = conv_name_base + '1', kernel_initializer = glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis = 3, name = bn_name_base + '1')(X_shortcut)
# Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
X = Add()([X, X_shortcut])
X = Activation('relu')(X)
### END CODE HERE ###
return X
tf.reset_default_graph()
with tf.Session() as test:
np.random.seed(1)
A_prev = tf.placeholder("float", [3, 4, 4, 6])
X = np.random.randn(3, 4, 4, 6)
A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
test.run(tf.global_variables_initializer())
out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
print("out = " + str(out[0][1][1][0]))
# GRADED FUNCTION: ResNet50
def ResNet50(input_shape = (64, 64, 3), classes = 6):
"""Implementation of the popular ResNet50 with the following architecture:
CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
-> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
Arguments:
input_shape -- shape of the images of the dataset
classes -- integer, number of classes
Returns:
model -- a Model() instance in Keras
"""
# Define the input as a tensor with shape input_shape
X_input = Input(input_shape)
# Zero-Padding
X = ZeroPadding2D((3, 3))(X_input)
# Stage 1
X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)
X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)
X = Activation('relu')(X)
X = MaxPooling2D((3, 3), strides=(2, 2))(X)
# Stage 2
X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block='a', s = 1)
X = identity_block(X, 3, [64, 64, 256], stage=2, block='b')
X = identity_block(X, 3, [64, 64, 256], stage=2, block='c')
### START CODE HERE ###
# Stage 3 (≈4 lines)
X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block='a', s = 2)
X = identity_block(X, 3, [128, 128, 512], stage=3, block='b')
X = identity_block(X, 3, [128, 128, 512], stage=3, block='c')
X = identity_block(X, 3, [128, 128, 512], stage=3, block='d')
# Stage 4 (≈6 lines)
X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block='a', s = 2)
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='b')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='c')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='d')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='e')
X = identity_block(X, 3, [256, 256, 1024], stage=4, block='f')
# Stage 5 (≈3 lines)
X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block='a', s = 2)
X = identity_block(X, 3, [512, 512, 2048], stage=5, block='b')
X = identity_block(X, 3, [512, 512, 2048], stage=5, block='c')
# AVGPOOL (≈1 line). Use "X = AveragePooling2D(...)(X)"
X = AveragePooling2D((2,2), name = "avg_pool")(X)
### END CODE HERE ###
# output layer
X = Flatten()(X)
X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)
# Create model
model = Model(inputs = X_input, outputs = X, name='ResNet50')
return model
model = ResNet50(input_shape = (64, 64, 3), classes = 6)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
model.fit(X_train, Y_train, epochs = 2, batch_size = 32)
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
model = load_model('ResNet50.h5')
preds = model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
img_path = 'images/my_image.jpg'
img = image.load_img(img_path, target_size=(64, 64))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print('Input image shape:', x.shape)
my_image = scipy.misc.imread(img_path)
imshow(my_image)
print("class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ")
print(model.predict(x))
model.summary()
plot_model(model, to_file='model.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: 1 - The problem of very deep neural networks
Step4: Expected Output
Step6: Expected Output
Step7: Run the following code to build the model's graph. If your implementation is not correct you will know it by checking your accuracy when running model.fit(...) below.
Step8: As seen in the Keras Tutorial Notebook, prior to training a model, you need to configure the learning process by compiling the model.
Step9: The model is now ready to be trained. The only thing you need is a dataset.
Step10: Run the following cell to train your model on 2 epochs with a batch size of 32. On a CPU it should take you around 5min per epoch.
Step11: Expected Output
Step12: Expected Output
Step13: ResNet50 is a powerful model for image classification when it is trained for an adequate number of iterations. We hope you can use what you've learnt and apply it to your own classification problem to achieve state-of-the-art accuracy.
Step14: You can also print a summary of your model by running the following code.
Step15: Finally, run the code below to visualize your ResNet50. You can also download a .png picture of your model by going to "File -> Open...-> model.png".
|
2,608
|
<ASSISTANT_TASK:>
Python Code:
import time
from poppy.creatures import PoppyTorso
poppy = PoppyTorso(simulator='vrep')
io = poppy._controllers[0].io
name = 'cube'
position = [0.2, 0, 1] # X, Y, Z
sizes = [0.15, 0.15, 0.15] # in meters
mass = 0.1 # in kg
io.add_cube(name, position, sizes, mass)
# open the grippers
poppy.l_arm_z.goal_position = 20
poppy.r_arm_z.goal_position = -20
# close the grippers
poppy.l_arm_z.goal_position = -20
poppy.r_arm_z.goal_position = 20
poppy.l_shoulder_y.goal_position = -40
poppy.r_shoulder_y.goal_position = -40
# raise the arms
poppy.l_shoulder_y.goto_position(-180,0.1)
poppy.r_shoulder_y.goto_position(-180,0.1)
# throw the cube
poppy.l_shoulder_y.goal_position = -40
poppy.r_shoulder_y.goal_position = -40
poppy.l_arm_z.goal_position = 20
poppy.r_arm_z.goal_position = -20
poppy.reset_simulation()
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()
from poppy.creatures import PoppyTorso
poppy=PoppyTorso(simulator='vrep')
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add an object
Step2: A few examples of "useful" movements
Step3: A possible solution
Step4: Still buggy? Try these
Step5: Finished? Stop the simulation here
|
2,609
|
<ASSISTANT_TASK:>
Python Code:
# Import the libraries needed for this project
import numpy as np
import pandas as pd
from time import time
from IPython.display import display # allows the use of display() for DataFrames
# Import the supplementary visualization code visuals.py
import visuals as vs
# Provide nicer visualizations for the notebook
%matplotlib inline
# Load the census data
data = pd.read_csv("census.csv")
# Success - display the first record
display(data.head())
# TODO: total number of records
n_records = data.count().income
# TODO: number of respondents with income greater than $50,000
n_greater_50k = data[data.income == '>50K'].shape[0]
# TODO: number of respondents with income at most $50,000
n_at_most_50k = data[data.income == '<=50K'].shape[0]
# TODO: percentage of respondents with income greater than $50,000
greater_percent = 100.0*n_greater_50k/n_records
# Print the results
print "Total number of records: {}".format(n_records)
print "Individuals making more than $50,000: {}".format(n_greater_50k)
print "Individuals making at most $50,000: {}".format(n_at_most_50k)
print "Percentage of individuals making more than $50,000: {:.2f}%".format(greater_percent)
# Split the data into features and the corresponding labels
income_raw = data['income']
features_raw = data.drop('income', axis = 1)
# Visualize the skewed continuous features of the original data
vs.distribution(data)
# Apply a log transformation to the skewed data
skewed = ['capital-gain', 'capital-loss']
features_raw[skewed] = data[skewed].apply(lambda x: np.log(x + 1))
# Visualize the distribution of the log-transformed data
vs.distribution(features_raw, transformed = True)
# Import sklearn.preprocessing.MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler and apply it to the features
scaler = MinMaxScaler()
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
features_raw[numerical] = scaler.fit_transform(data[numerical])
# Display an example record after scaling
display(data.head())
display(features_raw.head())
print 'Origin features:'
display(features_raw.head())
print 'Origin income:'
display(income_raw.head())
# TODO: one-hot encode the 'features_raw' data using pandas.get_dummies()
features = pd.get_dummies(features_raw)
print type(income_raw)
# TODO: encode 'income_raw' into numeric values
income = income_raw.replace({'<=50K':0, '>50K':1})
# Print the number of features after one-hot encoding
encoded = list(features.columns)
print "{} total features after one-hot encoding.".format(len(encoded))
# Print the encoded feature names
print encoded
# Import train_test_split
from sklearn.model_selection import train_test_split
# Split the 'features' and 'income' data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features, income, test_size = 0.2, random_state = 0)
# Show the results of the split
print "Training set has {} samples.".format(X_train.shape[0])
print "Testing set has {} samples.".format(X_test.shape[0])
# TODO: compute the accuracy
tp = income[income==1].shape[0]
fp = income[income==0].shape[0]
tn = 0
fn = 0
accuracy = 1.0*tp/income.shape[0]
precision = 1.0*tp/(tp+fp)
recall = 1.0*tp/(tp+fn)
# TODO: compute the F-score with beta=0.5 using the formula above
beta = 0.5
fscore = 1.0*(1+pow(beta,2))*precision*recall / ((pow(beta,2)*precision)+recall)
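# Sanity check (illustrative): the hand-computed F-score should match
# sklearn's fbeta_score for an always-positive ("naive") predictor:
# from sklearn.metrics import fbeta_score
# naive_pred = np.ones(income.shape[0], dtype=int)
# assert abs(fscore - fbeta_score(income, naive_pred, beta=0.5)) < 1e-8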
# Print the results
print "Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore)
# TODO: import two evaluation metrics from sklearn - fbeta_score and accuracy_score
from sklearn.metrics import fbeta_score, accuracy_score
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
'''
inputs:
- learner: the learning algorithm to be trained and predicted on
- sample_size: the size of samples (number) to be drawn from training set
- X_train: features training set
- y_train: income training set
- X_test: features testing set
- y_test: income testing set
'''
results = {}
# TODO: Fit the learner to the training data using slicing with 'sample_size'
start = time() # Record the start time
learner = learner.fit(X_train[0:sample_size], y_train[0:sample_size])
end = time() # Record the end time
# TODO: compute the training time
results['train_time'] = end - start
# TODO: get predictions on the test set,
# then on the first 300 training examples
start = time() # Record the start time
predictions_test = learner.predict(X_test)
predictions_train = learner.predict(X_train[0:300])
end = time() # Record the end time
# TODO: compute the prediction time
results['pred_time'] = end - start
# TODO: compute the accuracy on the first 300 training examples
results['acc_train'] = accuracy_score(y_train[0:300], predictions_train)
# TODO: compute the accuracy on the test set
results['acc_test'] = accuracy_score(y_test, predictions_test)
# TODO: compute the F-score on the first 300 training examples
results['f_train'] = fbeta_score(y_train[0:300], predictions_train, 0.5)
# TODO: compute the F-score on the test set
results['f_test'] = fbeta_score(y_test, predictions_test, 0.5)
# Success
print "{} trained on {} samples.".format(learner.__class__.__name__, sample_size)
# Return the results
return results
%%time
%pdb on
# TODO: import three supervised learning models from sklearn
from sklearn import tree, svm, neighbors, ensemble
# TODO: initialize the three models
clf_A = tree.DecisionTreeClassifier(random_state=20)
clf_B = neighbors.KNeighborsClassifier()
# clf_C = svm.SVC(random_state=20)
clf_C = ensemble.GradientBoostingClassifier(random_state=20)
# TODO: compute how many samples correspond to 1%, 10% and 100% of the training data
samples_1 = int(X_train.shape[0]*0.01)
samples_10 = int(X_train.shape[0]*0.1)
samples_100 = int(X_train.shape[0]*1.0)
# Collect the learners' results
results = {}
for clf in [clf_A, clf_B, clf_C]:
clf_name = clf.__class__.__name__
results[clf_name] = {}
for i, samples in enumerate([samples_1, samples_10, samples_100]):
results[clf_name][i] = \
train_predict(clf, samples, X_train, y_train, X_test, y_test)
for k in results.keys():
result_df = pd.DataFrame.from_dict(results[k]).T
result_df.index = ['1%', '10%', '100%']
print k
display(result_df)
# Visualize the evaluation results for the three chosen models
vs.evaluate(results, accuracy, fscore)
%%time
%pdb on
# TODO: import 'GridSearchCV', 'make_scorer' and any other required libraries
from sklearn.metrics import fbeta_score, make_scorer, accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn import ensemble
# TODO: initialize the classifier
clf = ensemble.GradientBoostingClassifier(random_state=20)
# TODO: create the list of parameters you wish to tune
#parameters = {'n_neighbors':range(5,10,5), 'algorithm':['ball_tree', 'brute']}
parameters = {'max_depth':range(2,10,1)}
# TODO: create an fbeta_score scoring object
scorer = make_scorer(fbeta_score, beta=0.5)
# TODO: perform a grid search on the classifier, using 'scorer' as the scoring function
grid_obj = GridSearchCV(clf, parameters, scorer)
# TODO: fit the grid search object to the training data and find the optimal parameters
print "Start to GridSearchCV"
grid_obj.fit(X_train, y_train)
print "Start to fit origin model"
clf.fit(X_train, y_train)
# Get the best estimator
best_clf = grid_obj.best_estimator_
# Make predictions with the untuned model
print "Start to predict"
predictions = clf.predict(X_test)
best_predictions = best_clf.predict(X_test)
# Report the scores before and after tuning
print "Unoptimized model\n------"
print "Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions))
print "F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5))
print "\nOptimized Model\n------"
print "Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))
print "Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5))
print "Best parameter:"
print grid_obj.best_params_
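# Illustrative (standard GridSearchCV API): the full cross-validation results
# can be inspected as a DataFrame, e.g.
# display(pd.DataFrame(grid_obj.cv_results_)[['param_max_depth', 'mean_test_score']])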
%%time
# TODO: import a supervised learning model that has 'feature_importances_'
from sklearn.ensemble import GradientBoostingClassifier
# TODO: train a supervised learning model on the training set
model = GradientBoostingClassifier()
model.fit(X_train, y_train)
# TODO: extract the feature importances
importances = model.feature_importances_
# Plot
vs.feature_plot(importances, X_train, y_train)
%%time
# Import functionality for cloning a model
from sklearn.base import clone
# Reduce the feature space
X_train_reduced = X_train[X_train.columns.values[(np.argsort(importances)[::-1])[:5]]]
X_test_reduced = X_test[X_test.columns.values[(np.argsort(importances)[::-1])[:5]]]
# Train a "best" model based on the earlier grid search
# by cloning the tuned best_clf found above
clf = (clone(best_clf)).fit(X_train_reduced, y_train)
# Make new predictions
best_predictions = model.predict(X_test)
reduced_predictions = clf.predict(X_test_reduced)
# Report the final model's scores for each version of the data
print "Final Model trained on full data\n------"
print "Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, best_predictions))
print "F-score on testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5))
print "\nFinal Model trained on reduced data\n------"
print "Accuracy on testing data: {:.4f}".format(accuracy_score(y_test, reduced_predictions))
print "F-score on testing data: {:.4f}".format(fbeta_score(y_test, reduced_predictions, beta = 0.5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise: Data Exploration
Step2: Preparing the Data
Step3: For highly skewed features such as 'capital-gain' and 'capital-loss', it is common practice to apply a <a href="https
Step4: Normalizing Numerical Features
Step5: Exercise: Data Preprocessing
Step6: Shuffle and Split Data
Step7: Evaluating Model Performance
Step8: Supervised Learning Models
Step9: Exercise: Initial Model Evaluation
Step10: Improving Results
Step11: Question 5 - Final Model Evaluation
Step12: Question 7 - Extracting Feature Importances
|
2,610
|
<ASSISTANT_TASK:>
Python Code:
import torch

# a is a tensor with requires_grad=True
a = torch.tensor(2., requires_grad=True);a
b = a.detach();b # with detach(), no grad is tracked.
c = a.data;c
d = a.item();d
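# A minimal illustrative check: detach()/.data share storage with `a`, so an
# in-place change to the detached view is visible through `a`, while neither
# view tracks gradients.
b.add_(1.) # modifies the shared storage in place
print(a) # tensor(3., requires_grad=True) -- `a` reflects the change
print(b.requires_grad, c.requires_grad) # False False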
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: tensor.data returns a new tensor that shares storage with tensor. However, it always has requires_grad=False (even if the original tensor had requires_grad=True).
|
2,611
|
<ASSISTANT_TASK:>
Python Code:
def digitDividesK(num , k ) :
while(num ) :
d = num % 10
if(d != 0 and k % d == 0 ) :
return True
num = num // 10
return False
def findCount(l , r , k ) :
count = 0
for i in range(l , r + 1 ) :
if(digitDividesK(i , k ) ) :
count += 1
return count
l = 20
r = 35
k = 45
print(findCount(l , r , k ) )
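# Another illustrative query: every number in 10..15 contains the digit 1,
# which divides 5, so the count over 1..15 with k = 5 is 8
print(findCount(1 , 15 , 5 ) )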
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,612
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import HTML
def css_styling():
sheet = '../css/custom.css'
styles = open(sheet, "r").read()
return HTML(styles)
css_styling()
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
%matplotlib inline
class AbstractShape(object):
"""A class that shares common methods between rectangles and triangles:
Note that some methods raise an error - what we are doing here is
actually creating 'abstract methods' which helps achieve a consistent
API through all derived classes => This is Polymorphism!
See example 07-PredatorPrey for a more robust way of defining abstract
base classes
"""
def __init__(self, base, height, center):
# Add all args as attributes- there are quicker/better ways
# of doing this, but this is fine
self.base = base
self.height = height
self.center = center
# Call some methods: This will do nothing unless the methods are
# defined in the base classes!
self.vertices = self.get_vertices()
self.area = self.get_area()
def plot(self, ax):
# First point must be repeated for a closed plot
x = np.hstack([self.vertices[:, 0], self.vertices[0, 0]])
y = np.hstack([self.vertices[:, 1], self.vertices[0, 1]])
ax.plot(x, y, '-')
def get_vertices(self):
raise NotImplementedError('Base class method should not be called directly')
def get_area(self):
raise NotImplementedError('Base class method should not be called directly')
# Magic methods extension
def __str__(self):
return "Shape object - base={}, height={}, Area={}".format(self.base, self.height, self.area)
def __lt__(self, shape):
return self.area < shape.area
# Your classes here
class Rectangle(AbstractShape):
def __init__(self, base=1, height=1, center=(0., 0.)):
super().__init__(base, height, center)
def get_vertices(self):
pts = np.ones([4, 2]) * self.center
xshift = self.base / 2.
yshift = self.height / 2.
pts[0,:] += np.array([-xshift, -yshift])
pts[1,:] += np.array([xshift, -yshift])
pts[2,:] += np.array([xshift, yshift])
pts[3,:] += np.array([-xshift, yshift])
return pts
def get_area(self):
return self.base * self.height
class Triangle(AbstractShape):
def __init__(self, base=1, height=1, center=(0., 0.)):
"""Obtain the vertices of a triangle (isosceles) given its
base, height and the coordinates of the base line mid point
"""
super().__init__(base, height, center)
def get_vertices(self):
pts = np.ones([3, 2]) * self.center
pts[0,:] += np.array([-self.base/2., 0])
pts[1,:] += np.array([self.base/2., 0])
pts[2,:] += np.array([0, self.height])
return pts
def get_area(self):
return 0.5 * self.base * self.height
# Extension
class Cuboid(Rectangle):
def __init__(self, base, height, depth, center):
self.depth = depth
super().__init__(base, height, center)
def get_vertices(self):
base2d = super().get_vertices()
midplane = np.zeros([4,3])
midplane[:,:-1] = base2d
zshift = np.array([0, 0, self.depth/2.])
lower_plane = midplane - zshift
upper_plane = midplane + zshift
return np.vstack([lower_plane, upper_plane])
def plot(self, ax):
ax.scatter(self.vertices[:,0], self.vertices[:,1], self.vertices[:,2])
# Helper functions:
def init_figure():
fig = plt.figure()
ax = fig.add_subplot(111)
plt.axis('equal')
return fig, ax
def init_3dfigure():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
return fig, ax
# Test code for the rectangle class:
fig, ax = init_figure()
for i in range(1,5):
rect = Rectangle(base=i, height=i, center=(0.,0.))
rect.plot(ax)
# Test code for the triangle class:
fig, ax = init_figure()
for height in range(1,5):
tri = Triangle(base=4, height=height, center=(0.,0.))
tri.plot(ax)
#Tests for the cuboid extension:
fig, ax = init_3dfigure()
cube = Cuboid(base=2, height=2, center=(0.,0.), depth=2)
cube.plot(ax)
# Test code for __str__
square = Rectangle(4, 4, center=(0.,0.))
print(square) # Did this do what you expected?
# Test code for iterator
bigsquare = Rectangle(8, 8, (0.,0.))
square < bigsquare
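# A small illustrative extension: because __lt__ is defined, shapes can be
# sorted by area with the built-in sorted().
shapes = sorted([bigsquare, square, Triangle(base=3, height=2, center=(0., 0.))])
print(shapes[0]) # the smallest area comes first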
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Tasks
Step4: Magic Methods Extension
|
2,613
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-3', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
2,614
|
<ASSISTANT_TASK:>
Python Code:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from metpy.calc import get_wind_components
from metpy.calc import reduce_point_density
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, current_weather, sky_cover, StationPlot, wx_code_map
from metpy.units import units
with get_test_data('station_data.txt') as f:
data = pd.read_csv(f, header=0, usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
names=['stid', 'lat', 'lon', 'slp', 'air_temperature', 'cloud_fraction',
'dew_point_temperature', 'weather', 'wind_dir', 'wind_speed'],
na_values=-99999)
# Drop rows with missing winds
data = data.dropna(how='any', subset=['wind_dir', 'wind_speed'])
# Set up the map projection
proj = ccrs.LambertConformal(central_longitude=-95, central_latitude=35,
standard_parallels=[35])
# Use the cartopy map projection to transform station locations to the map and
# then refine the number of stations plotted by setting a 300km radius
point_locs = proj.transform_points(ccrs.PlateCarree(), data['lon'].values, data['lat'].values)
data = data[reduce_point_density(point_locs, 300000.)]
# Get the wind components, converting from m/s to knots as will be appropriate
# for the station plot.
u, v = get_wind_components((data['wind_speed'].values * units('m/s')).to('knots'),
data['wind_dir'].values * units.degree)
# Convert the fraction value into a code of 0-8 and compensate for NaN values,
# which can be used to pull out the appropriate symbol
cloud_frac = (8 * data['cloud_fraction'])
cloud_frac[np.isnan(cloud_frac)] = 10
cloud_frac = cloud_frac.astype(int)
# Map weather strings to WMO codes, which we can use to convert to symbols
# Only use the first symbol if there are multiple
wx = [wx_code_map[s.split()[0] if ' ' in s else s] for s in data['weather'].fillna('')]
# Change the DPI of the resulting figure. Higher DPI drastically improves the
# look of the text rendering.
plt.rcParams['savefig.dpi'] = 255
# Create the figure and an axes set to the projection.
fig = plt.figure(figsize=(20, 10))
add_metpy_logo(fig, 1080, 290, size='large')
ax = fig.add_subplot(1, 1, 1, projection=proj)
# Add some various map elements to the plot to make it recognizable.
ax.add_feature(cfeature.LAND)
ax.add_feature(cfeature.OCEAN)
ax.add_feature(cfeature.LAKES)
ax.add_feature(cfeature.COASTLINE)
ax.add_feature(cfeature.STATES)
ax.add_feature(cfeature.BORDERS)
# Set plot bounds
ax.set_extent((-118, -73, 23, 50))
#
# Here's the actual station plot
#
# Start the station plot by specifying the axes to draw on, as well as the
# lon/lat of the stations (with transform). We also the fontsize to 12 pt.
stationplot = StationPlot(ax, data['lon'].values, data['lat'].values, clip_on=True,
transform=ccrs.PlateCarree(), fontsize=12)
# Plot the temperature and dew point to the upper and lower left, respectively, of
# the center point. Each one uses a different color.
stationplot.plot_parameter('NW', data['air_temperature'], color='red')
stationplot.plot_parameter('SW', data['dew_point_temperature'],
color='darkgreen')
# A more complex example uses a custom formatter to control how the sea-level pressure
# values are plotted. This uses the standard trailing 3-digits of the pressure value
# in tenths of millibars.
stationplot.plot_parameter('NE', data['slp'], formatter=lambda v: format(10 * v, '.0f')[-3:])
# Plot the cloud cover symbols in the center location. This uses the codes made above and
# uses the `sky_cover` mapper to convert these values to font codes for the
# weather symbol font.
stationplot.plot_symbol('C', cloud_frac, sky_cover)
# Same this time, but plot current weather to the left of center, using the
# `current_weather` mapper to convert symbols to the right glyphs.
stationplot.plot_symbol('W', wx, current_weather)
# Add wind barbs
stationplot.plot_barb(u, v)
# Also plot the actual text of the station id. Instead of cardinal directions,
# plot further out by specifying a location of 2 increments in x and 0 in y.
stationplot.plot_text((2, 0), data['stid'])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The setup
Step2: This sample data has way too many stations to plot all of them. The number
Step3: Now that we have the data we want, we need to perform some conversions
Step4: The payoff
|
2,615
|
<ASSISTANT_TASK:>
Python Code:
# import the modules
import GPy
import csv
import sys
import numpy as np
import pandas as pd
import seaborn as sns
import cPickle as pickle
from matplotlib import cm
import scipy.stats as stats
from scipy.stats import norm
import sklearn.metrics as metrics
from numpy import sqrt, abs, round
from matplotlib import pyplot as plt
from sklearn.neighbors import NearestNeighbors
%matplotlib notebook
# sns properties
sns.set_style("white")
sns.set_context("paper", font_scale=2.0, rc={"lines.linewidth": 2.0})
kinectExt = 'C'
kinectDim = 7500
kinectKey = 'Cloud'
mocapDim = 8
mocapExt = 'T'
mocapKey = 'TopCoord'
dataTypes = ['train','test']
parameters = ['rmse','nrmse','corr']
models = ['mlp','lr','nn','gp','mrd']
nPos = 6
nShr = 4
nMod = 5
nTypes = 2
nParam = 3
dims = [kinectDim,mocapDim]
keys = [kinectKey,mocapKey]
names = []
for nS in range(nShr):
for nP in range(nPos):
names.append('K1S%dP%dT1' % (nS+1,nP+1))
compRes = np.zeros((nTypes,nMod,nParam,nShr,nPos))
for nS in range(nShr):
for nP in range(nPos):
regRes = pickle.load(open('../Results/Exp2/RegRes%d%d.p' % (nS+1,nP+1),'rb'))
mrdRes = pickle.load(open('../Results/Exp2/MRDRes%d%d.p' % (nS+1,nP+1),'rb'))
for d,dataType in zip(range(nTypes),dataTypes):
for m,model in zip(range(nMod-1),models):
for p,parameter in zip(range(nParam),parameters):
compRes[d,m,p,nS,nP] = regRes[dataType][model][parameter].mean()
for d,dataType in zip(range(nTypes),dataTypes):
for p,parameter in zip(range(nParam),parameters):
compRes[d,-1,p,nS,nP] = mrdRes[dataType][parameter].mean()
pickle.dump(compRes,open('Result/metricResults.p','wb'))
# variables for analysis
statRes = {}
for m,model in zip(range(len(models)-1),models):
statRes[model] = {}
for p,parameter in zip(range(len(parameters)),parameters):
_, pval = stats.wilcoxon(compRes[1,m,p,:,:].flatten(), compRes[1,-1,p,:,:].flatten())
statRes[model][parameter] = pval/2.0
print model,': ',statRes[model]
pickle.dump(statRes,open('Result/statResults.p','wb'))
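# A minimal sketch (assumes SciPy >= 1.3): rather than halving the two-sided
# p-value above, the one-sided Wilcoxon test can be requested directly, e.g.
# _, pval = stats.wilcoxon(compRes[1,m,p,:,:].flatten(),
#                          compRes[1,-1,p,:,:].flatten(), alternative='greater')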
nTrials = nShr*nPos
dataTypes = ['Train','Test']
columns = ['Value','Type','Model','Param']
parameters = ['rmse','nrmse','corr']
models = ['MLP','Lin Reg','K-NN','GP Reg','MRD']
dataFrame = pd.DataFrame(columns=columns)
for d,dataType in zip(range(nTypes),dataTypes):
for m,model in zip(range(nMod),models):
for p,parameter in zip(range(nParam),parameters):
dat = np.atleast_2d(compRes[d,m,p,:,:].flatten())
dat = np.concatenate((dat,np.atleast_2d([dataType]*nTrials)),axis=0)
dat = np.concatenate((dat,np.atleast_2d([model]*nTrials)),axis=0)
dat = np.concatenate((dat,np.atleast_2d([parameter]*nTrials)),axis=0)
dF = pd.DataFrame(dat.transpose(),columns=columns)
dataFrame = dataFrame.append(dF)
dataFrame.index = range(nTypes*nMod*nParam*nTrials)
dataFrame['Value'] = dataFrame['Value'].apply(pd.to_numeric)
props = {'connectionstyle':'bar,fraction=0.15', 'arrowstyle':'-', 'lw':2}
dF = dataFrame[dataFrame['Param'] == 'rmse']
fig = plt.figure()
ax = sns.boxplot(x='Type', y='Value', hue='Model', data=dF, notch=True)
ax.set_xlabel('')
ax.set_ylabel('RMSE')
ax.legend_.set_title('')
ax.set_ylim([0.0,1.2])
ax.text(1.24375, 1.03, '*', horizontalalignment='center', verticalalignment='center')
ax.annotate("", xy=(1.1625,1), xycoords='data', xytext=(1.325,1), textcoords='data', arrowprops=props)
ax.text(1.1625, 1.1, '***', horizontalalignment='center', verticalalignment='center')
ax.annotate("", xy=(1.0,1.05), xycoords='data', xytext=(1.325,1.05), textcoords='data', arrowprops=props)
plt.tight_layout()
plt.savefig('Result/modelRMSE.pdf', format='pdf')
dF = dataFrame[dataFrame['Param'] == 'nrmse']
fig = plt.figure()
ax = sns.boxplot(x='Type', y='Value', hue='Model', data=dF, notch=True, fliersize=0)
ax.set_xlabel('')
ax.set_ylabel('NRMSE')
ax.set_ylim([0.0,0.35])
ax.legend_.set_title('')
ax.text(1.24375, 0.20, '***', horizontalalignment='center', verticalalignment='center')
ax.annotate("", xy=(1.1625,0.19), xycoords='data', xytext=(1.325,0.19), textcoords='data', arrowprops=props)
ax.text(1.1625, 0.225, '***', horizontalalignment='center', verticalalignment='center')
ax.annotate("", xy=(1.0,0.21), xycoords='data', xytext=(1.325,0.21), textcoords='data', arrowprops=props)
plt.tight_layout()
plt.savefig('Result/modelNRMSE.pdf', format='pdf')
dF = dataFrame[dataFrame['Param'] == 'corr']
fig = plt.figure()
ax = sns.boxplot(x='Type', y='Value', hue='Model', data=dF, notch=True, fliersize=0)
ax.set_xlabel('')
ax.legend_.set_title('')
ax.set_ylim([0.7,1.04])
ax.set_ylabel('Pearson Correlation')
ax.text(1.24375, 0.99, '***', horizontalalignment='center', verticalalignment='center')
ax.annotate("", xy=(1.1625,0.98), xycoords='data', xytext=(1.325,0.98), textcoords='data', arrowprops=props)
ax.text(1.1625, 1.02, '***', horizontalalignment='center', verticalalignment='center')
ax.annotate("", xy=(1.0,1.0), xycoords='data', xytext=(1.325,1.0), textcoords='data', arrowprops=props)
plt.tight_layout()
plt.savefig('Result/modelCorr.pdf', format='pdf')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Loading
Step2: Statistical Significance Tests
Step3: Plotting
|
2,616
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
sample_numpy_data = np.array(np.arange(24)).reshape((6,4))
dates_index = pd.date_range('20160101', periods=6)
sample_df = pd.DataFrame(sample_numpy_data, index=dates_index, columns=list('ABCD'))
sample_df
sample_df['C']
sample_df[1:4]
sample_df['2016-01-01':'2016-01-04']
sample_df.loc[dates_index[1:3]]
dates_index[2]
sample_df.loc[:,['A','B']]
sample_df.loc['2016-01-01':'2016-01-03',['A','B']]
sample_df.loc['2016-01-03',['D','B']]
sample_df.loc['2016-01-03',['D','B']][0] * 4
sample_df.loc[dates_index[2], 'C']
sample_numpy_data[3]
sample_df.iloc[3]
sample_numpy_data[1:3, 2:4]
sample_df.iloc[1:3, 2:4]
sample_df.iloc[[0,1,3], [0,2]]
sample_df.iloc[1:3,:]
sample_df.iloc[:, 1:3]
sample_df.C >= 14
sample_df[sample_df.A >= 14]
sample_df[sample_df >= 11]
sample_df_2 = sample_df.copy()
sample_df_2['Fruits'] = ['apple', 'orange','banana','strawberry','blueberry','pineapple']
sample_df_2
sample_df_2[sample_df_2['Fruits'].isin(['banana','pineapple', 'smoothy'])]
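# A possible extra illustration (not part of the original steps): negating the
# boolean mask with ~ selects the complement, i.e. all rows whose fruit is NOT in the list
sample_df_2[~sample_df_2['Fruits'].isin(['banana','pineapple'])]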
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: selection using column name
Step2: selection using slice
Step3: selection using date time index
Step4: Selection by label
Step5: Selecting using multi-axis by label
Step6: Label slicing, both endpoints are included
Step7: Reduce number of dimensions for returned object
Step8: using result
Step9: select a scalar
Step10: Selection by Position
Step11: integer slices
Step12: lists of integers
Step13: slicing rows explicitly
Step14: slicing columns explicitly
Step15: Boolean Indexing
Step16: test based upon entire data set
Step17: isin() method
Step18: select rows where 'Fruits' column contains either 'banana' or 'pineapple'; notice 'smoothy', which is not in the column
|
2,617
|
<ASSISTANT_TASK:>
Python Code:
from fretbursts import *
from fretbursts.phtools import phrates
sns = init_notebook(apionly=True)
sns.__version__
# Tweak here matplotlib style
import matplotlib as mpl
mpl.rcParams['font.sans-serif'].insert(0, 'Arial')
mpl.rcParams['font.size'] = 12
%config InlineBackend.figure_format = 'retina'
url = 'http://files.figshare.com/2182601/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5'
download_file(url, save_dir='./data')
filename = "data/0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
d = loader.photon_hdf5(filename)
loader.alex_apply_period(d)
d.calc_bg(fun=bg.exp_fit, time_s=20, tail_min_us='auto', F_bg=1.7)
d.burst_search()
ds1 = d.select_bursts(select_bursts.size, th1=30)
ds = ds1.select_bursts(select_bursts.naa, th1=30)
alex_jointplot(ds)
ph = d.ph_times_m[0]
tau = 100e-6/d.clk_p
tau
tau = 1
tau2 = 2 * (tau**2)
xx = np.arange(-4*tau, 4*tau, tau/100.)
y1 = np.exp(-np.abs(xx) / tau)
y2 = np.exp(-xx**2 / tau2)
plt.plot(xx,y1, label=r'$\exp \left( - \frac{|t|}{\tau} \right)$')
plt.plot(xx, y2, label=r'$\exp \left( - \frac{t^2}{2\tau^2} \right)$')
plt.axvline(2*tau, color='k')
plt.axvline(-2*tau, color='k')
plt.xlabel('t')
plt.legend(fontsize=22, bbox_to_anchor=(1.05, 1), loc=2)
plt.title(r'$\tau = %d$' % tau, fontsize=22);
def calc_fret_2cde(tau, ph, mask_d, mask_a, bursts):
    """Compute FRET-2CDE for each burst.
FRET-2CDE is a quantity that tends to be around 10 for bursts which have no
dynamics, while it has larger values (e.g. 30..100) for bursts with
millisecond dynamics.
References:
Tomov et al. BJ (2012) doi:10.1016/j.bpj.2011.11.4025
Arguments:
tau (scalar): time-constant of the exponential KDE
ph (1D array): array of all-photons timestamps.
mask_d (bool array): mask for DexDem photons
mask_a (bool array): mask for DexAem photons
bursts (Bursts object): object containing burst data
(start-stop indexes are relative to `ph`).
Returns:
FRET_2CDE (1D array): array of FRET_2CDE quantities, one element
        per burst. This array contains NaN for bursts with
        too few photons to compute FRET-2CDE.
    """
# Computing KDE burst-by-burst would cause inaccuracies at the burst edges.
# Therefore, we first compute KDE on the full timestamps array and then
# we take slices for each burst.
# These KDEs are evaluated on all-photons array `ph` (hence the Ti suffix)
# using D or A photons during D-excitation (argument ph[mask_d] or ph[mask_a]).
KDE_DTi = phrates.kde_laplace(ph[mask_d], tau, time_axis=ph)
KDE_ATi = phrates.kde_laplace(ph[mask_a], tau, time_axis=ph)
FRET_2CDE = []
for ib, burst in enumerate(bursts):
burst_slice = slice(int(burst.istart), int(burst.istop) + 1)
if ~mask_d[burst_slice].any() or ~mask_a[burst_slice].any():
# Either D or A photon stream has no photons in current burst,
# thus FRET_2CDE cannot be computed. Fill position with NaN.
FRET_2CDE.append(np.nan)
continue
# Take slices of KDEs for current burst
kde_adi = KDE_ATi[burst_slice][mask_d[burst_slice]]
kde_ddi = KDE_DTi[burst_slice][mask_d[burst_slice]]
kde_dai = KDE_DTi[burst_slice][mask_a[burst_slice]]
kde_aai = KDE_ATi[burst_slice][mask_a[burst_slice]]
# nbKDE does not include the "center" timestamp which contributes 1.
# We thus subtract 1 from the precomputed KDEs.
# The N_CHD (N_CHA) value in the correction factor is the number of
# timestamps in DexDem (DexAem) stream falling within the current burst.
N_CHD = mask_d[burst_slice].sum()
N_CHA = mask_a[burst_slice].sum()
nbkde_ddi = (1 + 2/N_CHD) * (kde_ddi - 1)
nbkde_aai = (1 + 2/N_CHA) * (kde_aai - 1)
# N_CHD (N_CHA) in eq. 6 (eq. 7) of (Tomov 2012) is the number of photons
# in DexDem (DexAem) in current burst. Thus the sum is a mean.
ED = np.mean(kde_adi / (kde_adi + nbkde_ddi)) # (E)_D
EA = np.mean(kde_dai / (kde_dai + nbkde_aai)) # (1 - E)_A
# Compute fret_2cde for current burst
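        # The 110/100 scaling places bursts with no dynamics near FRET-2CDE ~ 10
        # (see the docstring and Tomov et al. 2012)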
fret_2cde = 110 - 100 * (ED + EA)
FRET_2CDE.append(fret_2cde)
return np.array(FRET_2CDE)
def calc_fret_2cde_gauss(tau, ph, mask_d, mask_a, bursts):
    """Compute a modification of FRET-2CDE using a Gaussian kernel.
Reference: Tomov et al. BJ (2012) doi:10.1016/j.bpj.2011.11.4025
Instead of using the exponential kernel (i.e. laplace distribution)
of the original paper, here we use a Gaussian kernel.
Photon density using Gaussian kernel provides a smooth estimate
regardless of the evaluation time. On the contrary, the
laplace-distribution kernel has discontinuities in the derivative
(cuspids) on each time point corresponding to a timestamp.
Using a Gaussian kernel removes the need of using the heuristic
correction (pre-factor) of nbKDE.
Arguments:
        tau (scalar): time-constant (standard deviation) of the Gaussian KDE
ph (1D array): array of all-photons timestamps.
mask_d (bool array): mask for DexDem photons
mask_a (bool array): mask for DexAem photons
bursts (Bursts object): object containing burst data
Returns:
FRET_2CDE (1D array): array of FRET_2CDE quantities, one element
        per burst. This array contains NaN for bursts with
        too few photons to compute FRET-2CDE.
    """
# Computing KDE burst-by-burst would cause inaccuracies at the edges
# So, we compute KDE for the full timestamps
KDE_DTi = phrates.kde_gaussian(ph[mask_d], tau, time_axis=ph)
KDE_ATi = phrates.kde_gaussian(ph[mask_a], tau, time_axis=ph)
FRET_2CDE = []
for ib, burst in enumerate(bursts):
burst_slice = slice(int(burst.istart), int(burst.istop) + 1)
if ~mask_d[burst_slice].any() or ~mask_a[burst_slice].any():
# Either D or A photon stream has no photons in current burst,
# thus FRET_2CDE cannot be computed.
FRET_2CDE.append(np.nan)
continue
kde_ddi = KDE_DTi[burst_slice][mask_d[burst_slice]]
kde_adi = KDE_ATi[burst_slice][mask_d[burst_slice]]
kde_dai = KDE_DTi[burst_slice][mask_a[burst_slice]]
kde_aai = KDE_ATi[burst_slice][mask_a[burst_slice]]
ED = np.mean(kde_adi / (kde_adi + kde_ddi)) # (E)_D
EA = np.mean(kde_dai / (kde_dai + kde_aai)) # (1 - E)_A
fret_2cde = 110 - 100 * (ED + EA)
FRET_2CDE.append(fret_2cde)
return np.array(FRET_2CDE)
tau_s = 50e-6 # in seconds
tau = int(tau_s/d.clk_p) # in raw timestamp units
tau
ph = d.get_ph_times(ph_sel=Ph_sel('all'))
mask_d = d.get_ph_mask(ph_sel=Ph_sel(Dex='Dem'))
mask_a = d.get_ph_mask(ph_sel=Ph_sel(Dex='Aem'))
bursts = ds.mburst[0]
fret_2cde = calc_fret_2cde(tau, ph, mask_d, mask_a, bursts)
fret_2cde_gauss = calc_fret_2cde_gauss(tau, ph, mask_d, mask_a, bursts)
len(fret_2cde), len(fret_2cde_gauss), bursts.num_bursts, ds.num_bursts
plt.figure(figsize=(4.5, 4.5))
hist_kws = dict(edgecolor='k', linewidth=0.2,
facecolor=sns.color_palette('Spectral_r', 100)[7])
valid = np.isfinite(fret_2cde)
sns.kdeplot(ds.E[0][valid], fret_2cde[valid],
cmap='Spectral_r', shade=True, shade_lowest=False, n_levels=20)
plt.xlabel('E', fontsize=16)
plt.ylabel('FRET-2CDE', fontsize=16);
plt.ylim(-10, 50);
plt.axhline(10, ls='--', lw=2, color='k')
plt.text(0.05, 0.95, '2CDE', va='top', fontsize=22, transform=plt.gca().transAxes)
plt.text(0.95, 0.95, '# Bursts: %d' % valid.sum(),
va='top', ha='right', transform=plt.gca().transAxes)
plt.savefig('2cde.png', bbox_inches='tight', dpi=200, transparent=False)
valid = np.isfinite(fret_2cde)
x, y = ds.E[0][valid], fret_2cde[valid]
hist_kws = dict(edgecolor='k', linewidth=0.2,
facecolor=sns.color_palette('Spectral_r', 100)[7])
g = sns.JointGrid(x=x, y=y, ratio=3)
g.plot_joint(sns.kdeplot, cmap='Spectral_r', shade=True, shade_lowest=False, n_levels=20)
g.ax_marg_x.hist(x, bins=np.arange(-0.2, 1.2, 0.0333), **hist_kws)
g.ax_marg_y.hist(y, bins=70, orientation="horizontal", **hist_kws)
g.ax_joint.set_xlabel('E', fontsize=16)
g.ax_joint.set_ylabel('FRET-2CDE', fontsize=16);
g.ax_joint.set_ylim(-10, 50);
g.ax_joint.set_xlim(-0.1, 1.1);
g.ax_joint.axhline(10, ls='--', lw=2, color='k')
g.ax_joint.text(0.05, 0.95, '2CDE', va='top', fontsize=22, transform=g.ax_joint.transAxes)
g.ax_joint.text(0.95, 0.95, '# Bursts: %d' % valid.sum(),
va='top', ha='right', transform=g.ax_joint.transAxes)
plt.savefig('2cde_joint.png', bbox_inches='tight', dpi=200, transparent=False)
bursts = ds1.mburst[0]
ph_dex = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'))
ph_aex = d.get_ph_times(ph_sel=Ph_sel(Aex='Aem'))
mask_dex = d.get_ph_mask(ph_sel=Ph_sel(Dex='DAem'))
mask_aex = d.get_ph_mask(ph_sel=Ph_sel(Aex='Aem'))
KDE_DexTi = phrates.kde_laplace(ph_dex, tau, time_axis=ph)
KDE_AexTi = phrates.kde_laplace(ph_aex, tau, time_axis=ph)
ALEX_2CDE = []
BRDex, BRAex = [], []
for ib, burst in enumerate(bursts):
burst_slice = slice(int(burst.istart), int(burst.istop) + 1)
if ~mask_dex[burst_slice].any() or ~mask_aex[burst_slice].any():
# Either D or A photon stream has no photons in current burst,
# thus ALEX_2CDE cannot be computed.
ALEX_2CDE.append(np.nan)
continue
kde_dexdex = KDE_DexTi[burst_slice][mask_dex[burst_slice]]
kde_aexdex = KDE_AexTi[burst_slice][mask_dex[burst_slice]]
N_chaex = mask_aex[burst_slice].sum()
BRDex.append(np.sum(kde_aexdex / kde_dexdex) / N_chaex)
kde_aexaex = KDE_AexTi[burst_slice][mask_aex[burst_slice]]
kde_dexaex = KDE_DexTi[burst_slice][mask_aex[burst_slice]]
N_chdex = mask_dex[burst_slice].sum()
BRAex.append(np.sum(kde_dexaex / kde_aexaex) / N_chdex)
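    # Combine the two per-burst brightness ratios into a single ALEX-2CDE score
    # (Tomov et al. 2012)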
alex_2cde = 100 - 50*(BRDex[-1] - BRAex[-1])
ALEX_2CDE.append(alex_2cde)
ALEX_2CDE = np.array(ALEX_2CDE)
ALEX_2CDE.size, np.isfinite(ALEX_2CDE).sum(), np.isfinite(ds1.E[0]).sum()
hist_kws = dict(edgecolor='k', linewidth=0.2,
facecolor=sns.color_palette('Spectral_r', 100)[7])
valid = np.isfinite(ALEX_2CDE)
g = sns.JointGrid(x=ds1.E[0][valid], y=ALEX_2CDE[valid], ratio=3)
g = g.plot_joint(plt.hexbin, **{'cmap': 'Spectral_r', 'mincnt': 1, 'gridsize': 40})
_ = g.ax_marg_x.hist(ds1.E[0][valid], bins=np.arange(-0.2, 1.2, 0.0333), **hist_kws)
_ = g.ax_marg_y.hist(ALEX_2CDE[valid], bins=40, orientation="horizontal", **hist_kws)
g.ax_joint.set_xlabel('E', fontsize=16)
g.ax_joint.set_ylabel('ALEX-2CDE', fontsize=16);
g.ax_joint.text(0.95, 0.95, '# Bursts: %d' % valid.sum(),
va='top', ha='right', transform=g.ax_joint.transAxes);
valid = np.isfinite(ALEX_2CDE)
print('Number of bursts (removing NaNs/Infs):', valid.sum())
g = sns.JointGrid(x=ds1.S[0][valid], y=ALEX_2CDE[valid], ratio=3)
g = g.plot_joint(plt.hexbin, **{'cmap': 'Spectral_r', 'mincnt': 1, 'gridsize': 40})
_ = g.ax_marg_x.hist(ds1.S[0][valid], bins=np.arange(0, 1.2, 0.0333), **hist_kws)
_ = g.ax_marg_y.hist(ALEX_2CDE[valid], bins=40, orientation="horizontal", **hist_kws)
g.ax_joint.set_xlabel('S', fontsize=16)
g.ax_joint.set_ylabel('ALEX-2CDE', fontsize=16)
g.ax_joint.text(0.95, 0.95, '# Bursts: %d' % valid.sum(),
va='top', ha='right', transform=g.ax_joint.transAxes);
masks = [valid * (ALEX_2CDE < 88) * (ds1.S[0] > 0.9)]
ds2 = ds1.select_bursts_mask_apply(masks)
alex_jointplot(ds2, vmax_fret=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Data
Step2: KDE considerations
Step5: Notes on Kernel Shape
Step6: FRET-2CDE Results
Step7: Next, we get the timestamps and selection masks for DexDem and DexAem photon streams,
Step8: We can finally compute the FRET-2CDE for each burst
Step9: And visualize the results with some plots
Step10: ALEX-2CDE Definition
Step11: And some final plots of ALEX-2CDE
|
2,618
|
<ASSISTANT_TASK:>
Python Code:
import ipywidgets
import IPython.display
import iris
import numpy as np
import iris.quickplot as iplt
import matplotlib.pyplot as plt
cube = iris.load_cube(iris.sample_data_path('A1B.2098.pp'))
print(cube)
plot_type_dict = {'contour': iplt.contour, 'contourf': iplt.contourf, 'pcolor': iplt.pcolor, 'outline': iplt.outline,
'pcolormesh': iplt.pcolormesh, 'plot': iplt.plot, 'points': iplt.points}
plot_types = plot_type_dict.keys()
sorted(plot_types)
im = ipywidgets.Dropdown(
description='Plot-type:',
options=plot_types,
value='contour')
IPython.display.display(im)
print(im.value)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load cube.
Step2: Compose and sort a dictionary of plot-types and then construct a widget to present them, along with a default option. Display the widget using the IPython display call.
Step3: Print selected widget value for clarity.
|
2,619
|
<ASSISTANT_TASK:>
Python Code:
from quspin.operators import hamiltonian # Hamiltonians and operators
from quspin.basis import spinless_fermion_basis_1d, tensor_basis # Hilbert space fermion and tensor bases
import numpy as np # generic math functions
##### define model parameters #####
L=4 # system size
J=1.0 # hopping
U=np.sqrt(2.0) # interaction
mu=0.0 # chemical potential
# define boson basis with 3 states per site L bosons in the lattice
N_up = L//2 + L % 2 # number of fermions with spin up
N_down = L//2 # number of fermions with spin down
basis_up=spinless_fermion_basis_1d(L,Nf=N_up)
basis_down=spinless_fermion_basis_1d(L,Nf=N_down)
basis = tensor_basis(basis_up,basis_down) # spinful fermions
print(basis)
from quspin.basis import spinful_fermion_basis_1d
basis = spinful_fermion_basis_1d(L,Nf=(N_up,N_down))
print(basis)
# define site-coupling lists
hop_right=[[-J,i,(i+1)%L] for i in range(L)] #PBC
hop_left= [[+J,i,(i+1)%L] for i in range(L)] #PBC
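# Note the opposite signs of hop_right/hop_left: together the '+-' and '-+' terms
# form a hermitian fermionic hopping operator (mind the fermion sign convention)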
pot=[[-mu,i] for i in range(L)] # -\mu \sum_j n_{j \sigma}
interact=[[U,i,i] for i in range(L)] # U \sum_j n_{j,up} n_{j,down}
# define static and dynamic lists
static=[
['+-|',hop_left], # up hops left
['-+|',hop_right], # up hops right
['|+-',hop_left], # down hops left
['|-+',hop_right], # down hops right
        ['n|',pot], # up on-site potential
        ['|n',pot], # down on-site potential
['n|n',interact] # up-down interaction
]
dynamic=[]
# build Hamiltonian
no_checks = dict(check_pcon=False,check_symm=False,check_herm=False)
H=hamiltonian(static,dynamic,basis=basis,dtype=np.float64,**no_checks)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To build the basis for spinful fermions, we take two copies of the basis for spinless fermions and tensor them using the tensor_basis constructor. While the tensor_basis can be used to tensor any two bases objects, it does not allow for passing symmetries, other than particle number conservation (we are currently working on developing a separate class which will allow using all symmetries for spinful fermions).
Step2: Alternatively, one can use the spinful_fermion_basis_1d class as well. This class, unlike the tensor_basis class can handle various 1d symmetries in the usual way and should be preferred for dealing with the FHM.
Step3: Defining the site-coupling lists is the same as before (mind the signs in the fermion hopping operator, though!).
|
2,620
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
import cirq
from cirq.aqt.aqt_device import get_aqt_device
from cirq.aqt.aqt_sampler import AQTSampler
access_token = 'MY_TOKEN'
device, qubits = get_aqt_device(2)
print(device)
circuit = cirq.Circuit(device=device)
circuit.append([cirq.XX(qubits[0], qubits[1])**0.5])
device.validate_circuit(circuit)
print(circuit, qubits)
url = 'BACKEND_URL'
aqt_sampler = AQTSampler(url, access_token=access_token)
aqt_sweep = aqt_sampler.run(circuit, repetitions=100)
print(aqt_sweep)
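# Example endpoint for the ideal (noise-free) AQT simulator: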
url = 'https://gateway.aqt.eu/marmot/sim/'
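# Endpoint for the simulator with noise model 1: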
url = 'https://gateway.aqt.eu/marmot/sim/noise-model-1'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: AQT Cirq Tutorial
Step2: AQT supports Cirq as a third party software development kit and offers access to various quantum computing devices and simulators in the backend. Login to the AQT Gateway Portal to get a list of available devices.
Step3: Where MY_TOKEN is your access token for a specific AQT device. You need to subscribe to an AQT backend at the AQT Gateway Portal and retrieve the access token. Then you can access the AQT device by
Step4: Sample a quantum circuit
Step5: This circuit can then be sampled on the real-hardware backend as well as on a simulator.
Step6: Where BACKEND_URL is the API URL of the AQT backend as specified in your subscription.
Step7: whereas for a simulation with noise model use
|
2,621
|
<ASSISTANT_TASK:>
Python Code:
df = pd.read_csv("congress.csv", error_bad_lines=False)
df.head()
#bioguide: The alphanumeric ID for legislators in http://bioguide.congress.gov.
df['chamber'].value_counts() #sounds like a lot. We might have repetitions.
df['bioguide'].describe() #we count the bioguide, which is unique to each legislator.
#There are only 3188 unique values, hence only 3188 senators and representatives in total.
total_democrats = (df['party'] == 'D').value_counts()
total_democrats
total_republicans =(df['party'] == 'R').value_counts()
total_republicans
df['age'].describe()
df.groupby("chamber")['age'].describe()
df['state'].value_counts()
df.groupby("state")['chamber'].value_counts()
df['termstart'].describe() #here we would look at unique.
df.sort_values(by='age').tail(1) #A senator!
representative = df[df['chamber'] == 'house']
representative.sort_values(by='age').tail(1)
representative.sort_values(by='age').head(2)
senator = df[df['chamber'] == 'senate']
senator.sort_values(by='age')
senator.sort_values(by='age').head(2)
# Store a new column
df['complete_name'] = df['firstname']+ " "+ df['middlename'] + " "+df['lastname']
df.head()
period_count = df.groupby('complete_name')['termstart'].value_counts().sort_values(ascending=False)
pd.DataFrame(period_count)
#With the help of Stephan we figured out that term-start is every 2 years
#(so this is not giving us info about how many terms has each legislator served)
terms_served_by_senators= senator.groupby('complete_name')['bioguide'].value_counts()
years= terms_served_by_senators * 2
total_years_served = years.sort_values(ascending=False)
pd.DataFrame(total_years_served)
terms_served_by_representative= representative.groupby("complete_name")['bioguide'].value_counts()
years= terms_served_by_representative * 2
total_years_served = years.sort_values(ascending=False)
pd.DataFrame(total_years_served)
df['firstname'].value_counts()
#this might be counting the same person many times but still we can get an idea of what names are more popular
plt.style.use("ggplot")
df['age'].hist(bins=15, xlabelsize=12, ylabelsize=12, color=['y'])
df.head(20).sort_values(by='age',ascending=True).plot(kind='barh', x='complete_name', y='age', color="y")
df.plot.scatter(x='congress', y='age');
df.plot.hexbin(x='age', y='congress', gridsize=25, legend=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Do a .head() to get a feel for your data
Step2: Write down 12 questions to ask your data, or 12 things to hunt for in the data
Step3: 2) How many from each party in total ?
Step4: 3) What is the average age for people that have worked in congress (both Senators and Representatives)
Step5: 4) What is the average age of Senators that have worked in the Senate? And for Representatives in the house?
Step6: 5) How many in total from each state?
Step7: 5) How many Senators in total from each state? How many Representatives?
Step8: 6) How many terms are recorded in this dataset?
Step9: 7) Who has been the oldest serving in the US, a senator or a representative? How old was he/she?
Step10: 8) Who have been the oldest and youngest serving Representative in the US?
Step11: 9) Who have been the oldest and youngest serving Senator in the US?
Step12: 10) Who has served for more periods (in this question I am not paying attention to the period length)?
Step13: We double-checked it by printing info from Thurmond, who was part of the Senate but appeared as if he had
Step14: 12)The most popular name in congress is....
Step15: Make three charts with your dataset
|
2,622
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import scipy
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split,cross_val_score, KFold, cross_val_predict
from sklearn.decomposition import PCA as sklearn_pca
from sklearn.decomposition import PCA
from sklearn.utils import resample
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn import preprocessing, decomposition
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import RFE
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.neural_network import MLPClassifier
# Read and import data
data = pd.read_csv('breastcancerdata.csv')
data.head()
#Check the columns in the raw data
data.columns
#Check the kind of variables in the raw data
data.info()
#deleting the "id" column
data.drop("id",axis=1,inplace=True)
#deleting the "Unnamed: 32" column
data.drop("Unnamed: 32",axis=1,inplace=True)
#Check variables type after deleting the ones we are not using
data.info()
#counting the diagnosis variable
data.diagnosis.value_counts()
#Transform classifying variable into numeric variable [0,1] and add a column
data.loc[data['diagnosis'] == 'M', 'Diagclass'] = 1
data.loc[data['diagnosis'] == 'B', 'Diagclass'] = 0
#Check dataset
data.head()
#Dataset with new classifying variable "Diagclass" and withouth the diagnosis column
data.drop("diagnosis",axis=1,inplace=True)
#counting the diagnosis variable
data.Diagclass.value_counts()
#Downsample the majority class to balance the classes
# Separate majority and minority classes
Diagclass_majority = data[data.Diagclass==0]
Diagclass_minority = data[data.Diagclass==1]
# Downsample Diagclass majority
Diagclass_majority_downsampled = resample(Diagclass_majority, replace=False, n_samples=212, random_state=123)
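# n_samples=212 matches the size of the malignant (minority) class,
# so the result is a balanced 212/212 dataset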
# Combine the downsampled majority class with the minority class
data1 = pd.concat([Diagclass_majority_downsampled, Diagclass_minority])
# Display new class counts
data1.Diagclass.value_counts()
#Define predictors and predicted variables
X = data1.drop('Diagclass', axis = 1)
Y = data1['Diagclass']
#Preprocess and scale data
names = X.columns
X1 = pd.DataFrame(preprocessing.scale(X), columns = names)
X1.head(2)
# Build up the correlation matrix
Z = X1
correlation_matrix = Z.corr()
#Eigenvectors & Eigenvalues
eig_vals, eig_vecs = np.linalg.eig(correlation_matrix)
sklearn_pca = PCA(n_components=len(Z.columns))
Y_sklearn = sklearn_pca.fit_transform(correlation_matrix)
#From the Scree plot.
plt.plot(eig_vals)
plt.show()
print(
'The percentage of total variance in the dataset explained by each',
'component from Sklearn PCA.\n',
sklearn_pca.explained_variance_ratio_
)
#PCA features
# Create a scaler object
sc = StandardScaler()
# Fit the scaler to the features and transform
X_std = sc.fit_transform(X1)
# Create a PCA object; from the scree plot, the number of components is 3
pca = decomposition.PCA(n_components=3)
# Fit the PCA and transform the data
X_std_pca = pca.fit_transform(X_std)
# View the new feature data's shape
X_std_pca.shape
# Create a new dataframe with the new features
XPCA = pd.DataFrame(X_std_pca)
XPCA.head()
#Calculate Feature Importance using Random Forest
rf = RandomForestClassifier()
rf.fit(X1, Y)
#Define feature importance
feature_importance = rf.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(7, 30))
plt.subplot(1, 1, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X1.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Diagclass')
plt.show()
#Feature Selection: score each feature to rank those with the most explanatory power
# feature extraction
test = SelectKBest()
fit = test.fit(X1, Y)
#Identify features with highest score from a predictive perspective (for all programs)
names2 = X1.columns
Bestfeatures = pd.DataFrame(fit.scores_, index = names2)
Bestfeatures.columns = ['Best Features']
Bestfeatures.sort_values(by=['Best Features'], ascending=False)
# create the RFE model and select features
#From the PCA analysis, the number of components is 3
nfeatures = 3
lr = LogisticRegression()
rfe = RFE(lr,nfeatures)
fit = rfe.fit(X1,Y)
# summarize the selection of the features
result_RFE = pd.DataFrame(list(zip(X1.head(0), rfe.ranking_, rfe.support_)),columns=['Features','Ranking','Support'] )
result_RFE.sort_values('Ranking')
#View all the predictors to make the feature selection
X1.columns
#Feature Selection using Random Forest
X3 = X1[['perimeter_worst', 'area_worst', 'concave points_mean', 'concavity_mean','radius_worst','perimeter_mean',
'concavity_worst', 'compactness_mean','concave points_worst','compactness_worst']]
#Feature Selection using RFE & PCA
X2 = X1[['radius_worst','concave points_worst','perimeter_worst']]
#Split the data into training and testing datasets. Split: 70/30; train/test
X_train, X_test, y_train, y_test = train_test_split(X2,Y, test_size=0.3, random_state=123)
#Initiating the cross validation generator, N splits = 5
kf = KFold(5)
# Initialize the model
rf = RandomForestClassifier(n_jobs = -1)
#Create range of values to fit parameters
k1 = [20,100,300]
parameters = {'n_estimators':k1}
#Fit parameters
rf1 = GridSearchCV(rf, param_grid=parameters, cv=kf)
#Fit the tuned model
rf1.fit(X_train, y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", rf1.best_params_)
#Fit in test dataset
rf1.fit(X_test, y_test)
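# Caution: refitting on the test split leaks test data into the model;
# the standard practice is to keep the estimator fitted on X_train and only predict here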
#Predict on test dataset
predtestrf_y = rf1.predict(X_test)
#Test Scores
target_names = ['0', '1']
print(classification_report(y_test, predtestrf_y, target_names=target_names))
cnf = confusion_matrix(y_test, predtestrf_y)
print(cnf)
table_test = pd.crosstab(y_test, predtestrf_y, margins=True)
test_tI_errors = table_test.loc[0.0,1.0]/table_test.loc['All','All']
test_tII_errors = table_test.loc[1.0,0.0]/table_test.loc['All','All']
accrf1 = cross_val_score(rf1,X_test,y_test,cv=kf).mean()
accrf1pca = cross_val_score(rf1,XPCA,Y,cv=kf).mean()
print((
'Random Forest accuracy:{}\n'
'Random Forest accuracy PCA:{}\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}'
).format(accrf1,accrf1pca,test_tI_errors, test_tII_errors))
# Train model
GBC = GradientBoostingClassifier()
k1 = ['deviance','exponential']
k2 = np.arange(100)+1
k5 = ['friedman_mse','mse','mae']
parameters = {'loss': k1,
'n_estimators': k2,
'criterion': k5}
#Fit parameters
GBC1 = GridSearchCV(GBC, param_grid=parameters, cv=kf)
#Fit the tuned model
GBC1.fit(X_train, y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", GBC1.best_params_)
#Fit on the test set
GBC1.fit(X_test, y_test)
# Predict on test set
predtestgb_y = GBC1.predict(X_test)
#Test Scores
target_names = ['0', '1']
print(classification_report(y_test, predtestgb_y, target_names=target_names))
cnf = confusion_matrix(y_test, predtestgb_y)
print(cnf)
table_test = pd.crosstab(y_test, predtestgb_y, margins=True)
test_tI_errors = table_test.loc[0.0,1.0]/table_test.loc['All','All']
test_tII_errors = table_test.loc[1.0,0.0]/table_test.loc['All','All']
accGBC1 = cross_val_score(GBC1,X_test,y_test,cv=kf).mean()
accGBC1pca = cross_val_score(GBC1,XPCA,Y,cv=kf).mean()
print((
'Gradient Boosting accuracy:{}\n'
'Gradient Boosting accuracy PCA:{}\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}'
).format(accGBC1,accGBC1pca,test_tI_errors, test_tII_errors))
# Initialize and fit the model.
mlp = MLPClassifier(max_iter=1000, tol=0.01)
#Tune hyperparameters
#Create range of values to fit parameters
hidden_layer_sizes= [(50,),(100,),(1000,),(1000,10),(100,20)]
alpha = 10.0 ** -np.arange(1, 3)
activation = ['identity', 'logistic', 'tanh', 'relu']
parameters = {'hidden_layer_sizes': hidden_layer_sizes,
'alpha': alpha,
'activation': activation}
#Fit parameters using gridsearch
mlp_tuned = GridSearchCV(mlp, param_grid=parameters, cv=5)
#Fit the tuned model
mlp_tuned.fit(X_train, y_train)
#Fit on the test set
mlp_tuned.fit(X_test, y_test)
# Predict on test set
predtest_y = mlp_tuned.predict(X_test)
#Test Scores
target_names = ['0', '1']
print(classification_report(y_test, predtest_y, target_names=target_names))
cnf = confusion_matrix(y_test, predtest_y)
print(cnf)
table_test = pd.crosstab(y_test, predtest_y, margins=True)
test_tI_errors = table_test.loc[0.0,1.0]/table_test.loc['All','All']
test_tII_errors = table_test.loc[1.0,0.0]/table_test.loc['All','All']
accmlp = cross_val_score(mlp_tuned,X_test,y_test,cv=kf).mean()
accmlppca = cross_val_score(mlp_tuned,XPCA,Y,cv=kf).mean()
print((
'MLP accuracy:{}\n'
'MLP accuracy PCA:{}\n'
'Percent Type I errors: {}\n'
'Percent Type II errors: {}'
).format(accmlp,accmlppca,test_tI_errors, test_tII_errors))
#Print the best parameters
print(mlp_tuned.best_params_)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PCA Analysis
Step2: Feature Selection
Step3: Random Forest
Step4: Gradient Boosting
Step5: Neural Network
|
2,623
|
<ASSISTANT_TASK:>
Python Code:
%pylab nbagg
import sygma as s
reload(s)
s.__file__
#from imp import *
#s=load_source('sygma','/home/nugrid/nugrid/SYGMA/SYGMA_online/SYGMA_dev/sygma.py')
from scipy.integrate import quad
from scipy.interpolate import UnivariateSpline
import matplotlib.pyplot as plt
import numpy as np
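# Semi-analytic check, assuming a Salpeter IMF dN/dm = k_N * m**(-2.35):
# (I)  fixes k_N so the total stellar mass formed between 10 and 30 Msun is mgal = 1e11 Msun
# (II) integrates the IMF over the same range to get the total number of stars
# Each star then ejects 0.1 Msun of H-1 in these artificial test yield tables.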
k_N=1e11*0.35/ (10**-0.35 - 30**-0.35) #(I)
N_tot=k_N/1.35 * (10**-1.35 - 30**-1.35) #(II)
Yield_tot=0.1*N_tot
reload(s)
s1=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,special_timesteps=-1,imf_type='salpeter',
imf_yields_range_pop3=[10,30],imf_bdys_pop3=[10,30],
pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',
sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt',
iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
s2=s.sygma(iolevel=0,mgal=1e11,dt=1e5,tend=1e7,special_timesteps=-1,imf_type='salpeter',
imf_yields_range_pop3=[10,30],imf_bdys_pop3=[10,30],
pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',
sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt',
iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
Yield_tot_sim=s1.history.ism_iso_yield[-1][0]
print Yield_tot_sim
print Yield_tot
print 'ratio should be 1 : ',Yield_tot_sim/Yield_tot
print s1.history.ism_iso_yield_agb[-1][0]
print 'should be 1: ',s1.history.ism_iso_yield_massive[-1][0]/Yield_tot
print 'No SNIa contribution:',s1.history.ism_iso_yield_1a[-1][0]
s1.plot_totmasses(fig=1,source='all',markevery=2,marker='^')
s1.plot_totmasses(fig=1,source='agb')
s1.plot_totmasses(fig=1,source='massive',marker='x',markevery=3)
s1.plot_totmasses(fig=1,source='sn1a',marker='D')
s1.plot_mass(fig=1,specie='H-1',marker='+',markevery=1)
#plt.legend(loc=7,fontsize=14)
#mpld3.display()
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlim(1e6,1e9)
s2.plot_totmasses(fig=2,source='all',markevery=4,marker='^')
s2.plot_totmasses(fig=2,source='massive',marker='x',markevery=6)
s2.plot_mass(fig=2,specie='H-1',marker='+',markevery=10)
#plt.legend(loc=7,fontsize=14)
#mpld3.display()
#plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.xlim(6e6,3e7)
import read_yields as ry
y=ry.read_nugrid_yields('yield_tables/popIII_h1.txt')
zm_lifetime_grid=s1.zm_lifetime_grid_current
idx_z = (np.abs(zm_lifetime_grid[0]-0.0001)).argmin() #Z=0
grid_masses=zm_lifetime_grid[1][::-1]
grid_lifetimes=zm_lifetime_grid[2][idx_z][::-1]
plt.figure(981)
plt.plot(grid_masses,grid_lifetimes,label='spline fit grid points (SYGMA)',marker='x')
m=[]
ages=[]
for k in range(len(y.table_mz)):
m_ini=float(y.table_mz[k].split(',')[0].split('=')[1])
if m_ini>=30:
continue
m.append(m_ini)
ages.append(y.age[k])
plt.plot(np.array(m),np.log10(np.array(ages)),marker='+',markersize=20,label='input yield grid',linestyle='None')
plt.xlabel('Mini/Msun')
plt.ylabel('log lifetime');plt.legend(prop={'size':14})
s2.plot_totmasses(fig=561,marker='x',label='totmass',markevery=2)
s2.plot_mass(fig=561,specie='H',label='H, sim',color='k',shape='-',marker='o',markevery=3)
import read_yields as ry
y=ry.read_nugrid_yields('yield_tables/popIII_h1.txt')
m=[]
ages=[]
for k in range(len(y.table_mz)):
m_ini=float(y.table_mz[k].split(',')[0].split('=')[1])
if m_ini>=30:
continue
m.append(m_ini)
ages.append(y.age[k])
#print m[-1],ages[-1]
def yields(m,k_N):
return ( k_N/1.35 * (m**-1.35 - 30.**-1.35) ) * 0.1
yields1=[]
for m1 in m:
yields1.append(yields(m1,k_N))
plt.figure(561)
plt.plot(ages,yields1,marker='+',linestyle='',markersize=15,label='H, semi')
plt.legend(loc=4)
plt.xlim(5e6,3e7)
k_N=1e11*0.35/ (15**-0.35 - 30**-0.35)
N_tot=k_N/1.35 * (15**-1.35 - 30**-1.35)
Yield_tot=0.1*N_tot
# imf_bdys_pop3=[15,30]
##reload(chem_evol)
#dreload(s)
import sygma as s
reload(s)
s1=s.sygma(iolevel=1,mgal=1e11,iniZ=0,dt=1e7,tend=1.3e10,imf_type='salpeter',
imf_bdys=[10,30],imf_bdys_pop3=[15,30],pop3_table='yield_tables/popIII_h1.txt',
table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt',
iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
Yield_tot_sim=s1.history.ism_iso_yield[-1][0]
print 'Should be 1:', Yield_tot_sim/Yield_tot
k_N=1e11*0.35/ (10**-0.35 - 15**-0.35)
N_tot=k_N/1.35 * (10**-1.35 - 15**-1.35)
Yield_tot=0.1*N_tot
s1=s.sygma(iolevel=1,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',imf_yields_range_pop3=[10,30],imf_bdys_pop3=[10,15],pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
Yield_tot_sim=s1.history.ism_iso_yield[-1][0]
k_N=1e11*0.35/ (5**-0.35 - 100**-0.35) # IMF range
N_tot=k_N/1.35 * (10**-1.35 - 30**-1.35) # yield range
Yield_tot=0.1*N_tot
s1=s.sygma(iolevel=1,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',imf_bdys_pop3=[5,100],
pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',
sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt',
iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
#imf_yields_range_pop3=[10,30],
Yield_tot_sim=s1.history.ism_iso_yield[-1][0]
print s1.imf_bdys,s1.imf_bdys_pop3,s1.imf_yields_range_pop3
print Yield_tot_sim
print Yield_tot
print 'Should be 1:', Yield_tot_sim/Yield_tot
alphaimf = 1.5 #Set test alphaimf
k_N=1e11*(alphaimf-2)/ (-10**-(alphaimf-2) + 30**-(alphaimf-2))
N_tot=k_N/(alphaimf-1) * (-10**-(alphaimf-1) + 30**-(alphaimf-1))
Yield_tot=0.1*N_tot
s1=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='alphaimf',alphaimf=1.5,imf_bdys_pop3=[10,30],pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
Yield_tot_sim=s1.history.ism_iso_yield[-1][0]
print 'Should be 1 :',Yield_tot/Yield_tot_sim
reload(s)
s1=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='alphaimf',imf_bdys_pop3=[10,30],pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
s2=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='alphaimf',imf_bdys_pop3=[10,30],pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=True, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
print 'Should be 0:',(s1.history.ism_elem_yield_1a[0]),(s1.history.ism_elem_yield_1a[-1])
print (s1.history.ism_elem_yield[0]),(s1.history.ism_elem_yield[-1])
print 'Should be 0:',(s2.history.ism_elem_yield_1a[0]),(s2.history.ism_elem_yield_1a[-1])
print (s2.history.ism_elem_yield[0]),(s2.history.ism_elem_yield[-1])
print (s1.history.ism_elem_yield[-1][0] + s2.history.ism_elem_yield_1a[-1][0])/s2.history.ism_elem_yield[-1][0]
#s2.plot_mass(specie='H-1',source='sn1a') #plot s1 data (without sn) cannot be plotted -> error, maybe change plot function?
s0=s.sygma(iolevel=0,imf_bdys=[0.01,100],imf_yields_range=[0.02,99],imf_type='chabrier',transitionmass=6,sfr='input',iniZ=0.0,\
dt=1e7,tend=1.3e10, mgal=1e1,sn1a_on=True,sn1a_rate='exp',exp_dtd=2e9,exclude_masses=[100,6,7],netyields_on=True,pop3_table='yield_tables/popIII_h1.txt')
s1=s.sygma(iolevel=1,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',imf_bdys_pop3=[10,30],imf_yields_range_pop3=[20,30],iniZ=-1,pop3_table='yield_tables/popIII_h1.txt',table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=False, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
Yield_tot_sim=s1.history.ism_iso_yield[-1][0]
k_N=1e11*0.35/ (10**-0.35 - 30**-0.35)
N_tot=k_N/1.35 * (20**-1.35 - 30**-1.35)
Yield_tot=0.1*N_tot
s1.imf_bdys,s1.imf_bdys_pop3,s1.imf_yields_range_pop3
print 'Should be 1:', Yield_tot_sim/Yield_tot
# yield range [20,30]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Results
Step2: Includes stars from 10Msun to 30Msun (upper end consistent with higher Z).
Step3: Test of distinguishing between massive PoPIII and (not relevant) other sources
Step4: As expected massive stars contribute to 'All'.
Step5: Higher resolution
Step6: Calculating yield ejection over time
Step7: Test of parameter imf_bdys
Step8: Select imf_bdys=[10,15]
Step9: Select imf_bdys_pop3=[5,100] ; NEEDS TO BE TESTED
Step10: The code should ignore s1.imf_bdys and use s1.imf_bdys_pop3
Step11: Test of parameter imf_type
Step12: Testing other IMF type does not make sense since range only considers massive stars
Step13: with net yields netyields_on = True
Step14: End of test suite (for now)
|
2,624
|
<ASSISTANT_TASK:>
Python Code:
s= 'wordsmith'
vowels = {'a','e','i','o','u'}
count = 0
for char in s:
if char in vowels:
count+=1
print "Number of vowels: " + str(count)
s = 'azcbobobegghakl'
pattern = 'bob'
count =0
for position in range(0,len(s)):
if s[position:position+3]==pattern:
count+=1
print(count)
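# Unlike s.count('bob'), this sliding-window scan also counts overlapping occurrences
# (here 'bobob' contributes two matches).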
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. COUNTING BOBS
|
2,625
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'csiro-bom', 'sandbox-1', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
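# EXAMPLE (hypothetical fill-in; the value is taken from the valid-choice list above):
# a property is completed by pairing its DOC.set_id(...) call with one or more
# DOC.set_value(...) calls, e.g. for the alkalinity property just above:
# DOC.set_value("Prognostic")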
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|
2,626
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df_train = pd.read_csv("http://bit.do/house-price")
df_train.head()
df_train.columns
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.distplot(df_train["SalePrice"]);
df_train.plot.scatter(x="GrLivArea", y="SalePrice")
df_train.plot.scatter(x="TotalBsmtSF", y="SalePrice")
var = 'OverallQual'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(8, 6))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
var = 'YearBuilt'
data = pd.concat([df_train['SalePrice'], df_train[var]], axis=1)
f, ax = plt.subplots(figsize=(16, 8))
fig = sns.boxplot(x=var, y="SalePrice", data=data)
fig.axis(ymin=0, ymax=800000);
plt.xticks(rotation=90);
corrmat = df_train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
sns.set()
cols = ['SalePrice', 'OverallQual', 'GrLivArea', 'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']
sns.pairplot(df_train[cols], height = 2.5);
plt.show();
missing_features = df_train.isnull().sum()
missing_features[missing_features>0]
df_train.shape
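# Possible follow-up (sketch, not part of the original walkthrough; the 1000-row
# threshold is illustrative only): drop the mostly-missing columns before modelling
# df_train = df_train.drop(missing_features[missing_features > 1000].index, axis=1)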
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Some info about the attributes
Step2: Sale Price
Step3: grlivarea vs Sale Price
Step4: TotalBsmtSF vs Sale Price
Step5: box plot overallqual/saleprice
Step6: box plot year built / saleprice
Step7: correlation matrix
Step8: scatterplot with highly correlated features
Step9: Missing Data
|
2,627
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Sheraz Khan <sheraz@khansheraz.com>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.connectivity import envelope_correlation
from mne.minimum_norm import make_inverse_operator, apply_inverse_epochs
from mne.preprocessing import compute_proj_ecg, compute_proj_eog
data_path = mne.datasets.brainstorm.bst_resting.data_path()
subjects_dir = op.join(data_path, 'subjects')
subject = 'bst_resting'
trans = op.join(data_path, 'MEG', 'bst_resting', 'bst_resting-trans.fif')
src = op.join(subjects_dir, subject, 'bem', subject + '-oct-6-src.fif')
bem = op.join(subjects_dir, subject, 'bem', subject + '-5120-bem-sol.fif')
raw_fname = op.join(data_path, 'MEG', 'bst_resting',
'subj002_spontaneous_20111102_01_AUX.ds')
raw = mne.io.read_raw_ctf(raw_fname, verbose='error')
raw.crop(0, 60).pick_types(meg=True, eeg=False).load_data().resample(80)
raw.apply_gradient_compensation(3)
projs_ecg, _ = compute_proj_ecg(raw, n_grad=1, n_mag=2)
projs_eog, _ = compute_proj_eog(raw, n_grad=1, n_mag=2, ch_name='MLT31-4407')
raw.info['projs'] += projs_ecg
raw.info['projs'] += projs_eog
raw.apply_proj()
cov = mne.compute_raw_covariance(raw) # compute before band-pass of interest
raw.filter(14, 30)
events = mne.make_fixed_length_events(raw, duration=5.)
epochs = mne.Epochs(raw, events=events, tmin=0, tmax=5.,
baseline=None, reject=dict(mag=8e-13), preload=True)
del raw
src = mne.read_source_spaces(src)
fwd = mne.make_forward_solution(epochs.info, trans, src, bem)
inv = make_inverse_operator(epochs.info, fwd, cov)
del fwd, src
labels = mne.read_labels_from_annot(subject, 'aparc_sub',
subjects_dir=subjects_dir)
epochs.apply_hilbert() # faster to apply in sensor space
stcs = apply_inverse_epochs(epochs, inv, lambda2=1. / 9., pick_ori='normal',
return_generator=True)
label_ts = mne.extract_label_time_course(
stcs, labels, inv['src'], return_generator=True)
corr = envelope_correlation(label_ts, verbose=True)
# let's plot this matrix
fig, ax = plt.subplots(figsize=(4, 4))
ax.imshow(corr, cmap='viridis', clim=np.percentile(corr, [5, 95]))
fig.tight_layout()
threshold_prop = 0.15 # percentage of strongest edges to keep in the graph
degree = mne.connectivity.degree(corr, threshold_prop=threshold_prop)
stc = mne.labels_to_stc(labels, degree)
stc = stc.in_label(mne.Label(inv['src'][0]['vertno'], hemi='lh') +
mne.Label(inv['src'][1]['vertno'], hemi='rh'))
brain = stc.plot(
clim=dict(kind='percent', lims=[75, 85, 95]), colormap='gnuplot',
subjects_dir=subjects_dir, views='dorsal', hemi='both',
smoothing_steps=25, time_label='Beta band')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we do some things in the name of speed, such as crop (which will
Step2: Now we band-pass filter our data and create epochs.
Step3: Compute the forward and inverse
Step4: Compute label time series and do envelope correlation
Step5: Compute the degree and plot it
|
2,628
|
<ASSISTANT_TASK:>
Python Code:
import os
import urllib.request
import webbrowser
import pandas as pd
from bs4 import BeautifulSoup
url = 'http://grad-schools.usnews.rankingsandreviews.com/best-graduate-schools/top-humanities-schools/sociology-rankings/page+1'
webbrowser.open_new_tab(url)
def extract_page_data(table_rows):
    """Extract and return the desired information from the td elements within
    the table rows.

    Note: this walks the module-level `soup` object; the `table_rows`
    argument is kept only to match the call site below.
    """
    # create the empty list to store the row data
    # <tr> tag: defines a row in a table
    # <td> tag: defines a cell in a table
    rows = []
    for row in soup.findAll('tr'):
        rows.append([val.text for val in row.find_all('td')])
    return rows[1:]  # drop the header row
us_news_rankings = []
errors_list = []
url_template = 'http://grad-schools.usnews.rankingsandreviews.com/best-graduate-schools/top-humanities-schools/sociology-rankings/page+{page_number}'
# for each page from 1 to (and including) 4
for page_number in range(1, 5):
# Use try/except block to catch and inspect any urls that cause an error
try:
# get the webpage url
url = url_template.format(page_number=page_number)
# get the html
html = urllib.request.urlopen(url)
# create the BeautifulSoup object
soup = BeautifulSoup(html, "lxml")
# get the column headers
headers = [header.text for header in soup.findAll('th')]
        # start extracting rows
table_rows = soup.select('td')[1:]
school_data = extract_page_data(table_rows)
        # create the dataframe for the current page; column names are
        # assigned after all pages are concatenated, so skip headers here
        school_df = pd.DataFrame(school_data)
# append the current dataframe to the list of dataframes
us_news_rankings.append(school_df)
except Exception as e:
# Store the url and the error it causes in a list
error =[url, e]
# then append it to the list of errors
errors_list.append(error)
print(len(errors_list))
errors_list
us_news_rankings
us_news_df_raw = pd.concat(us_news_rankings, axis=0)
column_headers = ["rank", "school", "score"]
us_news_df_raw.columns = column_headers
us_news_df_raw.head(10)
if not os.path.exists('../data/raw_data'):
os.makedirs('../data/raw_data')
if not os.path.exists('../data/clean_data'):
os.makedirs('../data/clean_data')
# Write out the raw rankings data to the raw_data folder in the data folder
us_news_df_raw.to_csv("../data/raw_data/us_news_rankings_RAW.csv", index=False)
us_news_df_raw = pd.read_csv("../data/raw_data/us_news_rankings_RAW.csv")
us_news_df_raw.head()
us_news_df_raw["school_location"] = "NaN"
us_news_df_raw["school_name"] = "NaN"
us_news_df_raw["rank"] = us_news_df_raw.loc[:,('rank')].replace(r"\D", "", regex = True)
us_news_df_raw["score"] = us_news_df_raw.loc[:,('score')].str.extract("(\d.\d)", expand=False)
us_news_df_raw.head(10)
for i in range(len(us_news_df_raw)):
try:
us_news_df_raw["school_name"][i] = us_news_df_raw["school"].str.split("\n\n")[i][0]
us_news_df_raw["school_location"][i] = us_news_df_raw["school"].str.split("\n\n")[i][1]
except:
us_news_df_raw["school_name"][i] = "NaN"
us_news_df_raw["school_location"][i] = "NaN"
us_news_df_raw["school_name"] = us_news_df_raw.loc[:,('school_name')].replace(r"\n", "", regex = True)
us_news_df_raw["school_location"] = us_news_df_raw.loc[:,('school_location')].replace("\n", "", regex = True)
cols = ["rank", "school_name", "school_location", "score"]
us_news_df_raw = us_news_df_raw[cols]
us_news_df_raw.head()
us_news_df_clean = us_news_df_raw[us_news_df_raw['school_name']!="NaN"]
us_news_df_clean.head()
us_news_df_clean.to_csv("../data/clean_data/us_news_rankings_clean.csv")
from geopy.geocoders import Nominatim
geolocator = Nominatim()
locations = us_news_df_clean['school_location'].apply(lambda x: geolocator.geocode(x))
address,coordinates = zip(*locations)
latitude,longitude = zip(*coordinates)
us_news_df_clean.loc[:,'latitude'] = latitude
us_news_df_clean.loc[:,('longitude')] = longitude
us_news_df_clean = us_news_df_clean.apply(pd.to_numeric, errors="ignore")
us_news_df_clean.head()
us_news_df_clean['quintile'] = pd.qcut(us_news_df_clean['score'], 5, labels=False)
from altair import Chart, X, Y, Axis, Scale
Chart(us_news_df_clean).mark_circle(
size=100,
opacity=0.6
).encode(
x=X('longitude:Q', axis=Axis(title=' ')),
y=Y('latitude:Q', axis=Axis(title=' ')),
#scale=Scale(domain=(-60, 80))),
color='quintile:N',
).configure_cell(
width=800,
height=350
).configure_axis(
grid=False,
axisWidth=0,
tickWidth=0,
labels=False,
)
import sys
import altair
import bs4
print("System and module version information: \n")
print('Python version:', sys.version_info)
print('urllib.request version:', urllib.request.__version__)
print('pandas version:', pd.__version__)
print('altair version:',altair.__version__)
print('Beautiful Soup version:', bs4.__version__)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: open US News Rankings for Sociology webpage
Step3: create a function to extract page data from US News
Step4: make empty lists for US News Rankings and errors
Step5: go through multiple web pages of US News Rankings
Step6: view possible urls that returned errors
Step7: view list of appended dataframes
Step8: make folders for raw_data and clean_data
Step9: read in raw .csv of US News Rankings with Pandas
Step10: make new columns for school location and school name
Step11: fill in the school name and school location columns with regular expressions
Step12: drop observations that are empty rows (NaN observations on school name column)
Step13: save clean .csv to file
Step14: Review
|
2,629
|
<ASSISTANT_TASK:>
Python Code:
# Imports required by the cells below (assumption: `tb` is spacepy.toolbox,
# which provides the logspace helper used for the energy grids)
import numpy as np
import matplotlib.pyplot as plt
import spacepy.toolbox as tb
def get_data(x, mag=100, pl=-2.5, xmin=50.0):
C = (-pl - 1)*xmin**(-pl-1)
return mag/0.03*C*x**(pl)
get_data(50)
50**-2.5
100**(-1/2.5) * 50**-2.5
pl = -2.5
xmin = 50
C = (-pl - 1)*xmin**(-pl-1)
get_data(50)
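# Sanity check on the normalisation: C*xmin**pl = (-pl - 1)/xmin = 1.5/50 = 0.03,
# so get_data(xmin) = mag/0.03 * 0.03 = mag = 100 exactly -- the 0.03 divisor
# pins the flux at xmin to `mag`.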
plt.loglog(tb.logspace(50, 5000, 10), get_data(tb.logspace(50, 5000, 10)))
def integral_channel(x, cutoff, mag=1.0):
ans = np.ones_like(x)
ans.fill(mag)
ind = (x < cutoff)
ans[ind] = 0
return ans
plt.loglog(tb.logspace(50, 5000, 10), integral_channel(tb.logspace(50, 5000, 10), 60))
integral_channel(tb.logspace(50, 5000, 10), 60)
def de_e_channels(de=30, xmin=50, xmax=5000, npts=500):
ans = []
co = xmin
xval = tb.logspace(xmin, xmax, npts)
while co < xmax:
# print('co', co)
ans.append(integral_channel(xval, co))
# print(de/100 * co, de)
co += (de/100 * co)
return xval, np.asarray(ans).T
x, y = de_e_channels()
plt.semilogx(x, y)
print(x.shape, y.shape)
# print(x, y)
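# Channel-count check: the cutoffs grow geometrically, co_k = xmin*(1 + de/100)**k,
# so the number of integral channels is ceil(log(xmax/xmin) / log(1 + de/100)).
# For de=30, xmin=50, xmax=5000 that is ceil(log(100)/log(1.3)) = 18, matching y.shape.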
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 100% efficiency integral channels
|
2,630
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
from IPython.display import Image
from IPython.display import display
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import FunctionTransformer
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.dummy import DummyClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn import metrics
from scipy import stats
# Allow modules and files to be loaded with relative paths
from pkg_resources import resource_filename as fpath
#from pkg_resources import ResourceManager
#from pkg_resources import DefaultProvider
#from pkg_resources import resource_string as fpath
import sys
from IPython.display import HTML
# set up path
#sys.path.append(/usr/local/www/data/)
if not "" in sys.path:
sys.path.append("") #fpath(__name__, "")"")
userpath = os.path.expanduser('~')
print(userpath)
if not userpath in sys.path:
sys.path.append(userpath)
sys.path.append(userpath+"/idata")
sys.path.append(userpath+"/idata/data")
#sys.path.append(fpath(__name__, "/"+userpath))
#sys.path.append(fpath(__name__, "/"+userpath+"/idata"))
#sys.path.append(fpath(__name__, "/"+userpath+"/idata/data"))
# We don't execute everything unless we're viewing in a notebook
IN_JUPYTER = 'get_ipython' in globals() and get_ipython().__class__.__name__ == "ZMQInteractiveShell"
IN_DEBUG = True
OPTIMIZING = False
# Set up fonts
if IN_DEBUG:
print ([f.name for f in mpl.font_manager.fontManager.ttflist])
from matplotlib import rcParams
rcParams['font.family'] = 'DejaVu Sans Mono'
print(__name__)
print(sys.path)
print(fpath(__name__,"dev.csv.zip"))
if IN_JUPYTER:
#print("In Jupyter")
%matplotlib inline
sns.set_style("whitegrid")
def load_csv_file(file):
if IN_JUPYTER:
filename = userpath = os.path.expanduser('~')+"/idata/data/" + file
else:
filename = fpath(__name__, file)
#filename = ResourceManager.resource_filename(DefaultProvider, file)
#filename = fpath(__name__, file)
print("found file")
print(filename)
return pd.read_csv(filename)
# Discretizes the selected columns. Note it changes the order of columns.
def digitize(x, bins=[], cols=[]):
    mask = np.ones(x.shape[1], bool)  # the np.bool alias was removed from numpy
    mask[cols] = 0
return np.hstack((np.apply_along_axis(np.digitize, 1, x[:,cols], bins=bins), x[:,mask]))
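# Quick illustration (made-up values): binning only the first column with
# bins=[0, 10, 20],
#   digitize(np.array([[5.0, 1.0], [15.0, 2.0]]), bins=[0, 10, 20], cols=[0])
# returns [[1., 1.], [2., 2.]] -- binned columns are stacked first, untouched ones after.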
if IN_JUPYTER and OPTIMIZING:
dev = load_csv_file('dev.csv.zip')
rcParams['font.family'] = 'DejaVu Sans Mono'
if IN_JUPYTER and OPTIMIZING:
sns.countplot(x="SEX", data=dev)
if IN_JUPYTER and OPTIMIZING:
dev.loc[dev['PINCP'].isnull(),'PINCP'] = 0
dev.loc[dev['WAGP'].isnull(),'WAGP'] = 0
if IN_JUPYTER and OPTIMIZING:
sns.boxplot(data=dev, x='SEX', y='PINCP')
if IN_JUPYTER and OPTIMIZING:
sns.boxplot(data=dev, x='SEX', y='WAGP')
if IN_JUPYTER and OPTIMIZING:
len(dev[dev['AGEP'].isnull()])
if IN_JUPYTER and OPTIMIZING:
sns.boxplot(data=dev, x='SEX', y='AGEP')
if IN_JUPYTER and OPTIMIZING:
len(dev[dev['MAR'].isnull()])
if IN_JUPYTER and OPTIMIZING:
sns.countplot(data=dev, x='MAR', hue='SEX')
if IN_JUPYTER and OPTIMIZING:
dev['SCOP_REDUCED'] = pd.to_numeric(dev[dev['SOCP'].notnull()]['SOCP'].str.slice(start=0, stop=2))
if IN_JUPYTER and OPTIMIZING:
dev.loc[dev['SCOP_REDUCED'].isnull(), 'SCOP_REDUCED'] = 0
if IN_JUPYTER and OPTIMIZING:
sns.countplot(data=dev, x='SCOP_REDUCED', hue='SEX')
if IN_JUPYTER and OPTIMIZING:
sns.factorplot(data=dev[['SCOP_REDUCED', 'SEX', 'PINCP']], x='SCOP_REDUCED', y='PINCP', hue='SEX', kind='box', size=7, aspect=1.5)
if IN_JUPYTER and OPTIMIZING:
sns.factorplot(data=dev[['SCOP_REDUCED', 'SEX', 'WAGP']], x='SCOP_REDUCED', y='WAGP', hue='SEX', kind='box', size=7, aspect=1.5)
# Modify this cell to add more features if any.
select_features = ['PINCP', 'WAGP', 'MAR', 'SCOP_REDUCED']
categorical_features = ['MAR', 'SCOP_REDUCED']
# Used for specifying which features to bin in $20,000 increments.
# Note that if you have features you would like to bin in a custom way, then you will have to modify the Naive Bayes
# classifier below.
monetary_features = ['PINCP', 'WAGP']
if IN_JUPYTER and OPTIMIZING:
select_dev = dev[select_features + ['SEX']]
classifiers = {}
classifier_parameters = {}
# Zero R
# This classifier does not require any additional preprocessing of data.
classifiers['ZeroR'] = DummyClassifier(strategy='prior')
# Binomial NB classifier
# This classifier requires that all features are in binary form.
# We can easily transform categorical data into binary form, but we have to first disretize continius variables first.
classifiers['Naive Bayes'] = Pipeline([
('discretize', FunctionTransformer(func=digitize, kw_args={'bins':np.array([0.0, 20000.0, 40000.0, 80000.0, 100000.0]), 'cols':pd.Series(select_features).isin(monetary_features)})),
('tranform', OneHotEncoder(categorical_features='all')),
('clf', BernoulliNB())])
# Decision Tree classifier
# This classifier can work on continious features and can find a good separation point on its own.
# We still have to convert categorical data to binary format.
classifiers['Decision Tree'] = Pipeline([('tranform', OneHotEncoder(categorical_features=pd.Series(select_features).isin(categorical_features))), ('clf', DecisionTreeClassifier())])
# Maximum Depth for a decision tree controls how many levels deep the tree will go before it stops.
# More levels means less generalizability, but fewer levels means less predictive power.
classifier_parameters['Decision Tree'] = {'clf__max_depth':(1, 3, 9, 12)}
# Create a label encoder to transform 1-MALE, 2-FEMALE into classes that sklearn can use (0 and 1).
le = LabelEncoder()
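# e.g. le.fit_transform([1, 2, 2, 1]) -> array([0, 1, 1, 0]),
# i.e. 1-MALE becomes class 0 and 2-FEMALE becomes class 1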
if IN_JUPYTER and OPTIMIZING:
# Split features and class into two dataframes.
X_dev = select_dev.ix[:, select_dev.columns != 'SEX'].values
y_dev = le.fit_transform(select_dev['SEX'].values)
kf = KFold(n_splits=10, shuffle=True)
# Initialize scores dict
scores = pd.DataFrame(columns=['fold', 'algorithm', 'parameters', 'accuracy', 'precision', 'recall'])
# For each fold run the classifier (outer CV).
fold = 0
for train_index, test_index in kf.split(X_dev):
X_train, X_test = X_dev[train_index], X_dev[test_index]
y_train, y_test = y_dev[train_index], y_dev[test_index]
fold = fold + 1
# Iterate over classifiers
for name, clf in classifiers.items():
# If the classifier has parameters, then run inner CV.
# Luckily sklearn provides a quick method to do this.
if name in classifier_parameters:
gs = GridSearchCV(estimator=clf, param_grid=classifier_parameters[name])
gs.fit(X_train, y_train)
y_pred = gs.predict(X_test)
best_params = str(gs.best_params_)
else:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
best_params = 'default'
scores = scores.append(pd.DataFrame(data={'fold':[fold],
'algorithm':[name],
'parameters':[best_params],
'accuracy':[accuracy_score(y_test, y_pred)],
'precision':[precision_score(y_test, y_pred)],
'recall':[recall_score(y_test, y_pred)]}),
ignore_index=True)
if IN_JUPYTER and OPTIMIZING:
scores[['algorithm', 'accuracy', 'precision', 'recall']].groupby(['algorithm']).median()
if IN_JUPYTER and OPTIMIZING:
sns.boxplot(data=scores, x='algorithm', y='accuracy')
if IN_JUPYTER and OPTIMIZING:
matrix = scores.pivot(index='fold', columns='algorithm', values='accuracy').as_matrix()
stats.friedmanchisquare(matrix[:,0], matrix[:,1], matrix[:,2])
if IN_JUPYTER and OPTIMIZING:
for i in range(np.shape(matrix)[1]):
for j in range(i+1, np.shape(matrix)[1]):
print(stats.wilcoxon(matrix[:,i], matrix[:,j], correction=True))
if IN_JUPYTER and OPTIMIZING:
features = select_dev.columns.tolist()
features = features[1:len(features)-1]
le = LabelEncoder()
# Split features and class into two dataframes.
X_dev = select_dev.ix[:, select_dev.columns != 'SEX']
y_dev = le.fit_transform(select_dev['SEX'].values)
X_dev_long = pd.get_dummies(data=X_dev, columns=categorical_features)
clf = DecisionTreeClassifier(max_depth=3)
clf.fit(X_dev_long, y_dev)
import pydotplus
if IN_JUPYTER and OPTIMIZING:
dot_data = export_graphviz(clf,
out_file=None,
feature_names=X_dev_long.columns,
class_names=['male', 'female'],
filled=True, rounded=True,
special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data)
i = Image(graph.create_png())
display(i)
train = load_csv_file('train01.csv.gz')
# np.append flattens DataFrames and its return value was being discarded,
# so stack the chunks with pd.concat instead
train = pd.concat([train, load_csv_file('train02.csv.gz')], ignore_index=True)
train = pd.concat([train, load_csv_file('train03.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train04.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train05.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train06.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train07.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train08.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train09.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train10.csv.gz')], ignore_index=True)
# train = pd.concat([train, load_csv_file('train11.csv.gz')], ignore_index=True)
# this can be repeated for all 11 training files, but fails if you load them all
# if you are using the smallest machine (only 3.75 gigs ram) on google compute
test = load_csv_file('test01.csv.gz')
test = pd.concat([test, load_csv_file('test02.csv.gz')], ignore_index=True)
# test = pd.concat([test, load_csv_file('test03.csv.gz')], ignore_index=True)
# test = pd.concat([test, load_csv_file('test04.csv.gz')], ignore_index=True)
# test = pd.concat([test, load_csv_file('test05.csv.gz')], ignore_index=True)
# test = pd.concat([test, load_csv_file('test06.csv.gz')], ignore_index=True)
# this can be repeated for all 6 test files, but fails if you load them all
# if you are using the smallest machine (only 3.75 gigs ram) on google compute
# Ensure there is no NaN values.
print(len(train[train['MAR'].isnull()]))
print(len(test[test['MAR'].isnull()]))
if IN_JUPYTER:
sns.countplot(data=train, x='MAR', hue='SEX')
# Reduce and make sure no NaN.
train['SCOP_REDUCED'] = pd.to_numeric(train[train['SOCP'].notnull()]['SOCP'].str.slice(start=0, stop=2))
train.loc[train['SCOP_REDUCED'].isnull(), 'SCOP_REDUCED'] = 0
test['SCOP_REDUCED'] = pd.to_numeric(test[test['SOCP'].notnull()]['SOCP'].str.slice(start=0, stop=2))
test.loc[test['SCOP_REDUCED'].isnull(), 'SCOP_REDUCED'] = 0
if IN_JUPYTER:
sns.countplot(data=train, x='SCOP_REDUCED', hue='SEX')
train.loc[train['PINCP'].isnull(),'PINCP'] = 0
train.loc[train['WAGP'].isnull(),'WAGP'] = 0
test.loc[test['PINCP'].isnull(),'PINCP'] = 0
test.loc[test['WAGP'].isnull(),'WAGP'] = 0
sns.factorplot(data=train[['SCOP_REDUCED', 'SEX', 'WAGP']], x='SCOP_REDUCED', y='WAGP', hue='SEX', kind='box', size=7, aspect=1.5)
select_train = train[select_features + ['SEX']]
select_test = test[select_features + ['SEX']]
# Decision Tree classifier
# This classifier can work on continious features and can find a good separation point on its own.
# We still have to convert categorical data to binary format.
best_clf = Pipeline([('tranform', OneHotEncoder(categorical_features=pd.Series(select_features).isin(categorical_features))), ('clf', DecisionTreeClassifier(max_depth=12))])
if IN_JUPYTER:
# Split features and class into two dataframes.
X_train = select_train.ix[:, select_train.columns != 'SEX'].values
y_train = le.fit_transform(select_train['SEX'].values)
X_test = select_test.ix[:, select_test.columns != 'SEX'].values
y_test = le.fit_transform(select_test['SEX'].values)
best_clf.fit(X_train, y_train)
y_pred = best_clf.predict(X_test)
y_score = best_clf.predict_proba(X_test)
else:
# to train a final classifier for use in a website, we want to combine the data
# into the largest data set possible
    all_data = pd.concat([select_train, select_test], ignore_index=True)
    X_all_data = all_data.ix[:, all_data.columns != 'SEX'].values
    y_all_data = le.fit_transform(all_data['SEX'].values)
    best_clf.fit(X_all_data, y_all_data)
if IN_JUPYTER:
print('Accuracy: ' + str(metrics.accuracy_score(y_test, y_pred)))
print(metrics.classification_report(y_test, y_pred))
# Compute ROC curve and ROC area for each class
fpr, tpr, _ = metrics.roc_curve(y_test, y_score[:,1])
roc_auc = metrics.auc(fpr, tpr)
if IN_JUPYTER:
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
if IN_JUPYTER:
precision, recall, thresholds = metrics.precision_recall_curve(y_test, y_score[:,1])
plt.clf()
plt.plot(recall, precision, label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.show()
if IN_JUPYTER and OPTIMIZING:
select_test = dev[select_features + ['SEX']]
X_test = select_test.ix[:, select_test.columns != 'SEX'].values
y_test = le.fit_transform(select_test['SEX'].values)
y_pred = best_clf.predict(X_test)
y_score = best_clf.predict_proba(X_test)
dev_wrong = dev[y_pred != y_test]
if IN_JUPYTER and OPTIMIZING:
    sns.countplot(data=dev_wrong, x='MAR', hue='SEX')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Custom functions and global variables
Step2: Dataset
Step3: Next, read the two documents describing the dataset (data/ACS2015_PUMS_README.pdf and data/PUMSDataDict15.txt) and get familiar with the different variables. You can use Seaborn to visualize the data. For example, you can start with comparing the number of records in the two classes that we will try to predict (1-MALE, 2-FEMALE). Although SEX contains numerical values, it is categorical. Thus, it is important to treat it as such.
Step4: The count plot above shows that the two classes are fairly balanced. This is good because when classes are NOT balanced, we have to make special considerations when training our algorithms.
Step5: We then compare the distribution of personal income across the two classes
Step6: And the distribution of wage income across the two classes.
Step7: The boxplots show that there is likely a difference in personal income and wages between the two classes (despite a large number of outliers). We also note that the data is likely not normally distributed, which will come into play later. However, there also does not seem to be a clear (linear) separation between the two classes.
Step8: Eyeballing the data, we do not see any major differences between the two sexes. Also, note that although there are differences in the life expectancy between the two sexes (https
Step9: Eyeballing the data, it looks like we are correct. However, this feature will only help in a small number of cases when a person is widowed. What else can you see in this chart?
Step10: And then we preserve the special value of NaN in this case (less than 16 years old or never worked) and assign it to a special code '00'
Step11: We now look at the difference in occupation across sexes.
Step12: We see that there are still differences between occupation categories between the two sexes (e.g., construction '47' is still dominated by males, but education and administrative is dominated by females).
Step13: Followed by wages (note the difference between wages and total income)
Step14: Eyeballing the results, we can conclude that females are on average paid less than males across different occupation fields. Here we decide to include the income and wage features as they are and let the model decide the differences between the classes.
Step15: We will now create a new dev data frame containing only the selected features and the class.
Step16: Questions you need to answer
Step17: We prepare out development set by creating 10 folds we will use to evaluate the algorithms.
Step18: Selecting the Best Algorithm
Step19: From the boxplots we can see that the Decision Tree was the best. That said, the accuracy of the decision tree is still pretty low at .71 (71%).
Step20: The results reject the null hypothesis (because the p values are small, <.005), meaning that we have some confidence there is a repeatable, real difference between the algorithms (even if the accuracy is low).
Step21: Questions you need to answer
Step22: Marriage
Step23: Occupation
Step24: Income and Wages
Step25: Train the Best Algorithm
Step26: We use the same pipeline as we did in the development stage, except that we only use one set of parameters (in this case max_depth=12).
Step27: Evaluate the Best Algorithm
Step28: ROC Curve
Step29: Any curve above the blue line means that the algorithm is predicting better than by random chance. However, we would ideally like to have the orange curve as close as possible to the y-axis and ROC curve area to be in the .90's.
Step30: The Precision/Recall Curve is not impressive either. Ideally, a rule of thumb tells us that a good model would have a curve that crosses (Precision, Recall) at (90, 80).
Step31: For example, the example below shows the distribution of different values of marriage feature .
|
2,631
|
<ASSISTANT_TASK:>
Python Code:
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
subjects_dir = data_path + '/subjects'
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname, preload=True, proj=True)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Pick the channels of interest
raw.pick_channels([raw.ch_names[pick] for pick in picks])
# Re-normalize our empty-room projectors, so they are fine after subselection
raw.info.normalize_proj()
# Read epochs
proj = False # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), preload=True, proj=proj,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Compute regularized noise and data covariances
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0, method='shrunk')
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
plt.close('all')
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation', 'Normal orientation', 'Max-power '
'orientation']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
pick_ori=pick_ori)
# View activation time-series
label = mne.read_label(fname_label)
stc_label = stc.in_label(label)
    plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
             label=desc)  # the deprecated `hold=True` kwarg is not needed
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in %s' % label_name)
plt.legend()
plt.show()
# Plot last stc in the brain in 3D with PySurfer if available
brain = stc.plot(hemi='lh', subjects_dir=subjects_dir,
initial_time=0.1, time_unit='s')
brain.show_view('lateral')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Get epochs
|
2,632
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from astropy.io import fits
from astropy import wcs
import pickle
import dill
import sys
import os
import xidplus
import copy
from xidplus import moc_routines, catalogue
from xidplus import posterior_maps as postmaps
from builtins import input
from healpy import pixelfunc
order_large=6
order_small=10
tile_large=21875
output_folder='../../../test_files/'
outfile=output_folder+'Tile_'+str(tile_large)+'_'+str(order_large)+'.pkl'
with open(outfile, 'rb') as f:
obj=pickle.load(f)
priors=obj['priors']
theta, phi = pixelfunc.pix2ang(2**order_large, tile_large, nest=True)
tile_small = pixelfunc.ang2pix(2**order_small, theta, phi, nest=True)
priors[0].moc.write(output_folder+'Tile_'+str(tile_large)+'_'+str(order_large)+'_moc_.fits')
from astropy.table import Table, join, vstack,hstack
Table([priors[0].sra,priors[0].sdec]).write(output_folder+'Tile_'+str(tile_large)+'_'+str(order_large)+'_table.fits')
moc=moc_routines.get_fitting_region(order_small,tile_small)
for p in priors:
p.moc=moc
p.cut_down_prior()
p.prior_bkg(0.0,1)
p.get_pointing_matrix()
moc.write(output_folder+'Tile_'+str(tile_small)+'_'+str(order_small)+'_moc_.fits')
print('fitting '+ str(priors[0].nsrc)+' sources \n')
print('there are '+ str(priors[0].snpix)+' pixels')
%%time
from xidplus.stan_fit import PACS
#priors[0].upper_lim_map()
#priors[0].prior_flux_upper=(priors[0].prior_flux_upper-10.0+0.02)/np.max(priors[0].prf)
fit=PACS.all_bands(priors[0],priors[1],iter=1000)
# Took 13205.7 seconds (3.6 hours)
outfile=output_folder+'Tile_'+str(tile_small)+'_'+str(order_small)
posterior=xidplus.posterior_stan(fit,priors)
xidplus.save(priors,posterior,outfile)
post_rep_map = postmaps.replicated_maps(priors, posterior, nrep=2000)
band = ['PACS_100', 'PACS_160']
for i, p in enumerate(priors):
Bayesian_Pval = postmaps.make_Bayesian_pval_maps(priors[i], post_rep_map[i])
wcs_temp = wcs.WCS(priors[i].imhdu)
ra, dec = wcs_temp.wcs_pix2world(priors[i].sx_pix, priors[i].sy_pix, 0)
kept_pixels = np.array(moc_routines.sources_in_tile([tile_small], order_small, ra, dec))
Bayesian_Pval[np.invert(kept_pixels)] = np.nan
Bayes_map = postmaps.make_fits_image(priors[i], Bayesian_Pval)
Bayes_map.writeto(outfile + '_' + band[i] + '_Bayes_Pval.fits', overwrite=True)
cat = catalogue.create_PACS_cat(posterior, priors[0], priors[1])
kept_sources = moc_routines.sources_in_tile([tile_small], order_small, priors[0].sra, priors[0].sdec)
kept_sources = np.array(kept_sources)
cat[1].data = cat[1].data[kept_sources]
cat.writeto(outfile + '_PACS_cat.fits', overwrite=True)
%%time
from xidplus.numpyro_fit import PACS
fit_numpyro=PACS.all_bands(priors)
outfile=output_folder+'Tile_'+str(tile_small)+'_'+str(order_small)+'_numpyro'
posterior_numpyro=xidplus.posterior_numpyro(fit_numpyro,priors)
xidplus.save(priors,posterior_numpyro,outfile)
post_rep_map = postmaps.replicated_maps(priors, posterior_numpyro, nrep=2000)
band = ['PACS_100', 'PACS_160']
for i, p in enumerate(priors):
Bayesian_Pval = postmaps.make_Bayesian_pval_maps(priors[i], post_rep_map[i])
wcs_temp = wcs.WCS(priors[i].imhdu)
ra, dec = wcs_temp.wcs_pix2world(priors[i].sx_pix, priors[i].sy_pix, 0)
kept_pixels = np.array(moc_routines.sources_in_tile([tile_small], order_small, ra, dec))
Bayesian_Pval[np.invert(kept_pixels)] = np.nan
Bayes_map = postmaps.make_fits_image(priors[i], Bayesian_Pval)
Bayes_map.writeto(outfile + '_' + band[i] + '_Bayes_Pval_numpyro.fits', overwrite=True)
cat = catalogue.create_PACS_cat(posterior_numpyro, priors[0], priors[1])
kept_sources = moc_routines.sources_in_tile([tile_small], order_small, priors[0].sra, priors[0].sdec)
kept_sources = np.array(kept_sources)
cat[1].data = cat[1].data[kept_sources]
cat.writeto(outfile + '_PACS_cat_numpyro.fits', overwrite=True)
moc.area_sq_deg
100.0*(20.0*np.pi*(1.0/3600.0)**2)/moc.area_sq_deg
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Work out what small tiles are in the test large tile file for PACS
Step2: You can fit with the numpyro backend.
|
2,633
|
<ASSISTANT_TASK:>
Python Code::
model.save('filename')
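# Counterpart sketch (assuming a Keras-style model; the framework is not stated
# in the original):
# from keras.models import load_model
# model = load_model('filename')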
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,634
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import pandas as pd
import mne
kiloword_data_folder = mne.datasets.kiloword.data_path()
kiloword_data_file = os.path.join(kiloword_data_folder,
'kword_metadata-epo.fif')
epochs = mne.read_epochs(kiloword_data_file)
epochs.metadata
print('Name-based selection with .loc')
print(epochs.metadata.loc[2:4])
print('\nIndex-based selection with .iloc')
print(epochs.metadata.iloc[2:4])
epochs.metadata['NumberOfLetters'] = \
epochs.metadata['NumberOfLetters'].map(int)
epochs.metadata['HighComplexity'] = epochs.metadata['VisualComplexity'] > 65
epochs.metadata.head()
print(epochs['WORD.str.startswith("dis")'])
print(epochs['Concreteness > 6 and WordFrequency < 1'])
epochs['solenoid'].plot_psd()
words = ['typhoon', 'bungalow', 'colossus', 'drudgery', 'linguist', 'solenoid']
epochs['WORD in {}'.format(words)].plot(n_channels=29)
evokeds = dict()
query = 'NumberOfLetters == {}'
for n_letters in epochs.metadata['NumberOfLetters'].unique():
evokeds[str(n_letters)] = epochs[query.format(n_letters)].average()
mne.viz.plot_compare_evokeds(evokeds, cmap=('word length', 'viridis'),
picks='Pz')
sort_order = np.argsort(epochs.metadata['WordFrequency'])
epochs.plot_image(order=sort_order, picks='Pz')
new_metadata = pd.DataFrame(data=['foo'] * len(epochs), columns=['bar'],
index=range(len(epochs)))
epochs.metadata = new_metadata
epochs.metadata.head()
epochs.metadata = None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Viewing Epochs metadata
Step2: Viewing the metadata values for a given epoch and metadata variable is done
Step3: Modifying the metadata
Step4: Selecting epochs using metadata queries
Step5: This capability uses the
Step6: Note also that traditional epochs subselection by condition name still works;
Step7: One use of the Pandas query string approach is to select specific words for
Step8: Notice that in this dataset, each "condition" (A.K.A., each word) occurs only
Step9: Metadata can also be useful for sorting the epochs in an image plot. For
Step10: Although there's no obvious relationship in this case, such analyses may be
Step11: You can remove metadata from an
|
2,635
|
<ASSISTANT_TASK:>
Python Code:
# Header cell assumed by the generator cells below (sketch: in the original
# notebook these definitions live in a setup cell). `format_decimal` rounds
# floats for the JSON fixtures and DATA collects the expected outputs.
import json
import numpy as np
from keras.models import Model
from keras.layers import Input, AveragePooling1D

def format_decimal(arr, places=6):
    # round each float to `places` decimal places
    return [round(x * 10**places) / 10**places for x in arr]

DATA = {}
data_in_shape = (6, 6)
L = AveragePooling1D(pool_size=2, strides=None, padding='valid')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(250)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.0'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
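# Shape check: with padding='valid', out_steps = floor((in_steps - pool_size)/strides) + 1
# (strides defaults to pool_size when None), so here (6 - 2)//2 + 1 = 3 output steps;
# with padding='same', out_steps = ceil(in_steps/strides).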
data_in_shape = (6, 6)
L = AveragePooling1D(pool_size=2, strides=1, padding='valid')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(251)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6)
L = AveragePooling1D(pool_size=2, strides=3, padding='valid')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(252)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6)
L = AveragePooling1D(pool_size=2, strides=None, padding='same')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(253)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6)
L = AveragePooling1D(pool_size=2, strides=1, padding='same')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(254)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6)
L = AveragePooling1D(pool_size=2, strides=3, padding='same')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(255)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.5'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (6, 6)
L = AveragePooling1D(pool_size=3, strides=None, padding='valid')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(256)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.6'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (7, 7)
L = AveragePooling1D(pool_size=3, strides=1, padding='same')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(257)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.7'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
data_in_shape = (7, 7)
L = AveragePooling1D(pool_size=3, strides=3, padding='same')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(258)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling1D.8'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
print(json.dumps(DATA))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: [pooling.AveragePooling1D.1] input 6x6, pool_size=2, strides=1, padding='valid'
Step2: [pooling.AveragePooling1D.2] input 6x6, pool_size=2, strides=3, padding='valid'
Step3: [pooling.AveragePooling1D.3] input 6x6, pool_size=2, strides=None, padding='same'
Step4: [pooling.AveragePooling1D.4] input 6x6, pool_size=2, strides=1, padding='same'
Step5: [pooling.AveragePooling1D.5] input 6x6, pool_size=2, strides=3, padding='same'
Step6: [pooling.AveragePooling1D.6] input 6x6, pool_size=3, strides=None, padding='valid'
Step7: [pooling.AveragePooling1D.7] input 7x7, pool_size=3, strides=1, padding='same'
Step8: [pooling.AveragePooling1D.8] input 7x7, pool_size=3, strides=3, padding='same'
Step9: export for Keras.js tests
|
2,636
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import torch
A_log, B = load_data()
for i in range(len(A_log)):
if A_log[i] == 1:
A_log[i] = 0
else:
A_log[i] = 1
C = B[:, A_log.bool()]
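# Equivalent vectorised form (sketch): the loop above just inverts the 0/1 mask,
# so the same columns can be selected directly with
# C = B[:, ~A_log.bool()]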
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,637
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from numpy import pi
import plotly.graph_objects as go
u = np.array([0.5 + 0.5*np.exp(2*k*np.pi*1j/8) for k in range(8)], dtype=complex)
fig = go.Figure(go.Scatter(
x = u.real,
y= u.imag,
mode='markers',
marker=dict( size=0.01, color='white'),
name='moving_pts',
))
frames=[]
for m in range(64):
w = np.exp(-1j*m*pi/8)*0.5+np.exp(1j*m*pi/8)*(u-0.5)
frames.append(go.Frame(data=[go.Scatter(
x= w.real,
y=w.imag,
marker=dict(size=15, color='white')
)],
traces=[0]))
fig.update(frames=frames);
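# Why the points appear to slide along straight lines (the "Tusi couple"):
# each frame computes w = 0.5*exp(-i*m*pi/8) + 0.5*exp(i*(m*pi/8 + phi_k)), and
# 0.5*(exp(-i*t) + exp(i*(t + phi))) = exp(i*phi/2)*cos(t + phi/2),
# so every marker oscillates along a fixed diameter of the unit circle.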
axis = dict(visible=False,
range=[-1.1, 1.01],
autorange=False)
fig.update_layout(
title='Math is Fun',
width=600,
height=600,
showlegend=False,
xaxis=axis,
yaxis=axis,
hovermode='closest',
updatemenus=[dict(type='buttons',
showactive=False,
y=1,
x=1.2,
xanchor='right',
yanchor='top',
pad=dict(l=10),
buttons=[dict(label='Play',
method='animate',
args=[None, dict(frame=dict(duration=150, redraw=False),
transition=dict(duration=80),
fromcurrent=True,
mode='immediate'
)]
)]
)]
);
z = np.array([np.exp(2*k*np.pi*1j/16) for k in range(16)], dtype=complex)  # the 16th roots of unity
fig.add_shape(dict(type= 'circle',
layer= 'below',
xref= 'x',
yref='y',
fillcolor= 'rgba(10,10,10, 0.9)',
x0=-1.01,
y0= -1.01,
x1= 1.01,
y1= 1.01,
line_color= 'rgba(10,10,10, 0.9)'))
#define the shapes for the eight diameters
for k in range(8):
x0 = z[k].real
y0 = z[k].imag
x1 = z[k+8].real
y1 = z[k+8].imag
fig.add_shape(dict(type= 'path',
path= f'M{x0}, {y0} Q 0.0, 0.0 {x1}, {y1}',
line= dict(color= 'white',
width=0.75)
))
from plotly.offline import download_plotlyjs, init_notebook_mode, iplot
init_notebook_mode(connected=True)
iplot(fig)
from IPython.core.display import HTML
def css_styling():
styles = open("./custom.css", "r").read()
return HTML(styles)
css_styling()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define data that will be updated by each animation frame
Step2: Set the plot layout
Step3: The black disk is defined as a Plotly shape, and the white diameters as quadratic Bézier curves defined by
|
2,638
|
<ASSISTANT_TASK:>
Python Code:
# Import the usual libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Enable inline plotting at lower left
%matplotlib inline
import pynrc
from pynrc import nrc_utils
from pynrc.nrc_utils import S, jl_poly_fit
from pynrc.pynrc_core import table_filter
pynrc.setup_logging('WARNING', verbose=False)
from astropy.table import Table
# Progress bar
from tqdm.auto import tqdm, trange
# Get stellar spectra and normalize at K-Band
# The stellar_spectrum convenience function creates a Pysynphot spectrum
bp_k = S.ObsBandpass('k')
sp_M2V = pynrc.stellar_spectrum('M2V', 18, 'vegamag', bp_k)#, catname='ck04models')
sp_F0V = pynrc.stellar_spectrum('F0V', 13, 'vegamag', bp_k)#, catname='ck04models')
# Initiate a NIRCam observation
nrc = pynrc.NIRCam(filter='F430M', wind_mode='WINDOW', xpix=160, ypix=160)
# Set some observing constraints
# Let's assume we want photometry on the primary to calibrate the M-Dwarf for direct imaging
# - Set well_frac_max=0.75
# Want a SNR~100 in the F430M filter
# - Set snr_goal=100
res = nrc.ramp_optimize(sp_M2V, sp_bright=sp_F0V, snr_goal=100, well_frac_max=0.75, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Do the same thing, but for coronagraphic mask instead
nrc = pynrc.NIRCam(filter='F430M', image_mask='MASK430R', pupil_mask='CIRCLYOT',
wind_mode='WINDOW', xpix=320, ypix=320)
# We assume that longer ramps will give us the best SNR for time
patterns = ['MEDIUM8', 'DEEP8']
res = nrc.ramp_optimize(sp_M2V, sp_bright=sp_F0V, snr_goal=100,
patterns=patterns, even_nints=True)
# Take the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Get stellar spectra and normalize at K-Band
# The stellar_spectrum convenience function creates a Pysynphot spectrum
bp_k = pynrc.bp_2mass('ks')
sp_G0V = pynrc.stellar_spectrum('G0V', 4, 'vegamag', bp_k)
# Choose a representative planet spectrum
planet = pynrc.planets_sb12(atmo='hy3s', mass=8, age=200, entropy=8, distance=17.5)
sp_pl = planet.export_pysynphot()
# Renormalize to F360M = 18.8
bp_l = pynrc.read_filter('F360M')
sp_pl = sp_pl.renorm(18.8, 'vegamag', bp_l)
# Initiate a NIRCam observation
nrc = pynrc.NIRCam(filter='F444W', pupil_mask='CIRCLYOT', image_mask='MASK430R',
wind_mode='WINDOW', xpix=320, ypix=320)
# Set even_nints=True assume 2 roll angles
res = nrc.ramp_optimize(sp_pl, sp_bright=sp_G0V, tacq_max=3600, tacq_frac=0.05,
even_nints=True, verbose=True)
# Take the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# The SHALLOWs, DEEPs, and MEDIUMs are very similar for SNR and efficiency.
# Let's go with SHALLOW2 for more GROUPS & INTS
# MEDIUM8 would be fine as well.
nrc.update_detectors(read_mode='SHALLOW2', ngroup=10, nint=70)
keys = list(nrc.multiaccum_times.keys())
keys.sort()
for k in keys:
print("{:<10}: {: 12.5f}".format(k, nrc.multiaccum_times[k]))
# Background sensitivity (5 sigma)
sens_dict = nrc.sensitivity(nsig=5, units='vegamag', verbose=True)
# M0V star normalized to K=12 mags
bp_k = S.ObsBandpass('k')
sp_M0V = pynrc.stellar_spectrum('M0V', 12, 'vegamag', bp_k)
nrc = pynrc.NIRCam(filter='F444W', pupil_mask='GRISMR', wind_mode='STRIPE', ypix=128)
# Set a minimum of 10 integrations to be robust against cosmic rays
# Also set a minimum of 10 groups for good ramp sampling
res = nrc.ramp_optimize(sp_M0V, snr_goal=100, nint_min=10, ng_min=10, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Let's say we choose SHALLOW4, NGRP=10, NINT=10
# Update detector readout
nrc.update_detectors(read_mode='SHALLOW4', ngroup=10, nint=10)
keys = list(nrc.multiaccum_times.keys())
keys.sort()
for k in keys:
print("{:<10}: {: 12.5f}".format(k, nrc.multiaccum_times[k]))
# Print final wavelength-dependent SNR
# For spectroscopy, the snr_goal is the median over the bandpass
snr_dict = nrc.sensitivity(sp=sp_M0V, forwardSNR=True, units='mJy', verbose=True)
# Ideal spectrum and wavelength solution
wspec, imspec = nrc.calc_psf_from_coeff(sp=sp_M0V, return_hdul=False, return_oversample=False)
# Resize to detector window
nx = nrc.det_info['xpix']
ny = nrc.det_info['ypix']
# Shrink/expand nx (fill value of 0)
# Then shrink to a size excluding wspec=0
# This assumes simulated spectrum is centered
imspec = nrc_utils.pad_or_cut_to_size(imspec, (ny,nx))
wspec = nrc_utils.pad_or_cut_to_size(wspec, nx)
# Add simple zodiacal background
im_slope = imspec + nrc.bg_zodi()
# Create a series of ramp integrations based on the current NIRCam settings
# Output is a single HDUList with 10 INTs
# Ignore detector non-linearity to return output in e-/sec
kwargs = {
'apply_nonlinearity' : False,
'apply_flats' : False,
}
res = nrc.simulate_level1b('M0V Target', 0, 0, '2023-01-01', '12:00:00',
im_slope=im_slope, return_hdul=True, **kwargs)
res.info()
tvals = nrc.Detector.times_group_avg
header = res['PRIMARY'].header
data_all = res['SCI'].data
slope_list = []
for data in tqdm(data_all):
ref = pynrc.ref_pixels.NRC_refs(data, header, DMS=True, do_all=False)
ref.calc_avg_amps()
ref.correct_amp_refs()
# Linear fit to determine slope image
cf = jl_poly_fit(tvals, ref.data, deg=1)
slope_list.append(cf[1])
# Create a master averaged slope image
slopes_all = np.array(slope_list)
slope_sim = slopes_all.mean(axis=0) * nrc.Detector.gain
fig, ax = plt.subplots(1,1, figsize=(12,3))
ax.imshow(slope_sim, vmin=0, vmax=10)
fig.tight_layout()
ind = wspec>0
# Estimate background emission and subtract from slope_sim
bg = np.median(slope_sim[:,~ind])
slope_sim -= bg
ind = wspec>0
plt.plot(wspec[ind], slope_sim[63,ind])
# Extract 2 spectral x 5 spatial pixels
# First, cut out the central 5 pixels
wspec_sub = wspec[ind]
sh_new = (5, len(wspec_sub))
slope_sub = nrc_utils.pad_or_cut_to_size(slope_sim, sh_new)
slope_sub_ideal = nrc_utils.pad_or_cut_to_size(imspec, sh_new)
# Sum along the spatial axis
spec = slope_sub.sum(axis=0)
spec_ideal = slope_sub_ideal.sum(axis=0)
spec_ideal_rebin = nrc_utils.frebin(spec_ideal, scale=0.5, total=False)
# Build a quick RSRF from extracted ideal spectral slope
sp_M0V.convert('mjy')
rsrf = spec_ideal / sp_M0V.sample(wspec_sub*1e4)
# Rebin along spectral direction
wspec_rebin = nrc_utils.frebin(wspec_sub, scale=0.5, total=False)
spec_rebin_cal = nrc_utils.frebin(spec/rsrf, scale=0.5, total=False)
# Expected noise per extraction element
snr_interp = np.interp(wspec_rebin, snr_dict['wave'], snr_dict['snr'])
_spec_rebin = spec_ideal_rebin / snr_interp
_spec_rebin_cal = _spec_rebin / nrc_utils.frebin(rsrf, scale=0.5, total=False)
fig, ax = plt.subplots(1,1, figsize=(12,8))
ax.plot(sp_M0V.wave/1e4, sp_M0V.flux, label='Input Spectrum')
ax.plot(wspec_rebin, spec_rebin_cal, alpha=0.7, label='Extracted Observation')
ax.errorbar(wspec_rebin, spec_rebin_cal, yerr=_spec_rebin_cal, zorder=3,
fmt='none', label='Expected Error Bars', alpha=0.7, color='C2')
ax.set_ylim([0,10])
ax.set_xlim([3.7,5.1])
ax.set_xlabel('Wavelength ($\mu m$)')
ax.set_ylabel('Flux (mJy)')
ax.set_title('Simulated Spectrum')
ax.legend(loc='upper right');
nrc = pynrc.NIRCam('F322W2', pupil_mask='GRISM0', wind_mode='STRIPE', ypix=64)
# K6V star at K=8.4 mags
bp_k = S.ObsBandpass('k')
sp_K6V = pynrc.stellar_spectrum('K6V', 8.4, 'vegamag', bp_k)
# Constraints
well = 0.5 # Keep well below 50% full
tacq = 2.1*3600. # 2.1 hour transit duration
ng_max = 30          # Transit spectroscopy allows for up to 30 groups per integration
nint_max = int(1e6) # Effectively no limit on number of integrations
# Let's bin the spectrum to R~100
# dw_bin is an optional parameter for specifying spectral bin sizes
R = 100
dw_bin = (nrc.bandpass.avgwave() / 10000) / R
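# (nrc.bandpass.avgwave() is in Angstroms -- assuming the usual pysynphot
#  convention -- so dividing by 1e4 converts to microns; dw = lambda / R
#  then gives the bin width for resolving power R.)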
res = nrc.ramp_optimize(sp_K6V, tacq_max=tacq, nint_max=nint_max,
ng_min=10, ng_max=ng_max, well_frac_max=well,
dw_bin=dw_bin, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Even though BRIGHT1 has a slight efficiency preference over RAPID
# and BRIGHT2, we decide to choose RAPID, because we are convinced
# that saving all data (and no coadding) is a better option.
# If APT informs you that the data rates or total data storage is
# an issue, you can select one of the other options.
# Update to RAPID, ngroup=30, nint=700 and plot PPM
nrc.update_detectors(read_mode='RAPID', ngroup=30, nint=700)
snr_dict = nrc.sensitivity(sp=sp_K6V, dw_bin=dw_bin, forwardSNR=True, units='Jy')
wave = np.array(snr_dict['wave'])
snr = np.array(snr_dict['snr'])
# Let's assume background subtraction of something with similar noise
snr /= np.sqrt(2.)
ppm = 1e6 / snr
# NOTE: We have up until now neglected to include a "noise floor"
# which represents the expected minimum achievable ppm from
# unknown systematics. To first order, this can be added in
# quadrature to the calculated PPM.
noise_floor = 30 # in ppm
ppm_floor = np.sqrt(ppm**2 + noise_floor**2)
plt.plot(wave, ppm, marker='o', label='Calculated PPM')
plt.plot(wave, ppm_floor, marker='o', label='PPM + Noise Floor')
plt.xlabel('Wavelength ($\mu m$)')
plt.ylabel('Noise Limit (PPM)')
plt.xlim([2.4,4.1])
plt.ylim([20,100])
plt.legend()
# Detection bandpass is F200W
nrc = pynrc.NIRCam(filter='F200W')
# Flat spectrum (in photlam) with ABMag = 25 in the NIRCam bandpass
sp = pynrc.stellar_spectrum('flat', 25, 'abmag', nrc.bandpass)
res = nrc.ramp_optimize(sp, is_extended=True, tacq_max=10000, tacq_frac=0.05, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# MEDIUM8 10 10 looks like a good option
nrc.update_detectors(read_mode='MEDIUM8', ngroup=10, nint=10, verbose=True)
# Calculate flux/mag for various nsigma detection limits
tbl = Table(names=('Sigma', 'Point (nJy)', 'Extended (nJy/asec^2)',
'Point (AB Mag)', 'Extended (AB Mag/asec^2)'))
tbl['Sigma'].format = '.0f'
for k in tbl.keys()[1:]:
tbl[k].format = '.2f'
for sig in [1,3,5,10]:
snr_dict1 = nrc.sensitivity(nsig=sig, units='nJy', verbose=False)
snr_dict2 = nrc.sensitivity(nsig=sig, units='abmag', verbose=False)
tbl.add_row([sig, snr_dict1[0]['sensitivity'], snr_dict1[1]['sensitivity'],
snr_dict2[0]['sensitivity'], snr_dict2[1]['sensitivity']])
tbl
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example 1
Step2: RESULTS
Step3: Example 3
Step4: Mock observed spectrum
Step5: Example 4
Step6: Example 5
|
2,639
|
<ASSISTANT_TASK:>
Python Code:
def round_down(n):
s = str(n)
if n <= 20:
return n
elif n < 100:
return int(s[0] + '0'), int(s[1])
elif n<1000:
return int(s[0] + '00'),int(s[1]),int(s[2])
assert round_down(5) == 5
assert round_down(55) == (50,5)
assert round_down(222) == (200,2,2)
def number_to_words(n):
    '''Given a number n between 1-1000 inclusive return a list of words for the number.'''
lst = []
dic = {
0: 'zero',
1: 'one',
2: 'two',
3: 'three',
4: 'four',
5: 'five',
6: 'six',
7: 'seven',
8: 'eight',
9: 'nine',
10: 'ten',
11: 'eleven',
12: 'twelve',
13: 'thirteen',
14: 'fourteen',
15: 'fifteen',
16: 'sixteen',
17: 'seventeen',
18: 'eighteen',
19: 'nineteen',
20: 'twenty',
30: 'thirty',
40: 'forty',
50: 'fifty',
60: 'sixty',
70: 'seventy',
80: 'eighty',
90: 'ninety',
100: 'one hundred',
200: 'two hundred',
300: 'three hundred',
400: 'four hundred',
500: 'five hundred',
600: 'six hundred',
700: 'seven hundred',
800: 'eight hundred',
900: 'nine hundred'}
for i in range(1,n+1):
if i <= 20:
for entry in dic:
if i == entry:
lst.append(dic[i])
elif i < 100:
first,second = round_down(i)
for entry in dic:
if first == entry:
if second == 0:
lst.append(dic[first])
else:
lst.append(dic[first] + '-' + dic[second])
elif i <1000:
first,second,third = round_down(i)
for entry in dic:
if first == entry:
if second == 0 and third == 0:
lst.append(dic[first])
elif second == 0:
lst.append(dic[first] + ' and ' + dic[third])
elif second == 1:
#For handling the teen case
lst.append(dic[first] + ' and ' + dic[int(str(second)+str(third))])
elif third == 0:
#Here I multiply by 10 because round_down removes the 0 for my second digit
lst.append(dic[first] + ' and ' + dic[second*10])
else:
lst.append(dic[first] + ' and ' + dic[second*10] + '-' + dic[third])
elif i == 1000:
lst.append('one thousand')
return lst
number_to_words(5)
assert len(number_to_words(5))==5
assert len(number_to_words(900))==900
assert number_to_words(50)[-1]=='fifty'
assert True # use this for grading the number_to_words tests.
def count_letters(n):
    '''Count the number of letters used to write out the words for 1-n inclusive.'''
lst2 = []
for entry in number_to_words(n):
count = 0
for char in entry:
if char != ' ' and char != '-':
count = count + 1
lst2.append(count)
return lst2
assert count_letters(1) == [3]
assert len(count_letters(342)) == 342
assert count_letters(5) == [3,3,5,4,4]
assert True # use this for grading the count_letters tests.
print(sum(count_letters(1000)))
print(sum(count_letters(998)))
assert True # use this for grading the answer to the original question.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Project Euler
Step2: Now write a set of assert tests for your number_to_words function that verifies that it is working as expected.
Step4: Now define a count_letters(n) that returns the number of letters used to write out the words for all of the numbers 1 to n inclusive.
Step5: Now write a set of assert tests for your count_letters function that verifies that it is working as expected.
Step6: Finally use your count_letters function to solve the original question.
|
2,640
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
# Python List
L = [1, 2, 3]
A = np.array([1, 2, 3])
# You can operate A with mathmatically operation. L cannot.
print(2*A)
print(A**2)
print(np.sqrt(A))
print(np.log(A))
a = np.array([1, 2])
b = np.array([3, 4])
# dot product in different ways:
np.dot(a, b) # 11
np.inner(a, b) # 11. dot product is also inner product
a.dot(b) # 11
b.dot(a) # 11
(a*b).sum() # 11
# you can use python looping to achieve the same, but it will be extremely slow when data is huge.
dot = 0
for i, j in zip(a, b):
dot += i*j
print(dot) # 11
# Note you can also use np.matrix but np.array is recommanded officially.
M = np.array([[1, 2], [3, 4]])
# extract element
M[0][0] # 1 ; this is the same as python list
M[0, 0] # 1
# matrix transport
M.T
# get shape
M.shape
# Matrix product. same as dot product
M.dot(M)
np.inner(M, M)
# inverse matrix
np.linalg.inv(M)
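# Sanity check (illustrative): a matrix times its inverse gives the identity
np.allclose(M.dot(np.linalg.inv(M)), np.eye(2))  # True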
# determination
np.linalg.det(M)
# diagonal element
np.diag(M) # [1, 4]
# note this will rerurn diagonal matrix
np.diag([1, 4]) # [[1, 0], [0, 4]]
# trace
np.diag(M).sum()
np.trace(M)
# product various 10x10 matrix
# all zero
Z = np.zeros((10, 10))
# all one
O = np.ones((10, 10))
# random from uniform distribution
R = np.random.random((10, 10))
# random from normal distribution(0, 1)
# Note randn takes each dimension as an individual argument; the others use a tuple
N = np.random.randn(10, 10)
print(N.mean())
print(N.var())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Array vs List
Step2: Dot Product
Step3: Matrix
|
2,641
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
!pip install h5py pyyaml
from __future__ import absolute_import, division, print_function
import os
import tensorflow as tf
from tensorflow import keras
tf.__version__
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
train_labels = train_labels[:1000]
test_labels = test_labels[:1000]
train_images = train_images[:1000].reshape(-1, 28 * 28) / 255.0
test_images = test_images[:1000].reshape(-1, 28 * 28) / 255.0
# Returns a short sequential model
def create_model():
model = tf.keras.models.Sequential([
keras.layers.Dense(512, activation=tf.nn.relu, input_shape=(784,)),
keras.layers.Dropout(0.2),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=['accuracy'])
return model
# Create a basic model instance
model = create_model()
model.summary()
checkpoint_path = "training_1/cp.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
# Create checkpoint callback
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
model = create_model()
model.fit(train_images, train_labels, epochs = 10,
validation_data = (test_images,test_labels),
callbacks = [cp_callback]) # pass callback to training
!ls {checkpoint_dir}
model = create_model()
loss, acc = model.evaluate(test_images, test_labels)
print("Untrained model, accuracy: {:5.2f}%".format(100*acc))
model.load_weights(checkpoint_path)
loss,acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# include the epoch in the file name. (uses `str.format`)
checkpoint_path = "training_2/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(
checkpoint_path, verbose=1, save_weights_only=True,
# Save weights, every 5-epochs.
period=5)
model = create_model()
model.fit(train_images, train_labels,
epochs = 50, callbacks = [cp_callback],
validation_data = (test_images,test_labels),
verbose=0)
import pathlib
# Sort the checkpoints by modification time.
checkpoints = pathlib.Path(checkpoint_dir).glob("*.index")
checkpoints = sorted(checkpoints, key=lambda cp:cp.stat().st_mtime)
checkpoints = [cp.with_suffix('') for cp in checkpoints]
latest = str(checkpoints[-1])
checkpoints
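# Equivalently (assuming the standard TensorFlow API), the newest checkpoint
# can be located with:
# latest = tf.train.latest_checkpoint(checkpoint_dir)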
model = create_model()
model.load_weights(latest)
loss, acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
# Save the weights
model.save_weights('./checkpoints/my_checkpoint')
# Restore the weights
model = create_model()
model.load_weights('./checkpoints/my_checkpoint')
loss,acc = model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
model = create_model()
model.fit(train_images, train_labels, epochs=5)
# Save entire model to a HDF5 file
model.save('my_model.h5')
# Recreate the exact same model, including weights and optimizer.
new_model = keras.models.load_model('my_model.h5')
new_model.summary()
loss, acc = new_model.evaluate(test_images, test_labels)
print("Restored model, accuracy: {:5.2f}%".format(100*acc))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Save and restore models
Step2: Get an example dataset
Step3: Define a model
Step4: Save checkpoints during training
Step5: This creates a single collection of TensorFlow checkpoint files that are updated at the end of each epoch
Step6: Create a new, untrained model. When restoring a model from only weights, you must have a model with the same architecture as the original model. Since it's the same model architecture, we can share weights despite that it's a different instance of the model.
Step7: Then load the weights from the checkpoint, and re-evaluate
Step8: Checkpoint callback options
Step9: Now, have a look at the resulting checkpoints (sorting by modification date)
Step10: Note
Step11: What are these files?
Step12: Save the entire model
Step13: Now recreate the model from that file
Step14: Check its accuracy
|
2,642
|
<ASSISTANT_TASK:>
Python Code:
from PIL import Image, ImageOps
import numpy as np
import matplotlib.pyplot as plt
import sys
from datetime import datetime, timedelta
%matplotlib inline
test_image = 'harvard_2008_08_24_120140.jpg'
test_mask = 'harvard_DB_0001_01.tif'
# read in mask image and convert to nparray
mask_img = Image.open(test_mask)
roimask = np.asarray(mask_img, dtype=np.bool8)
# read in canopy image
img = Image.open(test_image)
img.load()
# create an image with alpha mask
rev_mask_img = ImageOps.invert(mask_img)
alpha_data = rev_mask_img.getdata()
rev_mask_img.putdata(alpha_data,.4,150)
imga = img.copy()
fig = plt.figure(figsize=[8,8])
imga.putalpha(rev_mask_img)
plt.imshow(imga)
def get_roi_stats(im, roimask):
    '''Function to return a collection of stats for DN values for an image / mask pair.'''
# split into bands
(im_r, im_g, im_b) = im.split()
# create numpy arrays with bands
r_array = np.asarray(im_r, dtype=np.int16)
g_array = np.asarray(im_g, dtype=np.int16)
b_array = np.asarray(im_b, dtype=np.int16)
# try applying mask to red image ... if mask and image don't
# have same size this will raise an exception.
try:
r_ma = np.ma.array(r_array,mask=roimask)
except:
errstr = "Error applying mask to image file.\n"
sys.stderr.write(errstr)
return None
# make masked arrays for G,B
g_ma = np.ma.array(g_array,mask=roimask)
b_ma = np.ma.array(b_array,mask=roimask)
# find mean, std
r_vals = r_ma.compressed()
r_mean = r_vals.mean()
r_diff = np.float64(r_vals) - r_mean
r_std = np.sqrt(np.dot(r_diff, r_diff)/r_vals.size)
g_vals = g_ma.compressed()
g_mean = g_vals.mean()
g_diff = np.float64(g_vals) - g_mean
g_std = np.sqrt(np.dot(g_diff, g_diff)/g_vals.size)
b_vals = b_ma.compressed()
b_mean = b_vals.mean()
b_diff = np.float64(b_vals) - b_mean
b_std = np.sqrt(np.dot(b_diff, b_diff)/b_vals.size)
# calculate percentiles for each array
r_pcts = np.percentile(r_vals, (5., 10., 25., 50., 75., 90., 95.))
g_pcts = np.percentile(g_vals, (5., 10., 25., 50., 75., 90., 95.))
b_pcts = np.percentile(b_vals, (5., 10., 25., 50., 75., 90., 95.))
# calculate covariance
rg_cov = np.dot(r_diff, g_diff)/r_diff.size
gb_cov = np.dot(g_diff, b_diff)/g_diff.size
br_cov = np.dot(b_diff, r_diff)/b_diff.size
# correlation coefficients
RG_cor = rg_cov/(r_std * g_std)
GB_cor = gb_cov/(g_std * b_std)
BR_cor = br_cov/(b_std * r_std)
# return list of values
return [{'mean': r_mean,
'stdev': r_std,
'percentiles': r_pcts},
{'mean': g_mean,
'stdev': g_std,
'percentiles':g_pcts},
{'mean': b_mean,
'stdev': b_std,
'percentiles': b_pcts},
RG_cor, GB_cor, BR_cor]
# call stats function
stats = get_roi_stats(img, roimask)
stats
# split into bands
(im_r, im_g, im_b) = img.split()
# create numpy arrays with bands
r_array = np.asarray(im_r, dtype=np.float64)
g_array = np.asarray(im_g, dtype=np.float64)
b_array = np.asarray(im_b, dtype=np.float64)
brt_array = r_array + g_array + b_array
# apply mask to brightness image
brt_ma = np.ma.array(brt_array,mask=roimask)
# make masked arrays for R,G,B
g_ma = np.ma.array(g_array,mask=roimask)
r_ma = np.ma.array(r_array,mask=roimask)
b_ma = np.ma.array(b_array,mask=roimask)
# first let's get percentiles for R, G and B
nvalues = g_ma.size
r_values = r_ma.compressed()
g_values = g_ma.compressed()
b_values = b_ma.compressed()
print "Number of pixels in image: {0}".format(nvalues)
print "Number of masked pixels: {0}".format(len(g_values))
# calculate mean, median and std-dev for R, G, B
r_mean = np.mean(r_values)
r_median = np.median(r_values)
r_var = np.var(r_values)
r_std = np.std(r_values)
g_mean = np.mean(g_values)
g_median = np.median(g_values)
g_var = np.var(g_values)
g_std = np.std(g_values)
b_mean = np.mean(b_values)
b_median = np.median(b_values)
b_var = np.var(b_values)
b_std = np.std(b_values)
# calculate percentiles for each array
r_percentiles = np.percentile(r_values, (5., 10., 25., 50., 75., 90., 95.))
g_percentiles = np.percentile(g_values, (5., 10., 25., 50., 75., 90., 95.))
b_percentiles = np.percentile(b_values, (5., 10., 25., 50., 75., 90., 95.))
# histogram the value for R, G, B
fig = plt.figure(figsize=[20,6])
r_95 = r_percentiles[-1]
r_low = r_mean - r_std
r_high = r_mean + r_std
plt.subplot(1,3,1)
plt.hist(r_values,20,color='r')
plt.plot([r_mean, r_mean], [0,60000], 'k--', lw=2)
plt.plot([r_low, r_low], [0,60000], 'k:', lw=2)
plt.plot([r_high, r_high], [0,60000], 'k:', lw=2 )
plt.text(150,50000,'mean: {0:.2f}'.format(r_mean), size=14)
plt.text(150,47000,'std. dev.: {0:.2f}'.format(r_std), size=14)
plt.xlim([0,255])
g_95 = g_percentiles[-1]
g_low = g_mean - g_std
g_high = g_mean + g_std
plt.subplot(1,3,2)
plt.hist(g_values,20,color='g')
plt.plot([g_mean, g_mean], [0,60000], 'k--', lw=2)
plt.plot([g_low, g_low], [0,60000], 'k:', lw=2)
plt.plot([g_high, g_high], [0,60000], 'k:', lw=2 )
plt.text(150,50000,'mean: {0:.2f}'.format(g_mean), size=14)
plt.text(150,47000,'std. dev.: {0:.2f}'.format(g_std), size=14)
plt.xlim([0,255])
b_95 = b_percentiles[-1]
b_low = b_mean - b_std
b_high = b_mean + b_std
plt.subplot(1,3,3)
plt.hist(b_values,20,color='b')
plt.plot([b_mean, b_mean], [0,70000], 'k--', lw=2)
plt.plot([b_low, b_low], [0,70000], 'k:', lw=2)
plt.plot([b_high, b_high], [0,70000], 'k:', lw=2 )
plt.text(150,59000,'mean: {0:.2f}'.format(b_mean), size=14)
plt.text(150,55000,'std. dev.: {0:.2f}'.format(b_std), size=14)
plt.xlim([0,255])
print r_mean, r_std, r_median, r_var, r_percentiles
print g_mean, g_std, g_median, g_var, g_percentiles
print b_mean, b_std, b_median, b_var, b_percentiles
# calculate the covariance matrix between pairs (R,G), (R,B), (G,B)
rg_cov = np.cov(r_values, g_values)
rb_cov = np.cov(r_values, b_values)
gb_cov = np.cov(g_values, b_values)
fig = plt.figure(figsize=[16,4])
plt.subplot(1,3,1)
plt.scatter(r_values,g_values,s=2,color='k',alpha=.2,edgecolor='none')
plt.xlim([0,255])
plt.ylim([0,255])
plt.xlabel('R')
plt.ylabel('G')
plt.subplot(1,3,2)
plt.scatter(r_values,b_values,s=2,color='k',alpha=.2,edgecolor='none')
plt.xlim([0,255])
plt.ylim([0,255])
plt.xlabel('R')
plt.ylabel('B')
plt.subplot(1,3,3)
plt.scatter(g_values,b_values,s=2,color='k',alpha=.2,edgecolor='none')
plt.xlim([0,255])
plt.ylim([0,255])
plt.xlabel('G')
plt.ylabel('B')
RG_cov = rg_cov[0,1]
RB_cov = rb_cov[0,1]
GB_cov = gb_cov[0,1]
RG_cor = RG_cov/(r_std * g_std)
RB_cor = RB_cov/(r_std * b_std)
GB_cor = GB_cov/(g_std * b_std)
print 'rg_cov: {0} rb_cov: {1} gb_cov: {2}'.format(RG_cov, RB_cov, GB_cov)
print 'rg_cor: {0} rb_cor: {1} gb_cor: {2}'.format(RG_cor, RB_cor, GB_cor)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: The quantity we use for phenological studies is the "green chromatic coordinate" or "gcc" value. This is defined as gcc = G / (R + G + B), the green digital number divided by the sum of the three color channels.
Step3: To get a better idea of what these numbers mean let's repeat the above without using a function. That way we can examine some of the intermediate steps. In particular it would be nice to examine the distribution of DN values over the ROI for each of the color planes.
Step4: Basically, all I've done so far is split the image into R,G and B components and created a numpy "masked array". For simple stats, like mean, used in the previous calculations we could just use masked array functions. Here I'm going to grab the un-masked values and put them in 1-d arrays. This also makes it easier to do the plotting.
Step5: Visualizing the correlation coefficients is probably best done by showing a scatterplot of DN values for the pixels in the ROI.
|
2,643
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
import pickle
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
from scipy.spatial.distance import pdist
from scipy.stats import gaussian_kde
pythonpath_for_regnmf = os.path.realpath(os.path.join(os.path.pardir, os.path.pardir))
sys.path.append(pythonpath_for_regnmf)
from regnmf import ImageAnalysisComponents as ia
from regnmf import datamaker
from regnmf.regularizedHALS import convex_cone
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
%matplotlib inline
param = {'act_time': [0.01, 0.1, 0.3, 0.8, 1.0, 1.0],
'cov': 0.3,
'latents': 40,
'mean': 0.2,
'no_samples': 50,
'noisevar': 0.2,
'shape': (50, 50),
'width':0.1,
'var': 0.08}
anal_param = {'sparse_param': 0.5,
'factors': 80,
'smooth_param': 2,
'init':'convex',
'sparse_fct':'global_sparse',
'verbose':0
}
def violin_plot(ax, data, color='b'):
'''
create violin plots on an axis
'''
w = 0.4
for p, d in enumerate(data):
k = gaussian_kde(d) #calculates the kernel density
m = k.dataset.min() #lower bound of violin
M = k.dataset.max() #upper bound of violin
x = np.arange(m,M,(M-m)/100.) # support for violin
v = k.evaluate(x) #violin profile (density curve)
scale = w/v.max()
v = v*scale #scaling the violin to the available space
ax.fill_betweenx(x,p,v+p, facecolor=color, edgecolor = color, alpha=1)
ax.fill_betweenx(x,p,-v+p, facecolor=color, edgecolor = color, alpha=1)
#median
perc = np.percentile(d, [25,50,75])
perc_width = k.evaluate(perc)*scale
l1, = ax.plot([p-perc_width[1],p+perc_width[1]],[perc[1], perc[1]], 'k', lw=0.5)
l2, = ax.plot([p-perc_width[0],p+perc_width[0]],[perc[0], perc[0]], '0.25', lw=0.5)
ax.plot([p-perc_width[2],p+perc_width[2]],[perc[2], perc[2]], '0.25', lw=0.5)
ax.legend([l1, l2], ['median', 'quartiles'], prop={'size':fontsize}, numpoints=1,
loc = 'lower right', labelspacing=0.1, handletextpad=0.5, bbox_to_anchor = (1, 0.9),
handlelength=1, borderaxespad=-0.5, frameon=False)
def cor(time1, time2, num_sources):
'''calculate crosscorrelation between sources and latents'''
return np.corrcoef(np.vstack((time1, time2)))[num_sources:, :num_sources]
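# NB: the name `cor` is rebound to a defaultdict in the experiment loop below,
# so this helper is shadowed there; correlations are instead taken from
# tempdata.cor2source.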
num_datasets = 5 #number of independent datasets
mse = defaultdict(list)
cor = defaultdict(list)
for dummy in range(num_datasets):
compare = {}
# create data
tempdata = datamaker.Dataset(param)
# plain NMF
nnma = ia.NNMF(maxcount=50, num_components=anal_param['factors'])
anal_param.update({'init':'convex'})
nnma.param.update(anal_param)
compare['nmf'] = nnma(ia.TimeSeries(tempdata.observed, shape=param['shape']))
# plain sICA
sica = ia.sICA(num_components=anal_param['factors'])
compare['sica'] = sica(ia.TimeSeries(tempdata.observed, shape=param['shape']))
# NMF on sICA reduced data
reduced_data = np.dot(compare['sica']._series, compare['sica'].base._series)
compare['sicareduced_nmf'] = nnma(ia.TimeSeries(reduced_data, shape=param['shape']))
# sICA initialized NMF
nnma = ia.NNMF(maxcount=50, num_components=anal_param['factors'])
A = compare['sica']._series.copy()
X = compare['sica'].base._series.copy()
A[A<0]=0
X[X<0]=0
anal_param.update({'init':{'A':A, 'X':X}})
nnma.param.update(anal_param)
compare['sicainit_nmf'] = nnma(ia.TimeSeries(tempdata.observed, shape=param['shape']))
# NMF initialized sICA
compare['nmfinit_sica'] = compare['nmf'].copy()
sica = ia.sICA(num_components=anal_param['factors'])
out_temp = sica(compare['nmfinit_sica'].base.copy())
compare['nmfinit_sica'].base = out_temp.base
compare['nmfinit_sica']._series = np.dot(compare['nmfinit_sica']._series, out_temp._series)
# sICA on NMF reduced data
nmf_reduced = np.dot(compare['nmf']._series, compare['nmf'].base._series)
sica = ia.sICA(num_components=anal_param['factors'])
compare['nmfreduced_sica'] = sica(ia.TimeSeries(nmf_reduced, shape=param['shape']))
# sICA on convex cone
compare['ccinit_sica'] = compare['nmf'].copy()
init = convex_cone(tempdata.observed, anal_param['factors'])
out_temp = sica(ia.TimeSeries(np.array(init['base']), shape=param['shape']))
compare['ccinit_sica'].base = out_temp.base
compare['ccinit_sica']._series = np.dot(np.array(init['timecourses']).T, out_temp._series)
#collect performance measures
for k in compare:
cor[k] += list(tempdata.cor2source(compare[k])[1])
mse[k] += list(tempdata.mse2source(compare[k], local=0.05))
fig = plt.figure(figsize=(15, 6))
fontsize = 10
ax = fig.add_axes([0.1,0.2,0.35,0.75])
keys = ['nmf', 'sicainit_nmf', 'sicareduced_nmf']
data = [1-np.array(mse[i]) for i in keys]
violin_plot(ax, data, '0.5')
ax.set_xticks(range(len(keys)))
ax.set_xticklabels(['NMF', 'sICA init\nNMF', 'sICA reconst.\nNMF'],
rotation='0', ha='center', size=fontsize)
ax.set_ylabel('SR', size=fontsize)
ax.set_ylim([0,0.9])
ax.set_yticks([0,0.4,0.8])
ax.yaxis.set_tick_params(labelsize=fontsize)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_tick_params(size=0)
for pos in ['right', 'bottom', 'top']:
ax.spines[pos].set_color('none')
ax = fig.add_axes([0.6,0.2,0.35,0.75])
keys = ['sica', 'nmfinit_sica', 'nmfreduced_sica']
data = [1-np.array(mse[i]) for i in keys]
violin_plot(ax, data, '0.5')
ax.set_xticks(range(len(keys)))
ax.set_xticklabels(['sICA', 'NMF init\nsICA', 'NMF reconst.\nsICA'],
rotation='0', ha='center', size=fontsize)
ax.set_ylabel('SR', size=fontsize)
ax.set_ylim([0,0.9])
ax.set_yticks([0,0.4,0.8])
ax.yaxis.set_tick_params(labelsize=fontsize)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_tick_params(size=0)
for pos in ['right', 'bottom', 'top']:
ax.spines[pos].set_color('none')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameter for creation of surrogate Data
Step2: Parameter for Matrix Factorization
Step3: Helper Functions
Step4: Perform chained matrix factorization
Step5: Violinplots of Source Recovery (SR)
|
2,644
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by this notebook (not shown in the original excerpt)
import numpy as np
import torch
import matplotlib.pyplot as plt
import yellowbrick as yb
import yellowbrick.classifier
from sklearn.datasets import make_circles
from sklearn.model_selection import train_test_split

X, y = make_circles(n_samples=1000, noise=0.1)
# 75/25 train/test split
orig_X_train, orig_X_test, orig_y_train, orig_y_test = train_test_split(X, y, test_size=0.25)
# Transform data into tensors.
X = torch.tensor(orig_X_train, dtype=torch.float)
y = torch.tensor(orig_y_train, dtype=torch.long)
import yellowbrick.contrib.scatter
visualizer = yellowbrick.contrib.scatter.ScatterVisualizer()
visualizer.fit(orig_X_train, orig_y_train)
visualizer.poof()
from torch import nn
# Sequential model allows easy model experimentation
model = nn.Sequential(
nn.Linear(2, 16), # input dim 2. 16 neurons in first layer.
nn.ReLU(), # ReLU activation
#nn.Dropout(p=0.2), # Optional dropout
nn.Linear(16, 4), # Linear from 16 neurons down to 2
nn.ReLU(),
nn.Linear(4,2),
nn.Softmax(dim=1) # Softmax activation to normalize output weights
)
# Loss function. CrossEntropy is valid for classification problems.
loss_fn = nn.CrossEntropyLoss()
# Optimizer. Many to choose from.
optimizer = torch.optim.Adam(params=model.parameters())
# Optimizer iterations
for i in range(1000):
# Clear the gradient at the start of each step.
optimizer.zero_grad()
# Compute the forward pass
output = model(X)
# Compute the loss
loss = loss_fn(output, y)
# Backprop to compute the gradients
loss.backward()
# Update the model parameters
optimizer.step()
print(loss.item())
%matplotlib inline
# Make a grid
ns = 25
xx, yy = np.meshgrid(np.linspace(-1.5, 1.5, 2*ns), np.linspace(-1.5, 1.5, 2*ns))
# Shape of each is [ns, ns]
# Combine into a single tensor
G = torch.tensor(np.array([xx, yy]), dtype=torch.float)
# Shape is [2, ns, ns]
# reshape to be convenient to work with
G = G.reshape((2, G.shape[1]*G.shape[2])).transpose(0,1)
# Now a tensor of shape [ns*ns, 2]. Sequence of x,y coordinate pairs
result = model(G).detach()
# For each row (sample) in G, get the prediction under the model
# The variables inside the model are tracked for gradients.
# Call "detach()" to stop tracking gradient for further computations.
# Result is shape [ns*ns, 2] since model takes 2-dim vectors and generates a 2-dim prediction
c0 = result[:,0]
# weights assigned to class 0
c1 = result[:,1]
# weights assigned to class 1
plt.hexbin(G[:,0].detach().numpy(), G[:,1].detach().numpy(), c0.numpy(), gridsize=ns, cmap='viridis')
# Gridsize is half that of the meshgrid for clean rendering.
plt.title("Class 0 Activation")
plt.axis('equal')
plt.show()
plt.hexbin(G[:,0].detach().numpy(), G[:,1].detach().numpy(), c1.numpy(), gridsize=ns, cmap='viridis')
plt.title("Class 1 Activation")
plt.axis('equal')
plt.show()
from sklearn.base import BaseEstimator
class NetWrapper(BaseEstimator):
    '''Wrap our model as a BaseEstimator.'''
    # Tell yellowbrick this is a classifier
    _estimator_type = "classifier"
def __init__(self, model):
# save a reference to the model
self.model = model
self.classes_ = None
def fit(self, X, y):
# save the list of classes
self.classes_ = list(set(i for i in y))
def predict_proba(self, X):
        '''
        Define predict_proba (or decision_function): compute predictions from the model.
        Transform input into a Tensor, compute the prediction,
        then transform the prediction back into a numpy array.
        '''
        v = self.model(torch.tensor(X, dtype=torch.float)).detach().numpy()
        return v
wrapped_net = NetWrapper(model)
# Wrap the model
# Use ROCAUC as per usual
ROCAUC = yb.classifier.ROCAUC(wrapped_net)
ROCAUC.fit(orig_X_train, orig_y_train)
print(orig_X_test.shape, orig_y_test.shape)
print(orig_X_train.shape, orig_y_train.shape)
ROCAUC.score(orig_X_test, orig_y_test)
ROCAUC.poof()
# weight: a * (x-c)^T(x-c), a is a real number
class Circle(torch.nn.Module):
    '''Extend torch.nn.Module for a new "layer" in a neural network.'''
def __init__(self, k, data):
        '''
        k is the number of neurons to use.
        data is passed in to use as samples to initialize centers.
        '''
super().__init__()
# k is not a Parameter, so there is no gradient and this is not updated in optimization
self.k = int(k)
# Parameters always have gradients computed
self.alpha = torch.nn.Parameter(torch.normal(mean=torch.zeros(k), std=torch.ones(k)*0.5).unsqueeze(1))
self.C = torch.nn.Parameter(data[np.random.choice(data.shape[0], k, replace=False), :].unsqueeze(1))
    def forward(self, x):
        diff = (x - self.C)
        # compact way of writing the batched squared distances ||x - c_k||^2
        sq_dist = torch.einsum('kij,kij->ki', [diff, diff])
        return (self.alpha * sq_dist).transpose(0, 1)
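# Illustrative shape check (a sketch, not part of the original notebook):
# the layer maps [N, 2] inputs to [N, k] activations, where neuron j
# outputs alpha_j * ||x - c_j||^2.
circle_demo = Circle(4, X)
print(circle_demo(X[:5]).shape)  # expected: torch.Size([5, 4])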
from tqdm import tqdm
loss_fn = torch.nn.CrossEntropyLoss()
model = nn.Sequential(
Circle(16, X),
nn.ReLU(),
nn.Linear(16,4),
nn.ReLU(),
nn.Linear(4,2),
nn.Softmax(dim=1)
)
optimizer = torch.optim.Adam(params=model.parameters())
for i in tqdm(range(1000)):
optimizer.zero_grad()
output = model(X)
loss = loss_fn(output, y)
loss.backward()
optimizer.step()
%matplotlib inline
ns = 25
xx, yy = np.meshgrid(np.linspace(-1.5, 1.5, 2*ns), np.linspace(-1.5, 1.5, 2*ns))
G = torch.tensor(np.array([xx, yy]), dtype=torch.float)
# reshape...
G = G.reshape((2, G.shape[1]*G.shape[2])).transpose(0,1)
result = model(G).detach()
c0 = result[:,0]
c1 = result[:,1]
plt.hexbin(G[:,0].detach().numpy(), G[:,1].detach().numpy(), c0.numpy(), gridsize=ns, cmap='viridis')
plt.title("Class 0 Activation")
plt.axis('equal')
plt.show()
plt.hexbin(G[:,0].detach().numpy(), G[:,1].detach().numpy(), c1.numpy(), gridsize=ns, cmap='viridis')
plt.title("Class 1 Activation")
plt.axis('equal')
plt.show()
wrapped_net = NetWrapper(model)
ROCAUC = yb.classifier.ROCAUC(wrapped_net)
ROCAUC.fit(orig_X_train, orig_y_train)
wrapped_net.predict_proba(orig_X_test)
ROCAUC.score(orig_X_test, orig_y_test)
ROCAUC.poof()
%matplotlib inline
# Show the centers of each "kernel"
centers = model[0].C.squeeze().detach().numpy()
scales = model[0].alpha.squeeze().detach().numpy()
plt.scatter(centers[:,0], centers[:,1])
plt.scatter(X[:,0], X[:,1], alpha=0.1)
plt.axis('equal')
print(centers.shape)
%matplotlib inline
from matplotlib import cm
# Show the contours of the activation regions of each kernel
ns = 25
xx, yy = np.meshgrid(np.linspace(-2, 2, ns), np.linspace(-2, 2, ns))
G = torch.tensor(np.array([xx, yy]), dtype=torch.float)
G = G.reshape((2, G.shape[1]*G.shape[2])).transpose(0,1)
G = G.expand(centers.shape[0], ns*ns, 2)
Z = torch.tensor(scales).unsqueeze(1) * torch.einsum('kij,kij->ki', [G-torch.tensor(centers).unsqueeze(1), G-torch.tensor(centers).unsqueeze(1)])
plt.scatter(centers[:,0], centers[:,1])
plt.scatter(X[:,0], X[:,1], alpha=0.1)
cmap = cm.get_cmap('tab20')
for i in range(Z.shape[0]):
if scales[i] > 0:
plt.contour(np.linspace(-2, 2, ns), np.linspace(-2, 2, ns), Z[i].reshape(ns, ns), [-0.5,0.5], antialiased=True, colors=[cmap(i)], alpha=0.8, linestyles='dotted')
else:
plt.contour(np.linspace(-2, 2, ns), np.linspace(-2, 2, ns), Z[i].reshape(ns, ns), [-0.5,0.5], antialiased=True, colors=[cmap(i)], alpha=0.3, linestyles='solid')
plt.axis('equal')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Visualize data
Step2: Basic Neural Net
Step3: What do the activation regions look like?
Step6: What is the classification performance?
Step9: Custom Modules
|
2,645
|
<ASSISTANT_TASK:>
Python Code:
from dx import *
me = market_environment(name='me', pricing_date=dt.datetime(2015, 1, 1))
me.add_constant('initial_value', 0.01)
me.add_constant('volatility', 0.1)
me.add_constant('kappa', 2.0)
me.add_constant('theta', 0.05)
me.add_constant('paths', 1000)
me.add_constant('frequency', 'M')
me.add_constant('starting_date', me.pricing_date)
me.add_constant('final_date', dt.datetime(2015, 12, 31))
me.add_curve('discount_curve', 0.0) # dummy
me.add_constant('currency', 0.0) # dummy
ssr = stochastic_short_rate('sr', me)
time_list = [dt.datetime(2015, 1, 1),
dt.datetime(2015, 4, 1),
dt.datetime(2015, 6, 15),
dt.datetime(2015, 10, 21)]
ssr.get_forward_rates(time_list, 10)
ssr.get_discount_factors(time_list, 10)
me.add_constant('initial_value', 36.)
me.add_constant('volatility', 0.2)
# time horizon for the simulation
me.add_constant('currency', 'EUR')
me.add_constant('frequency', 'M')
# monthly frequency; parameter according to pandas convention
me.add_constant('paths', 10)
# number of paths for simulation
me.add_curve('discount_curve', ssr)
gbm = geometric_brownian_motion('gbm', me)
gbm.get_instrument_values()
from pylab import plt
plt.style.use('seaborn')
%matplotlib inline
# short rate paths
plt.figure(figsize=(10, 6))
plt.plot(ssr.process.instrument_values[:, :10]);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Second, the instantiation of the class.
Step2: The following is an example list object containing datetime objects.
Step3: The call of the method get_forward_reates() yields the above time_list object and the simulated forward rates. In this case, 10 simulations.
Step4: Accordingly, the call of the get_discount_factors() method yields simulated zero-coupon bond prices for the time grid.
Step5: Stochstic Drifts
Step6: Then add the stochastic_short_rate object as discount curve.
Step7: Finally, instantiate the geometric_brownian_motion object.
Step8: We get simulated instrument values as usual via the get_instrument_values() method.
Step9: Visualization of Simulated Stochastic Short Rate
|
2,646
|
<ASSISTANT_TASK:>
Python Code:
def flip_case(string: str) -> str:
return string.swapcase()
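# Quick check (illustrative):
assert flip_case('Hello World') == 'hELLO wORLD'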
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,647
|
<ASSISTANT_TASK:>
Python Code:
from collections import defaultdict
import sys

def countFreq(arr, n):
    # Count occurrences of each element
    mp1 = defaultdict(int)
    for i in range(n):
        mp1[arr[i]] += 1
    # Count how many elements share each frequency
    mp2 = defaultdict(int)
    for it in mp1:
        mp2[mp1[it]] += 1
    # Find the largest group size among the frequencies
    M = -sys.maxsize - 1
    for it in mp2:
        M = max(M, mp2[it])
    # Return the frequency shared by the most elements
    for it in mp2:
        if M == mp2[it]:
            return it
    return 0

if __name__ == "__main__":
    arr = [6, 10, 3, 10, 8, 3, 6, 4]
    n = len(arr)
    print(countFreq(arr, n))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,648
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
import numpy as np
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
# Set random seed
np.random.seed(0)
# Load data
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Create a pipeline
pipe = Pipeline([('classifier', RandomForestClassifier())])
# Create space of candidate learning algorithms and their hyperparameters
search_space = [{'classifier': [LogisticRegression()],
'classifier__penalty': ['l1', 'l2'],
'classifier__C': np.logspace(0, 4, 10)},
{'classifier': [RandomForestClassifier()],
'classifier__n_estimators': [10, 100, 1000],
'classifier__max_features': [1, 2, 3]}]
# Create grid search
clf = GridSearchCV(pipe, search_space, cv=5, verbose=0)
# Fit grid search
best_model = clf.fit(X, y)
# View best model
best_model.best_estimator_.get_params()['classifier']
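# The winning hyperparameter combination (includes the chosen classifier object)
best_model.best_params_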
# Predict target vector
best_model.predict(X)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Iris Dataset
Step2: Create Pipeline With Model Selection Search Space
Step3: Create Model Selection Using Grid Search
Step4: Conduct Model Selection Using Grid Search
Step5: View Best Model And Its Best Hyperparameters
Step6: Predict Using Best Model
|
2,649
|
<ASSISTANT_TASK:>
Python Code:
# Solution
import numpy as np
import math
def dist(pt1, pt2):
return math.sqrt( (pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2 )
pts1 = [ (25,125), (44,105), (29,97), (35, 63), (55, 63), (42, 57), (23, 40), (64,37), (33,22), (55,20) ]
pts2 = [ (28,145), (38,115), (50,130),(65,140), (55,118), (50, 90), (43, 83), (63,88), (50,60), (50,30) ]
clusters = [ [pt] for pt in pts1]
# simulate pass 1
for pt2 in pts2:
minDist = 9999
minIdx = None
# find the closest centroid
for idx, pt1 in enumerate(pts1):
if minDist > dist(pt1, pt2):
minDist = dist(pt1, pt2)
minIdx = idx
clusters[minIdx].append(pt2)
centroids = [ [ sum(y) / float(len(y)) for y in zip(*parray) ] for parray in clusters]
# print centroids after RECOMPUTATION
print ", ".join([ "({0:.1f} {1:.1f})".format(*pt) for pt in centroids])
def assign_cluster(yellow_centroid, blue_centroid, ul, lr):
if dist(ul, yellow_centroid) <= dist(ul, blue_centroid ) and \
dist(lr, yellow_centroid) <= dist(lr, blue_centroid ) :
print "yellow"
elif dist(ul, yellow_centroid) > dist(ul, blue_centroid ) and \
dist(lr, yellow_centroid) > dist(lr, blue_centroid ) :
print "blue"
else:
print "none clustered"
#option 1
print "Option 1"
assign_cluster((5,10) , (20,5), (6,7), (11,14))
assign_cluster((5,10) , (20,5), (11,5), (17,2))
#option 2
print "Option 2"
assign_cluster((5,10) , (20,5), (6,7), (11,4))
assign_cluster((5,10) , (20,5), (14,10), (23,6))
#option 3
print "Option 3"
assign_cluster((5,10) , (20,5), (3,15), (13,7))
assign_cluster((5,10) , (20,5), (11,5), (17,2))
#option 4
print "Option 3"
assign_cluster((5,10) , (20,5), (3,15), (13,7))
assign_cluster((5,10) , (20,5), (14,10), (23,6))
import numpy as np
mat = np.array([[1, 0, 0], [0, 2, 0], [0, 0, 0]])
pinv = np.linalg.pinv(mat)
print pinv
import numpy as np
print "slot 1 ============"
slot1 = .10 * .015
print slot1
slot1 = .09 * .016
print slot1
slot1 = .08 * .017
print slot1
slot1 = .07 * .018
print slot1
slot1 = .06 * .019
print slot1
print "slot 2 ============"
slot2 = .10 * .10
print slot2
slot2 = .09 * .12
print slot2
slot2 = .08 * .14
print slot2
slot2 = .07 * .15
print slot2
slot2 = .06 * .16
print slot2
print "slot 3 ============"
slot3 = .10 * .005
print slot3
slot3 = .09 * .006
print slot3
slot3 = .08 * .007
print slot3
slot3 = .07 * .008
print slot3
slot3 = .06 * .010
print slot3
# Program for simulating the process
clustered_pts = [(0, 0), (10, 10)]
unclustered_pts = [(1,6), (3,7), (4,3), (7,7), (8,2), (9,5)]
def dist(pt1, pt2):
return (pt1[0] - pt2[0])**2 + (pt1[1] - pt2[1])**2
for i in xrange(5):
representative = (0,0)
maxDist = 0
for pt1 in unclustered_pts:
# find the closest centroid
minDist = 9999
for idx, pt2 in enumerate(clustered_pts):
if minDist > float(dist(pt1, pt2)):
minDist = float(dist(pt1, pt2))
if maxDist < minDist:
maxDist = minDist
representative = pt1
print maxDist
print "point ({0}, {1}) added to cluster".format(*representative)
unclustered_pts.remove(representative)
clustered_pts.append(representative)
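# Note: picking the unclustered point farthest from its nearest clustered point
# is the farthest-first traversal heuristic (Gonzalez's algorithm), often used
# to choose initial cluster representatives.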
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q2.
Step2: Q3.
Step3: Q2.
Step4: Solution 2.
|
2,650
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by this notebook (Keras 2 functional API; not shown in the original excerpt)
import re
import tarfile
import numpy as np
from keras.utils.data_utils import get_file
from keras.preprocessing.sequence import pad_sequences
from keras.layers import (Input, Embedding, Lambda, Reshape, Activation,
                          Dense, TimeDistributed, dot, add)
from keras.models import Model
from keras import backend as K
from keras_tqdm import TQDMNotebookCallback

class Corpus:
def __init__(self):
'''
A corpus object maintains a mapping from a word (string) to a unique id (int).
'''
self.word_idx_dict = {}
self.uniq_word_cnt = 0
def update_vocab(self, words):
'''
Updates the corpus with the given list of words. The words that are seen for the
first time are added to the word -> id dictionary.
'''
for word in words:
if word not in self.word_idx_dict:
self.word_idx_dict[word] = self.uniq_word_cnt
self.uniq_word_cnt += 1
def words_idx(self, words):
'''
Returns the list of IDs corresponding to the given words.
'''
return [self.word_idx_dict[word] for word in words]
def tokenize(self, sentence):
        return [x.strip() for x in re.split(r'(\W+)', sentence) if x.strip()]
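# Illustrative usage of Corpus (a quick sketch, not part of the original flow):
_c = Corpus()
_words = _c.tokenize("Mary moved to the bathroom.")
_c.update_vocab(_words)
print(_c.words_idx(_words))  # e.g. [0, 1, 2, 3, 4, 5]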
class ExampleParser:
'''
Responsible for parsing examples for the babi tasks as specified at https://research.fb.com/downloads/babi/
'''
@staticmethod
def add(example_lines, data, c):
'''
Takes the set of lines that form an example and:
a) updates the corpus with these lines
b) Parses the line to 3-tuples of the form: (memory, question, answer).
A single story line may yield several 3-tuples of the above form. E.g.:
1 Mary moved to the bathroom.
2 John went to the hallway.
3 Where is Mary? bathroom 1
4 Daniel went back to the hallway.
12 Where is Daniel? hallway 4
Will generate 2 tuples:
- ([1 Mary moved to the bathroom., 2 John went to the hallway.], Where is Mary?, bathroom)
- ([1 Mary moved to the bathroom., 2 John went to the hallway., 4 Daniel went back to the hallway.],
Where is Daniel?, hallway)
Note that instead of storing the actual words, an example stores the IDs of the associated words.
A word -> ID map is maintained in the corpus.
@example_lines: A set of lines that form an example.
@data: List of 3-tuples (memories, question, answer), updated by "add".
@c: The corpus object.
'''
        memories = []
        for eg_line in example_lines:
            if "\t" not in eg_line:  # normal memory line
                eg_line = c.tokenize(eg_line)
                c.update_vocab(eg_line)
                memories.append(c.words_idx(eg_line))
else: #question line
ques, ans, hints = eg_line.split("\t")
ques = c.tokenize(ques)[1:]
c.update_vocab(ques)
ans = c.tokenize(ans)
c.update_vocab(ans)
data.append(([m for m in memories],
c.words_idx(ques), c.words_idx(ans)))
@staticmethod
def process_files(lines, corpus):
'''
        Reads the given lines, identifies the boundaries of each example, and parses each example into the data list.
'''
data = []
eg_lines = [lines[0].decode('utf-8').strip()]
for line in lines[1:]:
line = line.decode('utf-8').strip()
if int(line.split(" ", 1)[0]) == 1: #new story starts
ExampleParser.add(eg_lines, data, corpus)
eg_lines = [line.strip()]
else:
eg_lines.append(line.strip())
if len(eg_lines) > 0:
ExampleParser.add(eg_lines, data, corpus)
return data
challenges = {
# QA1 with 10,000 samples
'single_supporting_fact_10k': 'tasks_1-20_v1-2/en-10k/qa1_single-supporting-fact_{}.txt',
# QA2 with 10,000 samples
'two_supporting_facts_10k': 'tasks_1-20_v1-2/en-10k/qa2_two-supporting-facts_{}.txt',
}
path = get_file('babi-tasks-v1-2.tar.gz', origin='https://s3.amazonaws.com/text-datasets/babi_tasks_1-20_v1-2.tar.gz')
tar = tarfile.open(path)
challenge_type = 'two_supporting_facts_10k'
challenge = challenges[challenge_type]
test_lines = tar.extractfile(challenge.format('test')).readlines()
train_lines = tar.extractfile(challenge.format('train')).readlines()
print(train_lines[0:4])
print(test_lines[0:4])
c = Corpus()
print("Processing training files")
train_tuples = ExampleParser.process_files(train_lines, c)
print("Processing test files")
test_tuples = ExampleParser.process_files(test_lines, c)
all_tuples = train_tuples + test_tuples
print("# training tuples = {0}\n# test tuples = {1}".format(len(train_tuples), len(test_tuples)))
print(train_tuples[0])
print(train_lines[0:3])
max_num_memories = max([len(example[0]) for example in all_tuples])
max_memory_len = max([len(memory) for example in all_tuples for memory in example[0]])
max_ques_len = max([len(example[1]) for example in all_tuples])
vocab_size = c.uniq_word_cnt
len(train_tuples), len(test_tuples), c.uniq_word_cnt, max_num_memories, max_memory_len, max_ques_len
def pad_tuples(tuples, max_memory_len, max_num_memories, max_ques_len, vocab_size):
'''
Takes a number of tuples, as well as measures required for 0 padding.
Returns a padded version of the memories, questions and the answer.
In other words, for each tuple, memories is now a matrix of:
a) max_num_memories * max_memory_len
b) question is an array with max_ques_len elements. The gaps are filled with 0s.
Also performs 1-hot encoding for the output.
'''
m, q, a = [], [], []
for (memories, ques, ans) in tuples:
memories= pad_sequences(memories, maxlen=max_memory_len)
memories = np.concatenate([memories, np.zeros((max_num_memories - memories.shape[0],
max_memory_len), 'int') ])
m.append(memories)
q.append(ques)
#ans_vec = np.zeros((vocab_size))
#ans_vec[ans] = 1
a.append(ans)
return np.array(m), pad_sequences(q, maxlen=max_ques_len), np.array(a)
m_train, q_train, a_train = pad_tuples(train_tuples, max_memory_len, max_num_memories, max_ques_len, c.uniq_word_cnt)
m_test, q_test, a_test = pad_tuples(test_tuples, max_memory_len, max_num_memories, max_ques_len, c.uniq_word_cnt)
print(m_train.shape)
print(q_train.shape)
print(a_train.shape)
print(m_test.shape)
print(q_test.shape)
print(a_test.shape)
n_hidden = 64
memories = Input(shape=(max_num_memories, max_memory_len))
x = TimeDistributed(Embedding(input_dim=vocab_size, output_dim=n_hidden))(memories)
m = Lambda(lambda xx: K.sum(xx, 2))(x)
memories.shape, m.shape
query = Input(shape=(max_ques_len,))
u = Embedding(input_dim=vocab_size, output_dim=n_hidden)(query)
u = Lambda(lambda x : K.sum(x, 1))(u)
u = Reshape(target_shape=(1, n_hidden))(u)
query.shape, u.shape
p = dot([m, u], axes=2)
p = Reshape((max_num_memories,))(p)
p = Activation(activation='softmax')(p)
p = Reshape((max_num_memories,1))(p)
p.shape
x = TimeDistributed(Embedding(vocab_size, n_hidden))(memories)
# use a distinct name so we don't shadow the Corpus object `c`
c_emb = Lambda(lambda xx: K.sum(xx, 2))(x)
c_emb.shape
o = dot([c_emb, p], axes=1)
o = Reshape(target_shape=(1,n_hidden))(o)
o
a_in = Lambda(lambda ou: sum([ou[0], ou[1]]))([o, u])
a_in = Reshape(target_shape=(n_hidden,))(a_in)
answer = Dense(vocab_size, activation='softmax')(a_in)
answer
babi_memmn = Model([memories, query], answer)
babi_memmn.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
K.set_value(babi_memmn.optimizer.lr, 1e-2)
params = {'verbose': 2, 'callbacks': [TQDMNotebookCallback(leave_inner=False)]}
babi_memmn.fit([m_train, q_train], a_train, **params, batch_size=32, epochs=5,
validation_data=([m_test, q_test], a_test))
mem_input = Input(shape=(max_num_memories, max_memory_len))
query_input = Input(shape=(max_ques_len,))
A1 = Embedding(vocab_size,output_dim=n_hidden)
m_i = TimeDistributed(A1)(mem_input)
m_i = Lambda(lambda x: K.sum(x, 2))(m_i)
m_i
B = A1 #as specified
u1 = B(query_input)
u1 = (Lambda(lambda x: K.sum(x, 1)))(u1)
u1 = Reshape((1, n_hidden))(u1)
u1
#nothing special here
C1 = Embedding(vocab_size,output_dim=n_hidden)
c_i_1 = TimeDistributed(C1)(mem_input)
c_i_1 = Lambda(lambda x: K.sum(x, 2))(c_i_1)
c_i_1
#Calculation for a given hop don't change at all.
p1 = dot([m_i, u1], axes=2)
p1 = Reshape((max_num_memories,))(p1)
p1 = Activation(activation='softmax')(p1)
p1 = Reshape((max_num_memories,1))(p1)
o1 = dot([c_i_1, p1], axes=1)
o1 = Reshape(target_shape=(n_hidden,))(o1)
o1
u1 = Reshape((n_hidden,))(u1)
u2 = add([o1, u1])
u2
A2 = C1 #A(k + 1) = C(k)
m_i = TimeDistributed(A2)(mem_input)
m_i = Lambda(lambda x: K.sum(x, 2))(m_i)
m_i
#Same as the previous layer
C2 = Embedding(vocab_size,output_dim=n_hidden)
c_i_2 = TimeDistributed(C2)(mem_input)
c_i_2 = Lambda(lambda x: K.sum(x, 2))(c_i_2)
c_i_2
#Same as the previous layer
u2 = Reshape((1, n_hidden))(u2)
p2 = dot([m_i, u2], axes=2)
p2 = Reshape((max_num_memories,))(p2)
p2 = Activation(activation='softmax')(p2)
p2 = Reshape((max_num_memories,1))(p2)
o2 = dot([c_i_2, p2], axes=1)
o2 = Reshape(target_shape=(n_hidden,))(o2)
o2
u2 = Reshape((n_hidden,))(u2)
u1 = Reshape((n_hidden,))(u1)
# The paper specifies u(k + 1) = o(k) + u(k), i.e. u3 = add([o2, u2]).
# This is a hack: good results were not obtained with u3 = u2 + o2, so u1 is reused instead.
u3 = add([o2, u1])
u3
answer = Dense(vocab_size, activation='softmax')(u3)
answer
babi2 = Model([mem_input, query_input], answer)
babi2.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
K.set_value(babi2.optimizer.lr, 0.01)
babi2.fit([m_train, q_train], a_train, **params, batch_size=32, epochs=8,
          validation_data=([m_test, q_test], a_test))
def get_mi(mem_input, A=None):
    if A is None:
A = Embedding(vocab_size,output_dim=n_hidden)
m = TimeDistributed(A)(mem_input)
m = Lambda(lambda x: K.sum(x, 2))(m)
return A, m
def get_ci(mem_input, C=None):
    if C is None:
C = Embedding(vocab_size,output_dim=n_hidden)
m = TimeDistributed(C)(mem_input)
m = Lambda(lambda x: K.sum(x, 2))(m)
return C, m
def get_u(B, query_input):
u = B(query_input)
u = (Lambda(lambda x: K.sum(x, 1)))(u)
u = Reshape((1, n_hidden))(u)
return u
def get_p(m, u):
u = Reshape((1, n_hidden))(u)
p = dot([m, u], axes=2)
p = Reshape((max_num_memories,))(p)
p = Activation(activation='softmax')(p)
p = Reshape((max_num_memories,1))(p)
return p
def get_o(c_i, p):
o = dot([c_i, p], axes=1)
o = Reshape(target_shape=(n_hidden,))(o)
return o
def next_u(u_prev, o_prev):
o_prev = Reshape(target_shape=(n_hidden,))(o_prev)
u_prev = Reshape((n_hidden,))(u_prev)
u_next = add([o_prev, u_prev]) #u(k + 1) = o(k) + u(k)
return u_next
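# Illustrative generalisation (a sketch, not part of the original notebook): with the
# helpers above, a K-hop network can be wired in a loop. This follows the paper's
# update u(k+1) = o(k) + u(k); note the manual version below deviates and reuses u1.
def build_hops(mem_input, query_input, n_hops=3):
    A, m = get_mi(mem_input)
    u = get_u(B=A, query_input=query_input)  # B is tied to A1
    for _ in range(n_hops):
        p = get_p(m, u)
        C, c_i = get_ci(mem_input)
        o = get_o(c_i, p)
        u = next_u(u, o)
        _, m = get_mi(mem_input, A=C)  # adjacent weight tying: A(k+1) = C(k)
    return u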
#initialize
mem_input = Input(shape=(max_num_memories, max_memory_len))
query_input = Input(shape=(max_ques_len,))
A1, mi_1 = get_mi(mem_input=mem_input)
u1 = get_u(B=A1, query_input=query_input) #B = A1
p1 = get_p(mi_1, u1)
C1, ci_1 = get_ci(mem_input)
o1 = get_o(ci_1, p1)
u2 = next_u(u1, o1)
A2, mi_2 = get_mi(mem_input=mem_input, A=C1) #A2 = C1
p2 = get_p(mi_2, u2)
C2, ci_2 = get_ci(mem_input)
o2 = get_o(ci_2, p2)
u3 = next_u(u1, o2)
A3, mi_3 = get_mi(mem_input=mem_input, A=C2) #A2 = C1
p3 = get_p(mi_3, u3)
C3, ci_3 = get_ci(mem_input)
o3 = get_o(ci_3, p3)
u4 = next_u(u1, o3)
answer = Dense(vocab_size, activation='softmax')(u4)
answer
babi_mod = Model([mem_input, query_input], answer)
babi_mod.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
K.set_value(babi_mod.optimizer.lr, 0.005)
babi_mod.fit([m_train, q_train], a_train, **params, batch_size=32, epochs=10,
             validation_data=([m_test, q_test], a_test))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download and extract datasets
Step2: Parse datasets to (memories, question, answer) tuples, perform word -> idx mapping
Step3: Example Description
Step4: From the mapping, note that "1 Mary moved to the bathroom." has been mapped to "0, 1, 2, 3, 4, 5, 6". (line number is used as a feature).
Step5: | Variable | Count |
Step6: Model
Step7: Step 2
Step8: Step 3
Step9: Step 4
Step10: Step 5
Step11: Step 6
Step12: Multi-hop network
Step13: So, the embedding layer for the query and the input should be shared for the first hop.
Step14: Calculating u2
Step15: Second Layer
Step16: Modular Hops Network
|
2,651
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import gzip
%matplotlib inline
def normalise_01(K):
    """Normalise values of kernel matrix to have smallest value 0 and largest value 1."""
smallest = np.min(K)
largest = np.max(K)
return (K - smallest)/(largest-smallest)
def center(K):
    """Center the kernel matrix, such that the mean (in feature space) is zero."""
one_mat = np.matrix(np.ones(K.shape))
one_vec = np.matrix(np.ones((K.shape[0],1)))
row_sum = np.matrix(np.mean(K,axis=0)).T
R = K - row_sum * one_vec.T - one_vec * row_sum.T +\
np.mean(row_sum.A)*one_mat
return R
def normalise_unit_diag(K):
    """Normalise the kernel matrix, such that all diagonal entries are 1."""
Kii = np.diag(K)
Kii.shape = (len(Kii),1)
return np.divide(K, np.sqrt(np.matrix(Kii)*np.matrix(Kii).T))
def normalise_variance(K):
    """Normalise the kernel matrix, such that the variance (in feature space) is 1."""
one_vec = np.matrix(np.ones((K.shape[0],1)))
inv_sqrt_diag = np.divide(one_vec, np.matrix(np.sqrt(np.diag(K))).T)
    KN = np.multiply(np.kron(one_vec.T, inv_sqrt_diag), K)
    KN = np.multiply(np.kron(one_vec, inv_sqrt_diag.T), KN)  # scale rows of the already column-scaled matrix
return KN
def kernel2distance(K):
    """Convert the kernel matrix into the corresponding non-Euclidean distance."""
D = np.zeros(K.shape)
for ix in range(K.shape[0]):
for iy in range(K.shape[1]):
sqr_dist = K[ix,ix] + K[iy,iy] - 2*K[ix,iy]
if sqr_dist > 0.0:
D[ix,iy] = np.sqrt(sqr_dist)
return D
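# Quick illustrative check: after center(), the feature-space mean is zero,
# so every row/column of the centered kernel sums to ~0.
Xs = np.random.randn(6, 4)   # hypothetical small data matrix
Ktest = np.dot(Xs, Xs.T)     # its linear kernel
print(np.allclose(center(Ktest).sum(axis=0), 0))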
def cloud_gen(num_feat, num_points, centers, width):
    """Generate Gaussian point clouds."""
total_points = np.sum(num_points)
data = np.zeros((num_feat, total_points))
start_idx = 0
for ix, center in enumerate(centers):
C = np.array(center).copy()
C.shape = (len(center),1)
cur_data = C*np.ones((num_feat, num_points[ix])) + width*np.random.randn(num_feat, num_points[ix])
end_idx = start_idx + num_points[ix]
data[:,start_idx:end_idx] = cur_data.copy()
start_idx = end_idx
return data
centers = [[1,1,1],[0,0,np.sqrt(3)],[0,0,0]]
X = cloud_gen(3, [10,15,25], centers, 0.3)
print('Shape of data')
print(X.shape)
raw_kmat = np.dot(X.T,X)
fig = plt.figure(figsize=(12,28))
ax = fig.add_subplot(521)
im = ax.matshow(raw_kmat, cmap=cm.winter)
ax.set_title('original kernel')
fig.colorbar(im)
ax = fig.add_subplot(522)
ax.set_title('original distance')
im = ax.matshow(kernel2distance(raw_kmat), cmap=cm.autumn)
fig.colorbar(im)
ax = fig.add_subplot(523)
im = ax.matshow(normalise_01(raw_kmat), cmap=cm.winter)
ax.set_title('normalise [0,1]')
fig.colorbar(im)
ax = fig.add_subplot(524)
im = ax.matshow(kernel2distance(normalise_01(raw_kmat)), cmap=cm.autumn)
fig.colorbar(im)
ax = fig.add_subplot(525)
im = ax.matshow(center(raw_kmat), cmap=cm.winter)
ax.set_title('zero mean in feature space')
fig.colorbar(im)
ax = fig.add_subplot(526)
im = ax.matshow(kernel2distance(center(raw_kmat)), cmap=cm.autumn)
fig.colorbar(im)
ax = fig.add_subplot(527)
im = ax.matshow(normalise_unit_diag(center(raw_kmat)), cmap=cm.winter)
ax.set_title('Ones along the diagonal')
fig.colorbar(im)
ax = fig.add_subplot(528)
im = ax.matshow(kernel2distance(normalise_unit_diag(center(raw_kmat))), cmap=cm.autumn)
fig.colorbar(im)
ax = fig.add_subplot(529)
im = ax.matshow(normalise_variance(center(raw_kmat)), cmap=cm.winter)
ax.set_title('Unit variance in feature space')
fig.colorbar(im)
ax = fig.add_subplot(5,2,10)
im = ax.matshow(kernel2distance(normalise_variance(center(raw_kmat))), cmap=cm.autumn)
fig.colorbar(im)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Normalisation
Step4: In the following, we use the fact that kernels ($k(\cdot, \cdot)$) are inner products in a feature space with feature mapping $\phi(\cdot)$
Step6: Unit diagonal
Step8: Unit variance
Step10: Kernels and distances
Step12: Generate Gaussian point clouds
Step13: Visualising the effect of different normalisations
|
2,652
|
<ASSISTANT_TASK:>
Python Code:
def h2percentile(h,p):
import numpy as np
s = h.sum()
k = ((s-1) * p/100.)+1
dw = np.floor(k)
up = np.ceil(k)
hc = np.cumsum(h)
if isinstance(p, int):
k1 = np.argmax(hc>=dw)
k2 = np.argmax(hc>=up)
else:
k1 = np.argmax(hc>=dw[:,np.newaxis],axis=1)
k2 = np.argmax(hc>=up[:,np.newaxis],axis=1)
d0 = k1 * (up-k)
d1 = k2 * (k -dw)
return np.where(dw==up,k1,d0+d1)
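# Illustrative cross-check (assumes integer-valued samples): h2percentile on a
# histogram of counts should agree with np.percentile on the raw values, since
# both use the same linear-interpolation rank k = (s-1)*p/100 + 1.
import numpy as np
f_chk = np.random.randint(0, 50, size=1000)
h_chk = np.bincount(f_chk)
print(np.allclose(h2percentile(h_chk, np.array([25, 50, 75])),
                  np.percentile(f_chk, [25, 50, 75])))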
testing = (__name__ == "__main__")
if testing:
! jupyter nbconvert --to python h2percentile.ipynb
import numpy as np
import sys,os
import matplotlib.image as mpimg
ia898path = os.path.abspath('../../')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
if testing:
f = np.array([0,1,2,3,4,5,6,7,8])
h = ia.histogram(f)
print('h2percentile 1 = %f, np.percentile 1 = %f'%(ia.h2percentile(h,1),np.percentile(f,1)))
print('h2percentile 10 = %f, np.percentile 10 = %f'%(ia.h2percentile(h,10),np.percentile(f,10)))
print('h2percentile 50 = %f, np.percentile 50 = %f'%(ia.h2percentile(h,50),np.percentile(f,50)))
print('h2percentile 90 = %f, np.percentile 90 = %f'%(ia.h2percentile(h,90),np.percentile(f,90)))
print('h2percentile 99 = %f, np.percentile 99 = %f'%(ia.h2percentile(h,99),np.percentile(f,99)))
if testing:
f = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
h = ia.histogram(f)
p = [1, 10, 50, 90, 99]
print('percentiles:', p)
print('h2percentile', ia.h2percentile(h,np.array(p)))
print('np.percentile', np.percentile(f,p))
if testing:
import matplotlib.image as mpimg
f = mpimg.imread('../data/cameraman.tif')
h = ia.histogram(f)
p = [1, 10, 50, 90, 99]
print('percentiles:', p)
print('h2percentile', ia.h2percentile(h,np.array(p)))
print('np.percentile', np.percentile(f,p))
print('median', np.median(f))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: Numeric Example
Step3: Image Example
|
2,653
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!sudo apt -y install libportaudio2
!pip install -q tflite-model-maker-nightly
!pip install gdown
from tflite_model_maker import searcher
!gdown https://drive.google.com/uc?id=0BwmD_VLjROrfTHk4NFg2SndKcjQ
!gdown https://drive.google.com/uc?id=0BwmD_VLjROrfM1BxdkxVaTY2bWs
!wget -O all_train.txt https://raw.githubusercontent.com/abisee/cnn-dailymail/master/url_lists/all_train.txt
!tar xzf cnn_stories.tgz
!tar xzf dailymail_stories.tgz
#@title Save the highlights and urls to the CSV file
#@markdown Load the highlights from the stories of CNN / Daily Mail, map urls with highlights, and save them to the CSV file.
CNN_FRACTION = 0.05 #@param {type:"number"}
DAILYMAIL_FRACTION = 0.05 #@param {type:"number"}
import csv
import hashlib
import os
import tensorflow as tf
dm_single_close_quote = u"\u2019" # unicode
dm_double_close_quote = u"\u201d"
END_TOKENS = [
".", "!", "?", "...", "'", "`", '"', dm_single_close_quote,
dm_double_close_quote, ")"
] # acceptable ways to end a sentence
def read_file(file_path):
    """Reads lines in the file."""
lines = []
with tf.io.gfile.GFile(file_path, "r") as f:
for line in f:
lines.append(line.strip())
return lines
def url_hash(url):
    """Gets the hash value of the url."""
h = hashlib.sha1()
url = url.encode("utf-8")
h.update(url)
return h.hexdigest()
def get_url_hashes_dict(urls_path):
    """Gets hashes dict that maps the hash value to the original url in file."""
urls = read_file(urls_path)
return {url_hash(url): url[url.find("id_/") + 4:] for url in urls}
def find_files(folder, url_dict):
    """Finds files corresponding to the urls in the folder."""
all_files = tf.io.gfile.listdir(folder)
ret_files = []
for file in all_files:
# Gets the file name without extension.
filename = os.path.splitext(os.path.basename(file))[0]
if filename in url_dict:
ret_files.append(os.path.join(folder, file))
return ret_files
def fix_missing_period(line):
    """Adds a period to a line that is missing a period."""
if "@highlight" in line:
return line
if not line:
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def get_highlights(story_file):
Gets highlights from a story file path.
lines = read_file(story_file)
# Put periods on the ends of lines that are missing them
# (this is a problem in the dataset because many image captions don't end in
# periods; consequently they end up in the body of the article as run-on
# sentences)
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
highlight_list = []
next_is_highlight = False
for line in lines:
if not line:
continue # empty line
elif line.startswith("@highlight"):
next_is_highlight = True
elif next_is_highlight:
highlight_list.append(line)
# Make highlights into a single string.
highlights = "\n".join(highlight_list)
return highlights
url_hashes_dict = get_url_hashes_dict("all_train.txt")
cnn_files = find_files("cnn/stories", url_hashes_dict)
dailymail_files = find_files("dailymail/stories", url_hashes_dict)
# The size to be selected.
cnn_size = int(CNN_FRACTION * len(cnn_files))
dailymail_size = int(DAILYMAIL_FRACTION * len(dailymail_files))
print("CNN size: %d"%cnn_size)
print("Daily Mail size: %d"%dailymail_size)
with open("cnn_dailymail.csv", "w") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=["highlights", "urls"])
writer.writeheader()
for file in cnn_files[:cnn_size] + dailymail_files[:dailymail_size]:
highlights = get_highlights(file)
# Gets the filename which is the hash value of the url.
filename = os.path.splitext(os.path.basename(file))[0]
url = url_hashes_dict[filename]
writer.writerow({"highlights": highlights, "urls": url})
!wget -O universal_sentence_encoder.tflite https://tfhub.dev/google/lite-model/universal-sentence-encoder-qa-ondevice/1?lite-format=tflite
data_loader = searcher.TextDataLoader.create("universal_sentence_encoder.tflite", l2_normalize=True)
data_loader.load_from_csv("cnn_dailymail.csv", text_column="highlights", metadata_column="urls")
scann_options = searcher.ScaNNOptions(
distance_measure="dot_product",
tree=searcher.Tree(num_leaves=140, num_leaves_to_search=4),
score_ah=searcher.ScoreAH(dimensions_per_block=1, anisotropic_quantization_threshold=0.2))
model = searcher.Searcher.create_from_data(data_loader, scann_options)
model.export(
export_filename="searcher.tflite",
userinfo="",
export_format=searcher.ExportFormat.TFLITE)
from tflite_support.task import text
# Initializes a TextSearcher object.
searcher = text.TextSearcher.create_from_file("searcher.tflite")
# Searches the input query.
results = searcher.search("The Airline Quality Rankings Report looks at the 14 largest U.S. airlines.")
print(results)
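# Illustrative follow-up: each returned neighbor carries the metadata stored at
# indexing time (here, the article URL) plus a distance score.
for neighbor in results.nearest_neighbors:
    print(neighbor.metadata, neighbor.distance)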
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Text Searcher with TensorFlow Lite Model Maker
Step2: Import the required packages.
Step3: Prepare the dataset
Step10: Then, save the data into a CSV file that can be loaded by the tflite_model_maker library. The code is based on the logic used to load this data in tensorflow_datasets. We can't use tensorflow_datasets directly since it doesn't contain the urls, which are used in this colab.
Step11: Build the text Searcher model
Step 1. Load the dataset
Step12: Create a searcher.TextDataLoader instance and use the data_loader.load_from_csv method to load the dataset. It takes ~10 minutes for this step.
Step13: For image use cases, you can create a searcher.ImageDataLoader instance and then use data_loader.load_from_folder to load images from the folder. The searcher.ImageDataLoader instance needs to be created by a TFLite embedder model because it will be leveraged to encode queries to feature vectors and be exported with the TFLite Searcher model. For instance
Step14: In the above example, we define the following options
Step15: Test the TFLite model on your query
|
2,654
|
<ASSISTANT_TASK:>
Python Code:
# HIDDEN - generic nonsense for setting up environment
from datascience import *
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
from ipywidgets import interact
# datascience version number of last run of this notebook
version.__version__
die = Table().with_column('side', [1,2,3,4,5,6])
die
# Simulate the roll of a die by sampling from the die table
def roll_die():
return die.sample(1)['side'][0]
# roll it. Try this over and over and see what you get
roll_die()
# Simulate rolling it many times, creating a table that records the rolls
num_rolls = 600
rolls = Table().with_column('roll', [roll_die() for i in range(num_rolls)])
rolls
bins = np.arange(1,8)
rolls.hist(bins=bins, normed=False)
# Normalizing gives a distribution: the probability of each side appearing, 1/6.
rolls.hist(normed=True,bins=bins)
roll_dist = rolls.bin(normed=True,bins=bins).take(range(6))
roll_dist
roll_dist['roll density']
roll_dist['Variation'] = (roll_dist['roll density'] - 1/6)/(1/6)
roll_dist
# What is the average value of a roll?
sum(roll_dist['bin']*roll_dist['roll density'])
np.mean(rolls['roll'])
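# Sanity check: the theoretical mean of a fair die is (1+2+...+6)/6 = 3.5,
# so the sample mean above should be close to it.
sum(range(1, 7)) / 6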
# Life is about rolling lots of dice.
# Simulate rolling n dice.
def roll(n):
    """Roll n dice. Return a table of the rolls."""
return die.sample(n, with_replacement=True)
# try it out. many times
roll(10)
def show_die_dist(n):
    """Roll a die n times and show the distribution of sides that appear."""
roll(n).hist(bins=np.arange(1,8))
# We can now use the ipywidget we had included at the beginning.
interact(show_die_dist, n=(10, 1000, 10))
num_die = 10
num_rolls = 100
# Remember - referencing a column gives an array
roll(num_die)['side']
# Simulate rolling num_die dice num_rolls times and build a table of the result
rolls = Table(["die_"+str(i) for i in range(num_die)]).with_rows([roll(num_die)['side'] for i in range(num_rolls)])
rolls
# If we think of each row as a life experience, what is the life like?
label = "{}_dice".format(num_die)
sum_rolls = Table().with_column(label, [np.sum(roll(num_die)['side']) for i in range(num_rolls)])
sum_rolls.hist(range=[10,6*num_die], normed=False)
sum_rolls.stats()
# Or as a distribution
sum_rolls.hist(range=[10,6*num_die],normed=True)
# Or normalize by the number of dice ...
#
Table().with_column(label, [np.sum(roll(num_die)['side'])/num_die for i in range(num_rolls)]).hist(normed=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a table as a model of a stochastic phenomenon
Step2: Composition
Step3: Visualization
Step4: Computing on distributions
Step6: Statistical thinking
Step8: Interactive visualization
Step9: Likelihood
|
2,655
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np # numerical computing module
import matplotlib.pyplot as plt # plotting module
import pandas as pd # data module
import seaborn as sns
import scipy as sp
import scipy.interpolate, scipy.integrate # for interpolating and integrating
import wget, tarfile # to download and decompress the data
from __future__ import print_function
# this line makes the plots appear inside the notebook
%matplotlib inline
def awesome_settings():
# awesome plot options
sns.set_style("white")
sns.set_style("ticks")
sns.set_context("paper", font_scale=2)
sns.set_palette(sns.color_palette('bright'))
# image stuff
plt.rcParams['figure.figsize'] = (12.0, 6.0)
plt.rcParams['savefig.dpi'] = 60
plt.rcParams['lines.linewidth'] = 4
return
%config InlineBackend.figure_format='retina'
awesome_settings()
import numericalunits as nu
Tcell = 300 * nu.K
data_url = 'http://rredc.nrel.gov/solar/spectra/am1.5/ASTMG173/compressed/ASTMG173.csv.tar'
a_file = wget.download(data_url)  # returns the local file name as a string
download_as_tarfile_object = tarfile.open(a_file)
download_as_tarfile_object.extract('ASTMG173.csv')  # write the CSV to disk
csv_file = 'ASTMG173.csv'
downloaded_array = np.genfromtxt(csv_file, delimiter=",", skip_header=2)
downloaded_array.shape
AM15 = downloaded_array[:,[0,2]]
print(AM15)
AM15[:,0] *= nu.nm
AM15[:,1] *= nu.W * nu.m**-2 * nu.nm**-1
# The ASTM G173 spectrum covers 280-4000 nm; these were left as blanks in the
# activity and are filled in here so the later integrals run.
wavelength_min = 280 * nu.nm
wavelength_max = 4000 * nu.nm
E_min = nu.hPlanck * nu.c0 / wavelength_max
E_max = nu.hPlanck * nu.c0 / wavelength_min
AM15interp = scipy.interpolate.interp1d(AM15[:,0], AM15[:,1])
# One possible completion of the activity (assumed): evaluate the interpolant
# on a dense wavelength grid.
x = np.linspace(wavelength_min, wavelength_max, 2000)
y = AM15interp(x)
def FotonesPorTEA(Ephoton):
wavelength = nu.hPlanck * nu.c0 / Ephoton
return AM15interp(wavelength) * (1 / Ephoton) * (nu.hPlanck * nu.c0 / Ephoton**2)
print(FotonesPorTEA(2 * nu.eV) * (1 * nu.meV) * (1 * nu.m**2) * (1 * nu.s))
PowerPorTEA = lambda E : E * FotonesPorTEA(E)
# quad() performs the numerical integration
solar_constant = sp.integrate.quad(PowerPorTEA, E_min, E_max, full_output=1)[0]
print(solar_constant / (nu.W/nu.m**2))
def fotones_arriba_gap(Egap):
return scipy.integrate.quad(FotonesPorTEA, Egap, E_max, full_output=1)[0]
print(fotones_arriba_gap(1.1 * nu.eV) * (1 * nu.m**2) * (1 * nu.s))
Egap_list = np.linspace(0.4 * nu.eV, 3 * nu.eV, num=100)
y_values = np.array([fotones_arriba_gap(E) for E in Egap_list])
plt.plot(Egap_list / nu.eV , y_values / (1e21 * nu.m**-2 * nu.s**-1))
plt.xlabel("Bandgap (eV)")
plt.ylabel("fotones arriba del gap ($10^{21}$ m$^{-2} \cdot $s$^{-1}$)");
def RR0(Egap):
integrand = lambda E : E**2 / (np.exp(E / (nu.kB * Tcell)) - 1)
integral = scipy.integrate.quad(integrand, Egap, E_max, full_output=1)[0]
return ((2 * np.pi) / (nu.c0**2 * nu.hPlanck**3)) * integral
def densidad_de_corriente(V, Egap):
return nu.e * (fotones_arriba_gap(Egap) - RR0(Egap) * np.exp(nu.e * V / (nu.kB * Tcell)))
def JSC(Egap):
return densidad_de_corriente(0, Egap)
def VOC(Egap):
return (nu.kB * Tcell / nu.e) * np.log(fotones_arriba_gap(Egap) / RR0(Egap))
print(JSC(1.1 * nu.eV) / (nu.mA / nu.cm**2))
print(VOC(1.1 * nu.eV) / nu.V)
from scipy.optimize import fmin
def fmax(func_to_maximize, initial_guess=0):
    """Return the x that maximizes func_to_maximize(x)."""
func_to_minimize = lambda x : -func_to_maximize(x)
return fmin(func_to_minimize, initial_guess, disp=False)[0]
def V_mpp(Egap):
    """Voltage at the maximum power point."""
return fmax(lambda V : V * densidad_de_corriente(V, Egap))
def J_mpp(Egap):
    """Current at the maximum power point."""
return densidad_de_corriente(V_mpp(Egap), Egap)
def max_power(Egap):
V = V_mpp(Egap)
return V * densidad_de_corriente(V, Egap)
def max_efficiencia(Egap):
return max_power(Egap) / solar_constant
max_efficiencia(1.1 * nu.eV)
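# Illustrative scan (each call runs an optimizer, so this is slow): the
# Shockley-Queisser-style optimum should land in the ~1.1-1.4 eV range.
for Eg in [0.9, 1.1, 1.34, 1.6]:
    print(Eg, max_efficiencia(Eg * nu.eV))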
def electricidad_util(Egap):
return max_efficiencia(Egap)
def energia_debajo_bandgap(Egap):
integrand = lambda E : E * FotonesPorTEA(E)
return scipy.integrate.quad(integrand, E_min, Egap, full_output=1)[0] / solar_constant
def exceso_arriba_bandgap(Egap):
integrand = lambda E : (E - Egap) * FotonesPorTEA(E)
return scipy.integrate.quad(integrand, Egap, E_max, full_output=1)[0] / solar_constant
def mpp_recombination(Egap):
    return (fotones_arriba_gap(Egap) - J_mpp(Egap) / nu.e) * Egap / solar_constant
def mpp_voltage_debajo_bangap(Egap):
return J_mpp(Egap) * (Egap / nu.e - V_mpp(Egap)) / solar_constant
mpp_recombination(1.1 * nu.eV)
Egap_list = np.linspace(0.4 * nu.eV, 3 * nu.eV, num=25)
loss_list = []
for indx,Egap in enumerate(Egap_list):
e_util = electricidad_util(Egap)
gap_abajo = energia_debajo_bandgap(Egap)
gap_arriba = exceso_arriba_bandgap(Egap)
mpp_recomb = mpp_recombination(Egap)
mpp_voltaje = mpp_voltage_debajo_bangap(Egap)
loss_list.append([e_util,gap_abajo,gap_arriba,mpp_recomb,mpp_voltaje])
print("%2.2f%% .. "%(indx/float(len(Egap_list))*100.0),end='')
loss_list = np.array(loss_list)
# cumulative sum so the loss bands stack on top of one another
loss_list = np.cumsum(loss_list,axis=1)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.fill_between(Egap_list / nu.eV, 0, loss_list[:,0], facecolor="k")
ax1.fill_between(Egap_list / nu.eV, loss_list[:,0], loss_list[:,1], facecolor="m")
ax1.fill_between(Egap_list / nu.eV, loss_list[:,1], loss_list[:,2], facecolor="g")
ax1.fill_between(Egap_list / nu.eV, loss_list[:,2], loss_list[:,3], facecolor="b")
ax1.fill_between(Egap_list / nu.eV, loss_list[:,3], 1, facecolor="0.75")
plt.title('POWER GOES TO...\n'
          'Useful electricity (black);\n'
          'below the gap (magenta);\n'
          'excess above the gap (green);\n'
          'current loss from radiative recombination (blue);\n'
          'voltage below the bandgap (gray)')
plt.xlabel('Bandgap (eV)')
plt.ylabel('Fraction of incident light')
plt.xlim(0.4, 3)
plt.ylim(0,1);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Awesome plots!
Step2: 1 Plotting the beautiful solar spectrum
Step3: To use it, we attach units to numbers, for example
Step4: Download the data
Step5: What shape does the data have?
Step6: Manipulate the data
Step7: Let's give units to each column
Step8: Activity
Step9: We create a function that interpolates intermediate values
Step10: We get the x, y values
Step11: Time to plot
Step12: Example
Step13: Solar constant
Step14: 3 - Photons above the bandgap
Step15: Example
Step16: As a function of energy
Step17: 4 Recombination
Step18: Recombination rate when electron QFL and hole QFL are split
Step19: Example
Step23: Ideal bandgap and maximum efficiency
Step24: Example
Step25: Activity
Step26: Activity
Step27: What does it all add up to?
|
2,656
|
<ASSISTANT_TASK:>
Python Code:
str_massaction = """
A -> B; 'k1'
B + C -> A + C; 'k2'
2 B -> B + C; 'k3'
"""
rsys3 = ReactionSystem.from_string(str_massaction, substance_factory=lambda formula: Substance(formula))
rsys3.substance_names()
odesys3, extra3 = get_odesys(rsys3, include_params=False, lower_bounds=[0, 0, 0])
extra3['param_keys'], extra3['unique']
odesys3.exprs, odesys3.params, odesys3.names, odesys3.param_names
def integrate_and_plot(A0=1.0, B0=0.0, C0=0.0, lg_k1=-2, lg_k2=4, lg_k3=7, lg_tend=9):
plt.figure(figsize=(14, 4))
tout, yout, info = odesys3.integrate(
10**lg_tend, {'A': A0, 'B': B0, 'C': C0},
{'k1': 10**lg_k1, 'k2': 10**lg_k2, 'k3': 10**lg_k3},
integrator='cvode', nsteps=3000)
plt.subplot(1, 2, 1)
odesys3.plot_result(xscale='log', yscale='log')
plt.legend(loc='best')
plt.subplot(1, 2, 2)
plt.plot(tout[tout<.05], yout[tout<.05, odesys3.names.index('B')])
    _ = plt.legend(loc='best')
interact(integrate_and_plot) #, **kw)
# We could also have used SymPy to construct symbolic rates:
import sympy
rsys_sym = ReactionSystem.from_string("""
A -> B; sp.Symbol('k1')
B + C -> A + C; sp.Symbol('k2')
2 B -> B + C; sp.Symbol('k3')
""", rxn_parse_kwargs=dict(globals_={'sp': sympy}), substance_factory=lambda formula: Substance(formula))
odesys_sym, _ = get_odesys(rsys_sym, params=True)
for attr in 'exprs params names param_names'.split():
print(getattr(odesys_sym, attr))
rsys.substances['D'] = D
uni, not_uni = UnimolecularTable.from_ReactionSystem(rsys)
uni
bi, not_bi = BimolecularTable.from_ReactionSystem(rsys)
bi
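# from_ReactionSystem also returns the reactions that could not be classified
# into the table; empty containers mean every reaction was covered.
print(not_uni, not_bi)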
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: We could also have parsed the reactions from a string
Step3: For larger systems it is easy to lose track of which substances are actually playing a part; here the HTML tables can help (note the yellow background color)
|
2,657
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
from em_utilities import *
wiki = graphlab.SFrame('people_wiki.gl/').head(5000)
wiki['tf_idf'] = graphlab.text_analytics.tf_idf(wiki['text'])
tf_idf, map_index_to_word = sframe_to_scipy(wiki, 'tf_idf')
tf_idf = normalize(tf_idf)
for i in range(5):
doc = tf_idf[i]
print(np.linalg.norm(doc.todense()))
from sklearn.cluster import KMeans
np.random.seed(5)
num_clusters = 25
# Use scikit-learn's k-means to simplify workflow
kmeans_model = KMeans(n_clusters=num_clusters, n_init=5, max_iter=400, random_state=1, n_jobs=-1)
kmeans_model.fit(tf_idf)
centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_
means = [centroid for centroid in centroids]
num_docs = tf_idf.shape[0]
weights = []
for i in xrange(num_clusters):
# Compute the number of data points assigned to cluster i:
num_assigned = (cluster_assignment == i).sum() # YOUR CODE HERE
w = float(num_assigned) / num_docs
weights.append(w)
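# Sanity check: the weights are relative cluster frequencies and must sum to 1.
print(sum(weights))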
covs = []
for i in xrange(num_clusters):
member_rows = tf_idf[cluster_assignment==i]
cov = (member_rows.power(2) - 2*member_rows.dot(diag(means[i]))).sum(axis=0).A1 / member_rows.shape[0] \
+ means[i]**2
cov[cov < 1e-8] = 1e-8
covs.append(cov)
out = EM_for_high_dimension(tf_idf, means, covs, weights, cov_smoothing=1e-10)
out['loglik']
# Fill in the blanks
def visualize_EM_clusters(tf_idf, means, covs, map_index_to_word):
print('')
print('==========================================================')
num_clusters = len(means)
for c in xrange(num_clusters):
print('Cluster {0:d}: Largest mean parameters in cluster '.format(c))
print('\n{0: <12}{1: <12}{2: <12}'.format('Word', 'Mean', 'Variance'))
# The k'th element of sorted_word_ids should be the index of the word
# that has the k'th-largest value in the cluster mean. Hint: Use np.argsort().
sorted_word_ids = np.argsort(means[c])[::-1] # YOUR CODE HERE
for i in sorted_word_ids[:5]:
            print('{0: <12}{1:<10.2e}{2:10.2e}'.format(map_index_to_word['category'][i],
                                                       means[c][i],
                                                       covs[c][i]))
        print('\n==========================================================')
'''By EM'''
visualize_EM_clusters(tf_idf, out['means'], out['covs'], map_index_to_word)
np.random.seed(5)
num_clusters = len(means)
num_docs, num_words = tf_idf.shape
random_means = []
random_covs = []
random_weights = []
for k in range(num_clusters):
# Create a numpy array of length num_words with random normally distributed values.
# Use the standard univariate normal distribution (mean 0, variance 1).
# YOUR CODE HERE
mean = np.random.normal(0, 1, num_words)
# Create a numpy array of length num_words with random values uniformly distributed between 1 and 5.
# YOUR CODE HERE
cov = np.random.uniform(1, 5, num_words)
# Initially give each cluster equal weight.
# YOUR CODE HERE
weight = 1. / num_clusters
random_means.append(mean)
random_covs.append(cov)
random_weights.append(weight)
out_random_init = EM_for_high_dimension(tf_idf, random_means, random_covs, random_weights, cov_smoothing=1e-5)
out_random_init['loglik'][-1]
out['loglik'][-1]
# YOUR CODE HERE. Use visualize_EM_clusters, which will require you to pass in tf_idf and map_index_to_word.
visualize_EM_clusters(tf_idf, out_random_init['means'], out_random_init['covs'], map_index_to_word)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We also have a Python file containing implementations for several functions that will be used during the course of this assignment.
Step2: Load Wikipedia data and extract TF-IDF features
Step3: Using a utility we provide, we will create a sparse matrix representation of the documents. This is the same utility function you used during the previous assignment on k-means with text data.
Step4: As in the previous assignment, we will normalize each document's TF-IDF vector to be a unit vector.
Step5: We can check that the length (Euclidean norm) of each row is now 1.0, as expected.
Step6: EM in high dimensions
Step7: Initializing cluster weights
Step8: Initializing covariances
Step9: Running EM
Step10: Interpret clustering results
Step11: Quiz Question. Select all the topics that have a cluster in the model created above. [multiple choice]
Step12: Quiz Question
Step13: Quiz Question
Step14: Quiz Question
|
2,658
|
<ASSISTANT_TASK:>
Python Code:
import os
import time
import traceback
import numpy as np
import matplotlib.pyplot as plt
# NOTE: `utils`, `krn`, `est`, `smp` and `lpa` used below are this project's
# local modules (utilities, kernels, estimators, samplers, Laplace
# approximation); their import statements are assumed to live in the stripped
# notebook header.

data_dir = os.path.join(os.environ['DATA_DIR'], 'uci')
exp_dir = os.path.join(os.environ['EXP_DIR'], 'apm_mcmc')
data_set = 'pima'
method = 'pmmh'
n_chain = 10
chain_offset = 0
seeds = np.random.random_integers(10000, size=n_chain)
n_imp_sample = 1
adapt_run = dict(
low_acc_thr = 0.15,
upp_acc_thr = 0.30,
batch_size = 100,
n_batch = 20
)
init_log_sigma_prop_scale = 0.5
init_log_tau_prop_scale = 0.5
n_sample_main = 10000
epsilon = 1e-8
X = np.genfromtxt(os.path.join(data_dir, data_set + '_X.txt'))
y = np.genfromtxt(os.path.join(data_dir, data_set + '_y.txt'))
X, X_mn, X_sd = utils.normalise_inputs(X)
prior = dict(
a_tau = 1.,
b_tau = 1. / X.shape[1]**0.5,
a_sigma = 1.1,
b_sigma = 0.1
)
run_params = dict(
data_set = data_set,
n_data = X.shape[0],
n_feature = X.shape[1],
method = method,
n_imp_sample = n_imp_sample,
epsilon = epsilon,
prior = prior,
adapt_run = adapt_run,
init_log_sigma_prop_scale = init_log_sigma_prop_scale,
init_log_tau_prop_scale = init_log_tau_prop_scale,
n_sample_main = n_sample_main
)
prng = np.random.RandomState()
kernel_func = lambda K, X, theta: (
krn.isotropic_squared_exponential_kernel(K, X, theta, epsilon)
)
det_ml_estimator = est.LogMarginalLikelihoodLaplaceEstimator(X, y, kernel_func)
imp_ml_estimator = est.LogMarginalLikelihoodApproxPosteriorISEstimator(
X, y, kernel_func, lpa.laplace_approximation)
log_f_estimator_adapt = lambda theta: (
det_ml_estimator(theta) +
utils.log_gamma_log_pdf(theta[0], prior['a_sigma'], prior['b_sigma']) +
utils.log_gamma_log_pdf(theta[1], prior['a_tau'], prior['b_tau'])
)
log_f_estimator_main = lambda theta: (
imp_ml_estimator(prng.normal(size=(y.shape[0], n_imp_sample)), theta)[0] +
utils.log_gamma_log_pdf(theta[0], prior['a_sigma'], prior['b_sigma']) +
utils.log_gamma_log_pdf(theta[1], prior['a_tau'], prior['b_tau'])
)
prop_sampler = lambda theta, prop_scales: np.r_[
theta[0] + prop_scales[0] * prng.normal(),
theta[1] + prop_scales[1] * prng.normal()
]
log_prop_density = lambda theta_prop, theta_curr, prop_scales: (
-0.5 * (
((theta_prop[0] - theta_curr[0]) / prop_scales[0])**2 +
((theta_prop[1] - theta_curr[1]) / prop_scales[1])**2
)
)
init_prop_scales = np.array([
init_log_sigma_prop_scale,
init_log_tau_prop_scale
])
sampler = smp.PMMHSampler(
log_f_estimator_adapt, log_prop_density, prop_sampler, init_prop_scales, prng)
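# Illustrative smoke test before launching the chains: evaluate the
# unnormalised log-posterior once at theta = [log(sigma), log(tau)] = [0, 0],
# i.e. sigma = tau = 1.
theta0 = np.array([0.0, 0.0])
print(log_f_estimator_adapt(theta0))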
for c in range(n_chain):
try:
print('Starting chain {0}...'.format(c + 1))
prng.seed(seeds[c])
theta_init = np.array([
np.log(prng.gamma(prior['a_sigma'], 1. / prior['b_sigma'])),
np.log(prng.gamma(prior['a_tau'], 1. / prior['b_tau'])),
])
sampler.prop_scales = init_prop_scales
print('Starting initial adaptive run...')
adapt_thetas, adapt_prop_scales, adapt_accept_rates = (
sampler.adaptive_run(
theta_init, adapt_run['batch_size'],
adapt_run['n_batch'], adapt_run['low_acc_thr'],
adapt_run['upp_acc_thr'], utils.adapt_factor_func, True
)
)
print('Final proposal scales: {0}'.format(adapt_prop_scales[-1]))
print('Starting main run...')
sampler.log_f_estimator = log_f_estimator_main
imp_ml_estimator.reset_cubic_op_count()
start_time = time.clock()
thetas, n_reject = sampler.get_samples(adapt_thetas[-1], n_sample_main)
comp_time = time.clock() - start_time
n_cubic_ops = imp_ml_estimator.n_cubic_ops
tag = '{0}_{1}_chain_{2}'.format(data_set, method, c + 1 + chain_offset)
print('Main run completed: accept rate {0:.1f}%, time {1}s, # cubic ops {2}'
.format((1. - n_reject * 1./ n_sample_main) * 100., comp_time, n_cubic_ops))
utils.save_adaptive_run(exp_dir, tag, adapt_thetas, adapt_prop_scales,
adapt_accept_rates, thetas, n_reject,
n_cubic_ops, comp_time, run_params)
utils.plot_trace(thetas)
plt.show()
except Exception as e:
print('Exception encountered')
print(e.message)
print(traceback.format_exc())
print('Skipping to next chain')
continue
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify main run parameters
Step2: Load data and normalise inputs
Step3: Specify prior parameters (data dependent so do after data load)
Step4: Assemble run parameters into dictionary for recording with results
Step5: Create necessary run objects
Step6: Run chains, starting from random sample from prior in each and saving results to experiments directory
|
2,659
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LogisticRegression
%matplotlib inline
df_train = pd.read_csv('../input/train.csv')
df_test = pd.read_csv('../input/test.csv')
df_train.head()
df_train.describe()
df_train.info()
df_train.isnull().sum()
df_train.describe(include=['O'])
df_test.head()
df_test.describe()
df_test.describe(include=['O'])
sns.countplot(x='Survived', data=df_train)
plt.show()
df_train['Percentage'] = 1 # this is a helper column
df_train[['Percentage','Survived']].groupby('Survived').count().apply(lambda x: (100 * x)/x.sum())
df_train[['Pclass','Survived']].groupby('Pclass').mean()
df_train['Count'] = 1 # this is a helper column
df_train[['Pclass','Survived','Count']].groupby(['Pclass','Survived']).count()
df_train[['Sex','Survived']].groupby('Sex').mean()
df_train[['Sex','Survived','Count']].groupby(['Sex','Survived']).count()
df_train[['Pclass','Sex','Survived','Count']].groupby(['Pclass','Sex','Survived']).count()
df_train[['Pclass','Sex','Survived']].groupby(['Pclass','Sex']).mean()
sns.boxplot(x='Survived', y='Age', hue='Sex',data=df_train, palette="coolwarm")
plt.show()
def SimplyAge(colage):
colage = colage.fillna(-1)
bins = (-2,0,5,10,20,35,60,100)
colage = pd.cut(colage,bins)
return colage
colage = SimplyAge(df_train['Age'])
# keep the binned series; the same binning is reused for the test set later
df_train['Age'] = colage
dfage = df_train
#dfage = pd.DataFrame()
#dfage['Age'] = colage
#dfage['Survived'] = df_train['Survived']
df_train[['Age','Survived','Count']].groupby(['Age','Survived']).count()
df_train[['Age','Survived']].groupby('Age').mean()
df_train[['Age','Survived','Sex','Count']].groupby(['Age','Sex','Survived']).count()
df_train[['Age','Sex','Survived']].groupby(['Age','Sex']).mean()
#sns.countplot(x='Age',hue='Survived',data=dfage)
sns.countplot(x='Age',data=dfage,color='Red')
sns.barplot(x='Age',y='Survived',data=dfage,estimator=np.sum,color='Blue')
sns.barplot(x='Age',y='Count',hue='Survived',data=dfage,estimator=np.sum)
sns.barplot(x='Pclass', y='Survived', hue='Sex',data=df_train,estimator=np.sum)
plt.show()
del df_train['Cabin']
del df_train['Embarked']
del df_train['Ticket']
del df_train['Name']
del df_train['PassengerId']
# remvoe the helper columns
del df_train['Count']
del df_train['Percentage']
df_train.columns
df_train['Sex'] = df_train['Sex'].map({'female':1,'male':0}).astype(int)
df_train['Age'] = df_train['Age'].map({'(-2, 0]':-1,'(0, 5]':1,'(5, 10]':2,'(10, 20]':3,'(20, 35]':4,'(35, 60]':5,'(60, 100]':6}).astype(int)
df_train.head()
x_train = df_train.drop("Survived",axis=1)
y_train = df_train['Survived']
logreg = LogisticRegression()
logreg.fit(x_train,y_train)
logreg.score(x_train, y_train)
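# Illustrative: inspect which features the fitted model weights up or down.
pd.DataFrame({'feature': x_train.columns, 'coef': logreg.coef_[0]})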
del df_test['Cabin']
del df_test['Embarked']
del df_test['Ticket']
del df_test['Name']
test_id = df_test['PassengerId']
del df_test['PassengerId']
# Sex
df_test['Sex'] = df_test['Sex'].map({'female':1,'male':0}).astype(int)
# Age
colage = SimplyAge(df_test['Age'])
df_test['Age'] = colage
df_test['Age'] = df_test['Age'].map({'(-2, 0]':-1,'(0, 5]':1,'(5, 10]':2,'(10, 20]':3,'(20, 35]':4,'(35, 60]':5,'(60, 100]':6}).astype(int)
df_test.head()
df_test.info()
df_test[df_test['Fare'].isnull()]
df_test[df_test['Pclass']==3]['Fare'].mean()
df_test['Fare'] = df_test['Fare'].fillna(df_test[df_test['Pclass']==3]['Fare'].mean())
Y_pred = logreg.predict(df_test)
ypred = pd.DataFrame()
ypred['PassengerId'] = test_id
ypred['Survived'] = Y_pred
ypred
ypred.to_csv("1st_pred.csv",index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load training data and test data.
Step2: Well, I don't really have any idea how to handle these data, so let's just take a look at them. Let's start with the training data.
Step3: There are some data missing. Age could be an important feature. Cabin looks like a useless feature, so I am going to discard it. My first question: how do you decide which features to use and which to drop?
Step4: It seems some people share one cabin. Do cabin-mates help each other and increase their chance of survival? Unfortunately, Cabin has too little data. Also, one ticket number is shared by up to 7 people, which may mean they travelled as a group and were more likely to help each other survive.
Step5: Relationship between Features and Survival
Step6: Pclass vs. Survived
Step7: Apparently, the smaller the Pclass, the higher the survival rate. Richer is better, right?
Step8: Females have a much higher survival rate than males. Sex is another feature strongly correlated with survival.
Step9: The female survival rate in Pclass 1 and 2 is similar, but Pclass 3 is much lower. The story goes that the gate from Pclass 3 to the deck was locked at the very beginning. That's sad...
Step10: Well, the babies appear to have the highest survival rate.
Step11: I am going to remove the other columns which have NA, just for a quick-and-dirty test.
Step12: Now I am going to predict on the test data.
Step13: The NaN in Fare belongs to a Pclass 3 passenger, so I am going to fill it with the mean Fare of Pclass 3.
|
2,660
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
url = 'http://aima.cs.berkeley.edu/data/iris.csv'
df = pd.read_csv(url,delimiter=',')
df.head(2)
df.describe()
from numpy import genfromtxt, zeros
data = genfromtxt(url,delimiter=',',usecols=(0,1,2,3))
target = genfromtxt(url,delimiter=',',usecols=(4),dtype=str)
print(data.shape)
print(target.shape)
print(set(target))
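# Quick class-balance check: the iris data should hold 50 samples per species.
from collections import Counter
print(Counter(target))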
from pylab import plot, show
plot(data[target=='setosa',0],data[target=='setosa',2],'bo')
plot(data[target=='versicolor',0],data[target=='versicolor',2],'ro')
plot(data[target=='virginica',0],data[target=='virginica',2],'go')
show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a variable url pointing to a CSV file, then load it with the read_csv() function.
Step2: The variable df now holds a DataFrame object, a two-dimensional tabular pandas data structure. Next, call the head(n) method to display the first n rows; the notebook renders them as an HTML table, as shown below:
|
2,661
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
from mpltools import style
import numpy as np
style.use('ggplot')
%matplotlib inline
import pandas as pd
import shelve
from collections import defaultdict
count_dict = {}
for line in open('../mapreduce/predicted_label_counts.txt'):
uri, label, values = line.split('\t')
upper_count, lower_count = values.split(',')
count_dict[(uri, label)] = {'infer_normal': int(upper_count), 'infer_lower': int(lower_count), 'len': len(label.split('_')),
'label': label, 'organ_normal': 0, 'organ_lower': 0, 'uri': uri}
for line in open('../mapreduce/organic_label_counts.txt'):
uri, label, values = line.split('\t')
if (uri, label) in count_dict:
upper_count, lower_count = values.split(',')
count_dict[(uri, label)].update({'organ_normal': int(upper_count), 'organ_lower': int(lower_count)})
counts_df = pd.DataFrame(count_dict.values())
del count_dict
counts_df.head()
from __future__ import division
# We never exclude uppercase labels since we don't match at the beginning of a sentence.
includes = open('../mapreduce/unambiguous_labels.txt', 'w')
for row in counts_df.iterrows():
row = row[1]
exclude = False
label = row['label']
uri = row['uri']
# skip uppercase
if label.isupper():
includes.write(label+'\t'+uri+'\n')
continue
# if label appears only in lowercase - add to lower includes
if row['organ_normal'] == 0: # means label is lowercase
if row['organ_lower'] > 1:
includes.write(label+'\t'+uri+'\n')
continue
else:
infer_ratio = row['infer_normal']/(row['infer_lower'] or 1)
orig_ratio = row['organ_normal']/(row['organ_lower'] or 1)
if infer_ratio == 0:
            # weird label, e.g. 中华人民共和国
continue
# always write a normal-case label
includes.write(label+'\t'+uri+'\n')
if orig_ratio/infer_ratio < 2 and row['infer_lower'] > 0:
includes.write(label.lower()+'\t'+uri+'\n')
includes.close()
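# Illustrative follow-up: how many unambiguous surface forms were written?
print(sum(1 for _ in open('../mapreduce/unambiguous_labels.txt')))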
counts_df[(counts_df.uri == 'Cicada')]
counts_df[(counts_df.organ_normal > 0) & (counts_df.infer_lower > 0) & (counts_df.infer_normal == 0)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Construct original counts file
Step3: Generate excludes by ambiguity
Step4: Generate typed n-grams
|
2,662
|
<ASSISTANT_TASK:>
Python Code:
# Author: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: BSD-3-Clause
import numpy as np
import mne
from mne.datasets import sample
from mne.inverse_sparse import gamma_map, make_stc_from_dipoles
from mne.viz import (plot_sparse_source_estimates,
plot_dipole_locations, plot_dipole_amplitudes)
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path / 'subjects'
meg_path = data_path / 'MEG' / 'sample'
fwd_fname = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif'
evoked_fname = meg_path / 'sample_audvis-ave.fif'
cov_fname = meg_path / 'sample_audvis-cov.fif'
# Read the evoked response and crop it
condition = 'Left visual'
evoked = mne.read_evokeds(evoked_fname, condition=condition,
baseline=(None, 0))
evoked.crop(tmin=-50e-3, tmax=300e-3)
# Read the forward solution
forward = mne.read_forward_solution(fwd_fname)
# Read noise noise covariance matrix and regularize it
cov = mne.read_cov(cov_fname)
cov = mne.cov.regularize(cov, evoked.info, rank=None)
# Run the Gamma-MAP method with dipole output
alpha = 0.5
dipoles, residual = gamma_map(
evoked, forward, cov, alpha, xyz_same_gamma=True, return_residual=True,
return_as_dipoles=True)
plot_dipole_amplitudes(dipoles)
# Plot dipole location of the strongest dipole with MRI slices
idx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles])
plot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample',
subjects_dir=subjects_dir, mode='orthoview',
idx='amplitude')
# # Plot dipole locations of all dipoles with MRI slices
# for dip in dipoles:
# plot_dipole_locations(dip, forward['mri_head_t'], 'sample',
# subjects_dir=subjects_dir, mode='orthoview',
# idx='amplitude')
ylim = dict(grad=[-120, 120])
evoked.pick_types(meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response Gradiometers'), ylim=ylim,
proj=True, time_unit='s')
residual.pick_types(meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals Gradiometers'), ylim=ylim,
proj=True, time_unit='s')
stc = make_stc_from_dipoles(dipoles, forward['src'])
scale_factors = np.max(np.abs(stc.data), axis=1)
scale_factors = 0.5 * (1 + scale_factors / np.max(scale_factors))
plot_sparse_source_estimates(
forward['src'], stc, bgcolor=(1, 1, 1),
modes=['sphere'], opacity=0.1, scale_factors=(scale_factors, None),
fig_name="Gamma-MAP")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plot dipole activations
Step2: Show the evoked response and the residual for gradiometers
Step3: Generate stc from dipoles
Step4: View in 2D and 3D ("glass" brain like 3D plot)
|
2,663
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import exists
filename = 'modsim.py'
if not exists(filename):
from urllib.request import urlretrieve
url = 'https://raw.githubusercontent.com/AllenDowney/ModSim/main/'
local, _ = urlretrieve(url+filename, filename)
print('Downloaded ' + local)
# import functions from modsim
from modsim import *
params = Params(
R1 = 1e6, # * ohm
C1 = 1e-9, # * farad
A = 5, # * volt
f = 1000, # * Hz
)
params
from numpy import pi
def make_system(params):
    """Make a System object for the given conditions.

    params: Params object
    returns: System object
    """
f, R1, C1 = params.f, params.R1, params.C1
init = State(V_out = 0)
omega = 2 * pi * f
tau = R1 * C1
cutoff = 1 / R1 / C1 / 2 / pi
t_end = 4 / f
return System(params,
init=init,
t_end=t_end, num=401,
omega=omega, tau=tau,
cutoff=cutoff)
system = make_system(params)
system
# Solution
def slope_func(t, state, system):
    """Compute derivatives of the state.

    t: time
    state: V_out
    system: System object with A, omega, R1 and C1
    returns: dV_out/dt
    """
V_out = state
R1, C1 = system.R1, system.C1
A, omega = system.A, system.omega
V_in = A * np.cos(omega * t)
V_R1 = V_in - V_out
I_R1 = V_R1 / R1
I_C1 = I_R1
dV_out_dt = I_C1 / C1
return dV_out_dt
slope_func(0, system.init, system)
results, details = run_solve_ivp(system, slope_func)
details.message
results.tail()
def plot_results(results):
V_out = results.V_out.copy()
t_end = results.index[-1]
if t_end < 0.1:
V_out.index *= 1000
xlabel = 'Time (ms)'
else:
V_out = results.V_out
xlabel = 'Time (s)'
V_out.plot(label='_nolegend')
decorate(xlabel=xlabel,
ylabel='$V_{out}$ (volt)')
plot_results(results)
from matplotlib.pyplot import subplot
fs = [1, 10, 100, 1000, 10000, 100000]
for i, f in enumerate(fs):
system = make_system(params.set(f=f))
results, details = run_solve_ivp(system, slope_func)
subplot(3, 2, i+1)
plot_results(results)
system = make_system(params.set(f=1000))
results, details = run_solve_ivp(system, slope_func)
V_out = results.V_out
plot_results(results)
def compute_vin(results, system):
Computes V_in as a TimeSeries.
results: TimeFrame with simulation results
system: System object with A and omega
returns: TimeSeries
A, omega = system.A, system.omega
ts = results.index
V_in = A * np.cos(omega * ts)
return TimeSeries(V_in, results.index, name='V_in')
V_in = compute_vin(results, system)
V_out.plot()
V_in.plot()
decorate(xlabel='Time (s)',
ylabel='V (volt)')
def estimate_A(series):
    """Estimate amplitude.

    series: TimeSeries
    returns: amplitude in volts
    """
return (series.max() - series.min()) / 2
A_in = estimate_A(V_in)
A_in
A_out = estimate_A(V_out)
A_out
ratio = A_out / A_in
ratio
# Solution
def estimate_ratio(V1, V2):
    """Estimate the ratio of amplitudes.

    V1: TimeSeries
    V2: TimeSeries
    returns: amplitude ratio
    """
a1 = estimate_A(V1)
a2 = estimate_A(V2)
return a1 / a2
estimate_ratio(V_out, V_in)
corr = np.correlate(V_out, V_in, mode='same')
corr_series = make_series(V_in.index, corr)
corr_series.plot(color='C4')
decorate(xlabel='Lag (s)',
ylabel='Correlation')
peak_time = corr_series.idxmax()
peak_time
period = 1 / system.f
period
peak_time / period
frac, whole = np.modf(peak_time / period)
frac
frac * 360
# Solution
def estimate_offset(V1, V2, system):
    """Estimate phase offset.

    V1: TimeSeries
    V2: TimeSeries
    system: System object with f
    returns: phase offset in degrees
    """
corr = np.correlate(V1, V2, mode='same')
corr_series = make_series(V1.index, corr)
peak_time = corr_series.idxmax()
period = 1 / system.f
frac, whole = np.modf(peak_time / period)
return -frac * 360
estimate_offset(V_out, V_in, system)
# Solution
def sweep_frequency(fs, params):
ratios = SweepSeries()
offsets = SweepSeries()
for i, f in enumerate(fs):
system = make_system(params.set(f=f))
results, details = run_solve_ivp(system, slope_func)
V_out = results.V_out
V_in = compute_vin(results, system)
f = magnitude(f)
ratios[f] = estimate_ratio(V_out, V_in)
offsets[f] = estimate_offset(V_out, V_in, system)
return ratios, offsets
fs = 10 ** linspace(0, 4, 9)
ratios, offsets = sweep_frequency(fs, params)
ratios.plot(color='C2', label='output ratio')
decorate(xlabel='Frequency (Hz)',
ylabel='$V_{out} / V_{in}$')
def plot_ratios(ratios, system):
    """Plot output ratios."""
# axvline can't handle a Quantity with units
cutoff = magnitude(system.cutoff)
plt.axvline(cutoff, color='gray', alpha=0.4)
ratios.plot(color='C2', label='output ratio')
decorate(xlabel='Frequency (Hz)',
ylabel='$V_{out} / V_{in}$',
xscale='log', yscale='log')
plot_ratios(ratios, system)
def plot_offsets(offsets, system):
    """Plot phase offsets."""
# axvline can't handle a Quantity with units
cutoff = magnitude(system.cutoff)
plt.axvline(cutoff, color='gray', alpha=0.4)
offsets.plot(color='C9', label='phase offset')
decorate(xlabel='Frequency (Hz)',
ylabel='Phase offset (degree)',
xscale='log')
plot_offsets(offsets, system)
# Solution
fs = 10 ** linspace(0, 4, 9)
def output_ratios(fs, system):
R1, C1, omega = system.R1, system.C1, system.omega
omegas = 2 * pi * fs
rco = R1 * C1 * omegas
A = 1 / np.sqrt(1 + rco**2)
return make_series(fs, A)
A = output_ratios(fs, system)
# Solution
def phase_offsets(fs, system):
R1, C1, omega = system.R1, system.C1, system.omega
omegas = 2 * pi * fs
rco = R1 * C1 * omegas
phi = np.rad2deg(np.arctan(-rco))
return make_series(fs, phi)
phi = phase_offsets(fs, system)
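# Analytic spot check at the cutoff frequency (omega = 1/RC): the ratio should
# be 1/sqrt(2) (about -3 dB) and the phase offset -45 degrees.
f_c = np.array([magnitude(system.cutoff)])
print(output_ratios(f_c, system))
print(phase_offsets(f_c, system))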
A.plot(style=':', color='gray', label='analysis')
plot_ratios(ratios, system)
phi.plot(style=':', color='gray', label='analysis')
plot_offsets(offsets, system)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following circuit diagram (from Wikipedia) shows a low-pass filter built with one resistor and one capacitor.
Step3: Now we can pass the Params object make_system which computes some additional parameters and defines init.
Step4: Let's make a System
Step6: The system variable num controls how many time steps we get from run_solve_ivp. The default is 101; in this case we increase it to 401 because the methods we'll use to analyze the results require high resolution in time.
Step7: Test the slope function with the initial conditions.
Step8: And then run the simulation. I suggest using t_eval=ts to make sure we have enough data points to plot and analyze the results.
Step9: Here's a function you can use to plot V_out as a function of time.
Step10: If things have gone according to plan, the amplitude of the output signal should be about 0.8 V.
Step11: At low frequencies, notice that there is an initial "transient" before the output gets to a steady-state sinusoidal output. The duration of this transient is a small multiple of the time constant, tau, which is 1 ms.
Step13: The following function computes V_in as a TimeSeries
Step14: Here's what the input and output look like. Notice that the output is not just smaller; it is also "out of phase"; that is, the peaks of the output are shifted to the right, relative to the peaks of the input.
Step16: The following function estimates the amplitude of a signal by computing half the distance between the min and max.
Step17: The amplitude of V_in should be near 5 (but not exact because we evaluated it at a finite number of points).
Step18: The amplitude of V_out should be lower.
Step19: And here's the ratio between them.
Step21: Exercise
Step22: And test your function.
Step23: Estimating phase offset
Step24: The location of the peak in the cross correlation is the estimated shift between the two signals, in seconds.
Step25: We can express the phase offset as a multiple of the period of the input signal
Step26: We don't care about whole period offsets, only the fractional part, which we can get using modf
Step27: Finally, we can convert from a fraction of a cycle to degrees
Step29: Exercise
Step30: Test your function.
Step31: Sweeping frequency again
Step32: Run your function with these frequencies.
Step33: We can plot output ratios like this
Step35: But it is useful and conventional to plot ratios on a log-log scale. The vertical gray line shows the cutoff frequency.
Step37: This plot shows the cutoff behavior more clearly. Below the cutoff, the output ratio is close to 1. Above the cutoff, it drops off linearly, on a log scale, which indicates that output ratios for high frequencies are practically 0.
Step38: For low frequencies, the phase offset is near 0. For high frequencies, it approaches 90 degrees.
Step39: Test your function
Step40: Test your function
Step41: Plot the theoretical results along with the simulation results and see if they agree.
|
2,664
|
<ASSISTANT_TASK:>
Python Code:
# Standard library
import datetime
import time
# Third party libraries
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Digitre code
import digitre_preprocessing as prep
import digitre_model
import digitre_classifier
# Reload digitre code in the same session (during development)
import imp
imp.reload(prep)
imp.reload(digitre_model)
imp.reload(digitre_classifier)
# Latest update
str(datetime.datetime.now())
# Data loading and preprocessing
X, Y, testX, testY = digitre_model.load_data()
#X = X.reshape([-1, 28, 28, 1])
#testX = testX.reshape([-1, 28, 28, 1])
# Plot functions
def plot_digit(digit, show=True, file_name=None):
plt.imshow(digit, cmap = 'Greys', interpolation = 'none')
plt.tick_params(axis='both', which='both', bottom='off', top='off',
labelbottom='off', right='off', left='off', labelleft='off')
if file_name is not None:
plt.savefig(file_name)
if show:
plt.show()
def plot_digits(digits, rows, columns):
for i, digit in enumerate(digits):
plt.subplot(rows, columns, i+1)
plot_digit(digit, show=False)
plt.show()
# Plot a few training examples
X_eg = X[10:20,:,:,:]
X_eg = [digit.reshape(28, 28) for digit in X_eg]
plot_digits(X_eg, 2, 5)
# Visualization
# Used "tensorboard_verbose=0", meaning Loss & Metric
# Run "$ tensorboard --logdir='/tmp/tflearn_logs'"
### Fit model using all data (merge training and test data)
# Done from command line:
# $ python digitre_model.py -f 'cnn_alldata.tflearn' -a -e 20
# Training Step: 20320 | total loss: 0.64299
# | Adam | epoch: 020 | loss: 0.64299 - acc: 0.9401 | val_loss: 0.05263 - val_acc: 0.9866 -- iter: 65000/65000
# --
# -----
# Completed training in
# 3.5 hr.
# -----
# ... Saving trained model as " cnn_alldata.tflearn "
with open('b64_2_preprocessing.txt', 'r') as f:
eg_2 = f.read()
# Preview base64 encoded image
print(eg_2[:500])
eg_2 = prep.b64_str_to_np(eg_2)
eg_2.shape
# Plot the example handwritten digit
plot_digit(eg_2, file_name='b64_2_preprocessing_1.png')
eg_2 = prep.crop_img(eg_2)
plot_digit(eg_2, file_name='b64_2_preprocessing_2.png')
eg_2 = prep.center_img(eg_2)
plot_digit(eg_2, file_name='b64_2_preprocessing_3.png')
eg_2 = prep.resize_img(eg_2)
eg_2.shape
plot_digit(eg_2, file_name='b64_2_preprocessing_4.png')
eg_2 = prep.min_max_scaler(eg_2, final_range=(0, 1))
plot_digit(eg_2)
# Plot processed Digitre image together with MNIST example
plot_digits([eg_2, X_eg[6]], 1, 2)
# Save MNIST example too
plot_digit(X_eg[6], file_name='MNIST_2.png')
eg_2.max()
eg_2.shape
# Instantiate Classifier (loads the tflearn pre-trained model)
model = digitre_classifier.Classifier(file_name='cnn.tflearn')
# Classify same example digit
with open('b64_2_preprocessing.txt', 'r') as f:
eg_2 = f.read()
eg_2 = model.preprocess(eg_2)
pred = np.around(model.classify(eg_2)[0], 2)
pred
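# The predicted digit is just the argmax over the class-probability vector.
print('Predicted digit:', int(np.argmax(pred)))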
from altair import Chart, Data, X, Y, Axis, Scale
# Plot prediction
def prob_distribution_plot(pred):
prediction = pred.reshape([10])
    data = Data(values=[{'x': i, 'y': value} for i, value in enumerate(prediction)])
plot = Chart(data).mark_bar(color='#f6755e').encode(
x=X('x:O', axis=Axis(title='Digit', labelAngle=0.5,
tickLabelFontSize=15, titleFontSize=15)),
y=Y('y:Q', axis=Axis(format='%', title='Probability',
tickLabelFontSize=15, titleFontSize=15),
scale=Scale(domain=(0, 1))))
return plot
prob_distribution_plot(pred)
from altair import Chart, Data, X, Y, Axis
# Plot prediction
def prob_distribution_plot(pred):
prediction = pred.reshape([10])
data = Data(values=[{'x': i, 'y': value} for i, value in enumerate(prediction)])
plot = Chart(data).mark_bar(color='#f6755e').encode(
x=X('x:O', axis=Axis(title='Digit', labelAngle=0.5, tickLabelFontSize=15, titleFontSize=15)),
y=Y('y:Q', axis=Axis(format='%', title='Probability', tickLabelFontSize=15, titleFontSize=15)))
return plot.to_json(indent=2)
prob_distribution_plot(pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id="TF"></a>
Step2: <a id="Digitre"></a>
Step3: <a id="Class"></a>
|
2,665
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import math
from scipy.optimize import fmin_l_bfgs_b
from sklearn.metrics import accuracy_score
import pickle
# Load data
with open('./data/pickled/xtrain.pickle', 'rb') as f:
xtrain = pickle.load(f)
with open('./data/pickled/ytrain.pickle', 'rb') as f:
ytrain = pickle.load(f)
with open('./data/pickled/xtest.pickle', 'rb') as f:
xtest = pickle.load(f)
with open('./data/pickled/ytest.pickle', 'rb') as f:
ytest = pickle.load(f)
with open('./data/pickled/xval.pickle', 'rb') as f:
xval = pickle.load(f)
with open('./data/pickled/yval.pickle', 'rb') as f:
yval = pickle.load(f)
# Sigmoid function
def sigmoid(z):
return 1.0 / (1.0 + np.exp(-z))
def nn(weights,x,y):
n = x.shape[0]
bias = np.ones((n,1))
# Unroll weights from a single vector
w2 = weights[:(m+1)*hidden].reshape((hidden,(m+1))) # [Hidden X m + 1]
w3 = weights[(m+1)*hidden:].reshape((classes,(hidden+1))) # [classes X Hidden + 1]
### Forward propagation ###
a1 = np.concatenate((bias,x),axis=1) # [nXm+1]
a2 = sigmoid(np.dot(a1,w2.T)) # [nXm+1] . [m+1XHidden] = [nXHidden]
a2 = np.concatenate((bias,a2),axis=1) # [nXHidden+1]
a3 = sigmoid(np.dot(a2,w3.T)) # [nXHidden+1] . [Hidden+1Xclasses] = [nXclasses]
# Cost function: regularized cross entropy
C = np.sum(np.nan_to_num(-y*np.log(a3) - (1-y)*(np.log(1-a3))))/n
C += ((Lambda/(2*n)) * (np.sum(w2[:,1:]**2) + np.sum(w3[:,1:]**2))) # Add regularization to the cost function
### Backpropagation ###
d3 = (a3 - y) # [nXclasses]
d2 = np.dot(d3,w3) * (a2*(1-a2)) # [nXclasses] . [classesXHidden+1] = [nXHidden+1]
# No delta is needed for the input layer: it holds raw data,
# not activations computed from a previous layer
# Find the gradients for the weights
grad2 = np.dot(d2[:,1:].T,a1)/n # [HiddenXn] . [nXm+1] = [HiddenXm+1]
grad3 = np.dot(d3.T,a2)/n # [classesXn] . [nXHidden+1] = [classesXHidden+1]
# Regularize the weights only (not the biases); the L2 penalty in C above
# adds (Lambda/n) * w to each weight's gradient, so it must be added here
if Lambda != 0:
grad2[:,1:] += (Lambda/n * w2[:,1:])
grad3[:,1:] += (Lambda/n * w3[:,1:])
# Roll weights and biases back into a single vector
grads = np.concatenate((grad2.reshape(((m+1)*hidden)),grad3.reshape(((hidden+1)*classes))))
return C, grads
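# Optional sanity check (a sketch, not in the original notebook): compare the
# analytic gradients from nn() against centered finite differences at a few
# randomly chosen weight indices. Call this after m, hidden, classes and
# Lambda are defined below.
def grad_check(weights, x, y, eps=1e-5, n_checks=5):
    _, grads = nn(weights, x, y)
    for i in np.random.randint(0, len(weights), n_checks):
        w_plus, w_minus = weights.copy(), weights.copy()
        w_plus[i] += eps
        w_minus[i] -= eps
        num_grad = (nn(w_plus, x, y)[0] - nn(w_minus, x, y)[0]) / (2 * eps)
        print(i, grads[i], num_grad)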
# Predict function
def predict(weights,x):
# Establish some useful variables
n = np.int(x.shape[0])
bias = np.ones((n,1))
# Unroll weights
w2 = weights[:(m+1)*hidden].reshape((hidden,(m+1)))
w3 = weights[(m+1)*hidden:].reshape((classes,(hidden+1)))
### Forward propagation ###
a1 = np.concatenate((bias,x),axis=1)
a2 = sigmoid(np.dot(a1,w2.T))
a2 = np.concatenate((bias,a2),axis=1)
a3 = sigmoid(np.dot(a2,w3.T))
return np.argmax(a3, axis=1)
def weight_init(L_in,L_out):
return np.random.normal(scale=1/np.sqrt(L_in), size=(L_out,L_in+1))
# Model parameters
hidden = int(50) # Number of hidden units
Lambda = 3 # Lambda regularization parameter
classes = int(10) # Number of output classes
maxiter = int(800) # Maximum number of iterations the optimizer is allowed to perform
m = np.int(xtrain.shape[1]) # Number of features in each example
# Initialize weights
weights = np.concatenate((weight_init(m,hidden).reshape(((m+1)*hidden)),weight_init(hidden,classes).reshape(((hidden+1)*classes))))
# Train the model
model = fmin_l_bfgs_b(nn, x0=weights, args=(xtrain,ytrain),maxiter=maxiter)
print("The accuracy is on the training set is %f" %(accuracy_score(np.argmax(ytrain,axis=1), predict(model[0],xtrain))))
print("The accuracy is on the test set is %f" %(accuracy_score(np.argmax(ytest,axis=1), predict(model[0],xtest))))
# Visualize the data
def drawplot(draw,x,y):
if draw:
n = x.shape[0]
idx = np.random.randint(0,n,size=100) # Make an array of random integers between 0 and n
fig, ax = plt.subplots(10, 10) # make the plots
img_size = math.sqrt(m) # Specify the image size (in this case sqrt(m) = 28)
for i in range(10):
for j in range(10):
Xi = x[idx[i*10+j],:].reshape(int(img_size), int(img_size)) # get each example and resize
ax[i,j].set_axis_off() # Turns off the axes for all the subplots for clarity
ax[i,j].imshow(Xi, aspect='auto',cmap='gray') # plots the current image in the correct position
plt.show()
drawplot(True,xtrain,ytrain)
# Interactive printer function
def printer(x,y,weights):
idx = np.random.randint(len(x),size=1)
img_size = int(math.sqrt(m))
xi = x[idx,:].reshape(img_size,img_size)
yi = predict(weights,x[idx,:])
plt.title('The predicted value is %i\nThe true value is %i' % (int(yi), int(np.argmax(y[idx,:],axis=1))))
plt.imshow(xi, aspect='auto',cmap='gray')
plt.axis('off')
plt.show()
printer(xtrain,ytrain,model[0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, we load the data. Please see the accompanying notebook MNIST-loader.ipynb for details.
Step2: Now let's define some useful functions for the neural network to use. First is the sigmoid activation function
Step3: The neural network
Step4: Next is the predict function. This function takes the learned weights and performs forward propagation through the network using the x values supplied in the arguments. The effect of this is essentially to predict the output class of the given data using the weights that have been learned. This function will only be called by the accuracy tools at the end, and thus does not perform backpropagation to actually learn the weights.
Step5: We initialize the weights randomly with a standard deviation of $ 1/\sqrt{n} $, where $n$ is the number of inputs to the layer (the fan-in)
Step6: Next, we specify the model parameters. Lambda is the regularization parameter, which protects against overfitting.
Step7: Finally, we train the model
Step8: Visualizing the data
|
2,666
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division # Use print(...) rather than print ...
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import random
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import accuracy_score, confusion_matrix, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
%matplotlib inline
RANDOM_STATE = 1337
np.random.seed(RANDOM_STATE)
df = pd.read_csv('../datasets/advertising.csv', index_col=0)
X = np.load('../datasets/food.npy')
y = np.load('../datasets/food_labels.npy')
X.shape
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_label = le.fit_transform(y)
# The train/test split was missing here; this is an assumed 80/20 split so that
# X_train/X_test exist and the label transforms below are runnable.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=RANDOM_STATE)
y_train = le.transform(y_train)
y_test = le.transform(y_test)
batch_size = 64
epochs = 20
x_train = X_train.reshape(X_train.shape[0], 28 * 28 * 3)
x_test = X_test.reshape(X_test.shape[0], 28 * 28 * 3)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(28 * 28 * 3,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
history = model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
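# A quick sanity check (an addition, not part of the original task): predicted
# probabilities for a few test images next to their encoded labels.
preds = model.predict(x_test[:5])
print(np.round(preds.ravel(), 3), y_test[:5])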
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Initial Data Exploration - Advertising (6 points)
Step2: Question 1.1.a (1 point)
Step3: Question 3.1 (1 point)
|
2,667
|
<ASSISTANT_TASK:>
Python Code:
from atmPy.instruments.POPS import housekeeping
%matplotlib inline
filename = './data/POPS_housekeeping.csv'
hk = housekeeping.read_csv(filename)
out = hk.plot_all()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading a housekeeping file
Step2: Done! hk is an instance of TimeSeries and you can do with it what ever the instance is capable of (see here). E.g. plot stuff.
|
2,668
|
<ASSISTANT_TASK:>
Python Code:
import espressomd
import espressomd.magnetostatics
espressomd.assert_features(['DIPOLES', 'DP3M', 'LENNARD_JONES'])
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})
import numpy as np
import tqdm
# Lennard-Jones parameters
LJ_SIGMA = 1.
LJ_EPSILON = 1.
LJ_CUT = 2**(1. / 6.) * LJ_SIGMA
# Particles
N_PART = 700
# Area fraction of the mono-layer
PHI = 0.06
# Dipolar interaction parameter lambda = MU_0 m^2 /(4 pi sigma^3 kT)
DIP_LAMBDA = 4.
# Temperature
KT = 1.0
# Friction coefficient
GAMMA = 1.0
# Time step
TIME_STEP = 0.01
# Langevin parameter ALPHA = MU_0 m H / kT
ALPHA = 10.
# vacuum permeability
MU_0 = 1.
# System setup
box_size = (N_PART * np.pi * (LJ_SIGMA / 2.)**2. / PHI)**0.5
print("Box size", box_size)
# Note that the dipolar P3M and dipolar layer correction need a cubic
# simulation box for technical reasons.
system = espressomd.System(box_l=(box_size, box_size, box_size))
system.time_step = TIME_STEP
# Lennard-Jones interaction
system.non_bonded_inter[0, 0].lennard_jones.set_params(epsilon=LJ_EPSILON, sigma=LJ_SIGMA, cutoff=LJ_CUT, shift="auto")
# Random dipole moments
np.random.seed(seed=1)
dip_phi = 2. * np.pi * np.random.random((N_PART, 1))
dip_cos_theta = 2 * np.random.random((N_PART, 1)) - 1
dip_sin_theta = np.sin(np.arccos(dip_cos_theta))
dip = np.hstack((
dip_sin_theta * np.sin(dip_phi),
dip_sin_theta * np.cos(dip_phi),
dip_cos_theta))
# Random positions in the monolayer
pos = box_size * np.hstack((np.random.random((N_PART, 2)), np.zeros((N_PART, 1))))
# Add particles
particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)], dip=dip, fix=N_PART * [(False, False, True)])
# Remove overlap between particles by means of the steepest descent method
system.integrator.set_steepest_descent(
f_max=0, gamma=0.1, max_displacement=0.05)
while system.analysis.energy()["total"] > 5 * KT * N_PART:
system.integrator.run(20)
# Switch to velocity Verlet integrator
system.integrator.set_vv()
system.thermostat.set_langevin(kT=KT, gamma=GAMMA, seed=1)
# tune verlet list skin
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
# Setup dipolar P3M and dipolar layer correction (DLC)
dp3m = espressomd.magnetostatics.DipolarP3M(accuracy=5E-4, prefactor=DIP_LAMBDA * LJ_SIGMA**3 * KT)
mdlc = espressomd.magnetostatics.DLC(actor=dp3m, maxPWerror=1E-4, gap_size=box_size - LJ_SIGMA)
system.actors.add(mdlc)
# tune verlet list skin again
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
# print skin value
print(f'tuned skin = {system.cell_system.skin:.2f}')
# magnetic field times dipole moment
H_dipm = ALPHA * KT
H_field = [H_dipm, 0, 0]
# Equilibrate
print("Equilibration...")
equil_rounds = 10
equil_steps = 200
for i in tqdm.trange(equil_rounds):
system.integrator.run(equil_steps)
plt.figure(figsize=(10, 10))
plt.xlim(0, box_size)
plt.ylim(0, box_size)
plt.xlabel('x-position', fontsize=20)
plt.ylabel('y-position', fontsize=20)
plt.plot(particles.pos_folded[:, 0], particles.pos_folded[:, 1], 'o')
plt.show()
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import tempfile
import base64
VIDEO_TAG = """<video controls>
<source src="data:video/x-m4v;base64,{0}" type="video/mp4">
Your browser does not support the video tag.
</video>"""
def anim_to_html(anim):
if not hasattr(anim, '_encoded_video'):
with tempfile.NamedTemporaryFile(suffix='.mp4') as f:
anim.save(f.name, fps=20, extra_args=['-vcodec', 'libx264'])
with open(f.name, "rb") as g:
video = g.read()
anim._encoded_video = base64.b64encode(video).decode('ascii')
plt.close(anim._fig)
return VIDEO_TAG.format(anim._encoded_video)
animation.Animation._repr_html_ = anim_to_html
def init():
# Set x and y range
ax.set_ylim(0, box_size)
ax.set_xlim(0, box_size)
xdata, ydata = [], []
part.set_data(xdata, ydata)
return part,
def run(i):
system.integrator.run(50)
# Save current system state as a plot
xdata, ydata = particles.pos_folded[:, 0], particles.pos_folded[:, 1]
ax.figure.canvas.draw()
part.set_data(xdata, ydata)
print(f'progress: {(i + 1):3.0f}%', end='\r')
return part,
fig, ax = plt.subplots(figsize=(10, 10))
part, = ax.plot([], [], 'o')
animation.FuncAnimation(fig, run, frames=100, blit=True, interval=0, repeat=False, init_func=init)
# Dipolar interaction parameter lambda = MU_0 m^2 /(4 pi sigma^3 kT)
DIP_LAMBDA = 1.
# increase time step
TIME_STEP = 0.02
# dipole moment
dipm = np.sqrt(4 * np.pi * DIP_LAMBDA * LJ_SIGMA**3. * KT / MU_0)
# remove all particles
system.part.clear()
system.actors.clear()
system.thermostat.turn_off()
# Random dipole moments
dip_phi = 2. * np.pi * np.random.random((N_PART, 1))
dip_cos_theta = 2 * np.random.random((N_PART, 1)) - 1
dip_sin_theta = np.sin(np.arccos(dip_cos_theta))
dip = np.hstack((
dip_sin_theta * np.sin(dip_phi),
dip_sin_theta * np.cos(dip_phi),
dip_cos_theta))
# Random positions in the monolayer
pos = box_size * np.hstack((np.random.random((N_PART, 2)), np.zeros((N_PART, 1))))
# Add particles
particles = system.part.add(pos=pos, rotation=N_PART * [(True, True, True)], dip=dip, fix=N_PART * [(False, False, True)])
# Remove overlap between particles by means of the steepest descent method
system.integrator.set_steepest_descent(f_max=0, gamma=0.1, max_displacement=0.05)
while system.analysis.energy()["total"] > 5 * KT * N_PART:
system.integrator.run(20)
# Switch to velocity Verlet integrator
system.integrator.set_vv()
system.thermostat.set_langevin(kT=KT, gamma=GAMMA, seed=1)
# tune verlet list skin
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
# Setup dipolar P3M and dipolar layer correction
dp3m = espressomd.magnetostatics.DipolarP3M(accuracy=5E-4, prefactor=DIP_LAMBDA * LJ_SIGMA**3 * KT)
mdlc = espressomd.magnetostatics.DLC(actor=dp3m, maxPWerror=1E-4, gap_size=box_size - LJ_SIGMA)
system.actors.add(mdlc)
# tune verlet list skin again
system.cell_system.tune_skin(min_skin=0.4, max_skin=2., tol=0.2, int_steps=100)
alphas = np.array([0, 0.25, 0.5, 1, 2, 3, 4, 8])
# dipole moment
dipm = np.sqrt(DIP_LAMBDA * 4 * np.pi * LJ_SIGMA**3. * KT / MU_0)
print(f'dipole moment = {dipm:.4f}')
M_sat = PHI * 4. / np.pi * 1. / (LJ_SIGMA**2.) * dipm
def dL_dB(alpha):
return (1. / (alpha**2.) - 1. / ((np.sinh(alpha))**2.)) * dipm / (KT)
# approximated magnetization curve for a field parallel to the monolayer plane
def magnetization_approx_para(alpha):
return L(alpha) * (1. + MU_0 / 8. * M_sat * dL_dB(alpha))
# approximated magnetization curve for a field perpendicular to the monolayer plane
def magnetization_approx_perp(alpha):
return L(alpha) * (1. - MU_0 / 4. * M_sat * dL_dB(alpha))
# Langevin function
def L(x):
return (np.cosh(x) / np.sinh(x)) - 1. / x
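# Quick sanity check (an addition, not in the original tutorial): the Langevin
# function behaves as L(alpha) ~ alpha/3 for small alpha and approaches 1 for
# large alpha.
print(L(0.1), 0.1 / 3.)
print(L(100.))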
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: and set up the simulation parameters where we introduce a new dimensionless parameter
Step2: Now we set up the system. As in part I, the orientation of the dipole moments is set directly on the particles, whereas the magnitude of the moments is taken into account when determining the prefactor of the dipolar P3M (for more details see part I).
Step3: We now apply the external magnetic field which is
Step4: Exercise
Step5: Now we can visualize the current state and see that the particles mostly create chains oriented in the direction of the external magnetic field. Also some monomers should be present.
Step7: Video of the development of the system
Step8: We can now start the sampling via the <tt>animation</tt> class of <tt>matplotlib</tt>
Step9: In the visualization video we can see that individual chains break up and reconnect over time. Some monomers are also present, which detach from and reattach to chains. If you want more frames, i.e. a longer video, just increase the <tt>frames</tt> parameter in <tt>FuncAnimation</tt>.
Step10: To increase the performance we use the built-in function <tt>MagneticDipoleMoment</tt> to calculate the dipole moment of the whole system. In our case this is only the orientation as we never set the strength of the dipole moments on our particles.
Step11: For both the magnetization perpendicular and parallel to the monolayer plane we reuse the same system for every value of the Langevin parameter $\alpha$. This exploits the fact that the system is already more or less equilibrated from the previous run, which saves equilibration time. For scientific purposes one would use a fresh system for every value of the Langevin parameter, to ensure the systems are independent and no correlation effects are measured. One would also perform more than a single simulation for each value of $\alpha$ to increase the precision of the results.
Step12: and the saturation magnetization by using
Step13: Further we need the derivative of the Langevin function with respect to the external field $B$, so we define the function
Step14: Now we define the approximated magnetization curves parallel and perpendicular to the monolayer plane
Step15: Now we define the Langevin function
|
2,669
|
<ASSISTANT_TASK:>
Python Code:
# import the required packages
from swat import *
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
import cv2
# define the function to display the processed image files.
def imageShow(session, casTable, imageId, nimages):
a = session.table.fetch(sastypes=False,sortby=[{'name':'_id_'}],table=casTable,to=nimages)
fig = plt.figure(figsize=(20, 20))
for i in range(nimages):
imageData = a['Fetch'].iloc[i, imageId]
img_np = cv2.imdecode(np.frombuffer(imageData, np.uint8), 1)
img_np[:,:,[0,2]] = img_np[:,:,[2,0]] # swap BGR -> RGB before displaying
fig.add_subplot(1,nimages,i+1)
plt.imshow(img_np)
plt.xticks([]), plt.yticks([])
# define the host machine and port for CAS connection: port is 5570 from Linux client and 8777 from Windows client.
hostmachine = 'my-viya-server.my-domain.com'
port = 8777
# authentication file on the client machine with user name and password (see the link above).
_authinfo = r"my-local-path\_authinfo"
# path on the Viya server where the image files to be processed are stored.
path_source_images = "my-host-path-for-sources"
path_reference_images = "my-host-path-for-references"
# set up a CAS session.
conn = CAS(hostmachine, port, authinfo = _authinfo)
# load CAS image action set for processing images.
conn.loadactionset('image')
conn.image.loadImages(casout={'name':'inputTable', 'replace':True}, path= path_source_images)
conn.image.processimages(casout={'name':'inputTable_resized', 'replace':True},
imagefunctions=[{'functionoptions':{'width':1000,'functiontype':'RESIZE','height':600}}],
imagetable={'name':'inputTable'})
imageTable = conn.CASTable('inputTable_resized')
imageShow(conn, imageTable, 0, 4)
r = conn.image.processImages(casout={'name':'resultingImages','replace':True},
imagetable={'name':'inputTable_resized'},
imagefunctions=[
{'options':{'functiontype':'CONVERT_COLOR'}} #change color space
])
print(r)
outTable = conn.CASTable('resultingImages')
type(outTable)
imageShow(conn, outTable, 0, 4)
r = conn.image.processImages(casout={'name':'resultingImages','replace':True},
imagetable={'name':'inputTable_resized'},
imagefunctions=[
{'options':{'functiontype':'CONVERT_COLOR'}}, #change color space
{'options':{'functiontype':'BILATERAL_FILTER', #noise reduction
'diameter':13,'sigmacolor':30,'sigmaspace':30}},
{'options':{'functiontype':'THRESHOLD', #image binarization
'type':'OTSU','value':125}},
{'options':{'functiontype':'LAPLACIAN', #edge detection with the Laplace operator
'kernelsize':12}}
])
print(r)
outTable = conn.CASTable('resultingImages')
imageShow(conn, outTable, 0, 4)
outTable.head(4)
# Process reference files to compare.
conn.image.loadImages(casout={'name':'inTable', 'replace':True}, path=path_reference_images)
conn.image.processImages(casout={'name':'refTable','replace':True},
imagetable={'name':'inTable'},
imagefunctions=[{'functionoptions':{'width':1000,'functiontype':'RESIZE','height':600}}, # resize
{'options':{'functiontype':'CONVERT_COLOR'}}, #change color space
{'options':{'functiontype':'BILATERAL_FILTER', #noise reduction
'diameter':13,'sigmacolor':30,'sigmaspace':30}},
{'options':{'functiontype':'THRESHOLD', #image binarization
'type':'OTSU','value':125}}
])
# Compare reference and source images to find the similarity index.
results = conn.image.compareImages(
casOut={
"name":"output",
"replace":True
},
pairAll=True,
referenceImages={
"table":{
"name":'refTable'
}},
sourceImages={
"table":{
"name":'resultingImages'
}}
)
scoreTable = conn.CASTable("output")
del scoreTable['_channel4_']
del scoreTable['_channel3_']
print(results)
print(scoreTable.head())
# end the CAS session.
conn.session.endsession()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up the environment and Connect to SAS from Python
Step2: Load images and resize
Step3: Convert colours
Step4: Apply noise reduction and binarization
Step5: Compare images with the labeled images in the historical data
|
2,670
|
<ASSISTANT_TASK:>
Python Code:
# Some functions already covered
nums = [num**2 for num in range(1,11)]
print(nums) #print is a function, atleast Python 3.x onwards
# In Python 2.x - Not a function, it's a statement.
# Will give an error in Python 3.x
print nums
len(nums)
max(nums)
min(nums)
sum(nums)
nums.reverse()
nums
# Reverse a string
# Notice how many functions we use
word = input("Enter a word:")
word = list(word)
word.reverse()
word = ''.join(word)
word
word = input("Enter a word: ")
word = list(word)
word
word.reverse()
word = "".join(word)
word
# Import the library
import random
# Initiate a for loop
for i in range(5):
# x gets a random value from the random() function in the library named random
x = random.random()
print(round(x,2))
# Circumference of a circle
from math import pi
radius = int(input("Enter the radius in cm: "))
c = 2*pi*radius
area = pi*(radius**2)
print("The circumference of the circle is: ",c)
print("The area of the circle is: ", area)
# Function to say hello to the user
def say_hello():
name = input("What's your name? ")
print("Hello ", name,"!")
say_hello()
list_a = ["a", 1, 42, 19, "c", "23",1,2,3,4,5,6]
type(list_a)
for i in list_a:
print(type(i))
len(list_a)
def list_scorer(list_name):
if type(list_name) == list:
print("Correctly identified a list of length,",len(list_name),"items.")
for i in list_name:
print(type(i))
else:
print("This is not a list.")
list_scorer(list_a)
list_scorer("Hello")
def sq_num(num):
squared = num ** 2
return squared
sq_num(10)
def sq_num2(num):
return num**2
sq_num2(5)
def clean_up(tel_num):
result = ""
digits = {"0","1","2","3","4","5","6","7","8","9"}
for character in tel_num:
if character in digits:
result = result + character
return result
client_phone_num = "+1-555-123-1234 Barone Sanitation (Call only day time)"
clean_up(client_phone_num)
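# The same cleanup as an added, more idiomatic one-liner (equivalent to
# clean_up above):
def clean_up_short(tel_num):
    return "".join(ch for ch in tel_num if ch.isdigit())
clean_up_short(client_phone_num)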
def cubist(num):
cubed = num**3
return cubed
cubist(3)
print(cubed) # Will give an error
import random
import string
random.choice(string.ascii_letters)
def pass_gen(n):
# Initiate a blank password
password = ""
# Remember, n+1
for letter in range(1,n+1):
# We add a random character to our blank password
password = password + random.choice(string.ascii_letters)
return password
pass_gen(8)
# Your code below
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So as you can see, you have used a lot of functions already.
Step2: Best Practices in Importing
Step3: Let's also look at the 'return' statement. When you print something using the standard print syntax
Step4: As with the print statement, you can perform this in a single line
Step5: For a data scientist, writing efficient functions can come in really handy during the data cleaning phase. Let's see one example.
Step6: One important thing while dealing with functions: what happens inside a function stays inside the function.
Step7: Remember, what happens in a function, stays in a function! Except return. That will come back!
Step8: Exercise
|
2,671
|
<ASSISTANT_TASK:>
Python Code::
import tensorflow as tf
from tensorflow.keras.utils import image_dataset_from_directory
PATH = ".../Citrus/Leaves"
ds = image_dataset_from_directory(PATH,
validation_split=0.2, subset="training",
image_size=(256,256), interpolation="bilinear",
crop_to_aspect_ratio=True,
seed=42, shuffle=True, batch_size=32)
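# The matching validation subset (an addition, not part of the original task);
# using the same seed and split fraction keeps the two subsets disjoint:
val_ds = image_dataset_from_directory(PATH,
    validation_split=0.2, subset="validation",
    image_size=(256,256), interpolation="bilinear",
    crop_to_aspect_ratio=True,
    seed=42, shuffle=True, batch_size=32)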
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,672
|
<ASSISTANT_TASK:>
Python Code:
import pygeogrids.grids as grids
import pygeogrids.shapefile as shapefile
import numpy as np
import os
testgrid = grids.genreg_grid(0.1, 0.1)
austria = shapefile.get_gad_grid_points(
testgrid, os.path.join('/home', os.environ['USER'], 'Downloads', 'gadm', 'gadm28_levels.shp.zip'), 0, name='Austria')
import matplotlib.pyplot as plt
%matplotlib inline
plt.scatter(austria.arrlon, austria.arrlat)
from osgeo import ogr # ogr was not imported above; it ships with the GDAL bindings
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(14, 47)
ring.AddPoint(14, 48)
ring.AddPoint(16, 48)
ring.AddPoint(16, 47)
ring.AddPoint(14, 47)
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
subgrid = austria.get_shp_grid_points(poly)
plt.scatter(austria.arrlon, austria.arrlat)
plt.scatter(subgrid.arrlon, subgrid.arrlat, c='orange')
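# An alternative selection without a shapefile (an added sketch; the
# get_bbox_grid_points method and its keyword names are assumed from the
# pygeogrids BasicGrid API): pick grid points by bounding box.
gpis = austria.get_bbox_grid_points(latmin=47, latmax=48, lonmin=14, lonmax=16)
print(len(gpis))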
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can now subset the 0.1x0.1 degree regular grid with the shapefiles from http
Step2: We can the plot the resulting grid using a simple scatterplot.
Step3: Behind the scenes this functionality uses the get_shp_grid_points function of the grid object.
|
2,673
|
<ASSISTANT_TASK:>
Python Code:
!python -m spacy download en_core_web_sm
from __future__ import unicode_literals, print_function
import boto3
import json
import numpy as np
import pandas as pd
import spacy
S3_BUCKET = "verta-strata"
S3_KEY = "english-tweets.csv"
FILENAME = S3_KEY
boto3.client('s3').download_file(S3_BUCKET, S3_KEY, FILENAME)
import utils
data = pd.read_csv(FILENAME).sample(frac=1).reset_index(drop=True)
utils.clean_data(data)
data.head()
nlp = spacy.load('en_core_web_sm')
import training
training.train(nlp, data, n_iter=20)
filename = "/tmp/model.spacy"
with open(filename, 'wb') as f:
f.write(nlp.to_bytes())
boto3.client('s3').upload_file(filename, S3_BUCKET, "models/01/model.spacy")
filename = "/tmp/model_metadata.json"
with open(filename, 'w') as f:
f.write(json.dumps(nlp.meta))
boto3.client('s3').upload_file(filename, S3_BUCKET, "models/01/model_metadata.json")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And import the boilerplate code.
Step2: Data prep
Step3: Clean and load data using our library.
Step4: Train the model
Step5: Update the model with the current data using our library.
Step6: Now we save the model back into S3 to a well known location (make sure it's a location you can write to!) so that we can fetch it later.
|
2,674
|
<ASSISTANT_TASK:>
Python Code:
import csv
import pandas as pd
titanic_df = pd.read_csv('titanic.csv', quoting=csv.QUOTE_MINIMAL, skiprows=[0],
names=['passenger_id', 'survived', 'class', 'name', 'sex', 'age',
'sib_sp', 'par_ch', 'ticket_id', 'fare', 'cabin', 'port'])
titanic_df.head()
titanic_df = titanic_df.drop(['passenger_id', 'name', 'ticket_id', 'fare', 'cabin', 'port'], axis=1)
titanic_df.head()
titanic_df['survived'].isnull().sum()
titanic_df['age'].isnull().sum()
titanic_df['sex'].isnull().sum()
titanic_df['sib_sp'].isnull().sum()
titanic_df['par_ch'].isnull().sum()
titanic_df = titanic_df[titanic_df['age'].notnull()]
titanic_df['age'].isnull().sum()
survivors = titanic_df.groupby('survived')['age']
survivors.describe()
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
ax1, ax2 = survivors.plot(kind='hist', legend=True, alpha=0.6, bins=range(0, 90, 10))
ax1.set_xlabel('Age (Years)')
ax1.legend(['No', 'Yes'], title='Survived', loc='upper right')
ax2.set_ylabel('Frequency')
plt.title('Comparison of survivors v/s non survivors based on age')
survivors = titanic_df.groupby('survived')['sex']
survivors.describe()
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
survivors = pd.crosstab(titanic_df['survived'], titanic_df['sex'])
survivors.plot(kind='bar', legend=True)
plt.xlabel('Survived?')
plt.xticks([0, 1], ['No', 'Yes'], rotation=0)
plt.ylabel('Frequency')
plt.legend(['Female', 'Male'], title='Sex', loc='upper right')
plt.title('Comparison of survivors v/s non survivors based on sex')
survivors = titanic_df.groupby('survived')['class']
survivors.describe()
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
survivors = pd.crosstab(titanic_df['survived'], titanic_df['class'])
survivors.plot(kind='bar')
plt.xlabel('Survived?')
plt.xticks([0, 1], ['No', 'Yes'], rotation=0)
plt.ylabel('Frequency')
plt.legend(['1st', '2nd', '3rd'], title='Ticket Class', loc='upper right')
plt.title('Comparison of survivors v/s non survivors based on ticket class')
survivors = titanic_df.groupby('survived')['sib_sp']
survivors.describe()
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
ax1, ax2 = survivors.plot(kind='hist', bins=range(0, 8, 1), legend=True, alpha=0.6)
ax1.set_xlabel('No. of Siblings and Spouses Aboard')
ax1.legend(['No', 'Yes'], title='Survived', loc='upper right')
plt.title('Comparison of survivors v/s non survivors based on siblings and spouses aboard')
survivors = titanic_df.groupby('survived')['par_ch']
survivors.describe()
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
ax1, ax2 = survivors.plot(kind='hist', legend=True, bins=range(0, 7, 1), alpha=0.6)
ax1.set_xlabel('No. of Parents and Children Aboard')
ax1.legend(['No', 'Yes'], title='Survived?', loc='upper right')
plt.title('Comparison of survivors v/s non survivors based on parents and children aboard')
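# A quick statistical check (an addition, not in the original notebook): a
# chi-squared test of independence between sex and survival backs up the
# visual conclusion drawn above.
from scipy.stats import chi2_contingency
chi2, p, dof, expected = chi2_contingency(pd.crosstab(titanic_df['survived'], titanic_df['sex']))
print('chi2 = %.2f, p = %.2e' % (chi2, p))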
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Wrangling
Step2: Next, to ensure that the dataset is ready for analysis, check whether any attributes have missing values.
Step3: The age attribute seems to have missing values; 177 to be exact. These missing values could be ignored during the analysis.
Step4: Survivors
Step5: From the results it can be concluded that age is not a determining factor for survival, since the histogram has almost the same shape for both survivors and non-survivors, with the exception of children.
Step6: From the results, it can be concluded that more females survived than males.
Step7: From the results, it can be concluded that first and second class passengers had higher chances of survival than third class passengers.
Step8: From the results it can be concluded that having siblings and spouses aboard is not a factor determining survival of a passenger.
|
2,675
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from datetime import datetime, timedelta
import utils_data
from os.path import join
from IPython.display import display
dates_2016 = [datetime(2016, 1, 1) + timedelta(days=i) for i in range(366)]
dataset01, dataset0, dataset1 = utils_data.get_real_dataset()
datasets = [dataset0, dataset1]
out_folder = utils_data.FOLDER_REAL_DATA_ANALYSIS
print(dataset01.head())
data_stats = utils_data.get_real_data_stats()
data_stats.to_csv(join(utils_data.FOLDER_SIMULATOR_INPUT, 'aggregated_data.csv'))
display(data_stats)
most_used_card = dataset0['CardID'].value_counts().index[0]
print("Card (ID) with most transactions: ", most_used_card)
plt.figure(figsize=(15, 5))
plt_idx = 1
for d in datasets:
plt.subplot(1, 2, plt_idx)
trans_dates = d["Global_Date"].apply(lambda date: date.date())
all_trans = trans_dates.value_counts().sort_index()
date_num = matplotlib.dates.date2num(all_trans.index)
plt.plot(date_num, all_trans.values, 'k.', label='num trans.')
plt.plot(date_num, np.zeros(len(date_num))+np.sum(all_trans)/366, 'g--',label='average')
plt_idx += 1
plt.title(d.name, size=20)
plt.xlabel('days (1.1.16 - 31.12.16)', size=15)
plt.xticks([])
plt.xlim(matplotlib.dates.date2num([datetime(2016,1,1), datetime(2016,12,31)]))
if plt_idx == 2:
plt.ylabel('num transactions', size=15)
plt.legend(fontsize=15)
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'time_day-in-year'))
plt.show()
monthdays_2016 = np.unique([dates_2016[i].day for i in range(366)], return_counts=True)
monthdays_2016 = monthdays_2016[1][monthdays_2016[0]-1]
plt.figure(figsize=(12, 5))
plt_idx = 1
monthday_frac = np.zeros((31, 2))
idx = 0
for d in datasets:
# get the average number of transactions per day in a month
monthday = d["Local_Date"].apply(lambda date: date.day).value_counts().sort_index()
monthday /= monthdays_2016
if idx > -1:
monthday_frac[:, idx] = monthday.values / np.sum(monthday.values, axis=0)
idx += 1
plt.subplot(1, 2, plt_idx)
plt.plot(monthday.index, monthday.values, 'ko')
plt.plot(monthday.index, monthday.values, 'k-', markersize=0.1)
plt.plot(monthday.index, np.zeros(31)+np.sum(monthday)/31, 'g--', label='average')
plt.title(d.name, size=20)
plt.xlabel('day in month', size=15)
if plt_idx == 1:
plt.ylabel('avg. num transactions', size=15)
plt_idx += 1
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'time_day-in-month'))
plt.show()
# save the resulting data
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT, 'monthday_frac'), monthday_frac)
weekdays_2016 = np.unique([dates_2016[i].weekday() for i in range(366)], return_counts=True)
weekdays_2016 = weekdays_2016[1][weekdays_2016[0]]
plt.figure(figsize=(12, 5))
plt_idx = 1
weekday_frac = np.zeros((7, 2))
idx = 0
for d in datasets:
weekday = d["Local_Date"].apply(lambda date: date.weekday()).value_counts().sort_index()
weekday /= weekdays_2016
if idx > -1:
weekday_frac[:, idx] = weekday.values / np.sum(weekday.values, axis=0)
idx += 1
plt.subplot(1, 2, plt_idx)
plt.plot(weekday.index, weekday.values, 'ko')
plt.plot(weekday.index, weekday.values, 'k-', markersize=0.1)
plt.plot(weekday.index, np.zeros(7)+np.sum(weekday)/7, 'g--', label='average')
plt.title(d.name, size=20)
plt.xlabel('weekday', size=15)
plt.xticks(range(7), ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su'])
if plt_idx == 1:
plt.ylabel('avg. num transactions', size=15)
plt_idx += 1
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'time_day-in-week'))
plt.show()
# save the resulting data
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT, 'weekday_frac'), weekday_frac)
monthdays = np.array([31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
plt.figure(figsize=(12, 5))
plt_idx = 1
month_frac = np.zeros((12, 2))
idx = 0
for d in datasets:
month = d["Local_Date"].apply(lambda date: date.month).value_counts().sort_index()
# correct for different number of days in a month
month = month / monthdays[month.index.values-1] * np.mean(monthdays[month.index.values-1])
if idx > -1:
month_frac[month.index-1, idx] = month.values / np.sum(month.values, axis=0)
idx += 1
plt.subplot(1, 2, plt_idx)
plt.plot(month.index, month.values, 'ko')
plt.plot(month.index, month.values, 'k-', markersize=0.1)
plt.plot(range(1,13), np.zeros(12)+np.sum(month)/12, 'g--', label='average')
plt.title(d.name, size=20)
plt.xlabel('month', size=15)
plt.xticks(range(1, 13), ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
if plt_idx == 1:
plt.ylabel('num transactions', size=15)
plt_idx += 1
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'time_month-in-year'))
plt.show()
# save the resulting data
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT, 'month_frac'), month_frac)
plt.figure(figsize=(12, 5))
plt_idx = 1
hour_frac = np.zeros((24, 2))
idx = 0
for d in datasets:
hours = d["Local_Date"].apply(lambda date: date.hour).value_counts().sort_index()
hours /= 366
if idx > -1:
hour_frac[hours.index.values, idx] = hours.values / np.sum(hours.values, axis=0)
idx += 1
plt.subplot(1, 2, plt_idx)
plt.plot(hours.index, hours.values, 'ko')
plt.plot(hours.index, hours.values, 'k-', markersize=0.1, label='transactions')
plt.plot(range(24), np.zeros(24)+np.sum(hours)/24, 'g--', label='average')
plt.title(d.name, size=20)
plt.xlabel('hour', size=15)
# plt.xticks([])
if plt_idx == 1:
plt.ylabel('avg. num transactions', size=15)
plt_idx += 1
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'time_hour-in-day'))
plt.show()
# save the resulting data
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT, 'hour_frac'), hour_frac)
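# A minimal sketch of how the simulator can use these fractions (an addition):
# sample transaction hours for the non-fraud dataset (column 0).
sampled_hours = np.random.choice(24, size=5, p=hour_frac[:, 0])
print(sampled_hours)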
# extract only hours
date_hour_counts = dataset0["Local_Date"].apply(lambda d: d.replace(minute=0, second=0)).value_counts(sort=False)
hours = np.array(list(map(lambda d: d.hour, list(date_hour_counts.index))))
counts = date_hour_counts.values
hour_mean = np.zeros(24)
hour_min = np.zeros(24)
hour_max = np.zeros(24)
hour_std = np.zeros(24)
for h in range(24):
hour_mean[h] = np.mean(counts[hours==h])
hour_min[h] = np.min(counts[hours==h])
hour_max[h] = np.max(counts[hours==h])
hour_std[h] = np.std(counts[hours==h])
print(np.vstack((range(24), hour_min, hour_max, hour_mean, hour_std)).T)
# total number of transactions we want in one year
aggregated_data = pd.read_csv(join(utils_data.FOLDER_SIMULATOR_INPUT, 'aggregated_data.csv'), index_col=0)
trans_per_year = np.array(aggregated_data.loc['transactions'].values, dtype=np.float)[1:]
# transactions per day in a month
frac_monthday = np.load(join(utils_data.FOLDER_SIMULATOR_INPUT, 'monthday_frac.npy'))
# transactions per day in a week
frac_weekday = np.load(join(utils_data.FOLDER_SIMULATOR_INPUT, 'weekday_frac.npy'))
# transactions per month in a year
frac_month = np.load(join(utils_data.FOLDER_SIMULATOR_INPUT, 'month_frac.npy'))
# transactions hour in a day
frac_hour = np.load(join(utils_data.FOLDER_SIMULATOR_INPUT, 'hour_frac.npy'))
cust_idx = 0
std_transactions = 1000
num_customers = 200
# get the probability of a transaction in a given hour
curr_date = datetime(2016, 1, 1)
num_trans = 0
for i in range(366*24):
new_trans = float(trans_per_year[cust_idx])
new_trans *= frac_month[curr_date.month-1, cust_idx]
new_trans *= frac_monthday[curr_date.day-1, cust_idx]
new_trans *= 7 * frac_weekday[curr_date.weekday(), cust_idx]
new_trans *= frac_hour[curr_date.hour, cust_idx]
num_trans += new_trans
curr_date += timedelta(hours=1)
print(curr_date)
print(trans_per_year[cust_idx])
print(num_trans)
print("")
# The difference arises because months have different numbers of days.
# We did not want to scale up the transactions on day 31 because that's unrealistic.
curr_date = datetime(2016, 1, 1)
num_trans = 0
for i in range(366*24):
for c in range(num_customers):
# num_trans is the number of transactions the customer will make in this hour
# we assume that we have enough customers to model that each customer can make max 1 transaction per hour
cust_trans = float(trans_per_year[cust_idx])
cust_trans += np.random.normal(0, std_transactions, 1)[0]
cust_trans /= num_customers
cust_trans *= frac_month[curr_date.month-1, cust_idx]
cust_trans *= frac_monthday[curr_date.day-1, cust_idx]
cust_trans *= 7 * frac_weekday[curr_date.weekday(), cust_idx]
cust_trans *= frac_hour[curr_date.hour, cust_idx]
cust_trans += np.random.normal(0, 0.01, 1)[0]
if cust_trans > np.random.uniform(0, 1, 1)[0]:
num_trans += 1
curr_date += timedelta(hours=1)
print(curr_date)
print(trans_per_year[cust_idx])
print(num_trans)
print("")
country_counts = pd.concat([d['Country'].value_counts() for d in datasets], axis=1)
country_counts.fillna(0, inplace=True)
country_counts.columns = ['non-fraud', 'fraud']
country_counts[['non-fraud', 'fraud']] /= country_counts.sum(axis=0)
# save the resulting data
country_counts.to_csv(join(utils_data.FOLDER_SIMULATOR_INPUT, 'country_frac.csv'))
countries_large = []
for c in ['non-fraud', 'fraud']:
countries_large.extend(country_counts.loc[country_counts[c] > 0.05].index)
countries_large = np.unique(countries_large)
countries_large_counts = []
for c in countries_large:
countries_large_counts.append(country_counts.loc[c, 'non-fraud'])
countries_large = [countries_large[np.argsort(countries_large_counts)[::-1][i]] for i in range(len(countries_large))]
plt.figure(figsize=(10,5))
bottoms = np.zeros(3)
for i in range(len(countries_large)):
c = countries_large[i]
plt.bar((0, 1, 2), np.concatenate((country_counts.loc[c], [0])), label=c, bottom=bottoms)
bottoms += np.concatenate((country_counts.loc[c], [0]))
# fill up the rest
plt.bar((0, 1), 1-bottoms[:-1], bottom=bottoms[:-1], label='rest')
plt.legend(fontsize=20)
plt.xticks([0, 1], ['non-fraud', 'fraud'], size=15)
plt.ylabel('fraction transactions made', size=15)
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'country_distribution'))
plt.show()
currency_counts = pd.concat([d['Currency'].value_counts() for d in datasets], axis=1)
currency_counts.fillna(0, inplace=True)
currency_counts.columns = ['non-fraud', 'fraud']
currency_counts[['non-fraud', 'fraud']] /= currency_counts.sum(axis=0)
currencies_large = []
for c in ['non-fraud', 'fraud']:
currencies_large.extend(currency_counts.loc[currency_counts[c] > 0].index)
currencies_large = np.unique(currencies_large)
currencies_large_counts = []
for c in currencies_large:
currencies_large_counts.append(currency_counts.loc[c, 'non-fraud'])
currencies_large = [currencies_large[np.argsort(currencies_large_counts)[::-1][i]] for i in range(len(currencies_large))]
plt.figure(figsize=(10,5))
bottoms = np.zeros(3)
for i in range(len(currencies_large)):
c = currencies_large[i]
plt.bar((0, 1, 2), np.concatenate((currency_counts.loc[c], [0])), label=c, bottom=bottoms)
bottoms += np.concatenate((currency_counts.loc[c], [0]))
plt.legend(fontsize=20)
plt.xticks([0, 1], ['non-fraud', 'fraud'], size=15)
plt.ylabel('fraction of total transactions made', size=15)
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'currency_distribution'))
plt.show()
curr_per_cust = dataset0[['CardID', 'Currency']].groupby('CardID')['Currency'].value_counts().index.get_level_values(0)
print(len(curr_per_cust))
print(len(curr_per_cust.unique()))
print(len(curr_per_cust) - len(curr_per_cust.unique()))
curr_per_country0 = dataset0.groupby(['Country'])['Currency'].value_counts(normalize=True)
curr_per_country1 = dataset1.groupby(['Country'])['Currency'].value_counts(normalize=True)
curr_per_country0.to_csv(join(utils_data.FOLDER_SIMULATOR_INPUT, 'currency_per_country0.csv'))
curr_per_country1.to_csv(join(utils_data.FOLDER_SIMULATOR_INPUT, 'currency_per_country1.csv'))
plt.figure(figsize=(7,5))
currencies = dataset01['Currency'].unique()
merchants = dataset01['MerchantID'].unique()
for curr_idx in range(len(currencies)):
for merch_idx in range(len(merchants)):
plt.plot(range(len(currencies)), np.zeros(len(currencies))+merch_idx, 'r-', linewidth=0.2)
if currencies[curr_idx] in dataset01.loc[dataset01['MerchantID'] == merch_idx, 'Currency'].values:
plt.plot(curr_idx, merch_idx, 'ko')
plt.xticks(range(len(currencies)), currencies)
plt.ylabel('Merchant ID', size=15)
plt.xlabel('Currency', size=15)
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'currency_per_merchant'))
plt.show()
merch_per_curr0 = dataset0.groupby(['Currency'])['MerchantID'].value_counts(normalize=True)
merch_per_curr1 = dataset1.groupby(['Currency'])['MerchantID'].value_counts(normalize=True)
merch_per_curr0.to_csv(join(utils_data.FOLDER_SIMULATOR_INPUT, 'merchant_per_currency0.csv'))
merch_per_curr1.to_csv(join(utils_data.FOLDER_SIMULATOR_INPUT, 'merchant_per_currency1.csv'))
merchant_count0 = dataset0['MerchantID'].value_counts().sort_index()
merchant_count1 = dataset1['MerchantID'].value_counts().sort_index()
plt.figure(figsize=(15,10))
ax = plt.subplot(2, 1, 1)
ax.bar(merchant_count0.index.values, merchant_count0.values)
rects = ax.patches
for rect, label in zip(rects, merchant_count0.values):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height, label, ha='center', va='bottom')
plt.ylabel('num transactions')
plt.xticks([])
plt.xlim([-0.5, data_stats.loc['num merchants', 'all']+0.5])
ax = plt.subplot(2, 1, 2)
ax.bar(merchant_count1.index.values, merchant_count1.values)
rects = ax.patches
for rect, label in zip(rects, merchant_count1.values):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2, height, label, ha='center', va='bottom')
plt.ylabel('num transactions')
plt.xlabel('Merchant ID')
plt.xlim([-0.5, data_stats.loc['num merchants', 'all']+0.5])
plt.tight_layout()
plt.show()
plt.figure(figsize=(12, 10))
plt_idx = 1
for d in datasets:
plt.subplot(2, 1, plt_idx)
plt.plot(range(d.shape[0]), d['Amount'], 'k.')
# plt.plot(date_num, amount, 'k.', label='num trans.')
# plt.plot(date_num, np.zeros(len(date_num))+np.mean(all_trans), 'g',label='average')
plt_idx += 1
# plt.title(d.name, size=20)
plt.xlabel('transactions', size=15)
plt.xticks([])
if plt_idx == 2:
plt.ylabel('amount', size=15)
plt.legend(fontsize=15)
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'amount_day-in-year'))
plt.show()
print(dataset0.loc[dataset0['Amount'] == 5472.53,['Local_Date', 'CardID', 'MerchantID', 'Amount', 'Currency', 'Country']])
plt.figure(figsize=(10,5))
bins = [0, 5, 25, 50, 100, 1000, 11000]
plt_idx = 1
for d in datasets:
amount_counts, loc = np.histogram(d["Amount"], bins=bins)
amount_counts = np.array(amount_counts, dtype=np.float)
amount_counts /= np.sum(amount_counts)
plt.subplot(1, 2, plt_idx)
am_bot = 0
for i in range(len(amount_counts)):
plt.bar(plt_idx, amount_counts[i], bottom=am_bot, label='{}-{}'.format(bins[i], bins[i+1]))
am_bot += amount_counts[i]
plt_idx += 1
plt.ylim([0, 1.01])
plt.legend()
# plt.title("Amount distribution")
plt_idx += 1
plt.show()
plt.figure(figsize=(12, 10))
plt_idx = 1
for d in datasets:
plt.subplot(2, 1, plt_idx)
min_amount = min(d['Amount'])
max_amount = max(d['Amount'])
plt.plot(range(d.shape[0]), np.sort(d['Amount']), 'k.', label='transaction')
# plt.plot(date_num, amount, 'k.', label='num trans.')
plt.plot(np.linspace(0, d.shape[0], 100), np.zeros(100)+np.mean(d['Amount']), 'g--',label='average')
plt_idx += 1
plt.title(d.name, size=20)
plt.ylabel('amount', size=15)
if plt_idx == 3:
plt.xlabel('transactions', size=15)
else:
plt.legend(fontsize=15)
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'amount_day-in-year'))
plt.show()
from scipy.optimize import curve_fit
def sigmoid(x, x0, k):
y = 1 / (1 + np.exp(-k * (x - x0)))
return y
num_merchants = data_stats.loc['num merchants', 'all']
num_bins = 20
merchant_amount_distr = np.zeros((2, num_merchants, 2*num_bins+1))
plt.figure(figsize=(15, 5))
plt_idx = 1
for dataset in [dataset0, dataset1]:
for m in dataset0['MerchantID'].unique():
# get all transactions from this merchant
trans_merch = dataset.loc[dataset['MerchantID']==m]
num_transactions = trans_merch.shape[0]
if num_transactions > 0:
# get the amounts paid for the transactions with this merchant
amounts = trans_merch['Amount']
bins_height, bins_edges = np.histogram(amounts, bins=num_bins)
bins_height = np.array(bins_height, dtype=np.float)
bins_height /= np.sum(bins_height)
merchant_amount_distr[int(plt_idx > 7), (plt_idx-1)%7, :] = np.concatenate((bins_height, bins_edges))
plt.subplot(2, num_merchants, plt_idx)
plt.hist(amounts, bins=num_bins)
plt_idx += 1
plt.tight_layout()
plt.show()
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT,'merchant_amount_distr'), merchant_amount_distr)
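# Sketch (an addition): draw an amount from a merchant's saved histogram by
# first picking a bin with its probability, then sampling uniformly within it.
# Assumes the [bins_height, bins_edges] layout stored above.
def sample_amount(distr_row, num_bins=20):
    heights = distr_row[:num_bins]
    edges = distr_row[num_bins:]
    b = np.random.choice(num_bins, p=heights)
    return np.random.uniform(edges[b], edges[b + 1])
print(sample_amount(merchant_amount_distr[0, 0]))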
from scipy.optimize import curve_fit
def sigmoid(x, x0, k):
y = 1 / (1 + np.exp(-k * (x - x0)))
return y
num_merchants = data_stats.loc['num merchants', 'all']
merchant_amount_parameters = np.zeros((2, num_merchants, 4))
plt.figure(figsize=(15, 5))
plt_idx = 1
for dataset in [dataset0, dataset1]:
for m in dataset0['MerchantID'].unique():
# get all transactions from this merchant
trans_merch = dataset.loc[dataset['MerchantID']==m]
num_transactions = trans_merch.shape[0]
if num_transactions > 0:
# get the amounts paid for the transactions with this merchant
amounts = np.sort(trans_merch['Amount'])
min_amount = min(amounts)
max_amount = max(amounts)
amounts_normalised = (amounts - min_amount) / (max_amount - min_amount)
plt.subplot(2, num_merchants, plt_idx)
plt.plot(np.linspace(0, 1, num_transactions), amounts, '.')
# fit sigmoid
x_vals = np.linspace(0, 1, 100)
try:
p_sigmoid, _ = curve_fit(sigmoid, np.linspace(0, 1, num_transactions), amounts_normalised)
amounts_predict = sigmoid(x_vals, *p_sigmoid)
amounts_predict_denormalised = amounts_predict * (max_amount - min_amount) + min_amount
plt.plot(x_vals, amounts_predict_denormalised)
except:
# fit polynomial
p_poly = np.polyfit(np.linspace(0, 1, num_transactions), amounts_normalised, 2)
amounts_predict = np.polyval(p_poly, x_vals)
p_sigmoid, _ = curve_fit(sigmoid, x_vals, amounts_predict)
amounts_predict = sigmoid(x_vals, *p_sigmoid)
amounts_predict_denormalised = amounts_predict * (max_amount - min_amount) + min_amount
plt.plot(x_vals, amounts_predict_denormalised)
merchant_amount_parameters[int(plt_idx > 7), (plt_idx-1)%7] = [min_amount, max_amount, p_sigmoid[0], p_sigmoid[1]]
plt_idx += 1
plt.tight_layout()
plt.show()
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT,'merchant_amount_parameters'), merchant_amount_parameters)
print(merchant_amount_parameters)
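# Equivalent sketch for the sigmoid parameterisation (an addition): treat the
# fitted sigmoid as a quantile function and sample an amount via a uniform draw.
def sample_amount_sigmoid(params):
    min_a, max_a, x0, k = params
    u = np.random.uniform(0, 1)
    return sigmoid(u, x0, k) * (max_a - min_a) + min_a
print(sample_amount_sigmoid(merchant_amount_parameters[0, 0]))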
from scipy.optimize import curve_fit
def sigmoid(x, x0, k):
y = 1 / (1 + np.exp(-k * (x - x0)))
return y
num_merchants = data_stats.loc['num merchants', 'all']
merchant_amount_parameters = np.zeros((2, num_merchants, 4))
plt.figure(figsize=(6, 3))
plt_idx = 1
dataset = dataset0
m = dataset0['MerchantID'].unique()[0]
# get all transactions from this merchant
trans_merch = dataset.loc[dataset['MerchantID']==m]
num_transactions = trans_merch.shape[0]
# get the amounts paid for the transactions with this merchant
amounts = np.sort(trans_merch['Amount'])
min_amount = min(amounts)
max_amount = max(amounts)
amounts_normalised = (amounts - min_amount) / (max_amount - min_amount)
plt.plot(range(num_transactions), amounts, 'k-', linewidth=2, label='real')
# fit sigmoid
x_vals = np.linspace(0, 1, 100)
x = np.linspace(0, 1, num_transactions)
p_sigmoid, _ = curve_fit(sigmoid, np.linspace(0, 1, num_transactions), amounts_normalised)
amounts_predict = sigmoid(x_vals, *p_sigmoid)
amounts_predict_denormalised = amounts_predict * (max_amount - min_amount) + min_amount
plt.plot(np.linspace(0, num_transactions, 100), amounts_predict_denormalised, 'm--', linewidth=3, label='approx')
merchant_amount_parameters[int(plt_idx > 7), (plt_idx-1)%7] = [min_amount, max_amount, p_sigmoid[0], p_sigmoid[1]]
plt.xlabel('transaction count', fontsize=20)
plt.ylabel('price', fontsize=20)
plt.legend(fontsize=15)
plt.tight_layout()
plt.savefig(join(utils_data.FOLDER_REAL_DATA_ANALYSIS, 'merchant_price_sigmoid_fit'))
plt.show()
plt.figure(figsize=(15, 30))
plt_idx = 1
dist_transactions = [[], []]
for d in datasets:
# d = d.loc[d['Date'].apply(lambda date: date.month) < 7]
# d = d.loc[d['Date'].apply(lambda date: date.month) > 3]
plt.subplot(1, 2, plt_idx)
trans_idx = 0
for card in dataset01['CardID'].unique():
card_times = d.loc[d['CardID'] == card, 'Global_Date']
dist_transactions[plt_idx-1].extend([(card_times.iloc[i+1] - card_times.iloc[i]).days for i in range(len(card_times)-1)])
if plt_idx == 2:
num_c = 2
else:
num_c = 10
if len(card_times) > num_c:
card_times = card_times.apply(lambda date: date.date())
card_times = matplotlib.dates.date2num(card_times)
plt.plot(card_times, np.zeros(len(card_times)) + trans_idx, 'k.', markersize=1)
plt.plot(card_times, np.zeros(len(card_times)) + trans_idx, 'k-', linewidth=0.2)
trans_idx += 1
min_date = matplotlib.dates.date2num(min(dataset01['Global_Date']).date())
max_date = matplotlib.dates.date2num(max(dataset01['Global_Date']).date())
# plt.xlim([min_date, max_date])
plt.xticks([])
for m in range(1,13):
datenum = matplotlib.dates.date2num(datetime(2016, m, 1))
plt.plot(np.zeros(2)+datenum, [-1, 1000], 'r-', linewidth=0.5)
if plt_idx == 1:
plt.ylim([0,300])
else:
plt.ylim([0, 50])
plt_idx += 1
plt.show()
# average distance between two transactions with the same card
print(np.mean(dist_transactions[0]))
print(np.mean(dist_transactions[1]))
prob_stay = np.zeros(2)
for k in range(2):
dataset = [dataset0, dataset1][k]
creditcards = dataset.loc[dataset['Global_Date'].apply(lambda d: d.month) > 3]
creditcards = creditcards.loc[creditcards['Global_Date'].apply(lambda d: d.month) < 6]
creditcard_counts = creditcards['CardID'].value_counts()
creditcardIDs = creditcards['CardID']
data = dataset.loc[dataset['Global_Date'].apply(lambda d: d.month) > 3]
single = 0
multi = 0
for i in range(len(creditcards)):
cc = creditcards.iloc[i]['CardID']
dd = creditcards.iloc[i]['Global_Date']
cond1 = data['CardID'] == cc
cond2 = data['Global_Date'] > dd
if len(data.loc[np.logical_and(cond1, cond2)]) == 0:
single += 1
else:
multi += 1
prob_stay[k] = multi/(single+multi)
print('probability of doing another transaction:', prob_stay[k], '{}'.format(['non-fraud', 'fraud'][k]))
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT, 'prob_stay'), prob_stay)
cards0 = dataset0['CardID'].unique()
cards1 = dataset1['CardID'].unique()
print('cards total:', len(np.union1d(cards0, cards1)))
print('fraud cards:', len(cards1))
print('intersection:', len(np.intersect1d(cards0, cards1)))
# go through the cards that were in both sets
cards0_1 = []
cards1_0 = []
cards010 = []
for cib in np.intersect1d(cards0, cards1):
date0 = dataset0.loc[dataset0['CardID']==cib].iloc[0]['Global_Date']
date1 = dataset1.loc[dataset1['CardID']==cib].iloc[0]['Global_Date']
if date0 < date1:
cards0_1.append(cib)
# genuine purchases after fraud
dates00 = dataset0.loc[dataset0['CardID']==cib].iloc[1:]['Global_Date']
if len(dates00)>0:
if sum(dates00>date1)>0:
cards010.append(cib)
else:
cards1_0.append(cib)
print('first genuine then fraud: ', len(cards0_1))
print('first fraud then genuine: ', len(cards1_0))
print('genuine again after fraud: ', len(cards010))
prob_stay_after_fraud = len(cards010)/len(cards0_1)
print('prob of purchase after fraud: ', prob_stay_after_fraud)
np.save(join(utils_data.FOLDER_SIMULATOR_INPUT, 'prob_stay_after_fraud'), prob_stay_after_fraud )
plt.figure(figsize=(10, 25))
dist_transactions = []
trans_idx = 0
data_compromised = dataset01.loc[dataset01['CardID'].apply(lambda cid: cid in np.intersect1d(cards0, cards1))]
no_trans_after_fraud = 0
trans_after_fraud = 0
for card in data_compromised['CardID'].unique():
cards_used = data_compromised.loc[data_compromised['CardID'] == card, ['Global_Date', 'Target']]
dist_transactions.extend([(cards_used.iloc[i+1, 0] - cards_used.iloc[i, 0]).days for i in range(len(cards_used)-1)])
card_times = cards_used['Global_Date'].apply(lambda date: date.date())
card_times = matplotlib.dates.date2num(card_times)
plt.plot(card_times, np.zeros(len(card_times)) + trans_idx, 'k-', linewidth=0.9)
cond0 = cards_used['Target'] == 0
plt.plot(card_times[cond0], np.zeros(len(card_times[cond0])) + trans_idx, 'g.', markersize=5)
cond1 = cards_used['Target'] == 1
plt.plot(card_times[cond1], np.zeros(len(card_times[cond1])) + trans_idx, 'r.', markersize=5)
if max(cards_used.loc[cards_used['Target']==0, 'Global_Date']) > max(cards_used.loc[cards_used['Target']==1, 'Global_Date']):
trans_after_fraud += 1
else:
no_trans_after_fraud += 1
trans_idx += 1
min_date = matplotlib.dates.date2num(min(dataset01['Global_Date']).date())
max_date = matplotlib.dates.date2num(max(dataset01['Global_Date']).date())
plt.xticks([])
plt.ylim([0, trans_idx])
# print lines for months
for m in range(1,13):
datenum = matplotlib.dates.date2num(datetime(2016, m, 1))
plt.plot(np.zeros(2)+datenum, [-1, 1000], 'r-', linewidth=0.5)
plt_idx += 1
plt.show()
print("genuine transactions after fraud: ", trans_after_fraud)
print("fraud is the last transaction: ", no_trans_after_fraud)
plt.figure(figsize=(10, 25))
dist_transactions = []
trans_idx = 0
for card in data_compromised['CardID'].unique():
cards_used = data_compromised.loc[data_compromised['CardID'] == card, ['Global_Date', 'Target', 'Country', 'Currency']]
if len(cards_used['Country'].unique()) > 1 or len(cards_used['Currency'].unique()) > 1:
print(cards_used)
print("")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read in dataset and split into fraud/non-fraud
Step2: Print some basic info about the dataset
Step3: Percentage of fraudulent cards also in genuine transactions
Step4: 1. TIME of TRANSACTION
Step5: Analysis
Step6: Analysis
Step7: Analysis
Step8: Analysis
Step9: Analysis
Step10: 1.6 TEST
Step11: 2. COUNTRY
Step12: 3. CURRENCY
Step13: 3.1 Currency per country
Step14: CONCLUSION
Step15: 4. Merchants
Step16: We conclude from this that most merchants only sell things in one currency; thus, we will let each customer select the merchant given the currency that the customer has (which is unique).
Step17: 4.2 Number transactions per merchant
Step18: 5. Transaction Amount
Step19: 5.2 Amount distribution
Step20: For each merchant, we will have a probability distribution over the amount spent
Step21: We conclude that the normal customers and fraudsters follow roughly the same distribution, so we will only have one per merchant, irrespective of whether a genuine or fraudulent customer is making the transaction.
Step22: Customers
Step23: At a given transaction, estimate the probability of doing another transaction with the same card.
Step24: Fraud behaviour
Step25: when a fraudster uses an existing card, are country and currency always the same?
|
2,676
|
<ASSISTANT_TASK:>
Python Code:
project = Project('test')
print project.files
print project.generators
print project.models
engine = project.generators.c(Engine).one
modeller = project.generators.c(Analysis).one
pdb_file = project.files.f('*.pdb').one
print project.trajectories
# for f in project.files:
# print f.url
t = engine.task_import_trajectory_folder('/Users/jan-hendrikprinz/radical.pilot.sandbox/rp.session.Stevie.fritz.box.jan-hendrikprinz.017219.0004-pilot.0000/staging_area/trajs/*.dcd')
scheduler = project.get_scheduler()
scheduler.submit(t)
print project.files
import os
from adaptivemd.reducer import _replace_prefix
print _replace_prefix(project.files.one.url)
print _replace_prefix(project.files.last.url)
os.path.relpath(
'rp.session.Stevie.fritz.box.jan-hendrikprinz.017219.0004-pilot.0000/staging_area/trajs/00000000.dcd',
'rp.session.Stevie.fritz.box.jan-hendrikprinz.017219.0031-pilot.0000/unit.000000')
os.getcwd()
from adaptivemd import Archiver
ar = Archiver(Directory('shared://delete'))
scheduler = project.get_scheduler()
t = ar.task_archive(list(project.trajectories))
scheduler.submit(t)
project.close()
scheduler = project.get_scheduler(cores=2)
scheduler.submit(project.new_ml_trajectory(100, 1))
project.new_ml_trajectory(100, 1)
print project.files.last
t = modeller.execute(list(project.trajectories))
scheduler.submit(t)
import numpy as np
print project.models.last.data['msm']['C']
print (lambda x: x / sum(x))(np.sum(project.models.last.data['msm']['C'], axis=1))
project.close()
def task_generator():
return [
engine.task_run_trajectory(traj) for traj in
project.new_ml_trajectory(100, 2)]
task_generator()
ev = Event().on(project.on_ntraj(range(4, 12, 2))).do(task_generator)
def hello():
print 'DONE!!!'
return [] # todo: allow for None here
finished = Event().on(ev.on_tasks_finished()).do(hello)
scheduler.add_event(ev)
scheduler.add_event(finished)
scheduler.add_event(
Event().on(project.on_ntraj(range(4, 50, 2))).do(task_generator)
)
scheduler.add_event(
Event()
.on(project.on_ntraj(10))
.do(modeller.task_run_msm)
.repeat().until(project.on_ntraj(20))) # todo: change that this will stop when the first event is done
from adaptivemd import FunctionalEvent
def strategy():
# create a new scheduler
local_scheduler = project.get_scheduler(cores=2)
# run 10 trajs of length 100 in parallel
tasks = scheduler.submit(project.new_ml_trajectory(
length=100, number=10))
# wait until this is finished and specify a condition on when to
# continue (all tasks need to be done)
yield [t.is_done for t in tasks]
# close scheduler when job is done
local_scheduler.exit()
# yield a condition on when to be done. Nothing since we are
# done already
yield []
scheduler.add_event(FunctionalEvent(strategy))
scheduler.add_event(FunctionalEvent(strategy))
ev = FunctionalEvent(strategy)
ev._current_when
project.schedulers
print project.files
print len(project.models)
project.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set up the project and pick a resource. This should be done only the first time, when the project is created.
Step2: Opening a project will open the DB and create a RP session to be used
Step3: To actually run simulations you need a scheduler (maybe a better name?). This instance can execute tasks; more precisely, you can use it to submit tasks, which will be converted to ComputeUnitDescriptions and executed on the cluster chosen previously.
Step4: Now we are good to go and can run a first simulation
Step5: Now create an event.
Step6: .on specifies when something should be executed. In our case, when the project's number of trajectories (ntraj) is one of range(4, 24, 2), i.e. [4, 6, 8, 10, ...]
Step7: All events and tasks run in parallel, or at least get submitted and queued for execution in parallel. RP takes care of the actual execution.
Step8: .repeat means to redo the same task when the last is finished (it will just append an infinite list of conditions to keep on running).
Step9: The brain
|
2,677
|
<ASSISTANT_TASK:>
Python Code:
# BE SURE TO RUN THIS CELL BEFORE ANY OF THE OTHER CELLS
import psycopg2
import pandas as pd
# put your code here
# ------------------
statement =
SELECT DISTINCT iso_language, job_id,COUNT(*)
FROM
(SELECT
DISTINCT ON (from_user, iso_language)
*
FROM (SELECT * FROM twitter.tweet WHERE iso_language != 'und' AND job_id >= 255 LIMIT 100000) as T
ORDER BY from_user, iso_language) as UNIQ
GROUP BY iso_language, job_id;
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu' password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
cities = {}
for i in list(range(len(column_names))):
cities['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
pd.DataFrame(cities)
# put your code here
# ------------------
df = pd.DataFrame(cities)
from skbio.diversity.alpha import shannon
pop1 = [10,10,10]
pop2 = [10,20,30]
pop3 = [100,200,300]
print("pop1 ({}) is more diverse that pop2 ({}), which is as diverse as pop3({})".format(
shannon(pop1),shannon(pop2),shannon(pop3)))
pop1 = [10,10,10]
pop2 = [10,10,10,10]
print(Also, more languages but equal evenness means greater diversity.
For example, pop2 ({}) is more diverse than pop1 ({}).format(shannon(pop2),shannon(pop1)))
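# Sanity check: Shannon's index computed by hand should match skbio's shannon
# (assumption: skbio's shannon uses log base 2 by default).
from math import log

def shannon_manual(counts, base=2):
    total = float(sum(counts))
    props = [c / total for c in counts if c > 0]  # proportion of each language
    return -sum(p * log(p, base) for p in props)

print(shannon_manual([10, 20, 30]))  # compare with shannon([10, 20, 30])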
statement =
SELECT DISTINCT iso_language, COUNT(*)
FROM
(SELECT
DISTINCT ON (from_user, iso_language)
*
FROM (SELECT * FROM twitter.tweet WHERE job_id = 261 AND iso_language != 'und' LIMIT 10000) as T
ORDER BY from_user, iso_language) as UNIQ
GROUP BY iso_language;
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu' password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
como = {}
for i in list(range(len(column_names))):
como['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
columbia = pd.DataFrame(como)
print("The shannon index for Columbia is {}".format(shannon(columbia['count'])))
# put your code here
# ------------------
# query database
statement =
SELECT DISTINCT iso_language, COUNT(*)
FROM (
SELECT iso_language
FROM twitter.tweet
WHERE job_id = 261
LIMIT 10000) AS users
GROUP BY iso_language
ORDER BY count;
try:
connect_str = "dbname='twitter' user='dsa_ro_user' host='dbase.dsa.missouri.edu' password='readonly'"
# use our connection values to establish a connection
conn = psycopg2.connect(connect_str)
cursor = conn.cursor()
cursor.execute(statement)
column_names = [desc[0] for desc in cursor.description]
rows = cursor.fetchall()
except Exception as e:
print("Uh oh, can't connect. Invalid dbname, user or password?")
print(e)
# create dictionary from the rows and column names
job_261 = {}
for i in list(range(len(column_names))):
job_261['{}'.format(column_names[i])] = [x[i] for x in rows]
# turn dictionary into a data frame
not_clean = pd.DataFrame(job_261)
shannon(not_clean['count'])
samp = pd.DataFrame({'lang': ['en','es','fr','en','es','fr','en','es','fr','ru'],
'count': [30,20,10,11,10,9,30,1,1,1],
'city': [1,1,1,2,2,2,3,3,3,3]})
samp
samp['count'].groupby(samp['city']).apply(shannon)
# put your code here
# ------------------
df['count'].groupby(df['job_id']).apply(shannon)
samp['count'].groupby(samp['city']).apply(shannon).sort_values()
# put your code here
# ------------------
df['count'].groupby(df['job_id']).apply(shannon).sort_values()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Twitter
Step2: Databases can do a lot, but some things are more easily achieved through the flexibility of a general-purpose programming language like Python. In this notebook, we are going to run through running a query, returning the results to a pandas DataFrame object, and performing some operations over the data frame in order to analyze our data. Let's go ahead and create a data frame.
Step4: We have made substantial progress in preparing to calculate the linguistic diversity of these different cities. However, we want a single metric that can summarize each city. For this, we're going to borrow from the biological literature and use Shannon's Diversity Index $H$. This is a sort of entropy index that is generally used to measure the biological richness of an ecosystem.
Step6: This shannon function operates on counts. Conveniently, we have pulled out language counts per city, so we should be able to run it over this column per city to calculate the index for each city.
Step8:
Step9: This isn't all we want though. One city is boring if we can't compare to others. This is where pandas comes in handy. We can chain together methods to return results that we are looking for.
Step10: Here, we start with the count column as this is the column we want to find shannon on. Then we groupby city in order to find shannon per city. Finally, we run the apply method because shannon is a function and not a pandas method. Therefore, apply will apply the shannon function to every grouping of the count column.
Step11: We can also sort the values
Step12:
|
2,678
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os
assert os.path.isfile('yearssn.dat')
data=np.loadtxt('yearssn.dat')
year=data[:,0]
ssc=data[:,1]
assert len(year)==315
assert year.dtype==np.dtype(float)
assert len(ssc)==315
assert ssc.dtype==np.dtype(float)
plt.figure(figsize=(30,1))
plt.plot(year,ssc)
plt.xlabel('year')
plt.ylabel('ssc')
plt.title('Sunspots vs years')
plt.grid(True)
plt.box(False)
plt.xlim(right=2020)
assert True # leave for grading
plt.figure(figsize=(20,5))
plt.subplot(4,1,1)
plt.plot(year,ssc)
plt.xlim(1700,1800)
plt.ylabel('ssc')
plt.yticks([0,100,200],[0,100,200])
plt.box(False)
plt.tight_layout()
plt.subplot(4,1,2)
plt.plot(year,ssc)
plt.xlim(1801,1900)
plt.ylabel('ssc')
plt.yticks([0,100,200],[0,100,200])
plt.box(False)
plt.subplot(4,1,3)
plt.plot(year,ssc)
plt.xlim(1901,2000)
plt.ylabel('ssc')
plt.yticks([0,100,200],[0,100,200])
plt.box(False)
plt.subplot(4,1,4)
plt.plot(year,ssc)
plt.xlim(2000,2015)
plt.yticks([0,100,200],[0,100,200])
plt.ylabel('ssc')
plt.xlabel('year')
plt.box(False)
plt.tight_layout()
assert True # leave for grading
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Line plot of sunspot data
Step2: Use np.loadtxt to read the data into a NumPy array called data. Then create two new 1d NumPy arrays named years and ssc that have the sequence of year and sunspot counts.
Step3: Make a line plot showing the sunspot count as a function of year.
Step4: Describe the choices you have made in building this visualization and how they make it effective.
|
2,679
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import mne
import os.path as op
from matplotlib import pyplot as plt
# Load an example dataset, the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.RawFIF(data_path, preload=True, verbose=False)
# Give the sample rate
print('sample rate:', raw.info['sfreq'], 'Hz')
# Give the size of the data matrix
print('channels x samples:', raw._data.shape)
print('Shape of data array:', raw._data.shape)
array_data = raw._data[0, :1000]
_ = plt.plot(array_data)
# Extract data from the first 5 channels, from 1 s to 3 s.
sfreq = raw.info['sfreq']
data, times = raw[:5, int(sfreq * 1):int(sfreq * 3)]
_ = plt.plot(times, data.T)
_ = plt.title('Sample channels')
# Pull all MEG gradiometer channels:
# Make sure to use .copy() or it will overwrite the data
meg_only = raw.copy().pick_types(meg=True)
eeg_only = raw.copy().pick_types(meg=False, eeg=True)
# The MEG flag in particular lets you specify a string for more specificity
grad_only = raw.copy().pick_types(meg='grad')
# Or you can use custom channel names
pick_chans = ['MEG 0112', 'MEG 0111', 'MEG 0122', 'MEG 0123']
specific_chans = raw.copy().pick_channels(pick_chans)
print(meg_only, eeg_only, grad_only, specific_chans, sep='\n')
f, (a1, a2) = plt.subplots(2, 1)
eeg, times = eeg_only[0, :int(sfreq * 2)]
meg, times = meg_only[0, :int(sfreq * 2)]
a1.plot(times, meg[0])
a2.plot(times, eeg[0])
del eeg, meg, meg_only, grad_only, eeg_only, data, specific_chans
raw = raw.crop(0, 50) # in seconds
print('New time range from', raw.times.min(), 's to', raw.times.max(), 's')
nchan = raw.info['nchan']
raw = raw.drop_channels(['MEG 0241', 'EEG 001'])
print('Number of channels reduced from', nchan, 'to', raw.info['nchan'])
# Create multiple :class:`Raw <mne.io.RawFIF>` objects
raw1 = raw.copy().crop(0, 10)
raw2 = raw.copy().crop(10, 20)
raw3 = raw.copy().crop(20, 40)
# Concatenate in time (also works without preloading)
raw1.append([raw2, raw3])
print('Time extends from', raw1.times.min(), 's to', raw1.times.max(), 's')
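# Note: an equivalent module-level helper also exists for this (illustrative):
# raw_combined = mne.concatenate_raws([raw1, raw2, raw3])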
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Continuous data is stored in objects of type
Step2: Information about the channels contained in the
Step3: You can also pass an index directly to the
Step4: Selecting subsets of channels and samples
Step5: Notice the different scalings of these types
Step6: You can restrict the data to a specific time range
Step7: And drop channels by name
Step8: Concatenating
|
2,680
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import helper
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
import numpy as np
array = np.array(x) # Transform list of lists x into Numpy ndarray array
array_min = array.min()
return (array - array_min) / (array.max() - array_min) # Normalize array data (alternatively use sklearn's Normalizer)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
import numpy as np
n_classes = 10 # Number of possible label values given fixed as 0 to 9
return np.eye(n_classes)[x] # Return array of rows of identity matrix as given by x
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
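# Illustrative check of the np.eye indexing trick used above: each label picks
# out the matching row of the identity matrix, e.g.
# np.eye(4)[[0, 2]] -> [[1., 0., 0., 0.],
#                       [0., 0., 1., 0.]]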
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
tf.reset_default_graph()
def neural_net_image_input(image_shape):
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
return tf.placeholder(tf.float32, shape=(None, *image_shape), name='x')
def neural_net_label_input(n_classes):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
return tf.placeholder(tf.int32, shape=(None, n_classes), name='y')
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
return tf.placeholder(tf.float32, name='keep_prob')
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernel size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernel size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
conv_filter = tf.Variable(tf.truncated_normal([*conv_ksize, x_tensor.get_shape().as_list()[-1], conv_num_outputs], mean=0.0, stddev=0.1, dtype=tf.float32))
conv_bias = tf.Variable(tf.constant(0.1, shape=[conv_num_outputs]))
conv_strides = [1, *conv_strides, 1]
mpool_ksize = [1, *pool_ksize, 1]
mpool_strides = [1, *pool_strides, 1]
x_tensor = tf.nn.conv2d(x_tensor, conv_filter, conv_strides, padding='SAME')
x_tensor = tf.nn.bias_add(x_tensor, conv_bias)
x_tensor = tf.nn.relu(x_tensor)
x_tensor = tf.nn.max_pool(x_tensor, mpool_ksize, mpool_strides, padding='SAME')
return x_tensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
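# Shape sketch for the layer above (padding='SAME' throughout): a
# (None, 32, 32, 3) input with conv_strides=(1, 1) and pool_strides=(2, 2)
# comes out as (None, 16, 16, conv_num_outputs); only the pooling stride
# shrinks the spatial dimensions here.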
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
flat_size = 1
for i in x_tensor.get_shape().as_list()[1:] : flat_size *= i
x_tensor = tf.reshape(x_tensor, [-1, flat_size])
return x_tensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
weights = tf.Variable(tf.truncated_normal([x_tensor.get_shape().as_list()[1], num_outputs], mean=0.0, stddev=0.1, dtype=tf.float32))
biases = tf.Variable(tf.ones([num_outputs]) / 10)
x_tensor = tf.add(tf.matmul(x_tensor, weights), biases)
return x_tensor
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
weights = tf.Variable(tf.truncated_normal([x_tensor.get_shape().as_list()[1], num_outputs], mean=0.0, stddev=0.1, dtype=tf.float32))
biases = tf.Variable(tf.ones([num_outputs]) / 10)
y = tf.add(tf.matmul(x_tensor, weights), biases)
return y
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
x = conv2d_maxpool(x, 32, (5, 5), (1, 1), (2, 2), (2, 2))
x = conv2d_maxpool(x, 64, (5, 5), (1, 1), (2, 2), (2, 2))
x = conv2d_maxpool(x, 128, (3, 3), (1, 1), (2, 2), (2, 2))
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
x = flatten(x)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
x = fully_conn(x, 1024)
x = tf.nn.dropout(x, keep_prob)
x = fully_conn(x, 256)
x = tf.nn.dropout(x, keep_prob)
x = fully_conn(x, 64)
x = tf.nn.dropout(x, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
y = output(x, 10)
# TODO: return output
return y
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
session.run(optimizer, feed_dict={x: feature_batch, y: label_batch, keep_prob: keep_probability})
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
train_accur, loss = session.run([accuracy, cost], feed_dict={x: feature_batch, y: label_batch, keep_prob: 1.})
print("Training Accuracy= " + "{:.4f}".format(train_accur) + ", Batch Loss= " + "{:.2f}".format(loss))
valid_accur = session.run(accuracy, feed_dict={x: valid_features, y: valid_labels, keep_prob: 1.})
print("Total Validation Accuracy= " + "{:.4f}".format(valid_accur))
# TODO: Tune Parameters
epochs = 30
batch_size = 256
keep_probability = .6
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
2,681
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
points = pd.read_csv('rand.txt')
points.tail()
y = points["class"]
X = points[['r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11']]
# Split into training and test sets:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
# Normalize the input data:
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
# from sklearn.decomposition import PCA
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
# Fit the model
pipe_lr = Pipeline([
# ('pca', PCA(n_components=10)),
('clf', LogisticRegression(random_state=1))])
param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
param_grid = [{'clf__C': param_range, 'clf__penalty': ['l1', 'l2']}]
gs = GridSearchCV(estimator=pipe_lr, param_grid=param_grid,
scoring='accuracy', cv=10)
gs = gs.fit(X, y)
print(gs.best_score_)
print(gs.best_params_)
clf = gs.best_estimator_
clf.fit(X_train, y_train)
print('Test accuracy: %.3f' % clf.score(X_test, y_test))
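# Optional: inspect the full cross-validation results as a table. cv_results_
# is a standard GridSearchCV attribute; the param_clf__* column names follow
# from the clf__ prefix used in the pipeline above.
results = pd.DataFrame(gs.cv_results_)
print(results[["param_clf__C", "param_clf__penalty", "mean_test_score"]].head())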
import matplotlib.pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf
x = np.array(range(72))
y = 10 + 0.5*x + 10*np.sin(x)
plt.plot(x,y)
plot_acf(y);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Not everything is so bad, if
|
2,682
|
<ASSISTANT_TASK:>
Python Code:
#Ensure that we have Apache Beam version installed.
!pip freeze | grep apache-beam || sudo pip install apache-beam[gcp]==2.12.0
import tensorflow as tf
import apache_beam as beam
import shutil
import os
print(tf.__version__)
PROJECT = "cloud-training-demos" # Replace with your PROJECT
BUCKET = "cloud-training-bucket" # Replace with your BUCKET
REGION = "us-central1" # Choose an available region for Cloud MLE
TFVERSION = "1.14" # TF version for CMLE to use
import os
os.environ["BUCKET"] = BUCKET
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
# Create SQL query using natality data after the year 2000
query_string =
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
# Call BigQuery and examine in dataframe
from google.cloud import bigquery
bq = bigquery.Client(project = PROJECT)
df = bq.query(query_string + "LIMIT 100").to_dataframe()
df.head()
import apache_beam as beam
import datetime, os
def to_csv(rowdict):
# Pull columns from BQ and create a line
import hashlib
import copy
CSV_COLUMNS = "weight_pounds,is_male,mother_age,plurality,gestation_weeks".split(',')
# Create synthetic data where we assume that no ultrasound has been performed
# and so we don"t know sex of the baby. Let"s assume that we can tell the difference
# between single and multiple, but that the errors rates in determining exact number
# is difficult in the absence of an ultrasound.
no_ultrasound = copy.deepcopy(rowdict)
w_ultrasound = copy.deepcopy(rowdict)
no_ultrasound["is_male"] = "Unknown"
if rowdict["plurality"] > 1:
no_ultrasound["plurality"] = "Multiple(2+)"
else:
no_ultrasound["plurality"] = "Single(1)"
# Change the plurality column to strings
w_ultrasound["plurality"] = ["Single(1)", "Twins(2)", "Triplets(3)", "Quadruplets(4)", "Quintuplets(5)"][rowdict["plurality"] - 1]
# Write out two rows for each input row, one with ultrasound and one without
for result in [no_ultrasound, w_ultrasound]:
data = ','.join([str(result[k]) if k in result else "None" for k in CSV_COLUMNS])
yield str("{}".format(data))
def preprocess(in_test_mode):
import shutil, os, subprocess
job_name = "preprocess-babyweight-features" + "-" + datetime.datetime.now().strftime("%y%m%d-%H%M%S")
if in_test_mode:
print("Launching local job ... hang on")
OUTPUT_DIR = "./preproc"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
os.makedirs(OUTPUT_DIR)
else:
print("Launching Dataflow job {} ... hang on".format(job_name))
OUTPUT_DIR = "gs://{0}/babyweight/preproc/".format(BUCKET)
try:
subprocess.check_call("gsutil -m rm -r {}".format(OUTPUT_DIR).split())
except:
pass
options = {
"staging_location": os.path.join(OUTPUT_DIR, "tmp", "staging"),
"temp_location": os.path.join(OUTPUT_DIR, "tmp"),
"job_name": job_name,
"project": PROJECT,
"teardown_policy": "TEARDOWN_ALWAYS",
"no_save_main_session": True
}
opts = beam.pipeline.PipelineOptions(flags = [], **options)
if in_test_mode:
RUNNER = "DirectRunner"
else:
RUNNER = "DataflowRunner"
p = beam.Pipeline(RUNNER, options = opts)
query =
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND weight_pounds > 0
AND mother_age > 0
AND plurality > 0
AND gestation_weeks > 0
AND month > 0
if in_test_mode:
query = query + " LIMIT 100"
for step in ["train", "eval"]:
if step == "train":
selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) < 80".format(query)
elif step == "eval":
selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 80 AND ABS(MOD(hashmonth, 100)) < 90".format(query)
else:
selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 90".format(query)
(p
| "{}_read".format(step) >> beam.io.Read(beam.io.BigQuerySource(query = selquery, use_standard_sql = True))
| "{}_csv".format(step) >> beam.FlatMap(to_csv)
| "{}_out".format(step) >> beam.io.Write(beam.io.WriteToText(os.path.join(OUTPUT_DIR, "{}.csv".format(step))))
)
job = p.run()
if in_test_mode:
job.wait_until_finish()
print("Done!")
preprocess(in_test_mode = False)
!gsutil ls gs://$BUCKET/babyweight/preproc/*-00000*
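# The MOD(hashmonth, 100) split above is deterministic and repeatable. A rough
# Python analogue (illustrative only: FARM_FINGERPRINT is a different hash, so
# the bucket values will not match BigQuery's):
import hashlib

def split_bucket(year, month):
    digest = hashlib.md5("{}{}".format(year, month).encode()).hexdigest()
    return int(digest, 16) % 100  # < 80 train, 80-89 eval, >= 90 test

print(split_bucket(2005, 3))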
query =
WITH CTE_Raw_Data AS (
SELECT
weight_pounds,
CAST(is_male AS STRING) AS is_male,
mother_age,
plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND weight_pounds > 0
AND mother_age > 0
AND plurality > 0
AND gestation_weeks > 0
AND month > 0)
-- Ultrasound
SELECT
weight_pounds,
is_male,
mother_age,
CASE
WHEN plurality = 1 THEN "Single(1)"
WHEN plurality = 2 THEN "Twins(2)"
WHEN plurality = 3 THEN "Triplets(3)"
WHEN plurality = 4 THEN "Quadruplets(4)"
WHEN plurality = 5 THEN "Quintuplets(5)"
ELSE "NULL"
END AS plurality,
gestation_weeks,
hashmonth
FROM
CTE_Raw_Data
UNION ALL
-- No ultrasound
SELECT
weight_pounds,
"Unknown" AS is_male,
mother_age,
CASE
WHEN plurality = 1 THEN "Single(1)"
WHEN plurality > 1 THEN "Multiple(2+)"
END AS plurality,
gestation_weeks,
hashmonth
FROM
CTE_Raw_Data
from google.cloud import bigquery
# Construct a BigQuery client object.
client = bigquery.Client()
# Set dataset_id to the ID of the dataset to create.
dataset_name = "temp_babyweight_dataset"
dataset_id = "{}.{}".format(client.project, dataset_name)
# Construct a full Dataset object to send to the API.
dataset = bigquery.Dataset.from_string(dataset_id)
# Specify the geographic location where the dataset should reside.
dataset.location = "US"
# Send the dataset to the API for creation.
# Raises google.api_core.exceptions.Conflict if the Dataset already
# exists within the project.
try:
dataset = client.create_dataset(dataset) # API request
print("Created dataset {}.{}".format(client.project, dataset.dataset_id))
except:
print("Dataset {}.{} already exists".format(client.project, dataset.dataset_id))
job_config = bigquery.QueryJobConfig()
for step in ["train", "eval"]:
if step == "train":
selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) < 80".format(query)
elif step == "eval":
selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 80 AND ABS(MOD(hashmonth, 100)) < 90".format(query)
else:
selquery = "SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 100)) >= 90".format(query)
# Set the destination table
table_name = "babyweight_{}".format(step)
table_ref = client.dataset(dataset_name).table(table_name)
job_config.destination = table_ref
job_config.write_disposition = "WRITE_TRUNCATE"
# Start the query, passing in the extra configuration.
query_job = client.query(
query=selquery,
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location="US",
job_config=job_config) # API request - starts the query
query_job.result() # Waits for the query to finish
print("Query results loaded to table {}".format(table_ref.path))
dataset_ref = client.dataset(dataset_id=dataset_name, project=PROJECT)
for step in ["train", "eval"]:
destination_uri = "gs://{}/{}".format(BUCKET, "babyweight/bq_data/{}*.csv".format(step))
table_name = "babyweight_{}".format(step)
table_ref = dataset_ref.table(table_name)
extract_job = client.extract_table(
table_ref,
destination_uri,
# Location must match that of the source table.
location="US",
) # API request
extract_job.result() # Waits for job to complete.
print("Exported {}:{}.{} to {}".format(PROJECT, dataset_name, table_name, destination_uri))
!gsutil ls gs://$BUCKET/babyweight/bq_data/*000000000000*
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, set the environment variables related to your GCP Project.
Step3: Save the query from earlier
Step5: Create ML dataset using Dataflow
Step6: For a Cloud preprocessing job (i.e. setting in_test_mode to False), the above step will take 20+ minutes. Go to the GCP web console, navigate to the Dataflow section and wait for the job to finish before you run the following step.
Step8: Preprocessing with BigQuery
Step9: Create temporary BigQuery dataset
Step10: Execute query and write to BigQuery table.
Step11: Export BigQuery table to CSV in GCS.
Step12: View results
|
2,683
|
<ASSISTANT_TASK:>
Python Code:
def getamzProd(a,i,search_1):# fetch product rows from the DataFrame after connecting; a is the index, i is the total length of the DataFrame
while(a<=i):
ProdId=search_1.iloc[a]['pindex']
pname=search_1.iloc[a]['pname']
# totalRev=search_1.iloc[a]['totalRev']
totalRev=1# hard-coded for quick testing
return ProdId,pname,totalRev
def Authors(soup,ProdId):#(1) reviewer & product ID
list_author=[]
list_prodid=[]
# rAuthors=soup.select('span[class="a-size-base a-color-secondary review-byline"] > a[data-hook="review-author"]')
rAuthors=soup.select('div[data-hook="review"]')
for author in rAuthors:
list_author.append(author.div.next_sibling.span.get_text("|",strip=True).split("|")[1])
list_prodid.append(ProdId)
# print(author.div.prettify())
# print(list_author)
return list_author,list_prodid
def Stars(soup):#(2) star rating
list_star=[]
Rstars=soup.select("i[data-hook='review-star-rating']")
for star in Rstars:
star=re.sub('[a-zA-Z].+s','',star.text).split(' ')[0][0]
fStar=int(star)
list_star.append(fStar)
# print(repr(fStar))
return list_star
def Dates(soup):#(3) review date
list_date=[]
rDates=soup.select("span[data-hook='review-date']")
for date in rDates:
date2=(date.text)[3:]
date3=datetime.datetime.strptime(date2, '%B %d, %Y').strftime('%Y-%m-%d')
list_date.append(date3)
# print(repr(date2),end='\n')
return list_date
def Title(soup):#(4) review title
list_title=[]
rtitle=soup.select("a[data-hook='review-title']")
for title in rtitle:
list_title.append(title.text)
# print((title.text),end='\n')
return list_title
def Reviews(soup):#(5) review body
list_review=[]
reviews=soup.select("span[class='a-size-base review-text']")
for review in reviews:
# list_review.append(review.text)
list_review.append(review.get_text(separator="\n\n",strip=True))
# print(repr(review.text))
return list_review
def Verifieds(soup):#(6) verified purchase
list_verified=[]
rVerifieds=soup.select('div[class="a-row a-spacing-mini review-data review-format-strip"]')
a=0
for verified in rVerifieds :
if 'Verified' in verified.text:
ver=1
a+=1
else:
ver=0
a+=1
list_verified.append(ver)
# print(a,(ver),verified,end='\n')
return list_verified
def Comments(soup):#(7) number of replies to the review
list_comment=[]
rcomments = soup.select('span[class="review-comment-total aok-hidden"]')
for comment in rcomments:
list_comment.append(comment.text)
# print(repr(comment.text))
return list_comment
def Helps(soup):#(8) number of people who found the review helpful
list_helps=[]
tagsHelps=soup.select('span[class="cr-vote-buttons"] > span[class="a-color-secondary"]')
idx=0
for helps in tagsHelps:
if "One" in helps.text:
NumPeopleFindHelpful=1
elif (helps.span==None):
NumPeopleFindHelpful=0
else:
NumPeopleFindHelpful=int(re.sub('[^0-9]', '',(helps.text)))
idx+=1
# print(idx,repr(NumPeopleFindHelpful))
list_helps.append(NumPeopleFindHelpful)
return list_helps
def Crawler(ProdId,ProdName,totalNumReviews,maxretrytime=60):
'''
This function takes ProdId, ProdName and the total number of reviews,
and returns a DataFrame containing the Amazon review data.
'''
url_base ="https://www.amazon.com/"
url_01 = "/product-reviews/"
url_02="/ref=cm_cr_arp_d_paging_btm_1?pageNumber="
url_03="&reviewerType=all_reviews&pageSize=50"
#figure out how many pages to fetch: divide by 50 and add one page if there is a remainder
#if (totalNumReviews %50) != 0:
# totalNumPages=int(totalNumReviews/50) +1
#else:
# totalNumPages=int(totalNumReviews/50)
totalNumPages=int(totalNumReviews/50)+1
#table_final=pd.DataFrame()
list_prodid=[]
list_author=[]
list_star=[]
list_date=[]
list_title=[]
list_review=[]
list_verified=[]
list_comments=[]
list_helps=[]
for currentPageNum in range(1,totalNumPages+1):
print("ProdId= %s. Total number of pages= %s. Current page= %s."%(ProdId,totalNumPages,currentPageNum) )
passed=False
cnt=0
while(passed==False):
cnt+=1
if(cnt>maxretrytime):
raise Exception("Error! Tried too many times but we are still blocked by Amazon.")
print("ProdId="+ProdId+","+"CurrentPage="+currentPageNum)
try:
# establish the session
with requests.Session() as session:
#session.headers = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0"}
uri=url_base+ProdName+url_01+ProdId+url_02+str(currentPageNum)+url_03
r=session.get(uri)
if(r.status_code!=200):
print("Connection failed(status/=200). Reconnecting...")
sleep(0.3)
else:
# status 200: parse the response into a soup
soup = BeautifulSoup(r.content,"lxml")
#print(soup.prettify())
# if the soup is fake (robot check), nap a few seconds and fetch it again; if retries exceed maxretrytime, the program aborts
if("Robot Check" in soup.text):
print("we are identified as a robot! Reconnecting...")
sleep(1+0.5*random.randint(0,1)) # sleep 1 or 1.5 seconds
if(cnt>25):
sleep(1) # still blocked after 25 retries, so sleep one extra second
else:
print("We've obtained the correct soup!")
passed=True
lst_author,lst_prodid=Authors(soup,ProdId) #(1) reviewers and ProdId, each in its own list
lst_star=Stars(soup) #(2) star ratings
lst_date=Dates(soup) #(3) dates
lst_title=Title(soup) #(4) review titles
lst_review=Reviews(soup) #(5) review bodies
lst_verified=Verifieds(soup) #(6) verified-purchase flags
lst_comments=Comments(soup) #(7) reply counts
lst_helps=Helps(soup) #(8) helpful-vote counts
print(lst_author)
# print(list_star)
# print(list_date)
# print(list_title)
# print(list_review)
# print(list_verified)
# print(list_comments)
# print(list_helps)
print("URL=",uri)
lengths=[len(lst_prodid),len(lst_author),len(lst_star),len(lst_date),len(lst_title),len(lst_review),len(lst_verified),len(lst_comments),len(lst_helps)]
if(len(set(lengths))!=1):
print(lengths)
warnings.warn('Beware. Lists obtained have no equal length.')
print("length of lst_prodid=",len(lst_prodid))
print("length of lst_author=",len(lst_author))
print("length of lst_star=",len(lst_star))
print("length of lst_date=",len(lst_date))
print("length of lst_title=",len(lst_title))
print("length of lst_review=",len(lst_review))
print("length of lst_verified=",len(lst_verified))
print("length of lst_comments=",len(lst_comments))
print("length of lst_helps=",len(lst_helps))
#data={"pindex":list_prodid,"author":list_author,"star":list_star,"date":list_date,"title":list_title,"review":list_review,"verified":list_verified,"comment":list_comments,"help":list_helps}
# try:
#the next two lines may not be the most efficient, but the data volume is small, so no change is needed for now
#table=pd.DataFrame(data)
#table_final=table_final.append(table,ignore_index=True)
# except ValueError:
# print("ValueError!")
# print(soup.prettify())
# table.to_csv("crawler.csv", sep=',', encoding='utf-8')
except:
print("Error encounted! Probably due to connection time out. Reconnecting...")
print("ProdId= "+ProdId+". "+"Current Page= "+str(currentPageNum))
sleep(1)
list_prodid+=lst_prodid
list_author+=lst_author
list_star+=lst_star
list_date+=lst_date
list_title+=lst_title
list_review+=lst_review
list_verified+=lst_verified
list_comments+=lst_comments
list_helps+=lst_helps
df=pd.DataFrame.from_items([("pindex",list_prodid),("author",list_author),("star",list_star),\
("date",list_date),("title",list_title),("review",list_review), \
("verified",list_verified),("comment",list_comments),("help",list_helps)])\
.drop_duplicates("review").reset_index(drop=True)
return df
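# Design note: the retry loop above naps for roughly one second between
# attempts. If Amazon throttles aggressively, an exponential backoff is a
# common alternative, e.g. sleep(min(0.5 * 2 ** cnt, 10)) instead of sleep(1).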
def prodInfoFetcherForCrawler(thisCrawlerID,prodType):
prodTypes=["central","canister","handheld","robotic","stick","upright","wetdry"]
engine=create_engine("mysql+pymysql://semantic:GbwSq1RzFb@104.199.201.206:13606/Tests?charset=utf8mb4",echo=False, encoding='utf-8')
conn = engine.connect()
sql_command = "SELECT pindex,pname,totalRev,cID,cStatus FROM Tests.amzProd where type='"+ prodType +"' \
and cStatus!=1 and cID="+str(thisCrawlerID)+" ORDER BY totalRev desc"
resultSet = pd.read_sql_query(sql=sql_command, con=conn, coerce_float=False)
conn.close()
return resultSet
def prodRevstoSQL(ProdId,resultTable):
prodTypes=["central","canister","handheld","robotic","stick","upright","wetdry"]
# prepare the connection and connect to the DB
engine=create_engine("mysql+pymysql://semantic:GbwSq1RzFb@104.199.201.206:13606/Tests?charset=utf8mb4",convert_unicode=True,echo=False)
conn = engine.connect()
resultTable.to_sql(name='amzRev', con=conn, if_exists = 'append', index=False)
sql_command = "UPDATE Tests.amzProd SET cStatus=1 where pindex='"+ ProdId +"'"
result = conn.execute(sql_command)
# close the connection
conn.close()
prodTypes=["central","canister","handheld","robotic","stick","upright","wetdry"]
thisCrawlerID=1
prodType=prodTypes[5]
resultSet=prodInfoFetcherForCrawler(thisCrawlerID,prodType)
ProdId,ProdName,NumReviews=resultSet.loc[5,["pindex","pname","totalRev"]]
print(ProdId,ProdName,NumReviews)
ProdId,ProdName,NumReviews=resultSet.loc[5,["pindex","pname","totalRev"]]
NumReviews=200
resultTable=Crawler(ProdId,ProdName,NumReviews)
pd.set_option('max_colwidth', 2000)
print(resultTable.loc[resultTable["author"] == "Kellie",["review","title"]])
resultSet
print(resultSet.loc[0,"pname"])
print(resultSet.iloc[0,-2])
print(prodTypes[-2])
print([*range(5)])
print([*range(0,5,2)])
print([*range(1,5,2)])
print(prodTypes)
thisCrawlerID=1
prodType=prodTypes[0]
resultSet=prodInfoFetcherForCrawler(thisCrawlerID,prodType)
display(resultSet)
print(*range(0,2,2))
print(*range(1,2,2))
print(*range(0,2,4))
nRows = len(resultSet)
for j in range(nRows):
    print("this is item %i of %i items" % (j, nRows))
    x = resultSet.loc[j, ["pindex", "pname", "totalRev"]]
    print(x)
for j in [0,]:
ProdId,ProdName,NumReviews=resultSet.loc[j,["pindex","pname","totalRev"]]
print(ProdId,ProdName,NumReviews,"\n")
resultTable=Crawler(ProdId,ProdName,NumReviews)
display(resultTable)
prodRevstoSQL(ProdId,resultTable)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: short summary
Step2: the following results are the same
Step3: We previously confirmed that our crawler captures everything we want, and tests on several products with thousands of reviews all came back normal. Next, we assign the products under a specific vacuum-cleaner type to each crawler.
Step4: Query the table to confirm how many tasks you have:
Step5: Start crawling; below, as a test, we only crawl the product with the most reviews among the pending products
|
2,684
|
<ASSISTANT_TASK:>
Python Code:
# Assumed imports: the original notebook presumably loaded these in an earlier cell.
import os
import numpy as np
from bqplot import (LinearScale, OrdinalScale, ColorScale, Scatter, Lines,
                    Bars, Hist, Pie, Axis, Figure, Tooltip, Image)
from ipywidgets import Dropdown, Button, Layout, Image as ImageIpy

x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(20)
y_data = np.random.randn(20)
scatter_chart = Scatter(
x=x_data,
y=y_data,
scales={"x": x_sc, "y": y_sc},
colors=["dodgerblue"],
interactions={"click": "select"},
selected_style={"opacity": 1.0, "fill": "DarkOrange", "stroke": "Red"},
unselected_style={"opacity": 0.5},
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[scatter_chart], axes=[ax_x, ax_y])
scatter_chart.selected
scatter_chart.selected = [1, 2, 3]
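# The selected list is a traitlet, so selection changes can also be observed
# from Python (illustrative sketch using the standard traitlets API):
# scatter_chart.observe(lambda change: print(change["new"]), names=["selected"])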
x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(20)
y_data = np.random.randn(20)
dd = Dropdown(options=["First", "Second", "Third", "Fourth"])
scatter_chart = Scatter(
x=x_data,
y=y_data,
scales={"x": x_sc, "y": y_sc},
colors=["dodgerblue"],
names=np.arange(100, 200),
names_unique=False,
display_names=False,
display_legend=True,
labels=["Blue"],
)
ins = Button(icon="fa-legal")
scatter_chart.tooltip = ins
line = Lines(x=x_data, y=y_data, scales={"x": x_sc, "y": y_sc}, colors=["dodgerblue"])
scatter_chart2 = Scatter(
x=x_data,
y=np.random.randn(20),
scales={"x": x_sc, "y": y_sc},
colors=["orangered"],
tooltip=dd,
names=np.arange(100, 200),
names_unique=False,
display_names=False,
display_legend=True,
labels=["Red"],
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
fig = Figure(marks=[scatter_chart, scatter_chart2, line], axes=[ax_x, ax_y])
fig
def print_event(self, target):
print(target)
# Adding call back to scatter events
# print custom mssg on hover and background click of Blue Scatter
scatter_chart.on_hover(print_event)
scatter_chart.on_background_click(print_event)
# print custom mssg on click of an element or legend of Red Scatter
scatter_chart2.on_element_click(print_event)
scatter_chart2.on_legend_click(print_event)
line.on_element_click(print_event)
# Changing interaction from hover to click for tooltip
scatter_chart.interactions = {"click": "tooltip"}
# Adding figure as tooltip
x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(10)
y_data = np.random.randn(10)
lc = Lines(x=x_data, y=y_data, scales={"x": x_sc, "y": y_sc})
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
tooltip_fig = Figure(marks=[lc], axes=[ax_x, ax_y], layout=Layout(min_width="600px"))
scatter_chart.tooltip = tooltip_fig
i = ImageIpy.from_file(os.path.abspath("../data_files/trees.jpg"))
bqi = Image(image=i, scales={"x": x_sc, "y": y_sc}, x=(0, 10), y=(-1, 1))
fig_image = Figure(marks=[bqi], axes=[ax_x, ax_y])
fig_image
bqi.on_element_click(print_event)
# Adding default tooltip to Line Chart
x_sc = LinearScale()
y_sc = LinearScale()
x_data = np.arange(100)
y_data = np.random.randn(3, 100)
def_tt = Tooltip(
fields=["name", "index"], formats=["", ".2f"], labels=["id", "line_num"]
)
line_chart = Lines(
x=x_data,
y=y_data,
scales={"x": x_sc, "y": y_sc},
tooltip=def_tt,
display_legend=True,
labels=["line 1", "line 2", "line 3"],
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[line_chart], axes=[ax_x, ax_y])
# Adding call back to print event when legend or the line is clicked
line_chart.on_legend_click(print_event)
line_chart.on_element_click(print_event)
# Adding interaction to select bar on click for Bar Chart
x_sc = OrdinalScale()
y_sc = LinearScale()
x_data = np.arange(10)
y_data = np.random.randn(2, 10)
bar_chart = Bars(
x=x_data,
y=[y_data[0, :].tolist(), y_data[1, :].tolist()],
scales={"x": x_sc, "y": y_sc},
interactions={"click": "select"},
selected_style={"stroke": "orange", "fill": "red"},
labels=["Level 1", "Level 2"],
display_legend=True,
)
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[bar_chart], axes=[ax_x, ax_y])
# Adding a tooltip on hover in addition to select on click
def_tt = Tooltip(fields=["x", "y"], formats=["", ".2f"])
bar_chart.tooltip = def_tt
bar_chart.interactions = {
"legend_hover": "highlight_axes",
"hover": "tooltip",
"click": "select",
}
# Changing tooltip to be on click
bar_chart.interactions = {"click": "tooltip"}
# Call back on legend being clicked
bar_chart.type = "grouped"
bar_chart.on_legend_click(print_event)
# Adding tooltip for Histogram
x_sc = LinearScale()
y_sc = LinearScale()
sample_data = np.random.randn(100)
def_tt = Tooltip(formats=["", ".2f"], fields=["count", "midpoint"])
hist = Hist(
sample=sample_data,
scales={"sample": x_sc, "count": y_sc},
tooltip=def_tt,
display_legend=True,
labels=["Test Hist"],
select_bars=True,
)
ax_x = Axis(scale=x_sc, tick_format="0.2f")
ax_y = Axis(scale=y_sc, orientation="vertical", tick_format="0.2f")
Figure(marks=[hist], axes=[ax_x, ax_y])
# Changing tooltip to be displayed on click
hist.interactions = {"click": "tooltip"}
# Changing tooltip to be on click of legend
hist.interactions = {"legend_click": "tooltip"}
pie_data = np.abs(np.random.randn(10))
sc = ColorScale(scheme="Reds")
tooltip_widget = Tooltip(
fields=["size", "index", "color"], formats=["0.2f", "", "0.2f"]
)
pie = Pie(
sizes=pie_data,
scales={"color": sc},
color=np.random.randn(10),
tooltip=tooltip_widget,
interactions={"click": "tooltip"},
selected_style={"fill": "red"},
)
pie.selected_style = {"opacity": "1", "stroke": "white", "stroke-width": "2"}
pie.unselected_style = {"opacity": "0.2"}
Figure(marks=[pie])
# Changing interaction to select on click and tooltip on hover
pie.interactions = {"click": "select", "hover": "tooltip"}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Alternately, the selected attribute can be directly set on the Python side (try running the cell below)
Step2: Scatter Chart Interactions and Tooltips
Step3: Image
Step4: Line Chart
Step5: Bar Chart
Step6: Histogram
Step7: Pie Chart
|
2,685
|
<ASSISTANT_TASK:>
Python Code:
# Assumed setup: the original notebook presumably imported sympy/numpy and
# declared these symbols in an earlier cell; they are reconstructed here.
from sympy import *
import numpy as np

x, y, t = symbols('x y t')
s, h = symbols('s h')
M, Q, D, E = Function('M'), Function('Q'), Function('D'), Function('E')
p = Function('p')
m = M(x, y)     # squared slowness
q = Q(x, y, t)  # source term
d = D(x, y, t)  # adjoint source (data residual)
e = E(x, y)     # damping profile
dtt = as_finite_diff(p(x, y, t).diff(t, t), [t-s, t, t+s])
dt = as_finite_diff(p(x, y, t).diff(t), [t-s, t+s])
# Spatial finite differences can easily be extended to higher order by increasing the list of sampling points in the next expression.
# Be sure to keep this stencil symmetric and everything else in the notebook will follow.
dxx = as_finite_diff(p(x, y, t).diff(x, x), [x-h, x, x+h])
dyy = as_finite_diff(p(x, y, t).diff(y, y), [y-h, y, y+h])
dtt, dxx, dyy, dt
# Forward wave equation. Everything is kept 2D (the dzz term of the original
# 3D draft is dropped) so that the stencil matches the 2D simulation below;
# the source q and damping e are included so the src and damp arguments of ts take effect.
wave_equation = m*dtt - (dxx + dyy) - q + e*dt
stencil = solve(wave_equation, p(x, y, t+s))[0]
ts = lambdify((p(x, y, t-s), p(x-h, y, t), p(x, y, t), p(x+h, y, t), p(x, y-h, t), p(x, y+h, t), q, m, s, h, e), stencil, "numpy")
eq = Eq(p(x, y, t+s), stencil)
eq
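# For reference, with damping ignored the stencil above reduces to the classic
# second-order leapfrog update:
# p(t+s) = 2*p(t) - p(t-s) + (s**2/m) * (dxx(p) + dyy(p) + q)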
# Adjoint wave equation
wave_equationA = m*dtt- (dxx+dyy) - D(x,y,t) - e*dt
stencilA = solve(wave_equationA,p(x,y,t-s))[0]
tsA=lambdify((p(x,y,t+s),p(x-h,y,t), p(x,y,t), p(x+h,y,t),p(x,y-h,t), p(x,y+h,t), d , m, s, h,e),stencilA,"numpy")
stencilA
import matplotlib.pyplot as plt
from matplotlib import animation
hstep=25 #space increment d = minv/(10*f0);
tstep=2 #time increment dt < .5 * hstep /maxv;
tmin=0.0 #initial time
tmax=300 #simulate until
xmin=-875.0 #left bound
xmax=875.0 #right bound...assume packet never reaches boundary
ymin=-875.0 #left bound
ymax=875.0 #right bound...assume packet never reaches boundary
f0=.010
t0=1/.010
nbpml=10
nx = int((xmax-xmin)/hstep) + 1 #number of points on x grid
ny = int((ymax-ymin)/hstep) + 1 #number of points on x grid
nt = int((tmax-tmin)/tstep) + 2 #number of points on t grid
xsrc=-400
ysrc=0.0
xrec = nbpml+4
#set source as Ricker wavelet for f0
def source(x,y,t):
r = (np.pi*f0*(t-t0))
val = (1-2.*r**2)*np.exp(-r**2)
if abs(x-xsrc)<hstep/2 and abs(y-ysrc)<hstep/2:
return val
else:
return 0.0
def dampx(x):
dampcoeff=1.5*np.log(1.0/0.001)/(5.0*hstep);
if x<nbpml:
return dampcoeff*((nbpml-x)/nbpml)**2
elif x>nx-nbpml-1:
return dampcoeff*((x-nx+nbpml)/nbpml)**2
else:
return 0.0
def dampy(y):
dampcoeff=1.5*np.log(1.0/0.001)/(5.0*hstep);
if y<nbpml:
return dampcoeff*((nbpml-y)/nbpml)**2
elif y>ny-nbpml-1:
return dampcoeff*((y-ny+nbpml)/nbpml)**2
else:
return 0.0
# Velocity models
def smooth10(vel,nx,ny):
out=np.ones((nx,ny))
out[:,:]=vel[:,:]
for a in range(5,nx-6):
out[a,:]=np.sum(vel[a-5:a+5,:], axis=0) /10
return out
# True velocity
vel=np.ones((nx,ny)) + 2.0
vel[floor(nx/2):nx,:]=4.5
mt=vel**-2
# Smooth velocity
v0=smooth10(vel,nx,ny)
m0=v0**-2
dm=m0-mt
def Forward(nt,nx,ny,m):
u=np.zeros((nt,nx,ny))
rec=np.zeros((nt,ny-2))
for ti in range(0,nt):
for a in range(1,nx-1):
for b in range(1,ny-1):
src = source(xmin+a*hstep,ymin+b*hstep,tstep*ti)
damp=dampx(a)+dampy(b)
if ti==0:
u[ti,a,b]=ts(0,0,0,0,0,0,src,m[a,b],tstep,hstep,damp)
elif ti==1:
u[ti,a,b]=ts(0,u[ti-1,a-1,b],u[ti-1,a,b],u[ti-1,a+1,b],u[ti-1,a,b-1],u[ti-1,a,b+1],src,m[a,b],tstep,hstep,damp)
else:
u[ti,a,b]=ts(u[ti-2,a,b],u[ti-1,a-1,b],u[ti-1,a,b],u[ti-1,a+1,b],u[ti-1,a,b-1],u[ti-1,a,b+1],src,m[a,b],tstep,hstep,damp)
if a==xrec :
rec[ti,b-1]=u[ti,a,b]
return rec,u
def Adjoint(nt,nx,ny,m,rec):
v=np.zeros((nt,nx,ny))
srca=np.zeros((nt))
for ti in range(nt-1, -1, -1):
for a in range(1,nx-1):
for b in range(1,ny-1):
if a==xrec:
resid=rec[ti,b-1]
else:
resid=0
damp=dampx(a)+dampy(b)
if ti==nt-1:
v[ti,a,b]=tsA(0,0,0,0,0,0,resid,m[a,b],tstep,hstep,damp)
elif ti==nt-2:
v[ti,a,b]=tsA(0,v[ti+1,a-1,b],v[ti+1,a,b],v[ti+1,a+1,b],v[ti+1,a,b-1],v[ti+1,a,b+1],resid,m[a,b],tstep,hstep,damp)
else:
v[ti,a,b]=tsA(v[ti+2,a,b],v[ti+1,a-1,b],v[ti+1,a,b],v[ti+1,a+1,b],v[ti+1,a,b-1],v[ti+1,a,b+1],resid,m[a,b],tstep,hstep,damp)
if abs(xmin+a*hstep-xsrc)<hstep/2 and abs(ymin+b*hstep-ysrc)<hstep/2:
srca[ti]=v[ti,a,b]
return srca,v
def Gradient(nt,nx,ny,m,rec,u):
v1=np.zeros((nx,ny))
v2=np.zeros((nx,ny))
v3=np.zeros((nx,ny))
grad=np.zeros((nx,ny))
for ti in range(nt-1,-1,-1):
for a in range(1,nx-1):
for b in range(1,ny-1):
if a==xrec:
resid=rec[ti,b-1]
else:
resid=0
damp=dampx(a)+dampy(b)
v3[a,b]=tsA(v1[a,b],v2[a-1,b],v2[a,b],v2[a+1,b],v2[a,b-1],v2[a,b+1],resid,m[a,b],tstep,hstep,damp)
grad[a,b]=grad[a,b]-(v3[a,b]-2*v2[a,b]+v1[a,b])*(u[ti,a,b])
v1,v2,v3=v2,v3,v1
return tstep**-2*grad
def Born(nt,nx,ny,m,dm):
u1=np.zeros((nx,ny))
U1=np.zeros((nx,ny))
u2=np.zeros((nx,ny))
U2=np.zeros((nx,ny))
u3=np.zeros((nx,ny))
U3=np.zeros((nx,ny))
rec=np.zeros((nt,ny-2))
src2=0
for ti in range(0,nt):
for a in range(1,nx-1):
for b in range(1,ny-1):
damp=dampx(a)+dampy(b)
src = source(xmin+a*hstep,ymin+b*hstep,tstep*ti)
u3[a,b]=ts(u1[a,b],u2[a-1,b],u2[a,b],u2[a+1,b],u2[a,b-1],u2[a,b+1],src,m[a,b],tstep,hstep,damp)
src2 = -tstep**-2*(u3[a,b]-2*u2[a,b]+u1[a,b])*dm[a,b]
U3[a,b]=ts(U1[a,b],U2[a-1,b],U2[a,b],U2[a+1,b],U2[a,b-1],U2[a,b+1],src2,m[a,b],tstep,hstep,damp)
if a==xrec :
rec[ti,b-1]=U3[a,b]
u1,u2,u3=u2,u3,u1
U1,U2,U3=U2,U3,U1
return rec
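# Note on the linearization implemented above: u solves the background wave equation
# with source q, while the scattered field U solves the same equation driven by the
# virtual source src2 = -u_tt * dm, i.e. the model perturbation acting on the second
# time derivative of the background field. This is the standard Born approximation.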
(rect,ut)=Forward(nt,nx,ny,mt)
fig = plt.figure()
plts = [] # get ready to populate this list the Line artists to be plotted
plt.hold("off")
for i in range(nt):
r = plt.imshow(ut[i,:,:]) # this is how you'd plot a single line...
plts.append( [r] )
ani = animation.ArtistAnimation(fig, plts, interval=50, repeat = False) # run the animation
plt.show()
fig2 = plt.figure()
plt.hold("off")
shotrec = plt.imshow(rect) # this is how you'd plot a single line...
#plt.show()
(rec0,u0)=Forward(nt,nx,ny,m0)
(srca,v)=Adjoint(nt,nx,ny,m0,rec0)
plts = [] # get ready to populate this list the Line artists to be plotted
plt.hold("off")
for i in range(0,nt):
r = plt.imshow(v[i,:,:],vmin=-100, vmax=100) # this is how you'd plot a single line...
plts.append( [r] )
ani = animation.ArtistAnimation(fig, plts, interval=50, repeat = False) # run the animation
plt.show()
shotrec = plt.plot(srca) # this is how you'd plot a single line...
#plt.show()
# Actual adjoint test
term1=0
for ti in range(0,nt):
term1=term1+srca[ti]*source(xsrc,ysrc,(ti)*tstep)
term2=LA.norm(rec0)**2
term1,term2,term1-term2,term1/term2
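# The dot-product (adjoint) test above checks the identity <A q, A q> == <q, A'(A q)>:
# term2 is the squared norm of the modelled data and term1 back-projects the data
# through the adjoint and tests it against the source. The same identity with an
# explicit matrix, as a minimal sketch (demo variables, not part of the notebook's data):
A_demo = np.random.randn(5, 4)
q_demo = np.random.randn(4)
r_demo = A_demo @ q_demo
print(np.dot(r_demo, r_demo), np.dot(q_demo, A_demo.T @ r_demo))  # equal up to round-off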
# Misfit
F0=.5*LA.norm(rec0-rect)**2
F0
Im1=Gradient(nt,nx,ny,m0,rec0-rect,u0)
shotrec = plt.imshow(rect,vmin=-1,vmax=1) # this is how you'd plot a single line...
shotrec = plt.imshow(rec0,vmin=-1,vmax=1) # this is how you'd plot a single line...
shotrec = plt.imshow(rec0-rect,vmin=-.1,vmax=.1) # this is how you'd plot a single line...
shotrec = plt.imshow(Im1,vmin=-1,vmax=1) # this is how you'd plot a single line...
#plt.show()
Im2=Gradient(nt,nx,ny,m0,rec0,u0)
du1=Born(nt,nx,ny,m0,dm)
term11=np.dot((rec0).reshape(-1),du1.reshape(-1))
term21=np.dot(Im2.reshape(-1),dm.reshape(-1))
term11,term21,term11-term21,term11/term21
H=[1,0.1,0.01,.001,0.0001,0.00001,0.000001]
(D1,u0)=Forward(nt,nx,ny,m0)
dub=Born(nt,nx,ny,m0,dm)
error1=np.zeros((7))
error2=np.zeros((7))
for i in range(0,7):
mloc=m0+H[i]*dm
(d,u)=Forward(nt,nx,ny,mloc)
error1[i] = LA.norm(d - D1,ord=1)
error2[i] = LA.norm(d - D1 - H[i]*dub,ord=1)
hh=np.zeros((7))
for i in range(0,7):
hh[i]=H[i]*H[i]
shotrec = plt.loglog(H,error1,H,H) # this is how you'd plot a single line...
plt.show()
shotrec = plt.loglog(H,error2,H,hh) # this is how you'd plot a single line...
plt.show()
(DT,uT)=Forward(nt,nx,ny,mt)
(D1,u0)=Forward(nt,nx,ny,m0)
F0=.5*LA.norm(D1-DT)**2
g=Gradient(nt,nx,ny,m0,D1-DT,u0)
G=np.dot(g.reshape(-1),dm.reshape(-1));
error21=np.zeros((7))
error22=np.zeros((7))
for i in range(0,7):
mloc=m0+H[i]*dm
(D,u)=Forward(nt,nx,ny,mloc)
error21[i] = .5*LA.norm(D-DT)**2 -F0
error22[i] = .5*LA.norm(D-DT)**2 -F0 - H[i]*G
shotrec = plt.loglog(H,error21,H,H) # this is how you'd plot a single line...
plt.show()
shotrec = plt.loglog(H,error22,H,hh) # this is how you'd plot a single line...
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Time and space discretization as a Taylor expansion.
Step2: Solve forward in time
Step3: Rewriting the discret PDE as part of an Inversion
Step4: Define the discrete model
Step5: Create functions for the PDE
Step6: A Forward propagation example
Step7: Adjoint test
Step8: Least square objective Gradient
Step9: Adjoint test for the gradient
Step10: Jacobian test
Step11: Gradient test
|
2,686
|
<ASSISTANT_TASK:>
Python Code:
# The path to the local git repo for Indic NLP library
INDIC_NLP_LIB_HOME="/home/development/anoop/installs/indic_nlp_library"
# The path to the local git repo for Indic NLP Resources
INDIC_NLP_RESOURCES="/usr/local/bin/indicnlp/indic_nlp_resources"
import sys
sys.path.append('{}/src'.format(INDIC_NLP_LIB_HOME))
from indicnlp import common
common.set_resources_path(INDIC_NLP_RESOURCES)
from indicnlp import loader
loader.load()
from indicnlp.normalize.indic_normalize import IndicNormalizerFactory
input_text=u"\u0958 \u0915\u093c"
remove_nuktas=False
factory=IndicNormalizerFactory()
normalizer=factory.get_normalizer("hi",remove_nuktas)
output_text=normalizer.normalize(input_text)
print(output_text)
print('Length before normalization: {}'.format(len(input_text)))
print('Length after normalization: {}'.format(len(output_text)))
from indicnlp.transliterate.unicode_transliterate import UnicodeIndicTransliterator
input_text=u'राजस्थान'
print(UnicodeIndicTransliterator.transliterate(input_text,"hi","pa"))
from indicnlp.transliterate.unicode_transliterate import ItransTransliterator
input_text=u'राजस्थान'
lang='hi'
print(ItransTransliterator.to_itrans(input_text,lang))
from indicnlp.transliterate.unicode_transliterate import ItransTransliterator
# input_text=u'rajasthAna'
input_text=u'pitL^In'
lang='hi'
x=ItransTransliterator.from_itrans(input_text,lang)
print(x)
for y in x:
    print('{:x}'.format(ord(y)))
from indicnlp.script import indic_scripts as isc
c=u'क'
lang='hi'
isc.get_phonetic_feature_vector(c,lang)
sorted(isc.PV_PROP_RANGES.items(), key=lambda x: x[1][0])
from indicnlp.langinfo import *
c=u'क'
lang='hi'
print('Is vowel?: {}'.format(is_vowel(c,lang)))
print('Is consonant?: {}'.format(is_consonant(c,lang)))
print('Is velar?: {}'.format(is_velar(c,lang)))
print('Is palatal?: {}'.format(is_palatal(c,lang)))
print('Is aspirated?: {}'.format(is_aspirated(c,lang)))
print('Is unvoiced?: {}'.format(is_unvoiced(c,lang)))
print('Is nasal?: {}'.format(is_nasal(c,lang)))
from indicnlp.script import indic_scripts as isc
from indicnlp.script import phonetic_sim as psim
c1=u'क'
c2=u'ख'
c3=u'भ'
lang='hi'
print(u'Similarity between {} and {}'.format(c1,c2))
print(psim.cosine(
    isc.get_phonetic_feature_vector(c1,lang),
    isc.get_phonetic_feature_vector(c2,lang)
))
print()
print(u'Similarity between {} and {}'.format(c1,c3))
print(psim.cosine(
    isc.get_phonetic_feature_vector(c1,lang),
    isc.get_phonetic_feature_vector(c3,lang)
))
from indicnlp.script import indic_scripts as isc
from indicnlp.script import phonetic_sim as psim
slang='hi'
tlang='ml'
sim_mat=psim.create_similarity_matrix(psim.cosine,slang,tlang,normalize=False)
c1=u'क'
c2=u'ഖ'
print(u'Similarity between {} and {}'.format(c1,c2))
print(sim_mat[isc.get_offset(c1,slang),isc.get_offset(c2,tlang)])
slang='hi'
tlang='ml'
sim_mat=psim.create_similarity_matrix(psim.sim1,slang,tlang,normalize=True)
c1=u'क'
c2=u'ഖ'
print(u'Similarity between {} and {}'.format(c1,c2))
print(sim_mat[isc.get_offset(c1,slang),isc.get_offset(c2,tlang)])
from indicnlp.syllable import syllabifier
w=u'जगदीशचंद्र'
lang='hi'
print(u' '.join(syllabifier.orthographic_syllabify(w,lang)))
from indicnlp.tokenize import indic_tokenize
indic_string=u'अनूप,अनूप?।फोन'
print(u'Input String: {}'.format(indic_string))
print(u'Tokens: ')
for t in indic_tokenize.trivial_tokenize(indic_string):
    print(t)
from indicnlp.morph import unsupervised_morph
from indicnlp import common
analyzer=unsupervised_morph.UnsupervisedMorphAnalyzer('mr')
indic_string=u'आपल्या हिरड्यांच्या आणि दातांच्यामध्ये जीवाणू असतात .'
analyzes_tokens=analyzer.morph_analyze_document(indic_string.split(' '))
for w in analyzes_tokens:
    print(w)
import urllib.request
from django.utils.encoding import *
from django.utils.http import *
text = iri_to_uri(urlquote('anoop, ratish kal fone par baat karenge'))
url = u'http://www.cfilt.iitb.ac.in/indicnlpweb/indicnlpws/transliterate_bulk/en/hi/{}/statistical'.format(text)
response = urllib.request.urlopen(url).read()
print(response)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add Library to Python path
Step2: Export environment variable
Step3: Initialize the Indic NLP library
Step4: Let's actually try out some of the API methods in the Indic NLP library
Step5: Script Conversion
Step6: Romanization
Step7: Indicization (ITRANS to Indic Script)
Step8: Script Information
Step9: The fields in this bit vector are (from left to right)
Step10: You can check the phonetic information database files in Indic NLP resources to know the definition of each of the bits.
Step11: Get Phonetic Similarity
Step12: You may have figured out that you can also compute similarities of characters belonging to different scripts.
Step13: Some similarity functions like sim do not generate values in the range [0,1] and it may be more convenient to have the similarity values in the range [0,1]. This can be achieved by setting the normalize paramter to True
Step14: Orthographic Syllabification
Step15: Tokenization
Step16: Word Segmentation
Step17: Transliteration
|
2,687
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import GPy
from emukit.model_wrappers import GPyModelWrapper
from emukit.experimental_design.experimental_design_loop import ExperimentalDesignLoop
from emukit.core import ParameterSpace, ContinuousParameter
from emukit.core.loop import UserFunctionWrapper
x_min = -30.0
x_max = 30.0
X = np.random.uniform(x_min, x_max, (10, 1))
Y = np.sin(X) + np.random.randn(10, 1) * 0.05
gpy_model = GPy.models.GPRegression(X, Y)
emukit_model = GPyModelWrapper(gpy_model)
p = ContinuousParameter('c', x_min, x_max)
space = ParameterSpace([p])
loop = ExperimentalDesignLoop(space, emukit_model)
loop.run_loop(np.sin, 30)
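# A hedged sketch: ExperimentalDesignLoop defaults to a model-variance acquisition,
# and the same loop can be built with the acquisition spelled out explicitly
# (check the import path against your installed Emukit version):
from emukit.experimental_design.acquisitions import ModelVariance
explicit_loop = ExperimentalDesignLoop(space, emukit_model, acquisition=ModelVariance(emukit_model))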
%matplotlib inline
import matplotlib.pyplot as plt
real_x = np.arange(x_min, x_max, 0.2)
real_y = np.sin(real_x)
plt.title('Learning function sin(x) with Emukit')
plt.xlabel('x')
plt.ylabel('y', rotation=None)
plt.plot(real_x, real_y, c='r')
plt.scatter(loop.loop_state.X[:, 0].tolist(), loop.loop_state.Y[:, 0].tolist());
plt.legend(['True function', 'Acquired datapoints'], loc='lower right');
predicted_y = []
predicted_std = []
for x in real_x:
y, var = emukit_model.predict(np.array([[x]]))
std = np.sqrt(var)
predicted_y.append(y)
predicted_std.append(std)
predicted_y = np.array(predicted_y).flatten()
predicted_std = np.array(predicted_std).flatten()
plt.title('Learning function sin(x) with Emukit')
plt.xlabel('x')
plt.ylabel('y', rotation=None)
plt.plot(real_x, real_y, c='r', )
plt.plot(real_x, predicted_y)
plt.legend(['True function', 'Estimated function'], loc='lower right')
plt.fill_between(real_x, predicted_y - 2 * predicted_std, predicted_y + 2 * predicted_std, alpha=.5);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's assume we have built a GPy model of some function that we would like to understand. In this toy example we will use sin(x), but of course the actual objective function can be much more complex.
Step2: Now we would like to collect more data about this function's behavior. Normally this function would be very expensive or time-consuming to evaluate, so we cannot just run it for every possible input. Thus we need a reasonable approximation that lets us collect as few points as possible. For that we will use Emukit's experimental design module.
Step3: Next step is to describe your parameter space. In our case this is just one continuous variable.
Step4: At this point we have everything we need to run the experimental design loop with Emukit.
Step5: After the loop is run its results are available as the loop state. Let's plot it alongside the original function.
Step6: We can also plot the trained model, to see how well it learned the function.
|
2,688
|
<ASSISTANT_TASK:>
Python Code:
BATCH_SIZE = 64
EPOCHS = 10
training_images_file = 'gs://mnist-public/train-images-idx3-ubyte'
training_labels_file = 'gs://mnist-public/train-labels-idx1-ubyte'
validation_images_file = 'gs://mnist-public/t10k-images-idx3-ubyte'
validation_labels_file = 'gs://mnist-public/t10k-labels-idx1-ubyte'
import os, re, math, json, shutil, pprint
import PIL.Image, PIL.ImageFont, PIL.ImageDraw
import IPython.display as display
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
print("Tensorflow version " + tf.__version__)
#@title visualization utilities [RUN ME]
"""
This cell contains helper functions used for visualization
and downloads only. You can skip reading it. There is very
little useful Keras/Tensorflow code here.
"""
# Matplotlib config
plt.ioff()
plt.rc('image', cmap='gray_r')
plt.rc('grid', linewidth=1)
plt.rc('xtick', top=False, bottom=False, labelsize='large')
plt.rc('ytick', left=False, right=False, labelsize='large')
plt.rc('axes', facecolor='F8F8F8', titlesize="large", edgecolor='white')
plt.rc('text', color='a8151a')
plt.rc('figure', facecolor='F0F0F0', figsize=(16,9))
# Matplotlib fonts
MATPLOTLIB_FONT_DIR = os.path.join(os.path.dirname(plt.__file__), "mpl-data/fonts/ttf")
# pull a batch from the datasets. This code is not very nice, it gets much better in eager mode (TODO)
def dataset_to_numpy_util(training_dataset, validation_dataset, N):
# get one batch from each: 10000 validation digits, N training digits
batch_train_ds = training_dataset.unbatch().batch(N)
# eager execution: loop through datasets normally
if tf.executing_eagerly():
for validation_digits, validation_labels in validation_dataset:
validation_digits = validation_digits.numpy()
validation_labels = validation_labels.numpy()
break
for training_digits, training_labels in batch_train_ds:
training_digits = training_digits.numpy()
training_labels = training_labels.numpy()
break
else:
v_images, v_labels = validation_dataset.make_one_shot_iterator().get_next()
t_images, t_labels = batch_train_ds.make_one_shot_iterator().get_next()
# Run once, get one batch. Session.run returns numpy results
with tf.Session() as ses:
(validation_digits, validation_labels,
training_digits, training_labels) = ses.run([v_images, v_labels, t_images, t_labels])
# these were one-hot encoded in the dataset
validation_labels = np.argmax(validation_labels, axis=1)
training_labels = np.argmax(training_labels, axis=1)
return (training_digits, training_labels,
validation_digits, validation_labels)
# create digits from local fonts for testing
def create_digits_from_local_fonts(n):
font_labels = []
img = PIL.Image.new('LA', (28*n, 28), color = (0,255)) # format 'LA': black in channel 0, alpha in channel 1
font1 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'DejaVuSansMono-Oblique.ttf'), 25)
font2 = PIL.ImageFont.truetype(os.path.join(MATPLOTLIB_FONT_DIR, 'STIXGeneral.ttf'), 25)
d = PIL.ImageDraw.Draw(img)
for i in range(n):
font_labels.append(i%10)
d.text((7+i*28,0 if i<10 else -4), str(i%10), fill=(255,255), font=font1 if i<10 else font2)
font_digits = np.array(img.getdata(), np.float32)[:,0] / 255.0 # black in channel 0, alpha in channel 1 (discarded)
font_digits = np.reshape(np.stack(np.split(np.reshape(font_digits, [28, 28*n]), n, axis=1), axis=0), [n, 28*28])
return font_digits, font_labels
# utility to display a row of digits with their predictions
def display_digits(digits, predictions, labels, title, n):
fig = plt.figure(figsize=(13,3))
digits = np.reshape(digits, [n, 28, 28])
digits = np.swapaxes(digits, 0, 1)
digits = np.reshape(digits, [28, 28*n])
plt.yticks([])
plt.xticks([28*x+14 for x in range(n)], predictions)
plt.grid(b=None)
for i,t in enumerate(plt.gca().xaxis.get_ticklabels()):
if predictions[i] != labels[i]: t.set_color('red') # bad predictions in red
plt.imshow(digits)
plt.grid(None)
plt.title(title)
display.display(fig)
# utility to display multiple rows of digits, sorted by unrecognized/recognized status
def display_top_unrecognized(digits, predictions, labels, n, lines):
idx = np.argsort(predictions==labels) # sort order: unrecognized first
for i in range(lines):
display_digits(digits[idx][i*n:(i+1)*n], predictions[idx][i*n:(i+1)*n], labels[idx][i*n:(i+1)*n],
"{} sample validation digits out of {} with bad predictions in red and sorted first".format(n*lines, len(digits)) if i==0 else "", n)
def plot_learning_rate(lr_func, epochs):
  xx = np.arange(epochs+1, dtype=float)
y = [lr_decay(x) for x in xx]
fig, ax = plt.subplots(figsize=(9, 6))
ax.set_xlabel('epochs')
ax.set_title('Learning rate\ndecays from {:0.3g} to {:0.3g}'.format(y[0], y[-2]))
ax.minorticks_on()
ax.grid(True, which='major', axis='both', linestyle='-', linewidth=1)
ax.grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
ax.step(xx,y, linewidth=3, where='post')
display.display(fig)
class PlotTraining(tf.keras.callbacks.Callback):
def __init__(self, sample_rate=1, zoom=1):
self.sample_rate = sample_rate
self.step = 0
self.zoom = zoom
self.steps_per_epoch = 60000//BATCH_SIZE
def on_train_begin(self, logs={}):
self.batch_history = {}
self.batch_step = []
self.epoch_history = {}
self.epoch_step = []
self.fig, self.axes = plt.subplots(1, 2, figsize=(16, 7))
plt.ioff()
def on_batch_end(self, batch, logs={}):
if (batch % self.sample_rate) == 0:
self.batch_step.append(self.step)
for k,v in logs.items():
# do not log "batch" and "size" metrics that do not change
# do not log training accuracy "acc"
if k=='batch' or k=='size':# or k=='acc':
continue
self.batch_history.setdefault(k, []).append(v)
self.step += 1
def on_epoch_end(self, epoch, logs={}):
plt.close(self.fig)
self.axes[0].cla()
self.axes[1].cla()
self.axes[0].set_ylim(0, 1.2/self.zoom)
self.axes[1].set_ylim(1-1/self.zoom/2, 1+0.1/self.zoom/2)
self.epoch_step.append(self.step)
for k,v in logs.items():
# only log validation metrics
if not k.startswith('val_'):
continue
self.epoch_history.setdefault(k, []).append(v)
display.clear_output(wait=True)
for k,v in self.batch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.batch_step) / self.steps_per_epoch, v, label=k)
for k,v in self.epoch_history.items():
self.axes[0 if k.endswith('loss') else 1].plot(np.array(self.epoch_step) / self.steps_per_epoch, v, label=k, linewidth=3)
self.axes[0].legend()
self.axes[1].legend()
self.axes[0].set_xlabel('epochs')
self.axes[1].set_xlabel('epochs')
self.axes[0].minorticks_on()
self.axes[0].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[0].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
self.axes[1].minorticks_on()
self.axes[1].grid(True, which='major', axis='both', linestyle='-', linewidth=1)
self.axes[1].grid(True, which='minor', axis='both', linestyle=':', linewidth=0.5)
display.display(self.fig)
AUTO = tf.data.experimental.AUTOTUNE
def read_label(tf_bytestring):
label = tf.io.decode_raw(tf_bytestring, tf.uint8)
label = tf.reshape(label, [])
label = tf.one_hot(label, 10)
return label
def read_image(tf_bytestring):
image = tf.io.decode_raw(tf_bytestring, tf.uint8)
image = tf.cast(image, tf.float32)/256.0
image = tf.reshape(image, [28*28])
return image
def load_dataset(image_file, label_file):
imagedataset = tf.data.FixedLengthRecordDataset(image_file, 28*28, header_bytes=16)
imagedataset = imagedataset.map(read_image, num_parallel_calls=16)
labelsdataset = tf.data.FixedLengthRecordDataset(label_file, 1, header_bytes=8)
labelsdataset = labelsdataset.map(read_label, num_parallel_calls=16)
dataset = tf.data.Dataset.zip((imagedataset, labelsdataset))
return dataset
def get_training_dataset(image_file, label_file, batch_size):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.shuffle(5000, reshuffle_each_iteration=True)
dataset = dataset.repeat() # Mandatory for Keras for now
dataset = dataset.batch(batch_size, drop_remainder=True) # drop_remainder is important on TPU, batch size must be fixed
dataset = dataset.prefetch(AUTO) # fetch next batches while training on the current one (-1: autotune prefetch buffer size)
return dataset
def get_validation_dataset(image_file, label_file):
dataset = load_dataset(image_file, label_file)
dataset = dataset.cache() # this small dataset can be entirely cached in RAM, for TPU this is important to get good performance from such a small dataset
dataset = dataset.batch(10000, drop_remainder=True) # 10000 items in eval dataset, all in one batch
dataset = dataset.repeat() # Mandatory for Keras for now
return dataset
# instantiate the datasets
training_dataset = get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_dataset = get_validation_dataset(validation_images_file, validation_labels_file)
# For TPU, we will need a function that returns the dataset
training_input_fn = lambda: get_training_dataset(training_images_file, training_labels_file, BATCH_SIZE)
validation_input_fn = lambda: get_validation_dataset(validation_images_file, validation_labels_file)
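# Sketch of how these dataset factories are typically consumed on TPU (assumes a
# Cloud/Colab TPU runtime is attached; left commented out so the CPU/GPU path runs as-is):
# resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
# tf.config.experimental_connect_to_cluster(resolver)
# tf.tpu.experimental.initialize_tpu_system(resolver)
# strategy = tf.distribute.TPUStrategy(resolver)
# with strategy.scope():
#     model = ...  # build and compile the Keras model inside the strategy scope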
N = 24
(training_digits, training_labels,
validation_digits, validation_labels) = dataset_to_numpy_util(training_dataset, validation_dataset, N)
display_digits(training_digits, training_labels, training_labels, "training digits and their labels", N)
display_digits(validation_digits[:N], validation_labels[:N], validation_labels[:N], "validation digits and their labels", N)
font_digits, font_labels = create_digits_from_local_fonts(N)
model = tf.keras.Sequential(
[
tf.keras.layers.Reshape(input_shape=(28*28,), target_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(kernel_size=3, filters=12, use_bias=False, padding='same'),
tf.keras.layers.BatchNormalization(center=True, scale=False),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Conv2D(kernel_size=6, filters=24, use_bias=False, padding='same', strides=2),
tf.keras.layers.BatchNormalization(center=True, scale=False),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Conv2D(kernel_size=6, filters=32, use_bias=False, padding='same', strides=2),
tf.keras.layers.BatchNormalization(center=True, scale=False),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(200, use_bias=False),
tf.keras.layers.BatchNormalization(center=True, scale=False),
tf.keras.layers.Activation('relu'),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
loss='categorical_crossentropy',
metrics=['accuracy'])
# print model layers
model.summary()
# utility callback that displays training curves
plot_training = PlotTraining(sample_rate=10, zoom=16)
# lr decay function
def lr_decay(epoch):
return 0.01 * math.pow(0.666, epoch)
# lr schedule callback
lr_decay_callback = tf.keras.callbacks.LearningRateScheduler(lr_decay, verbose=True)
# important to see what you are doing
plot_learning_rate(lr_decay, EPOCHS)
steps_per_epoch = 60000//BATCH_SIZE # 60,000 items in this dataset
print("Steps per epoch: ", steps_per_epoch)
history = model.fit(training_dataset, steps_per_epoch=steps_per_epoch, epochs=EPOCHS,
validation_data=validation_dataset, validation_steps=1, callbacks=[plot_training, lr_decay_callback])
# recognize digits from local fonts
probabilities = model.predict(font_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_digits(font_digits, predicted_labels, font_labels, "predictions from local fonts (bad predictions in red)", N)
# recognize validation digits
probabilities = model.predict(validation_digits, steps=1)
predicted_labels = np.argmax(probabilities, axis=1)
display_top_unrecognized(validation_digits, predicted_labels, validation_labels, N, 7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Imports
Step3: tf.data.Dataset
Step4: Let's have a look at the data
Step5: Keras model
Step6: Learning Rate schedule
Step7: Train and validate the model
Step8: Visualize predictions
|
2,689
|
<ASSISTANT_TASK:>
Python Code:
import asyncio
import functools
def callback(arg, *, kwarg='default'):
print('callback invoked with {} and {}'.format(arg, kwarg))
async def main(loop):
print('registering callbacks')
loop.call_soon(callback, 1)
wrapped = functools.partial(callback, kwarg='not default')
loop.call_soon(wrapped, 2)
await asyncio.sleep(0.1)
event_loop = asyncio.get_event_loop()
try:
print('entering event loop')
event_loop.run_until_complete(main(event_loop))
finally:
print('closing event loop')
event_loop.close()
# %load asyncio_call_later.py
import asyncio
def callback(n):
print('callback {} invoked'.format(n))
async def main(loop):
print('registering callbacks')
loop.call_later(0.2, callback, 1)
loop.call_later(0.1, callback, 2)
loop.call_soon(callback, 3)
await asyncio.sleep(0.4)
event_loop = asyncio.get_event_loop()
try:
print('entering event loop')
event_loop.run_until_complete(main(event_loop))
finally:
print('closing event loop')
event_loop.close()
!python asyncio_call_later.py
# %load asyncio_call_at.py
import asyncio
import time
def callback(n, loop):
print('callback {} invoked at {}'.format(n, loop.time()))
async def main(loop):
now = loop.time()
print('clock time: {}'.format(time.time()))
print('loop time: {}'.format(now))
print('registering callbacks')
loop.call_at(now + 0.2, callback, 1, loop)
loop.call_at(now + 0.1, callback, 2, loop)
loop.call_soon(callback, 3, loop)
await asyncio.sleep(1)
event_loop = asyncio.get_event_loop()
try:
print('entering event loop')
event_loop.run_until_complete(main(event_loop))
finally:
print('closing event loop')
event_loop.close()
!python asyncio_call_at.py
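# Sketch: call_at() uses the event loop's monotonic clock, not the wall clock, so a
# wall-clock deadline must be converted before scheduling (illustrative names only):
#
#   delay = deadline_unix_ts - time.time()          # seconds until the wall-clock deadline
#   loop.call_at(loop.time() + delay, callback, n)  # schedule on the loop's clock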
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The callbacks are invoked in the order they are scheduled.
Step2: In this example, the same callback function is scheduled for several different times with different arguments. The final instance, using call_soon(), results in the callback being invoked with the argument 3 before any of the time-scheduled instances, showing that “soon” usually implies a minimal delay.
|
2,690
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot
# We have this here to trigger matplotlib's font cache stuff.
# This cell is hidden from the output
import pandas as pd
import numpy as np
import matplotlib as mpl
df = pd.DataFrame([[38.0, 2.0, 18.0, 22.0, 21, np.nan],[19, 439, 6, 452, 226,232]],
index=pd.Index(['Tumour (Positive)', 'Non-Tumour (Negative)'], name='Actual Label:'),
columns=pd.MultiIndex.from_product([['Decision Tree', 'Regression', 'Random'],['Tumour', 'Non-Tumour']], names=['Model:', 'Predicted:']))
df.style
# Hidden cell to just create the below example: code is covered throughout the guide.
s = df.style\
.hide_columns([('Random', 'Tumour'), ('Random', 'Non-Tumour')])\
.format('{:.0f}')\
.set_table_styles([{
'selector': '',
'props': 'border-collapse: separate;'
},{
'selector': 'caption',
'props': 'caption-side: bottom; font-size:1.3em;'
},{
'selector': '.index_name',
'props': 'font-style: italic; color: darkgrey; font-weight:normal;'
},{
'selector': 'th:not(.index_name)',
'props': 'background-color: #000066; color: white;'
},{
'selector': 'th.col_heading',
'props': 'text-align: center;'
},{
'selector': 'th.col_heading.level0',
'props': 'font-size: 1.5em;'
},{
'selector': 'th.col2',
'props': 'border-left: 1px solid white;'
},{
'selector': '.col2',
'props': 'border-left: 1px solid #000066;'
},{
'selector': 'td',
'props': 'text-align: center; font-weight:bold;'
},{
'selector': '.true',
'props': 'background-color: #e6ffe6;'
},{
'selector': '.false',
'props': 'background-color: #ffe6e6;'
},{
'selector': '.border-red',
'props': 'border: 2px dashed red;'
},{
'selector': '.border-green',
'props': 'border: 2px dashed green;'
},{
'selector': 'td:hover',
'props': 'background-color: #ffffb3;'
}])\
.set_td_classes(pd.DataFrame([['true border-green', 'false', 'true', 'false border-red', '', ''],
['false', 'true', 'false', 'true', '', '']],
index=df.index, columns=df.columns))\
.set_caption("Confusion matrix for multiple cancer prediction models.")\
.set_tooltips(pd.DataFrame([['This model has a very strong true positive rate', '', '', "This model's total number of false negatives is too high", '', ''],
['', '', '', '', '', '']],
index=df.index, columns=df.columns),
css_class='pd-tt', props=
'visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'
'background-color: white; color: #000066; font-size: 0.8em;'
'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')
s
df.style.format(precision=0, na_rep='MISSING', thousands=" ",
formatter={('Decision Tree', 'Tumour'): "{:.2f}",
('Regression', 'Non-Tumour'): lambda x: "$ {:,.1f}".format(x*-1e6)
})
weather_df = pd.DataFrame(np.random.rand(10,2)*5,
index=pd.date_range(start="2021-01-01", periods=10),
columns=["Tokyo", "Beijing"])
def rain_condition(v):
if v < 1.75:
return "Dry"
elif v < 2.75:
return "Rain"
return "Heavy Rain"
def make_pretty(styler):
styler.set_caption("Weather Conditions")
styler.format(rain_condition)
styler.format_index(lambda v: v.strftime("%A"))
styler.background_gradient(axis=None, vmin=1, vmax=5, cmap="YlGnBu")
return styler
weather_df
weather_df.loc["2021-01-04":"2021-01-08"].style.pipe(make_pretty)
s = df.style.format('{:.0f}').hide([('Random', 'Tumour'), ('Random', 'Non-Tumour')], axis="columns")
s
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_hide')
cell_hover = { # for row hover use <tr> instead of <td>
'selector': 'td:hover',
'props': [('background-color', '#ffffb3')]
}
index_names = {
'selector': '.index_name',
'props': 'font-style: italic; color: darkgrey; font-weight:normal;'
}
headers = {
'selector': 'th:not(.index_name)',
'props': 'background-color: #000066; color: white;'
}
s.set_table_styles([cell_hover, index_names, headers])
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_tab_styles1')
s.set_table_styles([
{'selector': 'th.col_heading', 'props': 'text-align: center;'},
{'selector': 'th.col_heading.level0', 'props': 'font-size: 1.5em;'},
{'selector': 'td', 'props': 'text-align: center; font-weight: bold;'},
], overwrite=False)
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_tab_styles2')
s.set_table_styles({
('Regression', 'Tumour'): [{'selector': 'th', 'props': 'border-left: 1px solid white'},
{'selector': 'td', 'props': 'border-left: 1px solid #000066'}]
}, overwrite=False, axis=0)
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('xyz01')
out = s.set_table_attributes('class="my-table-cls"').to_html()
print(out[out.find('<table'):][:109])
s.set_table_styles([ # create internal CSS classes
{'selector': '.true', 'props': 'background-color: #e6ffe6;'},
{'selector': '.false', 'props': 'background-color: #ffe6e6;'},
], overwrite=False)
cell_color = pd.DataFrame([['true ', 'false ', 'true ', 'false '],
['false ', 'true ', 'false ', 'true ']],
index=df.index,
columns=df.columns[:4])
s.set_td_classes(cell_color)
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_classes')
np.random.seed(0)
df2 = pd.DataFrame(np.random.randn(10,4), columns=['A','B','C','D'])
df2.style
def style_negative(v, props=''):
return props if v < 0 else None
s2 = df2.style.applymap(style_negative, props='color:red;')\
.applymap(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)
s2
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s2.set_uuid('after_applymap')
def highlight_max(s, props=''):
return np.where(s == np.nanmax(s.values), props, '')
s2.apply(highlight_max, props='color:white;background-color:darkblue', axis=0)
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s2.set_uuid('after_apply')
s2.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\
.apply(highlight_max, props='color:white;background-color:purple', axis=None)
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s2.set_uuid('after_apply_again')
s2.applymap_index(lambda v: "color:pink;" if v>4 else "color:darkblue;", axis=0)
s2.apply_index(lambda s: np.where(s.isin(["A", "B"]), "color:pink;", "color:darkblue;"), axis=1)
s.set_caption("Confusion matrix for multiple cancer prediction models.")\
.set_table_styles([{
'selector': 'caption',
'props': 'caption-side: bottom; font-size:1.25em;'
}], overwrite=False)
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_caption')
tt = pd.DataFrame([['This model has a very strong true positive rate',
"This model's total number of false negatives is too high"]],
index=['Tumour (Positive)'], columns=df.columns[[0,3]])
s.set_tooltips(tt, props='visibility: hidden; position: absolute; z-index: 1; border: 1px solid #000066;'
'background-color: white; color: #000066; font-size: 0.8em;'
'transform: translate(0px, -24px); padding: 0.6em; border-radius: 0.5em;')
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_tooltips')
s.set_table_styles([ # create internal CSS classes
{'selector': '.border-red', 'props': 'border: 2px dashed red;'},
{'selector': '.border-green', 'props': 'border: 2px dashed green;'},
], overwrite=False)
cell_border = pd.DataFrame([['border-green ', ' ', ' ', 'border-red '],
[' ', ' ', ' ', ' ']],
index=df.index,
columns=df.columns[:4])
s.set_td_classes(cell_color + cell_border)
# Hidden cell to avoid CSS clashes and latter code upcoding previous formatting
s.set_uuid('after_borders')
df3 = pd.DataFrame(np.random.randn(4,4),
pd.MultiIndex.from_product([['A', 'B'], ['r1', 'r2']]),
columns=['c1','c2','c3','c4'])
df3
slice_ = ['c3', 'c4']
df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
idx = pd.IndexSlice
slice_ = idx[idx[:,'r1'], idx['c2':'c4']]
df3.style.apply(highlight_max, props='color:red;', axis=0, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
slice_ = idx[idx[:,'r2'], :]
df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
slice_ = idx[idx[(df3['c1'] + df3['c3']) < -2.0], ['c2', 'c4']]
df3.style.apply(highlight_max, props='color:red;', axis=1, subset=slice_)\
.set_properties(**{'background-color': '#ffffb3'}, subset=slice_)
df4 = pd.DataFrame([[1,2],[3,4]])
s4 = df4.style
from pandas.io.formats.style import Styler
s4 = Styler(df4, uuid_len=0, cell_ids=False)
props = 'font-family: "Times New Roman", Times, serif; color: #e83e8c; font-size:1.3em;'
df4.style.applymap(lambda x: props, subset=[1])
df4.style.set_table_styles([{'selector': 'td.col1', 'props': props}])
df2.style.apply(highlight_max, props='color:white;background-color:darkblue;', axis=0)\
.apply(highlight_max, props='color:white;background-color:pink;', axis=1)\
.apply(highlight_max, props='color:white;background-color:purple', axis=None)
build = lambda x: pd.DataFrame(x, index=df2.index, columns=df2.columns)
cls1 = build(df2.apply(highlight_max, props='cls-1 ', axis=0))
cls2 = build(df2.apply(highlight_max, props='cls-2 ', axis=1, result_type='expand').values)
cls3 = build(highlight_max(df2, props='cls-3 '))
df2.style.set_table_styles([
{'selector': '.cls-1', 'props': 'color:white;background-color:darkblue;'},
{'selector': '.cls-2', 'props': 'color:white;background-color:pink;'},
{'selector': '.cls-3', 'props': 'color:white;background-color:purple;'}
]).set_td_classes(cls1 + cls2 + cls3)
my_css = {
"row_heading": "",
"col_heading": "",
"index_name": "",
"col": "c",
"row": "r",
"col_trim": "",
"row_trim": "",
"level": "l",
"data": "",
"blank": "",
}
html = Styler(df4, uuid_len=0, cell_ids=False)
html.set_table_styles([{'selector': 'td', 'props': props},
{'selector': '.c1', 'props': 'color:green;'},
{'selector': '.l0', 'props': 'color:blue;'}],
css_class_names=my_css)
print(html.to_html())
html
df2.iloc[0,2] = np.nan
df2.iloc[4,3] = np.nan
df2.loc[:4].style.highlight_null(color='yellow')
df2.loc[:4].style.highlight_max(axis=1, props='color:white; font-weight:bold; background-color:darkblue;')
left = pd.Series([1.0, 0.0, 1.0], index=["A", "B", "D"])
df2.loc[:4].style.highlight_between(left=left, right=1.5, axis=1, props='color:white; background-color:purple;')
df2.loc[:4].style.highlight_quantile(q_left=0.85, axis=None, color='yellow')
import seaborn as sns
cm = sns.light_palette("green", as_cmap=True)
df2.style.background_gradient(cmap=cm)
df2.style.text_gradient(cmap=cm)
df2.loc[:4].style.set_properties(**{'background-color': 'black',
'color': 'lawngreen',
'border-color': 'white'})
df2.style.bar(subset=['A', 'B'], color='#d65f5f')
df2.style.format('{:.3f}', na_rep="")\
.bar(align=0, vmin=-2.5, vmax=2.5, cmap="bwr", height=50,
width=60, props="width: 120px; border-right: 1px solid black;")\
.text_gradient(cmap="bwr", vmin=-2.5, vmax=2.5)
# Hide the construction of the display chart from the user
import pandas as pd
from IPython.display import HTML
# Test series
test1 = pd.Series([-100,-60,-30,-20], name='All Negative')
test2 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')
test3 = pd.Series([10,20,50,100], name='All Positive')
test4 = pd.Series([100, 103, 101, 102], name='Large Positive')
head = """
<table>
<thead>
<th>Align</th>
<th>All Negative</th>
<th>Both Neg and Pos</th>
<th>All Positive</th>
<th>Large Positive</th>
</thead>
</tbody>
"""
aligns = ['left', 'right', 'zero', 'mid', 'mean', 99]
for align in aligns:
row = "<tr><th>{}</th>".format(align)
for series in [test1,test2,test3, test4]:
s = series.copy()
s.name=''
row += "<td>{}</td>".format(s.to_frame().style.hide_index().bar(align=align,
color=['#d65f5f', '#5fba7d'],
width=100).to_html()) #testn['width']
row += '</tr>'
head += row
head += """
</tbody>
</table>
"""
HTML(head)
style1 = df2.style\
.applymap(style_negative, props='color:red;')\
.applymap(lambda v: 'opacity: 20%;' if (v < 0.3) and (v > -0.3) else None)\
.set_table_styles([{"selector": "th", "props": "color: blue;"}])\
.hide(axis="index")
style1
style2 = df3.style
style2.use(style1.export())
style2
from ipywidgets import widgets
@widgets.interact
def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):
return df2.style.background_gradient(
cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,
as_cmap=True)
)
def magnify():
return [dict(selector="th",
props=[("font-size", "4pt")]),
dict(selector="td",
props=[('padding', "0em 0em")]),
dict(selector="th:hover",
props=[("font-size", "12pt")]),
dict(selector="tr:hover td:hover",
props=[('max-width', '200px'),
('font-size', '12pt')])
]
np.random.seed(25)
cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True)
bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()
bigdf.style.background_gradient(cmap, axis=1)\
.set_properties(**{'max-width': '80px', 'font-size': '1pt'})\
.set_caption("Hover to magnify")\
.format(precision=2)\
.set_table_styles(magnify())
bigdf = pd.DataFrame(np.random.randn(16, 100))
bigdf.style.set_sticky(axis="index")
bigdf.index = pd.MultiIndex.from_product([["A","B"],[0,1],[0,1,2,3]])
bigdf.style.set_sticky(axis="index", pixel_size=18, levels=[1,2])
df4 = pd.DataFrame([['<div></div>', '"&other"', '<span></span>']])
df4.style
df4.style.format(escape="html")
df4.style.format('<a href="https://pandas.pydata.org" target="_blank">{}</a>', escape="html")
df2.style.\
applymap(style_negative, props='color:red;').\
highlight_max(axis=0).\
to_excel('styled.xlsx', engine='openpyxl')
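# A hedged sketch of the analogous LaTeX export (Styler.to_latex, pandas >= 1.3);
# convert_css translates simple inline CSS like 'color:red;' into LaTeX commands.
df2.style.applymap(style_negative, props='color:red;').to_latex(convert_css=True)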
print(pd.DataFrame([[1,2],[3,4]], index=['i1', 'i2'], columns=['c1', 'c2']).style.to_html())
df4 = pd.DataFrame([['text']])
df4.style.applymap(lambda x: 'color:green;')\
.applymap(lambda x: 'color:red;')
df4.style.applymap(lambda x: 'color:red;')\
.applymap(lambda x: 'color:green;')
df4.style.set_uuid('a_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'}])\
.applymap(lambda x: 'color:green;')
df4.style.set_uuid('b_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'},
{'selector': '.cls-1', 'props': 'color:blue;'}])\
.applymap(lambda x: 'color:green;')\
.set_td_classes(pd.DataFrame([['cls-1']]))
df4.style.set_uuid('c_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'},
{'selector': '.cls-1', 'props': 'color:blue;'},
{'selector': 'td.data', 'props': 'color:yellow;'}])\
.applymap(lambda x: 'color:green;')\
.set_td_classes(pd.DataFrame([['cls-1']]))
df4.style.set_uuid('d_')\
.set_table_styles([{'selector': 'td', 'props': 'color:red;'},
{'selector': '.cls-1', 'props': 'color:blue;'},
{'selector': 'td.data', 'props': 'color:yellow;'}])\
.applymap(lambda x: 'color:green !important;')\
.set_td_classes(pd.DataFrame([['cls-1']]))
from jinja2 import Environment, ChoiceLoader, FileSystemLoader
from IPython.display import HTML
from pandas.io.formats.style import Styler
with open("templates/myhtml.tpl") as f:
print(f.read())
class MyStyler(Styler):
env = Environment(
loader=ChoiceLoader([
FileSystemLoader("templates"), # contains ours
Styler.loader, # the default
])
)
template_html_table = env.get_template("myhtml.tpl")
MyStyler(df3)
HTML(MyStyler(df3).to_html(table_title="Extending Example"))
EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl")
HTML(EasyStyler(df3).to_html(table_title="Another Title"))
with open("templates/html_style_structure.html") as f:
style_structure = f.read()
HTML(style_structure)
with open("templates/html_table_structure.html") as f:
table_structure = f.read()
HTML(table_structure)
# # Hack to get the same style in the notebook as the
# # main site. This is hidden in the docs.
# from IPython.display import HTML
# with open("themes/nature_with_gtoc/static/nature.css_t") as f:
# css = f.read()
# HTML('<style>{}</style>'.format(css))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above output looks very similar to the standard DataFrame HTML representation. But the HTML here has already attached some CSS classes to each cell, even if we haven't yet created any styles. We can view these by calling the .to_html() method, which returns the raw HTML as a string, which is useful for further processing or adding to a file - read on in More about CSS and HTML. Below we will show how we can use these to format the DataFrame to be more communicative, for example how we can build the styled table `s` shown above.
Step2: Formatting the Display
Step3: Using Styler to manipulate the display is a useful feature because maintaining the indexing and data values for other purposes gives greater control. You do not have to overwrite your DataFrame to display it how you like. Here is an example of using the formatting functions whilst still relying on the underlying data for indexing and calculations.
Step4: Hiding Data
Step5: Methods to Add Styles
Step6: Next we just add a couple more styling artifacts targeting specific parts of the table. Be careful here, since we are chaining methods we need to explicitly instruct the method not to overwrite the existing styles.
Step7: As a convenience method (since version 1.2.0) we can also pass a dict to .set_table_styles() which contains row or column keys. Behind the scenes Styler just indexes the keys and adds relevant .col<m> or .row<n> classes as necessary to the given CSS selectors.
Step8: Setting Classes and Linking to External CSS
Step9: Data Cell CSS Classes
Step10: Styler Functions
Step11: For example we can build a function that colors text if it is negative, and chain this with a function that partially fades cells of negligible value. Since this looks at each element in turn we use applymap.
Step12: We can also build a function that highlights the maximum value across rows, cols, and the DataFrame all at once. In this case we use apply. Below we highlight the maximum in a column.
Step13: We can use the same function across the different axes, highlighting here the DataFrame maximum in purple, and row maximums in pink.
Step14: This last example shows how some styles have been overwritten by others. In general the most recent style applied is active but you can read more in the section on CSS hierarchies. You can also apply these styles to more granular parts of the DataFrame - read more in section on subset slicing.
Step15: Tooltips and Captions
Step16: Adding tooltips (since version 1.3.0) can be done using the .set_tooltips() method in the same way you can add CSS classes to data cells by providing a string based DataFrame with intersecting indices and columns. You don't have to specify a css_class name or any css props for the tooltips, since there are standard defaults, but the option is there if you want more visual control.
Step17: The only thing left to do for our table is to add the highlighting borders to draw the audience attention to the tooltips. We will create internal CSS classes as before using table styles. Setting classes always overwrites so we need to make sure we add the previous classes.
Step18: Finer Control with Slicing
Step19: We will use subset to highlight the maximum in the third and fourth columns with red text. We will highlight the subset sliced region in yellow.
Step20: If combined with the IndexSlice as suggested then it can index across both dimensions with greater flexibility.
Step21: This also provides the flexibility to sub select rows when used with the axis=1.
Step22: There is also scope to provide conditional filtering.
Step23: Only label-based slicing is supported right now, not positional, and not callables.
Step24: <div class="alert alert-info">
Step25: 2. Use table styles
Step26: <div class="alert alert-info">
Step27: 3. Set classes instead of using Styler functions
Step28: <div class="alert alert-info">
Step29: 4. Don't use tooltips
Step30: Builtin Styles
Step31: Highlight Min or Max
Step32: Highlight Between
Step33: Highlight Quantile
Step34: Background Gradient and Text Gradient
Step35: .background_gradient and .text_gradient have a number of keyword arguments to customise the gradients and colors. See the documentation.
Step36: Bar charts
Step37: Additional keyword arguments give more control on centering and positioning, and you can pass a list of [color_negative, color_positive] to highlight lower and higher values or a matplotlib colormap.
Step40: The following example aims to highlight the behavior of the new align options
Step41: Sharing styles
Step42: Notice that you're able to share the styles even though they're data aware. The styles are re-evaluated on the new DataFrame they've been used upon.
Step43: Magnify
Step44: Sticky Headers
Step45: It is also possible to stick MultiIndexes and even only specific levels.
Step46: HTML Escaping
Step47: Export to Excel
Step48: A screenshot of the output
Step49: CSS Hierarchies
Step50: This is only true for CSS rules that are equivalent in hierarchy, or importance. You can read more about CSS specificity here but for our purposes it suffices to summarize the key points
Step51: This text is red because the generated selector #T_a_ td is worth 101 (ID plus element), whereas #T_a_row0_col0 is only worth 100 (ID), so is considered inferior even though in the HTML it comes after the previous.
Step52: In the above case the text is blue because the selector #T_b_ .cls-1 is worth 110 (ID plus class), which takes precendence.
Step53: Now we have created another table style this time the selector T_c_ td.data (ID plus element plus class) gets bumped up to 111.
Step54: Finally got that green text after all!
Step55: We'll use the following template
Step56: Now that we've created a template, we need to set up a subclass of Styler that
Step57: Notice that we include the original loader in our environment's loader.
Step58: Our custom template accepts a table_title keyword. We can provide the value in the .to_html method.
Step59: For convenience, we provide the Styler.from_custom_template method that does the same as the custom subclass.
Step60: Template Structure
Step61: Table template
Step62: See the template in the GitHub repo for more details.
|
2,691
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import sys
import re
sys.path.append("../utils")
import loaders
employers = loaders.load_employers().set_index("CASE_ID")
cases = loaders.load_cases().set_index("CASE_ID")
cases_basics = cases[[ "DATE_CONCLUDED_FY", "INVEST_TOOL_DESC" ]]\
.join(employers[ "employer_id" ])\
.reset_index()
act_summaries = loaders.load_act_summaries()
h2_summaries = act_summaries[
act_summaries["ACT_ID"].isin([ "H2A", "H2B" ])
]
matching_cases = cases_basics[
cases_basics["CASE_ID"].isin(h2_summaries["CASE_ID"]) &
(cases_basics["DATE_CONCLUDED_FY"] >= 2010) &
(cases_basics["DATE_CONCLUDED_FY"] <= 2014)
]
invest_tool_counts = matching_cases["INVEST_TOOL_DESC"].value_counts()
case_counts = matching_cases.groupby([
"DATE_CONCLUDED_FY",
"INVEST_TOOL_DESC"
])["CASE_ID"].nunique()\
.unstack()\
.fillna(0)\
[invest_tool_counts.index.tolist()]
case_counts["[total]"] = case_counts.sum(axis=1)
employer_counts = pd.DataFrame({
"n_employer_ids": matching_cases.groupby("DATE_CONCLUDED_FY")["employer_id"].nunique()
})
case_counts
employer_counts
date_parser = lambda x: pd.to_datetime(x, format="%Y-%m-%d", errors="coerce")
oflc_decisions = pd.read_csv("../data/oflc-decisions/processed/oflc-decisions.csv",
parse_dates=["last_event_date"],
date_parser=date_parser)
oflc_decisions["last_event_date_fy"] = oflc_decisions["last_event_date"].apply(loaders.date_to_fy)
certifications = oflc_decisions[
(oflc_decisions["is_certified"] == True) &
(oflc_decisions["is_expired"] == False) &
(oflc_decisions["is_duplicate"] == False) &
(oflc_decisions["last_event_date_fy"] >= 2010) &
(oflc_decisions["last_event_date_fy"] <= 2014)
].copy()
non_alphanum_pat = re.compile(r"[^A-Z0-9 ]+")
def standardize_name(x):
return re.sub(non_alphanum_pat, "", x.upper().strip())
certifications["employer_name_standard"] = certifications["employer_name"]\
.fillna("")\
.apply(standardize_name)
certifications[["employer_name", "employer_name_standard"]].tail()
certs_by_fy = certifications.groupby("last_event_date_fy")
employer_cert_counts = pd.DataFrame({
"n_employer_names": certs_by_fy["employer_name_standard"].nunique()
})
employer_cert_counts
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Number of H-2–related cases by overall investigation type and fiscal year concluded
Step3: Note
Step4: Data Loading — Certifications
Step5: Basic Standardization of Employer Names
Step6: Example of employer names before and after standardization
Step7: Rough count of the number of employers certified for H-2 visas, per fiscal year
|
2,692
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
print(tf.__version__)
# Some important imports
import math
import numpy as np
import colorsys
import matplotlib.pyplot as plt
%matplotlib inline
import random
import pickle
# If your files are named differently or placed in a different folder, please update lines below.
training_file ="./raw_data/train.p"
validation_file = "./raw_data/valid.p"
testing_file = "./raw_data/test.p"
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
# Make sure that the number of features equals the number of labels
assert(len(X_train) == len(y_train))
assert(len(X_valid) == len(y_valid))
assert(len(X_test) == len(y_test))
# Number of training examples
n_train = X_train.shape[0]
# Number of training labels
n_train_lables = y_train.shape[0]
# Number of validation examples
n_validation = X_valid.shape[0]
# Number of validation labels
n_validation_labels = y_valid.shape[0]
# Number of testing examples
n_test = X_test.shape[0]
# Number of test labels
n_test_labels = y_test.shape[0]
# The shape of an traffic sign image
train_image_shape = [X_train.shape[1], X_train.shape[2], X_train.shape[3]]
valid_image_shape = [X_valid.shape[1], X_valid.shape[2], X_valid.shape[3]]
test_image_shape = [X_test.shape[1], X_test.shape[2], X_test.shape[3]]
# Number of unique classes/labels in the dataset.
n_classes = len(set(train['labels']))
print("Number of training examples =", n_train)
print("Number of training labels =", n_train_lables)
print()
print("Number of validation examples =", n_validation)
print("Number of validation labels =", n_validation)
print()
print("Number of testing examples =", n_test)
print("Number of testing labels =", n_test)
print()
print("Training image data shape =", train_image_shape)
print("Validation image data shape =", valid_image_shape)
print("Test image data shape =", test_image_shape)
print()
print("Number of classes =", n_classes)
n_pics_row = 5
n_pic_col = 10
plots = []
for i in range(n_pics_row):
for j in range(n_pic_col):
ax = plt.subplot2grid((n_pics_row,n_pic_col), (i,j))
ax.imshow(X_train[random.randint(0, n_train)][:][:][:], cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
plt.show()
# Frequencies of training data per class
plt.hist(y_train, bins=np.arange(n_classes + 1))  # n_classes + 1 edges -> one bin per class
plt.title("Frequencies of classes in training set")
plt.show()
# Frequencies of validation data per class
plt.hist(y_valid, bins=np.arange(n_classes + 1))
plt.title("Frequencies of classes in validation set")
plt.show()
# Frequencies of test data per class
plt.hist(y_test, bins=np.arange(n_classes + 1))
plt.title("Frequencies of classes in testing set")
plt.show()
def normalize_img(image_data):
    """Normalize the image data with Min-Max scaling to a range of [0.1, 0.9].
    :param image_data: The image data to be normalized.
    :return: Normalized image data.
    """
a = 0.1
b = 0.9
scale_min = 0
scale_max = 255
return a + (((image_data - scale_min)*(b - a))/(scale_max - scale_min))
X_train_norm = normalize_img(X_train)
X_valid_norm = normalize_img(X_valid)
X_test_norm = normalize_img(X_test)
tf.reset_default_graph()
X_train2gray = tf.image.rgb_to_grayscale(X_train_norm)
with tf.Session() as sess:
X_train_gray = sess.run(X_train2gray)
tf.reset_default_graph()
X_train_rotated_ccw = tf.contrib.image.rotate(X_train_norm, 15 * math.pi / 180, interpolation='BILINEAR')
X_train_rotated_cw = tf.contrib.image.rotate(X_train_norm, -15 * math.pi / 180, interpolation='BILINEAR')
with tf.Session() as sess:
rotated_images_ccw = sess.run(X_train_rotated_ccw)
rotated_images_cw = sess.run(X_train_rotated_cw)
tf.reset_default_graph()
rotated_ccw2gray = tf.image.rgb_to_grayscale(rotated_images_ccw) # Ready to export
rotated_cw2gray = tf.image.rgb_to_grayscale(rotated_images_cw) # Ready to export
with tf.Session() as sess:
rotated_images_ccw_gray = sess.run(rotated_ccw2gray)
rotated_images_cw_gray = sess.run(rotated_cw2gray)
# Copy labels for rotated images
rotated_ccw_labels = y_train
rotated_cw_labels = y_train
# Time consuming task! Function is sequential. TODO: optimize it.
def random_brightness(image):
    """Modify image brightness with the formula: brightness = 0.2 + np.random.uniform().
    :param image: The image data to be processed.
    :return: Modified image data.
    """
result = image.copy()  # copy so the input batch is not mutated in place
for i in range(image.shape[0]):
one_image = image[i][:][:][:]
brightness = 0.2 + np.random.uniform()
for x in range(one_image.shape[0]):
for y in range(one_image.shape[1]):
h, s, v = colorsys.rgb_to_hsv(one_image[x][y][0], one_image[x][y][1], one_image[x][y][2])
v = v * brightness
one_image[x][y][0], one_image[x][y][1], one_image[x][y][2] = colorsys.hsv_to_rgb(h, s, v)
result[i][:][:][:] = one_image[:][:][:]
return result
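# A hedged vectorized alternative (an assumption, not part of the original
# notebook): scaling R, G and B by the same factor scales the HSV value
# channel by that factor, so the per-pixel loop above can be replaced with
# one broadcasted multiply per batch (clipped to stay in a valid range).
def random_brightness_fast(images):
    # One random brightness factor per image, broadcast over H, W, C.
    factors = 0.2 + np.random.uniform(size=(images.shape[0], 1, 1, 1))
    return np.clip(images * factors, 0.0, 1.0)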
## Create a copy of the original dataset and modify the images' brightness
X_train_bright = random_brightness(X_train_norm)
y_train_bright = y_train
tf.reset_default_graph()
X_train_bright2gray = tf.image.rgb_to_grayscale(X_train_bright)
with tf.Session() as sess:
X_train_bright_gray = sess.run(X_train_bright2gray)
# Time consuming task! Function is sequential. TODO: optimize it.
def random_noise(image):
result = image.copy()  # copy so the input batch is not mutated in place
for i in range(image.shape[0]):
one_image = image[i][:][:][:]
for x in range(one_image.shape[0]):
for y in range(one_image.shape[1]):
brightness = np.random.uniform(low=0.0, high=0.3) # be careful with upper limit -> impact validation
h, s, v = colorsys.rgb_to_hsv(one_image[x][y][0], one_image[x][y][1], one_image[x][y][2])
v = v * brightness
one_image[x][y][0], one_image[x][y][1], one_image[x][y][2] = colorsys.hsv_to_rgb(h, s, v)
result[i][:][:][:] = one_image[:][:][:]
return result
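# The same vectorization idea applies here (a hedged sketch, not the original
# code): draw one random factor per pixel and broadcast it over the channels.
def random_noise_fast(images):
    factors = np.random.uniform(0.0, 0.3, size=images.shape[:3] + (1,))
    return np.clip(images * factors, 0.0, 1.0)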
X_train_noise = random_noise(X_train_norm)
y_train_noise = y_train
tf.reset_default_graph()
X_train_noise2gray = tf.image.rgb_to_grayscale(X_train_noise)
with tf.Session() as sess:
X_train_noise_gray = sess.run(X_train_noise2gray)
X_train_ready = X_train_gray
y_train_ready = y_train
X_train_ready = np.append(X_train_ready, rotated_images_ccw_gray, axis=0)
y_train_ready = np.append(y_train_ready, rotated_ccw_labels, axis=0)
X_train_ready = np.append(X_train_ready, rotated_images_cw_gray, axis=0)
y_train_ready = np.append(y_train_ready, rotated_cw_labels, axis=0)
X_train_ready = np.append(X_train_ready, X_train_bright_gray, axis=0)
y_train_ready = np.append(y_train_ready, y_train_bright, axis=0)
X_train_ready = np.append(X_train_ready, X_train_noise_gray, axis=0)
y_train_ready = np.append(y_train_ready, y_train_noise, axis=0)
tf.reset_default_graph()
X_valid_gray = tf.image.rgb_to_grayscale(X_valid_norm) # Ready to export
X_test_gray = tf.image.rgb_to_grayscale(X_test_norm) # Ready to export
with tf.Session() as sess:
X_valid_ready = sess.run(X_valid_gray)
X_test_ready = sess.run(X_test_gray)
# Propagate their labels
y_valid_ready = y_valid
y_test_ready = y_test
print("Training dataset shape: ", X_train_ready.shape)
print("Validation dataset shape: ", X_valid_ready.shape)
print("Test dataset shape: ", X_test_ready.shape)
# Make sure that the number of features equals the number of labels
assert(len(X_train_ready) == len(y_train_ready))
assert(len(X_valid_ready) == len(y_valid_ready))
assert(len(X_test_ready) == len(y_test_ready))
with open('./train_data/aug_train_features_ready2.pickle', 'wb') as output:
pickle.dump(X_train_ready, output)
with open('./train_data/aug_train_labels_ready2.pickle', 'wb') as output:
pickle.dump(y_train_ready, output)
with open('./train_data/aug_valid_features_ready2.pickle', 'wb') as output:
pickle.dump(X_valid_ready, output)
with open('./train_data/aug_valid_labels_ready2.pickle', 'wb') as output:
pickle.dump(y_valid_ready, output)
with open('./train_data/aug_test_features_ready2.pickle', 'wb') as output:
pickle.dump(X_test_ready, output)
with open('./train_data/aug_test_labels_ready2.pickle', 'wb') as output:
pickle.dump(y_test_ready, output)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Be aware of version compatibility. This notebook uses functions from the TensorFlow package version 1.3.0 and higher.
Step2: Load data
Step3: Basic Summary
Step4: Some exploratory visualizations
Step6: Note
Step7: Transform normalized RGB image to grayscale
Step8: Create rotated images from normalized original data
Step10: Modify brightness randomly
Step11: Convert processed images to grayscale.
Step12: Add random noise
Step13: Concatenate all training data together
Step14: Convert to grayscale validation and test data
|
2,693
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls
import warnings
warnings.filterwarnings('ignore')
menu = pd.read_csv('menu.csv')
menu.head(4)
# Check for Nulls
print(menu.isnull().any())
# Check the shape
print(menu.shape)
# check for numbers
print(menu.describe())
# Plotting the KDEplots
f, axes = plt.subplots(3, 3, figsize=(10, 10), sharex=True, sharey=True)
s = np.linspace(0, 3, 10)
cmap = sns.cubehelix_palette(start=0.0, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Cholesterol (% Daily Value)'].values
y = menu['Sodium (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, cut=5, ax=axes[0,0])
axes[0,0].set(xlim=(-10, 50), ylim=(-30, 70), title = 'Cholesterol and Sodium')
cmap = sns.cubehelix_palette(start=0.333333333333, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Carbohydrates (% Daily Value)'].values
y = menu['Sodium (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,1])
axes[0,1].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Carbs and Sodium')
cmap = sns.cubehelix_palette(start=0.666666666667, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Carbohydrates (% Daily Value)'].values
y = menu['Cholesterol (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[0,2])
axes[0,2].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Carbs and Cholesterol')
cmap = sns.cubehelix_palette(start=1.0, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Total Fat (% Daily Value)'].values
y = menu['Saturated Fat (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,0])
axes[1,0].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Total Fat and Saturated Fat')
cmap = sns.cubehelix_palette(start=1.333333333333, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Total Fat (% Daily Value)'].values
y = menu['Cholesterol (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,1])
axes[1,1].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Cholesterol and Total Fat')
cmap = sns.cubehelix_palette(start=1.666666666667, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Vitamin A (% Daily Value)'].values
y = menu['Cholesterol (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[1,2])
axes[1,2].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Vitamin A and Cholesterol')
cmap = sns.cubehelix_palette(start=2.0, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Calcium (% Daily Value)'].values
y = menu['Sodium (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[2,0])
axes[2,0].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Calcium and Sodium')
cmap = sns.cubehelix_palette(start=2.333333333333, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Calcium (% Daily Value)'].values
y = menu['Cholesterol (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[2,1])
axes[2,1].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Cholesterol and Calcium')
cmap = sns.cubehelix_palette(start=2.666666666667, light=1, as_cmap=True)
# Generate and plot a random bivariate dataset
x = menu['Iron (% Daily Value)'].values
y = menu['Total Fat (% Daily Value)'].values
sns.kdeplot(x, y, cmap=cmap, shade=True, ax=axes[2,2])
axes[2,2].set(xlim=(-5, 50), ylim=(-10, 70), title = 'Iron and Total Fat')
f.tight_layout()
data = [
go.Heatmap(
z= menu.iloc[:, 3:].corr().values,
x=menu.columns.values[3:],
y=menu.columns.values[3:],
colorscale='Viridis',
text = True ,
opacity = 1.0
)
]
layout = go.Layout(
title='Pearson Correlation of all Nutritional metrics',
xaxis = dict(ticks='', nticks=36),
yaxis = dict(ticks='' ),
width = 900, height = 700,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='labelled-heatmap')
trace = go.Scatter(
y = menu['Cholesterol (% Daily Value)'].values,
x = menu['Item'].values,
mode='markers',
marker=dict(
size= menu['Cholesterol (% Daily Value)'].values,
#color = np.random.randn(500), #set color equal to a variable
color = menu['Cholesterol (% Daily Value)'].values,
colorscale='Portland',
showscale=True
),
text = menu['Item'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Scatter plot of Cholesterol (% Daily Value) per Item on the Menu',
hovermode= 'closest',
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False
),
yaxis=dict(
title= 'Cholesterol (% Daily Value)',
ticklen= 5,
gridwidth= 2,
showgrid=False,
zeroline=False,
showline=False
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatterChol')
trace = go.Scatter(
y = menu['Sodium (% Daily Value)'].values,
x = menu['Item'].values,
mode='markers',
marker=dict(
size= menu['Sodium (% Daily Value)'].values,
#color = np.random.randn(500), #set color equal to a variable
color = menu['Sodium (% Daily Value)'].values,
colorscale='Portland',
showscale=True
),
text = menu['Item'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Scatter plot of Sodium (% Daily Value) per Item on the Menu',
hovermode= 'closest',
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False
),
yaxis=dict(
title= 'Sodium (% Daily Value)',
ticklen= 5,
gridwidth= 2,
showgrid=False,
zeroline=False,
showline=False,
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatterChol')
trace = go.Scatter(
y = menu['Saturated Fat (% Daily Value)'].values,
x = menu['Item'].values,
mode='markers',
marker=dict(
size= menu['Saturated Fat (% Daily Value)'].values,
#color = np.random.randn(500), #set color equal to a variable
color = menu['Saturated Fat (% Daily Value)'].values,
colorscale='Portland',
showscale=True
),
text = menu['Item'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Scatter plot of Saturated Fat (% Daily Value) per Item on the Menu',
hovermode= 'closest',
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False
),
yaxis=dict(
title= 'Saturated Fat (% Daily Value)',
ticklen= 5,
gridwidth= 2,
showgrid=False,
zeroline=False,
showline=False,
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatterChol')
# 3D scatter plot for Total Fats
trace1 = go.Scatter3d(
x=menu['Category'].values,
y=menu['Item'].values,
z=menu['Total Fat (% Daily Value)'].values,
text=menu['Item'].values,
mode='markers',
marker=dict(
sizemode='diameter',
# sizeref=750,
# size= dailyValue['Cholesterol (% Daily Value)'].values,
color = menu['Total Fat (% Daily Value)'].values,
colorscale = 'Portland',
colorbar = dict(title = 'Total Fat (% Daily Value)'),
line=dict(color='rgb(255, 255, 255)')
)
)
data=[trace1]
layout=dict(height=800, width=800, title='3D Scatter Plot of Total Fat (% Daily Value)')
fig=dict(data=data, layout=layout)
py.iplot(fig, filename='3DBubble')
# 3D scatter plot for Carbohydrate
trace1 = go.Scatter3d(
x=menu['Category'].values,
y=menu['Item'].values,
z=menu['Carbohydrates (% Daily Value)'].values,
text=menu['Item'].values,
mode='markers',
marker=dict(
sizemode='diameter',
# sizeref=750,
# size= dailyValue['Cholesterol (% Daily Value)'].values,
color = menu['Carbohydrates (% Daily Value)'].values,
colorscale = 'Portland',
colorbar = dict(title = 'Carbohydrates (% Daily Value)'),
line=dict(color='rgb(255, 255, 255)')
)
)
data=[trace1]
layout=dict(height=800, width=800, title='3D Scatter Plot of Carbohydrates (% Daily Value)')
fig=dict(data=data, layout=layout)
py.iplot(fig, filename='3DBubble')
trace = go.Scatter(
y = menu['Dietary Fiber (% Daily Value)'].values,
x = menu['Item'].values,
mode='markers',
marker=dict(
size= menu['Dietary Fiber (% Daily Value)'].values,
#color = np.random.randn(500), #set color equal to a variable
color = menu['Dietary Fiber (% Daily Value)'].values,
colorscale='Portland',
reversescale = True,
showscale=True
),
text = menu['Item'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Scatter plot of Dietary Fiber (% Daily Value) per Item on the Menu',
hovermode= 'closest',
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False
),
yaxis=dict(
title= 'Dietary Fiber (% Daily Value)',
ticklen= 5,
gridwidth= 2,
showgrid=False,
zeroline=False,
showline=False,
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatterChol')
# Calcium Scatter plots
trace = go.Scatter(
y = menu['Calcium (% Daily Value)'].values,
x = menu['Item'].values,
mode='markers',
marker=dict(
size= menu['Calcium (% Daily Value)'].values,
#color = np.random.randn(500), #set color equal to a variable
color = menu['Calcium (% Daily Value)'].values,
colorscale='Portland',
reversescale = True,
showscale=True
),
text = menu['Item'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Scatter plot of Calcium (% Daily Value) per Item on the Menu',
hovermode= 'closest',
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False
),
yaxis=dict(
title= 'Calcium (% Daily Value)',
ticklen= 5,
gridwidth= 2,
showgrid=False,
zeroline=False,
showline=False,
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatterChol')
# Iron Scatter plots
trace = go.Scatter(
y = menu['Iron (% Daily Value)'].values,
x = menu['Item'].values,
mode='markers',
marker=dict(
size= menu['Iron (% Daily Value)'].values,
#color = np.random.randn(500), #set color equal to a variable
color = menu['Iron (% Daily Value)'].values,
colorscale='Portland',
reversescale = True,
showscale=True
),
text = menu['Item'].values
)
data = [trace]
layout= go.Layout(
autosize= True,
title= 'Scatter plot of Iron (% Daily Value) per Item on the Menu',
hovermode= 'closest',
xaxis=dict(
showgrid=False,
zeroline=False,
showline=False
),
yaxis=dict(
title= 'Iron (% Daily Value)',
ticklen= 5,
gridwidth= 2,
showgrid=False,
zeroline=False,
showline=False,
),
showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatterChol')
x, y = (list(x) for x in zip(*sorted(zip(menu.Calories.values, menu.Item.values),
reverse = False)))
trace2 = go.Bar(
x=x ,
y=y,
marker=dict(
color=x,
colorscale = 'Viridis',
reversescale = True
),
name='Calories',
orientation='h',
)
layout = dict(
title="Barplot of Calories for all McDonald's Food Items",
width = 900, height = 2000,
yaxis=dict(
showgrid=False,
showline=False,
showticklabels=True,
# domain=[0, 0.85],
))
fig1 = go.Figure(data=[trace2])
fig1['layout'].update(layout)
py.iplot(fig1, filename='plots')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: So much like every standard data exploration, let us load the data via the Pandas package and play around with it.
Step2: Quick checks on Data quality
Step3: Cool, so it seems that our brief data quality checks have now all passed. So onto the data itself, we can see that a rich source of nutritional information is provided to us where we
Step4: Pearson Correlation plots
Step5: As evinced from the correlation plots, one can already see features that obviously tie into one another (the more yellow sections of the plot). For example, serving size and calories.
Step6: Takeaway from the plot
Step7: Takeaway from the plot
Step8: Takeaway from the plots
Step9: Having looked at nutritional metrics which carry largely negative connotations in society (Cholesterol, Sodium, Total Fat), let us balance this out and generate the scatter plots of nutritional metrics that carry more positive social weight, namely Calcium, Iron, and Dietary Fibre, to name a few.
Step10: Interactive Barplots of Calorie content per food item
Step11: Then we invoke the Bar plotting functionality within Plotly as such
|
2,694
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
import warnings
tf.enable_v2_behavior()
plt.style.use("ggplot")
warnings.filterwarnings('ignore')
num_schools = 8 # number of schools
treatment_effects = np.array(
[28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float32) # treatment effects
treatment_stddevs = np.array(
[15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float32) # treatment SE
fig, ax = plt.subplots()
plt.bar(range(num_schools), treatment_effects, yerr=treatment_stddevs)
plt.title("8 Schools treatment effects")
plt.xlabel("School")
plt.ylabel("Treatment effect")
fig.set_size_inches(10, 8)
plt.show()
model = tfd.JointDistributionSequential([
tfd.Normal(loc=0., scale=10., name="avg_effect"), # `mu` above
tfd.Normal(loc=5., scale=1., name="avg_stddev"), # `log(tau)` above
tfd.Independent(tfd.Normal(loc=tf.zeros(num_schools),
scale=tf.ones(num_schools),
name="school_effects_standard"), # `theta_prime`
reinterpreted_batch_ndims=1),
lambda school_effects_standard, avg_stddev, avg_effect: (
tfd.Independent(tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
tf.exp(avg_stddev[..., tf.newaxis]) *
school_effects_standard), # `theta` above
scale=treatment_stddevs),
name="treatment_effects", # `y` above
reinterpreted_batch_ndims=1))
])
def target_log_prob_fn(avg_effect, avg_stddev, school_effects_standard):
    """Unnormalized target density as a function of states."""
return model.log_prob((
avg_effect, avg_stddev, school_effects_standard, treatment_effects))
num_results = 5000
num_burnin_steps = 3000
# Improve performance by tracing the sampler using `tf.function`
# and compiling it using XLA.
@tf.function(autograph=False, jit_compile=True)
def do_sampling():
return tfp.mcmc.sample_chain(
num_results=num_results,
num_burnin_steps=num_burnin_steps,
current_state=[
tf.zeros([], name='init_avg_effect'),
tf.zeros([], name='init_avg_stddev'),
tf.ones([num_schools], name='init_school_effects_standard'),
],
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
step_size=0.4,
num_leapfrog_steps=3))
states, kernel_results = do_sampling()
avg_effect, avg_stddev, school_effects_standard = states
school_effects_samples = (
avg_effect[:, np.newaxis] +
np.exp(avg_stddev)[:, np.newaxis] * school_effects_standard)
num_accepted = np.sum(kernel_results.is_accepted)
print('Acceptance rate: {}'.format(num_accepted / num_results))
fig, axes = plt.subplots(8, 2, sharex='col', sharey='col')
fig.set_size_inches(12, 10)
for i in range(num_schools):
axes[i][0].plot(school_effects_samples[:,i].numpy())
axes[i][0].title.set_text("School {} treatment effect chain".format(i))
sns.kdeplot(school_effects_samples[:,i].numpy(), ax=axes[i][1], shade=True)
axes[i][1].title.set_text("School {} treatment effect distribution".format(i))
axes[num_schools - 1][0].set_xlabel("Iteration")
axes[num_schools - 1][1].set_xlabel("School effect")
fig.tight_layout()
plt.show()
print("E[avg_effect] = {}".format(np.mean(avg_effect)))
print("E[avg_stddev] = {}".format(np.mean(avg_stddev)))
print("E[school_effects_standard] =")
print(np.mean(school_effects_standard[:, ]))
print("E[school_effects] =")
print(np.mean(school_effects_samples[:, ], axis=0))
# Compute the 95% interval for school_effects
school_effects_low = np.array([
np.percentile(school_effects_samples[:, i], 2.5) for i in range(num_schools)
])
school_effects_med = np.array([
np.percentile(school_effects_samples[:, i], 50) for i in range(num_schools)
])
school_effects_hi = np.array([
np.percentile(school_effects_samples[:, i], 97.5)
for i in range(num_schools)
])
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True)
ax.scatter(np.array(range(num_schools)), school_effects_med, color='red', s=60)
ax.scatter(
np.array(range(num_schools)) + 0.1, treatment_effects, color='blue', s=60)
plt.plot([-0.2, 7.4], [np.mean(avg_effect),
np.mean(avg_effect)], 'k', linestyle='--')
ax.errorbar(
np.array(range(8)),
school_effects_med,
yerr=[
school_effects_med - school_effects_low,
school_effects_hi - school_effects_med
],
fmt='none')
ax.legend(('avg_effect', 'HMC', 'Observed effect'), fontsize=14)
plt.xlabel('School')
plt.ylabel('Treatment effect')
plt.title('HMC estimated school treatment effects vs. observed data')
fig.set_size_inches(10, 8)
plt.show()
print("Inferred posterior mean: {0:.2f}".format(
np.mean(school_effects_samples[:,])))
print("Inferred posterior mean se: {0:.2f}".format(
np.std(school_effects_samples[:,])))
sample_shape = [5000]
_, _, _, predictive_treatment_effects = model.sample(
value=(tf.broadcast_to(np.mean(avg_effect, 0), sample_shape),
tf.broadcast_to(np.mean(avg_stddev, 0), sample_shape),
tf.broadcast_to(np.mean(school_effects_standard, 0),
sample_shape + [num_schools]),
None))
fig, axes = plt.subplots(4, 2, sharex=True, sharey=True)
fig.set_size_inches(12, 10)
fig.tight_layout()
for i, ax in enumerate(axes):
sns.kdeplot(predictive_treatment_effects[:, 2*i].numpy(),
ax=ax[0], shade=True)
ax[0].title.set_text(
"School {} treatment effect posterior predictive".format(2*i))
sns.kdeplot(predictive_treatment_effects[:, 2*i + 1].numpy(),
ax=ax[1], shade=True)
ax[1].title.set_text(
"School {} treatment effect posterior predictive".format(2*i + 1))
plt.show()
# The mean predicted treatment effects for each of the eight schools.
prediction = np.mean(predictive_treatment_effects, axis=0)
treatment_effects - prediction
residuals = treatment_effects - predictive_treatment_effects
fig, axes = plt.subplots(4, 2, sharex=True, sharey=True)
fig.set_size_inches(12, 10)
fig.tight_layout()
for i, ax in enumerate(axes):
sns.kdeplot(residuals[:, 2*i].numpy(), ax=ax[0], shade=True)
ax[0].title.set_text(
"School {} treatment effect residuals".format(2*i))
sns.kdeplot(residuals[:, 2*i + 1].numpy(), ax=ax[1], shade=True)
ax[1].title.set_text(
"School {} treatment effect residuals".format(2*i + 1))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Eight Schools problem
Step2: Data
Step4: Model
Step5: Bayesian inference
Step6: We can see shrinkage toward the population avg_effect above.
Step7: Criticism
Step8: We can look at the residuals between the treatment-effect data and the model's posterior predictions. These correspond to the plot above, which shows the shrinkage of the estimated effects toward the population mean.
Step9: Since we have a distribution of predictions for each school, we can also consider the distribution of the residuals.
|
2,695
|
<ASSISTANT_TASK:>
Python Code:
# Imports needed by the cells below (networkx, matplotlib, numpy and the
# datetime class were used but never imported in the original notebook).
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
#G = cf.load_seventh_grader_network()
G = nx.read_gpickle('Synthetic Social Network.pkl')
# Who are represented in the network?
G.nodes(data=True)
print(len(G.nodes()))
print(len(G))
# Who is connected to who in the network?
G.edges(data=True)
print(len(G.edges()))
# Let's get a list of nodes with their attributes.
G.nodes(data=True)
# NetworkX will return a list of tuples in the form (node_id, attribute_dictionary)
from collections import Counter
mf_counts = Counter([d['sex'] for n, d in G.nodes(data=True)])
def test_answer(mf_counts):
print(mf_counts['Female'])
print(mf_counts['Male'])
test_answer(mf_counts)
G.edges(data=True)
lst = [c['date'] for a, b, c in G.edges(data=True)]
print(min(lst), max(lst))
# Answer (blanks filled in): each edge stores a 'count' attribute, as
# described in the text below.
counts = [d['count'] for _, _, d in G.edges(data=True)]
maxcount = max(counts)
def test_maxcount(maxcount):
assert maxcount == 3
test_maxcount(maxcount)
# Answer: Follow the coding pattern.
G.add_node(30, {'age': 22, 'sex': 'Male'})
G.add_node(31, {'age': 24, 'sex': 'Female'})
G.add_edge(30, 31, date= datetime(2010, 1, 9))
G.add_edge(30, 7, date= datetime(2009, 12, 11))
G.add_edge(31, 7, date= datetime(2009, 12, 11))
G.edge[30]
def test_graph_integrity(G):
assert 30 in G.nodes()
assert 31 in G.nodes()
assert G.node[30]['sex'] == 'Male'
assert G.node[31]['sex'] == 'Female'
assert G.has_edge(30, 31)
assert G.has_edge(30, 7)
assert G.has_edge(31, 7)
print('All tests passed.')
test_graph_integrity(G)
nx.draw(G)
nx.draw(G, with_labels=True)
matrix = nx.to_numpy_matrix(G)
plt.pcolor(np.array(matrix))
plt.axes().set_aspect('equal') # set aspect ratio equal to get a square visualization
plt.xlim(min(G.nodes()), max(G.nodes())) # set x and y limits to the number of nodes present.
plt.ylim(min(G.nodes()), max(G.nodes()))
plt.title('Adjacency Matrix')
plt.show()
from circos import CircosPlot
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
nodes = sorted(G.nodes())
edges = G.edges()
node_cmap = {'Male':'blue', 'Female':'red'}
nodecolors = [node_cmap[G.node[n]['sex']] for n in G.nodes()]
c = CircosPlot(nodes, edges, radius=10, ax=ax, fig=fig, nodecolor=nodecolors)
c.draw()
plt.savefig('images/seventh.png', dpi=300)
from hiveplot import HivePlot
nodes = dict()
nodes['Male'] = [n for n,d in G.nodes(data=True) if d['sex'] == 'Male']
nodes['Female'] = [n for n,d in G.nodes(data=True) if d['sex'] == 'Female']
edges = dict()
edges['group1'] = G.edges(data=True)
nodes_cmap = dict()
nodes_cmap['Male'] = 'blue'
nodes_cmap['Female'] = 'red'
edges_cmap = dict()
edges_cmap['group1'] = 'black'
h = HivePlot(nodes, edges, nodes_cmap, edges_cmap)
h.draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Network Statistics
Step2: Exercise
Step3: Let's now figure out who is connected to who in the network
Step4: Exercise
Step5: Concept
Step6: Exercise
Step7: Edges can also store attributes in their attribute dictionary.
Step8: In this synthetic social network, the number of times the left student indicated that the right student was their favourite is stored in the "count" variable.
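For example (a hedged sketch, not from the original notebook), an edge attribute can be written and read back like this:
G.add_edge(1, 2, count=3)
G[1][2]['count']   # -> 3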
Step9: Exercise
Step10: Verify that you have added in the edges and nodes correctly by running the following cell.
Step11: Tests
Step12: If the network is small enough to visualize, and the node labels are small enough to fit in a circle, then you can use the with_labels=True argument.
Step13: However, note that if the number of nodes in the graph gets really large, node-link diagrams can begin to look like massive hairballs. This is undesirable for graph visualization.
Step14: Let's try another visualization, the Circos plot. We can order the nodes in the Circos plot according to the node ID, but any other ordering is possible as well. Edges are drawn between two nodes.
Step15: This visualization helps us highlight nodes that are poorly connected, and others that are strongly connected.
|
2,696
|
<ASSISTANT_TASK:>
Python Code:
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'trafficsign/train.p'
validation_file='trafficsign/valid.p'
testing_file = 'trafficsign/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = X_train.shape[0]
# TODO: Number of validation examples
n_validation = X_valid.shape[0]
# TODO: Number of testing examples.
n_test = X_test.shape[0]
# TODO: What's the shape of a traffic sign image?
image_shape = (X_train.shape[1],X_train.shape[2],X_train.shape[3])
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(set(y_test))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Number of validatio examples =", n_validation)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
import pandas as pd
# Visualizations will be shown in the notebook.
%matplotlib inline
pd.DataFrame.hist(pd.DataFrame(y_train))
plt.title('Distribution of the different classes in the Training data ')
pd.DataFrame.hist(pd.DataFrame(y_valid))
plt.title('Distribution of the different classes in the Validation data ')
pd.DataFrame.hist(pd.DataFrame(y_test))
plt.title('Distribution of the different classes in the Test data ')
plt.plot()
import numpy as np
%matplotlib inline
signNames = pd.read_csv("signnames.csv")
fig = plt.figure(figsize=(32,32), tight_layout={'h_pad':4})
for index in range(0,43,1):
image_index = (np.nonzero(y_train==index))[0][0]
ax=plt.subplot(11,4,index+1)
ax.imshow(X_train[image_index],interpolation=None)
ax.set_title( str(list(((signNames[signNames['ClassId']==index]))['SignName'])) )
plt.show()
#for gray_sclae images
import cv2
def gray(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
def RGB_gray(X_data):
X_out_data=np.zeros((len(X_data),32,32,1))
for i in range(len(X_data)):
X_out_data[i,:,:,0]=gray(X_data[i,:,:,:])
return X_out_data
def norm(image):
dest = np.zeros((32,32,1))
return cv2.normalize(image,dest ,alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
def normalize(X_data):
X_out_data=np.zeros((len(X_data),32,32,X_data.shape[3]))
for i in range(len(X_data)):
X_out_data[i,:,:,0]=norm(X_data[i,:,:,0])
return X_out_data
train_gray = RGB_gray(X_train)
valid_gray = RGB_gray(X_valid)
test_gray = RGB_gray(X_test)
X_train = (train_gray-128)/128
X_valid = (valid_gray-128)/128
X_test = (test_gray-128)/128
#X_train = normalize(train_gray)
#X_valid = normalize(valid_gray)
#X_test = normalize(test_gray)
import numpy as np
%matplotlib inline
signNames = pd.read_csv("signnames.csv")
fig = plt.figure(figsize=(32,32), tight_layout={'h_pad':4})
for index in range(0,43,1):
image_index = (np.nonzero(y_train==index))[0][0]
ax=plt.subplot(11,4,index+1)
ax.imshow(np.squeeze(X_train[image_index]),interpolation=None)
ax.set_title( str(list(((signNames[signNames['ClassId']==index]))['SignName'])) )
plt.show()
import tensorflow as tf
from tensorflow.contrib.layers import flatten
EPOCHS = 20
BATCH_SIZE=128
def LaNet(x):
mu = 0
sigma = 0.1
#Every x is a batch with shape (128,32,32,1)
#Fallowing LANET Architecture after first convolution the ouput will be (128,28,28,6).
#The filter will be of size (5,5,1,6) so conv1_W = (5,5,1,6) and conv1_b = 6
conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5,1 , 6), mean = mu, stddev = sigma))
conv1_b = tf.Variable(tf.zeros(6))
conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
# The activation function is relu
conv1 = tf.nn.relu(conv1)
# We will apply max_pooling after applying max_pool our dimensions are (128,14,14,6)
conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='convolution1')
#2nd convolution
#after applyig 2nd convolution our output should be (128,10,10,16)
#Here we have (128,14,14,6) as input so the conv2_w =(5,5,6,16) and conv2_b=(16)
conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
conv2_b = tf.Variable(tf.zeros(16))
conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') +conv2_b
#activation
conv2 = tf.nn.relu(conv2)
# we apply max_pooling to the (128,10,10,16) to make it (128,5,5,16)
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID', name='convolution2')
# There will be 16 feature maps each with dimesion 5*5
#print(conv2)
# Flatten the output shape of the final pooling layer such that it's 1D instead of 3D.
#The easiest way to do is by using tf.contrib.layers.flatten. this will convert (128,5,5,16) to (128,400)
fc0 = flatten(conv2)
h_fc0_drop = tf.nn.dropout(fc0, keep_prob)
#print(fully_connected_layer0)
#This fully connected layer is connected to next layer with 120 neurons. So, the number of weights will be (400,120)
fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(120))
fc1 = tf.matmul(h_fc0_drop, fc1_W) + fc1_b
fc1 = tf.nn.relu(fc1)
# fully_connected_layer1 is of dimension(400,120). fully_connected_layer2 will have 84 activations
fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
fc2 = tf.nn.relu(fc2)
#fully_connected_layer1 is of dimension(128,84). fully_connected_layer3 will have 43 activations
fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
fc3_b = tf.Variable(tf.zeros(43))
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
x = tf.placeholder(tf.float32,(None,32,32,1))
y=tf.placeholder(tf.int32,(None))
one_hot_y = tf.one_hot(y,43)
rate=0.001
keep_prob = tf.placeholder(tf.float32)
logits = LaNet(x)
CrossEntropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y,logits=logits)
Loss = tf.reduce_mean(CrossEntropy)
optimize = tf.train.AdamOptimizer(learning_rate=rate).minimize(Loss)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
total_accuracy = 0
sess = tf.get_default_session()
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob:1.0})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
from sklearn.utils import shuffle
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
num_examples = len(X_train)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
end = offset + BATCH_SIZE
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(optimize, feed_dict={x: batch_x, y: batch_y, keep_prob:0.6})
training_accuracy = evaluate(X_train, y_train)
validation_accuracy = evaluate(X_valid, y_valid)
print("EPOCH {} ...".format(i+1))
print("Training Accuracy = {:.3f}".format(training_accuracy))
print()
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
saver.save(sess, './lenet')
print("Model saved")
with tf.Session() as sess:
saver.restore(sess,tf.train.latest_checkpoint('.'))
test_accuracy = evaluate(X_test,y_test)
print(test_accuracy)
import os
import matplotlib.image as mpimg
import cv2
test_image =[]
def Resize(img):
#resize image into (32,32) dimension
resize_img = cv2.resize(img,(32,32))
#Convert to 3 channels
#processed_img = cv2.cvtColor(resize_img, cv2.COLOR_BGRA2BGR)
test_image.append(resize_img)
return resize_img
#fig = plt.figure(figsize=(32,32), tight_layout={'h_pad':4})
i = 0
for file in os.listdir('testImages'):
if '.jpg' in file:
plt.figure(figsize=(1,1))
img = mpimg.imread('testImages/' + file)
plt.title(file)
plt.imshow(Resize(img))
plt.figure()
plt.title(file+(' Original'))
plt.imshow(img)
i+=1
# Match the training preprocessing (grayscale, then (x - 128) / 128);
# the original applied min-max `normalize` here, which differs from training.
test_image = (RGB_gray(np.array(test_image)) - 128) / 128
#real_test_data=np.array(test_image).reshape(8,32,32,3)
#plt.figure(figsize=(1,1))
#plt.imshow(real_test_data[7])
with tf.Session() as sess:
saver.restore(sess,tf.train.latest_checkpoint('.') )
predicted_logits = sess.run(logits,feed_dict={x:test_image,keep_prob:1.0})
prediction_class = np.argmax(predicted_logits,axis=1)
for val in (prediction_class):
print (str(list(((signNames[signNames['ClassId']==val]))['SignName'])))
prediction_class
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
with tf.Session() as sess:
softmax = tf.nn.softmax(predicted_logits)
top5 = sess.run(tf.nn.top_k(softmax,k=5))
print(top5)
### Feel free to use as many code cells as needed.
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
# with size, normalization, ect if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
if activation_min != -1 & activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
with tf.Session() as sess:
X_final_graph = test_image
saver.restore(sess, tf.train.latest_checkpoint('.'))
conv1 = sess.graph.get_tensor_by_name('convolution1:0')
outputFeatureMap(X_final_graph,conv1, activation_min=-1, activation_max=-1, plt_num=1)
conv2 = sess.graph.get_tensor_by_name('convolution2:0')
outputFeatureMap(X_final_graph,conv2, activation_min=-1, activation_max=-1, plt_num=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Include an exploratory visualization of the dataset
Step3: Step 2
Step4: Set Up TensorFlow
Step5: Model Architecture
Step6: Features and Labels
Step7: Training Pipeline
Step8: Evaluation Pipeline
Step9: Train Model
Step10: Evaluating model on the test data
Step11: Step 3
Step12: Output Top 5 Softmax Probabilities For Each Image Found on the Web
Step13: Project Writeup
|
2,697
|
<ASSISTANT_TASK:>
Python Code:
children = ["sally", "jenny", "latoya", "atalia", "yu"]
text = "s7p9r2o3ut5s3"
data="02:49:35 127.0.0.1 GET /index.html 200\n03:01:06 127.0.0.1 GET /images/logo.gif 304\n03:52:36 127.0.0.1 GET /search.php 200\n04:17:03 127.0.0.1 GET /style.css 200\n05:04:54 127.0.0.1 GET /favicon.ico 404\n"
tempsfile="D2015-07-01\nT85,66\nD2015-07-02\nT78,70\nD2015-07-03\nT80,72\nD2015-07-04\nT81,72\nD2015-07-05\nT80,73"
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make separate lists of letters and numbers.
Step2: A web server records information about who visits the web pages. Here's an example of the data. The fields from left to right are time, IP address, request type GET/POST, url, response code.
Step3: Sometimes a file format includes an indicator of the row type followed by data for that row, and then the next row contains a different type. Here's an example.
|
2,698
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Ensure the right version of Tensorflow is installed.
!pip freeze | grep tensorflow==2.1 || pip install tensorflow==2.1
import os
import matplotlib.pyplot as plt
import tensorflow as tf
print(f"TensorFlow version: {tf.__version__}")
print(f"Eager execution: {tf.executing_eagerly()}")
train_dataset_url = "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv"
train_dataset_fp = tf.keras.utils.get_file(
fname=os.path.basename(train_dataset_url), origin=train_dataset_url
)
print(f"Local copy of the dataset file: {train_dataset_fp}")
!head -n5 {train_dataset_fp}
# column order in CSV file
column_names = [
"sepal_length",
"sepal_width",
"petal_length",
"petal_width",
"species",
]
feature_names = column_names[:-1]
label_name = column_names[-1]
print(f"Features: {feature_names}")
print(f"Label: {label_name}")
class_names = ["Iris setosa", "Iris versicolor", "Iris virginica"]
batch_size = 32
train_dataset = tf.data.experimental.make_csv_dataset(
train_dataset_fp,
batch_size,
column_names=column_names,
label_name=label_name,
num_epochs=1,
)
features, labels = next(iter(train_dataset))
print(features)
plt.scatter(
features["petal_length"], features["sepal_length"], c=labels, cmap="viridis"
)
plt.xlabel("Petal length")
plt.ylabel("Sepal length")
plt.show()
def pack_features_vector(features, labels):
Pack the features into a single array.
features = tf.stack(list(features.values()), axis=1)
return features, labels
train_dataset = train_dataset.map(pack_features_vector)
features, labels = next(iter(train_dataset))
print(features[:5])
# TODO 1
# TODO -- Your code here.
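# One possible completion (an assumption -- this follows the standard
# TensorFlow iris tutorial): two ReLU hidden layers of 10 nodes each and
# a 3-logit output layer, one logit per class.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation=tf.nn.relu, input_shape=(4,)),
    tf.keras.layers.Dense(10, activation=tf.nn.relu),
    tf.keras.layers.Dense(3),
])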
predictions = model(features)
predictions[:5]
tf.nn.softmax(predictions[:5])
print(f"Prediction: {tf.argmax(predictions, axis=1)}")
print(f"Labels: {labels}")
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def loss(model, x, y, training):
# TODO 2
# TODO -- Your code here.
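    # A minimal completion (an assumption, following the standard TensorFlow
    # iris tutorial): compute logits and the sparse categorical cross-entropy.
    y_ = model(x, training=training)
    return loss_object(y_true=y, y_pred=y_)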
def grad(model, inputs, targets):
with tf.GradientTape() as tape:
loss_value = loss(model, inputs, targets, training=True)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)
loss_value, grads = grad(model, features, labels)
print(
"Step: {}, Initial Loss: {}".format(
optimizer.iterations.numpy(), loss_value.numpy()
)
)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print(
"Step: {},Loss: {}".format(
optimizer.iterations.numpy(),
loss(model, features, labels, training=True).numpy(),
)
)
## Note: Rerunning this cell uses the same model variables
# Keep results for plotting
train_loss_results = []
train_accuracy_results = []
num_epochs = 201
for epoch in range(num_epochs):
epoch_loss_avg = tf.keras.metrics.Mean()
epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
# Training loop - using batches of 32
for x, y in train_dataset:
# Optimize the model
loss_value, grads = grad(model, x, y)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
# Track progress
epoch_loss_avg.update_state(loss_value) # Add current batch loss
# Compare predicted label to actual label
# training=True is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
epoch_accuracy.update_state(y, model(x, training=True))
# End epoch
train_loss_results.append(epoch_loss_avg.result())
train_accuracy_results.append(epoch_accuracy.result())
if epoch % 50 == 0:
print(
"Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(
epoch, epoch_loss_avg.result(), epoch_accuracy.result()
)
)
fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
fig.suptitle("Training Metrics")
axes[0].set_ylabel("Loss", fontsize=14)
axes[0].plot(train_loss_results)
axes[1].set_ylabel("Accuracy", fontsize=14)
axes[1].set_xlabel("Epoch", fontsize=14)
axes[1].plot(train_accuracy_results)
plt.show()
test_url = (
"https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv"
)
test_fp = tf.keras.utils.get_file(
fname=os.path.basename(test_url), origin=test_url
)
test_dataset = tf.data.experimental.make_csv_dataset(
test_fp,
batch_size,
column_names=column_names,
label_name="species",
num_epochs=1,
shuffle=False,
)
test_dataset = test_dataset.map(pack_features_vector)
test_accuracy = tf.keras.metrics.Accuracy()
for (x, y) in test_dataset:
# training=False is needed only if there are layers with different
# behavior during training versus inference (e.g. Dropout).
logits = model(x, training=False)
prediction = tf.argmax(logits, axis=1, output_type=tf.int32)
test_accuracy(prediction, y)
print(f"Test set accuracy: {test_accuracy.result():.3%}")
tf.stack([y, prediction], axis=1)
# TODO 3
# TODO -- Your code here.
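# A hedged sketch of the prediction step (the feature values below are
# illustrative assumptions): classify unlabeled examples with the trained model.
predict_dataset = tf.convert_to_tensor([
    [5.1, 3.3, 1.7, 0.5],
    [5.9, 3.0, 4.2, 1.5],
    [6.9, 3.1, 5.4, 2.1],
])
predictions = model(predict_dataset, training=False)
for i, logits in enumerate(predictions):
    class_idx = tf.argmax(logits).numpy()
    p = tf.nn.softmax(logits)[class_idx]
    print(f"Example {i} prediction: {class_names[class_idx]} ({100 * p:.1f}%)")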
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure imports
Step2: The Iris classification problem
Step3: Inspect the data
Step4: From this view of the dataset, notice the following
Step5: Each label is associated with a string name (for example, "setosa"), but machine learning typically relies on numeric values. The label numbers are mapped to a named representation, such as
Step6: Create a tf.data.Dataset
Step7: The make_csv_dataset function returns a tf.data.Dataset of (features, label) pairs, where features is a dictionary
Step8: Notice that like-features are grouped together, or batched. Each example row's fields are appended to the corresponding feature array. Change the batch_size to set the number of examples stored in these feature arrays.
Step10: To simplify the model building step, create a function to repackage the features dictionary into a single array with shape
Step11: Then use the tf.data.Dataset#map method to pack the features of each (features,label) pair into the training dataset
Step12: The features element of the Dataset are now arrays with shape (batch_size, num_features). Let's look at the first few examples
Step13: Select the type of model
Step14: The activation function determines the output shape of each node in the layer. These non-linearities are important—without them the model would be equivalent to a single layer. There are many tf.keras.activations, but ReLU is common for hidden layers.
Step15: Here, each example returns a logit for each class.
Step16: Taking the tf.argmax across classes gives us the predicted class index. But, the model hasn't been trained yet, so these aren't good predictions
Step17: Train the model
Step18: Use the tf.GradientTape context to calculate the gradients used to optimize your model
Step19: Create an optimizer
Step20: We'll use this to calculate a single optimization step
Step21: Training loop
Step22: Visualize the loss function over time
Step23: Evaluate the model's effectiveness
Step24: Evaluate the model on the test dataset
Step25: We can see on the last batch, for example, the model is usually correct
Step26: Use the trained model to make predictions
|
2,699
|
<ASSISTANT_TASK:>
Python Code:
import sys # system module
import pandas as pd # data package
import matplotlib.pyplot as plt # graphics module
import datetime as dt # date and time module
import numpy as np # foundation for Pandas
import tweepy # Twitter API wrapper
import json
%matplotlib inline
from plotly.offline import iplot, iplot_mpl # plotly imports
import plotly.graph_objs as go
import plotly
import cufflinks as cf
cf.set_config_file(offline=True, offline_show_link=False)
plotly.offline.init_notebook_mode()
pd.set_option('display.mpl_style', 'default') # Make the graphs prettier
# Fill in your own Twitter API credentials here (never publish real keys).
consumer_key = 'YOUR_CONSUMER_KEY'
consumer_secret = 'YOUR_CONSUMER_SECRET'
access_token = 'YOUR_ACCESS_TOKEN'
access_secret = 'YOUR_ACCESS_SECRET'
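# A safer pattern (a hedged alternative, not part of the original notebook):
# pull the credentials from environment variables when they are set, so no
# secret ever lives in the notebook itself.
import os
consumer_key = os.environ.get('TWITTER_CONSUMER_KEY', consumer_key)
consumer_secret = os.environ.get('TWITTER_CONSUMER_SECRET', consumer_secret)
access_token = os.environ.get('TWITTER_ACCESS_TOKEN', access_token)
access_secret = os.environ.get('TWITTER_ACCESS_SECRET', access_secret)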
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
hilz = api.get_user('HillaryClinton') # Get the User object for Hillary Clinton
bern = api.get_user('berniesanders') # Get the User object for Bernie Sanders
print (hilz.screen_name,'has', hilz.followers_count, 'followers')
print(bern.screen_name,'has', bern.followers_count, 'followers')
result = api.search(q='from:HillaryClinton') #Perform a basic search query where we
#search for the candidate's tweets
len(result) #Print the number of items returned by the search query to
#verify our query ran (15 by default).
tweet = result[0] #Get the first tweet in the result
for param in dir(tweet): #Analyze the data in one tweet to see what elements we can pull into our dataset
#key names beginning with an '_' are hidden and usually not required, so we'll skip
if not param.startswith("_"):
print ("%s : %s\n" % (param, eval('tweet.'+param)))
alltweets = []
#Twitter has a restriction on how
def get_all_tweets(screen_name): #many tweets we can pull: approx.3200
#make initial request for most recent tweets
#(200 is the maximum allowed count)
new_tweets = api.user_timeline(
screen_name = screen_name,count=200)
alltweets.extend(new_tweets) #save most recent tweets
oldest = alltweets[-1].id - 1 #save the id of the oldest tweet less one
while len(new_tweets) > 0: #keep grabbing tweets until there are
print ("getting tweets before %s" % (oldest)) #no tweets left to grab
new_tweets = api.user_timeline(
screen_name = screen_name,count=200,max_id=oldest) #all subsequent requests
#use the max_id param to prevent duplicates
alltweets.extend(new_tweets) #save most recent tweets again
oldest = alltweets[-1].id - 1
print ("...%s tweets downloaded so far" % (len(alltweets)))
if __name__ == '__main__':
get_all_tweets("HillaryClinton")
if __name__ == '__main__':
get_all_tweets("BernieSanders")
def toDataFrame(tweets):
DataSet = pd.DataFrame()
DataSet['tweetID'] = [tweet.id for tweet in tweets]
DataSet['tweetText'] = [tweet.text for tweet in tweets]
DataSet['tweetRetweetCt'] = [tweet.retweet_count for tweet in tweets]
DataSet['tweetFavoriteCt'] = [tweet.favorite_count for tweet in tweets]
DataSet['tweetCreated'] = [tweet.created_at for tweet in tweets]
DataSet['userID'] = [tweet.user.id for tweet in tweets]
DataSet['userScreen'] = [tweet.user.screen_name for tweet in tweets]
DataSet['userName'] = [tweet.user.name for tweet in tweets]
DataSet['userFollowerCt'] = [tweet.user.followers_count for tweet in tweets]
DataSet['userFriendsCt'] = [tweet.user.friends_count for tweet in tweets]
return DataSet
DataSet = toDataFrame(alltweets)
from datetime import datetime as dt, date, time
# Reduce tweet timestamps to calendar dates (midnight) for daily grouping.
DataSet['tweetDate'] = pd.to_datetime(DataSet['tweetCreated'].dt.strftime('%m/%d/%Y'))
DataSet.head(5)
Primary = DataSet[(DataSet['tweetDate'] > '2016-01-31') & (DataSet['tweetDate'] <= '2016-04-27')]
#selecting tweets in desired timeframe
Primary.groupby('userScreen').tweetFavoriteCt.sum() #pulling sum of tweet favorites by candidate
Primary.groupby('userScreen').tweetRetweetCt.sum() #pulling sum of tweet retweets by candidate
number_tweets=Primary.tweetDate.dt.strftime('%m/%d/%Y').value_counts().sort_index()
#creating set of amount of tweets per day in total
TweetsbyCandidate = Primary.groupby('userScreen').tweetDate.value_counts().sort_index()
AllCandidateTweets = TweetsbyCandidate.unstack()
#creating set of amount of tweets per day separated by candidate
#import democratic primary results csv
url = '/Users/Samantha/Desktop/Data_Bootcamp/2016 Democratic Primary Results.csv'
dpr = pd.read_csv(url, nrows=40) # read file and assign it to dpr
dpr.head(5) #check file has been stored correctly
#create a second set with States as the index and sorted on date
dpr1=dpr.set_index(['State'])
dpr1=dpr1.sort_values(['Date'])
dpr1.head(5)
#group results by DATE
DemsbyDate = dpr[['Date', 'Clinton Delegates', 'Sanders Delegates']].groupby('Date')
Sum = DemsbyDate.sum()
Sum
#bar chart for primary results by STATE
Sanders = dict(type="bar", # trace type
orientation="h", # make bars horizontal
name="Sanders", # legend entry
x=dpr1["Sanders Delegates"], # x data
y=dpr1.index, # y data
marker={"color": "#0048BA"} #bar color
)
Clinton = dict(type="bar", # trace type
orientation="h", # horizontal bars
name="Clinton", # legend entry
x=dpr1["Clinton Delegates"], # x data
y=dpr1.index, # y data
marker={"color": "#DE5D83"} # bar color
)
layout = go.Layout(width=850, height=950,
title = 'Democratic Primary Results by State',
xaxis=dict(
title='Delegates',
titlefont=dict(
family='Arial, sans-serif',
size=18,
color='grey'
),
showticklabels=True,
tickangle=45,
tickfont=dict(
family='Arial, sans-serif',
size=14,
color='black'
),
exponentformat='e',
showexponent='All'
),
yaxis=dict(
title='State',
titlefont=dict(
family='Arial, sans-serif',
size=18,
color='grey'
),
showticklabels=True,
tickangle=45,
tickfont=dict(
family='Arial, sans-serif',
size=14,
color='black'
),
exponentformat='e',
showexponent='All'
),
)
iplot(go.Figure(data=[Sanders, Clinton], layout=layout))
for col in dpr.columns:
dpr[col] = dpr[col].astype('str')
scl = [[0.0, '#0048BA'],[0.495,'#0048BA'],[0.505, '#DE5D83'],[1.0, '#DE5D83']]
dpr['text'] = dpr['State'] + '<br>' +\
dpr['Date']+ '<br>' +\
'Clinton Delegates: '+dpr['Clinton Delegates']+'<br>'+\
'Sanders Delegates: '+dpr['Sanders Delegates']+ '<br>' +\
'Winner: ' + dpr['Winner']
data = [ dict(
type='choropleth',
colorscale = scl,
showscale = False,
autocolorscale = False,
hoverinfo = 'location+text',
locations = dpr['Code'],
z = dpr['% Clinton'].astype(float),
locationmode = 'USA-states',
text = dpr['text'],
marker = dict(
line = dict (
color = 'rgb(255,255,255)',
width = 2
)
),
)]
layout = dict(
title = "'"+'Merica: 2016 Democratic Primary Results',
showlegend = True,
geo = dict(
scope='usa',
projection=dict( type='albers usa' ),
showlakes = True,
lakecolor = 'rgb(255, 255, 255)',
),
)
iplot(go.Figure(data=data, layout=layout))
#Total Tweets overtime for both candidates
plt.rcParams['figure.figsize'] = (35, 10)
# Plot the Time Zone data as a bar-graph
number_tweets.plot(kind='bar')
plt.xlabel('Dates', fontsize=25)
plt.ylabel('Tweet Count', fontsize=25)
plt.title('Total Tweet Timeline', fontsize=30, fontweight='bold')
fig, ax = plt.subplots()
fig.suptitle('Total Tweet Timeline by Candidate', fontsize=30, fontweight='bold')
AllCandidateTweets.T.plot(ax=ax,
kind='bar', # line plot
color=['mediumslateblue', 'orchid'],) # line color
ax.set_xlabel('Dates', fontsize=25)
ax.set_ylabel('Tweet Count', fontsize=25)
ax.annotate('Coming up to NYC Primary', color= 'black', xy=(74, 55), xytext=(72, 59),weight='bold',
arrowprops=dict(facecolor='black', shrink=0.1), size=20)
ax.annotate('Coming up to Super Tuesday', color = 'black', xy=(18, 55), xytext=(20, 59), weight='bold',
arrowprops=dict(facecolor='black', shrink=0.1), size=20)
Sum.iplot(kind='bar',
subplots=True,
color=['#DE5D83','#0048BA'],
subplot_titles=True,
legend=False,
layout = dict(
autosize=False,
title='Delegate Wins Over Time',
showlegend=False,
width=950,
height=500,
margin=dict(
l=50,
r=50,
b=100,
t=100,
pad=4)))
AllCandidateTweets.T.iplot(kind='bar', subplots=True, subplot_titles=True, layout = dict(
autosize=False,
title='Candidate Tweets Over Time',
width=975,
height=500))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating and Prepping the Datasets
Step2: Once we confirmed that we could access the Twitter API for both Hillary Clinton's and Bernie Sanders' accounts, we ran a query for a single candidate's tweets. This allowed us to see what sort of data parameters we can access for a single Tweet.
Step3: A single tweet holds a large amount of information!
Step4: Pull tweets for Hillary Clinton
Step5: Add in Bernie Sanders Tweets
Step6: Next, we create a function that converts the given list of tweets into a Pandas DataFrame. Note
Step7: We now are going to create a few additional data sets to later highlight different aspects of the Twitter data in our visuals.
Step8: Now import and read the 2016 Democratic Primary Results csv and assign it to a new DataFrame.
Step9: In many cases, multiple primaries take place on the same date, the most well known example being Super Tuesday with 11 state primaries taking place on single day (March 1st). It is important to look at the data not only sorted geographically, but by dates as well.
Step10: The General Twitter Landscape
Step11: Here, we see that Hillary has won all of the largest delegate states (Texas, Florida, New York, Pennsylvania). Because of these major wins, Bernie has only remained in the running by winning many of the much smaller delegate states (New Hampshire, Vermont, Oklahoma...his 7 state winning streak of Idaho, Utah, Washington, Hawaii, Alaska, Wisconsin, Wyoming, among many others).
Step12: In this format, we see that Hillary has completely won the southeast quarter of the US, continuing up through much of the northeast. Bernie's wins are more spread out, with small pockets of wins in New England and part of the Mid-West.
Step13: To compare the above Twitter activity to the election results, below shows the number of delegates won by each candidate over time as well as the Tweets broken out side by side as well.
|