| text_prompt (stringlengths 168 to 30.3k) | code_prompt (stringlengths 67 to 124k) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's find the # of investments made by each company
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd

# I'm considering only acquisitions made in the USA, in USD (dollars)
acquisitions = pd.read_csv('data/acquisitions.csv')
acquisitions = acquisitions[acquisitions['acquirer_country_code'] == 'USA']
acquisitions[:3]
#acquirer_permalink
#rounds_agg = df_rounds.groupby(['company_permalink', 'funding_round_type'])['raised_amount_usd'].agg({'amount': [ pd.Series.sum, pd.Series.count]})
number_of_acquisitions = acquisitions.groupby(['acquirer_permalink'])['acquirer_permalink'].agg({'amount': [ pd.Series.count]}).reset_index()
number_of_acquisitions.columns = number_of_acquisitions.columns.droplevel()
number_of_acquisitions.columns = ['permalink', 'number_of_acquisitions']
number_of_acquisitions = number_of_acquisitions.set_index('permalink')
number_of_acquisitions[:3]
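# Note (added): the dict-of-lists renaming syntax passed to .agg() above was deprecated in
# pandas 0.20 and removed in later releases. A hypothetical equivalent on modern pandas:
# number_of_acquisitions = (acquisitions.groupby('acquirer_permalink')['acquirer_permalink']
#                           .count().rename('number_of_acquisitions').to_frame())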
investments = pd.read_csv('data/investments.csv')
investments = investments[investments['investor_country_code'] == 'USA']
investments[:3]
#acquirer_permalink
#rounds_agg = df_rounds.groupby(['company_permalink', 'funding_round_type'])['raised_amount_usd'].agg({'amount': [ pd.Series.sum, pd.Series.count]})
number_of_investments = investments.groupby(['investor_permalink'])['investor_permalink'].agg({'amount': [ pd.Series.count]}).reset_index()
number_of_investments.columns = number_of_investments.columns.droplevel()
number_of_investments.columns = ['permalink', 'number_of_investments']
number_of_investments = number_of_investments.set_index('permalink')
number_of_investments[:3]
#Number of different companies in which each company have invested in
number_of_unique_investments = investments.groupby(['investor_permalink'])['company_permalink'].agg({'amount': [ pd.Series.nunique]}).reset_index()
number_of_unique_investments.columns = number_of_unique_investments.columns.droplevel()
number_of_unique_investments.columns = ['permalink', 'number_of_unique_investments']
number_of_unique_investments = number_of_unique_investments.set_index('permalink')
number_of_unique_investments[:3]
number_of_investors_per_round = investments.groupby(['company_permalink', 'funding_round_permalink'])['investor_permalink'].agg({'investor_permalink': [ pd.Series.count]}).reset_index()
number_of_investors_per_round.columns = number_of_investors_per_round.columns.droplevel(0)
number_of_investors_per_round.columns = ['company_permalink', 'funding_round_permalink', 'count']
number_of_investors_per_round = number_of_investors_per_round.groupby(['company_permalink']).agg({'count': [ pd.Series.mean]}).reset_index()
number_of_investors_per_round.columns = number_of_investors_per_round.columns.droplevel(0)
number_of_investors_per_round.columns = ['company_permalink', 'number_of_investors_per_round']
number_of_investors_per_round = number_of_investors_per_round.set_index('company_permalink')
number_of_investors_per_round[:3]
from numpy import nanmean
#investments['raised_amount_usd'].dtype()
investments['raised_amount_usd'] = investments['raised_amount_usd'].astype(float)
avg_amount_invested_per_round = investments.groupby(['company_permalink', 'funding_round_permalink'])['raised_amount_usd'].agg({'raised_amount_usd': [ pd.Series.mean]}).reset_index()
avg_amount_invested_per_round.columns = avg_amount_invested_per_round.columns.droplevel(0)
avg_amount_invested_per_round.columns = ['company_permalink', 'funding_round_permalink', 'mean']
avg_amount_invested_per_round = avg_amount_invested_per_round.groupby(['company_permalink']).agg({'mean': [ pd.Series.mean]}).reset_index()
avg_amount_invested_per_round.columns = avg_amount_invested_per_round.columns.droplevel(0)
avg_amount_invested_per_round.columns = ['company_permalink', 'avg_amount_invested_per_round']
avg_amount_invested_per_round = avg_amount_invested_per_round.set_index('company_permalink')
avg_amount_invested_per_round = avg_amount_invested_per_round.fillna(0)
avg_amount_invested_per_round[:3]
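# Note (added): the 'startups' DataFrame used below is assumed to have been loaded in an
# earlier cell of the original notebook; it is not defined in this snippet.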
startups = startups.join(number_of_acquisitions).join(number_of_investments).join(number_of_unique_investments).join(number_of_investors_per_round).join(avg_amount_invested_per_round)
startups[['number_of_acquisitions', 'number_of_investments', 'number_of_unique_investments','number_of_investors_per_round', 'avg_amount_invested_per_round']] = startups[['number_of_acquisitions', 'number_of_investments', 'number_of_unique_investments','number_of_investors_per_round', 'avg_amount_invested_per_round']].fillna(value=0)
startups[:3]
startups.to_csv('data/startups_1_1.csv')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Decoding DICOM files for medical image processing
Step2: Install the required packages and restart the runtime
Step3: Decode the DICOM image
Step4: Decode the DICOM metadata and work with the tags
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!curl -OL https://github.com/tensorflow/io/raw/master/docs/tutorials/dicom/dicom_00000001_000.dcm
!ls -l dicom_00000001_000.dcm
try:
    # Use the Colab's preinstalled TensorFlow 2.x
    %tensorflow_version 2.x
except:
    pass
!pip install tensorflow-io
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_io as tfio
image_bytes = tf.io.read_file('dicom_00000001_000.dcm')
image = tfio.image.decode_dicom_image(image_bytes, dtype=tf.uint16)
skipped = tfio.image.decode_dicom_image(image_bytes, on_error='skip', dtype=tf.uint8)
lossy_image = tfio.image.decode_dicom_image(image_bytes, scale='auto', on_error='lossy', dtype=tf.uint8)
fig, axes = plt.subplots(1,2, figsize=(10,10))
axes[0].imshow(np.squeeze(image.numpy()), cmap='gray')
axes[0].set_title('image')
axes[1].imshow(np.squeeze(lossy_image.numpy()), cmap='gray')
axes[1].set_title('lossy image');
tag_id = tfio.image.dicom_tags.PatientsAge
tag_value = tfio.image.decode_dicom_data(image_bytes,tag_id)
print(tag_value)
print(f"PatientsAge : {tag_value.numpy().decode('UTF-8')}")
tag_id = tfio.image.dicom_tags.PatientsSex
tag_value = tfio.image.decode_dicom_data(image_bytes,tag_id)
print(f"PatientsSex : {tag_value.numpy().decode('UTF-8')}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As a first example, we can use the nltk.pos_tag function to tag an English sentence morphologically (part-of-speech tagging), as long as we pass it in as a list of words or tokens.
Step2: The tagger works fairly well, although it obviously makes mistakes. If we try the famous Chomsky sentence, we find some incorrectly tagged words.
Step3: How does this tagger work? nltk.pos_tag is a POS tagger based on machine learning. From thousands of manually tagged example sentences, the system has learned, by computing frequencies and generalizing, which grammatical category is most likely for each token.
Step4: To compare both versions, we can print the first sentence of this corpus in its untagged version (note that it is simply a list of tokens) and in its tagged version (a list of tuples whose first element is the token and whose second is the POS tag).
Step5: Automatic part-of-speech tagging
Step6: Obviously it does not work well, but note that in the previous example with oracion1 we correctly tagged 2 out of 10 tokens. If we evaluate it on a larger corpus, such as the set of Brown sentences we already have, we get an accuracy above 13%.
Step7: The .evaluate method, which we can run on any tagger by passing it a reference collection that is already tagged, returns a single number.
Step8: When we evaluate it on a larger corpus of sentences, we see that our accuracy rises above 32%.
Step9: N-gram-based tagging
Step10: Next we will create a unigram-based tagger (sequences of a single word, i.e. isolated words) using the nltk.UnigramTagger class, providing our corpusEntrenamiento for it to learn from. Once trained, we will evaluate its performance on corpusTest.
Step11: Unigram taggers are built by simply computing a frequency distribution for each token (word) and always assigning the most probable POS tag. In our case this strategy works reasonably well.
Step12: As the examples show, the results are disastrous. Most tokens are left without a tag and show up as None.
Step13: Why does this happen? Our intuition is not wrong: if we compute conditional frequency distributions over longer word sequences, our data become more fine-grained. However, when we consider longer token sequences we run the risk that those sequences never appear as such in the training corpus.
Step14: In-class exercise
|
<ASSISTANT_TASK:>
Python Code:
import nltk
oracion1 = 'This is the lost dog I found at the park'.split()
oracion2 = 'The progress of the humankind as I progress'.split()
print(nltk.pos_tag(oracion1))
print(nltk.pos_tag(oracion2))
oracion3 = 'Green colorless ideas sleep furiously'.split()
print(nltk.pos_tag(oracion3))
print(nltk.pos_tag(["My", "name", "is", "Prince"]))
print(nltk.pos_tag('He was born during the summer of 1988'.split()))
print(nltk.pos_tag('''She's Tony's sister'''.split()))
print(nltk.pos_tag('''My name is Sasolamatom and I have a stromkupft dog'''.split()))
from nltk.corpus import brown
brown_sents = brown.sents(categories='news')
brown_tagged_sents = brown.tagged_sents(categories='news')
# print the first sentence of the Brown news category
print(brown_sents[0]) # untagged
print(brown_tagged_sents[0]) # POS-tagged
defaultTagger = nltk.DefaultTagger('NN')
print(defaultTagger.tag(oracion1))
print(defaultTagger.tag(oracion2))
defaultTagger.evaluate(brown_tagged_sents)
patrones = [
(r'[Aa]m$', 'BEM'), # irregular forms of 'to be'
(r'[Aa]re$', 'BER'), #
(r'[Ii]s$', 'BEZ'), #
(r'[Ww]as$', 'BEDZ'), #
(r'[Ww]ere$', 'BED'), #
(r'[Bb]een$', 'BEN'), #
(r'[Hh]ave$', 'HV'), # irregular forms of 'to have'
(r'[Hh]as$', 'HVZ'), #
(r'[Hh]ad$', 'HVD'), #
(r'I$', 'PRP'), # personal pronouns
(r'[Yy]ou$', 'PRP'), #
(r'[Hh]e$', 'PRP'), #
(r'[Ss]he$', 'PRP'), #
(r'[Ii]t$', 'PRP'), #
(r'[Tt]hey$', 'PRP'), #
(r'[Aa]n?$', 'AT'), #
(r'[Tt]he$', 'AT'), #
(r'[Ww]h.+$', 'WP'), # wh- pronoun
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # simple past
(r'.*es$', 'VBZ'), # 3rd singular present
(r'[Cc]an(not|n\'t)?$', 'MD'), # modals
(r'[Mm]ight$', 'MD'), #
(r'[Mm]ay$', 'MD'), #
(r'.+ould$', 'MD'), # modals: could, should, would
(r'.*ly$', 'RB'), # adverbs
(r'.*\'s$', 'NN$'), # possessive nouns
(r'.*s$', 'NNS'), # plural nouns
(r'-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'^to$', 'TO'), # to
(r'^in$', 'IN'), # in prep
(r'^[A-Z]+([a-z])*$', 'NNP'), # proper nouns
(r'.*', 'NN') # nouns (default)
]
regexTagger = nltk.RegexpTagger(patrones)
print(regexTagger.tag('I was taking a sunbath in Alpedrete'.split()))
print(regexTagger.tag('She would have found 100 dollars in the bag'.split()))
print(regexTagger.tag('DSFdfdsfsd 1852 to dgdfgould fXXXdg in XXXfdg'.split()))
regexTagger.evaluate(brown_tagged_sents)
print(len(brown_tagged_sents))
print((len(brown_tagged_sents) * 90) / 100)
size = int(len(brown_tagged_sents) * 0.9) # make sure to convert this to an integer
corpusEntrenamiento = brown_tagged_sents[:size]
corpusTest = brown_tagged_sents[size:]
# as you can see, these two corpora contain different sentences
print(corpusEntrenamiento[0])
print(corpusTest[0])
unigramTagger = nltk.UnigramTagger(corpusEntrenamiento)
print(unigramTagger.evaluate(corpusTest))
# how well does it tag our example sentences?
print(unigramTagger.tag(oracion1))
print(unigramTagger.tag(oracion2))
print(unigramTagger.tag(oracion3))
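# Minimal sketch (added, not part of the original notebook) of the idea behind the unigram
# tagger: for each word form, choose the tag it received most often in the training corpus,
# via a conditional frequency distribution.
cfd = nltk.ConditionalFreqDist((w, t) for sent in corpusEntrenamiento for (w, t) in sent)
print(cfd['the'].max())   # most frequent tag observed for 'the' in the training data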
bigramTagger = nltk.BigramTagger(corpusEntrenamiento)
trigramTagger = nltk.TrigramTagger(corpusEntrenamiento)
# it performs terribly :-(
print(bigramTagger.tag(oracion2))
print(trigramTagger.tag(oracion2))
# cheating here: I ask it to tag a sentence it has already seen during training
print(bigramTagger.tag(['The', 'Fulton', 'County', 'Grand', 'Jury', 'said', 'Friday', 'an', 'investigation', 'of',
"Atlanta's", 'recent', 'primary', 'election', 'produced', '``', 'no', 'evidence', "''", 'that', 'any', 'irregularities', 'took', 'place', '.']))
print(bigramTagger.evaluate(corpusTest))
print(trigramTagger.evaluate(corpusTest))
unigramTagger = nltk.UnigramTagger(corpusEntrenamiento, backoff=regexTagger)
bigramTagger = nltk.BigramTagger(corpusEntrenamiento, backoff=unigramTagger)
trigramTagger = nltk.TrigramTagger(corpusEntrenamiento, backoff=bigramTagger)
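# Backoff chain (added note): the trigram tagger falls back to the bigram tagger for unseen
# trigram contexts, the bigram tagger falls back to the unigram tagger, and the unigram
# tagger falls back to the regular-expression tagger, so no token is left untagged.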
print(trigramTagger.evaluate(corpusTest))
print(trigramTagger.tag(oracion1))
print(trigramTagger.tag(oracion2))
print(trigramTagger.tag(oracion3))
# write your code here
corpus = brown.tagged_sents()
size = int(len(corpus) * 0.9)
training = corpus[:size]
test = corpus[size:]
defaultTagger = nltk.DefaultTagger('NN')
unigramTagger = nltk.UnigramTagger(training, backoff=defaultTagger)
bigramTagger = nltk.BigramTagger(training, backoff=unigramTagger)
trigramTagger = nltk.TrigramTagger(training, backoff=bigramTagger)
print(trigramTagger.evaluate(test))
print(trigramTagger.tag(oracion2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Each row of the data refers to a single observed flower
Step2: For use in Scikit-Learn, we will extract the features matrix and target array from the DataFrame
Step3: To summarize, the expected layout of features and target values is visualized in the following diagram
Step4: With this data in place, we can use the recipe outlined earlier. Let's walk through the process
Step5: Note that other more general linear regression models exist as well; you can read more about them in the sklearn.linear_model module documentation.
Step6: Keep in mind that when the model is instantiated, the only action is the storing of these hyperparameter values.
Step7: 4. Fit the model to your data
Step8: This fit() command causes a number of model-dependent internal computations to take place, and the results of these computations are stored in model-specific attributes that the user can explore.
Step9: Comparing to the data definition, we see that they are very close to the input slope of 2 and intercept of -1.
Step10: As before, we need to coerce these x values into a [n_samples, n_features] features matrix, after which we can feed it to the model
Step11: Finally, let's visualize the results by plotting first the raw data, and then this model fit
Step12: Typically the efficacy of the model is evaluated by comparing its results to some known baseline, as we will see in the next example
Step13: With the data arranged, we can follow our recipe to predict the labels
Step14: Finally, we can use the accuracy_score utility to see the fraction of predicted labels that match their true value
Step15: With an accuracy topping 97%, we see that even this very naive classification algorithm is effective for this particular dataset!
Step16: To plot the results
Step17: In the two-dimensional representation, the species are fairly well separated, even though the PCA algorithm had no knowledge of the species labels!
Step18: As before, we will
Step19: By splitting the data by cluster number, GMM algorithm recovered the underlying label without an expert
Step20: The images data is a three-dimensional array
Step21: In order to work with this data within Scikit-Learn,
Step22: We see here that there are 1,797 samples and 64 features.
Step23: We see that the projected data is now two-dimensional.
Step24: This plot gives us some good intuition into how well various numbers are separated in the larger 64-dimensional space.
Step25: Now that we have predicted our model, we can gauge its accuracy by comparing the true values of the test set to the predictions
Step26: With even this extremely simple model, we find about 80% accuracy for classification of the digits!
Step27: This shows us where the mis-labeled points tend to be
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns
sns.set_context("talk", font_scale=1.5)
iris = sns.load_dataset('iris')
iris.head()
%matplotlib inline
import seaborn as sns;
sns.set()
sns.set_context("talk", font_scale=1)
sns.pairplot(iris, hue='species', size=1.5);
X_iris = iris.drop('species', axis=1)
X_iris.shape
X_iris[:3]
y_iris = iris['species']
y_iris.shape
y_iris[:3]
import matplotlib.pyplot as plt
import numpy as np
rng = np.random.RandomState(42)
x = 10 * rng.rand(50)
y = 2 * x - 1 + rng.randn(50)
plt.scatter(x, y)
plt.xlabel('x', fontsize = 30)
plt.ylabel('y', fontsize = 30);
from sklearn.linear_model import LinearRegression
model = LinearRegression(fit_intercept=True)
model
#help(LinearRegression)
X = x[:, np.newaxis]
X.shape
model.fit(X, y)
# The parameters represent the slope of the simple linear fit to the data.
model.coef_
# The parameter represent the intercept of the simple linear fit to the data.
model.intercept_
xfit = np.linspace(-1, 11)
xfit
Xfit = xfit[:, np.newaxis]
yfit = model.predict(Xfit)
plt.scatter(x, y)
plt.plot(xfit, yfit);
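# Note (added): sklearn.cross_validation was deprecated in scikit-learn 0.18 and removed in 0.20;
# on current versions the same train_test_split function lives in sklearn.model_selection.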
from sklearn.cross_validation import train_test_split
Xtrain, Xtest, ytrain, ytest = train_test_split(X_iris, y_iris,
random_state=1)
from sklearn.naive_bayes import GaussianNB # 1. choose model class
model = GaussianNB() # 2. instantiate model
model.fit(Xtrain, ytrain) # 3. fit model to data
y_model = model.predict(Xtest) # 4. predict on new data
print(*zip(ytest, y_model))
from sklearn.metrics import accuracy_score
accuracy_score(ytest, y_model)
from sklearn.decomposition import PCA # 1. Choose the model class
model = PCA(n_components=2) # 2. Instantiate the model with hyperparameters
model.fit(X_iris) # 3. Fit to data. Notice y is not specified!
X_2D = model.transform(X_iris) # 4. Transform the data to two dimensions
sns.set_context("talk", font_scale=1.5)
iris['PCA1'] = X_2D[:, 0]
iris['PCA2'] = X_2D[:, 1]
sns.lmplot("PCA1", "PCA2", hue='species', data=iris, fit_reg=False);
from sklearn.mixture import GMM # 1. Choose the model class
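# Note (added): the GMM class was deprecated in scikit-learn 0.18 and later removed; on current
# versions the near drop-in replacement is sklearn.mixture.GaussianMixture(n_components=3,
# covariance_type='full').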
model = GMM(n_components=3,
covariance_type='full') # 2. Instantiate the model with hyperparameters
model.fit(X_iris) # 3. Fit to data. Notice y is not specified!
y_gmm = model.predict(X_iris) # 4. Determine cluster labels
sns.plotting_context()
sns.set_context("talk", font_scale=1.5)
iris['cluster'] = y_gmm
sns.lmplot("PCA1", "PCA2", data=iris, hue='species',
col='cluster', fit_reg=False);
from sklearn.datasets import load_digits
digits = load_digits()
digits.images.shape
import matplotlib.pyplot as plt
fig, axes = plt.subplots(10, 10, figsize=(8, 8),
                         subplot_kw={'xticks':[], 'yticks':[]},
                         gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
    ax.imshow(digits.images[i], cmap='binary', interpolation='nearest')
    ax.text(0.05, 0.05, str(digits.target[i]),
            transform=ax.transAxes, color='green')
X = digits.data
X.shape
X
y = digits.target
y.shape
y
from sklearn.manifold import Isomap
iso = Isomap(n_components=2)
iso.fit(digits.data)
data_projected = iso.transform(digits.data)
data_projected.shape
plt.scatter(data_projected[:, 0], data_projected[:, 1], c=digits.target,
edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('nipy_spectral', 10))
plt.colorbar(label='digit label', ticks=range(10))
plt.clim(-0.5, 9.5);
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(Xtrain, ytrain)
y_model = model.predict(Xtest)
from sklearn.metrics import accuracy_score
accuracy_score(ytest, y_model)
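# Optional addition (not in the original notebook): a per-class precision/recall/F1 breakdown
# complements the single accuracy number.
from sklearn.metrics import classification_report
print(classification_report(ytest, y_model))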
from sklearn.metrics import confusion_matrix
sns.set_context("notebook", font_scale=1.7)
mat = confusion_matrix(ytest, y_model)
sns.heatmap(mat, square=True, annot=True, cbar=False)
plt.xlabel('predicted value')
plt.ylabel('true value');
fig, axes = plt.subplots(10, 10, figsize=(8, 8),
                         subplot_kw={'xticks':[], 'yticks':[]},
                         gridspec_kw=dict(hspace=0.1, wspace=0.1))
test_images = Xtest.reshape(-1, 8, 8)
for i, ax in enumerate(axes.flat):
    ax.imshow(test_images[i], cmap='binary', interpolation='nearest')
    ax.text(0.05, 0.05, str(y_model[i]),
            transform=ax.transAxes,
            color='green' if (ytest[i] == y_model[i]) else 'red')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Stirling array $\mathcal{S}$
Step2: power function
Step3: inverse function
Step4: sqrt function
Step5: expt function
Step6: log function
Step7: sin function
Step8: cos function
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from sympy.abc import n, i, N, x, lamda, phi, z, j, r, k, a, alpha
from commons import *
from matrix_functions import *
from sequences import *
import functions_catalog
init_printing()
from sympy.functions.combinatorial.numbers import stirling
m=8
S2 = define(let=Symbol(r'\mathcal{{S}}_{{ {} }}'.format(m)),
be=Matrix(m, m, lambda n,k: stirling(n,k, kind=2)))
S2 # this version is the pure one
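# Quick sanity check (added, not in the original notebook): Stirling numbers of the second kind
# satisfy the recurrence S(n, k) = k*S(n-1, k) + S(n-1, k-1); verify it on the classical
# definition used above.
assert all(stirling(n, k, kind=2) == k*stirling(n - 1, k, kind=2) + stirling(n - 1, k - 1, kind=2)
           for n in range(1, m) for k in range(1, n + 1))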
S2 = define(let=Symbol(r'\mathcal{{S}}_{{ {} }}'.format(m)),
be=Matrix(m, m, riordan_matrix_exponential(
riordan_matrix_by_convolution(d=Eq(Function('d')(z), exp(z)),
h=Eq(Function('h')(z), exp(z)-1),
dim=m))))
S2
inspect(S2.rhs)
production_matrix(S2.rhs)
eigendata = spectrum(S2)
eigendata
data, eigenvals, multiplicities = eigendata.rhs
Phi_poly = Phi_poly_ctor(deg=m-1)
Phi_poly
Phi_polynomials = component_polynomials(eigendata, early_eigenvals_subs=True)
Phi_polynomials
cmatrices = component_matrices(S2, Phi_polynomials)
cmatrices
f_power, g_power, G_power = functions_catalog.power(eigendata, Phi_polynomials)
S2_power = G_power(S2)
S2_power
define(S2_power.lhs, S2_power.rhs.applyfunc(factor)) # factored
S2_power.rhs[:,0]
assert (S2.rhs**r).applyfunc(simplify) == S2_power.rhs
inspect(S2_power.rhs)
production_matrix(S2_power.rhs).applyfunc(factor)
f_inverse, g_inverse, G_inverse = functions_catalog.inverse(eigendata, Phi_polynomials)
S2_inverse = G_inverse(S2)
S2_inverse, G_inverse(S2_inverse)
inspect(S2_inverse.rhs)
production_matrix(S2_inverse.rhs)
assert S2_inverse.rhs*S2.rhs == Matrix(m, m, identity_matrix())
assert S2_inverse.rhs == S2_power.rhs.subs({r:-1})
f_sqrt, g_sqrt, G_sqrt = functions_catalog.square_root(eigendata, Phi_polynomials)
S2_sqrt = G_sqrt(S2)
S2_sqrt
inspect(S2_sqrt.rhs)
production_matrix(S2_sqrt.rhs)
inspect(S2_sqrt.rhs)
production_matrix(S2_sqrt.rhs, exp=False)
assert S2_sqrt.rhs == S2.rhs**(S(1)/2)
assert S2_sqrt.rhs == S2_power.rhs.subs({r:S(1)/2})
f_exp, g_exp, G_exp = functions_catalog.exp(eigendata, Phi_polynomials)
S2_exp = G_exp(S2)
S2_exp
define(S2_exp.lhs, S2_exp.rhs.applyfunc(factor))
S2_exp1 = define(let=Subs(S2_exp.lhs, alpha, 1), be=S2_exp.rhs.subs({alpha:1}))
S2_exp1
inspect(S2_exp.rhs)
inspect(S2_exp1.rhs)
f_log, g_log, G_log = functions_catalog.log(eigendata, Phi_polynomials)
S2_log = G_log(S2)
S2_log
inspect(S2_log.rhs[1:,:-1])
production_matrix(S2_log.rhs[1:,:-1])
f_sin, g_sin, G_sin = functions_catalog.sin(eigendata, Phi_polynomials)
S2_sin = G_sin(S2)
S2_sin
f_cos, g_cos, G_cos = functions_catalog.cos(eigendata, Phi_polynomials)
S2_cos = G_cos(S2)
S2_cos
assert (S2_sin.rhs**2 + S2_cos.rhs**2).applyfunc(trigsimp) == Matrix(m, m, identity_matrix())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Above is the structure of the three sensor classes from the facade-pattern example. Analysing the business logic carefully, the alarm, the water sprinkler and the emergency dialer all react by "observing" the state of the smoke sensor. Therefore all three of them are observers, while the smoke sensor is the observed subject. Based on this analysis, we factor the common behaviour of the three classes out into an "Observer" class and build the observed subject.
Step2: The Observer class defines an update interface; if the observed subject has many states, or each concrete observer has many methods, richer control can be achieved by passing parameters through update.
Step3: The observed subject first adds each observer to its observer list; when something happens, it notifies every observer through notifyAll.
|
<ASSISTANT_TASK:>
Python Code:
class AlarmSensor:
    def run(self):
        print("Alarm Ring...")

class WaterSprinker:
    def run(self):
        print("Spray Water...")

class EmergencyDialer:
    def run(self):
        print("Dial 119...")
class Observer:
    def update(self):
        pass
class AlarmSensor(Observer):
    def update(self, action):
        print("Alarm Got: %s" % action)
        self.runAlarm()
    def runAlarm(self):
        print("Alarm Ring...")

class WaterSprinker(Observer):
    def update(self, action):
        print("Sprinker Got: %s" % action)
        self.runSprinker()
    def runSprinker(self):
        print("Spray Water...")

class EmergencyDialer(Observer):
    def update(self, action):
        print("Dialer Got: %s" % action)
        self.runDialer()
    def runDialer(self):
        print("Dial 119...")
class Observed:
    observers = []
    action = ""
    def addObserver(self, observer):
        self.observers.append(observer)
    def notifyAll(self):
        for obs in self.observers:
            obs.update(self.action)
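# Note (added): because 'observers' is defined at class level, the list is shared by every
# Observed instance; for fully independent subjects it would be safer to initialise
# self.observers = [] inside an __init__ method.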
class smokeSensor(Observed):
    def setAction(self, action):
        self.action = action
    def isFire(self):
        return True
alarm=AlarmSensor()
sprinker=WaterSprinker()
dialer=EmergencyDialer()
smoke_sensor=smokeSensor()
smoke_sensor.addObserver(alarm)
smoke_sensor.addObserver(sprinker)
smoke_sensor.addObserver(dialer)
if smoke_sensor.isFire():
    smoke_sensor.setAction("On Fire!")
    smoke_sensor.notifyAll()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nuist', 'sandbox-3', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='ll'>2.3 Which data to use?</a>
Step2: <a id='god'>3. Feature Engineering</a>
Step3: i saved a backup here(train_driver.csv which contains driver engaged)
Step4: <a id='ot2'>3.2 Backbone of my Analysis(Feature Engineering)</a>
Step5: Processing date
Step6: <a id='ot3'>3.3 Exploring hidden hints and making features.</a>
Step7: This is the one of the most shocking observation here, as you can see there is very high probability of getting a stockout during between 11-25 and 41-55 seconds of every minute.
Step8: you can see that is somewhat perfect normal
Step9: Inference
Step10: <a id='ot4'>3.4 Making Features using order file.</a>
Step11: Getting day time features for the order_file.
Step12: <a id='nlp'>4.Preprocessing the data. </a>
Step13: Authors of the data have very cleverly organised data by only removing the data of 2nd Jan to 12th Jan and remaining data is now used for the predicting the orders per hour,order per week and orders per day-hour combinaiton
Step14: correct the weekday
Step15: <a id='ot5'>3.5 Aggregate features on order file.</a>
Step16: <a id='nlp'>4.Making Model. </a>
Step17: using RandomForest model
Step18: <a id='nlp2'>4.2 Applying the RandomForestclassifier</a>
Step19: Feature Importance chart
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
train=pd.read_csv('post_stockout_train_candidate.csv',parse_dates=['time_stamp_utc'])
pre_stock = pd.read_csv('pre_stockout_train_candidate.csv',parse_dates=['time_stamp_utc'])
order =pd.read_csv('pre_and_post_orders_data_train.csv')
order_test = pd.read_csv('test/pre_orders_data_test.csv',parse_dates=['device_acknowledge_at'])
pre_stock_test =pd.read_csv('test/pre_stockout_test_candidate.csv')
test =pd.read_csv('test/post_stockout_test_candidate.csv')
driver_log=pd.read_csv('pre_and_post_driver_log_train_candidate.csv')
driver_log_test=pd.read_csv('test/pre_driver_log_test_candidate.csv')
pd.set_option('display.max_columns', 500)
train.head()
test.head()
pre_stock.head()
pre_stock_test.head()
pre_stock.groupby(by='dt')['stockout'].sum().sort_values(ascending=False)
%matplotlib inline
import matplotlib.pyplot as plt
pre_stock.groupby(by='dt')['stockout'].sum().plot(figsize=(10,4))
plt.xticks(rotation=40)
plt.annotate("1St Jan",xy=(1,8000))
order['created_at']=pd.to_datetime(order.created_at)
order['complete_at']=pd.to_datetime(order.complete_at)
order_test['created_at']=pd.to_datetime(order_test.created_at)
order_test['complete_at']=pd.to_datetime(order_test.complete_at)
order['date_create']=order.created_at.dt.date
order_test['date_create']=order_test.created_at.dt.date
order['unique']=1
pre_stock_test.columns
col = [ 'time_stamp_utc', 'dt', 'Latitude', 'Longitude',
'stockout', 'hour', 'minute', 'second', 'weekday']
alldata =pd.concat([train[col],pre_stock[col],pre_stock_test[col]],axis=0).reset_index(drop=True)
alldata.dt.value_counts().index
alldata =pd.concat([train[col],pre_stock[col],pre_stock_test[col]],axis=0).reset_index(drop=True)
alldata_out = alldata.loc[alldata.dt != '1-Jan-18'].reset_index(drop=True)
alldata_out.shape
df = order.loc[(order.state=='COMPLETE')&(order.created_at.dt.day>29)].reset_index()
df.head()
df2 = order_test.loc[(order_test.state=='COMPLETE')&(order_test.created_at.dt.day>29)].reset_index()
order_test.loc[(order_test.state=='COMPLETE')&(order_test.created_at.dt.day>25)].reset_index().shape
from tqdm import tqdm
train.head()
train=pd.read_csv('train_driver.csv')
test =pd.read_csv('test/post_stockout_test_candidate.csv')
train.head()
train.head()
#train.head()
def upd(train, pre_stock):
    train = train.merge(pd.DataFrame(alldata_out.groupby(by=['weekday'])['stockout'].sum()).reset_index(),on='weekday',how='left',suffixes=('','_week'))
    train = train.merge(pd.DataFrame(alldata_out.groupby(by=['hour'])['stockout'].sum()).reset_index(),on='hour',how='left',suffixes=('','_hour'))
    train = train.merge(pd.DataFrame(alldata_out.groupby(by=['second'])['stockout'].sum()).reset_index(),on='second',how='left',suffixes=('','_second'))
    train.head()
    train = train.merge(pd.DataFrame(alldata_out.groupby(by=['weekday','hour'])['stockout'].sum()).reset_index(),on=['weekday','hour'],how='left',suffixes=('','_week_hour'))
    train = train.merge(pd.DataFrame(alldata_out.groupby(by=['hour','minute'])['stockout'].sum()).reset_index(),on=['hour','minute'],how='left',suffixes=('','_hour_minute'))
    train.fillna(0,inplace=True)
    train = train.merge(pd.DataFrame(alldata_out.groupby(by=['weekday','hour','minute'])['stockout'].sum()).reset_index(),on=['weekday','hour','minute'],how='left',suffixes=('','_hour_week_minute'))
    train = train.merge(pd.DataFrame(pre_stock.groupby(by=['res_id'])['stockout'].sum()).reset_index(),on='res_id',how='left',suffixes=('','_x'))
    train = train.merge(pd.DataFrame(pre_stock.groupby(by=['res_id','hour'])['stockout'].sum()).reset_index(),on=['res_id','hour'],how='left',suffixes=('','_hour_res'))
    train.fillna(0,inplace=True)
    train = train.merge(pd.DataFrame(pre_stock.groupby(by=['res_id','hour','minute'])['stockout'].sum()).reset_index(),on=['res_id','hour','minute'],how='left',suffixes=('','_hour_res_minute'))
    #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
    train = train.merge(pd.DataFrame(pre_stock.groupby(by=['res_id'])['stockout'].count()).reset_index(),on='res_id',how='left',suffixes=('','_countx'))
    train = train.merge(pd.DataFrame(pre_stock.groupby(by=['res_id','hour'])['stockout'].count()).reset_index(),on=['res_id','hour'],how='left',suffixes=('','_counthour_res'))
    train.fillna(0,inplace=True)
    train = train.merge(pd.DataFrame(pre_stock.groupby(by=['res_id','hour','minute'])['stockout'].count()).reset_index(),on=['res_id','hour','minute'],how='left',suffixes=('','_counthour_res_minute'))
    train.fillna(0,inplace=True)
    return train
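# Note (added): upd() enriches a stockout frame with historical aggregates computed from the
# pre-stockout data: total stockouts per weekday / hour / second, per weekday-hour and
# hour-minute combination, plus per-restaurant (res_id) sums and observation counts, all
# merged back on the corresponding keys, with missing combinations filled with 0.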
import datetime

def dat1(X):
    return datetime.datetime.strptime(X, "%d%b%Y:%H:%M:%S")
tm=train.time_stamp_utc.apply(dat1)
tm2=test.time_stamp_utc.apply(dat1)
train =upd(train,pre_stock)
train.head()
test =upd(test,pre_stock_test)
test= test.merge(pd.DataFrame(alldata_out.groupby(by=['weekday'])['stockout'].sum()).reset_index(),on='weekday',how='left',suffixes=('','_week'))
test.head()
import seaborn as sns
%matplotlib inline
import matplotlib.pyplot as plt
plt.figure(figsize=(20,5))
sns.heatmap(pre_stock.pivot_table(index='weekday',columns='minute',values='stockout',aggfunc=sum),linecolor='black')
min_graph=pre_stock.pivot_table(index='weekday',columns='minute',values='stockout',aggfunc=sum)
min_graph
plt.figure(figsize=(20,5))
min_graph.loc[1].plot()
sec_graph=pre_stock.pivot_table(index='weekday',columns='second',values='stockout',aggfunc=sum)
sec_graph
plt.figure(figsize=(20,5))
sns.heatmap(pre_stock.pivot_table(index='weekday',columns='second',values='stockout',aggfunc=sum),linecolor='black')
plt.figure(figsize=(20,5))
sec_graph.loc[1].plot()
a=[]
for i in train.second:
    if i < 15:
        a.append(i)
    elif i < 30:
        a.append(30 - i)
    elif i < 45:
        a.append(i - 30)
    else:
        a.append(60 - i)
train['sec_fun']=a
a=[]
for i in test.second:
    if i < 15:
        a.append(i)
    elif i < 30:
        a.append(30 - i)
    elif i < 45:
        a.append(i - 30)
    else:
        a.append(60 - i)
test['sec_fun']=a
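# Note (added): 'sec_fun' is the distance of the seconds value from the nearest multiple of 30
# (a triangular wave with period 30s), which tracks the 11-25s / 41-55s stockout windows seen
# earlier. A hypothetical vectorized equivalent:
# train['sec_fun'] = np.minimum(train.second % 30, 30 - train.second % 30)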
train.columns
cat_vars =[ 'res_id',
'hour', 'minute', 'second', 'weekday', ]
cont_vars =['Latitude', 'Longitude','stockout_week',
'stockout_hour', 'stockout_second', 'stockout_week_hour',
'stockout_hour_minute', 'stockout_hour_week_minute', 'stockout_x',
'stockout_hour_res', 'stockout_hour_res_minute', 'stockout_countx',
'stockout_counthour_res', 'stockout_counthour_res_minute']
for v in cat_vars: train[v] = train[v].astype('category').cat.as_ordered()
for v in cont_vars: train[v] = train[v].astype('float32')
for v in cat_vars: test[v] = test[v].astype('category').cat.as_ordered()
for v in cont_vars: test[v] = test[v].astype('category').astype('float32')
order_comp=order.loc[order.state=='COMPLETE']
order_comp_test=order_test.loc[order_test.state=='COMPLETE']
order_comp['day']=order_comp.created_at.dt.day
order_comp['hour']=order_comp.created_at.dt.hour
order_comp['weekday']=order_comp.created_at.dt.dayofweek
order_comp['geography']=order_comp.pickup_locality
order_comp_test['day']=order_comp_test.created_at.dt.day
order_comp_test['hour']=order_comp_test.created_at.dt.hour
order_comp_test['weekday']=order_comp_test.created_at.dt.dayofweek
order_comp_test['geography']=order_comp_test.pickup_locality
order_comp['count_order']=1
order_comp_test['count_order']=1
def lower(x):
return x.lower()
order_comp.geography=order_comp.geography.apply(lower)
train.geography=train.geography.apply(lower)
train.head()
order_comp.replace({'hsr layout':'hsr_layout'},inplace=True)
from sklearn.preprocessing import LabelEncoder
le_geo=LabelEncoder()
order_comp.geography=le_geo.fit_transform(order_comp.geography)
train.geography=le_geo.fit_transform(train.geography)
order_comp_test.geography=le_geo.fit_transform(order_comp_test.geography)
test.geography=le_geo.fit_transform(test.geography)
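# Note: fit_transform re-fits the encoder on each frame; because these frames are later merged
# on 'geography', using le_geo.transform() after a single fit would guarantee the integer codes
# agree across frames (left as-is here to preserve the original behaviour).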
train.head()
order_comp.head()
train.weekday=train.weekday-1
test.weekday=test.weekday-1
order_comp.loc[order_comp.day>1].groupby(by=['geography','weekday','hour'],as_index=False).count_order.sum()
order_comp.loc[order_comp.day>1].groupby(by=['weekday'],as_index=False).count_order.sum()
order_comp.loc[order_comp.day>1].groupby(by=['day','hour'],as_index=False).count_order.sum()
order_comp_test.loc[order_comp_test.day>1].groupby(by=['weekday','hour'],as_index=False).count_order.sum()
order_comp_test.loc[order_comp_test.day>1].groupby(by=['weekday'],as_index=False).count_order.sum()
order_comp_test.loc[order_comp_test.day>1].groupby(by=['day','hour'],as_index=False).count_order.sum()
train=train.merge(order_comp.loc[order_comp.day>1].groupby(by=['geography','weekday','hour'],as_index=False).count_order.sum(),on=['geography','weekday','hour']
,how='left')
train=train.merge(order_comp.loc[order_comp.day>1].groupby(by=['geography','weekday'],as_index=False).count_order.sum(),on=['geography','weekday']
,how='left')
train=train.merge(order_comp.loc[order_comp.day>1].groupby(by=['weekday','hour'],as_index=False).count_order.sum(),on=['weekday','hour']
,how='left',suffixes=('','_1'))
train=train.merge(order_comp.loc[order_comp.day>1].groupby(by=['weekday'],as_index=False).count_order.sum(),on=['weekday']
,how='left',suffixes=('','_2'))
#train=train.merge(order_comp.loc[order_comp.day>1].groupby(by=['day','hour'],as_index=False).count_order.sum(),on=['day','hour']
# ,how='left')
train.head()
test=test.merge(order_comp_test.loc[order_comp_test.day>1].groupby(by=['geography','weekday','hour'],as_index=False).count_order.sum(),on=['geography','weekday','hour']
,how='left')
test=test.merge(order_comp_test.loc[order_comp_test.day>1].groupby(by=['geography','weekday'],as_index=False).count_order.sum(),on=['geography','weekday']
,how='left')
test=test.merge(order_comp_test.loc[order_comp_test.day>1].groupby(by=['weekday','hour'],as_index=False).count_order.sum(),on=['weekday','hour']
,how='left',suffixes=('','_1'))
test=test.merge(order_comp_test.loc[order_comp_test.day>1].groupby(by=['weekday'],as_index=False).count_order.sum(),on=['weekday']
,how='left',suffixes=('','_2'))
def dat1(X):
return(datetime.datetime.strptime(X, "%Y-%b-%d %H:%M:%S"))
from dateutil.parser import parse
def date1(x):
return parse(x)
driver_log.login_time=driver_log.login_time.apply(date1)
driver_log.logout_time=driver_log.logout_time.apply(date1)
driver_log_test.login_time=driver_log_test.login_time.apply(date1)
driver_log_test.logout_time=driver_log_test.logout_time.apply(date1)
driver_log_test.head()
(driver_log_test.logout_time-driver_log_test.login_time).dt.seconds.head()
valid =train.loc[train.dt == '31-Jan-18'].reset_index(drop=True)
train_val =train.loc[train.dt != '31-Jan-18'].reset_index(drop=True)
len(test.columns)
len(train.columns)
test.columns
train.columns
col = [ 'Latitude', 'Longitude', 'res_id',
'minute', 'geography',
'stockout_hour', 'stockout_week_hour',
'stockout_hour_minute', 'stockout_hour_week_minute','stockout_x',
'stockout_hour_res', 'stockout_hour_res_minute',
'stockout_counthour_res_minute', 'count_order_x',]
from sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier,BaggingClassifier,AdaBoostClassifier
import xgboost as xgb
X=train[col]
y=train.stockout
train_X=train_val[col]
train_y=train_val.stockout
test_X=valid[col]
test_y=valid.stockout
clf=RandomForestClassifier(n_estimators=30,max_depth=7) #highest_accuracy
#clf=AdaBoostClassifier(base_estimator=clf)
import lightgbm as lgb
from sklearn.tree import ExtraTreeClassifier
#clf = lgb.LGBMClassifier(max_depth=8,n_estimators=1000,random_state=5)
#clf=ExtraTreeClassifier(max_depth=7)
#clf=Dec
#clf=GradientBoostingClassifier(n_estimators=100,max_depth=7)
clf.fit(train_X[col],train_y,)
#a=test_y.copy()
a=np.zeros(test_y.shape)
from sklearn.metrics import accuracy_score,confusion_matrix
print('prediction->',accuracy_score(test_y,clf.predict(test_X[col])))
print('zeroes->',accuracy_score(test_y,a))
confusion_matrix(test_y,clf.predict(test_X[col]))
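# Accuracy alone is flattering here because stockouts are rare; a quick look at precision and
# recall for the positive class (a sketch, reusing the predictions above):
from sklearn.metrics import precision_score, recall_score
pred = clf.predict(test_X[col])
print('precision->', precision_score(test_y, pred))
print('recall   ->', recall_score(test_y, pred))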
valid.loc[valid.stockout==1]
valid.iloc[clf.predict(test_X[col])==1]
test
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
plt.figure(figsize=(20,5))
sns.barplot(col,clf.feature_importances_)
pd.DataFrame(clf.feature_importances_,index=col).sort_values(by=0)
sub=pd.read_csv('test/submission_online_testcase.csv')
clf.fit(X,y)
sub['stockout']=clf.predict(test[X.columns])[:len(sub)]
sub.to_csv('submi/hmsub_allfeat_count.csv',index=None)
sub.stockout.sum()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: I will use seaborn as my visualisation choice, but I want to modify the settings slightly to make the grid and plot area a bit less in-your-face
Step4: Create data and visualise
Step5: We do need to create a matrix of the variables though
Step6: For plotting below I also need a variable covering the x-axis - I'll call this xplot.
Step7: Standard linear regression
Step8: Ridge and LASSO regression
Step9: Here we can clearly see the effect of varying the regularisation parameter.
Step10: So this shows that when you have a lot of variables and not so much data, ridge regression or lasso regression might be very interesting options. The next step would then be to plot the weights. If you extend this example with the code from the URL listed above, you can do this - see it as a bit of homework if you wish.
Step11: You might (fairly enough!) feel that this is unnecessarily discontinuous - it does not look particularly useful like this. Part of the problem is of course that all points in the neighbourhood have equal weight. Thus one possible modification is to calculate a weighted average where the weights are inversely proportional to the distance between $x_i$ and $x$ (you can also pass in a user defined function). I do not like it very much personally.
Step12: Kernel regression
Step13: As usual the bandwidth can be estimated using cross-validation.
Step14: An advantage of this function is the fact that it has compact support (only a finite range in $x$ will contribute to the calculation).
Step15: The two functions have a very similar calling interface and give similar results so I will plot only one
Step16: Ok, that is not that interesting - to really see the advantage of lowess smoothing we need more points - and noisy ones. So here are some with a lot of noise that is very much not Gaussian. I'll call it here with two different bandwidths (given as frac - the fraction of points to use).
Step17: In fact this way to indicate the local trend of the data is so common that the seaborn plotting package provides a direct way to do this
Step20: Ok, but what if you actually wanted to use this for regression to predict values at other points than your training sample? Well, at the moment you either have to move to R - there is a package called locfit in R which is excellent for this kind of work and personally what I use - or write a simple local linear regression yourself, as sketched below.
Step21: Gaussian process regression
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from astroML.linear_model import NadarayaWatson
import statsmodels.api as sm
import scipy.stats as stats
%matplotlib inline
sns.set_context('talk')
sns.set_style('whitegrid', {"grid.color": ".95", 'grid.linestyle': u'-'})
sns.set_palette('colorblind')
def fake_func(x):
return np.sin(x*3.0)*(x-6)**4/1e3 + 0.1*x + 0.04*(x-4.5)**2
def make_fake_data():
Create some data from $y = 0.001*sin(3 x)*(x-6)^4 + 0.1*x + 0.04*(x-4.5)^2$.
np.random.seed(15)
n_samples = 7
x = np.random.uniform(0, 6, 15)
x.sort()
y = fake_func(x)
# And finally add some noise
dy = 1.0/3.0
y = y + np.random.normal(0, dy, len(y))
return x, y, dy
def plot_a_fit(x, y, xest, yest, include_true=False, ax=None, dy=None, dyest=None):
Plot the result of a fit to the fake data. This is put in as a function
to speed things up below.
if ax is None:
ax = plt.gca()
ax.scatter(x, y, 150, marker='.')
if dyest is not None:
# The user provided a confidence interval on the estimates.
# I will use a 2sigma interval.
ax.plot(xest, yest, '-k')
y_low = yest - 1.96*dyest
y_high = yest + 1.96*dyest
ax.fill_between(xest, y_low, y_high, alpha=0.2, color='r')
if dy is not None:
# Data errorbars
ax.errorbar(x, y, dy, fmt='.k', ecolor='gray', markersize=8)
ax.plot(xest, yest, '-', color='#00aaff')
if include_true:
ax.plot(xest, fake_func(xest), '--', color='#999999')
x, y, dy = make_fake_data()
xx = np.linspace(0, 6, 100)
plt.plot(xx, fake_func(xx))
plt.scatter(x, y)
X = x[:, np.newaxis]
xplot = np.linspace(0, 6, 100)
Xplot = xplot[:, np.newaxis]
from astroML.linear_model import LinearRegression
# Create the model
model = LinearRegression()
# Run the fit
res = model.fit(X, y)
# And predict the y-values & print the best-fit coefficients
y_pred = model.predict(X)
yplot = model.predict(Xplot)
print "Fit results: ", res.coef_
plot_a_fit(x, y, xplot, yplot, include_true=True)
from sklearn.linear_model import Ridge, Lasso
# Create the models
ridge_model = Ridge(fit_intercept=True, alpha=0.05)
lasso_model = Lasso(fit_intercept=True, alpha=0.05)
# Run the fit
res_ridge = ridge_model.fit(X, y)
res_lasso = lasso_model.fit(X, y)
# And predict the y-values & print the best-fit coefficients
y_pred_ridge = ridge_model.predict(X)
yplot_ridge = ridge_model.predict(Xplot)
y_pred_lasso = lasso_model.predict(X)
yplot_lasso = lasso_model.predict(Xplot)
print "Ridge: a={0} b={1}\nLASSO: a={2} b={3}".format(res_ridge.intercept_, res_ridge.coef_[0],
res_lasso.intercept_, res_lasso.coef_[0])
plot_a_fit(x, y, xplot, yplot_ridge, include_true=True)
plt.plot(xplot, yplot_lasso, 'r')
def gaussian_basis(x, mu, sigma):
return np.exp(-0.5 * ((x - mu) / sigma) ** 2)
centers = np.linspace(0, 6, 100)
widths = 0.2
Xg = gaussian_basis(x[:, np.newaxis], centers, widths)
# Based on http://www.astroml.org/book_figures/chapter8/fig_rbf_ridge_mu_z.html
classifier = [LinearRegression, Ridge, Lasso]
kwargs = [dict(), dict(alpha=0.5), dict(alpha=0.025)]
labels = ['Linear Regression', 'Ridge Regression', 'Lasso Regression']
fig = plt.figure(figsize=(15, 4))
for i in range(3):
clf = classifier[i](fit_intercept=True, **kwargs[i])
clf.fit(Xg, y)
w = clf.coef_
tmp = gaussian_basis(xplot[:, None], centers, widths)
yplot = clf.predict(tmp)
ax = fig.add_subplot(131+i)
plot_a_fit(x, y, xplot, yplot, include_true=True, ax=ax)
ax.set_title(labels[i])
from sklearn import neighbors
k = 3
knn = neighbors.KNeighborsRegressor(k)
yplot_knn = knn.fit(X, y).predict(Xplot)
plot_a_fit(x, y, xplot, yplot_knn, include_true=True)
plt.title('KNN regression with k={0}'.format(k))
k = 3
knn = neighbors.KNeighborsRegressor(k, weights='distance')
yplot_knn = knn.fit(X, y).predict(Xplot)
plot_a_fit(x, y, xplot, yplot_knn, include_true=True)
plt.title('KNN regression with k={0} and distance weighting'.format(k))
from astroML.linear_model import NadarayaWatson
sigma = 0.6
model = NadarayaWatson('gaussian', np.asarray(sigma))
model.fit(X, y)
yplot_NW = model.predict(Xplot)
plot_a_fit(x, y, xplot, yplot_NW, include_true=True)
plt.title('Kernel regression (Nadaraya-Watson), Gaussian kernel with $\sigma$={0:0.1f}'.format(sigma))
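# The bandwidth above was picked by eye. A simple leave-one-out cross-validation sketch over a
# grid of candidate bandwidths (only the fit/predict interface used above is assumed):
bandwidths = np.linspace(0.2, 2.0, 19)
cv_error = []
for h in bandwidths:
    err = 0.0
    for i in range(len(x)):
        keep = np.arange(len(x)) != i
        m = NadarayaWatson('gaussian', np.asarray(h))
        m.fit(x[keep, None], y[keep])
        err += (m.predict(x[i:i + 1, None])[0] - y[i]) ** 2
    cv_error.append(err / len(x))
print "LOO-CV estimate of the bandwidth:", bandwidths[np.argmin(cv_error)]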
xx = np.linspace(-3,3,1000)
plt.plot(xx, (1-np.abs(xx)**3)**3*(np.abs(xx) <= 1))
plt.xlabel('x')
plt.ylabel('w(x)')
plt.title('The tri-cube function')
from statsmodels.nonparametric.smoothers_lowess import lowess as sm_lowess
import cylowess
c_lowess = cylowess.lowess
res_c = c_lowess(y,x)
res_sm = sm_lowess(y, x)
plot_a_fit(x, y, res_c[:, 0], res_c[:, 1])
plt.title('Example of lowess regression')
x_noisy = np.random.uniform(low = -2*np.pi, high = 2*np.pi, size=500)
y_noisy = np.sin(x_noisy) + stats.t.rvs(2.5, size=len(x_noisy))
# Run lowess smoothing:
res_noisy_0p1 = c_lowess(y_noisy, x_noisy, frac=0.1)
res_noisy_0p3 = c_lowess(y_noisy, x_noisy, frac=0.3)
plt.scatter(x_noisy, y_noisy)
plt.plot(res_noisy_0p1[:, 0], res_noisy_0p1[:, 1], 'r-', label='frac=0.1')
plt.plot(res_noisy_0p3[:, 0], res_noisy_0p3[:, 1], 'g-', label='frac=0.3')
plt.legend()
sns.regplot(x_noisy, y_noisy, lowess=True)
def tricube(t):
The tricube function
return (1-np.abs(t)**3)**3*(np.abs(t) <= 1)
def jb_local_linreg(x, y, x_out, bandwidth, frac=0.1):
Calculate a local linear regression from a dataset.
# The total number of points.
N = len(x)
# The output array
y_out = np.empty(len(x_out))
y_out.fill(np.nan)
# The number of points to consider for each regression.
N_each = int(np.ceil(frac*N))
# Loop over the x_out data. We here do:
# 1. Calculate the distance to each point and select the
# N_each closest points (there are _much_ faster ways to do this!)
# 2. Calculate the weight for each point.
# 3. Run a weighted linear regression
# 4. Estimate y at this x vlaue.
for i in range(len(x_out)):
this_xout = x_out[i]
# 1. Calculate the distances and choose the N_each closest
dist = np.abs(this_xout-x)
si = np.argsort(dist)
use = si[0:N_each]
x_fit = x[use]
y_fit = y[use]
# 2. Calculate the weights
t = dist[use]/bandwidth
weights = np.exp(-dist[use]/(2.0*bandwidth))#tricube(t)
# 3. Run the weighted linear regression - note that some weights
# might be zero so I will ignore those.
keep, = np.where(weights > 0)
if len(keep) > 2:
x_fit = x_fit[keep]
y_fit = y_fit[keep]
weights = weights[keep]
X = sm.add_constant(x_fit)
mod_wls = sm.WLS(y_fit, X, weights=weights)
res = mod_wls.fit()
p = res.params
y_out[i] = p[0] + p[1]*this_xout
return y_out
yplot = jb_local_linreg(x, y, xplot, 0.3, frac=1.0)
plot_a_fit(x, y, xplot, yplot, include_true=True)
plt.title('LOCFIT - sort of ')
from sklearn.gaussian_process import GaussianProcess
gp = GaussianProcess(corr='squared_exponential', theta0=0.1, thetaL=1e-2,
thetaU=1, normalize=False, nugget=(dy/y)**2, random_start=1)
g = gp.fit(X, y)
y_pred_GP, MSE = gp.predict(Xplot, eval_MSE=True)
sigma = np.sqrt(MSE)
plot_a_fit(x, y, xplot, y_pred_GP, dy=dy, dyest=sigma, include_true=True)
plt.title('Gaussian Process Regression - 2$\sigma$ band')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the above example, "Manish Gupta" is a sequence of characters, and the for loop traverses that sequence character by character. Also note that we end the print statement with a space instead of a new line using the end= option.
Step2: Nested loops
Step3: NOTE
Step4: for loop with a list
Step5: we can also have conditions where multiple values are returned every iteration.
Step6: for loop with dictionary.
Step7: Traversing the keys
Step8: Uses of forloops
Step9: NOTE
Step10: NOTE
Step11: Break
Step12: Continue Statement
Step13: The else in for
Step14: Usecases for else
|
<ASSISTANT_TASK:>
Python Code:
for x in "Manish":
print(x, end=" ")
# Output: range(0, 10)
print(range(10))
# Output: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(list(range(10)))
# Output: [2, 3, 4, 5, 6, 7]
print(list(range(2, 8)))
# Output: [2, 5, 8, 11, 14, 17]
print(list(range(2, 20, 3)))
print(list(range(20, 2, -3)))
print(dir(range(10)))
# Sum 0 to 99
s = 0
for x in range(30, 1, -5):
print(x)
s = s + x
print("sum of 30 to 1 with steps -5 is", s)
# Sum 0 to 99
s = 0
for x in range(30, 1, -5):
print(x)
s += x
print("sum of 30 to 1 with steps -5 is", s)
for x in range(1, 6):
for y in range(1, x+1):
print(x, y)
for x in range(1, 6):
for x in range(1, x+1):
print(x, x)
cols = ["Red", "Green", "Yellow", "White"]
for color in cols:
print(color)
cols = ["Red", "Green", "Yellow", "White"]
for color in cols:
print(color)
else:
print(" ~~~~ Done ~~~~")
# Advance code, to be used after learning slicing. Please use instead the other code
# for x in "Manish Gupta"[::-1]:
# print(x, end=" ")
reverse_text = ""
for char in "Manish Gupta":
reverse_text = char + reverse_text
print(reverse_text)
x_test = [[1,2],[3,4],[5,6]]
for x in x_test:
print(x)
a = x[0]
b = x[1]
print (a, b)
x_test = [[1,2],[3,4],[5,6], [7,8,9]]
for x in x_test:
print(x)
x_test = [[1,2],[3,4],[5,6], [7,8]]
for x, y in x_test:
print(x, y)
x_test = [[1,2],[3,4],[5,6], [7,8,9]]
for x in x_test:
print(x)
a = x[0]
b = x[1]
print (a, b)
color = {"c1": "Red", "c2": "Green", "c3": "Orange"}
for value in color.values():
print(value)
color = {"c1": "Red", "c2": "Green", "c3": "Orange"}
for col in color:
print(col, color[col])
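# The same traversal using .items(), which yields (key, value) pairs directly:
color = {"c1": "Red", "c2": "Green", "c3": "Orange"}
for key, value in color.items():
    print(key, value)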
color = {"c1": "Red", "c2": "Green", "c3": "Orange"}
for value in color.values():
if(value=="Green"):
break
print(value)
else:
print("Done")
color = {"c1": "Red", "c2": "Green", "c3": "Orange"}
for value in color.values():
if(value=="Green"):
continue
print(value)
else:
print("Done")
# Sum 0 to 99
s = 0
x = 1
while x < 100:
s = s + x
x = x + 1
else:
print("!!! Hurry Hurry !!!")
print(x)
print ("Sum of 0 to 99", s)
# Sum 0 to 99
s = 0
x = 1
while x < 100:
s += x
x += 1
else:
print("!!! Hurry Hurry !!!")
print(x)
print ("Sum of 0 to 99", s)
while x < 0:
print("Hello")
else:
print("Sorry")
# while x > 0:
# print("Hello")
# else:
# print("Sorry")
s = 0
x = 100
while x < 100:
s = s + x
x = x + 1
else:
print("x is already equal or greater than 100")
print(s)
x = 1;
s = 0
while (x < 10):
s = s + x
x = x + 1
if (x == 5):
break
else:
print('The sum of first 9 integers : ',s)
print('The sum of', x, 'numbers is :',s)
while 10 != int(input('Enter a passkeyid: ')):
print("Wrong Passkey"),
while int(input('Enter a passkeyid: ')) != 10:
print("Wrong Passkey"),
else:
print("!!! Welcome to the world of Magic !!!")
num_sum = 0
count = 0
for x in range(1, 9):
print(x)
num_sum = num_sum + x
count = count + 1
if count == 5:
break
print("Sum of first ",count,"integers is : ", num_sum)
for x in range(8):
if (x == 3 or x==6):
print("\tSkipping:", x)
continue
print("This should never print")
else:
print(x)
for x in [1, 10, 4]:
if x == 10:
continue
print("Hello", x)
else:
print("processing completed without issues.")
print("-" * 20)
for x in [1, 10, 4]:
if x == 10:
break
print("Hello", x)
else:
print("processing completed without issues.")
def meets_condition(x):
return x==20
data = [10, 20, 33, 42, 44]
for x in data:
if meets_condition(x):
break
else:
print("No one met the condition")
print("lets end it")
def meets_condition(x):
return x==21
data = [10, 20, 33, 42, 44]
for x in data:
if meets_condition(x):
break
else:
print("No one met the condition")
print("lets end it")
print(list(range(2, 4)))
print(4%2)
# Hand trace of the prime-checking loop below:
# n -> 2: x ranges over []          (no candidate factors to check)
# n -> 3: x -> 2, test 3 % 2        -> prime number
# n -> 4: x ranges over [2, 3], test 4 % 2
# n -> 5: x ranges over [2, 3, 4]
for n in [2, 3, 4, 5, 6, 7, 8, 9]:
for x in range(2, n):
if n % x == 0:
print(n, 'equals', x, '*', n/x)
break
else:
# loop fell through without finding a factor
print(n, 'is a prime number')
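# A slightly faster variant (a sketch): a composite n always has a factor no larger than sqrt(n),
# so the inner loop only needs to run up to int(n ** 0.5).
for n in [2, 3, 4, 5, 6, 7, 8, 9]:
    for x in range(2, int(n ** 0.5) + 1):
        if n % x == 0:
            print(n, 'equals', x, '*', n // x)
            break
    else:
        print(n, 'is a prime number')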
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
<ASSISTANT_TASK:>
Python Code:
def list_of_chars(list_chars):
# TODO: Implement me
pass
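# One possible implementation (a sketch) that satisfies the tests below: reverse the list
# in place with two pointers; None passes through unchanged.
def list_of_chars(list_chars):
    if list_chars is None:
        return None
    left, right = 0, len(list_chars) - 1
    while left < right:
        list_chars[left], list_chars[right] = list_chars[right], list_chars[left]
        left += 1
        right -= 1
    return list_chars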
# %load test_reverse_string.py
from nose.tools import assert_equal
class TestReverse(object):
def test_reverse(self):
assert_equal(list_of_chars(None), None)
assert_equal(list_of_chars(['']), [''])
assert_equal(list_of_chars(
['f', 'o', 'o', ' ', 'b', 'a', 'r']),
['r', 'a', 'b', ' ', 'o', 'o', 'f'])
print('Success: test_reverse')
def main():
test = TestReverse()
test.test_reverse()
if __name__ == '__main__':
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functions for graph modification
Step2: Functions for translating a graph to node and edge lists
Step3: Let's start with an empty graph
Step4: And then populate it with the Graphistry team members and some of thier relationships
Step5: Now, let's convert our graph database to a pandas DataFrame, so it can be uploaded into our tool
Step6: And color the nodes based on their "type" property
Step7: Finally, let's vizualize the results!
|
<ASSISTANT_TASK:>
Python Code:
import asyncio
import aiogremlin
# Create event loop and initialize gremlin client
loop = asyncio.get_event_loop()
client = aiogremlin.GremlinClient(url='ws://localhost:8182/', loop=loop) # Default url
@asyncio.coroutine
def add_vertex_routine(name, label):
yield from client.execute("graph.addVertex(label, l, 'name', n)", bindings={"l":label, "n":name})
def add_vertex(name, label):
loop.run_until_complete(add_vertex_routine(name, label))
@asyncio.coroutine
def add_relationship_routine(who, relationship, whom):
yield from client.execute("g.V().has('name', p1).next().addEdge(r, g.V().has('name', p2).next())", bindings={"p1":who, "p2":whom, "r":relationship})
def add_relationship(who, relationship, whom):
loop.run_until_complete(add_relationship_routine(who, relationship, whom))
@asyncio.coroutine
def remove_all_vertices_routine():
resp = yield from client.submit("g.V()")
results = []
while True:
msg = yield from resp.stream.read();
if msg is None:
break
if msg.data is None:
break
for vertex in msg.data:
yield from client.submit("g.V(" + str(vertex['id']) + ").next().remove()")
def remove_all_vertices():
results = loop.run_until_complete(remove_all_vertices_routine())
@asyncio.coroutine
def remove_vertex_routine(name):
return client.execute("g.V().has('name', n).next().remove()", bindings={"n":name})
def remove_vertex(name):
return loop.run_until_complete(remove_vertex_routine(name));
@asyncio.coroutine
def get_node_list_routine():
resp = yield from client.submit("g.V().as('node')\
.label().as('type')\
.select('node').values('name').as('name')\
.select('name', 'type')")
results = [];
while True:
msg = yield from resp.stream.read();
if msg is None:
break;
if msg.data is None:
break;
else:
results.extend(msg.data)
return results
def get_node_list():
results = loop.run_until_complete(get_node_list_routine())
return results
@asyncio.coroutine
def get_edge_list_routine():
resp = yield from client.submit("g.E().as('edge')\
.label().as('relationship')\
.select('edge').outV().values('name').as('source')\
.select('edge').inV().values('name').as('dest')\
.select('source', 'relationship', 'dest')")
results = [];
while True:
msg = yield from resp.stream.read();
if msg is None:
break;
if msg.data is None:
break;
else:
results.extend(msg.data)
return results
def get_edge_list():
results = loop.run_until_complete(get_edge_list_routine())
return results
remove_all_vertices()
add_vertex("Paden", "Person")
add_vertex("Thibaud", "Person")
add_vertex("Leo", "Person")
add_vertex("Matt", "Person")
add_vertex("Brian", "Person")
add_vertex("Quinn", "Person")
add_vertex("Paul", "Person")
add_vertex("Lee", "Person")
add_vertex("San Francisco", "Place")
add_vertex("Oakland", "Place")
add_vertex("Berkeley", "Place")
add_vertex("Turkey", "Thing")
add_vertex("Rocks", "Thing")
add_vertex("Motorcycles", "Thing")
add_relationship("Paden", "lives in", "Oakland")
add_relationship("Quinn", "lives in", "Oakland")
add_relationship("Thibaud", "lives in", "Berkeley")
add_relationship("Matt", "lives in", "Berkeley")
add_relationship("Leo", "lives in", "San Francisco")
add_relationship("Paul", "lives in", "San Francisco")
add_relationship("Brian", "lives in", "Oakland")
add_relationship("Paden", "eats", "Turkey")
add_relationship("Quinn", "cooks", "Turkey")
add_relationship("Thibaud", "climbs", "Rocks")
add_relationship("Matt", "climbs", "Rocks")
add_relationship("Brian", "rides", "Motorcycles")
add_vertex("Graphistry", "Work")
add_relationship("Paden", "works at", "Graphistry")
add_relationship("Thibaud", "works at", "Graphistry")
add_relationship("Matt", "co-founded", "Graphistry")
add_relationship("Leo", "co-founded", "Graphistry")
add_relationship("Paul", "works at", "Graphistry")
add_relationship("Quinn", "works at", "Graphistry")
add_relationship("Brian", "works at", "Graphistry")
import pandas
nodes = pandas.DataFrame(get_node_list())
edges = pandas.DataFrame(get_edge_list())
# Assign different color to each type in a round robin fashion.
# For more information and coloring options please visit: https://graphistry.github.io/docs/legacy/api/0.9.2/api.html
unique_types = list(nodes['type'].unique())
nodes['color'] = nodes['type'].apply(lambda x: unique_types.index(x) % 11)
nodes
edges
import graphistry
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
g = graphistry.bind(source="source", destination="dest", node='name', point_color='color', edge_title='relationship')
g.plot(edges, nodes)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps
Step2: Inline Question #1
Step3: You should expect to see approximately 27% accuracy. Now lets try out a larger k, say k = 5
Step5: You should expect to see a slightly better performance than with k = 1.
Step6: Cross-validation
|
<ASSISTANT_TASK:>
Python Code:
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print X_train.shape, X_test.shape
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print dists.shape
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
print 'Good! The distance matrices are the same'
else:
print 'Uh-oh! The distance matrices are different'
# Let's compare how fast the implementations are
def time_function(f, *args):
Call a function f with args and return the time (in seconds) that it took to execute.
import time
tic = time.time()
f(*args)
toc = time.time()
return toc - tic
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print 'Two loop version took %f seconds' % two_loop_time
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print 'One loop version took %f seconds' % one_loop_time
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print 'No loop version took %f seconds' % no_loop_time
# you should see significantly faster performance with the fully vectorized implementation
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
################################################################################
# TODO: #
# Split up the training data into folds. After splitting, X_train_folds and #
# y_train_folds should each be lists of length num_folds, where #
# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #
# Hint: Look up the numpy array_split function. #
################################################################################
indices = np.arange(5000)
np.random.shuffle(indices)
fold_indices = np.array_split(indices, num_folds)
X_train_folds = [X_train[i] for i in fold_indices]
y_train_folds = [y_train[i] for i in fold_indices]
################################################################################
# END OF YOUR CODE #
################################################################################
# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
from collections import defaultdict
k_to_accuracies = defaultdict(list)
################################################################################
# TODO: #
# Perform k-fold cross validation to find the best value of k. For each #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #
# where in each case you use all but one of the folds as training data and the #
# last fold as a validation set. Store the accuracies for all fold and all #
# values of k in the k_to_accuracies dictionary. #
################################################################################
for f in range(num_folds):
X_train_f = np.concatenate(X_train_folds[:f] + X_train_folds[f + 1:])
y_train_f = np.concatenate(y_train_folds[:f] + y_train_folds[f + 1:])
X_validate_f = X_train_folds[f]
y_validate_f = y_train_folds[f]
classifier = KNearestNeighbor()
classifier.train(X_train_f, y_train_f)
for k in k_choices:
y_validate_pred = classifier.predict(X_validate_f, k=k)
num_correct = np.sum(y_validate_pred == y_validate_f)
accuracy = float(num_correct) / y_validate_f.shape[0]
k_to_accuracies[k].append(accuracy)
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
for accuracy in k_to_accuracies[k]:
print 'k = %d, accuracy = %f' % (k, accuracy)
# plot the raw observations
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
std_order = accuracies_std.argsort()
(np.array(k_choices)[std_order], accuracies_std[std_order], std_order)
mean_order = accuracies_mean.argsort()[::-1]
(np.array(k_choices)[mean_order], accuracies_std[mean_order], mean_order)
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 5
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Decoding in sensor space using a LogisticRegression classifier
Step3: Let's do the same on EEG data using a scikit-learn pipeline
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Romain Trachel <trachelr@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import Vectorizer, get_coef
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
# import a linear classifier from mne.decoding
from mne.decoding import LinearModel
print(__doc__)
data_path = sample.data_path()
sample_path = data_path / 'MEG' / 'sample'
raw_fname = sample_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = sample_path / 'sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.4
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(.5, 25, fir_design='firwin')
events = mne.read_events(event_fname)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
decim=2, baseline=None, preload=True)
del raw
labels = epochs.events[:, -1]
# get MEG and EEG data
meg_epochs = epochs.copy().pick_types(meg=True, eeg=False)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
clf = LogisticRegression(solver='liblinear') # liblinear is faster than lbfgs
scaler = StandardScaler()
# create a linear model with LogisticRegression
model = LinearModel(clf)
# fit the classifier on MEG data
X = scaler.fit_transform(meg_data)
model.fit(X, labels)
# Extract and plot spatial filters and spatial patterns
for name, coef in (('patterns', model.patterns_), ('filters', model.filters_)):
# We fitted the linear model onto Z-scored data. To make the filters
# interpretable, we must reverse this normalization step
coef = scaler.inverse_transform([coef])[0]
# The data was vectorized to fit a single model across all time points and
# all channels. We thus reshape it:
coef = coef.reshape(len(meg_epochs.ch_names), -1)
# Plot
evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(title='MEG %s' % name, time_unit='s')
X = epochs.pick_types(meg=False, eeg=True)
y = epochs.events[:, 2]
# Define a unique pipeline to sequentially:
clf = make_pipeline(
Vectorizer(), # 1) vectorize across time and channels
StandardScaler(), # 2) normalize features across trials
LinearModel( # 3) fits a logistic regression
LogisticRegression(solver='liblinear')
)
)
clf.fit(X, y)
# Extract and plot patterns and filters
for name in ('patterns_', 'filters_'):
# The `inverse_transform` parameter will call this method on any estimator
# contained in the pipeline, in reverse order.
coef = get_coef(clf, name, inverse_transform=True)
evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)
evoked.plot_topomap(title='EEG %s' % name[:-1], time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We show here the network being considered in this example
Step2: Query Overview
Step3: Evaluate the query on a few Morse Graph Indices
Step4: How many matches for each type of query?
Step5: Print the list of Morse graph indices which satisfy the monostable query.
Step6: Directly verify that all returned matches satisfy the corresponding query
Step7: SingleGeneQuery
Step8: For a single gene query, the queries are graphs isomorphic to the factor graph, and the number of such queries corresponds to the number of "reduced parameter indices". This will be explained in more depth shortly. To help explain this we first examine the following computation
Step9: Importantly, this factorization corresponds to a way to convert a parameter index (an integer) into a pair of integers, one in [0,50) and the other in [0,108), which we call the gene parameter index and the reduced parameter index. The manner in which this is done is technical and has to do with how the integers encode combinatorial parameters using a mixed-radix system. Roughly speaking, the gene parameter index is obtained by extracting a digit from the mixed-radix representation of the parameter index, and what remains after removing the digit entirely (not just setting it to 0) is the reduced parameter index. This process can be reversed as well, so both the original parameter index and the (GeneParameterIndex, ReducedParameterIndex) pair are equivalent representations. What the prepare step we just accomplished did was create a table with the database's information which sorted the information by ReducedParameterIndex first and GeneParameterIndex second. (The original database sorts by ParameterIndex.)
Step10: The query above returns the "MorseGraphIndex" which can be used with the database to retrieve the Morse graph. However we might only want to know if the Morse graph has a certain property. For example, we might want to know if it has 1 minimal node, or multiple (2 or more) minimal nodes. We create a function which takes a "MorseGraphIndex" and returns True if the associated Morse graph has multiple minimal nodes and False otherwise.
Step11: Features of the graph query
Step12: Testing the query result
Step13: Similarly, our query result indicates parameter index 2180 corresponds to Morse Graph 84, which is colored red, indicating it does exhibit multistability. We check this as well
Step14: SingleFixedPointQuery, DoubleFixedPointQuery
Step15: Using these "bounds" variables to represent groups of domains, we can use query functions which ask for the collection of morse graphs which have an "FP" node labelled with a domain in those bounds. For example, to find the set of Morse Graph indices corresponding to fixed points in the region specified by "bounds110"
Step16: Find set of Morse Graph indices corresponding to fixed points in the region specified by "bounds210"
Step17: Find set of Morse Graph indices corresponding to fixed points in the region specified by "bounds311"
Step18: Find the set of Morse Graph indices with both a fixed point in 1,1,0 and a fixed point in 3,1,1
Step19: Queries on Graph Properties
Step20: Q1. Is the minimal node red?
Step21: Q2. Is the maximal node yellow?
Step22: Q3(a). Is there an essential green node?
Step23: List all essential green nodes
Step24: Q3(b). Does every path from min to max pass through green?
Step25: No, they don't. What percentage of them pass through green?
Step26: Q3(b)'. Does every path from min to max pass through a blue vertex?
Step27: Which means there are zero paths from minimum to maximum in the subgraph where we take out the blue vertices, correct?
Step28: Q3(c). Is there an intermediate (neither max nor min) green node?
Step29: Visualizing the Essential parameter nodes
Step30: InducibilityQuery
Step31: HysteresisQuery
|
<ASSISTANT_TASK:>
Python Code:
from DSGRN import *
database = Database("querytest.db")
database.parametergraph.dimension()
database
print(database.network.specification())
monostable_query_object = MonostableQuery(database)
bistable_query_object = BistableQuery(database)
multistable_query_object = MultistableQuery(database)
monostable_query_object(0)
monostable_query_object(1)
print([len(monostable_query_object.matches()), len(bistable_query_object.matches()), len(multistable_query_object.matches())])
print(monostable_query_object.matches())
all( monostable_query_object(mgi) for mgi in monostable_query_object.matches() )
database.DrawMorseGraph(131)
single_gene_query = SingleGeneQuery(database, "X1")
N = single_gene_query.number_of_gene_parameters()
M = single_gene_query.number_of_reduced_parameters()
L = database.parametergraph.size()
print([N, M, N*M, L])
graph = single_gene_query(43) # 43 is a "reduced parameter index"
graph.data
graph
# Create a function which tells us if each vertex has the multistable property:
is_multistable = MultistableQuery(database)
# Change the coloring method of the graph to check for multistability:
graph.color = lambda v : "red" if is_multistable(v) else "blue"
# Display the graph:
graph
database.DrawMorseGraph(18)
database.DrawMorseGraph(84)
bounds110 = {"X1":1,"X2":1,"X3":0} # Domain 1,1,0
bounds210 = {"X1":[2,2],"X2":[1,1],"X3":[0,1]} # Domain 2,1,0 or Domain 2,1,1
bounds311 = {"X1":[3,3],"X2":[1,1],"X3":[1,1]} # Domain 3,1,1
matches110 = SingleFixedPointQuery(database, bounds110).matches()
matches210 = SingleFixedPointQuery(database, bounds210).matches()
matches311 = SingleFixedPointQuery(database, bounds311).matches()
matches_both = DoubleFixedPointQuery(database, bounds110,bounds311).matches()
len(matches110), len(matches210), len(matches311), len(matches_both)
matches_both
graph.color = lambda v : "green" if graph.mgi(v) in matches_both else ("blue" if graph.mgi(v) in matches210 else ( "yellow" if graph.mgi(v) in matches311 else "red"))
graph
minimum_gpi = 0
maximum_gpi = len(graph.vertices) - 1
graph.color(minimum_gpi) == "red"
graph.color(maximum_gpi) == "yellow"
any( graph.essential(v) and graph.color(v) == "green" for v in graph.vertices)
[v for v in graph.vertices if graph.essential(v) and graph.color(v) == "green"]
predicate = lambda v : graph.color(v) == "green"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
subgraph = graph.subgraph(lambda v : not predicate(v))
number_missing_green = subgraph.numberOfPaths(minimum_gpi,maximum_gpi)
total_number = graph.numberOfPaths(minimum_gpi,maximum_gpi)
print str((1.0 - float(number_missing_green)/float(total_number))*100.0) + "%"
predicate = lambda v : graph.color(v) == "blue"
graph.unavoidable(minimum_gpi,maximum_gpi,predicate)
subgraph = graph.subgraph(lambda v : graph.color(v) != "blue")
if subgraph.numberOfPaths(minimum_gpi,maximum_gpi) == 0: print("Correct.")
any( v != minimum_gpi and v != maximum_gpi and graph.color(v) == "green" for v in graph.vertices)
graph.color = lambda v : "red" if graph.essential(v) else "green"
graph
inducibility_query_object = InducibilityQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(0, inducibility_query_object.GeneQuery.number_of_reduced_parameters())
[ inducibility_query_object(rpi) for rpi in reduced_parameters ][0:10]
hysteresis_query_object = HysteresisQuery(database, "X1", bounds110, bounds311)
reduced_parameters = range(0, hysteresis_query_object.GeneQuery.number_of_reduced_parameters())
[ hysteresis_query_object(rpi) for rpi in reduced_parameters ][0:10]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This is actually a 2D dataset (it would be the unrolled S), but it has been embedded in a 3D space, in such a way that PCA is not able to recover the original dataset
Step2: As you can see, being a linear method, PCA has found two directions of maximum variability, but it has lost a great deal of the variance in the data by projecting the S directly onto a hyperplane. Manifold learning algorithms, available in the sklearn.manifold package, aim to discover the manifold that contains the data (in this case, a two-dimensional manifold). Let us apply, for example, the Isomap method
Step3: Manifold learning for the digits dataset
Step4: If we visualize the dataset using a linear technique such as PCA, we already saw that we could get some information about the structure of the data
Step5: However, we can use non-linear techniques, which in this case will give us a better visualization. Let us apply the t-SNE manifold learning method
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import make_s_curve
X, y = make_s_curve(n_samples=1000)
from mpl_toolkits.mplot3d import Axes3D
ax = plt.axes(projection='3d')
ax.scatter3D(X[:, 0], X[:, 1], X[:, 2], c=y)
ax.view_init(10, -60);
from sklearn.decomposition import PCA
X_pca = PCA(n_components=2).fit_transform(X)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y);
from sklearn.manifold import Isomap
iso = Isomap(n_neighbors=15, n_components=2)
X_iso = iso.fit_transform(X)
plt.scatter(X_iso[:, 0], X_iso[:, 1], c=y);
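# The n_neighbors parameter controls how local the neighbourhood graph is; smaller values follow
# the manifold more tightly, larger values behave more like a linear projection
# (the values below are illustrative):
# X_iso_tight = Isomap(n_neighbors=5, n_components=2).fit_transform(X)
# X_iso_loose = Isomap(n_neighbors=100, n_components=2).fit_transform(X)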
from sklearn.datasets import load_digits
digits = load_digits()
fig, axes = plt.subplots(2, 5, figsize=(10, 5),
subplot_kw={'xticks':(), 'yticks': ()})
for ax, img in zip(axes.ravel(), digits.images):
ax.imshow(img, interpolation="none", cmap="gray")
# Build a PCA model
pca = PCA(n_components=2)
pca.fit(digits.data)
# Transform the digits onto the first two principal components
digits_pca = pca.transform(digits.data)
colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525",
"#A83683", "#4E655E", "#853541", "#3A3120","#535D8E"]
plt.figure(figsize=(10, 10))
plt.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max() + 1)
plt.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max() + 1)
for i in range(len(digits.data)):
# Plot the digits as text
plt.text(digits_pca[i, 0], digits_pca[i, 1], str(digits.target[i]),
color = colors[digits.target[i]],
fontdict={'weight': 'bold', 'size': 9})
plt.xlabel("primera componente principal")
plt.ylabel("segunda componente principal");
from sklearn.manifold import TSNE
tsne = TSNE(random_state=42)
# we use fit_transform instead of fit:
digits_tsne = tsne.fit_transform(digits.data)
plt.figure(figsize=(10, 10))
plt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)
plt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)
for i in range(len(digits.data)):
# actually plot the digits as text instead of using scatter
plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),
color = colors[digits.target[i]],
fontdict={'weight': 'bold', 'size': 9})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Dataset of Wikipedia Edits by User from the Kaggle Data
Step2: Generate Life Tables
Step3: Kaplan Meier Survival Curve based on Days Since First Edit
Step4: Kaplan Meier Survival Curve on Months Since first Edit
Step5: Generate and Describe Covariates
Step6: Generating a Person Period Dataset on Months
Step7: Fitting the Hazard Probabilities in a Person Period Dataset
Step8: Plotting the Hazard Probabilities of our Model
Step9: Plotting the Survival Probabilities of our Model
Step10: Testing a More Parsimonious Model on Month Count
Step11: Although the terms are significant, the fit is dreadful. Let us not speak of this again. With more time, we might investigate the fascinating periodic spikes in hazard probabilities, which might be the result of how we constructed the data, or of some other pattern within the Wikipedia community.
Step12: In this logistic model of hazard probabilities, we do find a positive relationship between reversion rate and the chance of ceasing editing Wikipedia holding month periods constant, in the population of Wikipedians. Contributors with a higher lifetime reversion rate have a greater probability of ceasing editing activity than contributors with a lower revision rate (p<0.001).
Step13: Plotting Survival Probabilities for Prototypical Values of Reversion Rate
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import codecs
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.gofplots import qqplot
from scipy import stats as ss
import csv
from collections import Counter
from collections import defaultdict
import seaborn as sns
from scipy import stats
from dateutil import *
import math
import statsmodels.api as sm
import statsmodels.formula.api as smf
import re
import sys
## FILES TO IMPORT FOR LIFE TABLE ANALYSIS
import lifelines
kmf = lifelines.KaplanMeierFitter()
#naf = lifelines.NelsonAalenFitter()
import csv
cols = None
user_edits = defaultdict(list)
# Data was provided under the Creative Commons Attribution-Share-Alike 3.0 license
# https://creativecommons.org/licenses/by-sa/3.0/us/
with open("wikipedia-validation.tsv") as tsv:
for line in csv.reader(tsv, dialect="excel-tab"):
row = {}
if(cols is None):
cols = line
else:
for i in range(len(cols)):
row[cols[i]] = line[i]
user_edits[row['user_id']].append(row)
def diff_month(d1, d2):
return (d1.year - d2.year)*12 + d1.month - d2.month
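# e.g. for d1 = 2011-03-15 and d2 = 2010-12-01 this returns (2011-2010)*12 + (3-12) = 3 (illustrative dates)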
# This method generates a row with details we're interested in
# that then becomes the dataframe for life table analysis
def generate_user_row(u):
r = {"user_id":u[0]['user_id'],
"edits":len(u),
"reversions":len([x for x in u if int(x['reverted'])==1]),
"namespaces":len(set([x['namespace'] for x in u]))}
sorted_dates = sorted([x['timestamp'] for x in u])
r['earliest_edit'] = parser.parse(sorted_dates[0])
#r['earliest_month'] = int(r['earliest_edit'].strftime("%Y%m"))
r['last_edit'] = parser.parse(sorted_dates[-1])
r['last_month'] = int(r['last_edit'].strftime("%Y%m"))
r['edit_days'] = (r['last_edit'] - r['earliest_edit']).days
r['edit_months'] = diff_month(r['last_edit'], r['earliest_edit'])
return r
def flip(val):
flip={0:1,1:0,None:None}
return flip[val]
user_rows = []
for user in user_edits.keys():
# remove an outlier for pedagogical purposes
if(user_edits[user][0]['user_id']=='526223'):
continue
else:
user_rows.append(generate_user_row(user_edits[user]))
udf = pd.DataFrame(user_rows)
# GENERATE A LIST OF CENSORED RECORDS
# WHO CONTRIBUTED IN THE FINAL RECORDED MONTH
maxmonth = udf.last_month.max()
def censored(last_month):
if last_month == maxmonth:
return 1
return 0
udf['censored'] = udf.last_month.map(censored)
udf['dropout'] = udf.censored.map(flip)
f, ax = plt.subplots(1,1, figsize=(10,6))
# fit the
kmf.fit(udf.edit_days,udf.dropout)
kmf.plot(ax=ax)
plt.suptitle("Sample Survival Probabilities for Continuing to Edit Wikipedia After First Edit",
fontsize="20")
plt.title("From the Kaggle Wikipedia Participation Challenge Validation Set (n = %(n)d users)" %{"n":len(user_rows)},
fontsize="14")
ax.set_xlabel("Day periods since first edit", fontsize="18")
ax.set_ylabel("Probability of Continuing to Edit",fontsize="18")
plt.show()
f, ax = plt.subplots(1,1, figsize=(10,6))
# fit the
kmf.fit(udf.edit_months,udf.dropout)
kmf.plot(ax=ax)
plt.suptitle("Sample Survival Probabilities for Continuing to Edit Wikipedia After First Edit",
fontsize="20")
plt.title("From the Kaggle Wikipedia Participation Challenge Validation Set (n = %(n)d users)" %{"n":len(user_rows)},
fontsize="14")
ax.set_xlabel("Month periods since first edit", fontsize="18")
ax.set_ylabel("Probability of Continuing to Edit",fontsize="18")
plt.show()
#GENERATE COVARIATES
def rrate(row):
return float(row.reversions)/float(row.edits)
udf['log_reversions'] = udf.reversions.map(math.log1p)
udf['log_edits'] = udf.edits.map(math.log1p)
udf['reversion_rate'] = udf.apply(rrate, axis=1)
udf['log_reversion_rate'] = udf.reversion_rate.map(lambda x: math.log1p(x*100.))
plt.xlabel("log(reversions per user)", fontsize="12")
plt.ylabel("log(edits per user)", fontsize="12")
plt.scatter(udf.log_reversions, udf.log_edits)
plt.show()
plt.hist(udf.reversion_rate)
plt.title("reversion rate")
plt.show()
plt.hist(udf.log_reversion_rate)
plt.title("log transformed reversion rate")
plt.show()
def gen_person_period(dataframe, period, event):
records = dataframe.to_dict("records")
min_period = int(dataframe[period].min())
person_periods = []
for r in records:
for i in range( int(r[period]) - (min_period - 1)):
ri = r.copy()
ri['PERIOD'] = int(i)+min_period
ri['EVENT'] = int((ri['PERIOD'] == int(ri[period])) and r[event]==1)
person_periods.append(ri)
return pd.DataFrame(person_periods)
ppdf=gen_person_period(udf, "edit_months", "dropout")
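# Quick sanity check (a sketch): the person-period expansion should contain one row per user per
# month period up to and including that user's last observed month.
expected_rows = (udf.edit_months - udf.edit_months.min() + 1).sum()
print len(ppdf), expected_rows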
result = smf.glm(formula = " EVENT ~ C(PERIOD)",
data=ppdf,
family=sm.families.Binomial()).fit()
print result.summary()
ppdf['predvals1'] = result.predict()
yvals = []
xvals = list(set(ppdf.PERIOD.tolist()))
for i in xvals:
yvals.append(result.predict(exog={"PERIOD":[i]}))
f, ax = plt.subplots(1,1, figsize=(10,6))
plt.scatter(ppdf.PERIOD, ppdf.predvals1, marker=".")
plt.plot(xvals, yvals, color="r")
plt.title("Fitted Population Hazard Probabilities", fontsize="18")
periods = list(set(ppdf.PERIOD.tolist()))
f, ax = plt.subplots(1,1, figsize=(10,6))
survival = 1.0
yvals = [1.0]
xvals = [0]
for p in periods:
hazard =result.predict(exog={"PERIOD":[p]})
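# discrete-time survival recursion: S_t = S_{t-1} * (1 - h_t), written out below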
survival = survival - survival*hazard
yvals.append(survival)
xvals.append(p+1)
line, = ax.plot(xvals, yvals)
plt.suptitle("Fitted Survival Probability of Continuing to Edit Wikipedia beyond a Month Period since first Edit" %{"p":p+1},
fontsize="18")
plt.show()
result = smf.glm(formula = " EVENT ~ PERIOD",
data=ppdf,
family=sm.families.Binomial()).fit()
print result.summary()
yvals = []
xvals = list(set(ppdf.PERIOD.tolist()))
for i in xvals:
yvals.append(result.predict(exog={"PERIOD":[i]}))
f, ax = plt.subplots(1,1, figsize=(10,6))
plt.scatter(ppdf.PERIOD, ppdf.predvals1, marker=".")
plt.plot(xvals, yvals, color="r")
plt.title("Fitted Model of Hazard Probabilities Against Conditional Hazard Probabilities", fontsize="18")
# GENERATE FITTED MODEL
result = smf.glm(formula = " EVENT ~ reversion_rate + C(PERIOD)",
data=ppdf,
family=sm.families.Binomial()).fit()
print(result.summary())
periods = [0,1, 11]
rates = np.linspace(0,1,100)
flatui = ["#9b59b6", "#3498db", "#e74c3c", "#34495e", "#2ecc71"]
f, ax = plt.subplots(1,1, figsize=(6,6))
lines = []
labels = []
i=0
for p in periods:
xvals = rates
yvals = []
for x in xvals:
yvals.append(result.predict(exog={"PERIOD":[p], "reversion_rate":x}))
line, = ax.plot(xvals, yvals, color=sns.color_palette(flatui)[i])
i+=1
lines.append(line)
labels.append("Month Period %(n)d" % {"n":p+1})
plt.suptitle("Fitted Hazard Probability of Ceasing Editing Wikipedia" %{"p":p+1},
fontsize="18")
plt.title("by lifetime Reversion Rate, for different month periods", fontsize="16")
plt.xlabel("Reversion Rate", fontsize="14")
plt.ylabel("Hazard Probability of Stopping out in a Given Month Period")
plt.legend(lines, labels, fontsize="12", loc=2)
plt.show()
rates = [udf.reversion_rate.mean(), 0.75, 0.90]
periods = list(set(ppdf.PERIOD.tolist()))
f, ax = plt.subplots(1,1, figsize=(10,6))
lines = []
labels = []
for r in rates:
survival = 1.0
yvals = [1.0]
xvals = [0]
for p in periods:
hazard =result.predict(exog={"PERIOD":[p], "reversion_rate":r})
survival = survival - survival*hazard
yvals.append(survival)
xvals.append(p+1)
line, = ax.plot(xvals, yvals)
lines.append(line)
labels.append("Reversion Rate: %(r).02f" % {"r":r})
plt.suptitle("Fitted Survival Probability of Ceasing Editing Wikipedia" %{"p":p+1},
fontsize="18")
plt.title("by Month period, for different Reversion Rates", fontsize="16")
plt.legend(lines, labels, fontsize="12", loc=4)
plt.xlabel("Month Period since First Edit", fontsize="14")
plt.ylabel("Fitted Survival Probability", fontsize="12")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Add nodes and edges
Step2: In a Bayesian network, each node has an associated CPD (conditional probability distribution).
Step3: To check the consistency of the model and associated CPDs
Step4: if an influence can flow in a trail in a network, it is known as an active trail
Step5: You can query the network as follows
Step6: Direct Causal Influence
Step7: Indirect Causal Influence
|
<ASSISTANT_TASK:>
Python Code:
from pgmpy.models import BayesianModel
student_model = BayesianModel()
student_model.add_nodes_from(['difficulty', 'intelligence', 'grade', 'sat', 'letter'])
student_model.nodes()
student_model.add_edges_from([('difficulty', 'grade'), ('intelligence', 'grade'), ('intelligence', 'sat'), ('grade', 'letter')])
student_model.edges()
from pgmpy.factors import TabularCPD
#TabularCPD?
cpd_difficulty = TabularCPD('difficulty', 2, [[0.6], [0.4]])
cpd_intelligence = TabularCPD('intelligence', 2, [[0.7], [0.3]])
cpd_sat = TabularCPD('sat', 2, [[0.95, 0.2],
[0.05, 0.8]], evidence=['intelligence'], evidence_card=[2])
cpd_grade = TabularCPD('grade', 3, [[0.3, 0.05, 0.9, 0.5],
[0.4, 0.25, 0.08, 0.3],
[0.3, 0.7, 0.02, 0.2]],
evidence=['intelligence', 'difficulty'], evidence_card=[2, 2])
cpd_letter = TabularCPD('letter', 2, [[0.1, 0.4, 0.99], [0.9, 0.6, 0.01]], evidence=['grade'], evidence_card=[3])
student_model.add_cpds(cpd_difficulty, cpd_intelligence, cpd_sat, cpd_grade, cpd_letter)
student_model.get_cpds()
print(cpd_difficulty) # 0:easy, 1:hard
print(cpd_intelligence) # 0:low, 1:high
print(cpd_grade) # 0:A, 1:B, 2:C
print(cpd_sat) # 0:low, 1:high
print(cpd_letter) # 0:week, 1:strong
student_model.check_model()
student_model.get_independencies()
student_model.is_active_trail('difficulty', 'intelligence')
student_model.is_active_trail('difficulty', 'intelligence',
observed='grade')
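# A further hedged illustration of d-separation: 'sat' and 'grade' are connected only
# through their common cause 'intelligence', so the trail is active until
# 'intelligence' is observed (expected: True, then False).
student_model.is_active_trail('sat', 'grade')
student_model.is_active_trail('sat', 'grade', observed='intelligence')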
from pgmpy.inference import VariableElimination
student_infer = VariableElimination(student_model)
# marginal prob of grade
probs = student_infer.query(['grade', 'letter'])
print(probs['grade'])
print(probs['letter'])
# probs of grades given knowing nothing about course difficulty and intelligence
print(probs['grade'])
# probs of grades knowing course is hard
prob_grade_hard = student_infer.query(['grade'], {'difficulty':1})
print(prob_grade_hard['grade'])
# probs of grades knowing course is easy, and intelligence is high
prob_grade_easy_smart = student_infer.query(['grade'], {'difficulty':0, 'intelligence':1})
print(prob_grade_easy_smart['grade'])
# probs of letter knowing nothing
print(probs['letter'])
# probs of letter knowing course is difficult
prob_letter_hard = student_infer.query(['letter'], {'difficulty':1})
print(prob_letter_hard['letter'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: pandas is an open source, BSD-licensed library providing high-performance,
Step3: A pandas Series, like a list, doesn't have to be homogeneous.
Step4: The index of a Series can be arbitrary as well.
Step5: Multiple Series objects can be clubbed together to make a pandas DataFrame. The pandas DataFrame is similar to the data.frame object in R.
Step6: Think of pandas DataFrames as dicts of Series. Almost all operations that are valid on a Python dictionary will work on a pandas DataFrame.
Step7: Index Objects
|
<ASSISTANT_TASK:>
Python Code:
"""
----------------------------------------------------------------------
Filename : 01_basic_data_structs.py
Date : 12th Dec, 2013
Author : Jaidev Deshpande
Purpose : To get started with basic data structures in Pandas
Libraries: Pandas 0.12 and its dependencies
----------------------------------------------------------------------
"""
# imports
import pandas as pd
from math import pi
s = pd.Series(range(10))
print(s)
print(s[5])
s = pd.Series(['foo', None, 3+4j])
inds = ['bar',1, (1, 2)]
s.index = inds
print(s['bar'], s[1], s[(1, 2)])
s1 = pd.Series(range(10))
s2 = pd.Series(range(10,20))
df = pd.DataFrame({'A':s1,'B':s2})
df.head()
df['C'] = [str(c) for c in range(20, 30)]
print(df.head())
print(df['C'])
del df['A']
print(df.head(10))
df.update({'B': range(50,60)})
print(df.head())
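# A small hedged illustration of the dict-of-Series analogy: membership tests and
# keys() operate on the column labels, just as they would for a Python dict.
print('B' in df)        # True  -- 'in' checks column labels
print(list(df.keys()))  # ['B', 'C'] after the deletion and update above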
df.index
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Zero-based indexing
Step2: Works with any index-supporting objects, including strings
Step3: another example
Step4: And multiplication results in repeating
Step5: Enter NumPy arrays
Step6: NumPy arrays
Step7: Creating arrays
Step8: The class is called
Step9: Scalar array
Step10: Equivalent to size(c) in MATLAB.
Step11: Do not use np.ndarray function to create an array
Step12: by a number of points
Step13: filled with specific number
Step14: Ones
Step15: Empty
Step16: The function empty creates an array whose initial content is random and depends on the state of the memory. By default, the dtype of the created array is float64.
Step17: Special cases
Step18: Missing data
Step19: We will have a separate session on Masked Arrays in NumPy.
Step20: with NumPy
Step21: References
|
<ASSISTANT_TASK:>
Python Code:
my_collection = [1, 4, 6, 10]
my_collection.append(100000)
my_collection.remove(1)
my_collection[1] = 'abcdef'
my_collection
a = [10, 20, 30, 40, 50, 60, 70]
low, high = 2, 4
a[:low]
a[low:high]
a[high:]
s = 'qwerty'
s[1:-1]
x = [1, 2, 3]
y = [10, 20, 30]
x + y
x = [2, 3]
x * 3
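# Hedged contrast with the list behaviour above: NumPy arrays treat + and * as
# element-wise arithmetic rather than concatenation and repetition.
import numpy as np
print(np.array([1, 2, 3]) + np.array([10, 20, 30]))  # [11 22 33]
print(np.array([2, 3]) * 3)                          # [6 9]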
import numpy as np
l = list(range(1000))
%timeit [i**2 for i in l]
a = np.arange(1000)
%timeit a**2
a = np.array([3, 4, 5, 6])
a
type(a)
a.ndim
a0 = np.array(7)
a0.ndim
b = np.array([[10, 20, 30], [9, 8, 7]])
b
c = np.array([[[1], [2]], [[3], [4]]])
c.shape
c.max()
try:
a = np.array(1,2,3,4) # WRONG, throws ValueError
except ValueError as e:
print(e)
a = [1,2,3,4]
a = np.array(a) # RIGHT
b = a.copy()
np.arange(1, 9, 2) # start, end (exclusive), step
np.linspace(0, 1, 6) # start, end, num-points
np.logspace(-3,2,7)
np.zeros((2, 3))
np.ones((3, 2))
np.empty([2,3])
np.random.seed(1234)
np.random.rand(4) # uniform in [0, 1]
np.random.randn(4) # Gaussian
np.eye(3)
np.diag(np.array([1, 2, 3, 4]))
a = np.array([1, 2, 3, np.nan])
b = list(a)
a = np.array([1, 2, 3])
b = np.ma.masked_less(a, 2)
b
b.data
b.mask
np.rollaxis??
np.*space*?
np.lookfor('create array')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Boltzmann Wealth Model
Step4: With the classes defined, we can now initialize and create the model and populate it with agents.
Step9: Scheduler
Step10: With the newly updated code, we can create the model and agents (just like above).
Step16: Space
Step17: Here, we create a model with N agents, with a width and height of 50.
Step18: Since our agents are on a coordinate plane, and each agent has a wealth value, we can plot our data!
Step24: Data Collection
Step30: Batch Runner
|
<ASSISTANT_TASK:>
Python Code:
from mesa import Model, Agent
class MoneyAgent(Agent):
"""An agent with fixed initial wealth."""
def __init__(self, unique_id):
# Each agent should have a unique identifier, stored in the unique_id field.
self.unique_id = unique_id
self.wealth = 1
class MoneyModel(Model):
"""A model with some number of agents."""
def __init__(self, N):
self.num_agents = N
# The scheduler will be added here
self.create_agents()
def create_agents(self):
"""Method to create all the agents."""
for i in range(self.num_agents):
a = MoneyAgent(i)
# Now what? See below.
money_model = MoneyModel(10)
money_model.create_agents()
import random
from mesa import Model, Agent
from mesa.time import RandomActivation
class MoneyAgent(Agent):
"""An agent with fixed initial wealth."""
def __init__(self, unique_id):
self.unique_id = unique_id
self.wealth = 1
def step(self, model):
"""Give money to another agent."""
if self.wealth > 0:
# Pick a random agent
other = random.choice(model.schedule.agents)
# Give them 1 unit money
other.wealth += 1
self.wealth -= 1
class MoneyModel(Model):
"""A model with some number of agents."""
def __init__(self, N):
self.num_agents = N
# Adding the scheduler:
# Scheduler needs to be created before agents do
# Scheduler objects are instantiated with their model object,
# which they then pass to the agents at each step.
self.schedule = RandomActivation(self)
self.create_agents()
def create_agents(self):
"""Method to create all the agents."""
for i in range(self.num_agents):
a = MoneyAgent(i)
self.schedule.add(a)
def step(self):
# The scheduler's step method activates the step methods of all the
# agents that have been added to it, in this case in random order.
self.schedule.step()
def run_model(self, steps):
# Because the model has no inherent end conditions,
# the user must specify how many steps to run it for.
for i in range(steps):
self.step()
money_model = MoneyModel(10)
money_model.create_agents()
money_model.run_model(5)
import random
from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.space import MultiGrid
class MoneyAgent(Agent):
"""An agent with fixed initial wealth."""
def __init__(self, unique_id):
self.unique_id = unique_id
self.wealth = 1
def step(self, model):
"""Give money to another agent."""
if self.wealth > 0:
# Pick a random agent
other = random.choice(model.schedule.agents)
# Give them 1 unit money
other.wealth += 1
self.wealth -= 1
def move(self, model):
"""Take a random step."""
grid = model.grid
# The get_neighborhood method returns a list of coordinate tuples for
# the appropriate neighbors of the given coordinates. In this case,
# it's getting the Moore neighborhood (including diagonals) and
# includes the center cell. The agent decides where to move by choosing
# one of those tuples at random. This is a good way of handling random
# moves, since it still works for agents on an edge of a non-toroidal
# grid, or if the grid itself is hexagonal.
possible_steps = grid.get_neighborhood(
self.pos, moore=True, include_center=True)
choice = random.choice(possible_steps)
# the move_agent method works like place_agent, but removes the agent
# from its current location before placing it in its new one.
grid.move_agent(self, choice)
def give_money(self, model):
grid = model.grid
pos = [self.pos]
# This is a helper method which returns the contents of the entire list
# of cell tuples provided. It's not strictly necessary here; the
# alternative would be: x, y = self.pos; others = grid[y][x]
# (note that grids are indexed y-first).
others = grid.get_cell_list_contents(pos)
if len(others) > 1:
other = random.choice(others)
other.wealth += 1
self.wealth -= 1
class MoneyModel(Model):
"""A model with some number of agents."""
def __init__(self, N, width, height, torus):
# The arguments needed to create a new grid are its
# width, height, and a boolean for whether it is a torus or not.
self.grid = MultiGrid(height, width, torus)
self.num_agents = N
self.schedule = RandomActivation(self)
self.create_agents()
def create_agents(self):
"""Method to create all the agents."""
for i in range(self.num_agents):
a = MoneyAgent(i)
self.schedule.add(a)
x = random.randrange(self.grid.width)
y = random.randrange(self.grid.height)
# The place_agent method places the given object in the grid cell
# specified by the (x, y) tuple, and assigns that tuple to the
# agent's pos property.
self.grid.place_agent(a, (x, y))
def step(self):
# The scheduler's step method activates the step methods of all the
# agents that have been added to it, in this case in random order.
self.schedule.step()
def run_model(self, steps):
# Because the model has no inherent end conditions,
# the user must specify how many steps to run it for.
for i in range(steps):
self.step()
money_model = MoneyModel(N=100, width=50, height=50, torus=True)
money_model.create_agents()
money_model.run_model(50)
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
wealth_grid = np.zeros((money_model.grid.height, money_model.grid.width))  # rows = y (height), cols = x (width)
for cell in money_model.grid.coord_iter():
cell_content, x, y = cell
cell_wealth = sum(a.wealth for a in cell_content)
wealth_grid[y][x] = cell_wealth
plt.imshow(wealth_grid, interpolation='nearest')
plt.show()
import random
from mesa import Model, Agent
from mesa.time import RandomActivation
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
class MoneyAgent(Agent):
"""An agent with fixed initial wealth."""
def __init__(self, unique_id):
self.unique_id = unique_id
self.wealth = 1
def step(self, model):
"""Give money to another agent."""
if self.wealth > 0:
# Pick a random agent
other = random.choice(model.schedule.agents)
# Give them 1 unit money
other.wealth += 1
self.wealth -= 1
def move(self, model):
"""Take a random step."""
grid = model.grid
# The get_neighborhood method returns a list of coordinate tuples for
# the appropriate neighbors of the given coordinates. In this case,
# it's getting the Moore neighborhood (including diagonals) and
# includes the center cell. The agent decides where to move by choosing
# one of those tuples at random. This is a good way of handling random
# moves, since it still works for agents on an edge of a non-toroidal
# grid, or if the grid itself is hexagonal.
possible_steps = grid.get_neighborhood(
self.pos, moore=True, include_center=True)
choice = random.choice(possible_steps)
# the move_agent method works like place_agent, but removes the agent
# from its current location before placing it in its new one.
grid.move_agent(self, choice)
def give_money(self, model):
grid = model.grid
pos = [self.pos]
# This is a helper method which returns the contents of the entire list
# of cell tuples provided. It's not strictly necessary here; the
# alternative would be: x, y = self.pos; others = grid[y][x]
# (note that grids are indexed y-first).
others = grid.get_cell_list_contents(pos)
if len(others) > 1:
other = random.choice(others)
other.wealth += 1
self.wealth -= 1
class MoneyModel(Model):
"""A model with some number of agents."""
def __init__(self, N, width, height, torus):
# The arguments needed to create a new grid are its
# width, height, and a boolean for whether it is a torus or not.
self.grid = MultiGrid(height, width, torus)
self.num_agents = N
self.schedule = RandomActivation(self)
self.create_agents()
ar = {"Wealth": lambda a: a.wealth}
self.dc = DataCollector(agent_reporters=ar)
def create_agents(self):
"""Method to create all the agents."""
for i in range(self.num_agents):
a = MoneyAgent(i)
self.schedule.add(a)
x = random.randrange(self.grid.width)
y = random.randrange(self.grid.height)
# The place_agent method places the given object in the grid cell
# specified by the (x, y) tuple, and assigns that tuple to the
# agent's pos property.
self.grid.place_agent(a, (x, y))
def step(self):
# The scheduler's step method activates the step methods of all the
# agents that have been added to it, in this case in random order.
self.schedule.step()
self.dc.collect(self)
def run_model(self, steps):
# Because the model has no inherent end conditions,
# the user must specify how many steps to run it for.
for i in range(steps):
self.step()
# Create a model with 100 agents
model = MoneyModel(100, 10, 10, True)
# Run it for 1,000 steps:
model.run_model(1000)
# Get the data as a DataFrame
wealth_history = model.dc.get_agent_vars_dataframe()
# wealth_history indexed on Step and AgentID, and...
# ...has Wealth as one data column
wealth_history.reset_index(inplace=True)
# Plot a histogram of final wealth
wealth_history[wealth_history.Step==999].\
Wealth.hist(bins=range(10))
from mesa.batchrunner import BatchRunner
class MoneyAgent(Agent):
"""An agent with fixed initial wealth."""
def __init__(self, unique_id, starting_wealth):
# Each agent should have a unique_id
self.unique_id = unique_id
self.wealth = starting_wealth
def step(self, model):
"""Give money to another agent."""
if self.wealth > 0:
# Pick a random agent
other = random.choice(model.schedule.agents)
# Give them 1 unit money
other.wealth += 1
self.wealth -= 1
class MoneyModel(Model):
"""A model with some number of agents."""
def __init__(self, N, starting_wealth):
self.running = True
self.num_agents = N
self.starting_wealth = starting_wealth
self.schedule = RandomActivation(self)
self.create_agents()
ar = {"Wealth": lambda a: a.wealth}
self.dc = DataCollector(agent_reporters=ar)
def create_agents(self):
"""Method to create all the agents."""
for i in range(self.num_agents):
a = MoneyAgent(i, self.starting_wealth)
self.schedule.add(a)
def step(self):
self.dc.collect(self)
self.schedule.step()
def run_model(self, steps):
"""The model has no end condition,
so the user needs to specify how long to run."""
for _ in range(steps):
self.step()
def compute_gini(model):
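    # Standard closed-form sample Gini coefficient on sorted wealths x_1 <= ... <= x_N:
    # B = sum_k (N + 1 - k) * x_k / (N * sum_k x_k) and G = 1 + 1/N - 2*B;
    # below, (N - i) with 0-based enumerate plays the role of (N + 1 - k).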
agent_wealths = [agent.wealth for agent in model.schedule.agents]
x = sorted(agent_wealths)
N = model.num_agents
B = sum( xi * (N-i) for i,xi in enumerate(x) ) / (N*sum(x))
return (1 + (1/N) - 2*B)
param_values = {"N": 100, "starting_wealth": range(1,10)}
model_reporter={"Gini": compute_gini}
batch = BatchRunner(MoneyModel, param_values,
10, 1000, model_reporter)
batch.run_all()
out = batch.get_model_vars_dataframe()
plt.scatter(out.starting_wealth, out.Gini)
plt.grid(True)
plt.xlabel("Starting wealth")
plt.ylabel("Gini Coefficient")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's make some sample x and y data, and plot it with the plt.plot command
Step2: You can customize a bunch of features within the plot
Step3: Let's plot a sine function
Step4: Let's plot a figure with errorbars
Step5: There are three options for log-plots
Step6: To add a legend to your plot, include the label argument in the plot command, then call plt.legend() at the end of the plotting commands, before plt.show().
Step7: To save figures in python you just use plt.savefig()
Step8: Let's make a log-log plot displaying Kepler's law
Step9: Scatter Plots
Step10: SIDE NOTE
Step11: The plot is starting to look better, but there is one really important thing that is missing
Step12: We can also change things like the axis limits with plt.xlim() and plt.ylim(). For these we just want to feed it a range of values for each axis
Step13: The axis labels are easy to read, but the numbers and tick marks on the axis are pretty small. We can tweak lots of little things about how the tick marks look, how they are spaced, and if we want to have a grid to guide the reader's eyes. I will give just a couple of examples here
Step14: By default the figure is square, but maybe this is not the best way to represent our data. If this is the case we can change the size of the figure
Step15: The last thing I'll mention here is how to put text on plots. This too is simple as long as you specify (x,y) coordinates for the text.
Step16: Histograms
Step17: Subplots
Step18: You can do fancier things with subplots like have different plots share the same axis, put smaller plots as insets to larger plots, etc. Again, take a look at things like the matplotlib library for examples of different plots.
|
<ASSISTANT_TASK:>
Python Code:
# we use matplotlib and specifically pyplot
# the convention is to import it like this:
import matplotlib.pyplot as plt
# We'll also read some data using astropy, so let's import that
import numpy as np
from astropy.io import ascii
# I'm also using this "magic" function to make my plots appear in this notebook
# Only do this when working with notebooks
%matplotlib inline
# Sample data
x = np.arange(10)
y = np.arange(10, 20)
# Make the plot, then show the plot
plt.plot(x, y)
plt.show()
plt.plot(x, y, '.', markersize=20, color='red')
plt.show()
# Let's make x go from zero to 4*pi
x = np.linspace(0, 4*np.pi, 50)
y = np.sin(x)
# This will be a thick dashed line:
plt.plot(x, y, linestyle='--', linewidth=5)
# Add labels to the axes
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
# Set the plot title
plt.title('Sine Curve')
plt.show()
# Let's plot y=x^3
x = np.arange(10)
y = x**3
# Let's make up some errorbars in x and y
xerr_values = 0.2 * np.sqrt(x)
yerr_values = 5 * np.sqrt(y)
# Call the errorbar function
plt.errorbar(x, y, xerr=xerr_values, yerr=yerr_values)
plt.show()
x = np.linspace(0, 20)
y = np.exp(x)
plt.semilogy(x, y)
plt.show()
xred = np.random.rand(100)
yred = np.random.rand(100)
xblue = np.random.rand(20)
yblue = np.random.rand(20)
plt.plot(xred, yred, '^', color='red', markersize=8,
label='Red Points')
plt.plot(xblue, yblue, '+', color='blue', markersize=12,
markeredgewidth=3, label='Blue Points')
plt.xlabel('Xaxis')
plt.ylabel('Yaxis')
plt.legend()
# You can also place the legend in different places using this:
# plt.legend(loc='lower left')
plt.show()
x = np.linspace(0, 10)
y = np.sin(x)
plt.plot(x, y)
plt.title('sin')
plt.xlabel('Xaxis')
plt.ylabel('Yaxis')
# just give savefig the file name, or path to file name that you want to write
plt.savefig('sineplot.png')
plt.show()
# Semimajor-axis in units of AU:
a_AU = np.array([0.387, 0.723, 1. , 1.524, 5.203, 9.537, 19.191, 30.069, 39.482])
# Orbital period in units of years
T_yr = np.array([0.24, 0.62, 1., 1.88, 11.86, 29.46, 84.01, 164.8, 247.7])
# Let's set gravity and mass of the sun in [cgs] units:
G = 6.67e-8
Msun = 1.99e+33
plt.loglog(a_AU, T_yr, 'o')
plt.xlabel('Semi-Major Axis [AU]')
plt.ylabel('Period [yrs]')
plt.show()
# now plot a function over the data
# as you work more in python you will learn how to actually fit models to your data
def keplers_third_law(a, M):
return np.sqrt((4*np.pi**2 * a**3) / (G * M))
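# This is Kepler's third law solved for the period: T^2 = 4*pi^2 * a^3 / (G*M), so
# T = sqrt(4*pi^2 * a^3 / (G*M)); with a in cm and G, M in cgs units, T comes out in seconds.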
# Convert semimajor-axis into centimeters
a_cm = a_AU * 1.496e+13
# Convert period into seconds
T_s = T_yr * 3.154e+7
plt.loglog(a_cm, T_s, 'o')
plt.loglog(a_cm, keplers_third_law(a_cm, Msun), '--',
label='Keplers Third Law') # try swapping out Msun with something else and see what it looks like
plt.xlabel('Semi-Major Axis [cm]')
plt.ylabel('Period [s]')
plt.legend(loc=2)
plt.show()
# first let's read in some data to use for plotting
galaxy_table = ascii.read('data/mygalaxy.dat')
galaxy_table[:5]
# simple scatter plot
plt.scatter(galaxy_table['col1'], galaxy_table['col2'])
plt.show()
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
plt.show()
# here would be the equivalent statement using plt.plot(), note that the syntax is a little different
plt.plot(galaxy_table['col1'], galaxy_table['col2'], 'o',
color='blue', markersize=1, markeredgecolor='None')
plt.show()
plt.scatter(galaxy_table['col1'], galaxy_table['col2'], color='blue',
s=1, edgecolor='None', marker='o')
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
plt.show()
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
plt.xlim([-180,180])
plt.ylim([-90,90])
plt.show()
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
# Labels
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
# Set limits
plt.xlim([-180,180])
plt.ylim([-90,90])
# Choose axis ticks
plt.xticks(range(-180,210,60), fontsize=16, fontweight='bold') # change tick spacing, font size and bold
plt.yticks(range(-90,120,30), fontsize=16, fontweight='bold')
# turn on minor tick marks
plt.minorticks_on()
plt.grid() # turn on a background grid to guide the eye
plt.show()
plt.figure(figsize=(10,4)) # change figure size
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
# Labels
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
# Set limits
plt.xlim([-180,180])
plt.ylim([-90,90])
# Choose axis ticks
plt.xticks(range(-180,210,60), fontsize=16, fontweight='bold') # change tick spacing, font size and bold
plt.yticks(range(-90,120,30), fontsize=16, fontweight='bold')
# turn on minor tick marks
plt.minorticks_on()
plt.grid() # turn on a background grid to guide the eye
plt.show()
plt.figure(figsize=(10,4)) # change figure size
plt.scatter(galaxy_table['col1'], galaxy_table['col2'],
color='blue', s=1, edgecolor='None', marker='o')
# the next three lines put text on the figure at the specified coordinates
plt.text(-90, -50, 'LMC', fontsize=20)
plt.text(-60, -60, 'SMC', fontsize=20)
plt.text(0, -30, 'MW Bulge', fontsize=20)
plt.xlabel('Galactic Longitude (degrees)',
fontweight='bold', size=16)
plt.ylabel('Galactic Latitude (degrees)',
fontweight='bold', size=16)
plt.xlim([-180,180])
plt.ylim([-90,90])
plt.xticks(range(-180,210,60), fontsize=16, fontweight='bold') # change tick spacing, font size and bold
plt.yticks(range(-90,120,30), fontsize=16, fontweight='bold')
plt.minorticks_on() # turn on minor tick marks
plt.grid() # turn on a background grid to guide the eye
plt.show()
# plots histogram where the y-axis is counts
x = np.random.randn(10000)
num, bins, patches = plt.hist(x, bins=50)
plt.xlabel('Bins')
plt.ylabel('Counts')
plt.show()
# plots histogram where the y-axis is a probability distribution
plt.hist(x, bins=50, normed=True)
plt.xlabel('Bins')
plt.ylabel('Probability')
plt.show()
# plots a histogram where the y-axis is a fraction of the total
weights = np.ones_like(x)/len(x)
plt.hist(x, bins=50, weights=weights)
plt.ylabel('Fraction')
plt.xlabel('Bins')
plt.show()
# print out num and bins and see what they look like! what size is each array?
# how would you plot this histogram using plt.plot? what is the x value and what is the y value?
# make two side by side plots
x1 = np.linspace(0.0, 5.0)
x2 = np.linspace(0.0, 2.0)
y1 = np.cos(2 * np.pi * x1) * np.exp(-x1)
y2 = np.cos(2 * np.pi * x2)
plt.figure(figsize=[15,3])
plt.subplot(1,2,1) # 1 row, 2 columns, 1st figure
plt.plot(x1,y1)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.subplot(1,2,2) # 1 row, 2 columns, 2nd figure
plt.plot(x2,y2)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.show()
# stack two plots on top of one another
plt.subplot(2,1,1) # 2 rows, 1 column, 1st figure
plt.plot(x1,y1)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.subplot(2,1,2) # 2 rows, 1 column, 2nd figure
plt.plot(x2,y2)
plt.xlabel('Xlabel')
plt.ylabel('Ylabel')
plt.show()
# don't worry about this way to read in files right now
import pandas as pd
exoplanets = pd.read_csv('data/exoplanet.eu_catalog_1022.csv')
# get rid of some rows with missing values to be safe
exoplanets = exoplanets[np.isfinite(exoplanets['orbital_period'])]
# let's see what the data table looks like
exoplanets.head()
# plot distance from host star versus mass (in jupiter masses) for each exoplanet
plt.loglog(exoplanets['semi_major_axis'], exoplanets['mass'],'.')
plt.annotate("Earth", xy=(1,1/317.), size=12)
plt.annotate("Jupiter", xy=(5,1), size=12)
plt.xlabel('Semi-Major Axis [AU]',size=20)
plt.ylabel('Mass [M$_{Jup}$]', size=20)
# let's try to find out if the blobs above separate out by detection type
import seaborn as sns; sns.set()
transits = exoplanets[exoplanets['detection_type'] == 'Primary Transit']
radial_vel = exoplanets[exoplanets['detection_type'] == 'Radial Velocity']
imaging = exoplanets[exoplanets['detection_type'] == 'Imaging']
ttv = exoplanets[exoplanets['detection_type'] == 'TTV']
plt.loglog(transits['semi_major_axis'], transits['mass'], '.', label='Transit',markersize=12)
plt.loglog(radial_vel['semi_major_axis'], radial_vel['mass'], '.', label='Radial Vel', markersize=12)
plt.loglog(imaging['semi_major_axis'], imaging['mass'], '.', label='Direct Imaging', markersize=16)
plt.loglog(ttv['semi_major_axis'], ttv['mass'], '.', label='TTV', color='cyan', markersize=16)
plt.annotate("Earth", xy=(1,1/317.), size=12)
plt.annotate("Jupiter", xy=(5,1), size=12)
plt.xlabel('Semi-Major Axis [AU]', size=20)
plt.ylabel('Mass [M$_{Jup}$]', size=20)
plt.legend(loc=4, prop={'size':16})
# and now just for fun an xkcd style plot!
plt.xkcd()
plt.scatter(exoplanets['discovered'], exoplanets['radius']*11)
plt.xlabel('Year Discovered')
plt.ylabel('Radius [R_Earth]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The apply function
Step2: The Rarefaction Transform
Step3: The rarefaction functions can also take a num_reps argument (default=1), to set the number of randomizations run.
Step4: Applying a list of Transforms in sequence
Step5: Another very basic transform is the number of unique observations in each sample.
Step6: Note how when we implement a Transform, we try to follow the convention of samples being in the columns rather than the rows (Take a look at the source code!)
Step7: Rarefaction curves are implemented as a transform
Step8: For the sake of fun (and science!), you will find that we can very easily, using minimal code, plot this dataframe as an actual rarefaction curves plot.
Step9: Not a very elegant curve for this example dataset, but you get the point!
Step10: Note how the resulting dataframe is structured.
Step11: Rarefaction curves
Step12: We can also use this method to calculate alpha diversity measures as a curve, analogous to the way we used them with RarefactionFunction.
Step13: Distance Matrix transforms
Step14: You can then apply PCoA on the distance matrix. Here we utilize sci-kit bio's implementation.
Step15: PCoA is also implemented in omicexperiment as a Transform. It is based on the excellent sci-kit bio's implementation, with minor changes.
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
#Load our data
from omicexperiment.experiment.microbiome import MicrobiomeExperiment
mapping = "example_map.tsv"
biom = "example_fungal.biom"
tax = "blast_tax_assignments.txt"
exp = MicrobiomeExperiment(biom, mapping,tax)
exp.data_df
exp.mapping_df
from omicexperiment.transforms.general import RelativeAbundance
exp.apply(RelativeAbundance) #note how it returns a new experiment
exp.apply(RelativeAbundance).data_df #peak into the transformed data
from omicexperiment.transforms.general import Rarefaction
exp.apply(Rarefaction(90000)).data_df
#a shorthand on the MicrobiomeExperiment object -- the rarefy method
exp.rarefy(90000).data_df
exp.apply(Rarefaction(n=90000, num_reps=10)).data_df
exp.apply([Rarefaction(90000), RelativeAbundance]).data_df
#%matplotlib inline
from omicexperiment.transforms.general import NumberUniqueObs
exp.apply(NumberUniqueObs).data_df
import numpy as np
exp.apply(np.mean).data_df
#Note that a transpose() is done on the DataFrame in the inside of
#the apply method, such that sample names remain as columns.
from omicexperiment.transforms.general import RarefactionCurve
curve_df = exp.apply(RarefactionCurve(10000, 1000, NumberUniqueObs)).data_df
curve_df
%matplotlib inline
import matplotlib.pyplot as plt
for sample in curve_df:
    plt.plot(curve_df.index, curve_df[sample], label=sample)
plt.legend()
from omicexperiment.transforms.general import RarefactionFunction
from skbio.diversity import alpha
#we pass the function to the func argument
#the axis to apply to is 0 by default (i.e. on columns = samples)
shannon_rf = RarefactionFunction(n=90000, num_reps=10, func=alpha.shannon, axis=0)
shannon = exp.apply(shannon_rf).data_df
shannon
#this time we pass the np.mean() function function to the agg_rep argument, to aggregate the results
shannon_rf = RarefactionFunction(n=90000, num_reps=10, func=alpha.shannon, axis=0, agg_rep=np.mean)
shannon = exp.apply(shannon_rf).data_df
shannon #now note how the result produces one result row at the intended rarefaction level.
#the function is called number_unique_obs in the transforms subpackage
from omicexperiment.transforms.observation import number_unique_obs
from omicexperiment.transforms.general import RarefactionCurveFunction
num_obs_curve = RarefactionCurveFunction(n=90001, num_reps=1, step=30000, func=number_unique_obs, axis=0)
exp.apply(num_obs_curve).data_df
#construct a curve with a step of 10000 (0 --> n 90000)
#note we add 1 here to include the last level 90000, as the function uses np.arange internally
shannon_curve = RarefactionCurveFunction(n=90001, num_reps=10, step=10000, func=alpha.shannon, axis=0, agg_rep=np.mean)
exp.apply(shannon_curve).data_df
#again if you wish, you could not pass the agg_rep argument to keep the
#results of your repetitions in the resulting dataframe
shannon_curve = RarefactionCurveFunction(n=90001, num_reps=10, step=10000, func=alpha.shannon)
exp.apply(shannon_curve).data_df
from omicexperiment.transforms.general import DistanceMatrix
rarefied_90000 = exp.rarefy(90000, num_reps=10)
rarefied_90000.apply(DistanceMatrix('braycurtis')).data_df
from skbio.stats.ordination import pcoa
pcoa_results = pcoa(rarefied_90000.apply(DistanceMatrix('braycurtis')).data_df)
pcoa_results
from omicexperiment.transforms.ordination import PCoA
bray_curtis_90000 = rarefied_90000.apply(DistanceMatrix('braycurtis'))
pcoa_bray_curtis_90000 = bray_curtis_90000.apply(PCoA)
pcoa_bray_curtis_90000.data_df
#the scikit-bio OrdinationResults object is stored in the experiment's metadata dict under the key 'pcoa'
print(pcoa_bray_curtis_90000.metadata['pcoa'].eigvals)
pcoa_bray_curtis_90000.metadata['pcoa']
%matplotlib inline
from matplotlib import pyplot
#Plot 2nd vs 3rd principal coordinate
pyplot.scatter(pcoa_bray_curtis_90000.data_df.loc['PC2'], pcoa_bray_curtis_90000.data_df.loc['PC3'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Instantiate the individual classifiers
Step2: Assign the individual classifiers to the voting ensemble. Here, we need to pass a
Step3: Fit the ensemble to the training data and score it on the test data
Step4: In order to convince us that 95.1% is a great accuracy score, we can compare the ensemble's
Step5: Similarly, the naive Bayes classifier achieves 93.0% accuracy
Step6: Last but not least, the random forest classifier also achieved 94.4% accuracy
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
X = cancer.data
y = cancer.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=13
)
from sklearn.linear_model import LogisticRegression
model1 = LogisticRegression(random_state=13)
from sklearn.naive_bayes import GaussianNB
model2 = GaussianNB()
from sklearn.ensemble import RandomForestClassifier
model3 = RandomForestClassifier(random_state=13)
from sklearn.ensemble import VotingClassifier
vote = VotingClassifier(estimators=[('lr', model1),
('gnb', model2),
('rfc', model3)],
voting='hard')
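# Hedged variant, for illustration only (not fitted or scored below): with
# voting='soft' the ensemble averages the estimators' predict_proba outputs instead
# of taking a majority vote; all three estimators here expose predict_proba.
vote_soft = VotingClassifier(estimators=[('lr', model1),
                                         ('gnb', model2),
                                         ('rfc', model3)],
                             voting='soft')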
vote.fit(X_train, y_train)
vote.score(X_test, y_test)
model1.fit(X_train, y_train)
model1.score(X_test, y_test)
model2.fit(X_train, y_train)
model2.score(X_test, y_test)
model3.fit(X_train, y_train)
model3.score(X_test, y_test)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 3. Affine decomposition
Step2: 4. Main program
Step3: 4.2. Create Finite Element space (Lagrange P1)
Step4: 4.3. Allocate an object of the Gaussian class
Step5: 4.4. Prepare reduction with a reduced basis method
Step6: 4.5. Perform the offline phase
Step7: 4.6.1. Perform an online solve
Step8: 4.6.2. Perform an online solve with a lower number of EIM terms
Step9: 4.6.3. Perform an online solve with an even lower number of EIM terms
Step10: 4.7.1. Perform an error analysis
Step11: 4.7.2. Perform an error analysis with respect to the exact problem
Step12: 4.7.3. Perform an error analysis with respect to the exact problem, but employing a smaller number of EIM terms
|
<ASSISTANT_TASK:>
Python Code:
from dolfin import *
from rbnics import *
@EIM()
class Gaussian(EllipticCoerciveProblem):
# Default initialization of members
def __init__(self, V, **kwargs):
# Call the standard initialization
EllipticCoerciveProblem.__init__(self, V, **kwargs)
# ... and also store FEniCS data structures for assembly
assert "subdomains" in kwargs
assert "boundaries" in kwargs
self.subdomains, self.boundaries = kwargs["subdomains"], kwargs["boundaries"]
self.u = TrialFunction(V)
self.v = TestFunction(V)
self.dx = Measure("dx")(subdomain_data=subdomains)
self.f = ParametrizedExpression(
self, "exp(- 2 * pow(x[0] - mu[0], 2) - 2 * pow(x[1] - mu[1], 2))", mu=(0., 0.),
element=V.ufl_element())
# note that we cannot use self.mu in the initialization of self.f, because self.mu has not been initialized yet
# Return custom problem name
def name(self):
return "GaussianEIM"
# Return the alpha_lower bound.
def get_stability_factor_lower_bound(self):
return 1.
# Return theta multiplicative terms of the affine expansion of the problem.
def compute_theta(self, term):
if term == "a":
return (1.,)
elif term == "f":
return (1.,)
else:
raise ValueError("Invalid term for compute_theta().")
# Return forms resulting from the discretization of the affine expansion of the problem operators.
def assemble_operator(self, term):
v = self.v
dx = self.dx
if term == "a":
u = self.u
a0 = inner(grad(u), grad(v)) * dx
return (a0,)
elif term == "f":
f = self.f
f0 = f * v * dx
return (f0,)
elif term == "dirichlet_bc":
bc0 = [DirichletBC(self.V, Constant(0.0), self.boundaries, 1),
DirichletBC(self.V, Constant(0.0), self.boundaries, 2),
DirichletBC(self.V, Constant(0.0), self.boundaries, 3)]
return (bc0,)
elif term == "inner_product":
u = self.u
x0 = inner(grad(u), grad(v)) * dx
return (x0,)
else:
raise ValueError("Invalid term for assemble_operator().")
mesh = Mesh("data/gaussian.xml")
subdomains = MeshFunction("size_t", mesh, "data/gaussian_physical_region.xml")
boundaries = MeshFunction("size_t", mesh, "data/gaussian_facet_region.xml")
V = FunctionSpace(mesh, "Lagrange", 1)
problem = Gaussian(V, subdomains=subdomains, boundaries=boundaries)
mu_range = [(-1.0, 1.0), (-1.0, 1.0)]
problem.set_mu_range(mu_range)
reduction_method = ReducedBasis(problem)
reduction_method.set_Nmax(20, EIM=21)
reduction_method.set_tolerance(1e-4, EIM=1e-3)
reduction_method.initialize_training_set(50, EIM=60)
reduced_problem = reduction_method.offline()
online_mu = (0.3, -1.0)
reduced_problem.set_mu(online_mu)
reduced_solution = reduced_problem.solve()
plot(reduced_solution, reduced_problem=reduced_problem)
reduced_solution_11 = reduced_problem.solve(EIM=11)
plot(reduced_solution_11, reduced_problem=reduced_problem)
reduced_solution_1 = reduced_problem.solve(EIM=1)
plot(reduced_solution_1, reduced_problem=reduced_problem)
reduction_method.initialize_testing_set(50, EIM=60)
reduction_method.error_analysis(filename="error_analysis")
reduction_method.error_analysis(
with_respect_to=exact_problem, filename="error_analysis__with_respect_to_exact")
reduction_method.error_analysis(
with_respect_to=exact_problem, EIM=11, filename="error_analysis__with_respect_to_exact__EIM_11")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Generating Data for Examples of Unit Testing SQL Queries
Step4: Generate Data for Anomaly Detection Example
|
<ASSISTANT_TASK:>
Python Code:
# NOTE: the cells below assume a SQLAlchemy `engine` and a SCHEMA_NAME constant
# are defined elsewhere in the notebook; the imports here are the ones the
# functions actually use.
import datetime
import itertools
import random

import names  # third-party `names` package, used below to generate provider names
import numpy as np
import pandas as pd

def create_patients():
    """Creating a table of patients and ids"""
ids = list(range(1, 11))
doctor_ids = ['dr' + str((i % 2) + 1) for i in ids]
names = ['john', 'jeremy', 'mark', 'leslie', 'sam', 'matt', 'judy', 'parth', 'kevin', 'joshua']
patients = {
'patient_id': ids,
'doctor_id': doctor_ids,
'name': names
}
pd.DataFrame(patients).to_sql('patients', engine, schema=SCHEMA_NAME, index=False, if_exists='replace')
def create_risk_assessment_scores():
"""Creating a table of risk_assessments and scores"""
scores = [
(1, 'psychological', 100, datetime.date(2017, 1, 5)),
(2, 'psychological', 96, datetime.date(2017, 1, 5)),
(3, 'psychological', 89, datetime.date(2017, 1, 5)),
(4, 'psychological', 75, datetime.date(2017, 1, 5)),
(5, 'psychological', 81, datetime.date(2017, 1, 5)),
(1, 'psychological', 90, datetime.date(2017, 1, 12)),
(1, 'psychological', 92, datetime.date(2017, 1, 12)),
(3, 'psychological', 94, datetime.date(2017, 1, 13)),
(4, 'psychological', 85, datetime.date(2017, 1, 15)),
(6, 'physical', 92, datetime.date(2017, 1, 7)),
(7, 'physical', 85, datetime.date(2017, 1, 7)),
(8, 'physical', 72, datetime.date(2017, 1, 7)),
(9, 'physical', 73, datetime.date(2017, 1, 7)),
(6, 'physical', 93, datetime.date(2017, 1, 8)),
(6, 'physical', 94, datetime.date(2017, 1, 9)),
(7, 'physical', 82, datetime.date(2017, 1, 15)),
(8, 'physical', 75, datetime.date(2017, 1, 16)),
]
labels = ['patient_id', 'assessment_type', 'risk_score', 'date_modified']
pd.DataFrame.from_records(scores, columns=labels)\
.to_sql('risk_assessments', engine, schema=SCHEMA_NAME, index=False, if_exists='replace')
def create_doctors():
"""Creating a table of doctors and ids"""
doctor_ids = list(range(1, 3))
doctor_ids = ['dr' + str(s) for s in doctor_ids]
names = ['Dr. Smith', 'Dr. Smith']
doctors = {
'doctor_id': doctor_ids,
'names': names,
}
pd.DataFrame(doctors).to_sql('doctors', engine, schema=SCHEMA_NAME, index=False, if_exists='replace')
create_doctors()
create_patients()
create_risk_assessment_scores()
def draw_random_int():
number = np.random.normal(100, 30)
if number < 1:
return 1
else:
return int(round(number))
upcoders = [0.05, 0.05, 0.15, 0.15, 0.6]
typicalcoders = [0.05, 0.1, 0.4, 0.4, 0.05]
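# Each list is a probability vector over the five E&M procedure codes (1..5) and sums
# to 1.0; "upcoders" are skewed toward the two highest-intensity codes, while
# "typicalcoders" mostly bill the middle-intensity codes.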
drs = ['Dr. ' + names.get_full_name() for i in range(0, 1000)]
num_of_encounters = [draw_random_int() for i in range(0, 1000)]
personid = [random.randint(1, 25000) for i in range(0, sum(num_of_encounters))]
procedure_map = {
1: 'Evaluation and Management, Lowest Intensity',
2: 'Evaluation and Management, Second Lowest Intensity',
3: 'Evaluation and Management, Medium Intensity',
4: 'Evaluation and Management, High Intensity',
5: 'Evaluation and Management, Highest Intensity',
}
doctors = list(itertools.chain(*[[drs[i]] * num_of_encounters[i] for i in range(0, len(num_of_encounters))]))
d = {
'servicing_provider_npi': doctors,
'personid': personid,
}
df = pd.DataFrame(d)
procedure_codes = []
for i in df.index:
if drs.index(df.loc[i, 'servicing_provider_npi']) % 10 < 2:
procedure_codes.append(np.random.choice(np.arange(1, 6), p=upcoders))
else:
procedure_codes.append(np.random.choice(np.arange(1, 6), p=typicalcoders))
procedure_codes = np.asarray(procedure_codes)
df['procedure_code'] = procedure_codes
df['procedure_name'] = df['procedure_code'].map(procedure_map)
df.head()
df.to_sql('claim_lines', engine, schema=SCHEMA_NAME, index=False, if_exists='replace')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Charlotte rain gauge dataset 15 min data from 2003 - 2014
Step2: Read in data
Step3: Format data to year, month, day, hour, min and rainfall & select only ONE rain gauge
Step4: Plot rain data as read
Step5: Replace invalid data with NaNs and plot again
Step6: Resample the 10-min dataset to 24h accumulated rainfall data
Step7: Resample 15 min data to 1h accumulated dataset
Step8: Select only summer months (April - Sept)
Step9: Select only winter months (Oct - Mar)
Step10: Resample 15 min dataset to monthly accumulated dataset
Step11: Answering the assignments
Step12: Histogram of the data
Step13: Histogram of the data without zeros
Step14: 24h accumulated dataset
Step15: Histogram of the dataset
Step16: Histogram without zeros
Step17: 2. a. Analysis of seasonal cycles
Step18: Or on a yearly scale
Step19: 2. b. Analysis of diurnal cycles
Step20: Neglecting events < 1mm/h
Step21: Neglecting events < 3mm/h
Step22: 2. c. Variation of diurnal cycles with seasons
Step23: Neglecting events <1mm/hour
Step24: Neglecting events <3mm/hour
Step25: Merge hourly winter data
Step26: Neglecting events <1mm/h
Step27: Neglecting events <3mm/h
Step28: 2. d. Diurnal cycles of intense storm events
Step29: Amount of hourly events
Step30: 10 mm/h events in summer periods
Step31: Amount of hourly events
Step32: 3. Fit GEV-distribution for POT values in the time series
Step33: 3. c. Compute rainfall amounts associated with return periods of 1 year, 10 years and 100 years
Step34: Update 10.10.2017
Step35: GEV and block maxima of monthly maxima of 1h data
Step36: POT & GPD
Step37: GPD and POT of data>10mm/h
Step38: Boxplot of POT values
Step39: Number of occurences per hour
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
%matplotlib inline
plt.style.use('ggplot')
from mpl_toolkits.basemap import Basemap
def get_basemap(_resolution):
return Basemap(projection='merc', llcrnrlat=25, urcrnrlat=38, llcrnrlon=275, urcrnrlon=285, lat_ts=35.,
resolution=_resolution)
positions = pd.read_csv('./Raw_RG_Data/RG_lat_lon.csv', header=None)
positions.columns=['lat', 'lon']
plt.figure(figsize=(24,12))
m = get_basemap('h')
m.drawcoastlines()
m.drawcountries()
m.fillcontinents(color = 'gray')
m.drawmapboundary()
for index, row in positions.iterrows():
x,y = m(row['lon']+360, row['lat'])
m.plot(x, y, 'ro', markersize=6)
x,y = m(positions['lon'][0]+360, positions['lat'][0])
m.plot(x, y, 'bx', markersize=6)
m.drawstates()
m.drawrivers()
plt.show()
charlotte_rainfall = pd.read_csv('./charlotte_rg_2003-2014.csv', header = None)
#charlotte_rainfall = pd.read_csv('./Raw_RG_Data/Charlotte_CRN_gage_2003.csv', header = None)
#for i in range(2004,2014):
# cur_rainfall = pd.read_csv('./Raw_RG_Data/Charlotte_CRN_gage_%d.csv' % i, header = None)
# charlotte_rainfall = charlotte_rainfall.append(cur_rainfall, ignore_index=True)
#charlotte_rainfall = charlotte_rainfall.iloc[:,:6]
charlotte_rainfall.columns = ["year","month","day", "hour", "min", "Rainfall"]
charlotte_rainfall.loc[:,'dt'] = pd.to_datetime(dict(year=charlotte_rainfall['year'], month=charlotte_rainfall['month'], day=charlotte_rainfall['day'], hour=charlotte_rainfall['hour'], minute=charlotte_rainfall['min']))
charlotte_rainfall.index=charlotte_rainfall['dt']
charlotte_rainfall.head()
plt.plot(charlotte_rainfall['dt'], charlotte_rainfall["Rainfall"])
plt.ylabel('mm/15min')
plt.gcf().autofmt_xdate()
charlotte_rainfall["Rainfall"] = charlotte_rainfall["Rainfall"].replace(-99, np.nan)
plt.plot(charlotte_rainfall['dt'], charlotte_rainfall["Rainfall"])
plt.ylabel('mm/15min')
plt.gcf().autofmt_xdate()
charlotte_rainfall.head()
charlotte_24h_rainfall = pd.DataFrame()
charlotte_24h_rainfall['mean_rain'] = charlotte_rainfall.Rainfall.resample('D').mean()
charlotte_24h_rainfall['accum_rain'] = charlotte_rainfall.Rainfall.resample('D').sum()
charlotte_24h_rainfall.head()
plt.plot(charlotte_24h_rainfall["accum_rain"])
plt.ylabel('mm/24h')
plt.gcf().autofmt_xdate()
plt.plot(charlotte_24h_rainfall["mean_rain"])
plt.ylabel(r'mm/15min ($\varnothing$ of 24h)')
plt.gcf().autofmt_xdate()
charlotte_1h_rainfall = pd.DataFrame()
charlotte_1h_rainfall['mean_rain'] = charlotte_rainfall.Rainfall.resample('H').mean()
charlotte_1h_rainfall['accum_rain'] = charlotte_rainfall.Rainfall.resample('H').sum()
charlotte_1h_rainfall.head()
plt.plot(charlotte_1h_rainfall["accum_rain"])
plt.ylabel('mm/h')
plt.gcf().autofmt_xdate()
plt.plot(charlotte_1h_rainfall["mean_rain"])
plt.ylabel(r'mm/15min ($\varnothing$ of 1h)')
plt.gcf().autofmt_xdate()
charlotte_summer_1h_rainfall = charlotte_1h_rainfall.loc[(charlotte_1h_rainfall.index.month>=4) & (charlotte_1h_rainfall.index.month<=9)]
plt.plot(charlotte_summer_1h_rainfall["accum_rain"])
plt.ylabel('mm/h')
plt.gcf().autofmt_xdate()
mask_start = (charlotte_1h_rainfall.index.month >= 1) & (charlotte_1h_rainfall.index.month <= 3)
mask_end = (charlotte_1h_rainfall.index.month >= 10) & (charlotte_1h_rainfall.index.month <= 12)
mask = mask_start | mask_end
charlotte_winter_1h_rainfall = charlotte_1h_rainfall.loc[mask]
plt.plot(charlotte_winter_1h_rainfall["accum_rain"])
plt.ylabel('mm/h')
plt.gcf().autofmt_xdate()
charlotte_winter_1h_rainfall.head()
charlotte_monthly_rainfall = pd.DataFrame()
charlotte_monthly_rainfall['mean_rain'] = charlotte_rainfall.Rainfall.resample('M').mean()
charlotte_monthly_rainfall['accum_rain'] = charlotte_rainfall.Rainfall.resample('M').sum()
plt.plot(charlotte_monthly_rainfall["accum_rain"])
plt.ylabel('mm/month')
plt.gcf().autofmt_xdate()
plt.plot(charlotte_monthly_rainfall["mean_rain"])
plt.ylabel(r'mm/15min ($\varnothing$ per month)')
plt.gcf().autofmt_xdate()
print('Mean: %s' % str(charlotte_rainfall.Rainfall.mean()))
print('Std: %s' % str(charlotte_rainfall.Rainfall.std()))
print('Skew: %s' % str(charlotte_rainfall.Rainfall.skew()))
charlotte_rainfall.Rainfall.hist(bins = 100)
plt.xlabel('mm/15min')
plt.gca().set_yscale("log")
cur_data = charlotte_rainfall.Rainfall.loc[charlotte_rainfall.Rainfall>0]
hist_d = plt.hist(cur_data, bins=100)
plt.xlabel('mm/15min')
plt.gca().set_yscale("log")
print('Mean: %s' % str(charlotte_24h_rainfall.accum_rain.mean()))
print('Std: %s' % str(charlotte_24h_rainfall.accum_rain.std()))
print('Skew: %s' % str(charlotte_24h_rainfall.accum_rain.skew()))
charlotte_24h_rainfall.accum_rain.hist(bins = 100)
plt.xlabel('mm/24h')
plt.gca().set_yscale("log")
charlotte_24h_rainfall.mean_rain.hist(bins = 100)
plt.xlabel(r'mm/15min ($\varnothing$ per 24h)')
plt.gca().set_yscale("log")
cur_data = charlotte_24h_rainfall.accum_rain.loc[charlotte_24h_rainfall.accum_rain>0]
hist_d = plt.hist(cur_data, bins=100)
plt.xlabel('mm/24h')
plt.gca().set_yscale("log")
charlotte_monthly_rainfall['mon'] = charlotte_monthly_rainfall.index.month
charlotte_monthly_rainfall['year'] = charlotte_monthly_rainfall.index.year
charlotte_monthly_rainfall.boxplot(column=['accum_rain'], by='mon', sym='+')
plt.ylabel('mm/month')
charlotte_monthly_rainfall.dropna().boxplot(column=['accum_rain'], by='year', sym='+')
plt.ylabel('mm/month')
plt.gcf().autofmt_xdate()
charlotte_1h_rainfall['hour'] = charlotte_1h_rainfall.index.hour
charlotte_1h_rainfall.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
cur_df = charlotte_1h_rainfall.copy()
cur_df.loc[cur_df.accum_rain<1, 'accum_rain'] = np.nan
cur_df.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
cur_df = charlotte_1h_rainfall.copy()
cur_df.loc[cur_df.accum_rain<3, 'accum_rain'] = np.nan
cur_df.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
pd.options.mode.chained_assignment = None # default='warn'
charlotte_summer_1h_rainfall['hour'] = charlotte_summer_1h_rainfall.index.hour
charlotte_summer_1h_rainfall.boxplot(column=['accum_rain'], by='hour', sym='+')
cur_df = charlotte_summer_1h_rainfall.copy()
cur_df.loc[cur_df.accum_rain<1, 'accum_rain'] = np.nan
cur_df.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
cur_df = charlotte_summer_1h_rainfall.copy()
cur_df.loc[cur_df.accum_rain<3, 'accum_rain'] = np.nan
cur_df.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
charlotte_winter_1h_rainfall['hour'] = charlotte_winter_1h_rainfall.index.hour
charlotte_winter_1h_rainfall.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
cur_df = charlotte_winter_1h_rainfall.copy()
cur_df.loc[cur_df.accum_rain<1, 'accum_rain'] = np.nan
cur_df.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
cur_df = charlotte_winter_1h_rainfall.copy()
cur_df.loc[cur_df.accum_rain<3, 'accum_rain'] = np.nan
cur_df.boxplot(column=['accum_rain'], by='hour', sym='+')
plt.ylabel('mm/h')
charlotte_1h_exceeds = charlotte_1h_rainfall.accum_rain[charlotte_1h_rainfall.accum_rain>10]
print(len(charlotte_1h_exceeds))
y = np.array(charlotte_1h_exceeds)
N = len(y)
x = range(N)
width = 1
plt.bar(x, y, width)
plt.ylabel('mm/h')
charlotte_1h_exceeds_summer = charlotte_summer_1h_rainfall.accum_rain[charlotte_summer_1h_rainfall.accum_rain>10]
y = np.array(charlotte_1h_exceeds_summer)
N = len(y)
x = range(N)
width = 1
plt.bar(x, y, width)
plt.ylabel('mm/h')
print(len(charlotte_1h_exceeds_summer))
plt.plot(charlotte_1h_exceeds)
plt.gcf().autofmt_xdate()
charlotte_1h_exceeds.hist(bins=100)
from scipy.stats import genextreme
x = np.linspace(0, 80, 1000)
y = np.array(charlotte_1h_exceeds[:])
np.seterr(divide='ignore', invalid='ignore')
genextreme.fit(y)
pdf = plt.plot(x, genextreme.pdf(x, *genextreme.fit(y)))
pdf_hist = plt.hist(y, bins=50, normed=True, histtype='stepfilled', alpha=0.8)
genextreme.ppf((1-1/1), *genextreme.fit(y))
genextreme.ppf((1-1/10), *genextreme.fit(y))
genextreme.ppf((1-1/100), *genextreme.fit(y))
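# Return-level relation used above: the T-year amount is the fitted GEV quantile at
# non-exceedance probability 1 - 1/T, i.e. x_T = F^{-1}(1 - 1/T); the three calls use
# probabilities 0.0, 0.9 and 0.99 for T = 1, 10 and 100 years.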
from scipy.stats import genpareto
temp_monthly = charlotte_1h_rainfall.groupby(pd.TimeGrouper(freq='M'))
block_max_y = np.array(temp_monthly.accum_rain.max())
print(block_max_y)
print(len(block_max_y))
x = np.linspace(0, 100, 1000)
pdf = plt.plot(x, genextreme.pdf(x, *genextreme.fit(block_max_y)))
pdf_hist = plt.hist(block_max_y, bins=50, normed=True, histtype='stepfilled', alpha=0.8)
genextreme.fit(block_max_y)
genextreme.ppf((1-1/10), *genextreme.fit(block_max_y))
pdf_bm = plt.plot(x, genpareto.pdf(x, *genpareto.fit(y)))
pdf_hist_bm = plt.hist(y, bins=100, normed=True, histtype='stepfilled', alpha=0.8)
genpareto.fit(y)
genpareto.ppf((1-1/10), *genpareto.fit(y))
event_occurences = pd.DataFrame(charlotte_1h_exceeds)
event_occurences['hour'] = event_occurences.index.hour
event_occurences.boxplot(column=['accum_rain'], by='hour', sym='+')
event_occurences.hour.value_counts(sort=False)
# plt.plot(asd.hour.value_counts(sort=False))
cur_hist = plt.hist(event_occurences.hour, bins=24, histtype='stepfilled')
plt.xticks(range(24))
plt.xlabel('hour')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check that the Google BigQuery library is installed and if not, install it.
Step2: Note
Step3: Set environment variables.
Step4: Create a BigQuery Dataset and Google Cloud Storage Bucket
Step5: Train and deploy a logistic regression model
Step6: Export the model
Step7: Local deployment and serving
Step8: Create a version subdirectory
Step9: Pull the docker image
Step10: Run the Docker container
Step11: Run the prediction
Step12: Online deployment and serving
Step13: Create a model version
Step14: Create the version
Step15: This step might take a few minutes to complete. You should see the message Creating version (this might take a few minutes).......
Step16: Online prediction
Step17: Setup env variables for predict
Step18: Run predict
Step19: Train and deploy a Boosted Tree classifier model
Step20: Export the model
Step21: Local deployment and serving
Step22: Extract predictor.py
Step23: Install XGBoost library
Step24: Online deployment and serving
Step25: Create a model version
Step26: Create the version
Step27: This step might take a few minutes to complete. You should see the message Creating version (this might take a few minutes).......
Step28: Online prediction
Step29: Setup env variables for predict
Step30: Run predict
Step31: Train and deploy an AutoML classifier model
Step32: Export the model
Step33: Local deployment and serving
Step34: Pull AutoML Docker image
Step35: Start Docker container
Step36: Run the prediction
Step37: Make the predict call
|
<ASSISTANT_TASK:>
Python Code:
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
!pip install --user google-cloud-bigquery==1.25.0
import os
from google.cloud import bigquery
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
# TODO: Change environment variables
PROJECT = "cloud-training-demos" # REPLACE WITH YOUR PROJECT NAME
BUCKET = "BUCKET" # REPLACE WITH YOUR BUCKET NAME, DEFAULT BUCKET WILL BE PROJECT ID
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
# Do not change these
os.environ["BUCKET"] = PROJECT if BUCKET == "BUCKET" else BUCKET # DEFAULT BUCKET WILL BE PROJECT ID
os.environ["REGION"] = REGION
if PROJECT == "cloud-training-demos":
print("Don't forget to update your PROJECT name! Currently:", PROJECT)
%%bash
## Create a BigQuery dataset bqml_tutorial
bq --location=US mk --dataset \
--description "bqml_tutorial" \
$PROJECT:bqml_tutorial
echo "Here are your current datasets:"
bq ls
%%bash
bq query --use_legacy_sql=false \
'CREATE MODEL `bqml_tutorial.iris_model`
OPTIONS (model_type="logistic_reg",
max_iterations=10, input_label_cols=["species"])
AS SELECT
*
FROM
`bigquery-public-data.ml_datasets.iris`;'
%%bash
bq extract -m bqml_tutorial.iris_model gs://$BUCKET/iris_model
%%bash
mkdir tmp_dir
gsutil cp -r gs://$BUCKET/iris_model tmp_dir
%%bash
mkdir -p serving_dir/iris_model/1
cp -r tmp_dir/iris_model/* serving_dir/iris_model/1
rm -r tmp_dir
%%bash
docker pull tensorflow/serving
%%bash
docker run -p 8500:8500 --network="host" --mount type=bind,source=`pwd`/serving_dir/iris_model,target=/models/iris_model -e MODEL_NAME=iris_model -t tensorflow/serving &
%%bash
curl -d '{"instances": [{"sepal_length":5.0, "sepal_width":2.0, "petal_length":3.5, "petal_width":1.0}]}' -X POST http://localhost:8501/v1/models/iris_model:predict
MODEL_NAME="IRIS_MODEL"
gcloud ai-platform models create $MODEL_NAME
# Replace the BUCKET_NAME with your bucket name.
MODEL_DIR="gs://<BUCKET_NAME>/iris_model"
VERSION_NAME="v1"
FRAMEWORK="TENSORFLOW"
gcloud ai-platform versions create $VERSION_NAME --model=$MODEL_NAME --origin=$MODEL_DIR --runtime-version=2.1 --framework=$FRAMEWORK
gcloud ai-platform versions describe $VERSION_NAME --model $MODEL_NAME
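# The two JSON records below are the contents of instances.json (assumption: save them to that file before running the predict command below)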
{"sepal_length":5.0, "sepal_width":2.0, "petal_length":3.5, "petal_width":1.0}
{"sepal_length":5.3, "sepal_width":3.7, "petal_length":1.5, "petal_width":0.2}
INPUT_DATA_FILE="instances.json"
gcloud ai-platform predict --model $MODEL_NAME --version $VERSION_NAME --json-instances $INPUT_DATA_FILE
%%bash
bq query --use_legacy_sql=false \
'CREATE MODEL `bqml_tutorial.boosted_tree_iris_model`
OPTIONS (model_type="boosted_tree_classifier",
max_iterations=10, input_label_cols=["species"])
AS SELECT
*
FROM
`bigquery-public-data.ml_datasets.iris`;'
%%bash
bq extract --destination_format ML_XGBOOST_BOOSTER -m bqml_tutorial.boosted_tree_iris_model gs://$BUCKET/boosted_tree_iris_model
%%bash
mkdir -p serving_dir
gsutil cp -r gs://$BUCKET/boosted_tree_iris_model serving_dir
%%bash
tar -xvf serving_dir/boosted_tree_iris_model/xgboost_predictor-0.1.tar.gz -C serving_dir/boosted_tree_iris_model/
%%bash
pip3 install xgboost
%%bash
cd serving_dir/boosted_tree_iris_model/
python main.py '[{"sepal_length":5.0, "sepal_width":2.0, "petal_length":3.5, "petal_width":1.0}]'
MODEL_NAME="BOOSTED_TREE_IRIS_MODEL"
gcloud ai-platform models create $MODEL_NAME
# Replace the BUCKET_NAME with your bucket name.
MODEL_DIR="gs://<BUCKET_NAME>/boosted_tree_iris_model"
VERSION_NAME="v1"
gcloud beta ai-platform versions create $VERSION_NAME --model=$MODEL_NAME --origin=$MODEL_DIR --package-uris=${MODEL_DIR}/xgboost_predictor-0.1.tar.gz --prediction-class=predictor.Predictor --runtime-version=2.1
gcloud ai-platform versions describe $VERSION_NAME --model $MODEL_NAME
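# The two JSON records below are the contents of instances.json (assumption: save them to that file before running the predict command below)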
{"sepal_length":5.0, "sepal_width":2.0, "petal_length":3.5, "petal_width":1.0}
{"sepal_length":5.3, "sepal_width":3.7, "petal_length":1.5, "petal_width":0.2}
INPUT_DATA_FILE="instances.json"
gcloud ai-platform predict --model $MODEL_NAME --version $VERSION_NAME --json-instances $INPUT_DATA_FILE
%%bash
bq query --use_legacy_sql=false \
'CREATE MODEL `bqml_tutorial.automl_iris_model`
OPTIONS (model_type="automl_classifier",
budget_hours=1, input_label_cols=["species"])
AS SELECT
* EXCEPT(multiplier)
FROM
`bigquery-public-data.ml_datasets.iris`, unnest(GENERATE_ARRAY(1, 10)) as multiplier;'
%%bash
bq extract -m bqml_tutorial.automl_iris_model gs://$BUCKET/automl_iris_model
%%bash
mkdir automl_serving_dir
gsutil cp -r gs://$BUCKET/automl_iris_model/* automl_serving_dir/
%%bash
docker pull gcr.io/cloud-automl-tables-public/model_server
%%bash
docker run -v `pwd`/automl_serving_dir:/models/default/0000001 -p 8080:8080 -it gcr.io/cloud-automl-tables-public/model_server
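# The JSON payload below is the contents of input.json (assumption: save it to that file before issuing the curl request below)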
{"instances": [{"sepal_length":5.0, "sepal_width":2.0, "petal_length":3.5, "petal_width":1.0},
{"sepal_length":5.3, "sepal_width":3.7, "petal_length":1.5, "petal_width":0.2}]}
%%bash
curl -X POST --data @input.json http://localhost:8080/predict
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Then we import cartopy itself
Step2: In addition, we import cartopy's coordinate reference system submodule
Step3: A few other modules and functions which we will use later to add cool stuff to our plots. Also updating font sizes for improved readability
Step4: Note on bathymetry data
Step5: We're just interested in bathy here, so set any height values greater than 0 to 0 and set contour levels to plot later
Step6: Here we load some scatter data from a two column csv for plotting later
Step7: Now to make the map itself. First we define our coordinate system. Here we are using a Plate Carrée projection, which is an equidistant cylindrical projection.
Step8: Now we contour the bathymetry data
Step9: A good start. To make it more map like we add gridlines, formatted labels and a colorbar
Step10: Now to add a few more features. First coastlines from cartopy's natural features toolbox. Then scatters of the samples we imported earlier
Step11: To finish off the map we add a legend for the scatter plot, an inset map showing the area at a larger scale and some text identifying the islands
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import xarray as xr
from pathlib import Path
import cartopy
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
plt.rcParams.update({"font.size": 20})
SMALL_SIZE = 22
MEDIUM_SIZE = 22
LARGE_SIZE = 26
plt.rc("font", size=SMALL_SIZE)
plt.rc("xtick", labelsize=SMALL_SIZE)
plt.rc("ytick", labelsize=SMALL_SIZE)
plt.rc("axes", titlesize=SMALL_SIZE)
plt.rc("legend", fontsize=SMALL_SIZE)
# Open prepared bathymetry dataset using pathlib to sepcify the relative path
bathy_file_path = Path('../data/bathy.nc')
bathy_ds = xr.open_dataset(bathy_file_path)
bathy_lon, bathy_lat, bathy_h = bathy_ds.bathymetry.longitude, bathy_ds.bathymetry.latitude, bathy_ds.bathymetry.values
bathy_h[bathy_h > 0] = 0
bathy_conts = np.arange(-9000, 500, 500)
# Load some scatter data of sample locations near South Georgia
data = pd.read_csv("../data/scatter_coords.csv")
lons = data.Longitude.values
lats = data.Latitude.values
# Subset of sampling locations
sample_lon = lons[[0, 2, 7]]
sample_lat = lats[[0, 2, 7]]
coord = ccrs.PlateCarree()
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection=coord)
ax.set_extent([-42, -23, -60, -50], crs=coord);
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection=coord)
ax.set_extent([-42, -23, -60, -50], crs=coord)
bathy = ax.contourf(bathy_lon, bathy_lat, bathy_h, bathy_conts, transform=coord, cmap="Blues_r")
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection=coord)
ax.set_extent([-42, -23, -60, -50], crs=coord)
bathy = ax.contourf(bathy_lon, bathy_lat, bathy_h, bathy_conts, transform=coord, cmap="Blues_r")
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color="k", alpha=0.5, linestyle="--")
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.ylines = True
gl.xlines = True
fig.colorbar(bathy, ax=ax, orientation="horizontal", label="Bathymetry (m)", shrink=0.7, pad=0.08, aspect=40);
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection=coord)
ax.set_extent([-42, -23, -60, -50], crs=coord)
bathy = ax.contourf(bathy_lon, bathy_lat, bathy_h, bathy_conts, transform=coord, cmap="Blues_r")
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color="k", alpha=0.5, linestyle="--")
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.ylines = True
gl.xlines = True
fig.colorbar(bathy, ax=ax, orientation="horizontal", label="Bathymetry (m)", shrink=0.7, pad=0.08, aspect=40)
feature = cartopy.feature.NaturalEarthFeature(
name="coastline", category="physical", scale="50m", edgecolor="0.5", facecolor="0.8"
)
ax.add_feature(feature)
ax.scatter(lons, lats, zorder=5, color="red", label="Samples collected")
ax.scatter(sample_lon, sample_lat, zorder=10, color="k", marker="D", s=50, label="Samples sequenced");
fig = plt.figure(figsize=(20, 10))
ax = fig.add_subplot(111, projection=coord)
ax.set_extent([-42, -23, -60, -50], crs=coord)
bathy = ax.contourf(bathy_lon, bathy_lat, bathy_h, bathy_conts, transform=coord, cmap="Blues_r")
gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color="k", alpha=0.5, linestyle="--")
gl.xlabels_top = False
gl.ylabels_right = False
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.ylines = True
gl.xlines = True
fig.colorbar(bathy, ax=ax, orientation="horizontal", label="Bathymetry (m)", shrink=0.7, pad=0.08, aspect=40)
ax.add_feature(feature)
ax.scatter(lons, lats, zorder=5, color="red", label="Samples collected")
ax.scatter(sample_lon, sample_lat, zorder=10, color="k", marker="D", s=50, label="Samples sequenced")
fig.legend(bbox_to_anchor=(0.12, 0.2), loc="lower left")
tr2 = ccrs.Stereographic(central_latitude=-55, central_longitude=-35)
sub_ax = plt.axes(
[0.63, 0.65, 0.2, 0.2], projection=ccrs.Stereographic(central_latitude=-55, central_longitude=-35)
)
sub_ax.set_extent([-70, -15, -75, 10])
x_co = [-42, -42, -23, -23, -42]
y_co = [-60, -50, -50, -60, -60]
sub_ax.add_feature(feature)
sub_ax.plot(x_co, y_co, transform=coord, zorder=10, color="red")
ax.text(-38.5, -54.9, "South\nGeorgia", fontsize=14)
ax.text(-26.8, -58.2, "South\nSandwich\nIslands", fontsize=14);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 7
Step2: Task 9 Z-criterion
Step3: Fisher
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import statsmodels
import scipy as sc
from statsmodels.stats.weightstats import *
%pylab inline
data = pd.read_csv('churn_analysis.csv', index_col=0)
data.head()
data['churn'].value_counts()
data1 = data[data['treatment'] == 1]
data['state'].describe()
pt = data.loc[data.treatment == 1, :].pivot_table(index="state",
values="area_code", columns="churn", aggfunc = len, fill_value=0)
pt
pt.loc[["CT", "DC"], :]
pt.iloc[[1, 2], :]
chi2, p, dof, expected = sc.stats.chi2_contingency(pt, correction=False)
p
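# Pairwise chi-squared tests: compare churn counts between every pair of states (no Yates correction)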
n = 0
total = 0
p_values1 = np.zeros(1275)
k = 0
for i in xrange(pt.shape[0] - 1):
for j in xrange(i + 1, pt.shape[0]):
subtable = pt.iloc[[i, j], :]
total += 1
chi2, p, dof, expected = sc.stats.chi2_contingency(subtable, correction=False)
p_values1[k] = p
k += 1
if p < 0.05:
n += 1
print n
n = 0
total = 0
p_values2 = np.zeros(1275)
k = 0
for i in xrange(pt.shape[0] - 1):
for j in xrange(i + 1, pt.shape[0]):
subtable = pt.iloc[[i, j], :]
total += 1
chi2, p, dof, expected = sc.stats.chi2_contingency(subtable, correction=True)
p_values2[k] = p
k += 1
if p < 0.05:
n += 1
print n
p_values1.mean(), p_values2.mean()
n = 0
total = 0
p_values3 = np.zeros(1275)
k = 0
for i in xrange(pt.shape[0] - 1):
for j in xrange(i + 1, pt.shape[0]):
subtable = pt.iloc[[i, j], :]
total += 1
odds, p = sc.stats.fisher_exact(subtable)
p_values3[k] = p
k += 1
if p < 0.05:
n += 1
print n
p_values1.mean(), p_values2.mean(), p_values3.mean()
data.corr()['day_calls']['mes_estim']
sc.stats.pearsonr(data['day_calls'], data['mes_estim'])
sc.stats.spearmanr(data['day_calls'], data['mes_estim'])
data0 = data[data['treatment'] == 0]['churn']
data1 = data[data['treatment'] == 1]['churn']
data2 = data[data['treatment'] == 2]['churn']
data1.head()
pylab.figure(figsize=(12,8))
pylab.subplot(2,2,1)
stats.probplot(data['day_calls'], dist="norm", plot=pylab)
pylab.subplot(2,2,2)
stats.probplot(data['mes_estim'], dist="norm", plot=pylab)
pylab.show()
d0 = np.where(data0[:] == 'False.', 0, 1) # method1
d1 = np.where(data1[:] == 'False.', 0, 1) # control
d2 = np.where(data2[:] == 'False.', 0, 1) # method2
d0.sum(), float(d0.sum())/len(d0)
d1.sum(), float(d1.sum())/len(d1)
d2.sum(), float(d2.sum())/len(d2)
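# Helper functions for a two-sample z-test on the difference of churn proportions (normal approximation)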
def proportions_diff_confint_ind(sample1, sample2, alpha = 0.05):
z = sc.stats.norm.ppf(1 - alpha / 2.)
p1 = float(sum(sample1)) / len(sample1)
p2 = float(sum(sample2)) / len(sample2)
left_boundary = (p1 - p2) - z * np.sqrt(p1 * (1 - p1)/ len(sample1) + p2 * (1 - p2)/ len(sample2))
right_boundary = (p1 - p2) + z * np.sqrt(p1 * (1 - p1)/ len(sample1) + p2 * (1 - p2)/ len(sample2))
return (left_boundary, right_boundary)
def proportions_diff_z_stat_ind(sample1, sample2):
n1 = len(sample1)
n2 = len(sample2)
p1 = float(sum(sample1)) / n1
p2 = float(sum(sample2)) / n2
P = float(p1*n1 + p2*n2) / (n1 + n2)
return (p1 - p2) / np.sqrt(P * (1 - P) * (1. / n1 + 1. / n2))
def proportions_diff_z_test(z_stat, alternative = 'two-sided'):
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'two-sided':
return 2 * (1 - sc.stats.norm.cdf(np.abs(z_stat)))
if alternative == 'less':
return sc.stats.norm.cdf(z_stat)
if alternative == 'greater':
return 1 - sc.stats.norm.cdf(z_stat)
print "95%% confidence interval for a difference between proportions: [%f, %f]" %\
proportions_diff_confint_ind(d0, d1, alpha=0.025)
print "p-value: %f" % proportions_diff_z_test(proportions_diff_z_stat_ind(d0, d1))
print "95%% confidence interval for a difference between proportions: [%f, %f]" %\
proportions_diff_confint_ind(d2, d1, alpha=0.025)
print "p-value: %f" % proportions_diff_z_test(proportions_diff_z_stat_ind(d2, d1))
print "95%% confidence interval for a difference between proportions: [%f, %f]" %\
proportions_diff_confint_ind(d0, d2, alpha=0.025)
print "p-value: %f" % proportions_diff_z_test(proportions_diff_z_stat_ind(d0, d2))
pt2 = data.pivot_table(index="treatment",
values="area_code", columns="churn", aggfunc = len, fill_value=0)
pt2
p_values = np.zeros(3)
for i in xrange(pt2.shape[0] - 1):
for j in xrange(i + 1, pt2.shape[0]):
subtable = pt2.iloc[[i, j], :]
odds, p = sc.stats.fisher_exact(subtable)
print subtable
print p
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Params
Step2: Theoretical values
Step3: Empirical mean ($\mu$) and standard deviation ($\sigma$) estimates
Step4: Plotting mean estimates
Step5: Plotting standard deviation estimates
Step6: Across different numbers of samples
Step7: Plotting mean estimates
Step8: Plotting standard deviation estimates
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import lhsmdu
import matplotlib.pyplot as plt
def simpleaxis(axes, every=False):
if not isinstance(axes, (list, np.ndarray)):
axes = [axes]
for ax in axes:
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
if every:
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.set_title('')
seed = 1
np.random.seed(seed)
lhsmdu.setRandomSeed(seed)
numDimensions = 2
numSamples = 100
numIterations = 100
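# Theoretical moments of the uniform distribution on [0, 1]: mean = 1/2, std = sqrt(1/12), skewness = 0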
theoretical_mean = 0.5
theoretical_std = np.sqrt(1./12)
theoretical_skew = 0.
mc_Mean, lhs_Mean = [], []
mc_Std, lhs_Std = [], []
for iterate in range(numIterations):
a = np.random.random((numDimensions,numSamples))
b = lhsmdu.sample(numDimensions,numSamples)
mc_Mean.append(np.mean(a))
lhs_Mean.append(np.mean(b))
mc_Std.append(np.std(a))
lhs_Std.append(np.std(b))
fig, ax = plt.subplots()
ax.plot(range(numIterations), mc_Mean, 'ko', label='numpy')
ax.plot(range(numIterations), lhs_Mean, 'o', c='orange', label='lhsmdu')
ax.hlines(xmin=0, xmax=numIterations, y=theoretical_mean, linestyles='--', label='theoretical value', zorder=3)
ax.set_xlabel("Iteration #")
ax.set_ylabel("$\mu$")
ax.legend(frameon=False)
simpleaxis(ax)
plt.show()
fig, ax = plt.subplots()
ax.plot(range(numIterations), mc_Std, 'ko', label='numpy')
ax.plot(range(numIterations), lhs_Std, 'o', c='orange', label='lhsmdu')
ax.hlines(xmin=0, xmax=numIterations, y=theoretical_std, linestyles='--', label='theoretical value', zorder=3)
ax.set_xlabel("Iteration #")
ax.set_ylabel("$\sigma$")
ax.legend(frameon=False)
simpleaxis(ax)
plt.show()
mc_Std, lhs_Std = [], []
mc_Mean, lhs_Mean = [], []
numSamples = range(1,numIterations)
for iterate in numSamples:
a = np.random.random((numDimensions,iterate))
b = lhsmdu.sample(numDimensions,iterate)
mc_Mean.append(np.mean(a))
lhs_Mean.append(np.mean(b))
mc_Std.append(np.std(a))
lhs_Std.append(np.std(b))
fig, ax = plt.subplots()
ax.plot(numSamples, mc_Mean, 'ko', label='numpy')
ax.plot(numSamples, lhs_Mean, 'o', c='orange', label='lhsmdu')
ax.hlines(xmin=0, xmax=numIterations, y=theoretical_mean, linestyles='--', label='theoretical value', zorder=3)
ax.set_xlabel("Number of Samples")
ax.set_ylabel("$\mu$")
ax.legend(frameon=False)
simpleaxis(ax)
plt.show()
fig, ax = plt.subplots()
ax.plot(numSamples, mc_Std, 'ko', label='numpy')
ax.plot(numSamples, lhs_Std, 'o', c='orange', label='lhsmdu')
ax.hlines(xmin=0, xmax=numIterations, y=theoretical_std, linestyles='--', label='theoretical value', zorder=3)
ax.set_xlabel("Number of Samples")
ax.set_ylabel("$\sigma$")
ax.legend(frameon=False)
simpleaxis(ax)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Required Widgets
Step2: We also need a way to define the crop percentage. This can be done with a simple slider that selects float numbers and is bounded between 0.0 and 1.0. Thus
Step3: Moreover, we can also create a TextPrintWidget for printing some info regarding the cropped image. For example
Step4: 3. Widgets Grouping and Styling
Step5: Now we can apply some styling on the final widget
Step6: 4. Define Render Function
Step7: Let's check that it works properly
Step8: 5. Final Widget Function
Step9: That's it! Let's see what we designed!
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import menpo.io as mio
path_to_lfpw = '/vol/atlas/databases/lfpw/'
images = []
for i in mio.import_images(path_to_lfpw + 'testset/*', max_images=20, verbose=True):
# convert it to greyscale if needed
if i.n_channels == 3:
i = i.as_greyscale(mode='luminosity')
# append it to the list
images.append(i)
from menpowidgets.options import AnimationOptionsWidget
index = {'min': 0, 'max': len(images) - 1, 'step': 1, 'index': 0}
anim_wid = AnimationOptionsWidget(index, description='Image')
anim_wid
from ipywidgets import FloatSlider
prop_wid = FloatSlider(value=1., min=0., max=1., step=0.1,
description='Proportion:', width='4.5cm')
prop_wid
from menpowidgets.options import TextPrintWidget
text_per_line = ['> Original size: ', '> Final size: ']
txt_wid = TextPrintWidget(text_per_line)
txt_wid
from ipywidgets import FlexBox, HBox
# Create an initial box for the proportion slider and the print info widget
opts_box = HBox(children=[prop_wid, txt_wid], align='center')
# Create the final widget's box
wid = FlexBox(children=[anim_wid, opts_box])
wid
from menpowidgets.style import map_styles_to_hex_colours
# Select some styles
final_widget_style = 'info'
print_widget_style = 'warning'
animation_widget_style = 'danger'
# Apply the selected final widget style
wid.box_style = final_widget_style
wid.border_width = 1
wid.border_color = map_styles_to_hex_colours(final_widget_style)
wid.border_radius = 10
prop_wid.slider_color = map_styles_to_hex_colours(final_widget_style)
prop_wid.background_color = map_styles_to_hex_colours(final_widget_style)
# Apply the selected print text widget style
txt_wid.predefined_style(style=print_widget_style)
# Apply the selected animation options widget style
anim_wid.predefined_style(style=animation_widget_style)
anim_wid.border_radius = 10
anim_wid.border_width = 1
anim_wid.border_color = map_styles_to_hex_colours(animation_widget_style)
from IPython.display import clear_output
from matplotlib.pyplot import show as pltshow
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
clear_output(wait=True)
# Get selected image index
i = anim_wid.selected_values
# Get selected crop proportion
prop = prop_wid.value
# Get a copy of the original image
im_copy = images[i].copy()
# Crop the copied image
im_copy = im_copy.crop_to_landmarks_proportion(prop)
# Visualize it
im_copy.view_landmarks();
# Update the printed info
text_per_line = ['> Original size: {}x{}'.format(images[i].height, images[i].width),
'> Final size: {}x{}'.format(im_copy.height, im_copy.width)]
txt_wid.set_widget_state(text_per_line=text_per_line)
# Make sure that image gets rendered
pltshow()
render_function({})
from IPython.display import display
def crop_images_widget(images, style='coloured'):
# Create widgets
index = {'min': 0, 'max': len(images) - 1, 'step': 1, 'index': 0}
anim_wid = AnimationOptionsWidget(index, description='Image')
prop_wid = FloatSlider(value=1., min=0., max=1., step=0.1, continuous_update=False,
description='Proportion:', width='4.5cm')
text_per_line = ['> Original size: ', '> Final size: ']
txt_wid = TextPrintWidget(text_per_line=text_per_line)
# Group widgets
opts_box = HBox(children=[prop_wid, txt_wid], align='center')
wid = FlexBox(children=[anim_wid, opts_box])
# Styling
final_widget_style = ''
border_width = 0
print_widget_style = 'minimal'
animation_widget_style = 'minimal'
if style == 'coloured':
final_widget_style = 'info'
print_widget_style = 'warning'
animation_widget_style = 'danger'
border_width = 1
wid.box_style = final_widget_style
wid.border_width = border_width
wid.border_color = map_styles_to_hex_colours(final_widget_style)
wid.border_radius = 10
prop_wid.slider_color = map_styles_to_hex_colours(final_widget_style)
prop_wid.background_color = map_styles_to_hex_colours(final_widget_style)
txt_wid.predefined_style(style=print_widget_style)
anim_wid.predefined_style(style=animation_widget_style)
anim_wid.border_radius = 10
anim_wid.border_width = border_width
anim_wid.border_color = map_styles_to_hex_colours(animation_widget_style)
# Define render function
def render_function(change):
clear_output(wait=True)
i = anim_wid.selected_values
prop = prop_wid.value
im_copy = images[i].copy()
im_copy = im_copy.crop_to_landmarks_proportion(prop)
im_copy.view_landmarks();
pltshow()
text_per_line = ['> Original size: {}x{}'.format(images[i].height, images[i].width),
'> Final size: {}x{}'.format(im_copy.height, im_copy.width)]
txt_wid.set_widget_state(text_per_line=text_per_line)
# Add render_function to the animation widget
anim_wid.add_render_function(render_function)
# Assign render_function to the proportion slider's value trait
prop_wid.observe(render_function, names='value', type='change')
# Display the widget
display(wid)
# Trigger its initial rendering
render_function({})
crop_images_widget(images, style='coloured')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Building the model with a class
Step2: II. Training without the built-in trainer
Step3: Building the model with a class
|
<ASSISTANT_TASK:>
Python Code:
import chainer
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
predictor = chainer.links.Linear(1,1)
model = chainer.links.Classifier(predictor, lossfun=chainer.functions.mean_squared_error,
accfun=chainer.functions.mean_squared_error)
#model = chainer.links.Linear(1,1)
Optimizer = chainer.optimizers.SGD()
Optimizer.setup(model)
train = list(zip(x[:2,:1], y[:2,:1]))
Train_iter = chainer.iterators.SerialIterator(train, 2)
Updater = chainer.training.updaters.StandardUpdater(Train_iter, Optimizer)
Trainer = chainer.training.Trainer(Updater, (1000, 'epoch'))
Trainer.run()
predictor(x)
import chainer
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
class Model(chainer.Chain):
def __init__(self):
super().__init__()
with self.init_scope():
self.layer = chainer.links.Linear(1,1)
def predict(self, x):
return self.layer(x)
def forward(self, x, t):
return chainer.functions.mean_squared_error(self.predict(x), t)
model = Model()
#model = chainer.links.Linear(1,1)
Optimizer = chainer.optimizers.SGD()
Optimizer.setup(model)
train = list(zip(x[:2,:1], y[:2,:1]))
Train_iter = chainer.iterators.SerialIterator(train, 2)
Updater = chainer.training.updaters.StandardUpdater(Train_iter, Optimizer)
Trainer = chainer.training.Trainer(Updater, (1000, 'epoch'))
Trainer.run()
model.predict(x)
import chainer
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
model = chainer.links.Linear(1,1)
optimizer = chainer.optimizers.SGD()
optimizer.setup(model)
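# Manual training loop: forward pass, MSE loss, clear gradients, backpropagate, then update the parameters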
for _ in range(1000):
output = model(x)
loss = chainer.functions.mean_squared_error(y, output)
model.zerograds()
loss.backward()
optimizer.update()
model(x)
import chainer
import numpy as np
x = np.array([0, 1, 2, 3, 4]).astype('float32').reshape(-1,1)
y = x * 2 + 1
class Model(chainer.Chain):
def __init__(self):
super().__init__()
with self.init_scope():
self.layer = chainer.links.Linear(1,1)
def forward(self, x):
return self.layer(x)
model = Model()
#model = chainer.links.Linear(1,1)
optimizer = chainer.optimizers.SGD()
optimizer.setup(model)
for _ in range(1000):
output = model(x)
loss = chainer.functions.mean_squared_error(y, output)
model.cleargrads()
loss.backward()
optimizer.update()
model(x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the data from the REDD dataset via NILMTK
Step2: Preprocessing the data
Step3: Chunking Energy Consumption in time-box (1 box = 5 minutes)
Step4: Feature Extraction / Label Building
Step5: Modeling
Step6: Other analyses
Step7: MLP
Step8: LSTM
|
<ASSISTANT_TASK:>
Python Code:
import warnings
#warnings.filterwarnings("warning")
import traceback
import time
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
from matplotlib import rcParams
rcParams['figure.figsize'] = (13, 10)
import pandas as pd
from tqdm import tqdm, tqdm_notebook
#tf.debugging.set_log_device_placement(True)
#print("GPU Available: ", tf.test.is_gpu_available())
from nilmtk import DataSet
from nilmtk.utils import print_dict
redd = DataSet('datasets/REDD/low_freq.h5')
# Sampling settings
from datetime import datetime, timedelta
# Information from the CS446 Project: Electric Load Identification using Machine Learning (REDD)
building_idx = 3
set_sampling_rate = 3
start = datetime(2011, 4, 16, 5, 11, 27)
end = datetime(2011, 5, 31, 0, 19, 54)
time_interval_minutes = 5 # sample split (chunk length in minutes)
# ... 6 seconds - Imaging Time Series (UK-DALE)
building_idx = 1
building = redd.buildings[building_idx]
# Available devices in building
building.elec
# Defining the fixed time block measurement
redd.set_window(start='2011-04-18', end='2011-04-20')
# Showing device consumption (inside time block)
num_apps = 20
fig, axes = plt.subplots((num_apps+1)//2,2, figsize=(24, num_apps*2) )
for i in range(num_apps):
e = redd.buildings[1].elec[i+1]
axes.flat[i].plot(e.power_series_all_data(sample_period=3), alpha = 0.6)
axes.flat[i].set_title(e.label(), fontsize = '15')
plt.suptitle('', fontsize = '30')
fig.tight_layout()
fig.subplots_adjust(top=0.95)
# Data generation intervals
def datetime_range(start, end, delta):
'''
Generating a list of datetime intervals (chunks of energy consumption)
from `start` to `end` at each `delta` units.
'''
current = start
while current < end:
yield current
current += delta
# List of datatimes
dts = [dt.strftime('%Y-%m-%d %H:%M:%S')
for dt in
datetime_range(start,
end,
timedelta(minutes=time_interval_minutes))
]
# Checking chunks list...
for idx in range(1, len(dts)):
print('de', dts[idx-1], ' a ', dts[idx])
power = building.elec[1].power_series_all_data(sample_period=set_sampling_rate)
mains1 = pd.DataFrame(data = {"Power": power.values }, index=power.index)
print('Mains 1 orginal shape: ', mains1.shape)
power = building.elec[2].power_series_all_data(sample_period=set_sampling_rate)
mains2 = pd.DataFrame(data = {"Power": power.values }, index=power.index)
print('Mains 2 orginal shape: ', mains2.shape)
power = building.elec[5].power_series_all_data(sample_period=set_sampling_rate)
appliance = pd.DataFrame(data = {"Power": power.values }, index=power.index)
print('Appliance shape: ', appliance.shape)
tStart
len(appliance.index)
# Set of dataframes (5-minute chunks)
dataframes = []
# Iterating over the time blocks (5 minutes)
for idx in tqdm_notebook(range(1, len(dts))):
tStart = dts[idx-1]
tEnd = dts[idx]
# Model training interval
redd.set_window(start=tStart, end=tEnd)
try:
print('- Chunk #',idx,': from ', tStart, 'to', tEnd)
dfs = {}
_index = []
for m in building.elec.all_meters():
label = str(m.label()).lower().replace(' ','_') + '_' + str(m.instance())
power = m.power_series_all_data(sample_period=set_sampling_rate)
dfs[label] = pd.DataFrame(data = {"Power": power.values }, index=power.index)
if len(_index) < len(power.index) and ('site_meter' not in label):
_index = power.index
for meter_label in dfs:
#if 'site_meter' in meter_label:
# dfs[meter_label] = dfs[meter_label].reindex(index=_index)
dfs[meter_label] = dfs[meter_label].reindex(index=_index)
dfs[meter_label] = dfs[meter_label]['Power'].values
df = pd.DataFrame(dfs, index = _index)
# power = building.elec[1].power_series_all_data(sample_period=set_sampling_rate)
# mains1 = pd.DataFrame(data = {"Power": power.values }, index=power.index)
# print('Mains 1 orginal shape: ', mains1.shape)
# power = building.elec[2].power_series_all_data(sample_period=set_sampling_rate)
# mains2 = pd.DataFrame(data = {"Power": power.values }, index=power.index)
# print('Mains 2 orginal shape: ', mains2.shape)
# power = building.elec[5].power_series_all_data(sample_period=set_sampling_rate)
# appliance = pd.DataFrame(data = {"Power": power.values }, index=power.index)
# print('Appliance shape: ', appliance.shape)
# # Ajustar timeframes (eletronicos medidos em 3s, contra 1s da rede)
# mains1 = mains1.reindex(index=appliance.index)
# print('---\nMains 1 new shape: ', mains1.shape)
# mains2 = mains2.reindex(index=appliance.index)
# print('Mains 2 new shape: ', mains2.shape)
# # Dataframe da modelagem
# df = pd.DataFrame({
# 'Mains1': mains1["Power"].values,
# 'Mains2': mains2["Power"].values,
# 'Appliance': appliance["Power"].values
# }, index = appliance.index)
print('\n---\nDataframe shape: ', df.shape,'\n')
dataframes.append(df)
except Exception as e:
print(' ----- Error: ', str(e))#str(traceback.format_exc()))
#print(' ----- Não foi possível extrair dados do intervalo!')
print('Total Chunks:', len(dataframes))
# Check if the chunk has the valid length
valid_chunk_length = (time_interval_minutes*60)/set_sampling_rate
valid_chunks = [d for d in dataframes if d.shape[0] == valid_chunk_length]
print('Valid Chunks:', len(valid_chunks) )
# Plotting 5 chunks
for df in tqdm(dataframes):
#df = valid_chunks[i]
# if sum(df['Appliance'].values) == 0:
# fig = plt.figure(figsize=(10,8))
# plt.plot(df['Mains1'].values)
# plt.plot(df['Mains2'].values)
# plt.plot(df['Appliance'].values)
# plt.gca().legend(('Mains1','Mains2', 'Appliance'))
fig = plt.figure(figsize=(20,10))
for column in df.columns:
plt.plot(df[column].values)
plt.gca().legend(df.columns)
break
final_df = pd.DataFrame()
rows = []
classes = [c for c in dataframes[0].columns if 'site_meter' not in c]
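# For each 5-minute chunk, compute summary statistics of the two mains as features and a binary on/off label per appliance (1 if it drew any power in that chunk)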
for df in dataframes:
attributes = {
'mean_1': df['site_meter_1'].mean(),
'std_1': df['site_meter_1'].std(),
'max_1': df['site_meter_1'].max(),
'min_1': df['site_meter_1'].min(),
'sum_1': df['site_meter_1'].sum(),
'mean_2': df['site_meter_2'].mean(),
'std_2': df['site_meter_2'].std(),
'max_2': df['site_meter_2'].max(),
'min_2': df['site_meter_2'].min(),
'sum_2': df['site_meter_2'].sum()
}
labels = {}
for c in classes:
labels[c] = 1 if df[c].sum() > 0 else 0
final_df = final_df.append({**attributes, **labels}, ignore_index=True)
final_df = final_df[ list(attributes.keys()) + list(labels.keys()) ]
final_df.head(10)
final_df.describe()
final_df.to_csv('df_building_1_statistics_features.csv')
# TODO:
#- Validate the methodology
#- Validate the generated chunks (normalize errors)
#- Label the dataset (binary labels per appliance)
final_df[final_df.columns[:10]].head()
from skmultilearn.adapt import MLkNN
from sklearn import metrics
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
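# Multi-label baseline: MLkNN with k=3 evaluated by 5-fold cross-validation (micro-averaged F1 here, Hamming loss below)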
scores = cross_val_score(
MLkNN(k=3),
final_df[final_df.columns[:5]].values,
final_df[final_df.columns[10:]].values,
scoring = 'f1_micro',
cv=5,
n_jobs = 8
)
scores.mean()
from sklearn.metrics import make_scorer, hamming_loss
hamming_score = make_scorer(hamming_loss)
scores = cross_val_score(
MLkNN(k=3),
final_df[final_df.columns[:5]].values,
final_df[final_df.columns[10:]].values,
scoring = hamming_score,
cv=5,
n_jobs = 8
)
scores.mean()
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import train_test_split, cross_val_score
scores = cross_val_score(
RandomForestClassifier(n_estimators=1000),
final_df[final_df.columns[:5]].values,
final_df[final_df.columns[10:]].values,
scoring = hamming_score,
cv=5,
n_jobs = 8
)
scores.mean()
#dates = [str(dt).split(' ')[0] for dt in df.index]
dates = [str(time)[:10] for time in df.index.values]
dates = sorted(list(set(dates)))
print('The data for the modeled residence contains measurements for {} day(s) (from {} to {}).'.format(len(dates), dates[0], dates[-1]))
# Train, test, and validation split
df1_train = df.loc[:dates[10]]
df1_val = df.loc[dates[11]:dates[16]]
df1_test = df.loc[dates[17]:]
print('df_train.shape: ', df1_train.shape)
print('df_val.shape: ', df1_val.shape)
print('df_test.shape: ', df1_test.shape)
# Using mains 1 and 2 (independent variables) to predict the refrigerator (dependent variable)
X_train = df1_train[['Mains1','Mains2']].values
y_train = df1_train['Appliance'].values
X_test = df1_test[['Mains1','Mains2']].values
y_test = df1_test['Appliance'].values
X_val = df1_val[['Mains1','Mains2']].values
y_val = df1_val['Appliance'].values
print(
'Train: ', X_train.shape, y_train.shape, '\n',
'Test: ', X_val.shape, y_val.shape, '\n',
'Validation: ', X_test.shape, y_test.shape
)
# Regression evaluation metrics
def mse_loss(y_predict, y):
return np.mean(np.square(y_predict - y))
def mae_loss(y_predict, y):
return np.mean(np.abs(y_predict - y))
from tensorflow.keras.layers import Dense, Activation, Dropout, LSTM, Embedding
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.utils import plot_model
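# Fully connected (MLP) baseline: maps the two mains readings at each time step to the appliance power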
def build_fc_model(layers):
fc_model = Sequential()
for i in range(len(layers)-1):
#fc_model.add( Dense(input_dim=layers[i], output_dim= layers[i+1]) )#, W_regularizer=l2(0.1)) )
fc_model.add( Dense(input_shape=(layers[i],), units = layers[i+1]) )#, W_regularizer=l2(0.1)) )
fc_model.add( Dropout(0.5) )
if i < (len(layers) - 2):
fc_model.add( Activation('relu') )
fc_model.build()
fc_model.summary()
plot_model(fc_model)
return fc_model
fc_model_1 = build_fc_model([2, 256, 512, 1024, 1])
adam = Adam(lr = 1e-5)
fc_model_1.compile(loss='mean_squared_error', optimizer=adam)
start = time.time()
model_path = "./resources/mlp_fridge_h1.hdf5"
checkpointer = ModelCheckpoint(filepath=model_path, verbose=0, save_best_only=True)
hist_fc_1 = fc_model_1.fit( X_train, y_train,
batch_size=512, verbose=1, epochs=200,
validation_split=0.33, callbacks=[checkpointer])
print('Total model training time (s):', round(time.time() - start, 0))
import numpy as np
fc_model = load_model(model_path)
pred_fc = fc_model.predict(X_test).reshape(-1)
mse_loss_fc = mse_loss(pred_fc, y_test)
mae_loss_fc = mae_loss(pred_fc, y_test)
print('MSE on the test set: ', mse_loss_fc)
print('MAE on the test set:', mae_loss_fc)
train_loss = hist_fc_1.history['loss']
val_loss = hist_fc_1.history['val_loss']
def plot_losses(train_loss, val_loss):
plt.rcParams["figure.figsize"] = [24,10]
plt.title('MSE for the training and validation sets - Residence 1')
plt.plot( range(len(train_loss)), train_loss, color = 'b', alpha = 0.6, label='loss (train)' )
plt.plot( range(len( val_loss )), val_loss, color = 'r', alpha = 0.6, label='loss (validation)' )
plt.xlabel( 'epoch' )
plt.ylabel( 'loss' )
plt.legend()
plot_losses(train_loss, val_loss)
# Plotting the ACTUAL and PREDICTED refrigerator consumption over the 6 days of the test data
def plot_each_app(df, dates, predict, y_test, title, look_back = 0):
num_date = len(dates)
fig, axes = plt.subplots(num_date,1,figsize=(24, num_date*5) )
plt.suptitle(title, fontsize = '25')
fig.tight_layout()
fig.subplots_adjust(top=0.95)
for i in range(num_date):
if i == 0: l = 0
ind = df.ix[dates[i]].index[look_back:]
axes.flat[i].plot(ind, y_test[l:l+len(ind)], color = 'blue', alpha = 0.6, label = 'REAL')
axes.flat[i].plot(ind, predict[l:l+len(ind)], color = 'red', alpha = 0.6, label = 'PREVISTO')
axes.flat[i].legend()
l = len(ind)
plot_each_app(df1_test, dates[17:], pred_fc, y_test,
'FC Neural Network: Actual vs. Predicted over the 6 Days of the Test Set for Residence 1', look_back = 50)
# Try a more complex FC model
def build_lstm_model(layers):
# #fc_model.add( Dense(input_dim=layers[i], output_dim= layers[i+1]) )#, W_regularizer=l2(0.1)) )
# fc_model.add( Dense(input_shape=(layers[i],), units = layers[i+1]) )#, W_regularizer=l2(0.1)) )
# fc_model.add( Dropout(0.5) )
# if i < (len(layers) - 2):
# fc_model.add( Activation('relu') )
model = Sequential()
for i in range(len(layers) - 2):
if i == 0:
model.add(Embedding(input_dim=layers[i], output_dim=layers[i+1]))
else:
model.add(LSTM(
input_shape=(layers[i],),
units=layers[i+1],
return_sequences = True if i < len(layers) - 3 else False ))
model.add(Dropout(0.3))
model.add(Dense(layers[-1]))
model.build()
model.summary()
plot_model(model)
return model
model = build_lstm_model([2,64,128,256, 1])
# Using 50 consumption records (look-back window) to retrain the model and predict the energy consumption of each appliance
def process_data(df, dates, x_features, y_features, look_back = 50):
i = 0
for date in dates:
data = df.loc[date]
len_data = data.shape[0]
x = np.array([data[x_features].values[i:i+look_back]
for i in range(len_data - look_back) ]).reshape(-1,look_back, 2)
y = data[y_features].values[look_back:,:]
if i == 0:
X = x
Y = y
else:
X = np.append(X, x, axis=0)
Y = np.append(Y, y, axis=0)
i += 1
return X,Y
start = time.time()
X_train, y_train = process_data(df, dates[:17], ['Mains1','Mains2'], df.columns.values[2:])
X_test, y_test = process_data(df, dates[17:], ['Mains1','Mains2'], df.columns.values[2:])
print('Total execution time (s): ', time.time() - start)
print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)
start = time.time()
adam = Adam(lr = 5e-5)
lstm_model_path = "./resources/lstm_model.hdf5"
model.compile(loss='mean_squared_error', optimizer=adam)
checkpointer = ModelCheckpoint(filepath=lstm_model_path, verbose=0, save_best_only=True)
hist_lstm = model.fit(
X_train,
y_train[:,2],
batch_size=512,
verbose=1,
epochs=200,
validation_split=0.3,
callbacks=[checkpointer])
print('Training time (s): ', time.time() - start)
y_train
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can see that there are just over 500 data points.
Step2: It often helps to quickly visualize pieces of the data using histograms, scatter plots, and other simple plots.
Step3: Let's have a quick look to see if some features are more relevant than others for our problem
Step4: This is a manual version of a technique called feature selection.
Step5: The prediction at least correlates with the true price, though there are clearly some errors.
Step6: Solution
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_boston
data = load_boston()
print(data.data.shape)
print(data.target.shape)
print(data.DESCR)
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
plt.hist(data.target)
plt.xlabel('price ($1000s)')
plt.ylabel('count');
for index, feature_name in enumerate(data.feature_names):
plt.figure()
plt.scatter(data.data[:, index], data.target)
plt.ylabel('Price')
plt.xlabel(feature_name)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target)
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
expected = y_test
plt.scatter(expected, predicted)
plt.plot([0, 50], [0, 50], '--k')
plt.axis('tight')
plt.xlabel('True price ($1000s)')
plt.ylabel('Predicted price ($1000s)')
print("RMS:", np.sqrt(np.mean((predicted - expected) ** 2)))
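# Not in the original notebook: as an extra sanity check, clf.score reports the R^2 of the linear model on the held-out data
print("R^2 on test set:", clf.score(X_test, y_test))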
from sklearn.ensemble import GradientBoostingRegressor
# Instantiate the model, fit the results, and scatter in vs. out
from sklearn.ensemble import GradientBoostingRegressor
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
predicted = clf.predict(X_test)
expected = y_test
plt.scatter(expected, predicted)
plt.plot([0, 50], [0, 50], '--k')
plt.axis('tight')
plt.xlabel('True price ($1000s)')
plt.ylabel('Predicted price ($1000s)')
print("RMS:", np.sqrt(np.mean((predicted - expected) ** 2)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This data is not a part of the Movielens-1M dataset, however, it contains ratings for the same movies. You are expected to use this dataset to generate recommendations with your recommendation model.
Step2: As previously, you need to convert it into appropriate format
Step3: Important
Step4: You also have to remove gaps in user and movie indices. In standard usage scenarios this is done by the method _prepare() (and even doesn't require your intervention). However, this is not applicable in the custom setup (you want to prevent some of the actions performed by the method).
Step5: The last step is to "emulate" the splitting of the test data into the observed part and holdout
Step6: Building your model
Step7: Submitting your solution
Step8: Save you model and submit results. Note, that both upload address and the leaderbord itself have new location
|
<ASSISTANT_TASK:>
Python Code:
test_data = pd.read_csv("https://github.com/Evfro/RecSys_ISP2017/raw/master/test_data_new.gz", compression='gzip')
test_data.head()
ml_data = get_movielens_data()
data_model = RecommenderData(ml_data, 'userid', 'movieid', 'rating')
data_model._training = data_model._data #set training data to full movielens dataset
data_model._test = test_data.copy() # setting custom test data
data_model._reindex_data() # build new index of users and movies with no gaps and stores it in index.itemid attribute
data_model._test['movieid'] = data_model._test['movieid'].map(data_model.index.itemid.set_index('old').new)
data_model._test = namedtuple('TestData', 'testset evalset')._make([data_model._test, None])
svd = SVDModel(data_model)
svd.build()
recs = svd.get_recommendations()
recs.shape
recs = pd.Series(recs.ravel()).map(data_model.index.itemid.set_index('new').old).values.reshape(recs.shape)
np.savez('your-team-name', recs=recs)
import requests
files = {'upload': open('your-team-name.npz','rb')}
url = "http://isp2017.azurewebsites.net/team/upload"
r = requests.post(url, files=files)
print(r.status_code, r.reason)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ok, that looks good - if a little messy. Let's open the file with some python
Step2: Data frames are pretty cool, for example I can index the column by name.
Step3: Check for understanding
Step4: Looks like there are 9093 rows in our dataset
Step5: Hmmm... looks like there are 4 values for the sentiment of the tweets with "No emotion toward brand or product" being the most common.
Step6: Interesting, there is a label "I can't tell" along with "Positive emotion", "Negative emotion" and "No emotion toward brand or product"
Step7: Hm, it's a 3G iphone, when was that? 2010?
Step8: Ok - the ipad2 was released in 2011, these tweets must be from 2011.
Step9: It is best practice to not change the input data. It's better to clearly show the ways that you've modified your data in your code. In this case, we can use pandas to easily pull out the rows where the tweets are empty. Here we are indexing into our data frame with the results of a pd.notnull function - this notation is really convenient.
Step10: We also need to remove the same rows of labels so that our "tweets" and "target" lists have the same length.
|
<ASSISTANT_TASK:>
Python Code:
# Our data file is in ../scikit/tweet.csv
# in a Comma Separated Values format
# this command uses the shell to print out the first ten lines
!head ../scikit/tweets.csv
import pandas as pd # this loads the pandas library, a very useful data exploration library
import numpy as np # this loads numpy, a very useful numerical computing library
# Puts tweets into a data frame
df = pd.read_csv('../scikit/tweets.csv') # read the file into a pandas data frame
print(df.head()) # print the first few rows of the data frame
tweets = df['tweet_text'] # sets tweets to be the first column, titled 'tweet_text'
print(tweets.head())
print(tweets.shape) # print the shape of the variable tweets
# we make target the list of labels from the third column
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
# describe is a cool function for quick data exploration
target.describe()
target.value_counts()
tweets[0]
tweets[200]
print(tweets[6])
fixed_tweets = tweets[pd.notnull(tweets)]
fixed_target = target[pd.notnull(tweets)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Environment Preparation
Step2: Step 0
Step3: Time series forecasting using Chronos Forecaster
Step4: Initialize train, valid and test tsdataset from raw pandas dataframe.
Step5: Preprocess the datasets. Here we perform
Step6: Forecaster Step 2
Step7: Forecaster Step 3
Step8: Save & restore the forecaster.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install latest pre-release version of bigdl-chronos
# Installing bigdl-chronos from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade bigdl-chronos
!pip uninstall -y torchtext # uninstall torchtext to avoid version conflict
exit() # restart the runtime to refresh installed pkg
# download the dataset
!wget https://raw.githubusercontent.com/numenta/NAB/v1.0/data/realKnownCause/nyc_taxi.csv
# load the dataset. The downloaded dataframe contains two columns, "timestamp" and "value".
import pandas as pd
df = pd.read_csv("nyc_taxi.csv", parse_dates=["timestamp"])
from bigdl.chronos.data import TSDataset
from sklearn.preprocessing import StandardScaler
tsdata_train, tsdata_valid, tsdata_test = TSDataset.from_pandas(df, dt_col="timestamp", target_col="value",
with_split=True, val_ratio=0.1, test_ratio=0.1)
lookback, horizon = 6, 1
scaler = StandardScaler()
for tsdata in [tsdata_train, tsdata_valid, tsdata_test]:
tsdata.deduplicate()\
.impute()\
.gen_dt_feature()\
.scale(scaler, fit=(tsdata is tsdata_train))\
.roll(lookback=lookback, horizon=horizon)
from bigdl.chronos.forecaster.tcn_forecaster import TCNForecaster
x, y = tsdata_train.to_numpy()
# x.shape = (num of sample, lookback, num of input feature)
# y.shape = (num of sample, horizon, num of output feature)
forecaster = TCNForecaster(past_seq_len=lookback, # number of steps to look back
future_seq_len=horizon, # number of steps to predict
input_feature_num=x.shape[-1], # number of feature to use
output_feature_num=y.shape[-1], # number of feature to predict
seed=1)
res = forecaster.fit((x, y), epochs=3)
x_test, y_test = tsdata_test.to_numpy()
pred = forecaster.predict(x_test)
pred_unscale, groundtruth_unscale = tsdata_test.unscale_numpy(pred), tsdata_test.unscale_numpy(y_test)
import matplotlib.pyplot as plt
plt.figure(figsize=(24,6))
plt.plot(pred_unscale[:,:,0])
plt.plot(groundtruth_unscale[:,:,0])
plt.legend(["prediction", "ground truth"])
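# Optional numeric check (an addition, not in the original notebook): score the
# forecast with a standard metric, here scikit-learn's mean squared error on the
# unscaled values.
from sklearn.metrics import mean_squared_error
print("test MSE:", mean_squared_error(groundtruth_unscale[:, :, 0], pred_unscale[:, :, 0]))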
forecaster.save("nyc_taxi.fxt")
forecaster.load("nyc_taxi.fxt")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I have always been interested in investigating all the crimes directly linked to drug trafficking. Above I can see which ones those are.
Step2: Distribution of crimes by month
Step3: Loading the year 2015 for comparison
Step4: Did robberies increase in 2016? By how much?
Step5: How about checking the increase or decrease of other crimes? Below I will look at rape of vulnerable persons (art. 217)
Step6: Above we see that rapes also increased in the state of São Paulo, from 367 in 2015 to 374 in 2016, an increase of 1.90%
Step7: Over the last 2 years, most rapes occurred in May. In contrast, December is the month with the fewest occurrences
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
#carregando os dados de 2016
import pandas as pd
df = pd.read_csv('https://query.data.world/s/1ae7h2l9lvb1vapjwv9p1oga5', encoding = 'latin1')
df.head()
#Verificando tipo dados das colunas
df.info()
#Sera que temos boletins inclusive desse ano ?
df['ANO_BO'].max()
#Verificando variáveis que podem conter tipos de crimes
df['RUBRICA'].head()
#Excelente, vamos quer quantos tipos de crimes diferentes temos aqui registrados :
df['RUBRICA'].nunique()
#E agora vamos ver quais são
df['RUBRICA'].unique()
artigo_12 = df[df['RUBRICA']== 'A.I.-Tráfico de entorpecente (Art. 12)']
artigo_12.head()
#Vendo as características do novo dataframe
artigo_12.info()
artigo_12['DESDOBRAMENTO'].nunique()
artigo_12
artigo_12['ANO_BO'].max()
artigo_12['ANO_BO'].min()
df.sort_values(by='ANO_BO', ascending=True)
import matplotlib.pyplot as plt
import seaborn as sns
% matplotlib inline
sns.factorplot('MES', data= df,palette='Set1', kind='count')
sns.factorplot('RUBRICA', data= df,palette='Set1', kind='count' , size = 38)
# Como esperado, abaixo vemos que roubo é o crime mais comum
df['RUBRICA'].describe()
df2 = pd.read_csv('https://query.data.world/s/53c41jmuz5tf9cqtphr243t7a')
df2.head()
df2['RUBRICA'].describe()
print("Houve",(316548-315519), "roubos a mais no estado de São Paulo em 2016 do que 2015","um aumento de",(1029/315519)*100,"%")
estupro_2016 = df[df['RUBRICA'] == 'A.I.-Estupro de vulneravel (art.217-A)']
estupro_2016.head()
estupro_2015 = df2[df2['RUBRICA'] == 'A.I.-Estupro de vulneravel (art.217-A)']
estupro_2015.head()
estupro_2015.info()
estupro_2016.info()
print(((374/367)-1)*100,"%")
estupros_2015_2016 = [estupro_2015, estupro_2016]
estupros_2015_2016 = pd.concat(estupros_2015_2016)
estupros_2015_2016.head()
% matplotlib inline
sns.factorplot('MES', data= estupros_2015_2016,palette='Set2', kind='count', size = 5)
estupros_2015_2016['CIDADE'].value_counts()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we will create a QTable which is a quantity table. The first column is the redshift of the source, the second is the star formation rate (SFR), the third is the stellar mass and the fourth is the rotational velocity. Since it is a QTable, we can define the units for each of the entries, if we like.
Step2: When we create the table, we can assign the names to each column
Step3: We can display the table by typing the variable into the notebook
Step4: We can display the information about the table (i.e. data type, units, name)
Step5: We can view a specific column by calling is name.
Step6: We can also group subsets of columns.
Step7: We can call a specific row, in this case the first row, which shows all the entries for the first object. Remember python indexing starts with zero!
Step8: We can write to a standard format if we like; binary FITS tables are often used for this. It could be an ASCII file (either space, tab or comma delimited) or whatever your favorite sharable file format is. There is a whole list of supported formats that can be read in, many astronomy specific.
Step9: You can read it in just as easily too. As you can see, the units are stored as well, as long as you are using a QTable.
Step10: We can add new entries to the table. In this case specific star formation rate (sSFR).
Step11: Maybe we are unhappy using years and we want to convert to Gyr. There are a whole list of available units here.
Step12: If we want to estimate the H-alpha line luminosity from the SFR, we can use an empirical relation (such as the one from Kennicutt et al. 1998).
Step13: Say we want to go from erg to photons, which are both "energy". You can see it is not quite as straightforward from the following error.
Step14: In order to convert to photons/s properly, we need to check the units. Our beginning and ending units match (i.e. energy/time); we just need to account for the energy of the photon, which depends on the wavelength.
Step15: The type function is useful for determining what data type you are dealing with, especially if it is an unfamilar one.
Step16: Assuming your background is uniform (and it most certainly is not), here is a rough background subtraction.
Step17: We use DAOStarFinder to find all of the objects in the image.
Step18: Astroquery
Step19: Print the columns returned from the search.
Step20: Print the redshift of the source
Step21: Lets now search for objects around M82.
Step22: Gaia query
Step23: IRSA query of 2MASS
Step24: SDSS query
Step25: Coordinates of your favorite object, in this case M82 in degrees.
Step26: Skyview
Step27: Plot image
Step28: Plot the image in RA and Declination coordinate space, instead of pixels. The WCS (or World Coordinate System) in a FITS file has a set of keywords that define the coordinate system.
Step29: Use the FITS header to define the WCS of the image.
|
<ASSISTANT_TASK:>
Python Code:
from astropy.table import QTable
import astropy.units as u
import numpy as np
a = [0.10, 0.15, 0.2]
b = [10.0, 2.0, 100.0] * u.M_sun / u.yr
c = [1e10, 1e9, 1e11] * u.M_sun
d = [150., 100., 2000.] * u.km / u.s
t = QTable([a, b, c, d],
names=('redshift', 'sfr', 'stellar_mass', 'velocity'))
t
t.info
t['sfr']
t['sfr','velocity']
t[0]
t.write('my_table.fits', overwrite=True)
all_targets = QTable.read("my_table.fits")
all_targets
t['ssfr'] = t['sfr'] / t['stellar_mass']
t
t['ssfr'].to(1./u.gigayear)
t['lum'] = (t['sfr'] * u.erg / u.s )/(7.9e-42 * u.Msun / u.yr)
t
(t['lum']).to(u.ph / u.s)
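# A minimal sketch of the proper conversion (assumption: H-alpha rest wavelength
# of 6563 Angstrom). Dividing the luminosity by the energy carried by one photon,
# obtained with the u.spectral() equivalency, gives photons per second.
e_photon = (6563 * u.AA).to(u.erg, equivalencies=u.spectral())
(t['lum'] / e_photon * u.ph).to(u.ph / u.s)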
import numpy as np
from photutils import datasets
hdu = datasets.load_star_image()
image = hdu.data[500:700, 500:700].astype(float)
head = hdu.header
print(type(hdu))
print(type(image))
print(type(head))
image -= np.median(image)
from photutils import DAOStarFinder
from astropy.stats import mad_std
bkg_sigma = mad_std(image)
daofind = DAOStarFinder(fwhm=4., threshold=3.*bkg_sigma)
sources = daofind(image)
for col in sources.colnames: sources[col].info.format = '%.8g' # for consistent table output
print(sources[:10])
print(sources.info)
from photutils import aperture_photometry, CircularAperture
positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
apertures = CircularAperture(positions, r=4.)
phot_table = aperture_photometry(image, apertures)
for col in phot_table.colnames: phot_table[col].info.format = '%.8g' # for consistent table output
print(phot_table[:10])
print(phot_table.info)
import matplotlib.pyplot as plt
plt.imshow(image, cmap='gray_r', origin='lower')
apertures.plot(color='blue', lw=1.5, alpha=0.5)
print(head.keys())
from astroquery.ned import Ned
from astropy import coordinates
import astropy.units as u
result_table = Ned.query_object("m82")
print(result_table)
print(result_table.keys())
print(result_table['Redshift'])
result_table = Ned.query_region("m82", radius=1 * u.arcmin)
print(result_table)
from astroquery.gaia import Gaia
coord = coordinates.SkyCoord(ra=280, dec=-60, unit=(u.degree, u.degree), frame='icrs')
radius = u.Quantity(1.0, u.arcmin)
j = Gaia.cone_search_async(coord, radius)
r = j.get_results()
r.pprint()
print(r.keys())
from astroquery.irsa import Irsa
Irsa.list_catalogs()
table = Irsa.query_region("m82", catalog="fp_psc", spatial="Cone",radius=10 * u.arcmin)
table
table.keys()
from astroquery.sdss import SDSS
ra, dec = 148.969687, 69.679383
co = coordinates.SkyCoord(ra=ra, dec=dec,unit=(u.deg, u.deg), frame='fk5')
xid = SDSS.query_region(co, radius=10 * u.arcmin)
# print the first 10 entries
print(xid[:10])
print(xid.keys())
print(xid['ra','dec'][:10]) # print the first 10 entries
from astroquery.skyview import SkyView
SkyView.list_surveys()
pflist = SkyView.get_images(position='M82', survey=['SDSSr'],radius=10 * u.arcmin)
ext = 0
pf = pflist[0] # first element of the list, might need a loop if multiple images
m82_image = pf[ext].data
ax = plt.subplot()
ax.imshow(m82_image, cmap='gray_r', origin='lower', vmin=-10, vmax=20)
ax.set_xlabel('X (pixels)')
ax.set_ylabel('Y (pixels)')
from astropy.wcs import WCS
head = pf[ext].header
wcs = WCS(head)
ax = plt.subplot(projection=wcs)
ax.imshow(m82_image, cmap='gray_r', origin='lower', vmin=-10, vmax=20)
#ax.grid(color='white', ls='solid')
ax.set_xlabel('Right Ascension (J2000)')
ax.set_ylabel('Declination (J2000)')
#ax.scatter(xid['ra'],xid['dec'],marker="o",s=50,transform=ax.get_transform('fk5'),edgecolor='b', facecolor='none')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parameters
Step2: Original Sequence Analysis
Step3: D-Markov Machines
Step4: D-Markov Machine Analysis
Step5: Clustering
Step6: Graph Minimization
Step7: DCGraM Analysis
Step8: Plots
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import yaml
import sequenceanalyzer as sa
# local project modules used further below (D-Markov creation, clustering, graph minimization)
import dmarkov
import clustering
import graphmin
name = 'ternary_even_shift'
tag = 'v1'
import os
if not os.path.exists(name):
os.makedirs(name)
os.makedirs(name + '/sequences')
os.makedirs(name + '/pfsa')
os.makedirs(name + '/results')
os.makedirs(name + '/results/probabilities')
os.makedirs(name + '/results/probabilities/conditional')
os.makedirs(name + '/results/cond_entropies')
os.makedirs(name + '/results/kldivergences')
os.makedirs(name + '/results/autocorrelations')
os.makedirs(name + '/results/prob_distances')
os.makedirs(name + '/results/plots')
N = 10000000
drange = range(4,11)
a = 20
#Open original sequence from yaml file
with open(name + '/sequences/original_len_' + str(N) + '_' + tag + '.yaml', 'r') as f:
X = yaml.load(f)
#Value up to which results are computed
d_max = drange[-1]
#Initialization of variables:
p = None
p_cond = None
#Compute subsequence probabilities of occurrence up to length d_max
p, alphabet = sa.calc_probs(X, d_max)
with open(name + '/results/probabilities/original_' + tag + '.yaml', 'w') as f:
yaml.dump(p, f)
with open(name + '/alphabet.yaml', 'w') as f:
yaml.dump(alphabet, f)
#If p has been previously computed, use this cell to load the values
if not p:
with open(name + '/results/probabilities/original_' + tag + '.yaml', 'r') as f:
p = yaml.load(f)
with open(name + '/alphabet.yaml', 'r') as f:
alphabet = yaml.load(f)
#Compute conditional probabilities of subsequences occurring after given each symbol of the alphabet
#One of the two previous cells needs to be executed first.
if p:
p_cond = sa.calc_cond_probs(p, alphabet, d_max - 1)
with open(name + '/results/probabilities/conditional/original_' + tag + '.yaml', 'w') as f:
yaml.dump(p_cond, f)
else:
print("Run a cell that either computes or opens the probabilities.")
#If p_cond has been previously computed, use this cell to load the values
if not p_cond:
with open(name + '/results/probabilities/conditional/original_' + tag + '.yaml', 'r') as f:
p_cond = yaml.load(f)
#Compute conditional entropy
if p and p_cond:
h = sa.calc_cond_entropy(p, p_cond, d_max)
h.to_csv(name + '/results/cond_entropies/original_' + tag + '.csv')
else:
print("Run the conditional probabilities cell first.")
#If p_cond has been previously computed, use this cell to load the values
if not h:
h = pd.read_csv(name + '/results/cond_entropies/original_' + tag + '.csv')
#Compute autocorrelation
aut = sa.calc_autocorr(X, a)
aut.to_csv(name + '/results/autocorrelations/original_' + tag + '.csv')
#If aut has been previously computed, use this cell to load the values
if not aut:
aut = pd.read_csv(name + '/results/autocorrelations/original_' + tag + '.csv')
dmark_machines = []
#If the D-Markov machines have not been previously created, generate them with this cell
for D in list(map(str,drange)):
dmark_machines.append(dmarkov.create(p_cond, D))
dmark_machines[-1].to_csv(name + '/pfsa/dmarkov_D' + D + '_' + tag + '.csv')
#On the other hand, if there already are D-Markov machines, load them with this cell
if not dmark_machines:
for D in drange:
dmark_machines.append(pd.read_csv(name + '/pfsa/dmarkov_D' + D + '_' + tag + '.csv'))
dmark_seqs = []
#Generate sequences:
count = 0
for machine in dmark_machines:
seq = machine.generate_sequence(N)
with open(name + '/sequences/dmarkov_D' + str(drange[count]) + '_' + tag + '.yaml', 'w') as f:
yaml.dump(seq, f)
dmark_seqs.append(seq)
count += 1
#If the sequences have been previously generated, load them here:
if not dmark_seqs:
for D in list(map(str,drange)):
        with open(name + '/sequences/dmarkov_D' + D + '_' + tag + '.yaml', 'r') as f:
dmark_seqs.append(yaml.load(f))
#Compute subsequence probabilities of occurrence of the D-Markov sequences
count = 0
p_dmark = []
for seq in dmark_seqs:
p_dm, alphabet = sa.calc_probs(seq, d_max)
p_dm.to_csv(name + '/results/probabilities/dmarkov_D'+ str(drange[count]) + '_' + tag + '.csv')
p_dmark.append(p_dm)
count += 1
#If p_dmark has been previously computed, use this cell to load the values
if not p_dmark:
for D in list(map(str,drange)):
p_dm = pd.read_csv(name + '/results/probabilities/dmarkov_D' + D + '_' + tag + '.csv')
p_dmark.append(p_dm)
with open(name + '/alphabet.yaml', 'r') as f:
alphabet = yaml.load(f)
#Compute conditional probabilities of subsequences occurring after given each symbol of the alphabet
#One of the two previous cells needs to be executed first.
p_cond_dmark = []
count = 0
if p_dmark:
for p_dm in p_dmark:
p_cond_dm = sa.calc_cond_probs(p_dm, alphabet, d_max)
p_cond_dm.to_csv(name + '/results/probabilities/conditional/dmarkov_D' + str(drange[count]) + '_' + tag + '.csv')
p_cond_dmark.append(p_cond_dm)
count += 1
else:
print("Run a cell that either computes or opens the probabilities.")
#If p_cond has been previously computed, use this cell to load the values
if not p_cond_dmark:
for D in list(map(str,drange)):
p_cond_dmark.append(pd.read_csv(name + '/results/probabilities/conditional/dmarkov_D' + D + '_' + tag + '.csv'))
#Compute conditional entropy
count = 0
h_dmark = []
if p_dmark and p_cond_dmark:
for p_dm in p_dmark:
h_dm = sa.calc_cond_entropy(p_dm, p_cond_dmark[count], d_max)
h_dm.to_csv(name + '/results/cond_entropies/dmarkov_D' + str(drange[count]) + '_' + tag + '.csv')
h_dmark.append(h_dm)
count += 1
else:
print("Run the conditional probabilities cell first.")
#If h_dmark has been previously computed, use this cell to load the values
if not h_dmark:
for D in list(map(str,drange)):
h_dmark.append(pd.read_csv(name + '/results/cond_entropies/dmarkov_D' + D + '_' + tag + '.csv'))
#Compute autocorrelation
aut_dmark = []
count = 0
for dseq in dmark_seqs:
aut_dm = sa.calc_autocorr(dseq, a)
aut_dm.to_csv(name + '/results/autocorrelations/dmarkov_D' + str(drange[count]) + '_' + tag + '.csv')
aut_dmark.append(aut_dm)
count += 1
#If aut has been previously computed, use this cell to load the values
if not aut_dmark:
for D in list(map(str,drange)):
aut_dmark.append(pd.read_csv(name + '/results/autocorrelations/dmarkov_D' + D + '_' + tag + '.csv'))
#Compute the Kullback-Leibler Divergence between the sequences generated by the D-Markov Machines and the original
#sequence.
kld_dmark = []
for dseq in dmark_seqs:
kld_dm = sa.calc_kld(dseq, X, d_max)
kld_dmark.append(kld_dm)
pd.DataFrame(kld_dmark).to_csv(name + '/results/kldivergences/dmarkov_' + tag + '.csv')
#If the D-Markov Kullback-Leibler divergence has been previously computed, use this cell to load the values
if not kld_dmark:
kld_dmark = pd.read_csv(name + '/results/kldivergences/dmarkov_' + tag + '.csv')
#Compute the Probability Distances between the sequences generated by the D-Markov Machines and the original
#sequence.
pdist_dmark = []
for p_dm in p_dmark:
pdist_dm = sa.calc_pdist(p_dm, p, d_max)
pdist_dmark.append(pdist_dm)
pd.DataFrame(pdist_dmark).to_csv(name + '/results/prob_distances/dmarkov_' + tag + '.csv')
#If the Probability Distances of the D-Markov Machines have been previously computed, load them with this cell.
if not pdist_dmark:
pdist_dmark = pd.read_csv(name + '/results/prob_distances/dmarkov_' + tag + '.csv')
clustered = []
K = 4
for machine in dmark_machines:
clustered.append(clustering.kmeans_kld(machine, K))
dcgram_machines = []
for ini_part in clustered:
    dcgram_machines.append(graphmin.moore(ini_part))
dcgram_seqs = []
#Generate sequences:
count = 0
for machine in dcgram_machines:
seq = machine.generate_sequence(N)
with open(name + '/sequences/dcgram_D' + str(drange[count]) + '_' + tag + '.yaml', 'w') as f:
yaml.dump(seq, f)
dcgram_seqs.append(seq)
count += 1
#If the sequences have been previously generated, load them here:
if not dcgram_seqs:
for D in list(map(str,drange)):
        with open(name + '/sequences/dcgram_D' + D + '_' + tag + '.yaml', 'r') as f:
dcgram_seqs.append(yaml.load(f))
#Compute subsequence probabilities of occurrence of the DCGraM sequences
count = 0
p_dcgram = []
for seq in dcgram_seqs:
p_dc, alphabet = sa.calc_probs(seq, d_max)
p_dc.to_csv(name + '/results/probabilities/dcgram_D'+ str(drange[count]) + '_' + tag + '.csv')
p_dcgram.append(p_dc)
count += 1
#If p_dcgram has been previously computed, use this cell to load the values
if not p_dcgram:
for D in list(map(str,drange)):
p_dc = pd.read_csv(name + '/results/probabilities/dcgram_D' + D + '_' + tag + '.csv')
        p_dcgram.append(p_dc)
with open(name + '/alphabet.yaml', 'r') as f:
alphabet = yaml.load(f)
#Compute conditional probabilities of subsequences occurring after given each symbol of the alphabet
#One of the two previous cells needs to be executed first.
p_cond_dcgram = []
count = 0
if p_dcgram:
for p_dc in p_dcgram:
p_cond_dc = sa.calc_cond_probs(p_dc, alphabet, d_max)
p_cond_dc.to_csv(name + '/results/probabilities/conditional/dcgram_D' + str(drange[count]) + '_' + tag + '.csv')
p_cond_dcgram.append(p_cond_dc)
count += 1
else:
print("Run a cell that either computes or opens the probabilities.")
#If p_cond_dcgram has been previously computed, use this cell to load the values
if not p_cond_dcgram:
for D in list(map(str,drange)):
p_cond_dcgram.append(pd.read_csv(name + '/results/probabilities/conditional/dcgram_D' + D + '_' + tag + '.csv'))
#Compute conditional entropy
count = 0
h_dcgram = []
if p_dcgram and p_cond_dcgram:
for p_dc in p_dcgram:
h_dc = sa.calc_cond_entropy(p_dc, p_cond_dcgram[count], d_max)
h_dc.to_csv(name + '/results/cond_entropies/dcgram_D' + str(drange[count]) + '_' + tag + '.csv')
h_dcgram.append(h_dc)
count += 1
else:
print("Run the conditional probabilities cell first.")
#If h_dcgram has been previously computed, use this cell to load the values
if not h_dcgram:
for D in list(map(str,drange)):
h_dcgram.append(pd.read_csv(name + '/results/cond_entropies/dcgram_D' + D + '_' + tag + '.csv'))
#Compute autocorrelation
aut_dcgram = []
count = 0
for dcseq in dcgram_seqs:
aut_dc = sa.calc_autocorr(dcseq, a)
aut_dc.to_csv(name + '/results/autocorrelations/dcgram_D' + str(drange[count]) + '_' + tag + '.csv')
aut_dcgram.append(aut_dc)
count += 1
#If aut has been previously computed, use this cell to load the values
if not aut_dcgram:
for D in list(map(str,drange)):
        aut_dcgram.append(pd.read_csv(name + '/results/autocorrelations/dcgram_D' + D + '_' + tag + '.csv'))
#Compute the Kullback-Leibler Divergence between the sequences generated by the DCGraM Machines and the original
#sequence.
kld_dcgram = []
for dcseq in dcgram_seqs:
kld_dc = sa.calc_kld(dcseq, X, d_max)
kld_dcgram.append(kld_dc)
pd.DataFrame(kld_dcgram).to_csv(name + '/results/kldivergences/dcgram_' + tag + '.csv')
#If the DCGraM Kullback-Leibler divergence has been previously computed, use this cell to load the values
if not kld_dcgram:
kld_dcgram = pd.read_csv(name + '/results/kldivergences/dcgram_' + tag + '.csv')
#Compute the Probability Distances between the sequences generated by the DCGraM Machines and the original
#sequence.
pdist_dcgram = []
for p_dc in p_dcgram:
pdist_dc = sa.calc_pdist(p_dc, p, d_max)
pdist_dcgram.append(pdist_dc)
pd.DataFrame(pdist_dcgram).to_csv(name + '/results/prob_distances/dcgram_' + tag + '.csv')
#If the Probability Distances of the DCGraM Machines have been previously computed, load them with this cell.
if not pdist_dcgram:
pdist_dcgram = pd.read_csv(name + '/results/prob_distances/dcgram_' + tag + '.csv')
#initialization
import matplotlib.pyplot as plt
#Labels to be used in the plots' legends
labels = ['D-Markov Machines, D from ' + str(drange[0]) + ' to ' + str(d_max),
'DCGraM Machines, D from ' + str(drange[0]) + ' to ' + str(d_max),
'Original Sequence Baseline']
#Obtaining number of states of the machines to be used in the x-axis:
states_dmarkov = []
for dm in dmark_machines:
states_dmarkov.append(dm.shape[0])
states_dcgram = []
for dc in dcgram_machines:
states_dcgram.append(dc.shape[0])
states = [states_dmarkov, states_dcgram]
#Conditional Entropy plots
H = 10
h_dmark_curve = []
for h_dm in h_dmark:
h_dmark_curve.append(h_dm[H])
plt.semilogx(states[0], h_dmark_curve, marker='o', label=labels[0])
h_dcgram_curve = []
for h_dc in h_dcgram:
h_dcgram_curve.append(h_dc[H])
plt.semilogx(states[1], h_dcgram_curve, marker='x', label=labels[1])
#Opening original sequence baseline:
h_base = h[H]
plt.axhline(y=h_base, color='k', linewidth = 3, label=labels[2])
plt.xlabel('Number of States', fontsize=16)
plt.ylabel('$h_' + str(H) + '$', fontsize=16)
plt.legend(loc='upper right', shadow=False, fontsize='large')
plt.title('Conditional Entropy',fontsize=18,weight='bold')
plt.savefig(name + '/results/plots/conditional_entropy_' + tag + '.eps' , bbox_inches='tight', format='eps',dpi=1000)
plt.show()
#Kullback-Leibler plots
plt.semilogx(states[0], kld_dmark, marker='o', label=labels[0])
plt.semilogx(states[1], kld_dcgram, marker='x', label=labels[1])
plt.xlabel('Number of States', fontsize=16)
plt.ylabel('$k_' + str(H) + '$', fontsize=16)
plt.legend(loc='upper right', shadow=False, fontsize='large')
plt.title('Kullback-Leibler Divergence',fontsize=18,weight='bold')
plt.savefig(name + '/results/plots/kldivergence_' + tag + '.eps' , bbox_inches='tight', format='eps',dpi=1000)
plt.show()
#Probability Distance plots
plt.semilogx(states[0], pdist_dmark, marker='o', label=labels[0])
plt.semilogx(states[1], pdist_dcgram, marker='x', label=labels[1])
plt.xlabel('Number of States', fontsize=16)
plt.ylabel('$P_' + str(H) + '$', fontsize=16)
plt.legend(loc='upper right', shadow=False, fontsize='large')
plt.title('Probability Distance',fontsize=18,weight='bold')
plt.savefig(name + '/results/plots/prob_distance_' + tag + '.eps' , bbox_inches='tight', format='eps',dpi=1000)
plt.show()
#TODO: Think how to have good plots for autocorrelation
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Direct problem solving
Step2: Optimization theory tells us that we should differentiate this function and set the result equal to 0 to find the local maximum. We obtain
Step3: Problem solving with heuristic methods
Step4: Naturally, it is easier to wake up when the alertness level is higher and the feeling of rest is greater. On the other hand, if waking is forced (for example, by an alarm clock) at a point in the cycle where sleep is very deep, we wake up groggy and tired. Rest will evolve as follows
Step5: This function is not even differentiable, so how can we find the optimum? A very simple heuristic method would be to sample the function with sufficient precision and pick the point corresponding to the highest level of rest
Step6: The optimum turns out to be
Step7: This is a very simple and very effective heuristic method, but it is not very efficient. To obtain an optimum with the desired precision we evaluated 241 samples, of which we kept only one. In other words, we discarded 99.6% of the proposed solutions.
Step8: The genome is where the information used to build a solution is encoded. As long as it stays consistent, the genome can be defined however is needed
Step9: We will need to create random individuals, for which we will use a "Generate genome" function
Step10: Once we have our individuals, with their genetic material, we will need a function that deciphers this code and translates it into recognizable traits of the individual. In the real world this would be like reading the "eye color gene" (in reality several genes) to discover that the content of that particular gene indicates that the individual has "blue eyes".
Step11: In the natural world, the traits of an individual largely determine how well or badly it will get on in life, that is, its "performance". If it has longer legs, it can run faster; if it has sharp teeth, it can defend itself better... In our example this is nothing more and nothing less than the vertical axis of our rest function. We will also add a "performance" whose value is the time spent resting, which can be useful for evaluating the quality of the solution, as we will see later.
Step12: The better its performance, the fitter and better adapted the individual will be. In our problem this means defining what a good solution is, and here lies one of the most powerful weapons of the genetic algorithm. The genetic algorithm forces us to define, through a function, what we are looking for in our optimization process.
Step13: Now we can generate our solutions and we have the means to evaluate them. It is time to let natural selection do its work.
Step15: Now, the individuals of this society reproduce among themselves, generating new individuals. The fittest individuals are more likely to reproduce than the least fit, so the best solutions produce more offspring. During conception, the new individuals undergo small random mutations. Then they are born and join the society.
Step16: In the natural world, the scarcity of resources forces individuals to compete. A tournament function picks pairs of solutions at random, compares them and removes the least fit.
Step17: Let us now watch, animated, how the population evolves
Step18: We see that we reach the result with far fewer evaluations of the function than the most basic heuristic algorithm (144 versus 241). We have doubled the efficiency for this 1D function, but the gains are more dramatic the higher the dimension of the problem being optimized.
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
import numpy as np
import pandas as pd
import random as random
from scipy.interpolate import interp1d
from IPython.html.widgets import interact
import matplotlib.pyplot as plt
%matplotlib inline
num_hora = 20
horas = np.linspace(0, 12, num_hora*12+1)
descanso = [(tiempo**3 - 50*tiempo**2 + 600*tiempo)/2200 for tiempo in horas]
with plt.xkcd():
plt.figure(figsize=(15, 5))
plt.plot(horas, descanso)
plt.xlabel("$horas$", fontsize=20)
plt.ylabel("$descanso$", fontsize=20)
hora_max = 7.85
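# Where 7.85 comes from: differentiating (t**3 - 50*t**2 + 600*t)/2200 and solving
# 3*t**2 - 100*t + 600 = 0; the root inside the [0, 12] hour range is the local maximum.
print(np.roots([3, -100, 600]))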
with plt.xkcd():
plt.figure(figsize=(15, 5))
plt.plot(horas, descanso)
plt.vlines(hora_max, 0, 1, colors = 'r' ,linestyles = 'dashed')
plt.xlabel("$horas$", fontsize=20)
plt.ylabel("$descanso$", fontsize=20)
vigilia = []
vigilia += 5 * [1]
vigilia += 10 * [0.6]
vigilia += 5 * [0.4]
vigilia += 10 * [0.2]
vigilia += 20 * [0]
vigilia += 5 * [0.2]
vigilia += 5 * [0.4]
vigilia += 5 * [0.6]
vigilia += 5 * [0.4]
vigilia += 5 * [0.2]
vigilia += 10 * [0]
vigilia += 5 * [0.2]
vigilia += 5 * [0.4]
vigilia += 10 * [0.6]
vigilia += 5 * [0.4]
vigilia += 5 * [0.2]
vigilia += 5 * [0]
vigilia += 5 * [0.2]
vigilia += 5 * [0.4]
vigilia += 20 * [0.7]
vigilia += 5 * [0.4]
vigilia += 5 * [0.2]
vigilia += 5 * [0.4]
vigilia += 40 * [0.8]
vigilia += 5 * [0.4]
vigilia += 5 * [0.2]
vigilia += 5 * [0.4]
vigilia += 21 * [0.8]
plt.figure(figsize=(15,5))
plt.plot(horas, vigilia)
plt.title('$Evolución \, de \, la \, alerta \, durante \, el \, sueño$', fontsize=20)
plt.xlabel("$horas$", fontsize=15)
descanso_ciclos = list(np.array(vigilia)*np.array(descanso))
descanso_nuevo = interp1d (horas, descanso_ciclos)
with plt.xkcd():
plt.figure(figsize=(15,5))
plt.plot(horas, descanso_nuevo(horas))
plt.xlabel("$horas$", fontsize=20)
plt.ylabel("$descanso$", fontsize=20)
data = {
'Tiempo de descanso (horas)' : horas,
'Nivel de descanso' : descanso_nuevo(horas)
}
catalogue = pd.DataFrame(data,
columns = ['Tiempo de descanso (horas)', 'Nivel de descanso' ],
index = ['Muestra ' + str(i) for i in range(1, len(descanso_ciclos)+1)]
)
catalogue
index_max = descanso_ciclos.index(max(descanso_ciclos))
hora_max = horas[index_max]
catalogue[index_max : index_max+1]
with plt.xkcd():
plt.figure(figsize=(15,5))
plt.plot(horas, descanso_nuevo(horas))
plt.xlabel("$horas$", fontsize=20)
plt.ylabel("$descanso$", fontsize=20)
plt.vlines(hora_max, 0, 1, colors = 'r' ,linestyles = 'dashed')
class Individual (object):
def __init__(self, genome):
self.genome = genome
self.traits = {}
self.performances = {}
self.fitness = 0
dict_genes = {
'Horas de sueño' : 10,
}
generate_random_binary_list = lambda n: [random.randint(0,1) for b in range(1,n+1)]
def generate_genome (dict_genes):
#We first calculate the total number of bits that the genome must contain
number_of_bits = sum([dict_genes[trait] for trait in dict_genes])
#And we return a random genome of this length
return generate_random_binary_list(number_of_bits)
[generate_genome(dict_genes) for i in range (4)]
def calculate_traits (individual, dict_genes):
#This function must decipher the genome and return the traits of the individual.
#Normally, the genome contains binary numerical values for the different traits.
dict_traits = {}
index = 0
for trait in dict_genes:
step = 12 / (int(dict_genes[trait]*'1',2))
dict_traits[trait] = step*int(''.join(str(bit) for bit in individual.genome[index : index+dict_genes[trait]]), 2)
index += dict_genes[trait]
individual.traits = dict_traits
individuo = Individual(generate_genome(dict_genes))
calculate_traits (individuo, dict_genes)
print(individuo.genome)
print(individuo.traits)
def calculate_performances (individual):
dict_perfo = {
'Descanso' : float(descanso_nuevo (individual.traits['Horas de sueño'])),
'Tiempo' : individual.traits['Horas de sueño']
}
individual.performances = dict_perfo
individuo = Individual(generate_genome(dict_genes))
calculate_traits (individuo, dict_genes)
calculate_performances (individuo)
print(individuo.traits)
print(individuo.performances)
with plt.xkcd():
plt.figure(figsize=(15,5))
plt.plot(horas, descanso_nuevo(horas))
plt.xlabel("$horas$", fontsize=20)
plt.ylabel("$descanso$", fontsize=20)
def calculate_fitness (individual):
alpha = 1
beta = 0
individual.fitness = alpha*individual.performances['Descanso'] + beta*((12 - individual.performances['Tiempo'])/12)
individuo = Individual(generate_genome(dict_genes))
calculate_traits (individuo, dict_genes)
calculate_performances (individuo)
calculate_fitness (individuo)
print(individuo.performances)
print(individuo.fitness)
def immigration (society, target_population, dict_genes):
while len(society) < target_population:
new_individual = Individual (generate_genome (dict_genes))
calculate_traits (new_individual, dict_genes)
calculate_performances (new_individual)
calculate_fitness (new_individual)
society.append (new_individual)
society = []
immigration (society, 12, dict_genes)
plt.matshow([individual.genome for individual in society], cmap=plt.cm.gray)
plt.title('Immigration')
#This function was taken from Eli Bendersky's website
#It returns an index of a list called "weights",
#where the content of each element in "weights" is the probability of this index to be returned.
#For this function to be as fast as possible we need to pass it a list of weights in descending order.
def weighted_choice_sub(weights):
    rnd = random.random() * sum(weights)
    for i, w in enumerate(weights):
        rnd -= w
        if rnd < 0:
            return i
weights = [0.9, 0.05, 0.05]
N = 100000
lista = [weighted_choice_sub(weights) for ii in range(N)]
print( lista.count(0)/N, lista.count(1)/N, lista.count(2)/N)
def crossover (society, reproduction_rate, mutation_rate):
#First we create a list with the fitness values of every individual in the society
fitness_list = [individual.fitness for individual in society]
#We sort the individuals in the society in descending order of fitness.
society_sorted = [x for (y, x) in sorted(zip(fitness_list, society), key=lambda x: x[0], reverse=True)]
#We then create a list of relative probabilities in descending order,
#so that the fittest individual in the society has N times more chances to reproduce than the least fit,
#where N is the number of individuals in the society.
probability = [i for i in reversed(range(1,len(society_sorted)+1))]
#We create a list of weights with the probabilities of non-mutation and mutation
mutation = [1 - mutation_rate, mutation_rate]
#For every new individual to be created through reproduction:
for i in range (int(len(society) * reproduction_rate)):
#We select two parents randomly, using the list of probabilities in "probability".
father, mother = society_sorted[weighted_choice_sub(probability)], society_sorted[weighted_choice_sub(probability)]
#We randomly select two cutting points for the genome.
a, b = random.randrange(0, len(father.genome)), random.randrange(0, len(father.genome))
#And we create the genome of the child putting together the genome slices of the parents in the cutting points.
child_genome = father.genome[0:min(a,b)]+mother.genome[min(a,b):max(a,b)]+father.genome[max(a,b):]
#For every bit in the not-yet-born child, we generate a list containing
#1's in the positions where the genome must mutate (i.e. the bit must switch its value)
#and 0's in the positions where the genome must stay the same.
n = [weighted_choice_sub(mutation) for ii in range(len(child_genome))]
#This line switches the bits of the genome of the child that must mutate.
mutant_child_genome = [abs(n[i] - child_genome[i]) for i in range(len(child_genome))]
#We finally append the newborn individual to the society
newborn = Individual(mutant_child_genome)
calculate_traits (newborn, dict_genes)
calculate_performances (newborn)
calculate_fitness (newborn)
society.append(newborn)
crossover(society, 1/12, 1/16)
plt.matshow([individual.genome for individual in society], cmap=plt.cm.gray)
plt.title('Crossover')
def tournament(society, target_population):
while len(society) > target_population:
#index1, index2 = random.randrange(0, len(society)), random.randrange(0, len(society))
#if society[index1].fitness > society[index2].fitness:
# society.pop(index2)
#else:
# society.pop(index1)
fitness_list = [individual.fitness for individual in society]
society.pop(fitness_list.index(min(fitness_list)))
tournament (society, 12)
plt.matshow([individual.genome for individual in society], cmap=plt.cm.gray)
plt.title('Tournament')
def optimise_sleep (number_of_genes = 10,
size_of_society = 30,
reproduction_rate = 0.2,
mutation_rate = 0.05,
number_of_generations = 20,
size_after_tournament = 20
):
dict_genes = {'Horas de sueño' : number_of_genes}
society = []
fitness_max = []
fitness_avg = []
for generation in range(number_of_generations):
immigration (society, size_of_society, dict_genes)
fitness_max += list([max([individual.fitness for individual in society])])
fitness_avg += list([np.mean([individual.fitness for individual in society])])
crossover(society, reproduction_rate, mutation_rate)
tournament (society, size_after_tournament)
fitness_list = [individual.fitness for individual in society]
winner = society[fitness_list.index(max(fitness_list))]
with plt.xkcd():
plt.figure(figsize=(15,5))
plt.title("Óptimo a las " + str(round(winner.performances['Tiempo'], 2)) + " horas", fontsize = 15)
plt.xlabel("$horas$", fontsize=20)
plt.ylabel("$descanso$", fontsize=20)
plt.plot(horas, descanso_nuevo(horas))
plt.vlines(winner.performances['Tiempo'], 0, 1, colors = 'r' ,linestyles = 'dashed')
plt.figure(figsize=(15,5))
plt.plot(fitness_max)
plt.title('$\Phi_{max} = $' + str(round(fitness_max[-1], 5)), fontsize = 15)
plt.figure(figsize=(15,5))
plt.plot(fitness_avg)
plt.title('$\overline{\Phi}$', fontsize = 15)
optimise_sleep ()
def calculate_fitness (individual):
alpha = 1
beta = 0
individual.fitness = (alpha*individual.performances['Descanso'] + beta*((12 - individual.performances['Tiempo'])/12))/(alpha + beta)
w = interact(optimise_sleep,
number_of_genes = (1, 50, 1),
size_of_society = (1, 100, 1),
reproduction_rate = (0, 1, 0.01),
mutation_rate = (0, 1, 0.01),
number_of_generations = (1, 100, 1),
size_after_tournament = (1, 100, 1)
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
A = ['33.33', '33.33', '33.33', '33.37']
NA = np.asarray(A)
AVG = np.mean(NA.astype(float), axis = 0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
def pluck(arr):
if(len(arr) == 0): return []
evens = list(filter(lambda x: x%2 == 0, arr))
if(evens == []): return []
return [min(evens), arr.index(min(evens))]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Display mode
Step2: Display mode
Step3: Recognized Formats
Step4: Set index to DataFrame
Step5: Update cell
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from beakerx import *
pd.read_csv('../resources/data/interest-rates.csv')
table = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))
table.setAlignmentProviderForColumn('m3', TableDisplayAlignmentProvider.CENTER_ALIGNMENT)
table.setRendererForColumn("y10", TableDisplayCellRenderer.getDataBarsRenderer(False))
table.setRendererForType(ColumnType.Double, TableDisplayCellRenderer.getDataBarsRenderer(True))
table
df = pd.read_csv('../resources/data/interest-rates.csv')
df['time'] = df['time'].str.slice(0,19).astype('datetime64[ns]')
table = TableDisplay(df)
table.setStringFormatForTimes(TimeUnit.DAYS)
table.setStringFormatForType(ColumnType.Double, TableDisplayStringFormat.getDecimalFormat(4,6))
table.setStringFormatForColumn("m3", TableDisplayStringFormat.getDecimalFormat(0, 0))
table
table = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))
table
#freeze a column
table.setColumnFrozen("y1", True)
#freeze a column to the right
table.setColumnFrozenRight("y10", True)
#hide a column
table.setColumnVisible("y30", False)
table.setColumnOrder(["m3", "y1", "y5", "time", "y2"])
table
table = TableDisplay(pd.read_csv('../resources/data/interest-rates.csv'))
table.addCellHighlighter(TableDisplayCellHighlighter.getHeatmapHighlighter("m3", TableDisplayCellHighlighter.FULL_ROW))
table
beakerx.pandas_display_default()
pd.read_csv('../resources/data/interest-rates.csv')
beakerx.pandas_display_table()
pd.read_csv('../resources/data/interest-rates.csv')
TableDisplay([{'y1':4, 'm3':2, 'z2':1}, {'m3':4, 'z2':2}])
TableDisplay({"x" : 1, "y" : 2})
mapList4 = [
{"a":1, "b":2, "c":3},
{"a":4, "b":5, "c":6},
{"a":7, "b":8, "c":5}
]
display = TableDisplay(mapList4)
#set what happens on a double click
display.setDoubleClickAction(lambda row, column, tabledisplay: tabledisplay.values[row].__setitem__(column, sum(tabledisplay.values[row])))
#add a context menu item
display.addContextMenuItem("negate", lambda row, column, tabledisplay: tabledisplay.values[row].__setitem__(column, -1 * tabledisplay.values[row][column]))
display
mapList4 = [
{"a":1, "b":2, "c":3},
{"a":4, "b":5, "c":6},
{"a":7, "b":8, "c":5}
]
display = TableDisplay(mapList4)
#set what happens on a double click
display.setDoubleClickAction("runDoubleClick")
display
print("runDoubleClick fired")
df = pd.read_csv('../resources/data/interest-rates.csv')
df.set_index(['m3'])
df = pd.read_csv('../resources/data/interest-rates.csv')
df.index = df['time']
df
dataToUpdate = [
{'a':1, 'b':2, 'c':3},
{'a':4, 'b':5, 'c':6},
{'a':7, 'b':8, 'c':9}
]
tableToUpdate = TableDisplay(dataToUpdate)
tableToUpdate
tableToUpdate.values[0][0] = 99
tableToUpdate.sendModel()
tableToUpdate.updateCell(2,"c",121)
tableToUpdate.sendModel()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download and prepare the MS-COCO dataset
Step2: Optionally, limit the size of the training set for faster training
Step3: Preprocess the images using InceptionV3
Step4: Initialize InceptionV3 and load the pretrained Imagenet weights
Step5: Caching the features extracted from InceptionV3
Step6: Preprocess and tokenize the captions
Step7: Split the data into training and testing
Step8: Our images and captions are ready! Next, let's create a tf.data dataset to use for training our model.
Step9: Model
Step10: Training
Step11: Caption!
Step12: Try it on your own images
|
<ASSISTANT_TASK:>
Python Code:
# Import TensorFlow and enable eager execution
# This code requires TensorFlow version >=1.9
import tensorflow as tf
tf.enable_eager_execution()
# We'll generate plots of attention in order to see which parts of an image
# our model focuses on during captioning
import matplotlib.pyplot as plt
# Scikit-learn includes many helpful utilities
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import re
import numpy as np
import os
import time
import json
from glob import glob
from PIL import Image
import pickle
annotation_zip = tf.keras.utils.get_file('captions.zip',
cache_subdir=os.path.abspath('.'),
origin = 'http://images.cocodataset.org/annotations/annotations_trainval2014.zip',
extract = True)
annotation_file = os.path.dirname(annotation_zip)+'/annotations/captions_train2014.json'
name_of_zip = 'train2014.zip'
if not os.path.exists(os.path.abspath('.') + '/' + name_of_zip):
image_zip = tf.keras.utils.get_file(name_of_zip,
cache_subdir=os.path.abspath('.'),
origin = 'http://images.cocodataset.org/zips/train2014.zip',
extract = True)
PATH = os.path.dirname(image_zip)+'/train2014/'
else:
PATH = os.path.abspath('.')+'/train2014/'
# read the json file
with open(annotation_file, 'r') as f:
annotations = json.load(f)
# storing the captions and the image name in vectors
all_captions = []
all_img_name_vector = []
for annot in annotations['annotations']:
caption = '<start> ' + annot['caption'] + ' <end>'
image_id = annot['image_id']
full_coco_image_path = PATH + 'COCO_train2014_' + '%012d.jpg' % (image_id)
all_img_name_vector.append(full_coco_image_path)
all_captions.append(caption)
# shuffling the captions and image_names together
# setting a random state
train_captions, img_name_vector = shuffle(all_captions,
all_img_name_vector,
random_state=1)
# selecting the first 30000 captions from the shuffled set
num_examples = 30000
train_captions = train_captions[:num_examples]
img_name_vector = img_name_vector[:num_examples]
len(train_captions), len(all_captions)
def load_image(image_path):
img = tf.read_file(image_path)
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.resize_images(img, (299, 299))
img = tf.keras.applications.inception_v3.preprocess_input(img)
return img, image_path
image_model = tf.keras.applications.InceptionV3(include_top=False,
weights='imagenet')
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
# getting the unique images
encode_train = sorted(set(img_name_vector))
# feel free to change the batch_size according to your system configuration
image_dataset = tf.data.Dataset.from_tensor_slices(
encode_train).map(load_image).batch(16)
for img, path in image_dataset:
batch_features = image_features_extract_model(img)
batch_features = tf.reshape(batch_features,
(batch_features.shape[0], -1, batch_features.shape[3]))
for bf, p in zip(batch_features, path):
path_of_feature = p.numpy().decode("utf-8")
np.save(path_of_feature, bf.numpy())
# This will find the maximum length of any caption in our dataset
def calc_max_length(tensor):
return max(len(t) for t in tensor)
# The steps above are a general process of dealing with text processing
# choosing the top 5000 words from the vocabulary
top_k = 5000
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=top_k,
oov_token="<unk>",
filters='!"#$%&()*+.,-/:;=?@[\]^_`{|}~ ')
tokenizer.fit_on_texts(train_captions)
train_seqs = tokenizer.texts_to_sequences(train_captions)
tokenizer.word_index = {key:value for key, value in tokenizer.word_index.items() if value <= top_k}
# putting <unk> token in the word2idx dictionary
tokenizer.word_index[tokenizer.oov_token] = top_k + 1
tokenizer.word_index['<pad>'] = 0
# creating the tokenized vectors
train_seqs = tokenizer.texts_to_sequences(train_captions)
# creating a reverse mapping (index -> word)
index_word = {value:key for key, value in tokenizer.word_index.items()}
# padding each vector to the max_length of the captions
# if the max_length parameter is not provided, pad_sequences calculates that automatically
cap_vector = tf.keras.preprocessing.sequence.pad_sequences(train_seqs, padding='post')
# calculating the max_length
# used to store the attention weights
max_length = calc_max_length(train_seqs)
# Create training and validation sets using 80-20 split
img_name_train, img_name_val, cap_train, cap_val = train_test_split(img_name_vector,
cap_vector,
test_size=0.2,
random_state=0)
len(img_name_train), len(cap_train), len(img_name_val), len(cap_val)
# feel free to change these parameters according to your system's configuration
BATCH_SIZE = 64
BUFFER_SIZE = 1000
embedding_dim = 256
units = 512
vocab_size = len(tokenizer.word_index)
# shape of the vector extracted from InceptionV3 is (64, 2048)
# these two variables represent that
features_shape = 2048
attention_features_shape = 64
# loading the numpy files
def map_func(img_name, cap):
img_tensor = np.load(img_name.decode('utf-8')+'.npy')
return img_tensor, cap
dataset = tf.data.Dataset.from_tensor_slices((img_name_train, cap_train))
# using map to load the numpy files in parallel
# NOTE: Be sure to set num_parallel_calls to the number of CPU cores you have
# https://www.tensorflow.org/api_docs/python/tf/py_func
dataset = dataset.map(lambda item1, item2: tf.py_func(
map_func, [item1, item2], [tf.float32, tf.int32]), num_parallel_calls=8)
# shuffling and batching
dataset = dataset.shuffle(BUFFER_SIZE)
# https://www.tensorflow.org/api_docs/python/tf/contrib/data/batch_and_drop_remainder
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(1)
def gru(units):
# If you have a GPU, we recommend using the CuDNNGRU layer (it provides a
# significant speedup).
if tf.test.is_gpu_available():
return tf.keras.layers.CuDNNGRU(units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
else:
return tf.keras.layers.GRU(units,
return_sequences=True,
return_state=True,
recurrent_activation='sigmoid',
recurrent_initializer='glorot_uniform')
class BahdanauAttention(tf.keras.Model):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, features, hidden):
# features(CNN_encoder output) shape == (batch_size, 64, embedding_dim)
# hidden shape == (batch_size, hidden_size)
# hidden_with_time_axis shape == (batch_size, 1, hidden_size)
hidden_with_time_axis = tf.expand_dims(hidden, 1)
# score shape == (batch_size, 64, hidden_size)
score = tf.nn.tanh(self.W1(features) + self.W2(hidden_with_time_axis))
# attention_weights shape == (batch_size, 64, 1)
# we get 1 at the last axis because we are applying score to self.V
attention_weights = tf.nn.softmax(self.V(score), axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * features
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
class CNN_Encoder(tf.keras.Model):
# Since we have already extracted the features and dumped it using pickle
# This encoder passes those features through a Fully connected layer
def __init__(self, embedding_dim):
super(CNN_Encoder, self).__init__()
# shape after fc == (batch_size, 64, embedding_dim)
self.fc = tf.keras.layers.Dense(embedding_dim)
def call(self, x):
x = self.fc(x)
x = tf.nn.relu(x)
return x
class RNN_Decoder(tf.keras.Model):
def __init__(self, embedding_dim, units, vocab_size):
super(RNN_Decoder, self).__init__()
self.units = units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = gru(self.units)
self.fc1 = tf.keras.layers.Dense(self.units)
self.fc2 = tf.keras.layers.Dense(vocab_size)
self.attention = BahdanauAttention(self.units)
def call(self, x, features, hidden):
# defining attention as a separate model
context_vector, attention_weights = self.attention(features, hidden)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# shape == (batch_size, max_length, hidden_size)
x = self.fc1(output)
# x shape == (batch_size * max_length, hidden_size)
x = tf.reshape(x, (-1, x.shape[2]))
# output shape == (batch_size * max_length, vocab)
x = self.fc2(x)
return x, state, attention_weights
def reset_state(self, batch_size):
return tf.zeros((batch_size, self.units))
encoder = CNN_Encoder(embedding_dim)
decoder = RNN_Decoder(embedding_dim, units, vocab_size)
optimizer = tf.train.AdamOptimizer()
# We are masking the loss calculated for padding
def loss_function(real, pred):
mask = 1 - np.equal(real, 0)
loss_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real, logits=pred) * mask
return tf.reduce_mean(loss_)
# adding this in a separate cell because if you run the training cell
# many times, the loss_plot array will be reset
loss_plot = []
EPOCHS = 20
for epoch in range(EPOCHS):
start = time.time()
total_loss = 0
for (batch, (img_tensor, target)) in enumerate(dataset):
loss = 0
# initializing the hidden state for each batch
# because the captions are not related from image to image
hidden = decoder.reset_state(batch_size=target.shape[0])
dec_input = tf.expand_dims([tokenizer.word_index['<start>']] * BATCH_SIZE, 1)
with tf.GradientTape() as tape:
features = encoder(img_tensor)
for i in range(1, target.shape[1]):
# passing the features through the decoder
predictions, hidden, _ = decoder(dec_input, features, hidden)
loss += loss_function(target[:, i], predictions)
# using teacher forcing
dec_input = tf.expand_dims(target[:, i], 1)
total_loss += (loss / int(target.shape[1]))
variables = encoder.variables + decoder.variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables), tf.train.get_or_create_global_step())
if batch % 100 == 0:
print ('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
loss.numpy() / int(target.shape[1])))
# storing the epoch end loss value to plot later
loss_plot.append(total_loss / len(cap_vector))
print ('Epoch {} Loss {:.6f}'.format(epoch + 1,
total_loss/len(cap_vector)))
print ('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
plt.plot(loss_plot)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Plot')
plt.show()
def evaluate(image):
attention_plot = np.zeros((max_length, attention_features_shape))
hidden = decoder.reset_state(batch_size=1)
temp_input = tf.expand_dims(load_image(image)[0], 0)
img_tensor_val = image_features_extract_model(temp_input)
img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))
features = encoder(img_tensor_val)
dec_input = tf.expand_dims([tokenizer.word_index['<start>']], 0)
result = []
for i in range(max_length):
predictions, hidden, attention_weights = decoder(dec_input, features, hidden)
attention_plot[i] = tf.reshape(attention_weights, (-1, )).numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result.append(index_word[predicted_id])
if index_word[predicted_id] == '<end>':
return result, attention_plot
dec_input = tf.expand_dims([predicted_id], 0)
attention_plot = attention_plot[:len(result), :]
return result, attention_plot
def plot_attention(image, result, attention_plot):
temp_image = np.array(Image.open(image))
fig = plt.figure(figsize=(10, 10))
len_result = len(result)
for l in range(len_result):
temp_att = np.resize(attention_plot[l], (8, 8))
ax = fig.add_subplot(len_result//2, len_result//2, l+1)
ax.set_title(result[l])
img = ax.imshow(temp_image)
ax.imshow(temp_att, cmap='gray', alpha=0.6, extent=img.get_extent())
plt.tight_layout()
plt.show()
# captions on the validation set
rid = np.random.randint(0, len(img_name_val))
image = img_name_val[rid]
real_caption = ' '.join([index_word[i] for i in cap_val[rid] if i not in [0]])
result, attention_plot = evaluate(image)
print ('Real Caption:', real_caption)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image, result, attention_plot)
# opening the image
Image.open(img_name_val[rid])
image_url = 'https://tensorflow.org/images/surf.jpg'
image_extension = image_url[-4:]
image_path = tf.keras.utils.get_file('image'+image_extension,
origin=image_url)
result, attention_plot = evaluate(image_path)
print ('Prediction Caption:', ' '.join(result))
plot_attention(image_path, result, attention_plot)
# opening the image
Image.open(image_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: IMDB sentiment classification task
Step2: Model building
Step3: LSTM
Step4: Sentence Generation using RNN(LSTM)
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
import theano
import theano.tensor as T
import keras
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# -- Keras Import
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.preprocessing import image
from keras.datasets import imdb
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.preprocessing import sequence
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU, SimpleRNN
from keras.layers import Activation, TimeDistributed, RepeatVector
from keras.callbacks import EarlyStopping, ModelCheckpoint
max_features = 20000
maxlen = 100 # cut texts after this number of words (among top max_features most common words)
batch_size = 32
print("Loading data...")
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Example:')
print(X_train[:1])
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))
model.add(SimpleRNN(128))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='adam')
print("Train...")
model.fit(X_train, y_train, batch_size=batch_size, epochs=1,
validation_data=(X_test, y_test))
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))
# !!! Play with those! try and get better results!
#model.add(SimpleRNN(128))
#model.add(GRU(128))
model.add(LSTM(128))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print("Train...")
model.fit(X_train, y_train, batch_size=batch_size,
epochs=4, validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
from keras.utils.data_utils import get_file
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
#path = 'YourFile.txt'
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.layers import LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import numpy as np
import random
import sys
text = open(path).read().lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
# cut the text in semi-redundant sequences of maxlen characters
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
sentences.append(text[i: i + maxlen])
next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
print('Vectorization...')
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
X[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
# build the model: a single LSTM
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(maxlen, len(chars))))
model.add(Dense(len(chars)))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
def sample(preds, temperature=1.0):
# helper function to sample an index from a probability array
preds = np.asarray(preds).astype('float64')
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
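# Lower temperatures sharpen the distribution (close to greedy argmax sampling), while
# higher temperatures flatten it, producing more varied but less coherent generated text.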
# train the model, output generated text after each iteration
for iteration in range(1, 60):
print()
print('-' * 50)
print('Iteration', iteration)
    model.fit(X, y, batch_size=128, epochs=1)
start_index = random.randint(0, len(text) - maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print()
print('----- diversity:', diversity)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print('----- Generating with seed: "' + sentence + '"')
sys.stdout.write(generated)
for i in range(400):
x = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x[0, t, char_indices[char]] = 1.
preds = model.predict(x, verbose=0)[0]
next_index = sample(preds, diversity)
next_char = indices_char[next_index]
generated += next_char
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
print()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2>Before starting, a word about sizes.</h2>
Step2: <p>First calling and training the algorithm.
Step3: <p>Now checking the prediction for the test set
Step4: <h2>Digits Example</h2>
Step5: <p> ... training gcForest ... (can take some time...) </p>
Step6: <p> ... and predicting classes ... </p>
Step7: <h2>Saving Models to Disk</h2>
Step8: <p>__Loading model__
Step9: <h2>Using mg-scanning and cascade_forest Separately</h2>
Step10: <p>It is now possible to use the mg_scanning output as input for cascade forests using different parameters. Note that the cascade forest module does not directly return predictions but probability predictions from each Random Forest in the last layer of the cascade. Hence the need to first take the mean of the output and then find the max.</p>
Step11: <h3>Skipping mg_scanning</h3>
|
<ASSISTANT_TASK:>
Python Code:
from GCForest import gcForest
from sklearn.datasets import load_iris, load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import numpy as np
# loading the data
iris = load_iris()
X = iris.data
y = iris.target
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.33)
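# A rough note on the constructor arguments below (based on how they are used in this
# notebook, not on an authoritative API reference): shape_1X is the shape of a single
# sample (4 features for iris), window is the sliding-window size used by multi-grain
# scanning, and tolerance controls when the cascade stops adding layers.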
gcf = gcForest(shape_1X=4, window=2, tolerance=0.0)
gcf.fit(X_tr, y_tr)
pred_X = gcf.predict(X_te)
print(pred_X)
# evaluating accuracy
accuracy = accuracy_score(y_true=y_te, y_pred=pred_X)
print('gcForest accuracy : {}'.format(accuracy))
# loading the data
digits = load_digits()
X = digits.data
y = digits.target
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.4)
gcf = gcForest(shape_1X=[8,8], window=[4,6], tolerance=0.0, min_samples_mgs=10, min_samples_cascade=7)
gcf.fit(X_tr, y_tr)
pred_X = gcf.predict(X_te)
print(pred_X)
# evaluating accuracy
accuracy = accuracy_score(y_true=y_te, y_pred=pred_X)
print('gcForest accuracy : {}'.format(accuracy))
from sklearn.externals import joblib
joblib.dump(gcf, 'gcf_model.sav')
gcf = joblib.load('gcf_model.sav')
gcf = gcForest(shape_1X=[8,8], window=5, min_samples_mgs=10, min_samples_cascade=7)
X_tr_mgs = gcf.mg_scanning(X_tr, y_tr)
X_te_mgs = gcf.mg_scanning(X_te)
gcf = gcForest(tolerance=0.0, min_samples_mgs=10, min_samples_cascade=7)
_ = gcf.cascade_forest(X_tr_mgs, y_tr)
pred_proba = gcf.cascade_forest(X_te_mgs)
tmp = np.mean(pred_proba, axis=0)
preds = np.argmax(tmp, axis=1)
accuracy_score(y_true=y_te, y_pred=preds)
gcf = gcForest(tolerance=0.0, min_samples_mgs=20, min_samples_cascade=10)
_ = gcf.cascade_forest(X_tr_mgs, y_tr)
pred_proba = gcf.cascade_forest(X_te_mgs)
tmp = np.mean(pred_proba, axis=0)
preds = np.argmax(tmp, axis=1)
accuracy_score(y_true=y_te, y_pred=preds)
gcf = gcForest(tolerance=0.0, min_samples_cascade=20)
_ = gcf.cascade_forest(X_tr, y_tr)
pred_proba = gcf.cascade_forest(X_te)
tmp = np.mean(pred_proba, axis=0)
preds = np.argmax(tmp, axis=1)
accuracy_score(y_true=y_te, y_pred=preds)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Slicing Arrays
Step2: The optional third parameter indicates the increment value
Step3: If you omit the start index, the slice implicitly starts from zero
Step4: Array Arithmetic
Step5: dot computes the dot product, or inner product, between arrays or matrices.
Step6: Boolean Operations
Step7: Distance Metrics
Step8: The cosine distance measures the angle between two vectors
Step9: Sorting
Step10: numpy.argsort returns an array of indices, ind, such that x[ind] is a sorted version of x.
|
<ASSISTANT_TASK:>
Python Code:
print numpy.arange(5)
print numpy.linspace(0, 5, 10, endpoint=False)
print numpy.zeros(5)
print numpy.ones(5)
print numpy.ones((5,2))
print scipy.randn(5) # random Gaussian, zero-mean unit-variance
print scipy.randn(5,2)
x = numpy.arange(10)
print x[2:4]
print x[-1]
print x[0:8:2]
print x[4:2:-1]
print x[:4]
print x[:999]
print x[::-1]
x = numpy.arange(5)
y = numpy.ones(5)
print x+2*y
x = scipy.randn(5)
y = numpy.ones(5)
print numpy.dot(x, y)
x = scipy.randn(5,3)
y = numpy.ones((3,2))
print numpy.dot(x, y)
x = numpy.arange(10)
print x < 5
y = numpy.ones(10)
print x < y
from scipy.spatial import distance
print distance.euclidean([0, 0], [3, 4])
print distance.sqeuclidean([0, 0], [3, 4])
print distance.cityblock([0, 0], [3, 4])
print distance.chebyshev([0, 0], [3, 4])
print distance.cosine([67, 0], [89, 0])
print distance.cosine([67, 0], [0, 89])
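# Cosine distance is 1 - cos(angle): parallel vectors give 0 and orthogonal vectors give 1,
# independently of their magnitudes.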
x = scipy.randn(5)
print x
x.sort()
print x
x = scipy.randn(5)
print x
ind = numpy.argsort(x)
print ind
print x[ind]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utilities to time functions
Step2: Utilities to draw graphs
Step3: Graphs
Step4: Solution
Step5: Draw Graph
Step6: Graph with negative edges
Step7: Solution
Step8: Draw Graph
Step9: Testing Utilities
Step11: Single-Source Shortest Paths
Step12: Testing
Step13: However, it doesn't in general produce correct answers for graphs with negative edges. Our graph with negative edges should serve as example.
Step15: Complexity Analysis
Step16: Testing
Step17: This variant should produce the correct answers also for graphs with negative edges. Our graph with negative edges should serve as example.
Step19: Bellman-Ford Algorithm
Step20: Testing
Step21: However, it doesn't in general produce correct answers for graphs with negative edges. Our graph with negative edges should serve as example.
Step24: Complexity Analysis
Step25: Testing
|
<ASSISTANT_TASK:>
Python Code:
import math
import numpy as np
from graphviz import Digraph
import queue
# so our plots get drawn in the notebook
%matplotlib inline
from matplotlib import pyplot as plt
from random import randint
from time import clock
# A timer - runs the provided function and reports the
# run time in ms
def time_f(f):
before = clock()
f()
after = clock()
return after - before
def get_graph_img_from_adjacency_matrix(graph_matrix):
n_nodes = len(graph_matrix)
dot = Digraph()
for i in range(n_nodes):
dot.node(str(i))
for i in range(n_nodes):
for j in range(n_nodes):
if np.isfinite(graph_matrix[i][j]):
dot.edge(str(i), str(j), str(graph_matrix[i][j]))
return dot
def get_graph_img_from_adjacency_list(graph_list):
n_nodes = len(graph_list)
dot = Digraph()
for i in range(n_nodes):
dot.node(str(i))
for i in range(n_nodes):
for adj_node, edge_cost in graph_list[i]:
dot.edge(str(i), str(adj_node), str(edge_cost))
return dot
def get_graph_img_from_edges_list(n_nodes, edges_list):
dot = Digraph()
for i in range(n_nodes):
dot.node(str(i))
for edge in edges_list:
start, destination, cost = edge
dot.edge(str(start), str(destination), str(cost))
return dot
n_nodes = 10
# List of tuples (start, destination, cost) which represent directed edges
non_negative_edges_list = [
(0, 1, 2),
(0, 2, 10),
(0, 3, 7),
(1, 5, 4),
(2, 3, 2),
(2, 4, 3),
(4, 0, 5),
(5, 3, 1),
(7, 8, 3),
(8, 9, 4),
]
# Adjacency list representation of a graph of n_nodes nodes
# non_negative_graph_list[i] contains a list of tuple (adj_node, cost) for all adjacent nodes of i
non_negative_graph_list = [[] for _ in range(n_nodes)]
for edge in non_negative_edges_list:
start, destination, cost = edge
non_negative_graph_list[start].append((destination, cost))
# List of tuples (start, destination, shortest_path_cost) which represent directed edges
non_negative_solution_list = [
(0, 1, 2),
(0, 5, 6),
(0, 3, 7),
(0, 8, np.inf),
(4, 1, 7),
]
img = get_graph_img_from_adjacency_list(non_negative_graph_list)
img
n_nodes = 10
# List of tuples (start, destination, cost) which represent directed edges
edges_list = [
(0, 1, 2),
(0, 2, 10),
(0, 3, 7),
(1, 5, 4),
(2, 3, -4),
(2, 4, 3),
(3, 6, 1),
(4, 0, -5),
(5, 3, 1),
(7, 8, 3),
(8, 9, 4),
]
# Adjacency list representation of a graph of n_nodes nodes
# non_negative_graph_list[i] contains a list of tuple (adj_node, cost) for all adjacent nodes of i
graph_list = [[] for _ in range(n_nodes)]
# Adjacency matrix representation of a graph of n_nodes nodes
# Initialize all values to infinite
graph_matrix = np.full((n_nodes, n_nodes), np.inf)
# Values in the diagonal are 0: going from any node to itself has no cost
# np.fill_diagonal(graph_matrix, 0)
for edge in edges_list:
start, destination, cost = edge
graph_matrix[start, destination] = cost
graph_list[start].append((destination, cost))
# List of tuples (start, destination, shortest_path_cost) which represent directed edges
solution_list = [
(0, 1, 2),
(0, 5, 6),
(0, 3, 6),
(0, 8, np.inf),
(0, 6, 7),
(4, 1, -3),
]
img = get_graph_img_from_adjacency_matrix(graph_matrix)
img
def test_single_source(algorithm, graph_list, solution_list):
n_nodes = len(graph_list)
for start_node in range(n_nodes):
sp = algorithm(graph_list, start_node)
for sol in solution_list:
start, destination, cost = sol
if start == start_node:
if sp[destination] != cost:
return False
return True
def test_single_source_non_negative(algorithm):
return test_single_source(algorithm, non_negative_graph_list, non_negative_solution_list)
def test_single_source_negative(algorithm):
return test_single_source(algorithm, graph_list, solution_list)
Takes an adjacency list representing a graph and a starting node 'start',
and returns a list 'sp' where sp[i] indicates the shortest path cost
from start to i.
Greedy algorithm which always chooses the closest node from the fringe.
Once a node has been extracted from the fringe, then it's sp from the origin
has already been computed.
Each node gets visited (in this case, extracted from the priority queue) only once
def dijkstra(graph, start):
n_nodes = len(graph)
# Array containing the shortest path costs initialized to infinity, apart to start itself which is 0
sp = np.full(n_nodes, np.inf)
sp[start] = 0
# Bitmaps to indicate if a given node has already been visited
visited = np.full(n_nodes, False)
# Min-priority queue representing the fringe of nodes. Contains tuples (distance_from_start, node)
fringe = queue.PriorityQueue()
fringe.put((sp[start], start))
while not fringe.empty():
_, node = fringe.get()
visited[node] = True
for adj_node, edge_cost in graph[node]:
if not visited[adj_node]:
if sp[adj_node] > sp[node] + edge_cost:
sp[adj_node] = sp[node] + edge_cost
fringe.put((sp[adj_node], adj_node))
return sp
assert test_single_source_non_negative(dijkstra) == True
assert test_single_source_negative(dijkstra) == False
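# Quick sanity check on the non-negative example graph: nodes 6-9 are unreachable from
# node 0, so their shortest-path costs stay at infinity.
print(dijkstra(non_negative_graph_list, 0))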
Takes an adjacency list representing a graph and a starting node 'start',
and returns a list 'sp' where sp[i] indicates the shortest path cost
from start to i.
Greedy algorithm which always chooses the closest node from the fringe.
Once a node has been extracted from the fringe, then it's sp from the origin
has already been computed.
def dijkstra_variant(graph, start):
n_nodes = len(graph)
# Array containing the shortest path costs initialized to infinity, apart to start itself which is 0
sp = np.full(n_nodes, np.inf)
sp[start] = 0
# Min-priority queue representing the fringe of nodes. Contains tuples (distance_from_start, node)
fringe = queue.PriorityQueue()
fringe.put((sp[start], start))
while not fringe.empty():
_, node = fringe.get()
for adj_node, edge_cost in graph[node]:
if sp[adj_node] > sp[node] + edge_cost:
sp[adj_node] = sp[node] + edge_cost
fringe.put((sp[adj_node], adj_node))
return sp
assert test_single_source_non_negative(dijkstra_variant) == True
assert test_single_source_negative(dijkstra_variant) == True
Takes an adjacency matrix representing a graph and returns a new matrix
with matrix[i, j] containg the shortest path between node i and node j.
def floyd_warshall(graph_matrix):
g = graph_matrix[:][:]
n_nodes = len(g)
floyd_warshall_helper(g, n_nodes - 1)
return g
Takes a matrix representing a graph and modifies that matrix
so that at the end of the function graph[i, j] contains the shortest path between
node i and node j using only nodes in the set {0,1, .., k} as intermediary nodes,
which we will call sp(i, j, k).
We have sp(i, j, k) = min(sp(i, j, k - 1), sp(i, k, k - 1) + sp(k, j, k - 1)).
This is saying that the shortest path from i to j using nodes {0, 1, .., k} is
the minimum between going from i to j using only nodes {0,1, .., k - 1}, and the
sum of first going from i to k and then from j to k (both steps still with nodes {0,1, .., k - 1}).
def floyd_warshall_helper(graph, k):
if k == -1:
# No intermediary node, leave the original adjacency matrix unchanged
return
# Run recursively
floyd_warshall_helper(graph, k - 1)
# Now graph[i][j] contains the shortest path using node {0, 1, .., k - 1} as intermediary
n_nodes = len(graph)
# Update distance to get from i to j, for every i and j
for i in range(n_nodes):
for j in range(n_nodes):
graph[i][j] = min(graph[i][j], graph[i][k] + graph[k][j])
sp_matrix = floyd_warshall(graph_matrix)
for sp in solution_list:
start, destination, cost = sp
assert sp_matrix[start, destination] == cost
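# Complexity note: each recursion level allows one more intermediate node k and performs an
# O(n^2) sweep over all (i, j) pairs, giving the familiar O(n^3) Floyd-Warshall running time.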
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If we want to use our shiny python scripts, we'll need to import them too.
Step2: If we want a quick man-page style summary, we can call help(spy). We can also help help(spy.<func>) for more information on the function <func>. Since the data that we're looking at is in a different directory, let's specify that now.
Step3: Reading Full Files
Step4: We have q (potential vorticity, this is a QG example). Let's read and plot our initial q field.
Step5: What the heck did that just do? Let's break it down and look at the inputs.
Step6: Note that this is ordered as (Nx,Ny). If we had wanted MATLAB style ordering (Ny,Nx), we could have used the optional argument ordering = 'matlab' (useful for some plotting). The default is ordering = 'natural' (useful because it feels right).
Step7: Perfect, now we have our grid vectors. We can also load in grid information, such as the domain size and limits. This is stored in a class that has a method called `display'. These are illustrated below.
Step8: Plotting
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Tells the system to plot in-line, only necessary for iPython notebooks,
# not regular command-line python
import numpy as np
import os
import sys
import matplotlib.pyplot as plt
import time
# Now that we have our packages, we need data. The file 'make_2d_data.py' will
# generate a sample data set. Let's run that now (may take a minute)
execfile('make_2d_data.py')
import spinspy as spy
import matpy as mp
help(spy)
spy.set_prefix('Data/2d')
ls Data/2d
q = spy.reader('q', 11, [0,-1], [0,-1])
q.shape
#x,y = spy.grid()
x = spy.reader('x',0,[0,-1])
y = spy.reader('y',[0,-1],0)
print('The shape of x is {0} and the shape of y is {1}'.format(x.shape,y.shape))
data = spy.get_shape()
print(data.Nx,data.Ny,data.Nz)
print('---')
data.display()
plt.figure(1)
t0 = time.clock()
plt.contour(x,y,q.T) # Transpose for plotting order
t1 = time.clock()
print('Time to plot: {0:1.2e}'.format(t1-t0))
plt.title('q');
plt.xlabel('x');
plt.ylabel('y');
plt.figure(2)
t0 = time.clock()
plt.pcolor(x,y,q.transpose());
t1 = time.clock()
print('Time to plot: {0:1.2e}'.format(t1-t0))
plt.title('Height Field');
plt.xlabel('x');
plt.ylabel('y');
plt.figure(3)
t0 = time.clock()
plt.pcolormesh(x,y,q.T, cmap='darkjet')
t1 = time.clock()
print('Time to plot: {0:1.2e}'.format(t1-t0))
plt.title('Height Field')
plt.xlabel('x')
plt.ylabel('y')
plt.axis('tight')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Batch Normalization using tf.layers.batch_normalization<a id="example_1"></a>
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
Create a fully connectd layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
return conv_layer
DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
layer = tf.layers.batch_normalization(layer, training=is_training)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, is_training):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', use_bias=False, activation=None)
conv_layer = tf.layers.batch_normalization(conv_layer, training=is_training)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Placeholder that indicates whether we're training the network or not
is_training = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, is_training)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, is_training)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
# Tell TensorFlow to update the population statistics while training
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
is_training: False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    Create a fully connected layer with the given layer as input and the given number of neurons.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param num_units: int
The size of the layer. That is, the number of units, nodes, or neurons.
:returns Tensor
A new fully connected layer
layer = tf.layers.dense(prev_layer, num_units, use_bias=False, activation=None)
gamma = tf.Variable(tf.ones([num_units]))
beta = tf.Variable(tf.zeros([num_units]))
    pop_mean = tf.Variable(tf.zeros([num_units]), trainable=False)
    pop_variance = tf.Variable(tf.ones([num_units]), trainable=False)
epsilon = 1e-3
def batch_norm_training():
batch_mean, batch_variance = tf.nn.moments(layer, [0])
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
        train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(layer, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
return tf.nn.batch_normalization(layer, pop_mean, pop_variance, beta, gamma, epsilon)
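    # tf.cond selects the branch at run time: batch statistics (plus the moving-average
    # updates) while training, and the stored population statistics at inference.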
batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)
return tf.nn.relu(batch_normalized_output)
def conv_layer(prev_layer, layer_depth, is_training):
Create a convolutional layer with the given layer as input.
:param prev_layer: Tensor
The Tensor that acts as input into this layer
:param layer_depth: int
We'll set the strides and number of feature maps based on the layer's depth in the network.
This is *not* a good way to make a CNN, but it helps us create this example with very little code.
:returns Tensor
A new convolutional layer
strides = 2 if layer_depth % 3 == 0 else 1
in_channels = prev_layer.get_shape().as_list()[3]
out_channels = layer_depth*4
weights = tf.Variable(
tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
gamma = tf.Variable(tf.ones([out_channels]))
beta = tf.Variable(tf.zeros([out_channels]))
    pop_mean = tf.Variable(tf.zeros([out_channels]), trainable=False)
    pop_variance = tf.Variable(tf.ones([out_channels]), trainable=False)
epsilon = 1e-3
def batch_norm_training():
batch_mean, batch_variance = tf.nn.moments(conv_layer, [0, 1, 2], keep_dims=False)
decay = 0.99
train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
train_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
with tf.control_dependencies([train_mean, train_variance]):
return tf.nn.batch_normalization(conv_layer, batch_mean, batch_variance, beta, gamma, epsilon)
def batch_norm_inference():
return tf.nn.batch_normalization(conv_layer, pop_mean, pop_variance, beta, gamma, epsilon)
batch_normalized_output = tf.cond(is_training, batch_norm_training, batch_norm_inference)
return tf.nn.relu(batch_normalized_output)
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, is_training)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, is_training)
# Create the output layer with 1 node for each
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training: True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
is_training: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
is_training: False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The following records all pre-modern languages as identified by the Glottolog project.
Step2: Word <a name="word"></a>
Step3: Doc <a name="doc"></a>
Step4: It contains a number of helper methods, too, which read (usually) from .word and return convenient data structures.
Step7: Process <a name="process"></a>
Step9: Pipeline <a name="pipeline"></a>
Step11: MorphosyntacticFeature <a name="morpho"></a>
Step12: Note there is a distinction between POS and a word's features. POS tags are more general categories for general classes of words, like noun, verb, etc.
Step13: MorphosyntacticFeatureBundle <a name="morpho-bundle"></a>
Step14: POS and MorphosyntacticFeatureBundle are intended to be placed within a Wordinstance.
Step15: Form <a name="form"></a>
Step16: DecisionTree <a name="dt"></a>
|
<ASSISTANT_TASK:>
Python Code:
from cltk.core.data_types import Language
Language(
name="Classical Mongolian",
glottolog_id="",
latitude=0.0,
longitude=0.0,
dates=[],
family_id="",
parent_id="",
level="",
iso_639_3_code="cmg",
type="h",
)
from cltk.languages.glottolog import LANGUAGES
print(LANGUAGES["gmh"])
from pprint import pprint
pprint([(l.name, l.iso_639_3_code) for _, l in LANGUAGES.items()])
from cltk.core.data_types import Word
Word(index_char_start=0, index_char_stop=6, index_token=0, string="Gallia", pos="nom")
from cltk.core.data_types import Doc
Doc(raw="Gallia est omnis divisa in partes tres")
from cltk import NLP
cltk_nlp = NLP(language="lat")
cltk_doc = cltk_nlp.analyze(text="Gallia est omnis divisa in partes tres")
cltk_doc.tokens # List[str]
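# .tokens is assembled from the Word objects stored in cltk_doc.words; the other
# convenience accessors work the same way -- inspect them with help(cltk_doc).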
from dataclasses import dataclass, field
from typing import List, Type
from boltons.cacheutils import cachedproperty
from cltk.core.data_types import Process
@dataclass
class StopsProcess(Process):
>>> from cltk.core.data_types import Doc, Word
>>> from cltk.stops.processes import StopsProcess
>>> from cltk.languages.example_texts import get_example_text
>>> lang = "lat"
>>> words = [Word(string=token) for token in split_punct_ws(get_example_text(lang))]
>>> stops_process = StopsProcess(language=lang)
>>> output_doc = stops_process.run(Doc(raw=get_example_text(lang), words=words))
>>> output_doc.words[1].string
'est'
>>> output_doc.words[1].stop
True
@cachedproperty
def algorithm(self):
return Stops(iso_code=self.language).get_stopwords()
def run(self, input_doc: Doc) -> Doc:
Note this marks a word a stop if there is a match on
either the inflected form (``Word.string``) or the
lemma (``Word.lemma``).
output_doc = deepcopy(input_doc)
stops_list = self.algorithm
for index, word_obj in enumerate(output_doc.words):
if (word_obj.string in stops_list) or (word_obj.lemma in stops_list):
word_obj.stop = True
else:
word_obj.stop = False
output_doc.words[index] = word_obj
return output_doc
from cltk.core.data_types import Pipeline
from cltk.languages.utils import get_lang
@dataclass
class AkkadianPipeline(Pipeline):
Default ``Pipeline`` for Akkadian.
>>> from cltk.languages.pipelines import AkkadianPipeline
>>> a_pipeline = AkkadianPipeline()
>>> a_pipeline.description
'Pipeline for the Akkadian language.'
>>> a_pipeline.language
Language(name='Akkadian', glottolog_id='akka1240', latitude=33.1, longitude=44.1, dates=[], family_id='afro1255', parent_id='east2678', level='language', iso_639_3_code='akk', type='a')
>>> a_pipeline.language.name
'Akkadian'
>>> a_pipeline.processes[0]
<class 'cltk.tokenizers.processes.AkkadianTokenizationProcess'>
description: str = "Pipeline for the Akkadian language."
language: Language = get_lang("akk")
processes: List[Type[Process]] = field(
default_factory=lambda: [AkkadianTokenizationProcess, StopsProcess]
)
from enum import auto
from cltk.morphology.universal_dependencies_features import Case, Gender, MorphosyntacticFeature, Number
# 'hercule' ('by Hercules, assuredly, indeed')
# http://www.perseus.tufts.edu/hopper/text?doc=Perseus:text:1999.04.0060:entry=hercule
gender = Gender.masculine
print("Gender:", gender)
case = Case.vocative
print("Case:", case)
number = Number.singular
print("Number", number)
type(case)
print("`MorphosyntacticFeature.name`:", case.name)
print("`MorphosyntacticFeature.value`", case.value)
# Example declaration
class Case(MorphosyntacticFeature):
The case of a noun phrase.
see https://universaldependencies.org/u/feat/Case.html
# structural cases
nominative = auto()
accusative = auto()
ergative = auto()
absolutive = auto()
# oblique cases
abessive = auto()
befefactive = auto()
causative = auto()
comparative = auto()
considerative = auto()
comitative = auto()
dative = auto()
distributive = auto()
equative = auto()
genitive = auto()
instrumental = auto()
partitive = auto()
vocative = auto()
# spatiotemporal cases
ablative = auto()
additive = auto()
adessive = auto()
allative = auto()
delative = auto()
elative = auto()
essive = auto()
illative = auto()
inessive = auto()
lative = auto()
locative = auto()
perlative = auto()
sublative = auto()
superessive = auto()
terminative = auto()
temporal = auto()
translative = auto()
# Users can learn a bit about these features, too
#help(case)
from cltk.morphology.universal_dependencies_features import POS
# so the Latin word 'hercule' would be
pos = POS.interjection
print(pos)
from cltk.morphology.morphosyntax import MorphosyntacticFeatureBundle
bundle = MorphosyntacticFeatureBundle(case, gender, number)
print(type(bundle))
print(bundle)
# How to access features within a bundle
print("Gender", bundle["Gender"])
print("Case", bundle["Case"])
print("Number", bundle["Number"])
Word(string="hercule", pos=pos, features=bundle)
from cltk.dependency.tree import Form
from cltk.dependency.tree import DependencyTree
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Example
Step2: estimate effect size standardized mean difference
Step3: Using one-step chi2, DerSimonian-Laird estimate for random effects variance tau
Step4: Using iterated, Paule-Mandel estimate for random effects variance tau
Step5: Example Kacker interlaboratory mean
Step7: Meta-analysis of proportions
Step8: changing data to have positive random effects variance
Step9: Replicate fixed effect analysis using GLM with var_weights
Step10: We need to fix scale=1 in order to replicate standard errors for the usual meta-analysis.
Step11: Using HKSJ variance adjustment in meta-analysis is equivalent to estimating the scale using pearson chi2, which is also the default for the gaussian family.
Step12: Mantel-Hanszel odds-ratio using contingency tables
Step13: compare pooled log-odds-ratio and standard error to R meta package
Step14: check conversion to stratified contingency table
Step15: Results from R meta package
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import pandas as pd
from scipy import stats, optimize
from statsmodels.regression.linear_model import WLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.stats.meta_analysis import (
effectsize_smd,
effectsize_2proportions,
combine_effects,
_fit_tau_iterative,
_fit_tau_mm,
_fit_tau_iter_mm,
)
# increase line length for pandas
pd.set_option("display.width", 100)
data = [
["Carroll", 94, 22, 60, 92, 20, 60],
["Grant", 98, 21, 65, 92, 22, 65],
["Peck", 98, 28, 40, 88, 26, 40],
["Donat", 94, 19, 200, 82, 17, 200],
["Stewart", 98, 21, 50, 88, 22, 45],
["Young", 96, 21, 85, 92, 22, 85],
]
colnames = ["study", "mean_t", "sd_t", "n_t", "mean_c", "sd_c", "n_c"]
rownames = [i[0] for i in data]
dframe1 = pd.DataFrame(data, columns=colnames)
rownames
mean2, sd2, nobs2, mean1, sd1, nobs1 = np.asarray(
dframe1[["mean_t", "sd_t", "n_t", "mean_c", "sd_c", "n_c"]]
).T
rownames = dframe1["study"]
rownames.tolist()
np.array(nobs1 + nobs2)
eff, var_eff = effectsize_smd(mean2, sd2, nobs2, mean1, sd1, nobs1)
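# effectsize_smd computes, for each study, the standardized mean difference (difference in
# means scaled by the pooled standard deviation) and its sampling variance -- the two inputs
# that combine_effects pools below.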
res3 = combine_effects(eff, var_eff, method_re="chi2", use_t=True, row_names=rownames)
# TODO: we still need better information about conf_int of individual samples
# We don't have enough information in the model for individual confidence intervals
# if those are not based on normal distribution.
res3.conf_int_samples(nobs=np.array(nobs1 + nobs2))
print(res3.summary_frame())
res3.cache_ci
res3.method_re
fig = res3.plot_forest()
fig.set_figheight(6)
fig.set_figwidth(6)
res3 = combine_effects(eff, var_eff, method_re="chi2", use_t=False, row_names=rownames)
# TODO: we still need better information about conf_int of individual samples
# We don't have enough information in the model for individual confidence intervals
# if those are not based on normal distribution.
res3.conf_int_samples(nobs=np.array(nobs1 + nobs2))
print(res3.summary_frame())
res4 = combine_effects(
eff, var_eff, method_re="iterated", use_t=False, row_names=rownames
)
res4_df = res4.summary_frame()
print("method RE:", res4.method_re)
print(res4.summary_frame())
fig = res4.plot_forest()
eff = np.array([61.00, 61.40, 62.21, 62.30, 62.34, 62.60, 62.70, 62.84, 65.90])
var_eff = np.array(
[0.2025, 1.2100, 0.0900, 0.2025, 0.3844, 0.5625, 0.0676, 0.0225, 1.8225]
)
rownames = ["PTB", "NMi", "NIMC", "KRISS", "LGC", "NRC", "IRMM", "NIST", "LNE"]
res2_DL = combine_effects(eff, var_eff, method_re="dl", use_t=True, row_names=rownames)
print("method RE:", res2_DL.method_re)
print(res2_DL.summary_frame())
fig = res2_DL.plot_forest()
fig.set_figheight(6)
fig.set_figwidth(6)
res2_PM = combine_effects(eff, var_eff, method_re="pm", use_t=True, row_names=rownames)
print("method RE:", res2_PM.method_re)
print(res2_PM.summary_frame())
fig = res2_PM.plot_forest()
fig.set_figheight(6)
fig.set_figwidth(6)
import io
ss = \
study,nei,nci,e1i,c1i,e2i,c2i,e3i,c3i,e4i,c4i
1,19,22,16.0,20.0,11,12,4.0,8.0,4,3
2,34,35,22.0,22.0,18,12,15.0,8.0,15,6
3,72,68,44.0,40.0,21,15,10.0,3.0,3,0
4,22,20,19.0,12.0,14,5,5.0,4.0,2,3
5,70,32,62.0,27.0,42,13,26.0,6.0,15,5
6,183,94,130.0,65.0,80,33,47.0,14.0,30,11
7,26,50,24.0,30.0,13,18,5.0,10.0,3,9
8,61,55,51.0,44.0,37,30,19.0,19.0,11,15
9,36,25,30.0,17.0,23,12,13.0,4.0,10,4
10,45,35,43.0,35.0,19,14,8.0,4.0,6,0
11,246,208,169.0,139.0,106,76,67.0,42.0,51,35
12,386,141,279.0,97.0,170,46,97.0,21.0,73,8
13,59,32,56.0,30.0,34,17,21.0,9.0,20,7
14,45,15,42.0,10.0,18,3,9.0,1.0,9,1
15,14,18,14.0,18.0,13,14,12.0,13.0,9,12
16,26,19,21.0,15.0,12,10,6.0,4.0,5,1
17,74,75,,,42,40,,,23,30
df3 = pd.read_csv(io.StringIO(ss))
df_12y = df3[["e2i", "nei", "c2i", "nci"]]
# TODO: currently 1 is reference, switch labels
count1, nobs1, count2, nobs2 = df_12y.values.T
dta = df_12y.values.T
eff, var_eff = effectsize_2proportions(*dta, statistic="rd")
eff, var_eff
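# statistic="rd" gives the risk difference between the two arms (difference of event
# proportions); eff holds the per-study effects and var_eff their sampling variances.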
res5 = combine_effects(
eff, var_eff, method_re="iterated", use_t=False
) # , row_names=rownames)
res5_df = res5.summary_frame()
print("method RE:", res5.method_re)
print("RE variance tau2:", res5.tau2)
print(res5.summary_frame())
fig = res5.plot_forest()
fig.set_figheight(8)
fig.set_figwidth(6)
dta_c = dta.copy()
dta_c.T[0, 0] = 18
dta_c.T[1, 0] = 22
dta_c.T
eff, var_eff = effectsize_2proportions(*dta_c, statistic="rd")
res5 = combine_effects(
eff, var_eff, method_re="iterated", use_t=False
) # , row_names=rownames)
res5_df = res5.summary_frame()
print("method RE:", res5.method_re)
print(res5.summary_frame())
fig = res5.plot_forest()
fig.set_figheight(8)
fig.set_figwidth(6)
res5 = combine_effects(eff, var_eff, method_re="chi2", use_t=False)
res5_df = res5.summary_frame()
print("method RE:", res5.method_re)
print(res5.summary_frame())
fig = res5.plot_forest()
fig.set_figheight(8)
fig.set_figwidth(6)
from statsmodels.genmod.generalized_linear_model import GLM
eff, var_eff = effectsize_2proportions(*dta_c, statistic="or")
res = combine_effects(eff, var_eff, method_re="chi2", use_t=False)
res_frame = res.summary_frame()
print(res_frame.iloc[-4:])
weights = 1 / var_eff
mod_glm = GLM(eff, np.ones(len(eff)), var_weights=weights)
res_glm = mod_glm.fit(scale=1.0)
print(res_glm.summary().tables[1])
# check results
res_glm.scale, res_glm.conf_int() - res_frame.loc[
"fixed effect", ["ci_low", "ci_upp"]
].values
res_glm = mod_glm.fit(scale="x2")
print(res_glm.summary().tables[1])
# check results
res_glm.scale, res_glm.conf_int() - res_frame.loc[
"fixed effect", ["ci_low", "ci_upp"]
].values
t, nt, c, nc = dta_c
counts = np.column_stack([t, nt - t, c, nc - c])
ctables = counts.T.reshape(2, 2, -1)
ctables[:, :, 0]
counts[0]
dta_c.T[0]
import statsmodels.stats.api as smstats
st = smstats.StratifiedTable(ctables.astype(np.float64))
st.logodds_pooled, st.logodds_pooled - 0.4428186730553189 # R meta
st.logodds_pooled_se, st.logodds_pooled_se - 0.08928560091027186 # R meta
st.logodds_pooled_confint()
print(st.test_equal_odds())
print(st.test_null_odds())
ctables.sum(1)
nt, nc
print(st.summary())
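# Extra illustrative check, not in the original analysis: map the pooled log-odds
# and its confidence interval back to the odds-ratio scale.
np.exp(st.logodds_pooled), np.exp(st.logodds_pooled_confint())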
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing all the data
Step2: Visualizing some examples
Step3: The hyperparameters of the model are
Step4: Now, training the model using 10001 steps
Step5: Checking the trained model with the test dataset
Step6: Seeing the model perform on 9 images from the valid dataset
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import tensorflow as tf
import inspect
import matplotlib.pyplot as plt
import numpy as np
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from util import get_data_4d, plot9images,randomize_in_place
from CNN import CNNModel, train_model,check_test,one_prediction
from DataHolder import DataHolder
from Config import Config
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d()
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
train_classes = np.argmax(train_labels, axis=1)
train_classes = [chr(i + ord('A')) for i in train_classes]
img_size = 28
img_shape = (img_size, img_size)
images = train_dataset[0:9]
cls_true = train_classes[0:9]
plot9images(images, cls_true, img_shape)
my_config = Config()
print("batch_size = {}".format(my_config.batch_size))
print("patch_size = {}".format(my_config.patch_size))
print("image_size = {}".format(my_config.image_size))
print("num_labels = {}".format(my_config.num_labels))
print("num_channels = {}".format(my_config.num_channels))
print("num_filters_1 = {}".format(my_config.num_filters_1))
print("num_filters_2 = {}".format(my_config.num_filters_2))
print("hidden_nodes_1 = {}".format(my_config.hidden_nodes_1))
print("hidden_nodes_2 = {}".format(my_config.hidden_nodes_2))
print("hidden_nodes_3 = {}".format(my_config.hidden_nodes_3))
print("learning_rate = {}".format(my_config.learning_rate))
print("steps_for_decay = {}".format(my_config.steps_for_decay))
print("decay_rate = {}".format(my_config.decay_rate))
my_dataholder = DataHolder(train_dataset,
train_labels,
valid_dataset,
valid_labels,
test_dataset,
test_labels)
my_model = CNNModel(my_config, my_dataholder)
train_model(my_model, my_dataholder, num_steps=10001, show_step=1000)
print("Test accuracy: %.2f%%" % (check_test(my_model) * 100))
randomize_in_place(valid_dataset, valid_labels, 0)
valid_classes = np.argmax(valid_labels, axis=1)
valid_classes = [chr(i + ord('A')) for i in valid_classes]
cls_true = valid_classes[0:9]
images = valid_dataset[0:9]
images = [image.reshape(1,
image.shape[0],
image.shape[1],
image.shape[2]) for image in images]
predictions = [chr(one_prediction(my_model, image) + ord('A')) for image in images]
plot9images(images, cls_true, img_shape)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
pip install kaggle
mkdir ~/.kaggle
cp kaggle.json ~/.kaggle/
chmod 600 ~/.kaggle/kaggle.json
kaggle datasets download -d 'dataset'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initializing DNA object with HDF5 file
Step2: Store/Save data to HDF5 file
Step3: Example to extract a parameter
Step4: Example to extract parameter as a function of time
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import matplotlib.pyplot as plt
import dnaMD
%matplotlib inline
try:
os.remove('cdna.h5')
except:
pass
# Initialization
dna = dnaMD.DNA(60, filename='cdna.h5') #Initialization for 60 base-pairs DNA bound with the protein
# Read Local base-pair parameters
dna.set_base_pair_parameters('tutorial_data/L-BP_cdna.dat', bp=[1, 60], bp_range=True)
# Read Local base-step parameters
dna.set_base_step_parameters('tutorial_data/L-BPS_cdna.dat', bp_step=[1, 59], parameters='All', step_range=True)
# Read Local helical base-step parameters
dna.set_base_step_parameters('tutorial_data/L-BPH_cdna.dat', bp_step=[1, 59], parameters='All', step_range=True, helical=True)
# Read Helical axis
dna.set_helical_axis('tutorial_data/HelAxis_cdna.dat')
# Generate global axis by interpolation (smoothening)
dna.generate_smooth_axis(smooth=500, spline=3, fill_point=6)
# Calculate curvature and tangent along global helical axis
dna.calculate_curvature_tangent(store_tangent=True)
# Major and minor grooves
parameters = [ 'minor groove', 'minor groove refined', 'major groove', 'major groove refined' ]
dna.set_major_minor_groove('tutorial_data/MGroove_cdna.dat', bp_step=[1, 59], parameters=parameters, step_range=True)
#Backbone dihedrals
dna.set_backbone_dihedrals('tutorial_data/BackBoneCHiDihedrals_cdna.dat', bp=[2, 59], parameters='All', bp_range=True)
# Extracting "Shear" of 22nd bp
shear_20bp = dna.data['bp']['22']['shear']
#Shear vs Time for 22nd bp
plt.title('22nd bp')
plt.plot(dna.time, shear_20bp)
plt.xlabel('Time (ps)')
plt.ylabel('Shear ($\AA$)')
plt.show()
# Rise vs Time for 25-40 bp segment
plt.title('Rise for 25-40 bp segment')
# Rise is the distance between two base-pairs, so for a given segment it is sum over the base-steps
time, value = dna.time_vs_parameter('rise', [25, 40], merge=True, merge_method='sum')
plt.plot(time, value, label='bound DNA', c='k')
plt.xlabel('Time (ps)')
plt.ylabel('Rise ( $\AA$)')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Ice Albedo
Step7: 1.4. Atmospheric Coupling Variables
Step8: 1.5. Oceanic Coupling Variables
Step9: 1.6. Prognostic Variables
Step10: 2. Key Properties --> Software Properties
Step11: 2.2. Code Version
Step12: 2.3. Code Languages
Step13: 3. Grid
Step14: 3.2. Adaptive Grid
Step15: 3.3. Base Resolution
Step16: 3.4. Resolution Limit
Step17: 3.5. Projection
Step18: 4. Glaciers
Step19: 4.2. Description
Step20: 4.3. Dynamic Areal Extent
Step21: 5. Ice
Step22: 5.2. Grounding Line Method
Step23: 5.3. Ice Sheet
Step24: 5.4. Ice Shelf
Step25: 6. Ice --> Mass Balance
Step26: 7. Ice --> Mass Balance --> Basal
Step27: 7.2. Ocean
Step28: 8. Ice --> Mass Balance --> Frontal
Step29: 8.2. Melting
Step30: 9. Ice --> Dynamics
Step31: 9.2. Approximation
Step32: 9.3. Adaptive Timestep
Step33: 9.4. Timestep
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cas', 'sandbox-2', 'landice')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Step 2
Step3: Step 3
Step4: Step 4
|
<ASSISTANT_TASK:>
Python Code:
from igraph import Graph
from igraph import summary
import pandas
import numpy
sif_data = pandas.read_csv("shared/pathway_commons.sif",
sep="\t", names=["species1","interaction_type","species2"])
interaction_types_ppi = set(["interacts-with",
"in-complex-with"])
interac_ppi = sif_data[sif_data.interaction_type.isin(interaction_types_ppi)].copy()
boolean_vec = interac_ppi['species1'] > interac_ppi['species2']
interac_ppi.loc[boolean_vec, ['species1', 'species2']] = interac_ppi.loc[boolean_vec, ['species2', 'species1']].values
interac_ppi_unique = interac_ppi[["species1","species2"]].drop_duplicates()
ppi_igraph = Graph.TupleList(interac_ppi_unique.values.tolist(), directed=False)
summary(ppi_igraph)
# call the `clusters` method on the `ppi_igraph` object, and assign the
# resulting `VertexClustering` object to have object name `ppi_components`
# call the `sizes` method on the `ppi_components` object, and assign the
# resulting list object to have the name `ppi_component_sizes`.
# make a `numpy.array` initialized by `ppi_component_sizes`, and find its
# maximum value using the `max` method on the `numpy.array` class
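# One possible way to carry out the three steps above (a sketch, assuming the
# python-igraph VertexClustering API; the exercise leaves these lines to the reader):
ppi_components = ppi_igraph.clusters()
ppi_component_sizes = ppi_components.sizes()
numpy.array(ppi_component_sizes).max()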
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise
Step2: Sample Model Code
Step3: Adding Strides
Step4: Make Model Larger
Step5: Add Dropout
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import YouTubeVideo
YouTubeVideo('fwNLf4t7MR8', width=800, height=450)
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.python import keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, Dropout
# Prep the data:
img_rows, img_cols = 28, 28
num_classes = 10
def data_prep(raw):
out_y = keras.utils.to_categorical(raw.label, num_classes)
num_images = raw.shape[0]
x_as_array = raw.values[:,1:]
x_shaped_array = x_as_array.reshape(num_images, img_rows, img_cols, 1)
out_x = x_shaped_array / 255
return out_x, out_y
train_size = 30000
train_file = 'inputs/digit_recognizer/train.csv'
raw_data = pd.read_csv(train_file)
# Build the model:
x, y = data_prep(raw_data)
model = Sequential()
model.add(Conv2D(30, kernel_size=(3,3),
strides=2,
activation='relu',
input_shape=(img_rows, img_cols, 1)))
model.add(Dropout(0.5))
model.add(Conv2D(30, kernel_size=(3,3), strides=2, activation='relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Compile and fit the model:
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer='adam',
metrics=['accuracy'])
model.fit(x, y,
batch_size=128,
epochs=2,
validation_split=0.2)
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.python import keras
img_rows, img_cols = 28, 28
num_classes = 10
def prep_data(raw, train_size, val_size):
y = raw[:, 0]
out_y = keras.utils.to_categorical(y, num_classes)
x = raw[:,1:]
num_images = raw.shape[0]
out_x = x.reshape(num_images, img_rows, img_cols, 1)
out_x = out_x / 255
return out_x, out_y
fashion_file = 'inputs/fashionmnist/train.csv'
fashion_data = np.loadtxt(fashion_file, skiprows=1, delimiter=',')
x, y = prep_data(fashion_data, train_size=50000, val_size=5000)
fashion_model = Sequential()
fashion_model.add(Conv2D(12, kernel_size=(3, 3), strides=2, activation='relu',
input_shape=(img_rows, img_cols, 1)))
fashion_model.add(Conv2D(12, (3, 3), strides=2, activation='relu'))
fashion_model.add(Flatten())
fashion_model.add(Dense(128, activation='relu'))
fashion_model.add(Dense(num_classes, activation='softmax'))
fashion_model.compile(loss=keras.losses.categorical_crossentropy,
optimizer='adam',
metrics=['accuracy'])
batch_size = 128
epochs = 3
fashion_model.fit(x, y,
batch_size=batch_size,
epochs=epochs,
validation_split = 0.2)
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Flatten, Conv2D, Dropout
fashion_model_1 = Sequential()
fashion_model_1.add(Conv2D(12, kernel_size=(3, 3), strides=2,
activation='relu',
input_shape=(img_rows, img_cols, 1)))
fashion_model_1.add(Conv2D(12, (3, 3), strides=2, activation='relu'))
fashion_model_1.add(Flatten())
fashion_model_1.add(Dense(128, activation='relu'))
fashion_model_1.add(Dense(num_classes, activation='softmax'))
fashion_model_1.compile(loss=keras.losses.categorical_crossentropy,
optimizer='adam',
metrics=['accuracy'])
fashion_model_1.fit(x, y,
batch_size=128,
epochs=2,
validation_split=0.2)
fashion_model_2 = Sequential()
fashion_model_2.add(Conv2D(24, kernel_size=(3, 3), strides=2,
activation='relu',
input_shape=(img_rows, img_cols, 1)))
fashion_model_2.add(Conv2D(24, (3, 3), strides=2, activation='relu'))
fashion_model_2.add(Conv2D(24, (3, 3), activation='relu'))
fashion_model_2.add(Flatten())
fashion_model_2.add(Dense(num_classes, activation='softmax'))
fashion_model_2.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
fashion_model_2.fit(x, y, batch_size=128, epochs=2, validation_split=0.2)
fashion_model_3 = Sequential()
fashion_model_3.add(Conv2D(24, kernel_size=(3, 3), strides=2, activation='relu', input_shape=(img_rows, img_cols, 1)))
fashion_model_3.add(Dropout(0.5))
fashion_model_3.add(Conv2D(24, (3, 3), strides=2, activation='relu'))
fashion_model_3.add(Dropout(0.5))
fashion_model_3.add(Conv2D(24, (3, 3), activation='relu'))
fashion_model_3.add(Dropout(0.5))
fashion_model_3.add(Flatten())
fashion_model_3.add(Dense(128, activation='relu'))
fashion_model_3.add(Dense(num_classes, activation='softmax'))
fashion_model_3.compile(loss=keras.losses.categorical_crossentropy, optimizer='adam', metrics=['accuracy'])
fashion_model_3.fit(x, y, batch_size=128, epochs=2, validation_split=0.2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Split data into training and testing.
Step3: Learning a multiple regression model
Step4: Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows
Step5: Making Predictions
Step6: Compute RSS
Step7: Test your function by computing the RSS on TEST data for the example model
Step8: Create some new features
Step9: Next create the following 4 new features as column in both TEST and TRAIN data
Step10: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms.
Step11: Learning Multiple Models
Step12: Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients
Step13: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 1?
Step14: Which model (1, 2 or 3) has lowest RSS on TRAINING Data? Is this what you expected?
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data.gl')
sales.head(2) ## view first 2 rows of the data set
train_data,test_data = sales.random_split(.8,seed=0)
example_features = ['sqft_living', 'bedrooms', 'bathrooms']
example_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features,
validation_set = None)
example_weight_summary = example_model.get("coefficients")
print example_weight_summary
example_predictions = example_model.predict(train_data)
print example_predictions[0] # should be 271789.505878
import numpy as np
def get_residual_sum_of_squares(model, data, outcome):
# First get the predictions
    predictions = model.predict(data)  # this is the y hat; the real output is `outcome`
# Then compute the residuals/errors
# error = model.evaluate(data)
# rmse = graphlab.evaluation.rmse(outcome, predictions) ## root mean square error
diff = np.subtract(outcome,predictions)
# square the residuals and add them up
RSS = np.vdot(diff,diff)
#RSS = rmse
# Then square and add them up
return(RSS)
rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price'])
print rss_example_train # should be 2.7376153833e+14
from math import log
train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2)
test_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2)
# create the remaining 3 features in both TEST and TRAIN data
train_data['bed_bath_rooms'] = train_data['bedrooms'] * train_data['bathrooms']
test_data['bed_bath_rooms'] = test_data['bedrooms'] * test_data['bathrooms']
train_data['log_sqft_living'] = train_data['sqft_living'].apply(lambda x: log(x))
test_data['log_sqft_living'] = test_data['sqft_living'].apply(lambda x: log(x))
train_data['lat_plus_long'] = train_data['lat'] + train_data['long']
test_data['lat_plus_long'] = test_data['lat'] + test_data['long']
print sum(test_data['bedrooms_squared'])/len(test_data['bedrooms_squared'])
print sum(test_data['bed_bath_rooms'])/len(test_data['bed_bath_rooms'])
print sum(test_data['log_sqft_living'])/len(test_data['log_sqft_living'])
print sum(test_data['lat_plus_long'])/len(test_data['lat_plus_long'])
model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']
model_2_features = model_1_features + ['bed_bath_rooms']
model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']
# Learn the three models: (don't forget to set validation_set = None)
model1 = graphlab.linear_regression.create(train_data, target = 'price', features = model_1_features,
validation_set = None)
model2 = graphlab.linear_regression.create(train_data, target = 'price', features = model_2_features,
validation_set = None)
model3 = graphlab.linear_regression.create(train_data, target = 'price', features = model_3_features,
validation_set = None)
# Examine/extract each model's coefficients:
model1_coefficients = model1.get("coefficients")
print model1_coefficients
model2_coefficients = model2.get("coefficients")
print model2_coefficients
model3_coefficients = model3.get("coefficients")
print model3_coefficients
# Compute the RSS on TESTING data for each of the three models and record the values:
rss_model1_test = get_residual_sum_of_squares(model1, test_data, test_data['price'])
print rss_model1_test
rss_model2_test = get_residual_sum_of_squares(model2, test_data, test_data['price'])
print rss_model2_test
rss_model3_test = get_residual_sum_of_squares(model3, test_data, test_data['price'])
print rss_model3_test
# Compute the RSS on TRAINING data for each of the three models and record the values:
rss_model1_train = get_residual_sum_of_squares(model1, train_data, train_data['price'])
print rss_model1_train
rss_model2_train = get_residual_sum_of_squares(model2, train_data, train_data['price'])
print rss_model2_train
rss_model3_train = get_residual_sum_of_squares(model3, train_data, train_data['price'])
print rss_model3_train
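# Small illustrative aside, not part of the assignment: pick out the lowest-RSS
# model on the training data programmatically instead of reading the printouts.
train_rss = {'model_1': rss_model1_train, 'model_2': rss_model2_train, 'model_3': rss_model3_train}
print min(train_rss, key=train_rss.get)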
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Converting shapes
Step2: Input and output of an RNN layer
Step3: What do we see?
Step4: Multi Layer RNNs
Step5: Bidirectional RNNs
Step6: LSTMs / GRUs
Step7: Multivariate LSTM Models
Step8: Let's make this a little bit harder
Step9: Multi-Step LSTM Models
|
<ASSISTANT_TASK:>
Python Code:
# univariate data preparation
import numpy as np
# split a univariate sequence into samples
def split_sequence(sequence, n_steps):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the sequence
if end_ix > len(sequence)-1:
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
# define input sequence
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# choose a number of time steps
n_steps = 3
# split into samples
X, y = split_sequence(raw_seq, n_steps)
# summarize the data
list(zip(X, y))
X
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
X
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dense, LSTM, GRU, SimpleRNN, Bidirectional
from tensorflow.keras.models import Sequential, Model
model = Sequential()
model.add(SimpleRNN(units=50, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input"))
model.add(Dense(units=1, name="Linear_Output"))
model.compile(optimizer='adam', loss='mse')
%time history = model.fit(X, y, epochs=500, verbose=0)
import matplotlib.pyplot as plt
plt.plot(history.history['loss'])
# this does not look too bad
X_sample = np.array([[10, 20, 30], [70, 80, 90]])
X_sample = X_sample.reshape((X_sample.shape[0], X_sample.shape[1], n_features))
X_sample
y_pred = model.predict(X_sample)
y_pred
def predict(model, samples, n_features=1):
input = np.array(samples)
input = input.reshape((input.shape[0], input.shape[1], n_features))
y_pred = model.predict(input)
return y_pred
# do not look too close, though
predict(model, [[100, 110, 120], [200, 210, 220], [200, 300, 400]])
# https://keras.io/layers/recurrent/
# input: (samples, timesteps, input_dim)
# output: (samples, units)
# let's have a look at the actual output for an example
rnn_layer = model.get_layer("RNN_Input")
model_stub = Model(inputs = model.input, outputs = rnn_layer.output)
hidden = predict(model_stub, [[10, 20, 30]])
hidden
# https://arxiv.org/ftp/arxiv/papers/1701/1701.05923.pdf
# n = output dimension
# m = input dimension
# Total number of parameters for
# Simple RNN = n**2 + nm + n
# GRU = 3 × (n**2 + nm + n)
# LSTM = 4 × (n**2 + nm + n)
rnn_units = 1
model = Sequential()
model.add(SimpleRNN(units=rnn_units, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input"))
# model.add(GRU(units=rnn_units, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input"))
model.summary()
output_dimension = rnn_units
input_dimension = n_features
parameters = 1 * (output_dimension ** 2 + output_dimension * input_dimension + output_dimension)
parameters
# from only a single output for the final timestep
# ideal for feeding into something that *does not* handle timesteps
rnn_units = 1
model = Sequential([
SimpleRNN(units=rnn_units, activation='relu', input_shape=(n_steps, n_features))
])
predict(model, [[10, 20, 30]])
# to one output for each timestep
# ideal for feeding into something that *expects* timesteps
rnn_units = 1
model = Sequential([
SimpleRNN(units=rnn_units, activation='relu', input_shape=(n_steps, n_features), return_sequences=True)
])
# https://keras.io/layers/recurrent/
# input: (samples, timesteps, input_dim)
# output with return_sequences: (samples, timesteps, units)
predict(model, [[10, 20, 30]])
rnn_units = 50
model = Sequential([
SimpleRNN(units=rnn_units, activation='relu', input_shape=(n_steps, n_features), return_sequences=True, name="RNN_Input"),
SimpleRNN(units=rnn_units, activation='relu', name="RNN_Latent"),
Dense(units=1, name="Linear_Output")
])
model.compile(optimizer='adam', loss='mse')
model.summary()
%time history = model.fit(X, y, epochs=500, verbose=0)
plt.plot(history.history['loss'])
predict(model, [[10, 20, 30], [70, 80, 90], [100, 110, 120], [200, 210, 220], [200, 300, 400]])
rnn_units = 50
model = Sequential([
Bidirectional(SimpleRNN(units=rnn_units, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input")),
Dense(units=1, name="Linear_Output")
])
model.compile(optimizer='adam', loss='mse')
%time history = model.fit(X, y, epochs=500, verbose=0)
plt.plot(history.history['loss'])
predict(model, [[10, 20, 30], [70, 80, 90], [100, 110, 120], [200, 210, 220], [200, 300, 400]])
rnn_units = 50
model = Sequential([
LSTM(units=rnn_units, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input"),
Dense(units=1, name="Linear_Output")
])
model.compile(optimizer='adam', loss='mse')
model.summary()
output_dimension = rnn_units
input_dimension = n_features
parameters = 4 * (output_dimension ** 2 + output_dimension * input_dimension + output_dimension)
parameters
%time history = model.fit(X, y, epochs=500, verbose=0)
plt.plot(history.history['loss'])
predict(model, [[10, 20, 30], [70, 80, 90], [100, 110, 120], [200, 210, 220], [200, 300, 400]])
rnn_units = 50
model = Sequential([
GRU(units=rnn_units, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input"),
Dense(units=1, name="Linear_Output")
])
model.compile(optimizer='adam', loss='mse')
model.summary()
output_dimension = rnn_units
input_dimension = n_features
parameters = 3 * (output_dimension ** 2 + output_dimension * input_dimension + output_dimension)
parameters
%time history = model.fit(X, y, epochs=500, verbose=0)
plt.plot(history.history['loss'])
predict(model, [[10, 20, 30], [70, 80, 90], [100, 110, 120], [200, 210, 220], [200, 300, 400]])
in_seq1 = [10, 20, 30, 40, 50, 60, 70, 80, 90]
in_seq2 = [15, 25, 35, 45, 55, 65, 75, 85, 95]
out_seq = [in1 + in2 for in1, in2 in zip(in_seq1, in_seq2)]
out_seq
# convert to [rows, columns] structure
in_seq1 = np.array(in_seq1).reshape((len(in_seq1), 1))
in_seq2 = np.array(in_seq2).reshape((len(in_seq2), 1))
out_seq = np.array(out_seq).reshape((len(out_seq), 1))
out_seq
# horizontally stack columns
dataset = np.hstack((in_seq1, in_seq2, out_seq))
dataset
# split a multivariate sequence into samples
def split_sequences(sequences, n_steps):
X, y = list(), list()
for i in range(len(sequences)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the dataset
if end_ix > len(sequences):
break
# gather input and output parts of the pattern
seq_x, seq_y = sequences[i:end_ix, :-1], sequences[end_ix-1, -1]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
# choose a number of time steps
n_steps = 3
# convert into input/output
X, y = split_sequences(dataset, n_steps)
# summarize the data
list(zip(X, y))
# the dataset knows the number of features, e.g. 2
n_features = X.shape[2]
# define model
model = Sequential()
model.add(GRU(units=50, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input"))
model.add(Dense(units=1, name="Linear_Output"))
model.compile(optimizer='adam', loss='mse')
# fit model
%time history = model.fit(X, y, epochs=500, verbose=0)
import matplotlib.pyplot as plt
plt.yscale('log')
plt.plot(history.history['loss'])
def predict_multi(model, samples):
input = np.array(samples)
input = input.reshape(1, input.shape[0], input.shape[1])
y_pred = model.predict(input)
return y_pred
predict_multi(model, [[80, 85], [90, 95], [100, 105]])
predict_multi(model, [[10, 15], [20, 25], [30, 35]])
predict_multi(model, [[180, 185], [190, 195], [200, 205]])
y += 20
list(zip(X, y))
model = Sequential()
model.add(GRU(units=50, activation='relu', input_shape=(n_steps, n_features), name="RNN_Input"))
model.add(Dense(units=1, name="Linear_Output"))
model.compile(optimizer='adam', loss='mse')
# train a little bit longer, as this should be harder now
%time history = model.fit(X, y, epochs=2000, verbose=0)
import matplotlib.pyplot as plt
plt.yscale('log')
plt.plot(history.history['loss'])
predict_multi(model, [[80, 85], [90, 95], [100, 105]])
predict_multi(model, [[10, 15], [20, 25], [30, 35]])
predict_multi(model, [[180, 185], [190, 195], [200, 205]])
# split a univariate sequence into samples
def split_sequence(sequence, n_steps_in, n_steps_out):
X, y = list(), list()
for i in range(len(sequence)):
# find the end of this pattern
end_ix = i + n_steps_in
out_end_ix = end_ix + n_steps_out
# check if we are beyond the sequence
if out_end_ix > len(sequence):
break
# gather input and output parts of the pattern
seq_x, seq_y = sequence[i:end_ix], sequence[end_ix:out_end_ix]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
# define input sequence
raw_seq = [10, 20, 30, 40, 50, 60, 70, 80, 90]
# choose a number of time steps
n_steps_in, n_steps_out = 3, 2
# split into samples
X, y = split_sequence(raw_seq, n_steps_in, n_steps_out)
# summarize the data
for input, output in zip(X, y):
print (input, output)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
# define model
model = Sequential()
model.add(GRU(100, activation='relu', input_shape=(n_steps_in, n_features)))
# model.add(GRU(100, activation='relu', return_sequences=True, input_shape=(n_steps_in, n_features)))
# model.add(GRU(100, activation='relu'))
model.add(Dense(n_steps_out))
model.compile(optimizer='adam', loss='mse')
# fit model
%time history = model.fit(X, y, epochs=500, verbose=0)
import matplotlib.pyplot as plt
plt.yscale('log')
plt.plot(history.history['loss'])
X_sample = np.array([70, 80, 90]).reshape((1, n_steps_in, n_features))
y_pred = model.predict(X_sample)
print(y_pred)
X_sample = np.array([10, 20, 30]).reshape((1, n_steps_in, n_features))
y_pred = model.predict(X_sample)
print(y_pred)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Necessary packages and functions call
Step2: Data loading & Sample corruption
Step3: Data preprocessing
Step4: Run DVRL
Step5: Evaluations
Step6: 2. Removing high/low valued samples
Step7: 3. Corrupted sample discovery
|
<ASSISTANT_TASK:>
Python Code:
# Uses pip3 to install necessary package (lightgbm)
!pip3 install lightgbm
# Resets the IPython kernel to import the installed package.
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
import os
from git import Repo
# Current working directory
repo_dir = os.getcwd() + '/repo'
if not os.path.exists(repo_dir):
os.makedirs(repo_dir)
# Clones github repository
if not os.listdir(repo_dir):
git_url = "https://github.com/google-research/google-research.git"
Repo.clone_from(git_url, repo_dir)
import numpy as np
import tensorflow as tf
from sklearn import linear_model
import lightgbm
# Sets current directory
os.chdir(repo_dir)
from dvrl.data_loading import load_tabular_data, preprocess_data
from dvrl import dvrl
from dvrl.dvrl_metrics import discover_corrupted_sample, remove_high_low, learn_with_dvrl
# Data name: 'adult' in this notebook
data_name = 'adult'
# The number of training and validation samples
dict_no = dict()
dict_no['train'] = 1000
dict_no['valid'] = 400
# Label noise ratio
noise_rate = 0.2
# Loads data and corrupts labels
noise_idx = load_tabular_data(data_name, dict_no, noise_rate)
# noise_idx: ground truth noisy sample indices
print('Finished data loading.')
# Normalization methods: 'minmax' or 'standard'
normalization = 'minmax'
# Extracts features and labels. Then, normalizes features.
x_train, y_train, x_valid, y_valid, x_test, y_test, _ = \
preprocess_data(normalization, 'train.csv', 'valid.csv', 'test.csv')
print('Finished data preprocess.')
# Resets the graph
tf.reset_default_graph()
# Network parameters
parameters = dict()
parameters['hidden_dim'] = 100
parameters['comb_dim'] = 10
parameters['iterations'] = 2000
parameters['activation'] = tf.nn.relu
parameters['layer_number'] = 5
parameters['batch_size'] = 2000
parameters['learning_rate'] = 0.01
# Sets checkpoint file name
checkpoint_file_name = './tmp/model.ckpt'
# Defines predictive model
pred_model = linear_model.LogisticRegression(solver='lbfgs')
problem = 'classification'
# Flags for using stochastic gradient descent / pre-trained model
flags = {'sgd': False, 'pretrain': False}
# Initalizes DVRL
dvrl_class = dvrl.Dvrl(x_train, y_train, x_valid, y_valid,
problem, pred_model, parameters, checkpoint_file_name, flags)
# Trains DVRL
dvrl_class.train_dvrl('auc')
print('Finished dvrl training.')
# Estimates data values
dve_out = dvrl_class.data_valuator(x_train, y_train)
# Predicts with DVRL
y_test_hat = dvrl_class.dvrl_predictor(x_test)
print('Finished data valuation.')
# Defines evaluation model
eval_model = lightgbm.LGBMClassifier()
# Robust learning (DVRL-weighted learning)
robust_perf = learn_with_dvrl(dve_out, eval_model,
x_train, y_train, x_valid, y_valid, x_test, y_test, 'accuracy')
print('DVRL-weighted learning performance: ' + str(np.round(robust_perf, 4)))
# Evaluates performance after removing high/low valued samples
remove_high_low_performance = remove_high_low(dve_out, eval_model, x_train, y_train,
x_valid, y_valid, x_test, y_test, 'accuracy', plot = True)
# If noise_rate is positive value.
if noise_rate > 0:
# Evaluates true positive rates (TPR) of corrupted sample discovery and plot TPR
noise_discovery_performance = discover_corrupted_sample(dve_out, noise_idx, noise_rate, plot = True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Total reverts by namespace
Step2: Group by reverter and namespace
Step3: Botplots
Step4: Daily reverts subplotted by reverting bot, linear y-axis, shared y-axis
|
<ASSISTANT_TASK:>
Python Code:
# bot-vs-bot revert table: https://quarry.wmflabs.org/query/17237
!wget https://quarry.wmflabs.org/run/161084/output/0/tsv?download=true -O botvbot.tsv
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv("botvbot.tsv", sep="\t")
len(df)
print(df.columns)
df[0:5]
df['reverting_timestamp_dt'] = pd.to_datetime(df['reverting_timestamp'], format="%Y%m%d%H%M%S")
df = df.set_index('reverting_timestamp_dt')
df.page_namespace.value_counts()
df.reverting_user_text.value_counts()
len(df.reverting_user_text.value_counts())
topbots = df.reverting_user_text.value_counts().index.tolist()[0:24]
topbots
gp3 = df.groupby([pd.TimeGrouper('1D', closed='left'), 'reverting_user_text'])
user_namespace_g = gp3.rev_id.count()
user_namespace_g.head()
user_namespace_g.unstack().head()
user_namespace_unstack = user_namespace_g.unstack()[topbots]
fig, axes = plt.subplots(ncols=1,nrows=len(topbots), figsize=[14,28], sharex=True)
user_namespace_unstack[topbots].plot(subplots=True, kind='area', ax=axes)
fig.subplots_adjust(hspace=.25)
fig, axes = plt.subplots(ncols=1,nrows=len(topbots), figsize=[14,28], sharex=True, sharey=True)
user_namespace_unstack[topbots].plot(subplots=True, kind='area', ax=axes)
fig.subplots_adjust(hspace=.25)
user_namespace_unstack['Addbot'].plot(kind='area')
user_namespace_unstack['AvicBot'].plot(kind='area')
user_namespace_unstack['Xqbot'].plot(kind='area')
user_namespace_unstack['KLBot2'].plot(kind='area')
user_namespace_unstack['RussBot'].plot(kind='area')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are warnings, but that's okay - this happens a lot these days due to the whole ipython/jupyter renaming process. You can ignore them.
Step2: You should be able to execute all the standard SQL queries from the lesson here now. Note that you can also do this on the command line.
Step3: Note: if you get an error about MySQLdb not being installed here, enter this back in your bash shell
Step4: Now that we've created the database week3demo, we need to tell MySQL that we want to use it
Step5: But there's nothing in it
Step6: Creating a table
Step7: Inserting data
Step8: Selecting data
Step9: Accessing data from Python
Step10: You can even assign it to a Pandas dataframe
Step11: Cleaning up
Step12: And to get rid of a whole database, use DROP DATABASE
|
<ASSISTANT_TASK:>
Python Code:
%load_ext sql
%sql sqlite:///survey.db
%sql SELECT * FROM Person;
%sql mysql://mysqluser:mysqlpass@localhost/
%sql CREATE DATABASE week3demo;
%sql USE week3demo;
%sql SHOW TABLES;
%%sql
CREATE TABLE Person
(ident CHAR(10),
personal CHAR(25),
family CHAR(25));
%sql SHOW TABLES;
%sql DESCRIBE Person;
%%sql
INSERT INTO Person VALUES
("dyer", "William", "Dyer"),
("pb", "Frank", "Pabodie"),
("lake", "Anderson", "Lake"),
("roe", "Valentina", "Roerich"),
("danforth", "Frank", "Danforth")
;
%sql SELECT * FROM Person;
%sql SELECT * FROM Person WHERE personal = "Frank";
result = _
print result
df = result.DataFrame()
df
%sql DROP TABLE Person;
%sql SHOW TABLES;
%sql DROP DATABASE week3demo;
%sql SHOW DATABASES;
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) Explore the dataset
Step2: What's the content of df ?
Step3: Describe each attribute (is it discrete? is it continuous? is it a number? is it text?)
Step4: Is Pclass a continuous or discrete class?
Step5: What about these
Step6: and what about these
Step7: Identify the target
Step8: Check if any values are missing
Step9: Mental notes so far
Step10: impute the missing values for Age using the median Age
Step11: check the influence of Age
Step12: Check the influence of Sex on Survival
Step13: Check the influence of Pclass on Survival
Step14: Ok, so, Age and Pclass seem to have some influence on survival rate.
Step15: Define simplest model as benchmark
Step16: We need to do better than that
Step17: Initialize a decision tree model
Step18: Split the features and the target into a Train and a Test subsets.
Step19: Train the model
Step20: Calculate the model score
Step21: Print the confusion matrix for the decision tree model
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv('titanic-train.csv')
df.head(3)
df.info()
df['Pclass'].value_counts()
df['SibSp'].value_counts()
df['Parch'].value_counts()
df[['Ticket', 'Fare', 'Cabin']].head(3)
df['Embarked'].value_counts()
df['Survived'].value_counts()
df.info()
df['Age'].plot(kind='hist', figsize=(10,6))
plt.title('Distribution of Age', size = '20')
plt.xlabel('Age', size = '20')
plt.ylabel('Number of passengers', size = '20')
median_age = df['Age'].median()
plt.axvline(median_age, color = 'r')
median_age
df['Age'].fillna(median_age, inplace = True)
df.info()
df[df['Survived']==1]['Age'].plot(kind='hist', bins = 10, range = (0,100), figsize=(10,6), alpha = 0.3, color = 'g')
df[df['Survived']==0]['Age'].plot(kind='hist', bins = 10, range = (0,100), figsize=(10,6), alpha = 0.3, color = 'r')
plt.title('Distribution of Age', size = '20')
plt.xlabel('Age', size = '20')
plt.ylabel('Number of passengers', size = '20')
plt.legend(['Survived', 'Dead'])
plt.show()
survival_by_gender = df[['Sex','Survived']].pivot_table(columns =
['Survived'], index = ['Sex'], aggfunc=len)
survival_by_gender
survival_by_gender.plot(kind = 'bar', stacked = True)
plt.show()
survival_by_Pclass = df[['Pclass','Survived']].pivot_table(columns =
['Survived'], index = ['Pclass'], aggfunc=len)
survival_by_Pclass
survival_by_Pclass.plot(kind = 'bar', stacked = True)
plt.show()
df['Male'] = df['Sex'].map({'male': 1, 'female': 0})
df[['Sex', 'Male']].head()
actual_dead = len(df[df['Survived'] == 0])
total_passengers = len(df)
ratio_of_dead = actual_dead / float(total_passengers)
print "If I predict everybody dies, I'm correct %0.1f %% of the time" % (100 * ratio_of_dead)
df['Survived'].value_counts()
X = df[['Male', 'Pclass', 'Age']]
y = df['Survived']
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=0)
model
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.2, random_state=0)
model.fit(X_train, y_train)
my_score = model.score(X_test, y_test)
print "Classification Score: %0.2f" % my_score
from sklearn.metrics import confusion_matrix
y_pred = model.predict(X_test)
print "\n=======confusion matrix=========="
print confusion_matrix(y_test, y_pred)
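# Small illustrative check, not in the original walkthrough: the score reported
# above can also be recovered directly from the held-out predictions.
from sklearn.metrics import accuracy_score
print "Accuracy on the test split: %0.2f" % accuracy_score(y_test, y_pred)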
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Part 0
Step4: In this lab we will be examining subsets of the tuples we create (e.g., the top rated movies by users). Whenever we examine only a subset of a large dataset, there is the potential that the result will depend on the order we perform operations, such as joins, or how the data is partitioned across the workers. What we want to guarantee is that we always see the same results for a subset, independent of how we manipulate or store the data.
Step6: Even though the two lists contain identical tuples, the difference in ordering sometimes yields a different ordering for the sorted RDD (try running the cell repeatedly and see if the results change or the assertion fails). If we only examined the first two elements of the RDD (e.g., using take(2)), then we would observe different answers - that is a really bad outcome as we want identical input data to always yield identical output. A better technique is to sort the RDD by both the key and value, which we can do by combining the key and value into a single string and then sorting on that string. Since the key is an integer and the value is a unicode string, we can use a function to combine them into a single unicode string (e.g., unicode('%.3f' % key) + ' ' + value) before sorting the RDD using sortBy().
Step7: If we just want to look at the first few elements of the RDD in sorted order, we can use the takeOrdered method with the sortFunction we defined.
Step9: Part 1
Step10: (1b) Movies with Highest Average Ratings
Step11: (1c) Movies with Highest Average Ratings and more than 500 reviews
Step12: Using a threshold on the number of reviews is one way to improve the recommendations, but there are many other good ways to improve quality. For example, you could weight ratings by the number of ratings.
Step14: After splitting the dataset, your training set has about 293,000 entries and the validation and test sets each have about 97,000 entries (the exact number of entries in each dataset varies slightly due to the random nature of the randomSplit() transformation.
Step15: (2c) Using ALS.train()
Step16: (2d) Testing Your Model
Step17: (2e) Comparing Your Model
Step18: You now have code to predict how users will rate movies!
Step19: The user ID 0 is unassigned, so we will use it for your ratings. We set the variable myUserID to 0 for you. Next, create a new RDD myRatingsRDD with your ratings for at least 10 movie ratings. Each entry should be formatted as (myUserID, movieID, rating) (i.e., each entry should be formatted in the same way as trainingRDD). As in the original dataset, ratings should be between 1 and 5 (inclusive). If you have not seen at least 10 of these movies, you can increase the parameter passed to take() in the above cell until there are 10 movies that you have seen (or you can also guess what your rating would be for movies you have not seen).
Step20: (3b) Add Your Movies to Training Dataset
Step21: (3c) Train a Model with Your Ratings
Step22: (3d) Check RMSE for the New Model with Your Ratings
Step23: (3e) Predict Your Ratings
Step24: (3f) Predict Your Ratings
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
from test_helper import Test
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab4', 'small')
ratingsFilename = os.path.join(baseDir, inputPath, 'ratings.dat.gz')
moviesFilename = os.path.join(baseDir, inputPath, 'movies.dat')
numPartitions = 2
rawRatings = sc.textFile(ratingsFilename).repartition(numPartitions)
rawMovies = sc.textFile(moviesFilename)
def get_ratings_tuple(entry):
    """Parse a line in the ratings dataset
    Args:
        entry (str): a line in the ratings dataset in the form of UserID::MovieID::Rating::Timestamp
    Returns:
        tuple: (UserID, MovieID, Rating)
    """
items = entry.split('::')
return int(items[0]), int(items[1]), float(items[2])
def get_movie_tuple(entry):
    """Parse a line in the movies dataset
    Args:
        entry (str): a line in the movies dataset in the form of MovieID::Title::Genres
    Returns:
        tuple: (MovieID, Title)
    """
items = entry.split('::')
return int(items[0]), items[1]
ratingsRDD = rawRatings.map(get_ratings_tuple).cache()
moviesRDD = rawMovies.map(get_movie_tuple).cache()
ratingsCount = ratingsRDD.count()
moviesCount = moviesRDD.count()
print 'There are %s ratings and %s movies in the datasets' % (ratingsCount, moviesCount)
print 'Ratings: %s' % ratingsRDD.take(3)
print 'Movies: %s' % moviesRDD.take(3)
assert ratingsCount == 487650
assert moviesCount == 3883
assert moviesRDD.filter(lambda (id, title): title == 'Toy Story (1995)').count() == 1
assert (ratingsRDD.takeOrdered(1, key=lambda (user, movie, rating): movie)
== [(1, 1, 5.0)])
tmp1 = [(1, u'alpha'), (2, u'alpha'), (2, u'beta'), (3, u'alpha'), (1, u'epsilon'), (1, u'delta')]
tmp2 = [(1, u'delta'), (2, u'alpha'), (2, u'beta'), (3, u'alpha'), (1, u'epsilon'), (1, u'alpha')]
oneRDD = sc.parallelize(tmp1)
twoRDD = sc.parallelize(tmp2)
oneSorted = oneRDD.sortByKey(True).collect()
twoSorted = twoRDD.sortByKey(True).collect()
print oneSorted
print twoSorted
assert set(oneSorted) == set(twoSorted) # Note that both lists have the same elements
assert twoSorted[0][0] < twoSorted.pop()[0] # Check that it is sorted by the keys
assert oneSorted[0:2] != twoSorted[0:2] # Note that the subset consisting of the first two elements does not match
def sortFunction(tuple):
    """Construct the sort string (does not perform actual sorting)
    Args:
        tuple: (rating, MovieName)
    Returns:
        sortString: the value to sort with, 'rating MovieName'
    """
key = unicode('%.3f' % tuple[0])
value = tuple[1]
return (key + ' ' + value)
print oneRDD.sortBy(sortFunction, True).collect()
print twoRDD.sortBy(sortFunction, True).collect()
oneSorted1 = oneRDD.takeOrdered(oneRDD.count(),key=sortFunction)
twoSorted1 = twoRDD.takeOrdered(twoRDD.count(),key=sortFunction)
print 'one is %s' % oneSorted1
print 'two is %s' % twoSorted1
assert oneSorted1 == twoSorted1
# TODO: Replace <FILL IN> with appropriate code
# First, implement a helper function `getCountsAndAverages` using only Python
def getCountsAndAverages(IDandRatingsTuple):
    """Calculate average rating
    Args:
        IDandRatingsTuple: a single tuple of (MovieID, (Rating1, Rating2, Rating3, ...))
    Returns:
        tuple: a tuple of (MovieID, (number of ratings, averageRating))
    """
pid = IDandRatingsTuple[0]
cnt = len(IDandRatingsTuple[1])
avg = float(sum(IDandRatingsTuple[1]))/cnt
return (pid, (cnt, avg))
# TEST Number of Ratings and Average Ratings for a Movie (1a)
Test.assertEquals(getCountsAndAverages((1, (1, 2, 3, 4))), (1, (4, 2.5)),
'incorrect getCountsAndAverages() with integer list')
Test.assertEquals(getCountsAndAverages((100, (10.0, 20.0, 30.0))), (100, (3, 20.0)),
'incorrect getCountsAndAverages() with float list')
Test.assertEquals(getCountsAndAverages((110, xrange(20))), (110, (20, 9.5)),
'incorrect getCountsAndAverages() with xrange')
# TODO: Replace <FILL IN> with appropriate code
# From ratingsRDD with tuples of (UserID, MovieID, Rating) create an RDD with tuples of
# the (MovieID, iterable of Ratings for that MovieID)
movieIDsWithRatingsRDD = (ratingsRDD
.map(lambda (u_id,m_id,rating) : (m_id,rating))
.groupByKey())
print 'movieIDsWithRatingsRDD: %s\n' % movieIDsWithRatingsRDD.take(3)
# Using `movieIDsWithRatingsRDD`, compute the number of ratings and average rating for each movie to
# yield tuples of the form (MovieID, (number of ratings, average rating))
movieIDsWithAvgRatingsRDD = movieIDsWithRatingsRDD.map(getCountsAndAverages)
print 'movieIDsWithAvgRatingsRDD: %s\n' % movieIDsWithAvgRatingsRDD.take(3)
# To `movieIDsWithAvgRatingsRDD`, apply RDD transformations that use `moviesRDD` to get the movie
# names for `movieIDsWithAvgRatingsRDD`, yielding tuples of the form
# (average rating, movie name, number of ratings)
movieNameWithAvgRatingsRDD = (moviesRDD
.join(movieIDsWithAvgRatingsRDD)
.map(lambda (id, (name, (num, avg))): (avg, name, num)))
print 'movieNameWithAvgRatingsRDD: %s\n' % movieNameWithAvgRatingsRDD.take(3)
# TEST Movies with Highest Average Ratings (1b)
Test.assertEquals(movieIDsWithRatingsRDD.count(), 3615,
'incorrect movieIDsWithRatingsRDD.count() (expected 3615)')
movieIDsWithRatingsTakeOrdered = movieIDsWithRatingsRDD.takeOrdered(3)
Test.assertTrue(movieIDsWithRatingsTakeOrdered[0][0] == 1 and
len(list(movieIDsWithRatingsTakeOrdered[0][1])) == 993,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[0] (expected 993)')
Test.assertTrue(movieIDsWithRatingsTakeOrdered[1][0] == 2 and
len(list(movieIDsWithRatingsTakeOrdered[1][1])) == 332,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[1] (expected 332)')
Test.assertTrue(movieIDsWithRatingsTakeOrdered[2][0] == 3 and
len(list(movieIDsWithRatingsTakeOrdered[2][1])) == 299,
'incorrect count of ratings for movieIDsWithRatingsTakeOrdered[2] (expected 299)')
Test.assertEquals(movieIDsWithAvgRatingsRDD.count(), 3615,
'incorrect movieIDsWithAvgRatingsRDD.count() (expected 3615)')
Test.assertEquals(movieIDsWithAvgRatingsRDD.takeOrdered(3),
[(1, (993, 4.145015105740181)), (2, (332, 3.174698795180723)),
(3, (299, 3.0468227424749164))],
'incorrect movieIDsWithAvgRatingsRDD.takeOrdered(3)')
Test.assertEquals(movieNameWithAvgRatingsRDD.count(), 3615,
'incorrect movieNameWithAvgRatingsRDD.count() (expected 3615)')
Test.assertEquals(movieNameWithAvgRatingsRDD.takeOrdered(3),
[(1.0, u'Autopsy (Macchie Solari) (1975)', 1), (1.0, u'Better Living (1998)', 1),
(1.0, u'Big Squeeze, The (1996)', 3)],
'incorrect movieNameWithAvgRatingsRDD.takeOrdered(3)')
# TODO: Replace <FILL IN> with appropriate code
# Apply an RDD transformation to `movieNameWithAvgRatingsRDD` to limit the results to movies with
# ratings from more than 500 people. We then use the `sortFunction()` helper function to sort by the
# average rating to get the movies in order of their rating (highest rating first)
movieLimitedAndSortedByRatingRDD = (movieNameWithAvgRatingsRDD
.filter(lambda (avg, name, num) : num > 500)
.sortBy(sortFunction, False))
print 'Movies with highest ratings: %s' % movieLimitedAndSortedByRatingRDD.take(20)
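# Illustrative sketch only, not part of the lab solution: the description also mentions
# weighting ratings by the number of ratings instead of a hard 500-review threshold.
# One hypothetical variant is a damped (Bayesian) mean that pulls sparsely rated movies
# toward the global average; `priorWeight` is an assumed tuning constant.
globalAvgRating = movieNameWithAvgRatingsRDD.map(lambda (avg, name, num): avg).mean()
priorWeight = 500.0
weightedRatingsRDD = (movieNameWithAvgRatingsRDD
                      .map(lambda (avg, name, num):
                           ((num * avg + priorWeight * globalAvgRating) / (num + priorWeight), name, num)))
print 'Weighted ratings (sketch): %s' % weightedRatingsRDD.takeOrdered(5, key=lambda (avg, name, num): -avg)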
# TEST Movies with Highest Average Ratings and more than 500 Reviews (1c)
Test.assertEquals(movieLimitedAndSortedByRatingRDD.count(), 194,
'incorrect movieLimitedAndSortedByRatingRDD.count()')
Test.assertEquals(movieLimitedAndSortedByRatingRDD.take(20),
[(4.5349264705882355, u'Shawshank Redemption, The (1994)', 1088),
(4.515798462852263, u"Schindler's List (1993)", 1171),
(4.512893982808023, u'Godfather, The (1972)', 1047),
(4.510460251046025, u'Raiders of the Lost Ark (1981)', 1195),
(4.505415162454874, u'Usual Suspects, The (1995)', 831),
(4.457256461232604, u'Rear Window (1954)', 503),
(4.45468509984639, u'Dr. Strangelove or: How I Learned to Stop Worrying and Love the Bomb (1963)', 651),
(4.43953006219765, u'Star Wars: Episode IV - A New Hope (1977)', 1447),
(4.4, u'Sixth Sense, The (1999)', 1110), (4.394285714285714, u'North by Northwest (1959)', 700),
(4.379506641366224, u'Citizen Kane (1941)', 527), (4.375, u'Casablanca (1942)', 776),
(4.363975155279503, u'Godfather: Part II, The (1974)', 805),
(4.358816276202219, u"One Flew Over the Cuckoo's Nest (1975)", 811),
(4.358173076923077, u'Silence of the Lambs, The (1991)', 1248),
(4.335826477187734, u'Saving Private Ryan (1998)', 1337),
(4.326241134751773, u'Chinatown (1974)', 564),
     (4.325383304940375, u'Life Is Beautiful (La Vita \xe8 bella) (1997)', 587),
(4.324110671936759, u'Monty Python and the Holy Grail (1974)', 759),
(4.3096, u'Matrix, The (1999)', 1250)], 'incorrect sortedByRatingRDD.take(20)')
trainingRDD, validationRDD, testRDD = ratingsRDD.randomSplit([6, 2, 2], seed=0L)
print 'Training: %s, validation: %s, test: %s\n' % (trainingRDD.count(),
validationRDD.count(),
testRDD.count())
print trainingRDD.take(3)
print validationRDD.take(3)
print testRDD.take(3)
assert trainingRDD.count() == 292716
assert validationRDD.count() == 96902
assert testRDD.count() == 98032
assert trainingRDD.filter(lambda t: t == (1, 914, 3.0)).count() == 1
assert trainingRDD.filter(lambda t: t == (1, 2355, 5.0)).count() == 1
assert trainingRDD.filter(lambda t: t == (1, 595, 5.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 1287, 5.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 594, 4.0)).count() == 1
assert validationRDD.filter(lambda t: t == (1, 1270, 5.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 1193, 5.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 2398, 4.0)).count() == 1
assert testRDD.filter(lambda t: t == (1, 1035, 5.0)).count() == 1
# TODO: Replace <FILL IN> with appropriate code
import math
def computeError(predictedRDD, actualRDD):
    """Compute the root mean squared error between predicted and actual

    Args:
        predictedRDD: predicted ratings for each movie and each user where each entry is in the form
            (UserID, MovieID, Rating)
        actualRDD: actual ratings where each entry is in the form (UserID, MovieID, Rating)

    Returns:
        RMSE (float): computed RMSE value
    """
# Transform predictedRDD into the tuples of the form ((UserID, MovieID), Rating)
predictedReformattedRDD = predictedRDD.map(lambda (u_Id, m_Id, rating): ((u_Id, m_Id), rating))
# Transform actualRDD into the tuples of the form ((UserID, MovieID), Rating)
actualReformattedRDD = actualRDD.map(lambda (u_Id, m_Id, rating): ((u_Id, m_Id), rating))
    # Compute the squared error for each matching entry (i.e., the same (User ID, Movie ID) in each
    #   RDD) in the reformatted RDDs using RDD transformations - do not use collect()
squaredErrorsRDD = (predictedReformattedRDD
.join(actualReformattedRDD)
.map(lambda (k, (v1,v2)): (k, (v1-v2)**2)))
# Compute the total squared error - do not use collect()
totalError = squaredErrorsRDD.reduce(lambda (k1,v1), (k2,v2): (k1,v1+v2))
# Count the number of entries for which you computed the total squared error
numRatings = squaredErrorsRDD.count()
    # Using the total squared error and the number of entries, compute the RMSE
return math.sqrt(float(totalError[1])/numRatings)
# sc.parallelize turns a Python list into a Spark RDD.
testPredicted = sc.parallelize([
(1, 1, 5),
(1, 2, 3),
(1, 3, 4),
(2, 1, 3),
(2, 2, 2),
(2, 3, 4)])
testActual = sc.parallelize([
(1, 2, 3),
(1, 3, 5),
(2, 1, 5),
(2, 2, 1)])
testPredicted2 = sc.parallelize([
(2, 2, 5),
(1, 2, 5)])
testError = computeError(testPredicted, testActual)
print 'Error for test dataset (should be 1.22474487139): %s' % testError
testError2 = computeError(testPredicted2, testActual)
print 'Error for test dataset2 (should be 3.16227766017): %s' % testError2
testError3 = computeError(testActual, testActual)
print 'Error for testActual dataset (should be 0.0): %s' % testError3
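# Where the expected values above come from (illustrative plain-Python check, not part of the lab):
# testPredicted and testActual share four (UserID, MovieID) keys with squared errors 0, 1, 4 and 1,
# so the RMSE is sqrt((0 + 1 + 4 + 1) / 4) = sqrt(1.5) ~ 1.2247
print math.sqrt((0 + 1 + 4 + 1) / 4.0)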
# TEST Root Mean Square Error (2b)
Test.assertTrue(abs(testError - 1.22474487139) < 0.00000001,
'incorrect testError (expected 1.22474487139)')
Test.assertTrue(abs(testError2 - 3.16227766017) < 0.00000001,
'incorrect testError2 result (expected 3.16227766017)')
Test.assertTrue(abs(testError3 - 0.0) < 0.00000001,
'incorrect testActual result (expected 0.0)')
# TODO: Replace <FILL IN> with appropriate code
from pyspark.mllib.recommendation import ALS
validationForPredictRDD = validationRDD.map(lambda (userID, movieID, rating) : (userID, movieID))
seed = 5L
iterations = 5
regularizationParameter = 0.1
ranks = [4, 8, 12]
errors = [0, 0, 0]
err = 0
tolerance = 0.03
minError = float('inf')
bestRank = -1
bestIteration = -1
for rank in ranks:
model = ALS.train(trainingRDD, rank, seed=seed, iterations=iterations,
lambda_=regularizationParameter)
predictedRatingsRDD = model.predictAll(validationForPredictRDD)
error = computeError(predictedRatingsRDD, validationRDD)
errors[err] = error
err += 1
print 'For rank %s the RMSE is %s' % (rank, error)
if error < minError:
minError = error
bestRank = rank
print 'The best model was trained with rank %s' % bestRank
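# Optional extension (illustrative sketch, not part of the original exercise): the same pattern
# generalises to a small grid search over the regularization parameter as well, e.g.
# for reg in [0.05, 0.1, 0.2]:
#     model = ALS.train(trainingRDD, bestRank, seed=seed, iterations=iterations, lambda_=reg)
#     print 'For lambda %s the RMSE is %s' % (reg, computeError(model.predictAll(validationForPredictRDD), validationRDD))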
# TEST Using ALS.train (2c)
Test.assertEquals(trainingRDD.getNumPartitions(), 2,
'incorrect number of partitions for trainingRDD (expected 2)')
Test.assertEquals(validationForPredictRDD.count(), 96902,
'incorrect size for validationForPredictRDD (expected 96902)')
Test.assertEquals(validationForPredictRDD.filter(lambda t: t == (1, 1907)).count(), 1,
'incorrect content for validationForPredictRDD')
Test.assertTrue(abs(errors[0] - 0.883710109497) < tolerance, 'incorrect errors[0]')
Test.assertTrue(abs(errors[1] - 0.878486305621) < tolerance, 'incorrect errors[1]')
Test.assertTrue(abs(errors[2] - 0.876832795659) < tolerance, 'incorrect errors[2]')
# TODO: Replace <FILL IN> with appropriate code
myModel = ALS.train(trainingRDD, rank=bestRank, seed=seed, iterations=iterations, lambda_=regularizationParameter)
testForPredictingRDD = testRDD.map(lambda (userID, movieID, rating) : (userID, movieID))
predictedTestRDD = myModel.predictAll(testForPredictingRDD)
testRMSE = computeError(testRDD, predictedTestRDD)
print 'The model had a RMSE on the test set of %s' % testRMSE
# TEST Testing Your Model (2d)
Test.assertTrue(abs(testRMSE - 0.87809838344) < tolerance, 'incorrect testRMSE')
# TODO: Replace <FILL IN> with appropriate code
trainingAvgRating = trainingRDD.map(lambda (userID, movieID, rating): rating).reduce(lambda r1, r2: (r1+r2))/trainingRDD.count()
print 'The average rating for movies in the training set is %s' % trainingAvgRating
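# Equivalent, more concise alternative (not part of the original solution): RDDs expose mean(), so
# trainingAvgRating could also be computed as
# trainingRDD.map(lambda (userID, movieID, rating): rating).mean()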
testForAvgRDD = testRDD.map(lambda (userID, movieID, rating): (userID, movieID, trainingAvgRating))
testAvgRMSE = computeError(testRDD, testForAvgRDD)
print 'The RMSE on the average set is %s' % testAvgRMSE
# TEST Comparing Your Model (2e)
Test.assertTrue(abs(trainingAvgRating - 3.57409571052) < 0.000001,
'incorrect trainingAvgRating (expected 3.57409571052)')
Test.assertTrue(abs(testAvgRMSE - 1.12036693569) < 0.000001,
'incorrect testAvgRMSE (expected 1.12036693569)')
print 'Most rated movies:'
print '(average rating, movie name, number of reviews)'
for ratingsTuple in movieLimitedAndSortedByRatingRDD.take(50):
print ratingsTuple
# TODO: Replace <FILL IN> with appropriate code
myUserID = 0
# Note that the movie IDs are the *last* number on each line. A common error was to use the number of ratings as the movie ID.
myRatedMovies = [
(0, 516, 5),
(0, 553, 5),
(0, 811, 4),
(0, 817, 2),
(0, 539, 3),
(0,848, 5),
(0, 1300, 3),
(0, 7895, 5),
(0, 551, 2),
(0, 750, 1)
# The format of each line is (myUserID, movie ID, your rating)
# For example, to give the movie "Star Wars: Episode IV - A New Hope (1977)" a five rating, you would add the following line:
# (myUserID, 260, 5),
]
myRatingsRDD = sc.parallelize(myRatedMovies)
print 'My movie ratings: %s' % myRatingsRDD.take(10)
# TODO: Replace <FILL IN> with appropriate code
trainingWithMyRatingsRDD = trainingRDD.union(myRatingsRDD)
print ('The training dataset now has %s more entries than the original training dataset' %
(trainingWithMyRatingsRDD.count() - trainingRDD.count()))
assert (trainingWithMyRatingsRDD.count() - trainingRDD.count()) == myRatingsRDD.count()
# TODO: Replace <FILL IN> with appropriate code
myRatingsModel = ALS.train(trainingWithMyRatingsRDD, bestRank, seed=seed, iterations=iterations, lambda_=regularizationParameter)
# TODO: Replace <FILL IN> with appropriate code
predictedTestMyRatingsRDD = myRatingsModel.predictAll(testForPredictingRDD)
testRMSEMyRatings = computeError(testRDD, predictedTestMyRatingsRDD)
print 'The model had a RMSE on the test set of %s' % testRMSEMyRatings
# TODO: Replace <FILL IN> with appropriate code
# Use the Python list myRatedMovies to transform the moviesRDD into an RDD with entries that are pairs of the form (myUserID, Movie ID) and that does not contain any movies that you have rated.
myUnratedMoviesRDD = (moviesRDD
                      .map(lambda (id, title): (myUserID, id))
                      .filter(lambda (userID, movieID): movieID not in [ratedID for (myID, ratedID, rating) in myRatedMovies]))
# Use the input RDD, myUnratedMoviesRDD, with myRatingsModel.predictAll() to predict your ratings for the movies
predictedRatingsRDD = myRatingsModel.predictAll(myUnratedMoviesRDD)
# TODO: Replace <FILL IN> with appropriate code
# Transform movieIDsWithAvgRatingsRDD from part (1b), which has the form (MovieID, (number of ratings, average rating)), into an RDD of the form (MovieID, number of ratings)
movieCountsRDD = movieIDsWithAvgRatingsRDD.map(lambda (movieID, (num, avg)): (movieID, num))
# Transform predictedRatingsRDD into an RDD with entries that are pairs of the form (Movie ID, Predicted Rating)
predictedRDD = predictedRatingsRDD.map(lambda (myID, movieID, rating): (movieID, rating))
# Use RDD transformations with predictedRDD and movieCountsRDD to yield an RDD with tuples of the form (Movie ID, (Predicted Rating, number of ratings))
predictedWithCountsRDD = (predictedRDD
.join(movieCountsRDD))
# Use RDD transformations with PredictedWithCountsRDD and moviesRDD to yield an RDD with tuples of the form (Predicted Rating, Movie Name, number of ratings), for movies with more than 75 ratings
ratingsWithNamesRDD = (predictedWithCountsRDD
.join(moviesRDD)
.map(lambda (movieID,((PredictedRating, num), name)) : (PredictedRating, name, num))
.filter(lambda (rating, name, num): num>75))
predictedHighestRatedMovies = ratingsWithNamesRDD.takeOrdered(20, key=lambda x: -x[0])
print ('My highest rated movies as predicted (for movies with more than 75 reviews):\n%s' %
'\n'.join(map(str, predictedHighestRatedMovies)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step4: 2.5. Convolution
Step5: Figure 2.5.1
Step7: Figure 2.5.2
Step9: Figure 2.5.3
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
import math
from IPython.display import HTML
HTML('../style/code_toggle.html')
import math
from matplotlib import rcParams
rcParams['text.usetex'] = True
#def trianglewave(x, T):
#
# This is a sawtooth, though
#
# return np.mod(x/T,1.)*np.logical_and(x>=0,x<=T)
def trianglewave(x, T):
    """T is the period."""
return np.abs(2.*(np.mod(x/T,1.)-0.5))-0.5
def boxcar(x,a,b,amp):
return amp*np.logical_and(x>=a,x<=b)
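# Quick sanity check (illustrative, not part of the original notebook): with amp = 1/(b-a) the
# boxcar integrates to ~1, so convolving with it acts as a normalised moving average.
xcheck = np.linspace(-2., 2., 1000)
print(np.trapz(boxcar(xcheck, -0.1, 0.1, 1./0.2), xcheck))   # should be close to 1.0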
def plottriboxconv(a, b, period):
    # Limits of the boxcar. Play around with this.
# a = -0.1
# b = 0.1
# Plotting range
xrange = [-2., 2.]
# Create functions
xpoints = 1000
# Resolution element
dx = (xrange[1]-xrange[0])/float(xpoints)
x = np.linspace(xrange[0], xrange[1], xpoints)
y = boxcar(x, a, b, 1.)
# boxcar will be normalised to 1. amp = 1./(b-a) works in the limit of many points, but here we do
# numberofpixelsinbox*dx*amplitude = y.sum *dx*amplitude = 1
# to take into account numerical effects
amp = float(xpoints)/((xrange[1]-xrange[0])* y.sum())
y = boxcar(x, a, b, 1./(b-a))
ycorr = boxcar(x, a, b, amp)
z = trianglewave(x, period)
result = np.convolve(ycorr,z,'same')
result = dx*result
# Start the plot, create a figure instance and a subplot
fig = plt.figure()
ax1 = fig.add_subplot(311)
fig.tight_layout()
plt.subplots_adjust(hspace = 0.6)
# Axis ranges
ax1.axis([xrange[0]+(b-a), xrange[1]-(b-a), z.min()-0.1*(z.max()-z.min()), z.max()+0.1*(z.max()-z.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,z,'b-')
plt.title("Triangle wave", fontsize=14,color='black')
ax2 = fig.add_subplot(312, sharex=ax1)
# Axis ranges
ax2.axis([xrange[0]+(b-a), xrange[1]-(b-a), ycorr.min()-0.1*(ycorr.max()-ycorr.min()), \
ycorr.max()+0.1*(ycorr.max()-ycorr.min())])
# Plot a grid
ax2.grid(True)
# Insert lines at x=0 and y=0
ax2.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax2.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
e1 = int(math.ceil(xpoints*(a-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[:e1],y[:e1],'b-')
ax2.plot([a, a],[0., amp],'b--')
e2 = int(math.floor(xpoints*(b-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[e1:e2],y[e1:e2],'b-')
e3 = xpoints
ax2.plot(x[e2:],y[e2:],'b-')
ax2.plot([b, b],[0., amp],'b--')
plt.title("Rectangle function", fontsize=14,color='black')
ax3 = fig.add_subplot(313, sharex=ax2)
# Axis ranges: mask out border effects
rmin = result.min()
rmax = result.max()
# Just to make the result a bit more beautiful if the function is very flat
if (rmax - rmin) < 0.1:
rmin=rmin-0.1
rmax=rmax+0.1
ax3.axis([xrange[0]+(b-a), xrange[1]-(b-a), rmin-0.1*(rmax-rmin), rmax+0.1*(rmax-rmin)])
# Plot a grid
ax3.grid(True)
# Insert lines at x=0 and y=0
ax3.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax3.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
plr1 = int(xpoints*(b-a)/(xrange[1]-xrange[0]))
plr2 = int(xpoints*(1-(b-a)/(xrange[1]-xrange[0])))
ax3.plot(x[plr1:plr2],result[plr1:plr2],'b-')
plt.title("Triangle wave filtered with rectangle function", fontsize=14,color='black')
# first two arguments give the position of the rectangle, third the period of the Triangle
plottriboxconv(-0.1, 0.1, 1.0)
# first two arguments give the position of the rectangle, third the period of the Triangle
plottriboxconv(-0.5, 0.5, 1.0)
from matplotlib import rcParams
rcParams['text.usetex'] = True
def noisycosinewave(x, amplitude, T, sigma):
    """T is the period, sigma is the dispersion, amplitude the amplitude."""
return amplitude*np.cos(2.*math.pi*x/T)+np.random.normal(scale=sigma, size=x.size)
def boxcar(x,a,b,amp):
return amp*np.logical_and(x>=a,x<=b)
def plotcosboxconv(a, b, period, sigma):
    # Limits of the boxcar. Play around with this.
# a = -0.1
# b = 0.1
# Plotting range
xrange = [-2., 2.]
# Create functions
xpoints = 1000
# Resolution element
dx = (xrange[1]-xrange[0])/float(xpoints)
x = np.linspace(xrange[0], xrange[1], xpoints)
y = boxcar(x, a, b, 1.)
# boxcar will be normalised to 1. amp = 1./(b-a) works in the limit of many points, but here we do
# numberofpixelsinbox*dx*amplitude = y.sum *dx*amplitude = 1
# to take into account numerical effects
amp = float(xpoints)/((xrange[1]-xrange[0])* y.sum())
y = boxcar(x, a, b, 1./(b-a))
ycorr = boxcar(x, a, b, amp)
z = noisycosinewave(x, 1., period, sigma)
c = np.cos(2.*math.pi*x/period)
result = np.convolve(ycorr,z,'same')
result = dx*result
# Start the plot, create a figure instance and a subplot
fig = plt.figure()
ax1 = fig.add_subplot(411)
fig.tight_layout()
plt.subplots_adjust(hspace = 0.8)
# Axis ranges
ax1.axis([xrange[0]+(b-a), xrange[1]-(b-a), c.min()-0.1*(c.max()-c.min()), c.max()+0.1*(c.max()-c.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,c,'b-')
plt.title("Original function (cos)", fontsize=14,color='black')
ax1 = fig.add_subplot(412)
# Axis ranges
ax1.axis([xrange[0]+(b-a), xrange[1]-(b-a), z.min()-0.1*(z.max()-z.min()), z.max()+0.1*(z.max()-z.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,z,'b-')
plt.title("Noise added", fontsize=14,color='black')
ax2 = fig.add_subplot(413, sharex=ax1)
# Axis ranges
ax2.axis([xrange[0]+(b-a), xrange[1]-(b-a), ycorr.min()-0.1*(ycorr.max()-ycorr.min()), \
ycorr.max()+0.1*(ycorr.max()-ycorr.min())])
# Plot a grid
ax2.grid(True)
# Insert lines at x=0 and y=0
ax2.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax2.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
e1 = int(math.ceil(xpoints*(a-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[:e1],y[:e1],'b-')
ax2.plot([a, a],[0., amp],'b--')
e2 = int(math.floor(xpoints*(b-xrange[0])/(xrange[1]-xrange[0])))
ax2.plot(x[e1:e2],y[e1:e2],'b-')
e3 = xpoints
ax2.plot(x[e2:],y[e2:],'b-')
ax2.plot([b, b],[0., amp],'b--')
plt.title("Rectangle function", fontsize=14,color='black')
ax3 = fig.add_subplot(414, sharex=ax2)
# Axis ranges: mask out border effects
rmin = result.min()
rmax = result.max()
# Just to make the result a bit more beautiful if the function is very flat
if (rmax - rmin) < 0.1:
rmin=rmin-0.1
rmax=rmax+0.1
ax3.axis([xrange[0]+(b-a), xrange[1]-(b-a), rmin-0.1*(rmax-rmin), rmax+0.1*(rmax-rmin)])
# Plot a grid
ax3.grid(True)
# Insert lines at x=0 and y=0
ax3.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax3.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
plr1 = int(xpoints*(b-a)/(xrange[1]-xrange[0]))
plr2 = int(xpoints*(1-(b-a)/(xrange[1]-xrange[0])))
ax3.plot(x[plr1:plr2],result[plr1:plr2],'b-')
plt.title("Noisy function filtered with rectangle function", fontsize=14,color='black')
# First two arguments give the position of the rectangle, third the period of the cosine, fourth the noise dispersion
plotcosboxconv(-0.1, 0.1, 1.0, 2.5)
from matplotlib import rcParams
rcParams['text.usetex'] = True
def gausshermetian(x, amp, mu, sigma, h3, h4):
    """Gauss-Hermite line profile: amp is the amplitude, mu the centre, sigma the dispersion, h3 and h4 the higher-order coefficients."""
y = (x-mu)/sigma
return amp*np.exp(-0.5*y**2)*(1+h3*(2*np.sqrt(2.)*y**3-3*np.sqrt(2.)*y)/np.sqrt(6.)+h4*(4*y**4-12*y**2+3)/np.sqrt(24))
#amplitude*np.cos(2.*math.pi*x/T)+np.random.normal(scale=sigma, size=x.size)
def boxcar(x,a,b,amp):
return amp*np.logical_and(x>=a,x<=b)
def plotskewedgaussobs(pos1, pos2, boxwidth, sigma, h3, h4):
    # Limits of the boxcar. Play around with this.
# a = -0.1
# b = 0.1
# Plotting range
xrange = [-2., 2.]
# Create functions
xpoints = 1000
# Resolution element
dx = (xrange[1]-xrange[0])/float(xpoints)
x = np.linspace(xrange[0], xrange[1], xpoints)
y = boxcar(x, pos1-boxwidth/2., pos1+boxwidth/2, \
1./boxwidth)+0.5*boxcar(x, pos2-boxwidth/2., pos2+boxwidth/2, 1./boxwidth)
# boxcar will be normalised to 1. amp = 1./(b-a) works in the limit of many points, but here we do
# numberofpixelsinbox*dx*amplitude = y.sum *dx*amplitude = 1
# to take into account numerical effects
z = gausshermetian(x, 1., 0., sigma, h3, h4)
result = np.convolve(y,z,'same')
result = dx*result
# Start the plot, create a figure instance and a subplot
fig = plt.figure()
ax1 = fig.add_subplot(311)
fig.tight_layout()
plt.subplots_adjust(hspace = 0.7)
# Axis ranges
ax1.axis([xrange[0]+boxwidth, xrange[1]-boxwidth, y.min()-0.1*(y.max()-y.min()), y.max()+0.1*(y.max()-y.min())])
# Plot a grid
ax1.grid(True)
# Insert lines at x=0 and y=0
ax1.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax1.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax1.plot(x,y,'b-')
plt.title("Original function, impulse", fontsize=14,color='black')
ax2 = fig.add_subplot(312, sharex=ax1)
# Axis ranges
ax2.axis([xrange[0]+boxwidth, xrange[1]-boxwidth, z.min()-0.1*(z.max()-z.min()), z.max()+0.1*(z.max()-z.min())])
# Plot a grid
ax2.grid(True)
# Insert lines at x=0 and y=0
ax2.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax2.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
ax2.plot(x,z,'b-')
plt.title("Instrumental function", fontsize=14,color='black')
ax3 = fig.add_subplot(313, sharex=ax2)
# Axis ranges: mask out border effects
rmin = result.min()
rmax = result.max()
ax3.axis([xrange[0]+boxwidth, xrange[1]-boxwidth, rmin-0.1*(rmax-rmin), rmax+0.1*(rmax-rmin)])
# Plot a grid
ax3.grid(True)
# Insert lines at x=0 and y=0
ax3.axhline(0.,linewidth=1, color = 'k', linestyle='dashed')
ax3.axvline(0.,linewidth=1, color = 'k', linestyle='dashed')
# Plot function
plr1 = int(xpoints*boxwidth/(xrange[1]-xrange[0]))
plr2 = int(xpoints*(1-boxwidth/(xrange[1]-xrange[0])))
ax3.plot(x[plr1:plr2],result[plr1:plr2],'b-')
plt.title("Image: original function filtered with instrumental function", fontsize=14,color='black')
# The first two arguments give the positions of the two impulses, the third the impulse width, followed by sigma, h3 and h4 of the instrumental profile
plotskewedgaussobs(0.0, 1.0, 0.01, 0.1, 0.2, 0.1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As a sanity check, let's count how many pictures we have in each training split (train/validation/test)
Step2: So we have indeed 2000 training images, and then 1000 validation images and 1000 test images. In each split, there is the same number of
Step3: Let's take a look at how the dimensions of the feature maps change with every successive layer
Step4: For our compilation step, we'll go with the RMSprop optimizer as usual. Since we ended our network with a single sigmoid unit, we will
Step5: Data preprocessing
Step6: Let's take a look at the output of one of these generators
Step7: Let's fit our model to the data using the generator. We do it using the fit_generator method, the equivalent of fit for data generators
Step8: It is good practice to always save your models after training
Step9: Let's plot the loss and accuracy of the model over the training and validation data during training
Step10: These plots are characteristic of overfitting. Our training accuracy increases linearly over time, until it reaches nearly 100%, while our
Step11: These are just a few of the options available (for more, see the Keras documentation). Let's quickly go over what we just wrote
Step12: If we train a new network using this data augmentation configuration, our network will never see twice the same input. However, the inputs
Step13: Let's train our network using data augmentation and dropout
Step14: Let's save our model -- we will be using it in the section on convnet visualization.
Step15: Let's plot our results again
|
<ASSISTANT_TASK:>
Python Code:
import os, shutil
# The path to the directory where the original
# dataset was uncompressed
original_dataset_dir = '/Users/fchollet/Downloads/kaggle_original_data'
# The directory where we will
# store our smaller dataset
base_dir = '/Users/fchollet/Downloads/cats_and_dogs_small'
os.mkdir(base_dir)
# Directories for our training,
# validation and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
validation_dir = os.path.join(base_dir, 'validation')
os.mkdir(validation_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
os.mkdir(validation_cats_dir)
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
os.mkdir(validation_dogs_dir)
# Directory with our test cat pictures
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
# Directory with our test dog pictures
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
# Copy first 1000 cat images to train_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 cat images to validation_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 cat images to test_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_cats_dir, fname)
shutil.copyfile(src, dst)
# Copy first 1000 dog images to train_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(train_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 dog images to validation_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(validation_dogs_dir, fname)
shutil.copyfile(src, dst)
# Copy next 500 dog images to test_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
src = os.path.join(original_dataset_dir, fname)
dst = os.path.join(test_dogs_dir, fname)
shutil.copyfile(src, dst)
print('total training cat images:', len(os.listdir(train_cats_dir)))
print('total training dog images:', len(os.listdir(train_dogs_dir)))
print('total validation cat images:', len(os.listdir(validation_cats_dir)))
print('total validation dog images:', len(os.listdir(validation_dogs_dir)))
print('total test cat images:', len(os.listdir(test_cats_dir)))
print('total test dog images:', len(os.listdir(test_dogs_dir)))
from keras import layers
from keras import models
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
from keras import optimizers
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
from keras.preprocessing.image import ImageDataGenerator
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
for data_batch, labels_batch in train_generator:
print('data batch shape:', data_batch.shape)
print('labels batch shape:', labels_batch.shape)
break
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=30,
validation_data=validation_generator,
validation_steps=50)
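# Note: steps_per_epoch=100 because the generator yields batches of 20 samples and there are 2000
# training images, so 100 batches cover the training set once per epoch; likewise
# validation_steps=50 covers the 1000 validation images in batches of 20.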
model.save('cats_and_dogs_small_1.h5')
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
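# What the options above do (summary of the ImageDataGenerator arguments used here):
# - rotation_range: a value in degrees (0-180) within which to randomly rotate pictures
# - width_shift_range / height_shift_range: fractions of total width/height within which to
#   randomly translate pictures vertically or horizontally
# - shear_range: for randomly applying shearing transformations
# - zoom_range: for randomly zooming inside pictures
# - horizontal_flip: for randomly flipping half of the images horizontally
# - fill_mode: the strategy used for filling in newly created pixels, which can appear after a
#   rotation or a width/height shift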
# This is module with image preprocessing utilities
from keras.preprocessing import image
fnames = [os.path.join(train_cats_dir, fname) for fname in os.listdir(train_cats_dir)]
# We pick one image to "augment"
img_path = fnames[3]
# Read the image and resize it
img = image.load_img(img_path, target_size=(150, 150))
# Convert it to a Numpy array with shape (150, 150, 3)
x = image.img_to_array(img)
# Reshape it to (1, 150, 150, 3)
x = x.reshape((1,) + x.shape)
# The .flow() command below generates batches of randomly transformed images.
# It will loop indefinitely, so we need to `break` the loop at some point!
i = 0
for batch in datagen.flow(x, batch_size=1):
plt.figure(i)
imgplot = plt.imshow(image.array_to_img(batch[0]))
i += 1
if i % 4 == 0:
break
plt.show()
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer=optimizers.RMSprop(lr=1e-4),
metrics=['acc'])
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,)
# Note that the validation data should not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
# This is the target directory
train_dir,
# All images will be resized to 150x150
target_size=(150, 150),
batch_size=32,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=32,
class_mode='binary')
history = model.fit_generator(
train_generator,
steps_per_epoch=100,
epochs=100,
validation_data=validation_generator,
validation_steps=50)
model.save('cats_and_dogs_small_2.h5')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Derivation of the inversion stencil using a non-symmetric forward-backward scheme
Step3: We are here discretizing the equation
Step4: Calculate the finite difference approximation of $\frac{1}{J}\partial_x \left(JAg^{xx}\partial_x f\right)$
Step5: We now back substitute $g\to \partial_x f$
Step6: Calculating the second term
Step7: Calculate the finite difference approximation of $\frac{1}{J}\partial_z \left(JAg^{zz}\partial_z f\right)$
Step8: Calculating the third term
Step9: Collecting terms
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display
from sympy import init_printing
from sympy import symbols, expand, together, as_finite_diff, collect
from sympy import Function, Eq, Subs
from collections import deque
init_printing()
def finiteDifferenceOfOneTerm(factors, wrt, stencil):
    """Finds the finite difference approximation of a term consisting of several factors

    Input:
        factors - An iterable containing the factors of the term
        wrt     - Take the derivative of the term with respect to this variable
        stencil - An iterable containing the points to be used in the stencil
    Output:
        term    - The finite difference approximation of the term
    """
# Take the derivative
factorsDiff = []
for factor in factors:
factorsDiff.append(as_finite_diff(factor.diff(wrt), stencil))
# Putting together terms
term = 0
# Make object for cyclic permutation
cyclPerm = deque(range(len(factors)))
for perm in range(len(cyclPerm)):
# Initialize a dummy term to store temporary variables in
curTerm = factorsDiff[cyclPerm[0]]
for permNr in range(1,len(factors)):
curTerm *= factors[cyclPerm[permNr]]
# Make a cyclic premutation
cyclPerm.rotate(1)
term += curTerm
return term
def fromFunctionToGrid(expr, syms):
    """Change from @(x,z) to @_xz, where @ represents a function

    Input:
        expr - The expression to change
        syms - symbols('@_xz, @_xp1z, @_xm1z, @_xzp1, @_xzm1')
               where xp1 = x+hx, zm1 = z-hz, etc.
    """
curFun = str(syms[0]).split('_')[0]
for sym in syms:
curSuffix = str(sym).split('_')[1]
if curSuffix == 'xz':
expr = expr.subs(Function(curFun)(x,z), sym)
elif curSuffix == 'xp1z':
expr = expr.subs(Subs(Function(curFun)(x,z), x, x+hx).doit(), sym)
elif curSuffix == 'xm1z':
expr = expr.subs(Subs(Function(curFun)(x,z), x, x-hx).doit(), sym)
elif curSuffix == 'xzp1':
expr = expr.subs(Subs(Function(curFun)(x,z), z, z+hz).doit(), sym)
elif curSuffix == 'xzm1':
expr = expr.subs(Subs(Function(curFun)(x,z), z, z-hz).doit(), sym)
return expr
x, z, hx, hz = symbols('x, z, h_x, h_z')
hx, hz = symbols('h_x, h_z', positive=True)
f = Function('f')(x, z)
A = Function('A')(x, z)
B = Function('B')(x, z)
gxx = Function('g^x^x')(x, z)
gzz = Function('g^z^z')(x, z)
J = Function('J')(x, z)
# Dummy function
g = Function('g')(x,z)
# Stencils
backwardX = [x-hx, x]
forwardX = [x, x+hx]
backwardZ = [z-hz, z]
forwardZ = [z, z+hz]
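# Small sanity check (illustrative, not part of the original derivation): with a single factor the
# helper should reduce to the plain forward difference of that factor's derivative.
display(finiteDifferenceOfOneTerm([g], x, forwardX))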
fx = f.diff(x)
fxB = as_finite_diff(fx, backwardX)
display(Eq(symbols('f_x'), fx))
display(Eq(symbols('f_x^B'), together(fxB)))
# Define the factors
factors = [J, A, gxx, g]
term1 = finiteDifferenceOfOneTerm(factors, x, forwardX)
term1 /= J
display(Eq(symbols('T_1^F'), term1))
term1 = term1.subs(Subs(g,x,x+hx).doit(), Subs(fxB,x,x+hx).doit())
term1 = term1.subs(g, fxB)
display(Eq(symbols('T_1^F'), term1))
fz = f.diff(z)
fzB = as_finite_diff(fz, backwardZ)
display(Eq(symbols('f_z'), fz))
display(Eq(symbols('f_z^B'), together(fzB)))
# Define the factors
factors = [J, A, gzz, g]
term2 = finiteDifferenceOfOneTerm(factors, z, forwardZ)
term2 /= J
display(Eq(symbols('T_2^F'), term2))
term2 = term2.subs(Subs(g,z,z+hz).doit(), Subs(fzB,z,z+hz).doit())
term2 = term2.subs(g, fzB)
display(Eq(symbols('T_2'), term2))
term3 = B*f
display(Eq(symbols('T_3^F'), term3))
b = term1 + term2 + term3
display(Eq(symbols('b'), b))
# Converting to grid syntax
functions = ['f', 'A', 'J', 'g^x^x', 'g^z^z', 'B']
for func in functions:
curStr = '{0}_xz, {0}_xp1z, {0}_xm1z, {0}_xzp1, {0}_xzm1'.format(func)
syms = symbols(curStr)
b = fromFunctionToGrid(b, syms)
# We must expand before we collect
b = collect(expand(b), symbols('f_xz, f_xp1z, f_xm1z, f_xzp1, f_xzm1'), exact=True)
display(Eq(symbols('b'),b))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. What is spaCy
Step2: In order to use a specific model you need to download it first. If working locally, you will need to download the model just once; however, in the cloud your environment resets and you will need to download the model on each session.
Step3: 2.2. Obtaining Model Info
Step4: 3. Spacy Data Structures and Processing Pipelines
Step5: Note how in the example we could easily access all lemmas and entities found by iterating over the document (variable doc) itself or over its entitities (doc.ents)
Step6: Exercise 3
Step7: 3.3. Usual Pipelines Components and Annotations
Step8: Unlike other spaCy components, the Tokenizer is not a statistical model. A finite set of rules and exceptions is encoded. If you wish to modify its behavior, you cannot retrain the component using labeled data. Instead, you would need to extend the list of rules and exceptions.
Step9: 4.1.2. Sentence Tokenization
Step10: However, be aware that sentences are not identified by the Tokenizer element we have just described, but sentence tokenization is carried out instead as a subproduct of the dependency extraction component, that we will shortly review.
Step11: Example
Step12: 4.2. Part of Speech Tagging
Step13: Exercise 5
Step14: 4.3. Dependency Parser
Step15: 4.4. Named Entity Recognition
Step16: Discussion
Step17: 4.5. Lemmatization
Step18: 4.6. Other Annotations
|
<ASSISTANT_TASK:>
Python Code:
# Common imports
import numpy as np
import pandas as pd
import zipfile as zp
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
#To wrap long text lines
from IPython.display import HTML, display
def set_css():
display(HTML('''
<style>
pre {
white-space: pre-wrap;
}
</style>
'''))
get_ipython().events.register('pre_run_cell', set_css)
#For fancy table Display
%load_ext google.colab.data_table
!pip install --upgrade spacy
import spacy
!python -m spacy download en_core_web_md
spacy.explain('HYPH')
spacy.info('en_core_web_md')
text = 'Modern condensed matter physics research has produced novel materials with fundamental properties that underpin a remarkable number of cutting-edge technologies. It is now generally accepted that novel materials are necessary for critical advances in technologies and whoever discovers novel materials generally controls the science and technology of the future. Transition metal oxides have attracted enormous interest within both the basic and applied science communities. However, for many decades, the overwhelming balance of effort was focused on the 3d-elements (such as iron, copper, etc.) and their compounds; the heavier 4d- and 5d-elements (such as ruthenium, iridium, etc., which constitute two thirds of the d-elements listed in the Periodic Table) and their compounds have been largely ignored until recently. The principal investigator seeks to discover novel materials containing 4d- and/or 5d-elements and understand how they offer wide-ranging opportunities for the discovery of new physics and, ultimately, new device paradigms. This project also provides rigorous training to all students involved, focusing on synthesis and characterization techniques covering a broad spectrum of materials and experimental probes available in the principal investigator\'s laboratory. Technical Abstract: Physics driven by spin-orbit interactions is among the most important topics in contemporary condensed matter physics. Since the spin-orbit interaction is comparable to the on-site Coulomb and other relevant interactions, it creates a unique balance between competing interactions that drive complex behaviors and exotic states not observed in other materials. The project encompasses a systematic effort to elucidate physics of novel phenomena in spin-orbit-coupled and correlated materials and a rigorous search for new materials having exotic ground states. This project focuses on the following areas: (1) Novel phenomena at high pressures and high magnetic fields, (2) Unusual correlations between the insulating gap and magnetic transition in iridates and ruthenates, (3) Exotic metallic and superconducting states in iridates, (4) Mott insulators with "intermediate-strength" spin-orbit interaction and other competing energies, and (5) Single-crystal synthesis and search for novel materials. The principal investigator is one of a few key pioneers who have initiated seminal studies on iridates and, before that, ruthenates, and has comprehensive facilities and proven expertise for single-crystal synthesis and wide-ranging studies of structural, transport, magnetic, thermal and dielectric properties as functions of temperature, magnetic field, pressure and doping.'
print(text)
nlp = spacy.load('en_core_web_md')
doc = nlp(text)
print(colored('============= Original Text =============', 'blue'))
print(doc)
print(colored('\n============= Lemmatized Text =============', 'red'))
print(' '.join([tk.lemma_ for tk in doc]))
print(colored('\n============= Entities Found =============', 'green'))
print('\n'.join([ent.text for ent in doc.ents]))
doc[3:10].vector
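# Related (illustrative): Doc and Span objects also expose similarity(), the cosine similarity of
# their averaged word vectors.
print(doc[3:10].similarity(doc[0:3]))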
#<SOL>
vec1 = nlp('structural, transport, magnetic, thermal and dielectric properties').vector
vec2 = nlp('temperature, magnetic field, pressure and doping').vector
vec3 = nlp('This project also provides rigorous training to all students involved').vector
print(np.linalg.norm(vec1-vec2))
print(np.linalg.norm(vec1-vec3))
print(np.linalg.norm(vec2-vec3))
#</SOL>
#<SOL>
vocab = []
for wd in nlp.vocab:
vocab.append([wd.text, wd.orth, wd.is_stop])
vocab = pd.DataFrame(vocab, columns=['Term', 'Hash', 'Stopword'])
vocab
#</SOL>
#<SOL>
stopw = vocab[vocab['Stopword']==True]
stopw
#</SOL>
#<SOL>
print(len(nlp.vocab))
doc_aux = nlp('This is just a catalogue of some weird nomenclatures to increase the vocabulary size: USA, OMG, teeth, tooth. We can also include words which are in a different language or just typos: mecachis, guau, hause')
print(len(nlp.vocab))
#</SOL>
shortext = 'Natural Language Processing is a key component of many relevant Artificial Intelligence Applications.' \
' Libraries such as spaCy v3 make it simple to benefit from statistical NLP models based on neural networks.' \
' It is estimated that NLP market in the U.S. will grow to around 30000 MUSD during the next five years.' \
' I don\'t know how accurate this is, but a solid growth is guaranteed'
shortdoc = nlp(shortext)
print(colored('============= The original text information is still kept in the Doc object =============', 'blue'))
print(shortdoc)
print(colored('\n============= Identified Tokens =============', 'red'))
for token in shortdoc:
print(token.text, end='\t\t')
#print('\t\t'.join([token.text for token in shortdoc]))
# Add special case rule
from spacy.symbols import ORTH
special_case = [{ORTH: "M"}, {ORTH: "USD"}]
nlp.tokenizer.add_special_case("MUSD", special_case)
shortdoc = nlp(shortext)
print(colored('============= The original text information is still kept in the Doc object =============', 'blue'))
print(shortdoc)
print(colored('\n============= Identified Tokens =============', 'red'))
for token in shortdoc:
print(token.text, end='\t\t')
#print('\t\t'.join([token.text for token in shortdoc]))
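# Debugging aid (illustrative, not part of the original notebook): tokenizer.explain() reports
# which rule or special case produced each token.
print(nlp.tokenizer.explain('30000 MUSD'))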
for sentence in shortdoc.sents:
print(sentence.text)
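# Illustrative alternative (not in the original notebook): when no dependency parser is available,
# a lightweight rule-based 'sentencizer' component can supply sentence boundaries instead.
nlp_rules = spacy.blank('en')
nlp_rules.add_pipe('sentencizer')
for s in nlp_rules(shortext).sents:
    print(s.text)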
!python -m spacy download xx_sent_ud_sm
!pip install --upgrade spacy_langdetect
multilingualtext = 'Natural Language Processing is a key component of many relevant Artificial Intelligence Applications.' \
' El Procesamiento de Lenguaje Natural es un componente de gran importancia en multitud de aplicaciones de la Inteligencia Artificial.' \
' Libraries such as spaCy v3 make it simple to benefit from statistical NLP models based on neural networks.' \
' SpaCy v3 y otras librerías similares hacen posible emplear métodos de NLP basados en redes neuronales de manera sencilla.' \
' It is estimated that NLP market in the U.S. will grow to around 30000 MUSD during the next five years.' \
' Se estima que el mercado del NLP en USA será de alrededor de 30.000 millones de dolares en cinco años.'
#<SOL>
english_text = ''  # placeholder: fill with the English sentences extracted below
spanish_text = ''  # placeholder: fill with the Spanish sentences extracted below
#</SOL>
from spacy.language import Language
from spacy_langdetect import LanguageDetector
# Add LanguageDetector and assign it a string name
@Language.factory("language_detector")
def create_language_detector(nlp, name):
return LanguageDetector(language_detection_function=None)
mult_nlp = spacy.load('xx_sent_ud_sm')
mult_nlp.add_pipe('language_detector', last=True)
mult_doc = mult_nlp(multilingualtext)
# document level language detection. Think of it like average language of the document!
print(colored('============= Document level language detection =============', 'blue'))
print(mult_doc._.language)
# sentence level language detection
print(colored('\n============= Sentence level language detection =============', 'red'))
for sent in mult_doc.sents:
print(sent, sent._.language)
# English and Spanish Texts
print(colored('\n============= English sentences =============', 'green'))
english_text = ' '.join([sent.text for sent in mult_doc.sents if sent._.language['language']=='en'])
print(english_text)
print(colored('\n============= Spanish sentences =============', 'green'))
spanish_text = ' '.join([sent.text for sent in mult_doc.sents if sent._.language['language']=='es'])
print(spanish_text)
text = 'Modern condensed matter physics research has produced novel materials with fundamental properties that underpin a remarkable number of cutting-edge technologies. It is now generally accepted that novel materials are necessary for critical advances in technologies and whoever discovers novel materials generally controls the science and technology of the future. Transition metal oxides have attracted enormous interest within both the basic and applied science communities. However, for many decades, the overwhelming balance of effort was focused on the 3d-elements (such as iron, copper, etc.) and their compounds; the heavier 4d- and 5d-elements (such as ruthenium, iridium, etc., which constitute two thirds of the d-elements listed in the Periodic Table) and their compounds have been largely ignored until recently. The principal investigator seeks to discover novel materials containing 4d- and/or 5d-elements and understand how they offer wide-ranging opportunities for the discovery of new physics and, ultimately, new device paradigms. This project also provides rigorous training to all students involved, focusing on synthesis and characterization techniques covering a broad spectrum of materials and experimental probes available in the principal investigator\'s laboratory. Technical Abstract: Physics driven by spin-orbit interactions is among the most important topics in contemporary condensed matter physics. Since the spin-orbit interaction is comparable to the on-site Coulomb and other relevant interactions, it creates a unique balance between competing interactions that drive complex behaviors and exotic states not observed in other materials. The project encompasses a systematic effort to elucidate physics of novel phenomena in spin-orbit-coupled and correlated materials and a rigorous search for new materials having exotic ground states. This project focuses on the following areas: (1) Novel phenomena at high pressures and high magnetic fields, (2) Unusual correlations between the insulating gap and magnetic transition in iridates and ruthenates, (3) Exotic metallic and superconducting states in iridates, (4) Mott insulators with "intermediate-strength" spin-orbit interaction and other competing energies, and (5) Single-crystal synthesis and search for novel materials. The principal investigator is one of a few key pioneers who have initiated seminal studies on iridates and, before that, ruthenates, and has comprehensive facilities and proven expertise for single-crystal synthesis and wide-ranging studies of structural, transport, magnetic, thermal and dielectric properties as functions of temperature, magnetic field, pressure and doping.'
nlp = spacy.load('en_core_web_md')
doc = nlp(text)
df = pd.DataFrame([[token.text, token.pos_, token.tag_] for token in doc],
columns = ['Token', 'POS', 'TAG'])
df
# Descriptions for POS values
#<SOL>
all_pos = set(df.POS.values.tolist())
pos_df = pd.DataFrame([[el, spacy.explain(el)] for el in all_pos], columns=['POS', 'Description'])
pos_df
#</SOL>
# Descriptions for TAGS values
#<SOL>
all_tag = set(df.TAG.values.tolist())
pos_df = pd.DataFrame([[el, spacy.explain(el)] for el in all_tag], columns=['TAG', 'Description'])
pos_df
#</SOL>
nlp.disable_pipe("parser")
#If you wish to completely remove the component from the pipeline, you can use the following command
#nlp.remove_pipe("parser")
doc = nlp(english_text)
df_ents = pd.DataFrame([[ent.text, ent.label_, spacy.explain(ent.label_)] for ent in doc.ents], columns=['Entity', 'Type', 'Description'])
df_ents
from spacy import displacy
wiki_text = 'Albert Einstein (14 March 1879 – 18 April 1955) was a German-born theoretical physicist, widely acknowledged to be one of the greatest physicists of all time.' \
' Einstein is known widely for developing the theory of relativity, but he also made important contributions to the development of the theory of quantum mechanics.' \
' He received the 1921 Nobel Prize in Physics "for his services to theoretical physics, and especially for his discovery of the law of the photoelectric effect".' \
' Einstein was born in the German Empire, but moved to Switzerland in 1895, forsaking his German citizenship the following year.' \
' Einstein was awarded a PhD by the University of Zürich.' \
' On the eve of World War II, he endorsed a letter to President Franklin D. Roosevelt.'
wiki_doc = nlp(wiki_text)
displacy.render(wiki_doc, style="ent", jupyter=True, options={'distance': 90})
entypes = set([ent.label_ for ent in wiki_doc.ents])
df_ent = pd.DataFrame([[enttyp, spacy.explain(enttyp)] for enttyp in entypes], columns=['Entity type', 'Description'])
df_ent
doc = nlp(text)
print(colored('============= Original text =============', 'blue'))
print(doc.text)
print(colored('\n============= Lemmas =============', 'red'))
print(' '.join([token.lemma_ for token in doc]))
mult_nlp = spacy.load('xx_sent_ud_sm')
mult_nlp.add_pipe('language_detector', last=True)
nlp = spacy.load('en_core_web_md')
nlp.disable_pipe('parser')
nlp.disable_pipe('ner')
valid_POS = set(['VERB', 'NOUN', 'ADJ', 'PROPN'])
specific_stw = set(['relevant', 'simple', 'base'])
def text_preprocessing(rawtext):
#<SOL>
mult_doc = mult_nlp(rawtext)
english_text = ' '.join([sent.text for sent in mult_doc.sents if sent._.language['language']=='en'])
doc = nlp(english_text)
lemmatized = ' '.join([token.lemma_ for token in doc
if token.is_alpha
and token.pos_ in valid_POS
and not token.is_stop
and token.lemma_ not in specific_stw])
return lemmatized
#</SOL>
print(colored('============= Original text =============', 'blue'))
print(multilingualtext)
print(colored('\n============= Lemmatized text =============', 'red'))
print(text_preprocessing(multilingualtext))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simple Model
Step4: Next, create the agent and model classes
Step5: Create a model and run it for 10 steps
Step6: And display a histogram of agent wealths
Step7: Create and run 100 models, and visualize the wealth distribution across all of them
Step8: Adding space
Step10: Create the new model object. (Note that this overwrites the MoneyModel object created above)
Step12: And create the agent to go along with it
Step13: Create a model with 50 agents and a 10x10 grid, and run for 20 steps
Step14: Visualize the number of agents on each grid cell
Step15: Collecting Data
Step16: Compute the agents' Gini coefficient, measuring inequality.
Step18: This MoneyModel is identical to the one above, except for the self.datacollector = ... line at the end of the __init__ method, and the collection in step.
Step19: Now instantiate a model, run it for 100 steps...
Step20: ... And collect and plot the data it generated
Step21: Batch Run
Step22: Set up the batch run
Step23: Run the parameter sweep; this step might take a while
Step24: Export and plot the results
|
<ASSISTANT_TASK:>
Python Code:
# Use matplotlib for inline graphing
import matplotlib.pyplot as plt
%matplotlib inline
from mesa import Agent, Model
from mesa.time import RandomActivation
import random
class MoneyAgent(Agent):
    """An agent with fixed initial wealth."""
def __init__(self, unique_id):
self.unique_id = unique_id
self.wealth = 1
def step(self, model):
if self.wealth == 0:
return
other_agent = random.choice(model.schedule.agents)
other_agent.wealth += 1
self.wealth -= 1
class MoneyModel(Model):
    """A model with some number of agents."""
def __init__(self, N):
self.running = True
self.num_agents = N
self.schedule = RandomActivation(self)
# Create agents
for i in range(self.num_agents):
a = MoneyAgent(i)
self.schedule.add(a)
def step(self):
'''Advance the model by one step.'''
self.schedule.step()
model = MoneyModel(10)
for i in range(10):
model.step()
agent_wealth = [a.wealth for a in model.schedule.agents]
plt.hist(agent_wealth)
all_wealth = []
for j in range(100):
# Run the model
model = MoneyModel(10)
for i in range(10):
model.step()
# Store the results
for agent in model.schedule.agents:
all_wealth.append(agent.wealth)
plt.hist(all_wealth, bins=range(max(all_wealth)+1))
from mesa.space import MultiGrid
class MoneyModel(Model):
    """A model with some number of agents."""
def __init__(self, N, width, height):
self.running = True
self.num_agents = N
self.grid = MultiGrid(height, width, True)
self.schedule = RandomActivation(self)
# Create agents
for i in range(self.num_agents):
a = MoneyAgent(i)
self.schedule.add(a)
# Add the agent to a random grid cell
x = random.randrange(self.grid.width)
y = random.randrange(self.grid.height)
self.grid.place_agent(a, (x, y))
def step(self):
self.schedule.step()
class MoneyAgent(Agent):
    """An agent with fixed initial wealth."""
def __init__(self, unique_id):
self.unique_id = unique_id
self.wealth = 1
def move(self, model):
possible_steps = model.grid.get_neighborhood(self.pos, moore=True, include_center=False)
new_position = random.choice(possible_steps)
model.grid.move_agent(self, new_position)
def give_money(self, model):
cellmates = model.grid.get_cell_list_contents([self.pos])
if len(cellmates) > 1:
other = random.choice(cellmates)
other.wealth += 1
self.wealth -= 1
def step(self, model):
self.move(model)
if self.wealth > 0:
self.give_money(model)
model = MoneyModel(50, 10, 10)
for i in range(20):
model.step()
import numpy as np
agent_counts = np.zeros((model.grid.width, model.grid.height))
for cell in model.grid.coord_iter():
cell_content, x, y = cell
agent_count = len(cell_content)
agent_counts[x][y] = agent_count
plt.imshow(agent_counts, interpolation='nearest')
plt.colorbar()
from mesa.datacollection import DataCollector
def compute_gini(model):
'''
Compute the current Gini coefficient.
Args:
model: A MoneyModel instance
Returns:
The Gini Coefficient for the model's current step.
'''
agent_wealths = [agent.wealth for agent in model.schedule.agents]
x = sorted(agent_wealths)
N = model.num_agents
B = sum( xi * (N-i) for i,xi in enumerate(x) ) / (N*sum(x))
return (1 + (1/N) - 2*B)
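# Quick check of the formula (illustrative, not part of the original tutorial): perfectly equal
# wealths give a Gini coefficient of 0, while concentrating all wealth in one agent pushes it
# towards 1 - 1/N.
def _gini_of_list(wealths):
    x = sorted(wealths)
    N = len(x)
    B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * sum(x))
    return 1 + (1. / N) - 2 * B
print(_gini_of_list([1., 1., 1., 1.]))   # 0.0
print(_gini_of_list([0., 0., 0., 10.]))  # 0.75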
class MoneyModel(Model):
    """A model with some number of agents."""
def __init__(self, N, width, height):
self.running = True
self.num_agents = N
self.grid = MultiGrid(height, width, True)
self.schedule = RandomActivation(self)
# Create agents
for i in range(self.num_agents):
a = MoneyAgent(i)
self.schedule.add(a)
# Add the agent to a random grid cell
x = random.randrange(self.grid.width)
y = random.randrange(self.grid.height)
self.grid.place_agent(a, (x, y))
# New addition: add a DataCollector:
self.datacollector = DataCollector(model_reporters={"Gini": compute_gini},
agent_reporters={"Wealth": lambda a: a.wealth})
def step(self):
self.datacollector.collect(self) # Collect the data before the agents run.
self.schedule.step()
model = MoneyModel(50, 10, 10)
for i in range(100):
model.step()
gini = model.datacollector.get_model_vars_dataframe()
gini.head()
gini.plot()
agent_wealth = model.datacollector.get_agent_vars_dataframe()
agent_wealth.head()
end_wealth = agent_wealth.xs(99, level="Step")["Wealth"]
end_wealth.hist(bins=range(agent_wealth.Wealth.max()+1))
one_agent_wealth = agent_wealth.xs(14, level="AgentID")
one_agent_wealth.Wealth.plot()
from mesa.batchrunner import BatchRunner
parameters = {"height": 10, "width": 10, "N": range(10, 500, 10)}
batch_run = BatchRunner(MoneyModel, parameters, iterations=5, max_steps=100,
model_reporters={"Gini": compute_gini})
batch_run.run_all()
run_data = batch_run.get_model_vars_dataframe()
run_data.head()
plt.scatter(run_data.N, run_data.Gini)
plt.xlabel("Number of agents")
plt.ylabel("Gini Coefficient")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Explain what the cell below will produce and why. Can you change it so the answer is correct?
Step2: Answer these 3 questions without typing code. Then type code to check your answer.
Step3: What is the type of the result of the expression 3 + 1.5 + 4?
Step4: What would you use to find a number’s square root, as well as its square?
Step5: Strings
Step6: Reverse the string 'hello' using indexing
Step7: Given the string hello, give two methods of producing the letter 'o' using indexing.
Step8: Lists
Step9: Reassign the 'hello' item in this nested list to say 'goodbye'
Step10: Sort the list below
Step11: Dictionaries
Step12: Can you sort a dictionary? Why or why not?
Step13: Sets
Step14: Booleans
Step15: Final Question
|
<ASSISTANT_TASK:>
Python Code:
((((10 + 10) * 5) / 10)**2) + 5.00 - 4.75
# Will produce 0 in python 2. produces 0.66 in python 3 because of "true" division
2/3
# the following import will change the outcome
from __future__ import division
2/3
# 4 * (6 + 5) = 44
4 * (6 + 5)
# 4 * 6 + 5 = 29
4 * 6 + 5
# 4 + 6 * 5 = 34
4 + 6 * 5
3 + 1.5 + 4
# x**2 for the square, x**0.5 for the square root
print(2**2)
print(4**0.5)
s = 'hello'
# Print out 'e' using indexing
# Code here
print(s[1])
s ='hello'
# Reverse the string using indexing
# Code here
print(s[::-1])
s ='hello'
# Print out the last letter 'o' using two different indexing methods
# Code here
print(s[-1:])
print(s[len(s) - 1])
print([0,0,0])
print([0] * 3)
l = [1,2,[3,4,'hello']]
l[2][2] = "goodbye"
print(l)
l = [3,4,5,5,6]
l.sort()   # list.sort() sorts in place and returns None, so print the list itself
print(l)
d = {'simple_key':'hello'}
# Grab 'hello'
print(d["simple_key"])
d = {'k1':{'k2':'hello'}}
# Grab 'hello'
print(d["k1"]["k2"])
# Getting a little tricker
d = {'k1':[ {'nest_key':['this is deep',['hello']]} ]}
#Grab hello
print(d["k1"][0]["nest_key"][1][0])
# This will be hard and annoying!
d = {'k1':[
1,2,{'k2':['this is tricky',{'tough':[1,2,['hello']]}
]}
]
}
print(d["k1"][2]["k2"][1]["tough"][2][0])
tup = (2, "yes", 3.0)
print(tup)
l = [1,2,2,33,4,4,11,22,3,3,2]
s = set(l)
print(s)
# Answer before running cell
2 > 3 # False
# Answer before running cell
3 <= 2 # False
# Answer before running cell
3 == 2.0 # False
# Answer before running cell
3.0 == 3 # True
# Answer before running cell
4**0.5 != 2 # False
# two nested lists
l_one = [1,2,[3,4]]
l_two = [1,2,{'k1':4}]
#True or False?
l_one[2][0] >= l_two[2]['k1'] # False, 3 >= 4
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We define the symbols we want to use in our linear system. For this demonstration, I don't add the time index but I keep my subscripts consistent with the figure above.
Step2: In this cell we create the square matrix holding the coefficients that multiply the unknown quantities. Note the structure of the matrix. It is a tridiagonal matrix. The function in NumPy is very compact, in SymPy not so much. So I apologize for the syntax in the SymPy/Python code below, but the more compact version can be difficult to read
Step3: Here is our vector of unknown quantities. We know the solution to the $j-1$ time step. All of these symbols represent the value of our field (e.g. concentration, temperature, etc.) at the $j$'th time step.
Step4: If we've got everything correct, this matrix product will reproduce the discrete diffusion equation outlined above. You'll note that the boundary equations are not formed correctly. For reference, here is the discrete form
Step5: It should start to become clear that we can write this linear system (of a tridiagonal matrix and a column vector of unknowns) as a matrix equation
Step6: So the full form of our system is therefore
Step7: All that remains is to solve the above linear system. Instead of using SymPy, we will use some tools in a different Python library.
Step8: Simulation parameters
Step9: A simple function to plot the initial condition
Step10: It is worth noting that these schemes are unconditionally stable - so any choice of time step will produce a solution. The accuracy of the solution does depend on this choice, though.
Step11: We create two solution vectors rather than one whole array to hold all of our solution. This is not particular to the implicit method, but it demonstrates another technique for saving memory and speeding up the calculation. We will fill these matrices and swap them (move data from new into old and overwrite new) at each time step.
Step12: First, some syntax
Step13: The matrix has to be square. It should have the same dimensions as the nubmer of points in the system. The following code snippet was inspired by this post.
Step14: We first need to prime the arrays by copying the initial condition into oldConcentration. Afterwards it will be enough to swap pointers (a variable that points to a memory location).
Step15: Top of Page
|
<ASSISTANT_TASK:>
Python Code:
import sympy as sp
sp.init_session(quiet=True)
var('U_LHS U_RHS')
var('dt dx beta u1:7 b1:7')
hpad = ones(0, 1); vpad = ones(1, 0)
mainDiag = 2*beta+1; offDiag = -beta
M = (sp.diag(vpad, offDiag, offDiag, offDiag, offDiag, offDiag, hpad)+ \
sp.diag(hpad, offDiag, offDiag, offDiag, offDiag, offDiag, vpad)+ \
sp.diag(mainDiag,mainDiag,mainDiag,mainDiag,mainDiag,mainDiag))
M
xmatrix = sp.Matrix([u1,u2,u3,u4,u5,u6])
xmatrix
M*xmatrix
bmatrix = sp.Matrix([(b1+beta*U_LHS),b2,b3,b4,b5,(b6+beta*U_RHS)])
bmatrix
sp.Eq(M*xmatrix,bmatrix)
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
numberOfPoints = 100
lengthOfDomain = 1.0
dx = lengthOfDomain/numberOfPoints
xPoints = np.linspace(0.0, lengthOfDomain, numberOfPoints)
initialCondition = np.sin(xPoints*np.pi/lengthOfDomain)
def plotIC():
fig = plt.figure()
axes = fig.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(xPoints, initialCondition, 'ro')
axes.set_xlabel('Distance $x$')
axes.set_ylabel('Concentration of Stuff $c(x,t)$')
axes.set_title('Initial Conditions');
plotIC()
diffusionCoefficient = 10.0
dt = dx**2/(diffusionCoefficient)
numberOfIterations = 1000
newConcentration = np.zeros((numberOfPoints), dtype='float32')
oldConcentration = np.zeros((numberOfPoints), dtype='float32')
['h','h','h']*3
def tridiag(a, b, c, k1=-1, k2=0, k3=1):
# Here we use Numpy addition to make the job easier.
return np.diag(a, k1) + np.diag(b, k2) + np.diag(c, k3)
a = [-dt*diffusionCoefficient/dx/dx]*(numberOfPoints-1)
b = [2*dt*diffusionCoefficient/dx/dx+1]*(numberOfPoints)
c = [-dt*diffusionCoefficient/dx/dx]*(numberOfPoints-1)
A = tridiag(a, b, c)
A
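# (Added note) Each interior row of A encodes the implicit (backward Euler) scheme derived above:
#   -beta*u[i-1,j] + (1 + 2*beta)*u[i,j] - beta*u[i+1,j] = u[i,j-1],  with beta = dt*D/dx**2,
# so one linear solve A @ u_new = u_old advances the solution by a single time step.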
np.copyto(oldConcentration,initialCondition)
uLHS = 0.0
uRHS = 0.0
numIterations = 200
for i in range(numIterations):
# enforce boundary conditions
oldConcentration[0] = oldConcentration[0] + uLHS*dt*diffusionCoefficient/dx/dx
oldConcentration[-1] = oldConcentration[-1] + uRHS*dt*diffusionCoefficient/dx/dx
# solve the system
np.copyto(newConcentration,np.linalg.solve(A,oldConcentration))
# swap pointers
oldConcentration, newConcentration = newConcentration, oldConcentration
# plot the results
fig2 = plt.figure()
axes = fig2.add_axes([0.1, 0.1, 0.8, 0.8])
axes.plot(xPoints, newConcentration, 'ro')
axes.set_ylim(0,1)
axes.set_xlabel('Distance $x$')
axes.set_ylabel('Concentration of Stuff $c(x,t)$')
axes.set_title('Solution');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the following example, the number of repetitions will depend on the number the user enters from the keyboard, and the first and last passes through the loop will differ from the others.
Step2: We will implement the same functionality from the previous example in a different way, by moving the printing of the first and last rows out of the loop. We will print the first row before the loop and the last row after the loop.
Step3: The <i>for</i> loop
Step4: The <i>range</i> function
Step5: We can also pass it two arguments, which it will interpret as the start and end of a half-open interval (the first number belongs to the interval, while the last number does not).
Step6: If we pass three arguments to the <i>range</i> function, it will interpret the first as the start and the second as the end of a half-open interval. The third argument is the step by which the sequence of numbers in this interval progresses (arithmetically).
Step7: Some list operations
Step8: Using square brackets we can change the contents of a list, i.e. the value of one of its elements.
Step9: Python has many functions for working with lists, such as the <i>len</i> function, which computes the length of a list.
Step10: Loops and strings
Step11: String length
Step12: Iterating with the <i>while</i> loop
Step13: Iterating with the <i>for</i> loop
Step14: Nested loops
Step15: The same can be achieved using the <i>while</i> loop.
|
<ASSISTANT_TASK:>
Python Code:
# Initialize the counter
i = 1
# Loop while printing out and incrementing the values of the counter
while i <= 10:
print(i)
# Increment the counter inside the loop
i = i + 1
# Obtain the required number of passes from the user
num_passes = int(input("How many rows would you like printed?\n"))
# Initialize the counter
i = 1
# Loop and print the required number of rows
while i <= num_passes:
if (1 == i):
print("Here is the first row.")
elif (num_passes == i):
print("Here is the last (%dth) row." % i)
else:
print("Here is another row.")
# Increment the counter inside the loop
i += 1 # "i += x" is equivalent to "i = i + x"
# Obtain the required number of passes from the user
num_passes = int(input("How many rows would you like printed?\n"))
# Print the first row BEFORE entering the loop
print("Here is the first row.")
# Initialize the counter to 2, because the first row has already been printed
i = 2
# Print the "middle rows" - all rows except the first and the last one
while (i <= num_passes - 1):
print("Here is another row.")
i += 1
# Print the last row AFTER finishing the loop
print("Here is the last (%dth) row." % i)
for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
print(i)
my_list = range(10) # starting from 0, 10 not included
for i in my_list:
print(i)
for i in range(8, 13): # 8 included, 13 not included
print(i)
for i in range(10, 20, 2):
print(i)
# Prints the ith element of a list of integers
def print_ith_element(input_list, i):
print("The %dth element of the list is %d" % (i, input_list[i]))
# Define a list
my_list = [10, 20, 30, 40, 50]
# Print out a few elements of the list
print_ith_element(my_list, 0)
print_ith_element(my_list, 2)
print_ith_element(my_list, 4)
my_list = [100, 200, 300, 400, 500, 600, 700, 800]
my_list[3] = -1
print(my_list) # The print function can take a list as an argument and print out all of its elements
list1 = [1, 2, 3]
list2 = range(10)
len1 = len(list1)
len2 = len(list2)
print("The length of the first list is %d and the length of the second list is %d" % (len1, len2))
my_str = "My string"
print(my_str[0])
print(my_str[3])
print(my_str[8])
my_str = "My string"
print("String length is %d" % len(my_str))
my_str = "My string"
# Initialize counter
cnt = 0
while cnt < len(my_str):
print("Character number %d is %s" % (cnt, my_str[cnt]))
cnt += 1
my_str = "My string"
for character in my_str:
print(character)
columns = ["A", "B", "C", "D", "E", "F", "G", "H"]
rows = [1, 2, 3, 4, 5, 6, 7, 8]
for c in columns:
for r in rows:
# Print a column-row combination
#
# Set print function's optional argument end to an
# empty string to avoid going to a new line after each print
print("%s%s " % (c, r), end="")
# After each printed row, print a newline to make it look like a chess board
print("\n")
columns = ["A", "B", "C", "D", "E", "F", "G", "H"]
rows = [1, 2, 3, 4, 5, 6, 7, 8]
# The length of the lists
len_c = len(columns)
len_r = len(rows)
# Initialize the column counter
cnt_c = 0
while cnt_c < len_c:
# Initialize the row counter
# The row counter needs to be re-initialized to zero each time
# we move to a new column. That's why we re-initialize it inside the column loop
cnt_r = 0
while cnt_r < len_r:
# Print a column-row combination
#
# Set print function's optional argument end to an
# empty string to avoid going to a new line after each print
print("%s%s " % (columns[cnt_c], rows[cnt_r]), end="")
# Increment the row counter
cnt_r += 1
# Increment the column counter
cnt_c += 1
# After each printed row, print a newline to make it look like a chess board
print("\n")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fetch open data
Step2: It has almost 400,000 rows!
Step3: Yuck, that's a scatter plot. Let's make a 2D (Hess) diagram.
Step4: That looks better. The red-dashed line is the 1:1 line (goal = actual completion time).
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import seaborn as sns
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt # side-stepping mpl backend
import warnings
warnings.filterwarnings("ignore") #YOLO
import seaborn as sns
sns.set_context('notebook', font_scale=1.5)
!ls data/
dat = pd.read_csv('data/311_Service_Requests_-_Miami-Dade_County.csv')
len(dat.City)
dat.head()
dat.columns
plt.plot(dat['Goal Days'], dat['Actual Completed Days'], '.', alpha=0.1)
plt.xlim(0, 30)
plt.ylim(0, 30)
plt.title('Does 311 meet its goals?')
plt.xlabel('Goal (Days)')
plt.ylabel('Actual (Days)')
H, xbins, ybins = np.histogram2d(dat['Goal Days'], dat['Actual Completed Days'],
bins=(np.linspace(-60, 60, 120),
np.linspace(-60, 60, 120)))
sns.set_style('dark')
# Create a black and white color map where bad data (NaNs) are white
cmap = plt.cm.bone
cmap.set_bad('w', 1.)
# Use the image display function imshow() to plot the result
fig, ax = plt.subplots(figsize=(7, 7))
H[H == 0] = 1 # prevent warnings in log10
ax.imshow(np.log10(H).T, origin='lower',
extent=[xbins[0], xbins[-1], ybins[0], ybins[-1]],
cmap=cmap, interpolation='nearest',
aspect='auto')
ax.plot([0,60],[0, 60], 'r--')
plt.xlim(0,60)
plt.ylim(0,60)
plt.xlabel('Goal (days)')
plt.ylabel('Actual (days)')
dat.columns
diff = dat['Actual Completed Days'] - dat['Goal Days']
good_diff = diff[diff == diff]
plt.hist(good_diff, range=(-100, 100), bins=100)
plt.yscale('linear')
plt.xlabel('Actual - Goal')
plt.ylabel('$N$')
n_good = np.sum(good_diff <= 0)
n_bad = np.sum(good_diff > 0)
n_tot = len(good_diff)
n_good, n_bad, n_tot
print "Miami 311 meets their goal {:.1f}% of the time.".format(n_good*100.0/n_tot)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: On Variational Bounds of Mutual Information
Step2: Variational bound implementations
Step4: InfoNCE contrastive lower bound
Step8: Interpolated lower bounds
Step11: JS-KL hybrid lower bound
Step13: Structured Bounds
Step16: Neural network architectures
Step17: Baselines
Step20: Experiments
Step22: Training code
Step23: Dataset, optimization, and critic parameters. Try experimenting with these.
Step24: Build a dictionary of the mutual information estimators to train and their parameters.
Step25: Train each estimator and store mutual information estimates (this takes ~2 minutes on a modern GPU).
Step26: Results
Step30: Structured Bounds
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow_probability as tfp
tfd = tfp.distributions
tfkl = tf.keras.layers
tfpl = tfp.layers  # renamed so that tfp still refers to tensorflow_probability (tfp.math is used below)
import pandas as pd # used for exponential moving average
from scipy.special import logit
import numpy as np
import matplotlib.pyplot as plt
def reduce_logmeanexp_nodiag(x, axis=None):
batch_size = x.shape[0].value
logsumexp = tf.reduce_logsumexp(x - tf.linalg.tensor_diag(np.inf * tf.ones(batch_size)), axis=axis)
if axis:
num_elem = batch_size - 1.
else:
num_elem = batch_size * (batch_size - 1.)
return logsumexp - tf.math.log(num_elem)
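# (Added note) reduce_logmeanexp_nodiag computes log(mean(exp(x))) over the off-diagonal entries
# of a square scores matrix, i.e. over the "negative" (marginal) x,y pairs only; the diagonal
# (joint) entries are masked out by adding -inf before the logsumexp.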
def tuba_lower_bound(scores, log_baseline=None):
if log_baseline is not None:
scores -= log_baseline[:, None]
batch_size = tf.cast(scores.shape[0], tf.float32)
# First term is an expectation over samples from the joint,
# which are the diagonal elmements of the scores matrix.
joint_term = tf.reduce_mean(tf.linalg.diag_part(scores))
# Second term is an expectation over samples from the marginal,
# which are the off-diagonal elements of the scores matrix.
marg_term = tf.exp(reduce_logmeanexp_nodiag(scores))
return 1. + joint_term - marg_term
def nwj_lower_bound(scores):
# equivalent to: tuba_lower_bound(scores, log_baseline=1.)
return tuba_lower_bound(scores - 1.)
def infonce_lower_bound(scores):
"""InfoNCE lower bound from van den Oord et al. (2018)."""
nll = tf.reduce_mean(tf.linalg.diag_part(scores) - tf.reduce_logsumexp(scores, axis=1))
# Alternative implementation:
# nll = -tf.nn.sparse_softmax_cross_entropy_with_logits(logits=scores, labels=tf.range(batch_size))
mi = tf.math.log(tf.cast(scores.shape[0].value, tf.float32)) + nll
return mi
def log_interpolate(log_a, log_b, alpha_logit):
"""Numerically stable implementation of log(alpha * a + (1-alpha) * b)."""
log_alpha = -tf.nn.softplus(-alpha_logit)
log_1_minus_alpha = -tf.nn.softplus(alpha_logit)
y = tf.reduce_logsumexp(tf.stack((log_alpha + log_a, log_1_minus_alpha + log_b)), axis=0)
return y
def compute_log_loomean(scores):
"""Compute the log leave-one-out mean of the exponentiated scores.
For each column j we compute the log-sum-exp over the row holding out column j.
This is a numerically stable version of:
log_loosum = scores + tfp.math.softplus_inverse(tf.reduce_logsumexp(scores, axis=1, keepdims=True) - scores)
Implementation based on tfp.vi.csiszar_divergence.csiszar_vimco_helper.
"""
max_scores = tf.reduce_max(scores, axis=1, keepdims=True)
lse_minus_max = tf.reduce_logsumexp(scores - max_scores, axis=1, keepdims=True)
d = lse_minus_max + (max_scores - scores)
d_ok = tf.not_equal(d, 0.)
safe_d = tf.where(d_ok, d, tf.ones_like(d))
loo_lse = scores + tfp.math.softplus_inverse(safe_d)
# Normalize to get the leave one out log mean exp
loo_lme = loo_lse - tf.math.log(scores.shape[1].value - 1.)
return loo_lme
def interpolated_lower_bound(scores, baseline, alpha_logit):
"""Interpolated lower bound on mutual information.
Interpolates between the InfoNCE baseline (alpha_logit -> -infty)
and the single-sample TUBA baseline (alpha_logit -> infty).
Args:
scores: [batch_size, batch_size] critic scores
baseline: [batch_size] log baseline scores
alpha_logit: logit for the mixture probability
Returns:
scalar, lower bound on MI
"""
batch_size = scores.shape[0].value
# Compute InfoNCE baseline
nce_baseline = compute_log_loomean(scores)
# Inerpolated baseline interpolates the InfoNCE baseline with a learned baseline
interpolated_baseline = log_interpolate(
nce_baseline, tf.tile(baseline[:, None], (1, batch_size)), alpha_logit)
# Marginal term.
critic_marg = scores - tf.linalg.diag_part(interpolated_baseline)[:, None]
marg_term = tf.exp(reduce_logmeanexp_nodiag(critic_marg))
# Joint term.
critic_joint = tf.linalg.diag_part(scores)[:, None] - interpolated_baseline
joint_term = (tf.reduce_sum(critic_joint) -
tf.reduce_sum(tf.linalg.diag_part(critic_joint))) / (batch_size * (batch_size - 1.))
return 1 + joint_term - marg_term
def js_fgan_lower_bound(f):
"""Lower bound on Jensen-Shannon divergence from Nowozin et al. (2016)."""
f_diag = tf.linalg.tensor_diag_part(f)
first_term = tf.reduce_mean(-tf.nn.softplus(-f_diag))
n = tf.cast(f.shape[0], tf.float32)
second_term = (tf.reduce_sum(tf.nn.softplus(f)) - tf.reduce_sum(tf.nn.softplus(f_diag))) / (n * (n - 1.))
return first_term - second_term
def js_lower_bound(f):
"""NWJ lower bound on MI using a critic trained with Jensen-Shannon.
The returned Tensor gives MI estimates when evaluated, but its gradients are
the gradients of the lower bound of the Jensen-Shannon divergence.
"""
js = js_fgan_lower_bound(f)
mi = nwj_lower_bound(f)
return js + tf.stop_gradient(mi - js)
def estimate_mutual_information(estimator, x, y, critic_fn,
baseline_fn=None, alpha_logit=None):
"""Estimate variational lower bounds on mutual information.
Args:
estimator: string specifying estimator, one of:
'nwj', 'infonce', 'tuba', 'js', 'interpolated'
x: [batch_size, dim_x] Tensor
y: [batch_size, dim_y] Tensor
critic_fn: callable that takes x and y as input and outputs critic scores;
output shape is a [batch_size, batch_size] matrix
baseline_fn (optional): callable that takes y as input and
outputs a [batch_size] or [batch_size, 1] vector
alpha_logit (optional): logit(alpha) for interpolated bound
Returns:
scalar estimate of mutual information
"""
scores = critic_fn(x, y)
if baseline_fn is not None:
# Some baselines' output is (batch_size, 1) which we remove here.
log_baseline = tf.squeeze(baseline_fn(y))
if estimator == 'infonce':
mi = infonce_lower_bound(scores)
elif estimator == 'nwj':
mi = nwj_lower_bound(scores)
elif estimator == 'tuba':
mi = tuba_lower_bound(scores, log_baseline)
elif estimator == 'js':
mi = js_lower_bound(scores)
elif estimator == 'interpolated':
assert alpha_logit is not None, "Must specify alpha_logit for interpolated bound."
mi = interpolated_lower_bound(scores, log_baseline, alpha_logit)
return mi
def mlp(hidden_dim, output_dim, layers, activation):
return tf.keras.Sequential(
[tfkl.Dense(hidden_dim, activation) for _ in range(layers)] +
[tfkl.Dense(output_dim)])
class SeparableCritic(tf.keras.Model):
def __init__(self, hidden_dim, embed_dim, layers, activation, **extra_kwargs):
super(SeparableCritic, self).__init__()
self._g = mlp(hidden_dim, embed_dim, layers, activation)
self._h = mlp(hidden_dim, embed_dim, layers, activation)
def call(self, x, y):
scores = tf.matmul(self._h(y), self._g(x), transpose_b=True)
return scores
class ConcatCritic(tf.keras.Model):
def __init__(self, hidden_dim, layers, activation, **extra_kwargs):
super(ConcatCritic, self).__init__()
# output is scalar score
self._f = mlp(hidden_dim, 1, layers, activation)
def call(self, x, y):
batch_size = tf.shape(x)[0]
# Tile all possible combinations of x and y
x_tiled = tf.tile(x[None, :], (batch_size, 1, 1))
y_tiled = tf.tile(y[:, None], (1, batch_size, 1))
# xy is [batch_size * batch_size, x_dim + y_dim]
xy_pairs = tf.reshape(tf.concat((x_tiled, y_tiled), axis=2), [batch_size * batch_size, -1])
# Compute scores for each x_i, y_j pair.
scores = self._f(xy_pairs)
return tf.transpose(tf.reshape(scores, [batch_size, batch_size]))
def gaussian_log_prob_pairs(dists, x):
"""Compute log probability for all pairs of distributions and samples."""
mu, sigma = dists.mean(), dists.stddev()
sigma2 = sigma**2
normalizer_term = tf.reduce_sum(-0.5 * (np.log(2. * np.pi) + 2.0 * tf.math.log(sigma)), axis=1)[None, :]
x2_term = -tf.matmul(x**2, 1.0 / (2 * sigma2), transpose_b=True)
mu2_term = - tf.reduce_sum(mu**2 / (2 * sigma2), axis=1)[None, :]
cross_term = tf.matmul(x, mu / sigma2, transpose_b=True)
log_prob = normalizer_term + x2_term + mu2_term + cross_term
return log_prob
def build_log_prob_conditional(rho, **extra_kwargs):
"""True conditional distribution."""
def log_prob_conditional(x, y):
mu = x * rho
q_y = tfd.MultivariateNormalDiag(mu, tf.ones_like(mu) * tf.cast(tf.sqrt(1.0 - rho**2), tf.float32))
return gaussian_log_prob_pairs(q_y, y)
return log_prob_conditional
CRITICS = {
'separable': SeparableCritic,
'concat': ConcatCritic,
'conditional': build_log_prob_conditional,
}
def log_prob_gaussian(x):
return tf.reduce_sum(tfd.Normal(0., 1.).log_prob(x), -1)
BASELINES= {
'constant': lambda: None,
'unnormalized': lambda: mlp(hidden_dim=512, output_dim=1, layers=2, activation='relu'),
'gaussian': lambda: log_prob_gaussian,
}
def sample_correlated_gaussian(rho=0.5, dim=20, batch_size=128):
"""Generate samples from a correlated Gaussian distribution."""
x, eps = tf.split(tf.random.normal((batch_size, 2 * dim)), 2, axis=1)
y = rho * x + tf.sqrt(tf.cast(1. - rho**2, tf.float32)) * eps
return x, y
def rho_to_mi(dim, rho):
return -0.5 * np.log(1-rho**2) * dim
def mi_to_rho(dim, mi):
return np.sqrt(1-np.exp(-2.0 / dim * mi))
def mi_schedule(n_iter):
"""Generate schedule for increasing correlation over time."""
mis = np.round(np.linspace(0.5, 5.5-1e-9, n_iter)) *2.0#0.1
return mis.astype(np.float32)
plt.figure(figsize=(6,3))
for i, rho in enumerate([0.5, 0.99]):
plt.subplot(1, 2, i + 1)
x, y = sample_correlated_gaussian(batch_size=500, dim=1, rho=rho)
plt.scatter(x[:, 0], y[:, 0])
plt.title(r'$\rho=%.2f$, $I(X; Y)=%.1f$' % (rho, rho_to_mi(1, rho)))
plt.xlim(-3, 3); plt.ylim(-3, 3);
def train_estimator(critic_params, data_params, mi_params):
"""Main training loop that estimates time-varying MI."""
# Ground truth rho is only used by conditional critic
critic = CRITICS[mi_params.get('critic', 'concat')](rho=None, **critic_params)
baseline = BASELINES[mi_params.get('baseline', 'constant')]()
opt = tf.keras.optimizers.Adam(opt_params['learning_rate'])
@tf.function
def train_step(rho, data_params, mi_params):
# Annoying special case:
# For the true conditional, the critic depends on the true correlation rho,
# so we rebuild the critic at each iteration.
if mi_params['critic'] == 'conditional':
critic_ = CRITICS['conditional'](rho=rho)
else:
critic_ = critic
with tf.GradientTape() as tape:
x, y = sample_correlated_gaussian(dim=data_params['dim'], rho=rho, batch_size=data_params['batch_size'])
mi = estimate_mutual_information(mi_params['estimator'], x, y, critic_, baseline, mi_params.get('alpha_logit', None))
loss = -mi
trainable_vars = []
if isinstance(critic, tf.keras.Model):
trainable_vars += critic.trainable_variables
if isinstance(baseline, tf.keras.Model):
trainable_vars += baseline.trainable_variables
grads = tape.gradient(loss, trainable_vars)
opt.apply_gradients(zip(grads, trainable_vars))
return mi
# Schedule of correlation over iterations
mis = mi_schedule(opt_params['iterations'])
rhos = mi_to_rho(data_params['dim'], mis)
estimates = []
for i in range(opt_params['iterations']):
estimates.append(train_step(rhos[i], data_params, mi_params).numpy())
return np.array(estimates)
data_params = {
'dim': 20,
'batch_size': 64,
}
critic_params = {
'layers': 2,
'embed_dim': 32,
'hidden_dim': 256,
'activation': 'relu',
}
opt_params = {
'iterations': 20000,
'learning_rate': 5e-4,
}
critic_type = 'concat' # or 'separable'
estimators = {
'NWJ': dict(estimator='nwj', critic=critic_type, baseline='constant'),
'TUBA': dict(estimator='tuba', critic=critic_type, baseline='unnormalized'),
'InfoNCE': dict(estimator='infonce', critic=critic_type, baseline='constant'),
'JS': dict(estimator='js', critic=critic_type, baseline='constant'),
'TNCE': dict(estimator='infonce', critic='conditional', baseline='constant'),
# Optimal critic for TUBA
#'TUBA_opt': dict(estimator='tuba', critic='conditional', baseline='gaussian')
}
# Add interpolated bounds
def sigmoid(x):
return 1/(1. + np.exp(-x))
for alpha_logit in [-5., 0., 5.]:
name = 'alpha=%.2f' % sigmoid(alpha_logit)
estimators[name] = dict(estimator='interpolated', critic=critic_type,
alpha_logit=alpha_logit, baseline='unnormalized')
estimates = {}
for estimator, mi_params in estimators.items():
print("Training %s..." % estimator)
estimates[estimator] = train_estimator(critic_params, data_params, mi_params)
# Smooting span for Exponential Moving Average
EMA_SPAN = 200
# Ground truth MI
mi_true = mi_schedule(opt_params['iterations'])
# Names specifies the key and ordering for plotting estimators
names = np.sort(list(estimators.keys()))
lnames = list(map(lambda s: s.replace('alpha', '$\\alpha$'), names))
nrows = min(2, len(estimates))
ncols = int(np.ceil(len(estimates) / float(nrows)))
fig, axs = plt.subplots(nrows, ncols, figsize=(2.7 * ncols, 3 * nrows))
if len(estimates) == 1:
axs = [axs]
axs = np.ravel(axs)
for i, name in enumerate(names):
plt.sca(axs[i])
plt.title(lnames[i])
# Plot estimated MI and smoothed MI
mis = estimates[name]
mis_smooth = pd.Series(mis).ewm(span=EMA_SPAN).mean()
p1 = plt.plot(mis, alpha=0.3)[0]
plt.plot(mis_smooth, c=p1.get_color())
# Plot true MI and line for log(batch size)
plt.plot(mi_true, color='k', label='True MI')
estimator = estimators[name]['estimator']
if 'interpolated' in estimator or 'nce' in estimator:
# Add theoretical upper bound lines
if 'interpolated' in estimator:
log_alpha = -np.log( 1+ tf.exp(-estimators[name]['alpha_logit']))
else:
log_alpha = 1.
plt.axhline(1 + np.log(data_params['batch_size']) - log_alpha, c='k', linestyle='--', label=r'1 + log(K/$\alpha$)' )
plt.ylim(-1, mi_true.max()+1)
plt.xlim(0, opt_params['iterations'])
if i == len(estimates) - ncols:
plt.xlabel('steps')
plt.ylabel('Mutual information (nats)')
plt.legend(loc='best', fontsize=8, framealpha=0.0)
plt.gcf().tight_layout();
def log_prob_pairs(dists, samples):
if isinstance(dists, (tfd.Normal, tfd.MultivariateNormalDiag)):
return gaussian_log_prob_pairs(dists, samples)
batch_size = tf.shape(samples)[0]
multiples = [1] * (1 + len(samples.get_shape().as_list()))
multiples[1] = tf.shape(samples)[0]
samples_tiled = tf.tile(samples[:, None], multiples)
# Compute log probs, size [batch_size, batch_size]
log_probs = dists.log_prob(samples_tiled)
return log_probs
def variational_upper_bound(conditional_dist, marginal_dist, samples):
"""Variational upper bound on mutual information.
Args:
conditional_dist: true conditional density, p(y|x)
marginal_dist: approximate marginal density, m(y)
samples: samples from the conditional distribution p(y|x)
Returns:
scalar, upper bound on mutual information
"""
return tf.reduce_mean(conditional_dist.log_prob(samples) -
marginal_dist.log_prob(samples))
def minibatch_upper_bound(conditional_dist, samples):
"""Minibatch upper bound on mutual information.
Args:
conditional_dist: approximate conditional density, e(y|x)
samples: samples from conditional_dist
Returns:
scalar, upper bound on mutual information
"""
log_probs = log_prob_pairs(conditional_dist, samples)
# Batch marginal holds out self (along diagonal), and averages over
# all other elements in the batch.
mask = tf.eye(tf.shape(samples)[0])
log_prob_marginal = tf.reduce_mean(reduce_logmeanexp_nodiag(log_probs, axis=1))
log_prob_cond = tf.reduce_mean(tf.linalg.tensor_diag_part(log_probs))
return log_prob_cond - log_prob_marginal
def minibatch_lower_bound(conditional_dist, samples):
"""Minibatch lower bound on mutual information.
Args:
conditional_dist: approximate conditional density, e(y|x)
samples: samples from conditional_dist
Returns:
scalar, lower bound on mutual information
"""
batch_marginal_dist = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(logits=tf.zeros(conditional_dist.batch_shape)),
components_distribution=conditional_dist)
return variational_upper_bound(conditional_dist, batch_marginal_dist, samples)
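# --- Illustrative sketch only; not part of the original notebook. ---
# Evaluate the structured bounds above on the correlated-Gaussian testbed using the known
# conditional p(y|x); the names rho, cond and marg below are ours, not from the source.
rho = np.float32(mi_to_rho(20, 4.0))                       # correlation giving ~4 nats of MI
x, y = sample_correlated_gaussian(rho=rho, dim=20, batch_size=64)
scale = np.float32(np.sqrt(1. - rho ** 2))
cond = tfd.MultivariateNormalDiag(loc=rho * x, scale_diag=scale * tf.ones_like(x))   # p(y|x)
marg = tfd.MultivariateNormalDiag(loc=tf.zeros(20), scale_diag=tf.ones(20))          # true marginal
print('true MI:', rho_to_mi(20, rho))
print('variational upper bound (exact marginal):', variational_upper_bound(cond, marg, y).numpy())
print('minibatch upper bound:', minibatch_upper_bound(cond, y).numpy())
print('minibatch lower bound:', minibatch_lower_bound(cond, y).numpy())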
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The kernel density (KDE) estimate is computed for each rug observation, then all the individual KDE curves are summed and normalized, giving the average KDE curve for the whole dataset
Step2: You can also use distplot() to fit a parametric distribution to a dataset and visually assess how well it corresponds to the observed data
Step3: Visualizing pairwise relationships in a dataset
Step4: Plotting with categorical data
Step5: In a strip plot the scattered points often overlap, which makes it hard to see the full distribution of the data. A simple solution is to adjust the positions with some random "jitter" (along the categorical axis only)
Step6: A different approach is the swarmplot() function, which places each point on the categorical axis using an algorithm that avoids overlapping points
Step7: Distributions of observations within categories
Step8: Violinplots
Step9: Statistical estimation within categories
Step10: Plotting "wide-form" data
Step11: Drawing multi-panel categorical plots
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from scipy import stats, integrate
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
np.random.seed(sum(map(ord, "distributions")))
x = np.random.normal(size=100) # for a univariate distribution, draw a histogram and a KDE (kernel density estimate)
sns.distplot(x) # plot the distribution
plt.show()
sns.distplot(x, kde=False, rug=True) # drop the density curve and draw a small vertical tick at each observation
# The rugplot() function can draw the rug on its own, but it is also available through distplot()
plt.show()
sns.distplot(x, bins=20, kde=False, rug=True) # split the data into 20 bins in total
plt.show()
sns.kdeplot(x, shade=True) # draw the KDE curve
plt.show()
sns.set_style("whitegrid")
x = np.random.gamma(6, size=200)
sns.distplot(x, kde=False, fit=stats.gamma)
plt.show()
# The most familiar way to visualize a bivariate distribution is a scatter plot, where each observation is shown as a point with x and y values; it is the bivariate analogue of a rug plot.
# You can draw a scatter plot with the matplotlib plt.scatter function, and it is also the default kind shown by jointplot():
sns.set()
mean, cov = [0, 1], [(1, .5), (.5, 1)]
data = np.random.multivariate_normal(mean, cov, 200)
df = pd.DataFrame(data, columns=["x", "y"])
print(df)
sns.jointplot(x="x", y="y", data=df)
plt.show()
# The bivariate analogue of a histogram is known as a “hexbin” plot, because it shows the counts
# of observations that fall within hexagonal bins. This plot works best with relatively large datasets.
# It’s available through the matplotlib plt.hexbin function and as a style in jointplot(). It looks best with a white background:
# Bivariate analogue of the histogram; it works best when the dataset is large
x, y = np.random.multivariate_normal(mean, cov, 1000).T
with sns.axes_style("white"):
sns.jointplot(x=x, y=y, kind="hex", color="k")
plt.show()
sns.jointplot(x="x", y="y", data=df, kind="kde") # 高维的同样可以绘制 KDE
plt.show()
iris = sns.load_dataset("iris")
sns.pairplot(iris)
plt.show()
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="whitegrid", color_codes=True)
np.random.seed(sum(map(ord, "categorical")))
titanic = sns.load_dataset("titanic")
tips = sns.load_dataset("tips")
iris = sns.load_dataset("iris")
sns.stripplot(x="day", y="total_bill", data=tips) # 分类散点图
plt.show()
sns.stripplot(x="day", y="total_bill", data=tips, jitter=True)
plt.show()
sns.swarmplot(x="day", y="total_bill", data=tips) # 尽量分散的方式绘制
plt.show()
# 添加一个新的分类变量
sns.swarmplot(x="day", y="total_bill", hue="sex", data=tips) # hue 参数新增一个分类变量
plt.show()
# In general, the seaborn categorical plotting functions try to infer the order of categories from the data.
# If your data have a pandas Categorical datatype, then the default order of the categories can be set there.
# For other datatypes, string-typed categories will be plotted in the order they appear in the DataFrame,
# but categories that look numerical will be sorted
sns.swarmplot(x="size", y="total_bill", hue="sex", data=tips)
plt.show()
# the orientation can be flipped
sns.swarmplot(y="day", x="total_bill", hue="sex", data=tips)
plt.show()
sns.boxplot(x="day", y="total_bill", hue="time", data=tips) # 箱线图
plt.show()
sns.violinplot(x="total_bill", y="day", hue="time", data=tips)
plt.show()
sns.violinplot(y="total_bill", x="day", hue="time", data=tips,split=True)
plt.show()
sns.violinplot(x="day", y="total_bill", hue="sex", data=tips,
split=True, inner="stick", palette="Set3") # 画直方图而不是箱线图
plt.show()
# the two plot types can be combined
sns.violinplot(x="day", y="total_bill", data=tips, inner=None) # the default inner is a box plot
sns.swarmplot(x="day", y="total_bill", data=tips, color="w", alpha=.5)
plt.show()
sns.countplot(x="deck", data=titanic, palette="Greens_d")
plt.show()
sns.pointplot(x="sex", y="survived", hue="class", data=titanic) # 竖线表示 置信区间
plt.show()
sns.pointplot(x="class", y="survived", hue="sex", data=titanic,
palette={"male": "g", "female": "m"},
markers=["^", "o"], linestyles=["-", "--"])
plt.show()
sns.boxplot(data=iris, orient="h")
plt.show()
sns.factorplot(x="day", y="total_bill", hue="smoker", data=tips, kind="bar") # 带有误差线
tips = sns.load_dataset("tips")
print(tips.describe())
plt.show()
sns.factorplot(x="day", y="total_bill", hue="smoker", # 绘制多列数据
col="time", data=tips, kind="swarm")
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Config
Step2: Identifier for storing these features on disk and referring to them later.
Step3: The path to the saved GoogleNews Word2Vec model.
Step4: Read data
Step5: Build features
Step6: Build final features
Step7: Save features
|
<ASSISTANT_TASK:>
Python Code:
from pygoose import *
import os
import warnings
import gensim
from fuzzywuzzy import fuzz
from nltk import word_tokenize
from nltk.corpus import stopwords
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
project = kg.Project.discover()
feature_list_id = '3rdparty_abhishek'
google_news_model_path = os.path.join(project.aux_dir, 'word2vec', 'GoogleNews-vectors-negative300.bin.gz')
df_train = pd.read_csv(project.data_dir + 'train.csv').fillna('').drop(['id', 'qid1', 'qid2'], axis=1)
df_test = pd.read_csv(project.data_dir + 'test.csv').fillna('').drop(['test_id'], axis=1)
stop_words = stopwords.words('english')
def wmd(model, s1, s2):
s1 = str(s1).lower().split()
s2 = str(s2).lower().split()
stop_words = stopwords.words('english')
s1 = [w for w in s1 if w not in stop_words]
s2 = [w for w in s2 if w not in stop_words]
return model.wmdistance(s1, s2)
def norm_wmd(model, s1, s2):
s1 = str(s1).lower().split()
s2 = str(s2).lower().split()
stop_words = stopwords.words('english')
s1 = [w for w in s1 if w not in stop_words]
s2 = [w for w in s2 if w not in stop_words]
return model.wmdistance(s1, s2)
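# (Added note) wmd and norm_wmd share the same body; the only difference is that norm_wmd is
# called with the length-normalized model (init_sims(replace=True) below), giving the
# normalized word mover's distance.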
def sent2vec(model, s):
words = s.lower()
words = word_tokenize(words)
words = [w for w in words if not w in stop_words]
words = [w for w in words if w.isalpha()]
M = []
for w in words:
try:
M.append(model[w])
except:
continue
M = np.array(M)
v = M.sum(axis=0)
return v / np.sqrt((v ** 2).sum())
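# (Added note) sent2vec returns a unit-length 300-d sentence vector (sum of word vectors,
# L2-normalized), so the distance features computed below compare normalized embeddings.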
def extend_with_features(data):
data['common_words'] = data.apply(lambda x: len(set(str(x['question1']).lower().split()).intersection(set(str(x['question2']).lower().split()))), axis=1)
data['fuzz_qratio'] = data.apply(lambda x: fuzz.QRatio(str(x['question1']), str(x['question2'])), axis=1)
data['fuzz_WRatio'] = data.apply(lambda x: fuzz.WRatio(str(x['question1']), str(x['question2'])), axis=1)
model = gensim.models.KeyedVectors.load_word2vec_format(google_news_model_path, binary=True)
data['wmd'] = data.apply(lambda x: wmd(model, x['question1'], x['question2']), axis=1)
norm_model = gensim.models.KeyedVectors.load_word2vec_format(google_news_model_path, binary=True)
norm_model.init_sims(replace=True)
data['norm_wmd'] = data.apply(lambda x: norm_wmd(norm_model, x['question1'], x['question2']), axis=1)
question1_vectors = np.zeros((data.shape[0], 300))
for i, q in progressbar(enumerate(data.question1.values), total=len(data)):
question1_vectors[i, :] = sent2vec(model, q)
question2_vectors = np.zeros((data.shape[0], 300))
for i, q in progressbar(enumerate(data.question2.values), total=len(data)):
question2_vectors[i, :] = sent2vec(model, q)
question1_vectors = np.nan_to_num(question1_vectors)
question2_vectors = np.nan_to_num(question2_vectors)
data['cosine_distance'] = [cosine(x, y) for (x, y) in zip(question1_vectors, question2_vectors)]
data['cityblock_distance'] = [cityblock(x, y) for (x, y) in zip(question1_vectors, question2_vectors)]
data['jaccard_distance'] = [jaccard(x, y) for (x, y) in zip(question1_vectors, question2_vectors)]
data['canberra_distance'] = [canberra(x, y) for (x, y) in zip(question1_vectors, question2_vectors)]
data['euclidean_distance'] = [euclidean(x, y) for (x, y) in zip(question1_vectors, question2_vectors)]
data['minkowski_distance'] = [minkowski(x, y, 3) for (x, y) in zip(question1_vectors, question2_vectors)]
data['braycurtis_distance'] = [braycurtis(x, y) for (x, y) in zip(question1_vectors, question2_vectors)]
data['skew_q1vec'] = [skew(x) for x in question1_vectors]
data['skew_q2vec'] = [skew(x) for x in question2_vectors]
data['kur_q1vec'] = [kurtosis(x) for x in question1_vectors]
data['kur_q2vec'] = [kurtosis(x) for x in question2_vectors]
warnings.filterwarnings('ignore')
extend_with_features(df_train)
extend_with_features(df_test)
df_train.drop(['is_duplicate', 'question1', 'question2'], axis=1, inplace=True)
df_test.drop(['question1', 'question2'], axis=1, inplace=True)
X_train = np.array(df_train.values, dtype='float64')
X_test = np.array(df_test.values, dtype='float64')
print('X_train:', X_train.shape)
print('X_test: ', X_test.shape)
df_train.describe().T
feature_names = [
'abh_common_words',
'abh_fuzz_qratio',
'abh_fuzz_WRatio',
'abh_wmd',
'abh_norm_wmd',
'abh_cosine_distance',
'abh_cityblock_distance',
'abh_jaccard_distance',
'abh_canberra_distance',
'abh_euclidean_distance',
'abh_minkowski_distance',
'abh_braycurtis_distance',
'abh_skew_q1vec',
'abh_skew_q2vec',
'abh_kur_q1vec',
'abh_kur_q2vec',
]
project.save_features(X_train, X_test, feature_names, feature_list_id)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Inject into the interpreter the functions.
Step2: Construct the histogram containing the input data
Step3: Create the function and try to fit it without setting any parameter. Activate the interactive visualisation with %jsroot on.
Step4: Less than optimal. Set parameters and fit again, draw histogram with error bars
Step5: Much better. Now time to beautify the plot. Construct a TF1 for the background and Lorentzian functions and draw them in the same canvas.
Step6: We can now add a legend
|
<ASSISTANT_TASK:>
Python Code:
import ROOT
%%cpp -d
//Define functions for fitting
// Quadratic background function
double background(double *x, double *par) {
return par[0] + par[1]*x[0] + par[2]*x[0]*x[0];
}
// Lorenzian Peak function
double lorentzianPeak(double *x, double *par) {
return (0.5*par[0]*par[1]/TMath::Pi()) /
TMath::Max(1.e-10,(x[0]-par[2])*(x[0]-par[2])
+ .25*par[1]*par[1]);
}
// Sum of background and peak function
double fitFunction(double *x, double *par) {
return background(x, par) + lorentzianPeak(x, &par[3]);
}
# Your code here
# Your code here
# Your code here
# Your code here
# Your code here
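# --- Illustrative sketch only; this is NOT the original tutorial's solution code. ---
# The real input data for the histogram is not included here, so stand-in pseudo-data is
# generated from the fit model itself ("truth"); replace it with the measured values.
# Parameter values below are rough guesses, and the pattern follows the classic ROOT
# FittingDemo example (TF1 wrapping the C++ functions declared above).
truth = ROOT.TF1("truth", ROOT.fitFunction, 0, 3, 6)
truth.SetParameters(1, 0, 0, 1, 0.2, 1)          # flat background + Lorentzian centred at x = 1
histo = ROOT.TH1F("histo", "Lorentzian peak on quadratic background;x;counts", 60, 0, 3)
histo.FillRandom("truth", 5000)                  # stand-in for the real measurements
# Fit with the full 6-parameter model: first with default parameters, then with sensible guesses
fitFcn = ROOT.TF1("fitFcn", ROOT.fitFunction, 0, 3, 6)
histo.Fit(fitFcn)                                # poor starting values -> poor fit
fitFcn.SetParameters(1, 1, 1, 6, 0.2, 1)         # pars 0-2: background, pars 3-5: peak
histo.Fit(fitFcn)
c = ROOT.TCanvas("c", "Fit demo")
histo.Draw("E")                                  # draw the data with error bars
# Overlay the two components of the fit, evaluated at the fitted parameter values
backFcn = ROOT.TF1("backFcn", ROOT.background, 0, 3, 3)
backFcn.SetLineColor(ROOT.kGreen + 2)
backFcn.SetLineStyle(2)
signalFcn = ROOT.TF1("signalFcn", ROOT.lorentzianPeak, 0, 3, 3)
signalFcn.SetLineColor(ROOT.kBlue)
backFcn.SetParameters(fitFcn.GetParameter(0), fitFcn.GetParameter(1), fitFcn.GetParameter(2))
signalFcn.SetParameters(fitFcn.GetParameter(3), fitFcn.GetParameter(4), fitFcn.GetParameter(5))
backFcn.Draw("same")
signalFcn.Draw("same")
# Add a legend
legend = ROOT.TLegend(0.55, 0.65, 0.88, 0.85)
legend.AddEntry(histo, "data", "lep")
legend.AddEntry(fitFcn, "global fit", "l")
legend.AddEntry(backFcn, "background", "l")
legend.AddEntry(signalFcn, "Lorentzian peak", "l")
legend.Draw()
c.Draw()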
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The make_blobs method in Scikit-Learn is often used to generate test data for clustering algorithms. Intuitively, make_blobs generates several classes of data according to the number of features, number of centers, value range, etc. specified by the user, and these data can be used to test how well a clustering algorithm performs. Next, we import the naive Bayes classifier GaussianNB, then build and fit the model:
Step2: To show the model's behavior more clearly, we build another set of random samples and use it to look at the decision boundary of the naive Bayes classifier in two-dimensional space. Here we can call the predict function to output the model's prediction for each sample point:
Step3: We see a slightly curved boundary in the classification - in general, the boundary produced by Gaussian naive Bayes is quadratic.
Step4: These columns give the posterior probabilities of the first and second labels, respectively. If you are looking for uncertainty estimates in a classification, Bayesian approaches like this can be useful.
Step5: To keep things simple, we select a few of these categories, then download the training and test sets.
Step6: A data sample looks like the following; it contains a lot of information about a newsgroup message, such as its source and organization:
Step7: To use this data for machine learning, we need to convert the content of each string into a vector of numbers. For this we will use the TF-IDF vectorizer (discussed in feature engineering) and create a pipeline that attaches it to a multinomial naive Bayes classifier:
Step8: With this pipeline, we can fit the model to the training data and predict labels for the test data.
Step9: Now that we have predicted the labels for the test data, we can evaluate them to learn about the performance of the estimator. For example, here is the confusion matrix between the true and predicted labels of the test data:
Step10: Clearly, even this very simple classifier can successfully separate space discussions from computer discussions, but it gets confused between general religion topics and Christianity topics. This confusion is to be expected.
Step11: Let's try it out:
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from sklearn.datasets import make_blobs
X, y = make_blobs(100, 2, centers=2, random_state=2, cluster_std=1.5)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu');
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(X, y);
rng = np.random.RandomState(0)
Xnew = [-6, -14] + [14, 18] * rng.rand(2000, 2)
ynew = model.predict(Xnew)
plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='RdBu')
lim = plt.axis()
plt.scatter(Xnew[:, 0], Xnew[:, 1], c=ynew, s=20, cmap='RdBu', alpha=0.1)
plt.axis(lim);
yprob = model.predict_proba(Xnew)
yprob[-8:].round(2)
from sklearn.datasets import fetch_20newsgroups
data = fetch_20newsgroups()
data.target_names
categories = ['talk.religion.misc', 'soc.religion.christian',
'sci.space', 'comp.graphics']
train = fetch_20newsgroups(subset='train', categories=categories)
test = fetch_20newsgroups(subset='test', categories=categories)
print(train.data[5])
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
model.fit(train.data, train.target)
labels = model.predict(test.data)
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(test.target, labels)
sns.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
xticklabels=train.target_names, yticklabels=train.target_names)
plt.xlabel('true label')
plt.ylabel('predicted label');
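# (Illustrative extra, not in the original notebook) a single-number summary of the same comparison:
from sklearn.metrics import accuracy_score
print("test accuracy:", accuracy_score(test.target, labels))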
def predict_category(s, train=train, model=model):
pred = model.predict([s])
return train.target_names[pred[0]]
predict_category('sending a payload to the ISS')
predict_category('discussing islam vs atheism')
predict_category('determining the screen resolution')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convenience function for setting up graphs
Step2: Compare this result with that of the analytical formula on page 63 of the syllabus, which is a summation of products of a cosine and an exponential. The expressions are completely different, but yield the same result
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as sp
def newfig(title='?', xlabel='?', ylabel='?', xlim=None, ylim=None,
xscale='linear', yscale='linear', size_inches=(14, 8)):
'''Setup a new axis for plotting'''
fig, ax = plt.subplots()
fig.set_size_inches(size_inches)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xscale(xscale)
ax.set_yscale(yscale)
if xlim is not None: ax.set_xlim(xlim)
if ylim is not None: ax.set_ylim(ylim)
ax.grid(True)
return ax
# Aquifer and land-strip properties
kD = 400 # m2/d
S = 0.1 # [-]
L = 200 # m (width of land strip)
A = 2 # m (sudden change of river stage)
# Choose points within the cross section to compute the heads
x = np.linspace(-L/2, +L/2, 101)
# Choosing visualization times, in this case we'll draw one per week (7 d)
# The half time would be T = 0.24 sqrt(L^2 S / kD) see syllabus.
T50 = 0.24 * np.sqrt(L ** 2 * S / kD)
times = np.arange(T50, 11 * T50, T50)
# Superpositionin in time using the erfc function, see syllabus page 62
ax = newfig('superposition in space', 'time [d]', 'head [m]')
for t in times:
s = np.zeros_like(x)
for i in range(1, 11):
ds = A * (-1)**(i-1) * (
sp.erfc((L/2 * (2 * i -1) + x) * np.sqrt(S / (4 * kD * t))) +
sp.erfc((L/2 * (2 * i -1) - x) * np.sqrt(S / (4 * kD * t))))
s = s + ds
plt.plot(x, s, label='t = {:4.2f} d'.format(t))
plt.legend()
plt.ylim(0, A)
plt.show()
# Using the analytical solution on page 63 of the syllabus
ax = newfig('Solution p63 with cos() * exp()', 'x [m]', 's(x, t) [m]', ylim=(0, 2))
b = L/2
for t in times:
s = np.zeros_like(x)
for j in range(1, 11):
ds = A * 4/np.pi * \
(-1)**(j-1) / (2*j - 1) *\
np.cos((2 * j -1) * np.pi/2 * x/b) *\
np.exp(-(2 * j -1)**2 * (np.pi/2)**2 * kD/(b**2 * S) * t)
s = s + ds
ax.plot(x, s, label='t = {:4.2f} d'.format(t))
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: load data
Step2: Select a gap-free segment
Step3: Plot spectra
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.signal as signal
import seawater as sw
from pyspec import spectrum as spec
fni = "data/lg0703_nb150.npz"
data = np.load(fni)
fmax = 135
lon,lat = data['lon1'][:fmax],data['lat1'][:fmax]
u, v = data['u1'][0,:fmax],data['v1'][0,:fmax]
urot, vrot = data['u1_rot'][0,:fmax],data['v1_rot'][0,:fmax]
d,ang = sw.dist(lat,lon,units='km')
dist = np.append(0.,np.cumsum(d))
uspec = spec.Spectrum(urot[:].copy(),dt=5)
vspec = spec.Spectrum(vrot[:].copy(),dt=5)
ksp, Pu_sp = signal.welch(urot[:], fs=1/5., window='hanning', nperseg=urot[:].size, noverlap=False,
detrend='constant', scaling='density', axis=-1)
_, Pv_sp = signal.welch(vrot[:], fs=1/5., window='hanning', nperseg=urot[:].size, noverlap=False,
detrend='constant', scaling='density', axis=-1)
fig = plt.figure(figsize=(9,7))
ax = fig.add_subplot(111)
ax.loglog(uspec.f,uspec.spec,'m')
ax.loglog(ksp,2*Pu_sp,'m--')
ax.loglog(vspec.f,vspec.spec)
ax.loglog(ksp,2*Pv_sp,'b--')
ax.set_xlabel('Wavenumber [cpkm]')
ax.set_ylabel(r'KE spectral density [m$^2$ s$^{-1}$/cpkm]')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Posing the problem
Step2: Porosity decrease with depth
Step3: While the compaction trends for mud happen to be fairly similar in the plot above, the ones for sandy lithologies are very different. This highlights that porosity-depth curves vary significantly from one basin to another, and are strongly affected by overpressures and exhumation. Using local data and geological information is critical. As Giles et al. (1998) have put it, "The use of default compaction curves can introduce significant errors into thermal history and pore- fluid pressure calculations, particularly where little well data are available to calibrate the model." To see how widely - and wildly - variable compaction trends can be, check out the review paper by Giles et al. (1998).
Step4: Now we can do the calculations; here we set the initial depths of a sandstone column $y_1,y_2$ to 2 and 3 kilometers, and we estimate the new thickness and porosity assuming that the column is brought to the surface ($y_1'=0$).
Step5: One issue we need to address is that 'comp_func' has six input parameters, but the scipy 'bisect' function only takes one parameter. We create a partial function 'comp_func_1' in which the only variable is 'y2a'; the rest are treated as constants.
Step6: Write code to (de)compact stratigraphic column with multiple layers
Step7: Let's use this function to decompact a simple stratigraphic column that consists of 5 alternating layers of sand and mud.
Step8: Plot the results
Step9: Now let's see what happens if we use the 90% mud and 90% sand curves from Kominz et al. (2011).
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import functools
from scipy.optimize import bisect
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
plt.rcParams['mathtext.fontset'] = 'cm'
plt.figure(figsize=(2,5))
x = [0,1,1,0,0]
y = [1,1,1.5,1.5,1]
plt.text(-0.6,1.02,'$y_1$',fontsize=16)
plt.text(-0.6,1.52,'$y_2$',fontsize=16)
plt.text(-0.6,1.27,'$\phi$',fontsize=16)
plt.fill(x,y,'y')
x = [3,4,4,3,3]
y = [0.5,0.5,1.15,1.15,0.5]
plt.text(2.25,0.52,'$y_1\'$',fontsize=16)
plt.text(2.25,1.17,'$y_2\'$',fontsize=16)
plt.text(2.25,0.9,'$\phi\'$',fontsize=16)
plt.fill(x,y,'y')
plt.plot([1,3],[1,0.5],'k--')
plt.plot([1,3],[1.5,1.15],'k--')
plt.gca().invert_yaxis()
plt.axis('off');
c_sand = 0.27 # porosity-depth coefficient for sand (km-1)
c_mud = 0.57 # porosity-depth coefficent for mud (km-1)
phi_sand_0 = 0.49 # surface porosity for sand
phi_mud_0 = 0.63 # surface porosity for mud
y = np.arange(0,3.01,0.01)
phi_sand = phi_sand_0 * np.exp(-c_sand*y)
phi_mud = phi_mud_0 * np.exp(-c_mud*y)
plt.figure(figsize=(4,7))
plt.plot(phi_sand,y,'y',linewidth=2,label='sand')
plt.plot(phi_mud,y,'brown',linewidth=2,label='mud')
plt.xlabel('porosity')
plt.ylabel('depth (km)')
plt.xlim(0,0.65)
plt.gca().invert_yaxis()
c_sand = 1000/18605.0 # Kominz et al. 2011 >90% sand curve
c_mud = 1000/1671.0 # Kominz et al. 2011 >90% mud curve
phi_sand_0 = 0.407 # Kominz et al. 2011 >90% sand curve
phi_mud_0 = 0.614 # Kominz et al. 2011 >90% mud curve
phi_sand = phi_sand_0 * np.exp(-c_sand*y)
phi_mud = phi_mud_0 * np.exp(-c_mud*y)
plt.plot(phi_sand,y,'y--',linewidth=2,label='90% sand')
plt.plot(phi_mud,y,'--',color='brown',linewidth=2,label='90% mud')
plt.legend(loc=0, fontsize=10);
# compaction function - the unknown variable is y2a
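# (Added note) comp_func encodes the standard decompaction relation (conserving grain thickness):
#   y2' - y1' = (y2 - y1) - (phi0/c)*(exp(-c*y1) - exp(-c*y2)) + (phi0/c)*(exp(-c*y1') - exp(-c*y2'))
# where y1', y2' are the new top and base depths; the root y2' (y2a) is found numerically below.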
def comp_func(y2a,y1,y2,y1a,phi,c):
# left hand side of decompaction equation:
LHS = y2a - y1a
# right hand side of decompaction equation:
RHS = y2 - y1 - (phi/c)*(np.exp(-c*y1)-np.exp(-c*y2)) + (phi/c)*(np.exp(-c*y1a)-np.exp(-c*y2a))
return LHS - RHS
c_sand = 0.27 # porosity-depth coefficient for sand (km-1)
phi_sand = 0.49 # surface porosity for sand
y1 = 2.0 # top depth in km
y2 = 3.0 # base depth in km
y1a = 0.0 # new top depth in km
comp_func_1 = functools.partial(comp_func, y1=y1, y2=y2, y1a=y1a, phi=phi_sand, c=c_sand)
y2a = bisect(comp_func_1,y1a,y1a+3*(y2-y1)) # use bisection to find new base depth
phi = (phi_sand/c_sand)*(np.exp(-c_sand*y1)-np.exp(-c_sand*y2))/(y2-y1) # initial average porosity
phia = (phi_sand/c_sand)*(np.exp(-c_sand*y1a)-np.exp(-c_sand*y2a))/(y2a-y1a) # new average porosity
print('new base depth: '+str(round(y2a,2))+' km')
print('initial thickness: '+str(round(y2-y1,2))+' km')
print('new thickness: '+str(round(y2a-y1a,2))+' km')
print('initial porosity: '+str(round(phi,3)))
print('new porosity: '+str(round(phia,3)))
def decompact(tops,lith,new_top,phi_sand,phi_mud,c_sand,c_mud):
tops_new = [] # list for decompacted tops
tops_new.append(new_top) # starting value
for i in range(len(tops)-1):
if lith[i] == 0:
phi = phi_mud; c = c_mud
if lith[i] == 1:
phi = phi_sand; c = c_sand
comp_func_1 = functools.partial(comp_func,y1=tops[i],y2=tops[i+1],y1a=tops_new[-1],phi=phi,c=c)
base_new_a = tops_new[-1]+tops[i+1]-tops[i]
base_new = bisect(comp_func_1, base_new_a, 4*base_new_a) # bisection
tops_new.append(base_new)
return tops_new
tops = np.array([1.0,1.1,1.15,1.3,1.5,2.0])
lith = np.array([0,1,0,1,0]) # lithology labels: 0 = mud, 1 = sand
phi_sand_0 = 0.49 # surface porosity for sand
phi_mud_0 = 0.63 # surface porosity for mud
c_sand = 0.27 # porosity-depth coefficient for sand (km-1)
c_mud = 0.57 # porosity-depth coefficent for mud (km-1)
tops_new = decompact(tops,lith,0.0,phi_sand_0,phi_mud_0,c_sand,c_mud) # compute new tops
def plot_decompaction(tops,tops_new):
for i in range(len(tops)-1):
x = [0,1,1,0]
y = [tops[i], tops[i], tops[i+1], tops[i+1]]
if lith[i] == 0:
color = 'xkcd:umber'
if lith[i] == 1:
color = 'xkcd:yellowish'
plt.fill(x,y,color=color)
x = np.array([2,3,3,2])
y = np.array([tops_new[i], tops_new[i], tops_new[i+1], tops_new[i+1]])
if lith[i] == 0:
color = 'xkcd:umber'
if lith[i] == 1:
color = 'xkcd:yellowish'
plt.fill(x,y,color=color)
plt.gca().invert_yaxis()
plt.tick_params(axis='x',which='both',bottom='off',top='off',labelbottom='off')
plt.ylabel('depth (km)');
plot_decompaction(tops,tops_new)
tops = np.array([1.0,1.1,1.15,1.3,1.5,2.0])
lith = np.array([0,1,0,1,0]) # lithology labels: 0 = mud, 1 = sand
c_sand = 1000/18605.0 # Kominz et al. 2011 >90% sand curve
c_mud = 1000/1671.0 # Kominz et al. 2011 >90% mud curve
phi_sand_0 = 0.407 # Kominz et al. 2011 >90% sand curve
phi_mud_0 = 0.614 # Kominz et al. 2011 >90% mud curve
tops_new = decompact(tops,lith,0.0,phi_sand_0,phi_mud_0,c_sand,c_mud) # compute new tops
plot_decompaction(tops,tops_new)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fetch the daily returns for a stock
Step2: Create a full tear sheet for the single stock
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pyfolio as pf
stock_rets = pf.utils.get_symbol_rets('FB')
pf.create_returns_tear_sheet(stock_rets, live_start_date='2015-12-1')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Custom layers
Step2: Layers
Step3: The full list of pre-existing layers can be seen in the documentation. It includes Dense (a fully-connected layer), Conv2D, LSTM, BatchNormalization, Dropout, and many others.
Step4: Implementing custom layers
Step5: Overall code is easier to read and maintain if it uses standard layers whenever possible, as other readers will be familiar with the behavior of standard layers. If you want to use a layer which is not present in tf.keras.layers, consider filing a github issue or, even better, sending us a pull request!
Step6: Much of the time, however, models which compose many layers simply call one layer after the other. This can be done in very little code using tf.keras.Sequential
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
print(tf.config.list_physical_devices('GPU'))
# In the tf.keras.layers package, layers are objects. To construct a layer,
# simply construct the object. Most layers take as a first argument the number
# of output dimensions / channels.
layer = tf.keras.layers.Dense(100)
# The number of input dimensions is often unnecessary, as it can be inferred
# the first time the layer is used, but it can be provided if you want to
# specify it manually, which is useful in some complex models.
layer = tf.keras.layers.Dense(10, input_shape=(None, 5))
# To use a layer, simply call it.
layer(tf.zeros([10, 5]))
# Layers have many useful methods. For example, you can inspect all variables
# in a layer using `layer.variables` and trainable variables using
# `layer.trainable_variables`. In this case a fully-connected layer
# will have variables for weights and biases.
layer.variables
# The variables are also accessible through nice accessors
layer.kernel, layer.bias
class MyDenseLayer(tf.keras.layers.Layer):
def __init__(self, num_outputs):
super(MyDenseLayer, self).__init__()
self.num_outputs = num_outputs
def build(self, input_shape):
self.kernel = self.add_weight("kernel",
shape=[int(input_shape[-1]),
self.num_outputs])
def call(self, inputs):
return tf.matmul(inputs, self.kernel)
layer = MyDenseLayer(10)
_ = layer(tf.zeros([10, 5])) # Calling the layer `.builds` it.
print([var.name for var in layer.trainable_variables])
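# Illustrative usage (not part of the original tutorial): a layer defined this way
# composes like any built-in layer, e.g. inside tf.keras.Sequential.
custom_stack = tf.keras.Sequential([MyDenseLayer(10), tf.keras.layers.ReLU()])
_ = custom_stack(tf.zeros([4, 5]))
print(len(custom_stack.trainable_variables)) # one kernel variable from MyDenseLayer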
class ResnetIdentityBlock(tf.keras.Model):
def __init__(self, kernel_size, filters):
super(ResnetIdentityBlock, self).__init__(name='')
filters1, filters2, filters3 = filters
self.conv2a = tf.keras.layers.Conv2D(filters1, (1, 1))
self.bn2a = tf.keras.layers.BatchNormalization()
self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size, padding='same')
self.bn2b = tf.keras.layers.BatchNormalization()
self.conv2c = tf.keras.layers.Conv2D(filters3, (1, 1))
self.bn2c = tf.keras.layers.BatchNormalization()
def call(self, input_tensor, training=False):
x = self.conv2a(input_tensor)
x = self.bn2a(x, training=training)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x, training=training)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x, training=training)
x += input_tensor
return tf.nn.relu(x)
block = ResnetIdentityBlock(1, [1, 2, 3])
_ = block(tf.zeros([1, 2, 3, 3]))
block.layers
len(block.variables)
block.summary()
my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1),
input_shape=(
None, None, 3)),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(2, 1,
padding='same'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Conv2D(3, (1, 1)),
tf.keras.layers.BatchNormalization()])
my_seq(tf.zeros([1, 2, 3, 3]))
my_seq.summary()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Binary Logit
Step3: Multinomial Logit
Step5: This is what the data looks like
Step7: MNL with random sampling of alternatives
|
<ASSISTANT_TASK:>
Python Code:
import os; os.chdir('../choicemodels')
import choicemodels
import numpy as np
import pandas as pd
import statsmodels.api as sm # for binary logit
from collections import OrderedDict # for MNL model specifications
# Import or generate estimation data
Y = np.random.randint(2, size=50) # 50x1 vector of random 0's and 1's
X = np.random.rand(50, 3) # 50x3 matrix of random floats
data = pd.DataFrame(data=X, columns=['x1','x2','x3'])
data['y'] = Y
print(data.describe())
# Fit a model
specification = 'y ~ x1 + x2 + x3'
m = sm.Logit.from_formula(specification, data)
results = m.fit()
print(results.summary())
# Import or generate estimation data
def rand(len, min, max):
    """Generate `len` random floats uniformly distributed from `min` to `max`."""
return (max - min) * np.random.rand(len) + min
numalts = 50 # alternatives
X = np.concatenate((rand(numalts//2, 0, 10), rand(numalts//2, 100, 110)))
numobs = 1000 # agents/observations
beta = np.zeros(1000) + 1.5
U = [[beta[n]*x + np.random.gumbel() for x in X] for n in range(numobs)] # utility matrix
choices = [np.argmax(a) for a in U]
# Set up the estimation dataset in long format
d = [[n, i, int(choices[n]==i), X[i]] for n in range(numobs) for i in range(numalts)]
df = pd.DataFrame(d, columns=['obs_id', 'alt_id', 'chosen', 'x'])
print(df.describe())
# Set up model spec
spec = OrderedDict([('x', 'all_same')])
labels = OrderedDict([('x', 'beta_x')])
def estimate_model(init_val):
    """Initialize and fit a model, returning it as an object. Will use the
    current values of `df`, `spec`, and `labels`.
    """
m = choicemodels.MNLogit(data = df,
alt_id_col = 'alt_id',
obs_id_col = 'obs_id',
choice_col = 'chosen',
specification = spec,
names = labels)
results = m.fit_mle(init_vals = np.array([init_val]))
return results
%%time
results = estimate_model(init_val = 1.2)
print(results.summary())
def alts(obs_id, C, K):
    """This function generates a restricted choice set D for a particular
    observation. Expects list `C` of alternatives to sample from (either
    the full choice set or a stratum), int `K` alternatives to sample,
    and list `choices` of the alt_id chosen for each obs_id. Returns list
    of K alt_id's including the chosen one.
    """
chosen = choices[obs_id] # id of chosen alternative
unchosen = [i for i in C if chosen != i] # id's of unchosen alts
sample_unchosen = np.random.choice(unchosen, size=K-1, replace=False).tolist()
return np.sort([chosen] + sample_unchosen)
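# Illustrative check (not in the original notebook): each sampled choice set
# has exactly K alternatives and always includes the chosen one.
example_set = alts(0, C=range(numalts), K=10)
assert len(example_set) == 10 and choices[0] in example_set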
# Set up the estimation dataset, which can use the same spec as earlier
C = range(numalts) # choice set to sample from
K = 10
d = [[n, i, int(choices[n]==i), X[i]] for n in range(numobs) for i in alts(n, C, K)]
df = pd.DataFrame(d, columns=['obs_id', 'alt_id', 'chosen', 'x'])
print(df.describe())
%%time
results = estimate_model(init_val = 1.2)
print(results.summary())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Inspecting Quantization Errors with Quantization Debugger
Step3: We can see that the original model has a much higher top-5 accuracy for our
Step 1. Debugger preparation
Step4: Step 2. Running the debugger and getting the results
Step5: The processed metrics can be accessed with
Step6: For each row in the dump, the op name and index comes first, followed by
Step7: Step 3. Data analysis
Step8: There are many layers with wide ranges, and some layers that have high
Step9: With these layers, you can try selective quantization to see if not quantizing
Step10: In addition to these, skipping quantization for the first few layers also helps
Step11: Selective Quantization
Step12: The accuracy is still lower compared to the original float model, but we have
Step13: With these techniques, we are able to improve the quantized MobileNet V3 model
Step14: The result of model_debug_metrics can be separately seen from
Step15: Using (internal) mlir_quantize API to access in-depth features
Step16: Whole model verify mode
Step17: Selective quantization from an already calibrated model
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Quantization debugger is available from TensorFlow 2.7.0
!pip uninstall -y tensorflow
!pip install tf-nightly
!pip install tensorflow_datasets --upgrade # imagenet_v2 needs latest checksum
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_hub as hub
#@title Boilerplates and helpers
MODEL_URI = 'https://tfhub.dev/google/imagenet/mobilenet_v3_small_100_224/classification/5'
def process_image(data):
data['image'] = tf.image.resize(data['image'], (224, 224)) / 255.0
return data
# Representative dataset
def representative_dataset(dataset):
def _data_gen():
for data in dataset.batch(1):
yield [data['image']]
return _data_gen
def eval_tflite(tflite_model, dataset):
  """Evaluates tensorflow lite classification model with the given dataset."""
interpreter = tf.lite.Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_idx = interpreter.get_input_details()[0]['index']
output_idx = interpreter.get_output_details()[0]['index']
results = []
for data in representative_dataset(dataset)():
interpreter.set_tensor(input_idx, data[0])
interpreter.invoke()
results.append(interpreter.get_tensor(output_idx).flatten())
results = np.array(results)
gt_labels = np.array(list(dataset.map(lambda data: data['label'] + 1)))
accuracy = (
np.sum(np.argsort(results, axis=1)[:, -5:] == gt_labels.reshape(-1, 1)) /
gt_labels.size)
print(f'Top-5 accuracy (quantized): {accuracy * 100:.2f}%')
model = tf.keras.Sequential([
tf.keras.layers.Input(shape=(224, 224, 3), batch_size=1),
hub.KerasLayer(MODEL_URI)
])
model.compile(
loss='sparse_categorical_crossentropy',
metrics='sparse_top_k_categorical_accuracy')
model.build([1, 224, 224, 3])
# Prepare dataset with 100 examples
ds = tfds.load('imagenet_v2', split='test[:1%]')
ds = ds.map(process_image)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.representative_dataset = representative_dataset(ds)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_model = converter.convert()
test_ds = ds.map(lambda data: (data['image'], data['label'] + 1)).batch(16)
loss, acc = model.evaluate(test_ds)
print(f'Top-5 accuracy (float): {acc * 100:.2f}%')
eval_tflite(quantized_model, ds)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset(ds)
# my_debug_dataset should have the same format as my_representative_dataset
debugger = tf.lite.experimental.QuantizationDebugger(
converter=converter, debug_dataset=representative_dataset(ds))
debugger.run()
RESULTS_FILE = '/tmp/debugger_results.csv'
with open(RESULTS_FILE, 'w') as f:
debugger.layer_statistics_dump(f)
!head /tmp/debugger_results.csv
layer_stats = pd.read_csv(RESULTS_FILE)
layer_stats.head()
layer_stats['range'] = 255.0 * layer_stats['scale']
layer_stats['rmse/scale'] = layer_stats.apply(
lambda row: np.sqrt(row['mean_squared_error']) / row['scale'], axis=1)
layer_stats[['op_name', 'range', 'rmse/scale']].head()
plt.figure(figsize=(15, 5))
ax1 = plt.subplot(121)
ax1.bar(np.arange(len(layer_stats)), layer_stats['range'])
ax1.set_ylabel('range')
ax2 = plt.subplot(122)
ax2.bar(np.arange(len(layer_stats)), layer_stats['rmse/scale'])
ax2.set_ylabel('rmse/scale')
plt.show()
layer_stats[layer_stats['rmse/scale'] > 0.7][[
'op_name', 'range', 'rmse/scale', 'tensor_name'
]]
suspected_layers = list(
layer_stats[layer_stats['rmse/scale'] > 0.7]['tensor_name'])
suspected_layers.extend(list(layer_stats[:5]['tensor_name']))
debug_options = tf.lite.experimental.QuantizationDebugOptions(
denylisted_nodes=suspected_layers)
debugger = tf.lite.experimental.QuantizationDebugger(
converter=converter,
debug_dataset=representative_dataset(ds),
debug_options=debug_options)
selective_quantized_model = debugger.get_nondebug_quantized_model()
eval_tflite(selective_quantized_model, ds)
debug_options = tf.lite.experimental.QuantizationDebugOptions(
denylisted_ops=['MEAN'])
debugger = tf.lite.experimental.QuantizationDebugger(
converter=converter,
debug_dataset=representative_dataset(ds),
debug_options=debug_options)
selective_quantized_model = debugger.get_nondebug_quantized_model()
eval_tflite(selective_quantized_model, ds)
debug_options = tf.lite.experimental.QuantizationDebugOptions(
layer_debug_metrics={
'mean_abs_error': (lambda diff: np.mean(np.abs(diff)))
},
layer_direct_compare_metrics={
'correlation':
lambda f, q, s, zp: (np.corrcoef(f.flatten(),
(q.flatten() - zp) / s)[0, 1])
},
model_debug_metrics={
'argmax_accuracy': (lambda f, q: np.mean(np.argmax(f) == np.argmax(q)))
})
debugger = tf.lite.experimental.QuantizationDebugger(
converter=converter,
debug_dataset=representative_dataset(ds),
debug_options=debug_options)
debugger.run()
CUSTOM_RESULTS_FILE = '/tmp/debugger_results.csv'
with open(CUSTOM_RESULTS_FILE, 'w') as f:
debugger.layer_statistics_dump(f)
custom_layer_stats = pd.read_csv(CUSTOM_RESULTS_FILE)
custom_layer_stats[['op_name', 'mean_abs_error', 'correlation']].tail()
debugger.model_statistics
from tensorflow.lite.python import convert
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.representative_dataset = representative_dataset(ds)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter._experimental_calibrate_only = True
calibrated_model = converter.convert()
# Note that enable_numeric_verify and enable_whole_model_verify are set.
quantized_model = convert.mlir_quantize(
calibrated_model,
enable_numeric_verify=True,
enable_whole_model_verify=True)
debugger = tf.lite.experimental.QuantizationDebugger(
quant_debug_model_content=quantized_model,
debug_dataset=representative_dataset(ds))
selective_quantized_model = convert.mlir_quantize(
calibrated_model, denylisted_nodes=suspected_layers)
eval_tflite(selective_quantized_model, ds)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How to match images using DELF and TensorFlow Hub
Step2: Data
Step3: Download, resize, save, and display the images.
Step4: Apply the DELF module to the data
Step5: Use the locations and description vectors to match the images
|
<ASSISTANT_TASK:>
Python Code:
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
!pip install scikit-image
from absl import logging
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image, ImageOps
from scipy.spatial import cKDTree
from skimage.feature import plot_matches
from skimage.measure import ransac
from skimage.transform import AffineTransform
from six import BytesIO
import tensorflow as tf
import tensorflow_hub as hub
from six.moves.urllib.request import urlopen
#@title Choose images
images = "Bridge of Sighs" #@param ["Bridge of Sighs", "Golden Gate", "Acropolis", "Eiffel tower"]
if images == "Bridge of Sighs":
# from: https://commons.wikimedia.org/wiki/File:Bridge_of_Sighs,_Oxford.jpg
# by: N.H. Fischer
IMAGE_1_URL = 'https://upload.wikimedia.org/wikipedia/commons/2/28/Bridge_of_Sighs%2C_Oxford.jpg'
# from https://commons.wikimedia.org/wiki/File:The_Bridge_of_Sighs_and_Sheldonian_Theatre,_Oxford.jpg
# by: Matthew Hoser
IMAGE_2_URL = 'https://upload.wikimedia.org/wikipedia/commons/c/c3/The_Bridge_of_Sighs_and_Sheldonian_Theatre%2C_Oxford.jpg'
elif images == "Golden Gate":
IMAGE_1_URL = 'https://upload.wikimedia.org/wikipedia/commons/1/1e/Golden_gate2.jpg'
IMAGE_2_URL = 'https://upload.wikimedia.org/wikipedia/commons/3/3e/GoldenGateBridge.jpg'
elif images == "Acropolis":
IMAGE_1_URL = 'https://upload.wikimedia.org/wikipedia/commons/c/ce/2006_01_21_Ath%C3%A8nes_Parth%C3%A9non.JPG'
IMAGE_2_URL = 'https://upload.wikimedia.org/wikipedia/commons/5/5c/ACROPOLIS_1969_-_panoramio_-_jean_melis.jpg'
else:
IMAGE_1_URL = 'https://upload.wikimedia.org/wikipedia/commons/d/d8/Eiffel_Tower%2C_November_15%2C_2011.jpg'
IMAGE_2_URL = 'https://upload.wikimedia.org/wikipedia/commons/a/a8/Eiffel_Tower_from_immediately_beside_it%2C_Paris_May_2008.jpg'
def download_and_resize(name, url, new_width=256, new_height=256):
path = tf.keras.utils.get_file(url.split('/')[-1], url)
image = Image.open(path)
image = ImageOps.fit(image, (new_width, new_height), Image.ANTIALIAS)
return image
image1 = download_and_resize('image_1.jpg', IMAGE_1_URL)
image2 = download_and_resize('image_2.jpg', IMAGE_2_URL)
plt.subplot(1,2,1)
plt.imshow(image1)
plt.subplot(1,2,2)
plt.imshow(image2)
delf = hub.load('https://tfhub.dev/google/delf/1').signatures['default']
def run_delf(image):
np_image = np.array(image)
float_image = tf.image.convert_image_dtype(np_image, tf.float32)
return delf(
image=float_image,
score_threshold=tf.constant(100.0),
image_scales=tf.constant([0.25, 0.3536, 0.5, 0.7071, 1.0, 1.4142, 2.0]),
max_feature_num=tf.constant(1000))
result1 = run_delf(image1)
result2 = run_delf(image2)
#@title TensorFlow is not needed for this post-processing and visualization
def match_images(image1, image2, result1, result2):
distance_threshold = 0.8
# Read features.
num_features_1 = result1['locations'].shape[0]
print("Loaded image 1's %d features" % num_features_1)
num_features_2 = result2['locations'].shape[0]
print("Loaded image 2's %d features" % num_features_2)
# Find nearest-neighbor matches using a KD tree.
d1_tree = cKDTree(result1['descriptors'])
_, indices = d1_tree.query(
result2['descriptors'],
distance_upper_bound=distance_threshold)
# Select feature locations for putative matches.
locations_2_to_use = np.array([
result2['locations'][i,]
for i in range(num_features_2)
if indices[i] != num_features_1
])
locations_1_to_use = np.array([
result1['locations'][indices[i],]
for i in range(num_features_2)
if indices[i] != num_features_1
])
# Perform geometric verification using RANSAC.
_, inliers = ransac(
(locations_1_to_use, locations_2_to_use),
AffineTransform,
min_samples=3,
residual_threshold=20,
max_trials=1000)
print('Found %d inliers' % sum(inliers))
# Visualize correspondences.
_, ax = plt.subplots()
inlier_idxs = np.nonzero(inliers)[0]
plot_matches(
ax,
image1,
image2,
locations_1_to_use,
locations_2_to_use,
np.column_stack((inlier_idxs, inlier_idxs)),
matches_color='b')
ax.axis('off')
ax.set_title('DELF correspondences')
match_images(image1, image2, result1, result2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: %quickref
Step2: help
Step3: object?
Step4: ! <command>
Step5: %cd
Step6: who/whos
Step7: %time <Python statement/expression>
Step8: %timeit <Python statement/expression>
Step9: %run <python module>.py
Step10: %run -t <python module>.py
Step11: A longer example (for the sake of the correctness of substract_curves.py)
Step12: line_profiler
Step13: profile/cProfiler
Step14: Testing Cython
|
<ASSISTANT_TASK:>
Python Code:
?
%quickref
help
a = 1
a?
!cd /
!pwd
%cd /
!pwd
%cd ~/YAPT
!pwd
%whos
a
%time a=3
a
a
%timeit a=4
a
%run hello_world.py
# %load module.py
a = 1
%run module.py
%whos
a
!cat substract_curves.py
!paste dataset1.txt dataset2.txt
%run -t substract_curves.py dataset1.txt dataset2.txt
# Generate two "random" similar signals.
import io
import numpy as np
x = 0
base = []
with io.open('dataset3.txt', 'w') as file:
for i in range(100):
x += np.random.randint(10) - 4.5
file.write('{}\t{}\n'.format(i*2,x))
base.append(x)
with io.open('dataset4.txt', 'w') as file:
for i in range(99):
x = base[i] + np.random.randint(10) - 4.5
file.write('{}\t{}\n'.format(i*2+1,x))
# Run "substract_curves.py" and its stdout is written in "differences.txt".
import IPython.utils
with IPython.utils.io.capture_output() as captured:
!python substract_curves.py dataset3.txt dataset4.txt
import io
with io.open('differences.txt', 'w') as file:
file.write(captured.stdout)
# Plot the signals.
import matplotlib.pyplot as plt
plt.plot(*np.loadtxt("dataset3.txt",unpack=True), linewidth=2.0)
plt.plot(*np.loadtxt("dataset4.txt",unpack=True), linewidth=2.0)
plt.plot(*np.loadtxt("differences.txt",unpack=True), linewidth=2.0)
plt.show()
!cat profile_quicksort.py
!kernprof -l -v profile_quicksort.py
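# Note (added for clarity): kernprof's line-by-line mode reports only the functions
# decorated with @profile inside profile_quicksort.py; kernprof injects that
# decorator into builtins at run time, so no import is required in the script.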
# !python -m cProfile quicksort.py
%load_ext Cython
def f_slow(i):
return i**4 + 3*i**2 + 10
%timeit f_slow(100)
%%cython --annotate
def f_cython(int i):
return i**4 + 3*i**2 + 10
%timeit f_cython(100)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The advantage of Python is that it is flexible and easy to program. The time it takes to set up a new calculation is therefore short. But for certain types of calculations Python (and any other interpreted language) can be very slow. It is particularly iterations over large arrays that are difficult to do efficiently.
Step2: Fortran
Step3: Generate a python module using f2py
Step4: Example of a python script that use the module
Step5: Example 1
Step6: The f2py program generated a module declaration file called dsum.pyf. Let's look what's in it
Step7: The module does not know what Fortran subroutine arguments is input and output, so we need to manually edit the module declaration files and mark output variables with intent(out) and input variable with intent(in)
Step8: Compile the fortran code into a module that can be included in python
Step9: Using the module from Python
Step10: Compare performance
Step11: Example 2
Step12: Fortran subroutine for the same thing
Step13: We can directly compile the fortran code to a python module
Step14: Benchmark the different implementations
Step15: Further reading
Step16: Compile the C file into a shared library
Step17: The result is a compiled shared library libfunctions.so
Step18: Now we need to write wrapper functions to access the C library
Step19: Product function
Step20: Cummulative sum
Step21: Simple benchmark
Step22: Further reading
Step23: A build file for generating C code and compiling it into a Python module.
Step24: Cython in the IPython notebook
Step25: Further reading
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
from IPython.display import Image
Image(filename='images/optimizing-what.png')
%%file hellofortran.f
C File hellofortran.f
subroutine hellofortran (n)
integer n
do 100 i=0, n
print *, "Fortran says hello"
100 continue
end
!f2py -c -m hellofortran hellofortran.f
%%file hello.py
import hellofortran
hellofortran.hellofortran(5)
# run the script
!python hello.py
%%file dprod.f
subroutine dprod(x, y, n)
double precision x(n), y
y = 1.0
do 100 i=1, n
y = y * x(i)
100 continue
end
!rm -f dprod.pyf
!f2py -m dprod -h dprod.pyf dprod.f
!cat dprod.pyf
%%file dprod.pyf
python module dprod ! in
interface ! in :dprod
subroutine dprod(x,y,n) ! in :dprod:dprod.f
double precision dimension(n), intent(in) :: x
double precision, intent(out) :: y
integer, optional,check(len(x)>=n),depend(x),intent(in) :: n=len(x)
end subroutine dprod
end interface
end python module dprod
!f2py -c dprod.pyf dprod.f
import dprod
help(dprod)
dprod.dprod(arange(1,50))
# compare to numpy
prod(arange(1.0,50.0))
dprod.dprod(arange(1,10), 5) # only the 5 first elements
xvec = rand(500)
timeit dprod.dprod(xvec)
timeit xvec.prod()
# simple python algorithm: example of a SLOW implementation
# Why? Because the loop is implemented in python.
def py_dcumsum(a):
b = empty_like(a)
b[0] = a[0]
for n in range(1,len(a)):
b[n] = b[n-1]+a[n]
return b
%%file dcumsum.f
c File dcumsum.f
subroutine dcumsum(a, b, n)
double precision a(n)
double precision b(n)
integer n
cf2py intent(in) :: a
cf2py intent(out) :: b
cf2py intent(hide) :: n
b(1) = a(1)
do 100 i=2, n
b(i) = b(i-1) + a(i)
100 continue
end
!f2py -c dcumsum.f -m dcumsum
import dcumsum
a = array([1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0])
py_dcumsum(a)
dcumsum.dcumsum(a)
cumsum(a)
a = rand(10000)
timeit py_dcumsum(a)
timeit dcumsum.dcumsum(a)
timeit a.cumsum()
%%file functions.c
#include <stdio.h>
void hello(int n);
double dprod(double *x, int n);
void dcumsum(double *a, double *b, int n);
void
hello(int n)
{
int i;
for (i = 0; i < n; i++)
{
printf("C says hello\n");
}
}
double
dprod(double *x, int n)
{
int i;
double y = 1.0;
for (i = 0; i < n; i++)
{
y *= x[i];
}
return y;
}
void
dcumsum(double *a, double *b, int n)
{
int i;
b[0] = a[0];
for (i = 1; i < n; i++)
{
b[i] = a[i] + b[i-1];
}
}
!gcc -c -Wall -O2 -Wall -ansi -pedantic -fPIC -o functions.o functions.c
!gcc -o libfunctions.so -shared functions.o
!file libfunctions.so
%%file functions.py
import numpy
import ctypes
_libfunctions = numpy.ctypeslib.load_library('libfunctions', '.')
_libfunctions.hello.argtypes = [ctypes.c_int]
_libfunctions.hello.restype = ctypes.c_void_p
_libfunctions.dprod.argtypes = [numpy.ctypeslib.ndpointer(dtype=numpy.float), ctypes.c_int]
_libfunctions.dprod.restype = ctypes.c_double
_libfunctions.dcumsum.argtypes = [numpy.ctypeslib.ndpointer(dtype=numpy.float), numpy.ctypeslib.ndpointer(dtype=numpy.float), ctypes.c_int]
_libfunctions.dcumsum.restype = ctypes.c_void_p
def hello(n):
return _libfunctions.hello(int(n))
def dprod(x, n=None):
if n is None:
n = len(x)
x = numpy.asarray(x, dtype=numpy.float)
return _libfunctions.dprod(x, int(n))
def dcumsum(a, n):
a = numpy.asarray(a, dtype=numpy.float)
b = numpy.empty(len(a), dtype=numpy.float)
_libfunctions.dcumsum(a, b, int(n))
return b
%%file run_hello_c.py
import functions
functions.hello(3)
!python run_hello_c.py
import functions
functions.dprod([1,2,3,4,5])
a = rand(100000)
res_c = functions.dcumsum(a, len(a))
res_fortran = dcumsum.dcumsum(a)
res_c - res_fortran
timeit functions.dcumsum(a, len(a))
timeit dcumsum.dcumsum(a)
timeit a.cumsum()
%%file cy_dcumsum.pyx
cimport numpy
def dcumsum(numpy.ndarray[numpy.float64_t, ndim=1] a, numpy.ndarray[numpy.float64_t, ndim=1] b):
cdef int i, n = len(a)
b[0] = a[0]
for i from 1 <= i < n:
b[i] = b[i-1] + a[i]
return b
%%file setup.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("cy_dcumsum", ["cy_dcumsum.pyx"])]
)
!python setup.py build_ext --inplace
import cy_dcumsum
a = array([1,2,3,4], dtype=float)
b = empty_like(a)
cy_dcumsum.dcumsum(a,b)
b
a = array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
b = empty_like(a)
cy_dcumsum.dcumsum(a, b)
b
py_dcumsum(a)
a = rand(100000)
b = empty_like(a)
timeit py_dcumsum(a)
timeit cy_dcumsum.dcumsum(a,b)
%load_ext cythonmagic
%%cython
cimport numpy
def cy_dcumsum2(numpy.ndarray[numpy.float64_t, ndim=1] a, numpy.ndarray[numpy.float64_t, ndim=1] b):
cdef int i, n = len(a)
b[0] = a[0]
for i from 1 <= i < n:
b[i] = b[i-1] + a[i]
return b
timeit cy_dcumsum2(a,b)
%reload_ext version_information
%version_information ctypes, Cython
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parallelism
Step2: GPU
Step3: Resident Array
|
<ASSISTANT_TASK:>
Python Code:
!hybridizer-cuda ./01-Naive/01-naive.cs ./Common_Files/SparseMatrixNaive.cs -o ./01-Naive/naive.exe -run
!hybridizer-cuda ./02-Parallel/01-parallelism.cs ./Common_Files/SparseMatrixNaive.cs -o ./02-Parallel/01-parallelism.exe -run
!hybridizer-cuda ./03-GPU/01-gpu.cs ./Common_Files/SparseMatrixNaive.cs -o ./03-GPU/01-gpu.exe -run
!hybridizer-cuda ./04-Resident/01-resident.cs ./Common_Files/SparseMatrixResident.cs -o ./04-Resident/01-resident.exe -run
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the Pre-Trained Models and Data
Step2: We look at the layer normalized model trained on the aaron_sheep dataset for now.
Step3: The real fun begins
Step4: The helper functions for encoding a sketch to some latent code $z$ and then decoding it back to a sketch were provided in the original notebook. I just made some minor syntactic changes and removed the behaviour of plotting as a side-effect.
Step5: Now we get a random sample from the test dataset
Step6: We project it into the 128-dimensional latent space using the pre-trained encoder
Step7: Now we can reconstruct the original sketch from the learned latent representation using the pre-trained decoder, with temperature $\tau=0.8$. The temperature parameter controls the level of randomness in the samples generated by the model, which becomes deterministic as $\tau \to 0$, and produces samples that are the most likely point in the probability density function. See pg. 7 of the original paper for further discussion of the effects the temperature parameter has on the sampling process.
Step8: Variance in the Reconstruction
Step9: At the lowest temperature setting of $\tau=0.1$, we see the samples consistently share a similar appearance - they all look like vertical strokes emanating from a fluffy cloud. However, they are also consistently dissimilar to the original sketch. In this sense, the samples from the model seem to exhibit high bias and low variance. As we increase the variance in the samples by increasing $\tau$, we start to find some samples that resemble our original sketch. But when we increase $\tau$ a little too much, beyond say 0.8, we begin to see a little too much randomness in the samples.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import tensorflow as tf
from matplotlib.animation import FuncAnimation
from matplotlib.path import Path
from matplotlib import rc
from six.moves import map
from magenta.models.sketch_rnn.sketch_rnn_train import \
(load_env,
load_checkpoint,
reset_graph,
download_pretrained_models,
PRETRAINED_MODELS_URL)
from magenta.models.sketch_rnn.model import Model, sample
from magenta.models.sketch_rnn.utils import (lerp,
slerp,
get_bounds,
to_big_strokes,
to_normal_strokes)
# For inine display of animation
# equivalent to rcParams['animation.html'] = 'html5'
rc('animation', html='html5')
# set numpy output to something sensible
np.set_printoptions(precision=8,
edgeitems=6,
linewidth=200,
suppress=True)
tf.logging.info("TensorFlow Version: {}".format(tf.__version__))
DATA_DIR = ('http://github.com/hardmaru/sketch-rnn-datasets/'
'raw/master/aaron_sheep/')
MODELS_ROOT_DIR = '/tmp/sketch_rnn/models'
DATA_DIR
PRETRAINED_MODELS_URL
download_pretrained_models(
models_root_dir=MODELS_ROOT_DIR,
pretrained_models_url=PRETRAINED_MODELS_URL)
MODEL_DIR = MODELS_ROOT_DIR + '/aaron_sheep/layer_norm'
(train_set,
valid_set,
test_set,
hps_model,
eval_hps_model,
sample_hps_model) = load_env(DATA_DIR, MODEL_DIR)
class SketchPath(Path):
def __init__(self, data, factor=.2, *args, **kwargs):
vertices = np.cumsum(data[::, :-1], axis=0) / factor
codes = np.roll(self.to_code(data[::,-1].astype(int)),
shift=1)
codes[0] = Path.MOVETO
super(SketchPath, self).__init__(vertices,
codes,
*args,
**kwargs)
@staticmethod
def to_code(cmd):
# if cmd == 0, the code is LINETO
# if cmd == 1, the code is MOVETO (which is LINETO - 1)
return Path.LINETO - cmd
def draw(sketch_data, factor=.2, pad=(10, 10), ax=None):
if ax is None:
ax = plt.gca()
x_pad, y_pad = pad
x_pad //= 2
y_pad //= 2
x_min, x_max, y_min, y_max = get_bounds(data=sketch_data,
factor=factor)
ax.set_xlim(x_min-x_pad, x_max+x_pad)
ax.set_ylim(y_max+y_pad, y_min-y_pad)
sketch = SketchPath(sketch_data)
patch = patches.PathPatch(sketch, facecolor='none')
ax.add_patch(patch)
# construct the sketch-rnn model here:
reset_graph()
model = Model(hps_model)
eval_model = Model(eval_hps_model, reuse=True)
sample_model = Model(sample_hps_model, reuse=True)
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
# loads the weights from checkpoint into our model
load_checkpoint(sess=sess, checkpoint_path=MODEL_DIR)
def encode(input_strokes):
strokes = to_big_strokes(input_strokes).tolist()
strokes.insert(0, [0, 0, 1, 0, 0])
seq_len = [len(input_strokes)]
z = sess.run(eval_model.batch_z,
feed_dict={
eval_model.input_data: [strokes],
eval_model.sequence_lengths: seq_len})[0]
return z
def decode(z_input=None, temperature=.1, factor=.2):
z = None
if z_input is not None:
z = [z_input]
sample_strokes, m = sample(
sess,
sample_model,
seq_len=eval_model.hps.max_seq_len,
temperature=temperature, z=z)
return to_normal_strokes(sample_strokes)
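# Usage note (added for clarity): decode(z, temperature=t) draws a sketch
# conditioned on the latent vector z; calling decode() without z lets the
# sampler pick the latent code itself. Lower temperatures give more
# deterministic strokes, higher ones more random strokes.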
sketch = test_set.random_sample()
fig, ax = plt.subplots(figsize=(3, 3),
subplot_kw=dict(xticks=[],
yticks=[],
frame_on=False))
draw(sketch, ax=ax)
plt.show()
z = encode(sketch)
z.shape
sketch_reconstructed = decode(z, temperature=.6)
sketch_reconstructed.shape
fig, ax = plt.subplots(figsize=(3, 3),
subplot_kw=dict(xticks=[],
yticks=[],
frame_on=False))
draw(sketch_reconstructed, ax=ax)
plt.show()
fig, ax_arr = plt.subplots(nrows=5,
ncols=10,
figsize=(8, 4),
subplot_kw=dict(xticks=[],
yticks=[],
frame_on=False))
fig.tight_layout()
for row_num, ax_row in enumerate(ax_arr):
for col_num, ax in enumerate(ax_row):
if not col_num:
draw(sketch, ax=ax)
xlabel = 'original'
else:
t = col_num / 10.
draw(decode(z, temperature=t), ax=ax)
xlabel = r'$\tau={}$'.format(t)
if row_num+1 == len(ax_arr):
ax.set_xlabel(xlabel)
plt.show()
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(6, 3),
subplot_kw=dict(xticks=[],
yticks=[]))
fig.tight_layout()
x_pad, y_pad = 10, 10
x_pad //= 2
y_pad //= 2
(x_min_1,
x_max_1,
y_min_1,
y_max_1) = get_bounds(data=sketch, factor=.2)
(x_min_2,
x_max_2,
y_min_2,
y_max_2) = get_bounds(data=sketch_reconstructed, factor=.2)
x_min = np.minimum(x_min_1, x_min_2)
y_min = np.minimum(y_min_1, y_min_2)
x_max = np.maximum(x_max_1, x_max_2)
y_max = np.maximum(y_max_1, y_max_2)
ax1.set_xlim(x_min-x_pad, x_max+x_pad)
ax1.set_ylim(y_max+y_pad, y_min-y_pad)
ax1.set_xlabel('Original')
ax2.set_xlim(x_min-x_pad, x_max+x_pad)
ax2.set_ylim(y_max+y_pad, y_min-y_pad)
ax2.set_xlabel('Reconstruction')
def animate(i):
original = SketchPath(sketch[:i+1])
reconstructed = SketchPath(sketch_reconstructed[:i+1])
patch1 = ax1.add_patch(patches.PathPatch(original,
facecolor='none'))
patch2 = ax2.add_patch(patches.PathPatch(reconstructed,
facecolor='none'))
return patch1, patch2
frames = np.maximum(sketch.shape[0],
sketch_reconstructed.shape[0])
frames
FuncAnimation(fig,
animate,
frames=frames-1,
interval=15,
repeat_delay=1000*3,
blit=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MNIST Dataset
Step2: Convolution Networks
Step3: Prediction
Step4: Theano & Feature Extraction
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pickle
import os
import gzip
import numpy as np
import theano
import lasagne
from lasagne import layers
from lasagne.updates import nesterov_momentum
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import visualize
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
def load_dataset():
with gzip.open("datasets/mnist/mnist.pkl.gz") as fin:
data = pickle.load(fin)
X_train, y_train = data[0]
X_val, y_val = data[1]
X_test, y_test = data[2]
X_train = X_train.reshape((-1, 1, 28, 28))
X_val = X_val.reshape((-1, 1, 28, 28))
X_test = X_test.reshape((-1, 1, 28, 28))
y_train = y_train.astype(np.uint8)
y_val = y_val.astype(np.uint8)
y_test = y_test.astype(np.uint8)
return X_train, y_train, X_val, y_val, X_test, y_test
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
net1 = NeuralNet(
layers=[('input', layers.InputLayer),
('conv2d1', layers.Conv2DLayer),
('maxpool1', layers.MaxPool2DLayer),
('conv2d2', layers.Conv2DLayer),
('maxpool2', layers.MaxPool2DLayer),
('dropout1', layers.DropoutLayer),
('dense', layers.DenseLayer),
('dropout2', layers.DropoutLayer),
('output', layers.DenseLayer),
],
# input layer
input_shape=(None, 1, 28, 28),
# layer conv2d1
conv2d1_num_filters=16,
conv2d1_filter_size=(6, 6),
conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
conv2d1_W=lasagne.init.GlorotUniform(),
# layer maxpool1
maxpool1_pool_size=(2, 2),
# layer conv2d2
conv2d2_num_filters=16,
conv2d2_filter_size=(5, 5),
conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
# layer maxpool2
maxpool2_pool_size=(2, 2),
# dropout1
dropout1_p=0.5,
# dense
dense_num_units=192,
dense_nonlinearity=lasagne.nonlinearities.rectify,
# dropout2
dropout2_p=0.5,
# output
output_nonlinearity=lasagne.nonlinearities.softmax,
output_num_units=10,
# optimization method params
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=8,
verbose=1,
)
# Train the network
nn = net1.fit(X_train, y_train)
preds = net1.predict(X_test)
dense_layer = layers.get_output(net1.layers_['dense'], deterministic=True)
output_layer = layers.get_output(net1.layers_['output'], deterministic=True)
input_var = net1.layers_['input'].input_var
f_output = theano.function([input_var], output_layer)
f_dense = theano.function([input_var], dense_layer)
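# Illustrative usage (assumed, not from the original notebook): extract the
# 192-dimensional dense-layer activations for a batch of test images.
features = f_dense(X_test[:128])
print(features.shape) # expected (128, 192)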
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
from bisect import bisect_left
def solve(N, X, Y):
    p = []
    for i in range(N):
        p.append([X[i], Y[i]])
    p = sorted(p)
    s = {}
    s[p[0][1]] = 1
    for i in range(N):
        arr = list(s.keys())
        it = bisect_left(arr, p[i][1])
        if it == len(s):
            s[p[i][1]] = 1
        else:
            del s[arr[it]]
            s[p[i][1]] = 1
    return len(s)

if __name__ == '__main__':
    N = 3
    X = [1, 2, 0]
    Y = [2, 0, 1]
    maxintersection = solve(N, X, Y)
    print(maxintersection)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: How does it work?
Step2: Step-by-step explanation of the code
Step3: We then give the application a name; here it will be app
Step4: Then comes the crucial part
Step5: This last part simply makes sure the application starts when the code is run from the console or the terminal.
Step6: Exercise 1
Step7: Add a little style to the page
Step8: The hello_world_green.py file
Step9: Here, instead of returning the text directly, we ask to inject the message Hello World into the template of the home page.
Step10: Let's look at all of this in detail.
Step11: Compared to the previous example, we added a route and a new function. The new route is that of a second page to which the html template "page_suivante" is applied. In this example, nothing is injected from python into the page.
Step12: home.html
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
from flask import Flask # pip install flask
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
# if __name__ == "__main__":
# app.run()
from flask import Flask
app = Flask(__name__)
def hello():
return "Hello World!"
if __name__ == "__main__" and "get_ipython" not in locals(): # ne pas exécuter dans un notebook
app.run()
from pyquickhelper.helpgen import NbImage
NbImage("simple_hello.png")
NbImage("green_hello.png")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("home.html", message = "Hello World!")
#if __name__ == "__main__":
# app.run()
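# The Jinja template itself is not shown in this notebook. A minimal sketch of what
# templates/home.html could contain (an assumption, not the course's actual file):
#
# <!DOCTYPE html>
# <html>
#   <body style="background-color:green;">
#     <h1>{{ message }}</h1>
#   </body>
# </html>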
NbImage("2_pages.png")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import Flask, render_template
app = Flask(__name__)
@app.route("/")
def hello():
return render_template("home.html", message_bienvenue="Bienvenue sur la page d'accueil !")
@app.route("/next")
def suite():
return render_template("page_suivante.html")
#if __name__ == "__main__":
# app.run()
NbImage("login.png")
from flask import Flask, request, render_template
app = Flask(__name__)
@app.route('/')
def home():
return render_template("home.html")
@app.route('/', methods=['POST'])
def text_box():
text = request.form['text']
processed_text = text.upper()
return render_template("bienvenue.html" , message = processed_text )
#if __name__ == '__main__':
# app.run()
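# For request.form['text'] to receive a value, the home.html template used here
# would need an HTML form posting back to '/'. An illustrative sketch (assumed,
# not the original template):
#
# <form method="post" action="/">
#   <input type="text" name="text">
#   <input type="submit" value="Envoyer">
# </form>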
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Default parameters
Step2: Examples
Step3: Correlated incomes
Step4: Negatively correlated
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed, FloatSlider
%matplotlib inline
rho = 0
N = 5
T = 15
V = 1
def shocks(rho=0, N=N, T=T):
'''Returns an N by T matrix of multivariate normally distributed shocks with correlation rho'''
mean = np.zeros(N)
cov = np.ones((N, N), int)*rho*V
np.fill_diagonal(cov, V)
#print(cov)
e = np.random.multivariate_normal(mean, cov, size=T)
return e
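# Quick illustrative check (not in the original): for a long horizon the sample
# correlation between two agents' shocks should be close to the requested rho.
_e = shocks(rho=0.8, N=2, T=10000)
print(np.round(np.corrcoef(_e.T)[0, 1], 2)) # expected to be near 0.8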
def incomes(rho=0, V=V, N = N, T=T):
'''Generates random incomes for N over T periods'''
t = np.arange(T) # time periods
x0 = np.arange(10,10+N*5,5) # average income
e = shocks(rho=rho, N=N, T=T)*V
X = x0 + e
XT = np.sum(X, axis = 1)
w = x0/np.sum(x0)
XS = np.array([XT * wt for wt in w]).T
return t, X, XS
def plot_cons(rho=0, V=1):
#print('rho = {}'.format(rho))
t, X, XS = incomes(rho=rho, V=V, N=N, T=T)
fig, ax = plt.subplots(figsize=(10,8))
ax.plot(t, X,'x-')
ax.plot(t,XS,linestyle='dashed')
ax.set_xlabel('time')
ax.set_xticks(np.arange(T, step=1))
fig.suptitle(r'$\rho = {:0.2f}$'.format(rho))
ax.grid()
plt.tight_layout();
interact(plot_cons, rho=(-0.25,0.99,0.05));
plot_cons(rho=0, V=1)
plot_cons(rho=0.8, V=1)
plot_cons(rho=-0.2, V=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model specification
Step2: That's not so bad. The Normal priors help regularize the weights. Usually we would add a constant b to the inputs but I omitted it here to keep the code cleaner.
Step3: < 40 seconds on my older laptop. That's pretty good considering that NUTS is having a really hard time. Further below we make this even faster. To make it really fly, we probably want to run the Neural Network on the GPU.
Step4: Plotting the objective function (ELBO) we can see that the optimization slowly improves the fit over time.
Step5: Now that we trained our model, lets predict on the hold-out set using a posterior predictive check (PPC). We use sample_ppc() to generate new data (in this case class predictions) from the posterior (sampled from the variational estimation).
Step6: Hey, our neural network did all right!
Step7: Probability surface
Step8: Uncertainty in predicted value
Step9: We can see that very close to the decision boundary, our uncertainty as to which label to predict is highest. You can imagine that associating predictions with uncertainty is a critical property for many applications like health care. To further maximize accuracy, we might want to train the model primarily on samples from that high-uncertainty region.
Step10: While the above might look a bit daunting, I really like the design. Especially the fact that you define a generator allows for great flexibility. In principle, we could just pool from a database there and not have to keep all the data in RAM.
Step11: As you can see, mini-batch ADVI's running time is much lower. It also seems to converge faster.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pymc3 as pm
import theano.tensor as T
import theano
import sklearn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('white')
from sklearn import datasets
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_moons
X, Y = make_moons(noise=0.2, random_state=0, n_samples=1000)
X = scale(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)
fig, ax = plt.subplots()
ax.scatter(X[Y==0, 0], X[Y==0, 1], label='Class 0')
ax.scatter(X[Y==1, 0], X[Y==1, 1], color='r', label='Class 1')
sns.despine(); ax.legend()
ax.set(xlabel='X', ylabel='Y', title='Toy binary classification data set');
# Trick: Turn inputs and outputs into shared variables.
# It's still the same thing, but we can later change the values of the shared variable
# (to switch in the test-data later) and pymc3 will just use the new data.
# Kind-of like a pointer we can redirect.
# For more info, see: http://deeplearning.net/software/theano/library/compile/shared.html
ann_input = theano.shared(X_train)
ann_output = theano.shared(Y_train)
n_hidden = 5
# Initialize random weights between each layer
init_1 = np.random.randn(X.shape[1], n_hidden)
init_2 = np.random.randn(n_hidden, n_hidden)
init_out = np.random.randn(n_hidden)
with pm.Model() as neural_network:
# Weights from input to hidden layer
weights_in_1 = pm.Normal('w_in_1', 0, sd=1,
shape=(X.shape[1], n_hidden),
testval=init_1)
# Weights from 1st to 2nd layer
weights_1_2 = pm.Normal('w_1_2', 0, sd=1,
shape=(n_hidden, n_hidden),
testval=init_2)
# Weights from hidden layer to output
weights_2_out = pm.Normal('w_2_out', 0, sd=1,
shape=(n_hidden,),
testval=init_out)
# Build neural-network using tanh activation function
act_1 = T.tanh(T.dot(ann_input,
weights_in_1))
act_2 = T.tanh(T.dot(act_1,
weights_1_2))
act_out = T.nnet.sigmoid(T.dot(act_2,
weights_2_out))
# Binary classification -> Bernoulli likelihood
out = pm.Bernoulli('out',
act_out,
observed=ann_output)
%%time
with neural_network:
# Run ADVI which returns posterior means, standard deviations, and the evidence lower bound (ELBO)
v_params = pm.variational.advi(n=50000)
with neural_network:
trace = pm.variational.sample_vp(v_params, draws=5000)
plt.plot(v_params.elbo_vals)
plt.ylabel('ELBO')
plt.xlabel('iteration')
# Replace shared variables with testing set
ann_input.set_value(X_test)
ann_output.set_value(Y_test)
# Creater posterior predictive samples
ppc = pm.sample_ppc(trace, model=neural_network, samples=500)
# Use probability of > 0.5 to assume prediction of class 1
pred = ppc['out'].mean(axis=0) > 0.5
fig, ax = plt.subplots()
ax.scatter(X_test[pred==0, 0], X_test[pred==0, 1])
ax.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')
sns.despine()
ax.set(title='Predicted labels in testing set', xlabel='X', ylabel='Y');
print('Accuracy = {}%'.format((Y_test == pred).mean() * 100))
grid = np.mgrid[-3:3:100j,-3:3:100j]
grid_2d = grid.reshape(2, -1).T
dummy_out = np.ones(grid.shape[1], dtype=np.int8)
ann_input.set_value(grid_2d)
ann_output.set_value(dummy_out)
# Creater posterior predictive samples
ppc = pm.sample_ppc(trace, model=neural_network, samples=500)
cmap = sns.diverging_palette(250, 12, s=85, l=25, as_cmap=True)
fig, ax = plt.subplots(figsize=(10, 6))
contour = ax.contourf(*grid, ppc['out'].mean(axis=0).reshape(100, 100), cmap=cmap)
ax.scatter(X_test[pred==0, 0], X_test[pred==0, 1])
ax.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')
cbar = plt.colorbar(contour, ax=ax)
_ = ax.set(xlim=(-3, 3), ylim=(-3, 3), xlabel='X', ylabel='Y');
cbar.ax.set_ylabel('Posterior predictive mean probability of class label = 0');
cmap = sns.cubehelix_palette(light=1, as_cmap=True)
fig, ax = plt.subplots(figsize=(10, 6))
contour = ax.contourf(*grid, ppc['out'].std(axis=0).reshape(100, 100), cmap=cmap)
ax.scatter(X_test[pred==0, 0], X_test[pred==0, 1])
ax.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')
cbar = plt.colorbar(contour, ax=ax)
_ = ax.set(xlim=(-3, 3), ylim=(-3, 3), xlabel='X', ylabel='Y');
cbar.ax.set_ylabel('Uncertainty (posterior predictive standard deviation)');
# Set back to original data to retrain
ann_input.set_value(X_train)
ann_output.set_value(Y_train)
# Tensors and RV that will be using mini-batches
minibatch_tensors = [ann_input, ann_output]
minibatch_RVs = [out]
# Generator that returns mini-batches in each iteration
def create_minibatch(data):
rng = np.random.RandomState(0)
while True:
# Return random data samples of set size 100 each iteration
ixs = rng.randint(len(data), size=50)
yield data[ixs]
minibatches = [
create_minibatch(X_train),
create_minibatch(Y_train),
]
total_size = len(Y_train)
%%time
with neural_network:
# Run advi_minibatch
v_params = pm.variational.advi_minibatch(
n=50000, minibatch_tensors=minibatch_tensors,
minibatch_RVs=minibatch_RVs, minibatches=minibatches,
total_size=total_size, learning_rate=1e-2, epsilon=1.0
)
with neural_network:
trace = pm.variational.sample_vp(v_params, draws=5000)
plt.plot(v_params.elbo_vals)
plt.ylabel('ELBO')
plt.xlabel('iteration')
sns.despine()
pm.traceplot(trace);
<END_TASK>
|