| text_prompt (stringlengths 168-30.3k) | code_prompt (stringlengths 67-124k) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and check data
Step2: ## Analysis
Step3: Does improved weight pruning outperform regular SET?
Step4: Results are not in line with what the previous experiment showed - what changed?
Step5: The epoch at which the maximum validation accuracy is reached consistently decreases for Weight Magnitude pruning - even in cases where the final accuracy is higher, as in the [0, 0.2] range of weight_prune_perc
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from nupic.research.frameworks.dynamic_sparse.common.browser import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rcParams
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set(style="whitegrid")
sns.set_palette("colorblind")
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
plt.rcParams.update({'font.size': 14})
plt.rcParams.update({"axes.grid": True, "grid.linewidth": 0.5})
exps = ['improved_mag_gsc_searchperc2']
paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps]
df = load_many(paths)
df.head(5)
# replace hebbian prune NaNs with 0
df['hebbian_prune_perc'] = df['hebbian_prune_perc'].replace(np.nan, 0.0, regex=True)
df['weight_prune_perc'] = df['weight_prune_perc'].replace(np.nan, 0.0, regex=True)
df.columns
df.shape
df.iloc[1]
df.groupby('model')['model'].count()
# Did any trials fail?
df[df["epochs"]<30]["epochs"].count()
# Removing failed or incomplete trials
df_origin = df.copy()
df = df_origin[df_origin["epochs"]>=30]
df.shape
# which ones failed?
# failed, or still ongoing?
df_origin['failed'] = df_origin["epochs"]<30
df_origin[df_origin['failed']]['epochs']
# helper functions
def mean_and_std(s):
return "{:.3f} ± {:.3f}".format(s.mean(), s.std())
def round_mean(s):
return "{:.0f}".format(round(s.mean()))
stats = ['min', 'max', 'mean', 'std']
def agg(columns, filter=None, round=3):
if filter is None:
return (df.groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'model': ['count']})).round(round)
else:
return (df[filter].groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'model': ['count']})).round(round)
agg(['model'])
agg(['weight_prune_perc'])
agg(['on_perc', 'model'])
# translate model names
rcParams['figure.figsize'] = 16, 8
d = {
'DSNNWeightedMag': 'Dynamic Sparse Neural Network (DSNN)',
'DSNNMixedHeb': 'Sparse Evolutionary Training (SET)',
'SparseModel': 'Static',
}
df_plot = df.copy()
df_plot['model'] = df_plot['model'].apply(lambda x: d[x])
def plot_for_paper():
rcParams['figure.figsize'] = 10,6
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
plt.rcParams.update({'axes.labelsize': 14})
plt.rcParams.update({'legend.fontsize': 14, 'legend.loc': 'lower left'})
plt.rcParams.update({'font.size': 14})
plt.rcParams.update({"axes.grid": True, "grid.linewidth": 0.5})
plt.rcParams.update({'font.family': 'serif'})
plt.rcParams.update({'text.usetex': True})
plot_for_paper()
# sns.scatterplot(data=df_plot, x='weight_prune_perc', y='val_acc_max', hue='model')
fig, ax = plt.subplots()
sns.lineplot(data=df_plot, x='weight_prune_perc', y='val_acc_max', hue='model')
plt.xlabel("% of weights pruned and grown at each epoch ($\\beta$)")
plt.ylabel("test accuracy")
plt.ylim((0.4,1.0))
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[1:], labels=labels[1:])
fig_path = r"~/Google Drive/papers/dynamic_sparse/figures/neurips_2019/pruning_percentage.png"
fig_path = os.path.expanduser(fig_path)
plt.savefig(fig_path, dpi=1000, bbox_inches='tight')
rcParams['figure.figsize'] = 16, 8
filter = df_plot['model'] != 'Static'
sns.lineplot(data=df_plot[filter], x='weight_prune_perc', y='val_acc_max_epoch', hue='model')
sns.lineplot(data=df_plot, x='weight_prune_perc', y='val_acc_last', hue='model')
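# Added sketch (not in the original notebook), assuming df_plot keeps the columns
# used above: a minimal numeric check of the Step 5 observation, i.e. the mean epoch
# at which the maximum validation accuracy is reached, per model and pruning percentage.
df_plot.groupby(['model', 'weight_prune_perc'])['val_acc_max_epoch'].mean().unstack(0)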
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic data analysis on the dataset
Step2: Display Images
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import keras
from keras.datasets import mnist
# Load the datasets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# What is the type of X_train?
# What is the type of y_train?
# Find number of observations in training data
# Find number of observations in test data
# Display first 2 records of X_train
# Display the first 10 records of y_train
# Find the number of observations for each digit in the y_train dataset
# Find the number of observations for each digit in the y_test dataset
# What is the dimension of X_train?. What does that mean?
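# Added sketch (not in the original notebook): one possible way to answer the prompts above.
print(type(X_train), type(y_train))              # both are numpy.ndarray
print(X_train.shape[0], X_test.shape[0])         # observations in train / test
print(X_train[:2])                               # first 2 records (28x28 pixel arrays)
print(y_train[:10])                              # first 10 labels
print(np.unique(y_train, return_counts=True))    # observations per digit in y_train
print(np.unique(y_test, return_counts=True))     # observations per digit in y_test
print(X_train.shape)                             # (n_images, 28, 28): each image is 28x28 pixels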
from matplotlib import pyplot
import matplotlib as mpl
%matplotlib inline
# Displaying the first training data
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
imgplot = ax.imshow(X_train[0], cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
pyplot.show()
# Let's now display the 11th record
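# Added sketch (not in the original notebook), reusing the plotting pattern above;
# the 11th record is index 10.
fig = pyplot.figure()
ax = fig.add_subplot(1, 1, 1)
imgplot = ax.imshow(X_train[10], cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
pyplot.show()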
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set environment variables
Step2: Run the Pipeline locally by using the Beam runner
Step3: Build the container image
Step4: Compile the TFX pipeline using the TFX CLI
Step5: Deploy the compiled pipeline to KFP
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
!pip install -q -U kfp
import os
os.environ['PROJECT_ID'] = 'yourProject' # Set your project.
os.environ['BUCKET'] = 'yourBucket' # Set your bucket.
os.environ['GKE_CLUSTER_NAME'] = 'yourCluster' # Set your GKE cluster name.
os.environ['GKE_CLUSTER_ZONE'] = 'yourClusterZone' # Set your GKE cluster zone.
os.environ['IMAGE_NAME'] = 'tfx-ml'
os.environ['TAG'] = 'tfx0.25.0'
os.environ['ML_IMAGE_URI']=f'gcr.io/{os.environ.get("PROJECT_ID")}/{os.environ.get("IMAGE_NAME")}:{os.environ.get("TAG")}'
os.environ['NAMESPACE'] = 'kubeflow-pipelines'
os.environ['ARTIFACT_STORE_URI'] = f'gs://{os.environ.get("BUCKET")}/tfx_artifact_store'
os.environ['GCS_STAGING_PATH'] = f'{os.environ.get("ARTIFACT_STORE_URI")}/staging'
os.environ['RUNTIME_VERSION'] = '2.2'
os.environ['PYTHON_VERSION'] = '3.7'
os.environ['BEAM_RUNNER'] = 'DirectRunner'
os.environ['MODEL_REGISTRY_URI'] = f'{os.environ.get("ARTIFACT_STORE_URI")}/model_registry'
os.environ['PIPELINE_NAME'] = 'tfx_bqml_scann'
from tfx_pipeline import config
for key, value in config.__dict__.items():
if key.isupper(): print(f'{key}: {value}')
import kfp
import tfx
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx_pipeline import pipeline as pipeline_module
import tensorflow as tf
import ml_metadata as mlmd
from ml_metadata.proto import metadata_store_pb2
import logging
logging.getLogger().setLevel(logging.INFO)
print("TFX Version:", tfx.__version__)
pipeline_root = f'{config.ARTIFACT_STORE_URI}/{config.PIPELINE_NAME}_beamrunner'
model_regisrty_uri = f'{config.MODEL_REGISTRY_URI}_beamrunner'
local_mlmd_sqllite = 'mlmd/mlmd.sqllite'
print(f'Pipeline artifacts root: {pipeline_root}')
print(f'Model registry location: {model_regisrty_uri}')
if tf.io.gfile.exists(pipeline_root):
print("Removing previous artifacts...")
tf.io.gfile.rmtree(pipeline_root)
if tf.io.gfile.exists('mlmd'):
print("Removing local mlmd SQLite...")
tf.io.gfile.rmtree('mlmd')
print("Creating mlmd directory...")
tf.io.gfile.mkdir('mlmd')
metadata_connection_config = metadata_store_pb2.ConnectionConfig()
metadata_connection_config.sqlite.filename_uri = local_mlmd_sqllite
metadata_connection_config.sqlite.connection_mode = 3
print("ML metadata store is ready.")
beam_pipeline_args = [
f'--runner=DirectRunner',
f'--project={config.PROJECT_ID}',
f'--temp_location={config.ARTIFACT_STORE_URI}/beam/tmp'
]
pipeline_module.SCHEMA_DIR = 'tfx_pipeline/schema'
pipeline_module.LOOKUP_CREATOR_MODULE = 'tfx_pipeline/lookup_creator.py'
pipeline_module.SCANN_INDEXER_MODULE = 'tfx_pipeline/scann_indexer.py'
runner = BeamDagRunner()
pipeline = pipeline_module.create_pipeline(
pipeline_name=config.PIPELINE_NAME,
pipeline_root=pipeline_root,
project_id=config.PROJECT_ID,
bq_dataset_name=config.BQ_DATASET_NAME,
min_item_frequency=15,
max_group_size=10,
dimensions=50,
num_leaves=500,
eval_min_recall=0.8,
eval_max_latency=0.001,
ai_platform_training_args=None,
beam_pipeline_args=beam_pipeline_args,
model_regisrty_uri=model_regisrty_uri,
metadata_connection_config=metadata_connection_config,
enable_cache=True
)
runner.run(pipeline)
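# Added sketch (not in the original notebook), assuming the Beam run above completed:
# optionally inspect what was recorded in the local SQLite ML Metadata store.
from ml_metadata.metadata_store import metadata_store
store = metadata_store.MetadataStore(metadata_connection_config)
for artifact_type in store.get_artifact_types():
    print(artifact_type.name)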
!gcloud builds submit --tag $ML_IMAGE_URI tfx_pipeline
!rm ${PIPELINE_NAME}.tar.gz
!tfx pipeline compile \
--engine=kubeflow \
--pipeline_path=tfx_pipeline/runner.py
%%bash
gcloud container clusters get-credentials ${GKE_CLUSTER_NAME} --zone ${GKE_CLUSTER_ZONE}
export KFP_ENDPOINT=$(kubectl describe configmap inverse-proxy-config -n ${NAMESPACE} | grep "googleusercontent.com")
kfp --namespace=${NAMESPACE} --endpoint=${KFP_ENDPOINT} \
pipeline upload \
--pipeline-name=${PIPELINE_NAME} \
${PIPELINE_NAME}.tar.gz
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2:
Step3:
Step4:
|
<ASSISTANT_TASK:>
Python Code:
5 == 5
5 == 4
1 == 1.0
5 == '5'
2 + 1 == 3
2 ** 3 - 1 == 8 - 1
'a' == 'a'
'Hello Buya' == "Hello Buya"
'hello buya' == 'Hello Buya'
type(True)
type(False)
5 == 3
1 != 1
5 > 3
5 < 3
'hello' == 'Hello'
'MARIO' == 'MARIO '
1.5 == 1
1.0 == 1
1 == 1.0
5 != 7 - 2
5 >= 5.0
'5' > '24' # see the warning below
'Hell' in 'Hello'
'hell' in 'Hello'
'tom' in 'to master'
'2' in '20'
2 in '20' # Python will raise an error. Why?
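# Added note (not in the original notebook): the `in` operator on a string requires
# the left operand to also be a string, so `2 in '20'` raises a TypeError.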
price_of_pizza = 100 # NIS
slices_in_pizza = 8
favorite_topping = 'pineapple' # Don't judge!
delivery_time = 30 # In minutes
price_of_pizza >= 50
slices_in_pizza == 6
favorite_topping != 'olives'
price_of_pizza / slices_in_pizza > 10
delivery_time <= 30 # I'm hungry!
favorite_topping = 'olives' # There is a trap here. Try to understand what happens.
favorite_topping == 'pineapple' # What will this return after running the previous line? What would it return if it had not run?
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Boolean operations
Step2: Trying it out
Step3: OR gate
Step4: It looks complicated, but in the case $K_1=K_2$ and $n_1=n_2$ it reduces to the same form as the Hill equation
Step5: In fact, something similar can be achieved by controlling degradation instead
Step6: Considering the steady state of the equations above when A and B are each either 0 or abundantly present
Step7: NOT gate
Step8: Feedback control
Step9: Now add a control so that the $A{\rightarrow}B$ reaction is suppressed as the amount of C increases
Step10: Next, add expression and degradation to the previous model
Step11: Adding repression of expression that depends on the amount of C
Step12: Negative feedback control
Step13: In this case, when the Hill coefficient becomes small (the switch becomes softer), the oscillation disappears
Step14: Feedforward control
Step15: Now add a control so that the $B{\rightarrow}C$ reaction is suppressed when the amount of A is large
Step16: Changing how the control is applied
Step17: Coherent Feedforward Loop (FFL)
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from ecell4 import *
import matplotlib.pylab as plt
import numpy as np
import seaborn
seaborn.set(font_scale=1.5)
import matplotlib as mpl
mpl.rc("figure", figsize=(6, 4))
def Hill(E, Km, nH):
return E ** nH / (Km ** nH + E ** nH)
data = np.array([[Hill(A, 0.5, 8) * Hill(B, 0.5, 8)
for B in np.linspace(0, 1, 21)]
for A in np.linspace(0, 1, 21)])
plt.imshow(data, cmap='coolwarm')
plt.xlabel('A')
plt.ylabel('B')
plt.colorbar()
plt.clim(0, 1)
plt.show()
with reaction_rules():
~A > A | 0.2
~B > B | 0.2
~X > X | Hill(A, 0.2, 8) * Hill(B, 0.6, 8)
X > ~X | 1.0
~Y > Y | Hill(A, 0.2, 8)
Y > ~Y | 1.0
run_simulation(5, species_list=['X', 'Y', 'A'],
opt_args=['-', lambda t: 0.2, '--', lambda t: 0.6, '--'])
def f(A, B, K1, K2, n1, n2):
term1 = (A / K1) ** n1
term2 = (B / K2) ** n1
return (term1 + term2) / (1 + term1 + term2)
data = np.array([[f(A, B, 0.5, 0.5, 8, 8) for B in np.linspace(0, 1, 21)]
for A in np.linspace(0, 1, 21)])
plt.imshow(data, cmap='coolwarm')
plt.xlabel('A')
plt.ylabel('B')
plt.colorbar()
plt.clim(0, 1)
plt.show()
with reaction_rules():
~A > A | 0.2
~B > B | 0.2
~X > X | f(A, B, 0.2, 0.6, 8, 8)
X > ~X | 1.0
~Y > Y | Hill(B, 0.6, 8)
Y > ~Y | 1.0
run_simulation(5, species_list=['X', 'Y', 'A'],
opt_args=['-', lambda t: 0.2, '--', lambda t: 0.6, '--'])
def Hill_compl(E, Km, nH):
return Km ** nH / (Km ** nH + E ** nH)
data = np.array([[Hill(A, 0.5, 8) / (1 + 100 * Hill_compl(B, 0.3, 8))
for B in np.linspace(0, 1, 21)]
for A in np.linspace(0, 1, 21)])
plt.imshow(data, cmap='coolwarm')
plt.xlabel('A')
plt.ylabel('B')
plt.colorbar()
plt.clim(0, 1)
plt.show()
with reaction_rules():
~A > A | 0.2
~B > B | 0.2
~X > X | Hill(A, 0.2, 8)
X > ~X | (1.0 + 100 * Hill_compl(B, 0.36, 8)) * X
~Y > Y | Hill(A, 0.2, 8)
Y > ~Y | 1.0
run_simulation(5, species_list=['X', 'Y', 'A'],
opt_args=['-', lambda t: 0.2, '--', lambda t: 0.36, '--'],
opt_kwargs={'ylim': (0, 1)})
x = np.linspace(0, 1, 101)
nH = 8
plt.plot(x, [Hill(xi, 0.5, nH) for xi in x], label='Hill eq.')
plt.plot(x, [Hill_compl(xi, 0.5, nH) for xi in x],
label='Complementary Hill eq.')
plt.legend(loc='best')
plt.xlabel('INPUT')
plt.ylabel('OUTPUT')
plt.show()
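# Added check (not in the original notebook): the "complementary" Hill function
# used above is simply 1 - Hill, since the two terms share the same denominator.
E = np.linspace(0, 1, 11)
print(np.allclose(Hill_compl(E, 0.5, 8), 1 - Hill(E, 0.5, 8)))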
with reaction_rules():
A > B | 1
B > C | 1
run_simulation(5, {'A': 1})
with reaction_rules():
A > B | 1
B > C | 1
obs = run_simulation(5, {'A': 1}, return_type='observer')
with reaction_rules():
A > B | 1 * Hill_compl(C, 0.1, 8) * A
B > C | 1
run_simulation(5, {'A': 1}, opt_args=('-', obs, '--'))
with reaction_rules():
~A > A | 1
A > B | 1 > C | 1 > ~C | 1
run_simulation(10)
with reaction_rules():
~A > A | Hill_compl(C, 0.5, 8)
A > B | 1 > C | 1 > ~C | 1
run_simulation(16, opt_args=['-', lambda t: 0.5, '--'],
opt_kwargs={'ylim': (0, 1)})
with reaction_rules():
~A > A | Hill_compl(C, 0.3, 8) > ~A | 1
~B > B | Hill_compl(A, 0.5, 8) > ~B | 1
~C > C | Hill_compl(B, 0.7, 8) > ~C | 1
run_simulation(np.linspace(0, 14, 201))
with reaction_rules():
~A > A | Hill_compl(C, 0.3, 4) > ~A | 1
~B > B | Hill_compl(A, 0.5, 4) > ~B | 1
~C > C | Hill_compl(B, 0.7, 4) > ~C | 1
run_simulation(np.linspace(0, 14, 201))
with reaction_rules():
A > B | 1 > C | 1
run_simulation(8, {'A': 1})
with reaction_rules():
A > B | 1 > C | 1
obs = run_simulation(8, {'A': 1}, return_type='observer')
with reaction_rules():
A > B | 1 > C | Hill_compl(A, 0.05, 8) * B
run_simulation(8, {'A': 1}, opt_args=('-', obs, '--'))
with reaction_rules():
~A > A | 1 > ~A | 1
~B > B | Hill(A, 0.5, 8) > ~B | 1
~C > C | Hill(A, 0.5, 8) * Hill_compl(B, 0.5, 8) > ~C | 1
run_simulation(5, opt_args=('-', lambda t: 0.5, '--'))
with reaction_rules():
~B > B | Hill(A, 0.2, 8) > ~B | 1
~C > C | Hill(A, 0.2, 8) * Hill(B, 0.5, 8) > ~C | 1
from ecell4_base.core import *
from ecell4_base import ode
m = get_model()
w = ode.World()
sim = ode.Simulator(w, m)
obs = FixedIntervalNumberObserver(0.01, ['A', 'B', 'C'])
sim.run(1, obs)
w.set_value(Species('A'), 1); sim.initialize()
sim.run(0.3, obs)
w.set_value(Species('A'), 0)
sim.initialize()
sim.run(4, obs)
w.set_value(Species('A'), 1)
sim.initialize()
sim.run(1.5, obs)
w.set_value(Species('A'), 0)
sim.initialize()
sim.run(3.2, obs)
viz.plot_number_observer(obs, '-', lambda t: 0.5, '--')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <br>
Step2: <br>
Step3: <br>
Step4: <br>
Step5: <br>
Step6: <br>
Step7: <br>
Step8: <br>
Step9: Searching in a Series
Step10: <br>
Step11: <br>
Step12: <br>
Step13: <br>
Step14: <br>
Step15: <br>
Step16: <br>
Step17: <br>
Step18: <br>
Step19: <br>
Step20: <br>
Step21: <br>
Step22: <br>
Step23: The DataFrame data structure
Step24: <br>
Step25: <br>
Step26: <br>
Step27: <br>
Step28: <br>
Step29: <br>
Step30: <br>
Step31: <br>
Step32: <br>
Step33: <br>
Step34: <br>
Step35: <br>
Step36: Reading a DataFrame
Step37: <br>
Step38: <br>
Step39: <br>
Step40: Searching in a DataFrame
Step41: <br>
Step42: <br>
Step43: If we count over the original data, we will see that there are 147 countries. Count the countries for which the Boolean mask returned False >.<
Step44: <br>
Step45: <br>
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd.Series?
animales = ['Tigre', 'Oso', 'Camello']
pd.Series(animales)
numeros = [1, 2, 3]
pd.Series(numeros)
animales = ['Tigre', 'Oso', None]
pd.Series(animales)
numeros = [1, 2, None]
pd.Series(numeros)
import numpy as np
np.nan == None
np.nan == np.nan
print(np.isnan(np.nan))
print(None is None)
print(np.nan is np.nan)
deportes = {'Capoeira': 'Brasil',
'Rayuela': 'Chile',
'Pelota Vasca': 'País Vasco',
'Béisbol': 'Cuba',
'Rugby': 'Gales',
'Golf': 'Escocia',
'Corrida de Toros': 'España',
'Sumo': 'Japón'}
s = pd.Series(deportes)
s
s.index
s = pd.Series(['Tigre', 'Oso', 'Camello'], index=['India', 'America', 'Africa'])
s
deportes = {'Capoeira': 'Brasil',
'Rayuela': 'Chile',
'Pelota Vasca': 'País Vasco',
'Béisbol': 'Cuba',
'Rugby': 'Gales',
'Golf': 'Escocia',
'Corrida de Toros': 'España',
'Sumo': 'Japón'}
s = pd.Series(deportes, index=['Capoeira', 'Sumo', 'Pelota Vasca', 'Natación'])
s
deportes = {'Capoeira': 'Brasil',
'Rayuela': 'Chile',
'Pelota Vasca': 'País Vasco',
'Béisbol': 'Cuba',
'Rugby': 'Gales',
'Golf': 'Escocia',
'Corrida de Toros': 'España',
'Sumo': 'Japón'}
s = pd.Series(deportes)
s
s.iloc[4]
s.loc['Pelota Vasca']
s[4]
s['Pelota Vasca']
deportes = {99: 'Brasil',
100: 'Chile',
101: 'País Vasco',
102: 'Cuba',
103: 'Gales',
104: 'Escocia',
105: 'España',
106: 'Japón'}
s = pd.Series(deportes)
s
s[0] # This will not call s.iloc[0] as we might expect and will raise an error
s.iloc[0]
s.loc[99]
s = pd.Series([105.00, 223.00, 5, 102.00, 27, -126])
s
total = 0
for elemento in s:
total+=elemento
print(total)
import numpy as np
total = np.sum(s)
print(total)
s = pd.Series(np.random.randint(0,1000,10000))
print(s.head())
print(len(s))
%%timeit -n 100
sumar = 0
for elemento in s:
sumar+=elemento
%%timeit -n 100
sumar = np.sum(s)
s+=2 # Add 2 to each element of the series using broadcasting
s.head()
for etiqueta, valor in s.iteritems():
s.set_value(etiqueta, valor+2)
s.head()
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
for etiqueta, valor in s.iteritems():
s.loc[etiqueta]= valor+2
%%timeit -n 10
s = pd.Series(np.random.randint(0,1000,10000))
s+=2
import pandas as pd
s = pd.Series([1, 2, 3])
s.loc['Animal'] = 'Oso'
s
deportes_originales = pd.Series({'Capoeira': 'Brasil',
'Rayuela': 'Chile',
'Pelota Vasca': 'País Vasco',
'Béisbol': 'Cuba',
'Rugby': 'Gales',
'Golf': 'Escocia',
'Corrida de Toros': 'España',
'Sumo': 'Japón'})
paises_que_aman_el_beisbol = pd.Series(['Venezuela',
'USA',
'Cuba',
'Puerto Rico',
'Dominicana'],
index=['Béisbol',
'Béisbol',
'Béisbol',
'Béisbol',
'Béisbol'])
todos_los_paises = deportes_originales.append(paises_que_aman_el_beisbol)
deportes_originales
paises_que_aman_el_beisbol
todos_los_paises
todos_los_paises.loc['Béisbol']
import pandas as pd
compra_1 = pd.Series({'Nombre': 'Adelis',
'Artículo comprado': 'Libro',
'Costo': 1200})
compra_2 = pd.Series({'Nombre': 'Miguel',
'Artículo comprado': 'Raspberry pi 3',
'Costo': 15000})
compra_3 = pd.Series({'Nombre': 'Jaime',
'Artículo comprado': 'Balón',
'Costo': 5000})
df = pd.DataFrame([compra_1, compra_2, compra_3], index=['Tienda 1', 'Tienda 1', 'Tienda 2'])
df.head()
df.loc['Tienda 2']
type(df.loc['Tienda 2'])
df.loc['Tienda 1']
df.loc['Tienda 1', 'Costo']
df.T
df.T.loc['Costo']
df['Costo']
df.loc['Tienda 1']['Costo']
df.loc[:,['Nombre', 'Costo']]
df.drop('Tienda 1')
df
copiar_df = df.copy()
copiar_df = copiar_df.drop('Tienda 1')
copiar_df
copiar_df.drop?
del copiar_df['Costo']
copiar_df
df['Ubicación'] = ['Venezuela', 'Chile', 'Argentina']
df
!cat olympics.csv
import pandas as pd
df = pd.read_csv('olympics.csv')
df.head()
df = pd.read_csv('olympics.csv', index_col = 0, skiprows=1)
df.head()
df.columns
for col in df.columns:
if col[:2]=='01':
df.rename(columns={col:'Gold' + col[4:]}, inplace=True)
if col[:2]=='02':
df.rename(columns={col:'Silver' + col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze' + col[4:]}, inplace=True)
if col[:1]=='№':
df.rename(columns={col:'#' + col[1:]}, inplace=True)
df.head()
df['Gold'] > 0
only_gold = df.where(df['Gold'] > 0)
only_gold.head()
only_gold['Gold'].count()
df['Gold'].count()
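# Added sketch (not in the original notebook): the count asked for in the description,
# i.e. the countries for which the Boolean mask `df['Gold'] > 0` was False.
(~(df['Gold'] > 0)).sum()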
len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)])
df[(df['Gold.1'] > 0) & (df['Gold'] == 0)]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Beautiful is better than ugly.<br>
Step2: Data types in np.array
Step3: Creating arrays in numpy
Step4: Filling an array
Step5: Random values
Step6: $$f(x) = kx+b$$
Step7: $$MSE(X,\omega, y) = \frac{1}{N} \sum_i (f(x_i, \omega) - y_i)^2$$
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import this
import numpy as np
np.array([1,2,3])
a = np.array([[1,2,3], [4,5,6]])
a = np.array([1,2,3])
b = np.array([4,5,6])
a+b
a*b
a/b
a**b
np.array([1, 2, 4], dtype=np.float32)
a = np.array([1,2,3])
print(a.dtype)
print(a.astype(np.float64).dtype)
np.arange(2, 10, 3, dtype=np.float32)
np.linspace(1,10,10000)
np.zeros((3,1),dtype=np.float16)
np.ones((5,3),dtype=np.float16)
np.random.random((4,2,3))
np.random.randint(1,10,(5,3))
np.random.normal(5, 6, (4,2))
np.random.seed(42)
a = np.zeros((3,2))
b = np.ones((3,2))
np.hstack([a,b])
np.vstack([a, b])
a
a.shape
b = np.array([[1,2],[3,4],[5,6]])
b.T
a.dot(b)
X = np.arange(1,11).reshape((-1,1))
y = np.arange(2,12)+np.random.normal(size=(10))
y = y.reshape((-1,1))
W = np.random.random((2,1))
X = np.hstack([X, np.ones((10,1))])
def f(X, W):
    return X.dot(W)

f(X, W)
def MSE(X, W, y):
return (X.dot(W)-y).T.dot(X.dot(W)-y)/X.shape[0]
def dMSE(X, W, y):
return 2/X.shape[0]*X.T.dot((X.dot(W)-y))
def optimize(W, X, y, a):
    # Note: this first version never returns W, so the caller cannot see the result;
    # a corrected version that also records the trajectory follows below.
    for i in range(1000):
        W = W - a*dMSE(X, W, y)
MSE(X, W, y)
dMSE(X,W,y)
def optimize(W,X,y,a):
global coef, mses
coef = []
mses = []
for i in range(1000):
coef.append(W)
mses.append(MSE(X,W,y)[0,0])
W = W - a*dMSE(X,W,y)
# print(MSE(X,W,y))
return W
W = np.random.random((2,1))
P = optimize(W, X, y, 0.02)
coef = np.array(coef)
ylabel("k")
xlabel("b")
plot(coef[:,0,0], coef[:,1,0]);
ylabel("MSE")
xlabel("iteration")
plot(mses);
scatter(X[:,0],y.reshape(-1))
plot(X[:,0], f(X, W))
plot(X[:,0], f(X, P))
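# Added check (not in the original notebook): compare the loss of the random initial
# weights W with the optimized weights P returned by gradient descent.
print(MSE(X, W, y), MSE(X, P, y))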
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The histogram tells us a few things
Step2: <img src="Normal_Distribution_PDF.svg">
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
catsData = pd.read_csv('../data/cats.csv')
catsData.head()
%matplotlib inline
import matplotlib.pyplot as plt
catsData.Hwt.hist()
import numpy as np
normal_samples = np.random.normal(loc=-2, scale=0.5, size=500)
%matplotlib inline
plt.hist(normal_samples)
print('Percent of heart weights greater than 10:', (catsData.Hwt > 10).mean())
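# Added sketch (not in the original notebook), assuming scipy and a recent matplotlib
# are available: overlay a normal curve fitted to the heart weights on the histogram.
from scipy import stats
mu, sigma = catsData.Hwt.mean(), catsData.Hwt.std()
x = np.linspace(catsData.Hwt.min(), catsData.Hwt.max(), 200)
plt.hist(catsData.Hwt, bins=20, density=True)
plt.plot(x, stats.norm.pdf(x, mu, sigma))
plt.xlabel('Heart weight (g)')
plt.show()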
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1) What is the average renewable freshwater resource?
Step2: 2) What is the average withdrawl rate in 2014?
Step3: 3) Which are the 5 countries with most/least renewable freshwater (total, 2014)?
Step4: 4) How much freshwater is there per 100,000 inhabitants for each country? TOP5 with most, TOP5 with least.
Step5: 5) Compared between 2002 and 2014, how did water withdrawl change?
Step6: 6) 5 with biggest increase in withdrawl, 5 with biggest decrease in withdrawl
Step7: 7) Scatter plot
Step8: 8) Scatter plot
Step9: 9) Scatter plot
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
% matplotlib inline
df = pd.read_csv('water_data_class.csv', encoding='latin-1')
df
# only countries with more than 25 million inhabitants and those who have values in every column (2002 that one with most non-values)
big_ones = df[df['population'] > 25000000]
notnull = df[df['annual freshwater withdrawl [%] 2002'].notnull()]
selection = df[(df['population'] > 25000000) & (df['annual freshwater withdrawl [%] 2002'].notnull()) & (df['annual freshwater withdrawl [%] 2002'] != 0)]
selection
selection['total renewable freshwater resources [billion cubic meters] 2014'].describe()
selection['annual freshwater withdrawl [%] 2014'].describe()
# @ TA: values over 100% indicate that, on top of renewable resources, water from non-renewable sources is withdrawn,
# as described in the World Bank metadata for the original datasets
selection.sort_values('total renewable freshwater resources [billion cubic meters] 2014', ascending = True).head(5)
selection.sort_values('total renewable freshwater resources [billion cubic meters] 2014', ascending = False).head(5)
#inserting column with population converted to population in 100,000 inhabitants
population_hundret = selection['population']/100000
selection.insert(3,"population in 100,000 inhabitants", population_hundret)
selection
# putting water and population of 100,000 inhabitants into relation
water_per_hundret = (selection['total renewable freshwater resources [billion cubic meters] 2014']/population_hundret)*1000
selection.insert(6, "Renewable freshwater resource per 100,000 inhabitants [million cubic meters]", water_per_hundret)
# sorting for countries with most water
selection.sort_values(by='Renewable freshwater resource per 100,000 inhabitants [million cubic meters]', ascending = False).head(5)
#plotting those with most
plt.style.use('ggplot')
#does not print sorted bars
#selection.sort_values(by='Renewable freshwater resource per 100,000 inhabitants [million cubic meters]', ascending = False).head(5).plot(kind='barh', x='country', y='total renewable freshwater resources [billion cubic meters] 2014', legend=False)
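# Added sketch (not in the original notebook): plotting the same per-capita column that
# was sorted on, reversed so barh puts the largest value at the top, may give the
# sorted bars the commented-out line above was after.
selection.sort_values(by='Renewable freshwater resource per 100,000 inhabitants [million cubic meters]', ascending=False).head(5).iloc[::-1].plot(
    kind='barh', x='country',
    y='Renewable freshwater resource per 100,000 inhabitants [million cubic meters]', legend=False)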
# sorting for countries with least water
selection.sort_values(by='Renewable freshwater resource per 100,000 inhabitants [million cubic meters]').head(5)
#plotting those with least
selection.sort_values(by='Renewable freshwater resource per 100,000 inhabitants [million cubic meters]').head(5).plot(kind="barh", x ="country", y ="Renewable freshwater resource per 100,000 inhabitants [million cubic meters]", legend=False)
selection.columns
withdrawl = selection[['country','annual freshwater withdrawl [%] 2002','annual freshwater withdrawl [%] 2014']]
withdrawl_diff=selection['annual freshwater withdrawl [%] 2014'] - selection['annual freshwater withdrawl [%] 2002']
withdrawl.insert(3, 'Change in withdrawl rate', withdrawl_diff)
withdrawl.head()
withdrawl.sort_values(by='Change in withdrawl rate').head(5)
withdrawl.sort_values(by='Change in withdrawl rate').tail(5)
selection.sort_values(by='population in 100,000 inhabitants', ascending= False).head(10).plot(kind='scatter', x='population in 100,000 inhabitants', y='annual freshwater withdrawl [%] 2014')
#would be nice to put the country's name next to each dot, no idea how to do that unfortunately. Couldn't google useful things
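# Added sketch (not in the original notebook): one way to label each point with its
# country name, using matplotlib's annotate on the axes returned by pandas.
top10 = selection.sort_values(by='population in 100,000 inhabitants', ascending=False).head(10)
ax = top10.plot(kind='scatter', x='population in 100,000 inhabitants', y='annual freshwater withdrawl [%] 2014')
for _, row in top10.iterrows():
    ax.annotate(row['country'], (row['population in 100,000 inhabitants'], row['annual freshwater withdrawl [%] 2014']))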
selection.sort_values(by='population in 100,000 inhabitants').head(10).plot(kind='scatter', x='population in 100,000 inhabitants', y='annual freshwater withdrawl [%] 2014')
#would be nice to put the country's name next to each dot, no idea how to do that unfortunately. Couldn't google useful things
#those with most water
selection.sort_values(by='total renewable freshwater resources [billion cubic meters] 2014', ascending=False).head(10).plot(kind='scatter', x='population in 100,000 inhabitants', y='annual freshwater withdrawl [%] 2014')
#those with least water
selection.sort_values(by='total renewable freshwater resources [billion cubic meters] 2014').head(10).plot(kind='scatter', x='population in 100,000 inhabitants', y='annual freshwater withdrawl [%] 2014')
selection.plot(kind='scatter', x='annual freshwater withdrawl [%] 2014', y='Renewable freshwater resource per 100,000 inhabitants [million cubic meters]', xlim=(0,80), ylim=(0,3500))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SPA output
Step2: Speed tests
Step3: This numba test will only work properly if you have installed numba.
Step4: The numba calculation takes a long time the first time that it's run because it uses LLVM to compile the Python code to machine code. After that it's about 4-10 times faster depending on your machine. You can pass a numthreads argument to this function. The optimum numthreads depends on your machine and is equal to 4 by default.
|
<ASSISTANT_TASK:>
Python Code:
import datetime
# scientific python add-ons
import numpy as np
import pandas as pd
# plotting stuff
# first line makes the plots appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
# seaborn makes your plots look better
try:
import seaborn as sns
sns.set(rc={"figure.figsize": (12, 6)})
except ImportError:
print('We suggest you install seaborn using conda or pip and rerun this cell')
# finally, we import the pvlib library
import pvlib
import pvlib
from pvlib.location import Location
tus = Location(32.2, -111, 'US/Arizona', 700, 'Tucson')
print(tus)
golden = Location(39.742476, -105.1786, 'America/Denver', 1830, 'Golden')
print(golden)
golden_mst = Location(39.742476, -105.1786, 'MST', 1830, 'Golden MST')
print(golden_mst)
berlin = Location(52.5167, 13.3833, 'Europe/Berlin', 34, 'Berlin')
print(berlin)
times = pd.date_range(start=datetime.datetime(2014,6,23), end=datetime.datetime(2014,6,24), freq='1Min')
times_loc = times.tz_localize(tus.pytz)
times
pyephemout = pvlib.solarposition.pyephem(times_loc, tus.latitude, tus.longitude)
spaout = pvlib.solarposition.spa_python(times_loc, tus.latitude, tus.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
spaout['elevation'].plot(label='spa')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('spa')
print(spaout.head())
plt.figure()
pyephemout['elevation'].plot(label='pyephem')
spaout['elevation'].plot(label='spa')
(pyephemout['elevation'] - spaout['elevation']).plot(label='diff')
plt.legend(ncol=3)
plt.title('elevation')
plt.figure()
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
spaout['elevation'].plot(label='spa')
(pyephemout['apparent_elevation'] - spaout['elevation']).plot(label='diff')
plt.legend(ncol=3)
plt.title('elevation')
plt.figure()
pyephemout['apparent_zenith'].plot(label='pyephem apparent')
spaout['zenith'].plot(label='spa')
(pyephemout['apparent_zenith'] - spaout['zenith']).plot(label='diff')
plt.legend(ncol=3)
plt.title('zenith')
plt.figure()
pyephemout['apparent_azimuth'].plot(label='pyephem apparent')
spaout['azimuth'].plot(label='spa')
(pyephemout['apparent_azimuth'] - spaout['azimuth']).plot(label='diff')
plt.legend(ncol=3)
plt.title('azimuth')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
spaout = pvlib.solarposition.spa_python(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
spaout['elevation'].plot(label='spa')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('spa')
print(spaout.head())
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(golden.tz), golden.latitude, golden.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
loc = berlin
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
ephemout['apparent_elevation'].plot(label='ephem apparent')
plt.legend(ncol=2)
plt.title('elevation')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
ephemout['apparent_elevation'].plot(label='ephem apparent')
plt.legend(ncol=2)
plt.title('elevation')
plt.xlim(pd.Timestamp('2015-06-28 03:00:00+02:00'), pd.Timestamp('2015-06-28 06:00:00+02:00'))
plt.ylim(-10,10)
loc = berlin
times = pd.DatetimeIndex(start=datetime.date(2015,3,28), end=datetime.date(2015,3,29), freq='5min')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
plt.figure()
pyephemout['azimuth'].plot(label='pyephem')
ephemout['azimuth'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('azimuth')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
loc = berlin
times = pd.DatetimeIndex(start=datetime.date(2015,3,30), end=datetime.date(2015,3,31), freq='5min')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
plt.figure()
pyephemout['azimuth'].plot(label='pyephem')
ephemout['azimuth'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('azimuth')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
loc = berlin
times = pd.DatetimeIndex(start=datetime.date(2015,6,28), end=datetime.date(2015,6,29), freq='5min')
pyephemout = pvlib.solarposition.pyephem(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
ephemout = pvlib.solarposition.ephemeris(times.tz_localize(loc.tz), loc.latitude, loc.longitude)
pyephemout['elevation'].plot(label='pyephem')
pyephemout['apparent_elevation'].plot(label='pyephem apparent')
ephemout['elevation'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('elevation')
plt.figure()
pyephemout['azimuth'].plot(label='pyephem')
ephemout['azimuth'].plot(label='ephem')
plt.legend(ncol=2)
plt.title('azimuth')
print('pyephem')
print(pyephemout.head())
print('ephem')
print(ephemout.head())
times_loc = times.tz_localize(loc.tz)
%%timeit
pyephemout = pvlib.solarposition.pyephem(times_loc, loc.latitude, loc.longitude)
#ephemout = pvlib.solarposition.ephemeris(times, loc)
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.ephemeris(times_loc, loc.latitude, loc.longitude)
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.get_solarposition(times_loc, loc.latitude, loc.longitude,
method='nrel_numpy')
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.get_solarposition(times_loc, loc.latitude, loc.longitude,
method='nrel_numba')
%%timeit
#pyephemout = pvlib.solarposition.pyephem(times, loc)
ephemout = pvlib.solarposition.get_solarposition(times_loc, loc.latitude, loc.longitude,
method='nrel_numba', numthreads=16)
%%timeit
ephemout = pvlib.solarposition.spa_python(times_loc, loc.latitude, loc.longitude,
how='numba', numthreads=16)
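# Added check (not in the original notebook): a sketch confirming the numpy and numba
# SPA implementations agree to within numerical precision.
spa_numpy = pvlib.solarposition.spa_python(times_loc, loc.latitude, loc.longitude, how='numpy')
spa_numba = pvlib.solarposition.spa_python(times_loc, loc.latitude, loc.longitude, how='numba')
print((spa_numpy['zenith'] - spa_numba['zenith']).abs().max())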
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-1', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
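# Illustrative usage sketch only (the value shown is hypothetical, not an actual model record):
# each property above is completed by selecting it with DOC.set_id(...) and then supplying
# one of its listed valid choices via DOC.set_value(...), e.g.
# DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# DOC.set_value("irradiance")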
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Summarize the data that was just read in
Step2: Filtering and Sorting Data
Step3: Sorting
Step4: Modifying Columns
Step5: Handling Missing Values
Step6: Exercise
Step7: Solutions
Step8: Indexing and Slicing Data
Step9: loc
Step10: iloc
Step11: Add another level to the index
Step12: Analyzing Across Time
Step13: Split-Apply-Combine
Step14: Merging Data
Step15: Writing Data
Step16: Other Useful Features
Step17: Map existing values to other values
Step18: Pivot rows to columns
Step19: Randomly sample a DataFrame
Step20: Replace all instances of a value
Step21: One more thing...
|
<ASSISTANT_TASK:>
Python Code:
# Import the pandas and numpy libraries
import pandas as pd
import numpy as np
# Read a file with an absolute path
ufo = pd.read_csv('/Users/josiahdavis/Documents/GitHub/python_data_analysis/ufo_sightings.csv')
# Alterntively, read the the file using a relative path
ufo = pd.read_csv('ufo_sightings.csv')
# Alterntively read in the file from the internet
ufo = pd.read_csv('https://raw.githubusercontent.com/josiahdavis/python_data_analysis/master/ufo_sightings.csv')
# Get help on a function
help(pd.read_csv)
ufo.head(10) # Look at the top 10 observations
ufo.tail() # Bottom x observations (defaults to 5)
ufo.describe() # get summary statistics for columns
ufo.index # "the index" (aka "the labels")
ufo.columns # column names (which is "an index")
ufo.dtypes # data types of each column
ufo.values # underlying numpy array
ufo.info() # concise summary
# Select a single column
ufo['State']
ufo.State # This is equivalent
# Select multiple columns
ufo[['State', 'City','Shape Reported']]
my_cols = ['State', 'City', 'Shape Reported']
ufo[my_cols] # This is equivalent
# Logical filtering
ufo[ufo.State == 'TX'] # Select only rows where State == 'TX'
ufo[~(ufo.State == 'TX')] # Select everything where the test fails
ufo[ufo.State != 'TX'] # Same thing as before
ufo.City[ufo.State == 'TX'] # Select only city columm where State == 'TX'
ufo[ufo.State == 'TX'].City # Same thing as before
ufo[(ufo.State == 'CA') | (ufo.State =='TX')] # Select only records where State is 'CA' or State is 'TX'
ufo_dallas = ufo[(ufo.City == 'Dallas') & (ufo.State =='TX')] # Select only Dallas, TX records
ufo[ufo.City.isin(['Austin','Dallas', 'Houston'])] # Select only Austin, Dallas, or Houston records
ufo.State.order() # only works for a Series
ufo.sort_index(inplace=True) # sort rows by label
ufo.sort_index(ascending=False, inplace=False)
ufo.sort_index(by='State') # sort rows by specific column
ufo.sort_index(by=['State', 'Shape Reported']) # sort by multiple columns
ufo.sort_index(by=['State', 'Shape Reported'], ascending=[False, True], inplace=True) # specify sort order
# Add a new column as a function of existing columns
ufo['Location'] = ufo['City'] + ', ' + ufo['State']
ufo.head()
# Rename columns
ufo.rename(columns={'Colors Reported':'Colors', 'Shape Reported':'Shape'}, inplace=True)
ufo.head()
# Hide a column (temporarily)
ufo.drop(['Location'], axis=1)
# Delete a column (permanently)
del ufo['Location']
# Missing values are often just excluded
ufo.describe() # Excludes missing values
ufo.Shape.value_counts() # Excludes missing values
ufo.Shape.value_counts(dropna=False) # Includes missing values
# Find missing values in a Series
ufo.Shape.isnull() # True if NaN, False otherwise
ufo.Shape.notnull() # False if NaN, True otherwise
ufo.Shape.isnull().sum() # Count the missing values
# Find missing values in a DataFrame
ufo.isnull()
# Count the missing values in a DataFrame
ufo.isnull().sum()
# Exclude rows with missing values in a dataframe
ufo[(ufo.Shape.notnull()) & (ufo.Colors.notnull())]
# Drop missing values
ufo.dropna() # Drop a row if ANY values are missing
ufo.dropna(how='all') # Drop a row only if ALL values are missing
# Fill in missing values for a series
ufo.Colors.fillna(value='Unknown', inplace=True)
# Fill in missing values for the DataFrame
ufo.fillna(value='Unknown', inplace=True)
# Read drinks.csv (in the 'drinks_data' folder) into a DataFrame called 'drinks'
# Print the first 10 rows
# Examine the data types of all columns
# Print the 'beer_servings' Series
# Calculate the average 'beer_servings' for the entire dataset
# Print all columns, but only show rows where the country is in Europe
# Calculate the average 'beer_servings' for all of Europe
# Only show European countries with 'wine_servings' greater than 300
# Determine which 10 countries have the highest 'total_litres_of_pure_alcohol'
# Determine which country has the highest value for 'beer_servings'
# Count the number of occurrences of each 'continent' value and see if it looks correct
# Determine which countries do not have continent designations
# Determine the number of countries per continent. Does it look right?
# Read drinks.csv (in the drinks_data folder) into a DataFrame called 'drinks'
drinks = pd.read_csv('drinks_data/drinks.csv')
# Print the first 10 rows
drinks.head(10)
# Examine the data types of all columns
drinks.dtypes
drinks.info()
# Print the 'beer_servings' Series
drinks.beer_servings
drinks['beer_servings']
# Calculate the average 'beer_servings' for the entire dataset
drinks.describe() # Mean is provided in the summary from describe()
drinks.beer_servings.mean() # Alternatively, calculate the mean directly
# Print all columns, but only show rows where the country is in Europe
drinks[drinks.continent=='EU']
# Calculate the average 'beer_servings' for all of Europe (hint: use the .mean() function)
drinks[drinks.continent=='EU'].beer_servings.mean()
# Only show European countries with 'wine_servings' greater than 300
drinks[(drinks.continent=='EU') & (drinks.wine_servings > 300)]
# Determine which 10 countries have the highest 'total_litres_of_pure_alcohol'
drinks.sort_index(by='total_litres_of_pure_alcohol').tail(10)
# Determine which country has the highest value for 'beer_servings' (hint: use the .max() function)
drinks[drinks.beer_servings==drinks.beer_servings.max()].country
drinks[['country', 'beer_servings']].sort_index(by='beer_servings', ascending=False).head(1) # This is equivalent
# Count the number of occurrences of each 'continent' value and see if it looks correct
drinks.continent.value_counts()
# Determine which countries do not have continent designations
drinks[drinks.continent.isnull()].country
# Due to "na_filter = True" default within pd.read_csv()
help(pd.read_csv)
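# Optional illustration (not part of the original exercise): the 'NA' continent label
# (North America) is read as missing because of that default; one way to keep it as a
# string is to disable NaN detection when loading, e.g.
# drinks = pd.read_csv('drinks_data/drinks.csv', na_filter=False)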
ufo.set_index('State', inplace=True)
ufo.index
ufo.index.is_unique
ufo.sort_index(inplace=True)
ufo.head(25)
ufo.loc['FL',:] # row with label FL`
ufo.loc[:'FL',:] # rows with labels through'FL'
ufo.loc['FL':'HI', 'City':'Shape'] # rows FL, columns 'City' through 'Shape Reported'
ufo.loc[:, 'City':'Shape'] # all rows, columns 'City' through 'Shape Reported'
ufo.loc[['FL', 'TX'], ['City','Shape']] # rows FL and TX, columns 'City' and 'Shape Reported'
ufo.iloc[0,:] # row with 0th position (first row)
ufo.iloc[0:3,:] # rows with positions 0 through 2 (not 3)
ufo.iloc[0:3, 0:3] # rows and columns with positions 0 through 2
ufo.iloc[:, 0:3] # all rows, columns with positions 0 through 2
ufo.iloc[[0,2], [0,1]] # 1st and 3rd row, 1st and 2nd column
ufo.set_index('City', inplace=True, append=True) # Adds to existing index
ufo.sort_index(inplace=True)
ufo.head(25)
ufo.loc[['ND', 'WY'],:] # Select all records from ND AND WY
ufo.loc['ND':'WY',:] # Select all records from ND THROUGH WY
ufo.loc[('ND', 'Bismarck'),:] # Select all records from Bismark, ND
ufo.loc[('ND', 'Bismarck'):('ND','Casselton'),:] # Select all records from Bismark, ND through Casselton, ND
ufo.reset_index(level='City', inplace=True) # Remove the City from the index
ufo.head()
ufo.reset_index(inplace=True) # Remove all columns from the index
ufo.head()
# Reset the index
ufo.dtypes
# Convert Time column to date-time format (defined in Pandas)
# Reference: https://docs.python.org/2/library/time.html#time.strftime
ufo['Time'] = pd.to_datetime(ufo['Time'], format="%m/%d/%Y %H:%M")
ufo.dtypes
# Compute date range
ufo.Time.min()
ufo.Time.max()
# Slice using time
ufo[ufo.Time > pd.datetime(1995, 1, 1)] # Slice using the time
ufo[(ufo.Time > pd.datetime(1995, 1, 1)) & (ufo.State =='TX')] # Works with other logical conditions, as expected
# Set the index to time
ufo.set_index('Time', inplace=True)
ufo.sort_index(inplace=True)
ufo.head()
# Access particular times/ranges
ufo.loc['1995',:]
ufo.loc['1995-01',:]
ufo.loc['1995-01-01',:]
# Access range of times/ranges
ufo.loc['1995':,:]
ufo.loc['1995':'1996',:]
ufo.loc['1995-12-01':'1996-01',:]
# Access elements of the timestamp
# Reference: http://pandas.pydata.org/pandas-docs/stable/timeseries.html#time-date-components
ufo.index.year
ufo.index.month
ufo.index.weekday
ufo.index.day
ufo.index.time
ufo.index.hour
# Create a new variable with time element
ufo['Year'] = ufo.index.year
ufo['Month'] = ufo.index.month
ufo['Day'] = ufo.index.day
ufo['Weekday'] = ufo.index.weekday
ufo['Hour'] = ufo.index.hour
# For each year, calculate the count of sightings
ufo.groupby('Year').City.count()
# For each Shape, calculate the first sighting, last sighting, and range of sightings.
ufo.groupby('Shape').Year.min()
ufo.groupby('Shape').Year.max()
# Specify the variable outside of the apply statement
ufo.groupby('Shape').Year.apply(lambda x: x.max())
# Specifiy the variable within the apply statement
ufo.groupby('Shape').apply(lambda x: x.Year.max() - x.Year.min())
# Specify a custom function to use in the apply statement
def get_max_year(df):
try:
return df.Year.max()
except:
return ''
ufo.groupby('Shape').apply(lambda x: get_max_year(x))
# Split/combine can occur on multiple columns at the same time
ufo.groupby(['Weekday','Hour']).City.count()
# Read in population data
pop = pd.read_csv('population.csv')
pop.head()
ufo.head()
# Merge the data together
ufo = pd.merge(ufo, pop, on='State', how = 'left')
# Specify keys if columns have different names
ufo = pd.merge(ufo, pop, left_on='State', right_on='State', how = 'left')
# Observe the new Population column
ufo.head()
# Check for values that didn't make it (length)
ufo.Population.isnull().sum()
# Check for values that didn't make it (values)
ufo[ufo.Population.isnull()]
# Change the records that didn't match up using np.where command
ufo['State'] = np.where(ufo['State'] == 'Fl', 'FL', ufo['State'])
# Alternatively, change the state using native python string functionality
ufo['State'] = ufo['State'].str.upper()
# Merge again, this time get all of the records
ufo = pd.merge(ufo, pop, on='State', how = 'left')
ufo.to_csv('ufo_new.csv')
ufo.to_csv('ufo_new.csv', index=False) # Index is not included in the csv
ufo.duplicated() # Series of logicals
ufo.duplicated().sum() # count of duplicates
ufo[ufo.duplicated(['State','Time'])] # only show duplicates
ufo[ufo.duplicated()==False] # only show unique rows
ufo_unique = ufo[~ufo.duplicated()] # only show unique rows
ufo.duplicated(['State','Time']).sum() # columns for identifying duplicates
ufo['Weekday'] = ufo.Weekday.map({ 0:'Mon', 1:'Tue', 2:'Wed',
3:'Thu', 4:'Fri', 5:'Sat',
6:'Sun'})
ufo.groupby(['Weekday','Hour']).City.count()
ufo.groupby(['Weekday','Hour']).City.count().unstack(0) # Make first row level a column
ufo.groupby(['Weekday','Hour']).City.count().unstack(1) # Make second row level a column
# Note: .stack() transforms columns to rows
idxs = np.random.rand(len(ufo)) < 0.66 # create a Series of booleans
train = ufo[idxs] # will contain about 66% of the rows
test = ufo[~idxs] # will contain the remaining rows
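# Sketch of an equivalent split using pandas' built-in sampler (assumes a pandas version
# that provides DataFrame.sample); shown only as an alternative to the boolean-mask approach above
train_alt = ufo.sample(frac=0.66, random_state=1)
test_alt = ufo.drop(train_alt.index)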
ufo.Shape.replace('DELTA', 'TRIANGLE') # replace values in a Series
ufo.replace('PYRAMID', 'TRIANGLE') # replace values throughout a DataFrame
%matplotlib inline
# Plot the number of sightings over time
ufo.groupby('Year').City.count().plot( kind='line',
color='r',
linewidth=2,
title='UFO Sightings by year')
# Plot the number of sightings over the day of week and time of day
ufo.groupby(['Weekday','Hour']).City.count().unstack(0).plot( kind='line',
linewidth=2,
title='UFO Sightings by Time of Day')
# Plot multiple plots on the same plot (plots neeed to be in column format)
ufo_fourth = ufo[(ufo.Year.isin([2011, 2012, 2013, 2014])) & (ufo.Month == 7)]
ufo_fourth.groupby(['Year', 'Day']).City.count().unstack(0).plot( kind = 'bar',
subplots=True,
figsize=(7,9))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: MLP feedforward neural network
Step2: Parameter
Step3: Tuning Neural Networks
Step4: MLP with two layers for smoother boundary
Step5: L2 Penalty and Neural Network
Step6: Neural Network Weights
Step7: Rescale the data
Step8: Warning from model
Step9: Analysis of Model
|
<ASSISTANT_TASK:>
Python Code:
# imports used throughout this example (mglearn for the example plots, display to render them)
import mglearn
import matplotlib.pyplot as plt
from IPython.display import display
display(mglearn.plots.plot_logistic_regression_graph())
display(mglearn.plots.plot_single_hidden_layer_graph())
display(mglearn.plots.plot_two_hidden_layer_graph())
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, noise=0.25, random_state=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y,
random_state=42)
mlp = MLPClassifier(solver='lbfgs', random_state=0,
hidden_layer_sizes=[10,10])
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=0.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
mlp = MLPClassifier(solver='lbfgs', activation='tanh',
random_state=0, hidden_layer_sizes=[10,10])
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=0.3)
mglearn.discrete_scatter(X_train[:, 0], X_train[:, 1], y_train)
plt.xlabel("Feature 0")
plt.ylabel("Feature 1")
fig, axes = plt.subplots(2, 4, figsize=(20, 8))
for axx, n_hidden_nodes in zip(axes, [10, 100]):
for ax, alpha in zip(axx, [0.0001, 0.01, 0.1, 1]):
mlp = MLPClassifier(solver='lbfgs', random_state=0,
hidden_layer_sizes=[n_hidden_nodes, n_hidden_nodes],
alpha=alpha)
mlp.fit(X_train, y_train)
mglearn.plots.plot_2d_separator(mlp, X_train, fill=True, alpha=0.3, ax=ax)
ax.set_title("n_hidden=[{}, {}]\nalpha={:.4f}".format(n_hidden_nodes,
n_hidden_nodes, alpha))
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
print("Cancer data per-feature maxima\n{}".format(cancer.data.max(axis=0)))
X_train, X_test, y_train, y_test = train_test_split(
cancer.data, cancer.target, stratify=cancer.target, random_state=0)
mlp = MLPClassifier(random_state=42)
mlp.fit(X_train, y_train)
print("Accurary on Training set: {:.2f}".format(mlp.score(X_train, y_train)))
print("Accuracy Test set: {:.2f}".format(mlp.score(X_test, y_test)))
# Compute mean value per feature on Training set
mean_on_train = X_train.mean(axis=0)
# Compute standard deviation of each feature on Training set
std_on_train = X_train.std(axis=0)
# Subtract the mean, and scale by inverse standard deviation
X_train_scaled = (X_train - mean_on_train) / std_on_train
# Do the same for the test set, using the mean and standard deviation of the training set
X_test_scaled = (X_test - mean_on_train) / std_on_train
mlp = MLPClassifier(random_state=0)
mlp.fit(X_train_scaled, y_train)
print("Accurary on Training set: {:.3f}".format(mlp.score(X_train_scaled, y_train)))
print("Accuracy Test set: {:.3f}".format(mlp.score(X_test_scaled, y_test)))
mlp = MLPClassifier(max_iter=1000, alpha=1, random_state=0)
mlp.fit(X_train_scaled, y_train)
print("Accurary on Training set: {:.3f}".format(mlp.score(X_train_scaled, y_train)))
print("Accuracy Test set: {:.3f}".format(mlp.score(X_test_scaled, y_test)))
plt.figure(figsize=(20,5))
plt.imshow(mlp.coefs_[0], interpolation='none', cmap='viridis')
plt.yticks(range(30), cancer.feature_names)
plt.xlabel("Columns in weight matrix")
plt.ylabel("Input feature")
plt.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Class
Step2: This is a basic class definition; the age and salary are needed when creating this object. The new class can be invoked like this
Step3: The __init__ initializes the variables stored in the class. When they are called inside the class, we should add self. in front of the variable. The out(self) method is an arbitrary function that can be used by calling Yourclass.yourfunction(). Inputs to such functions can be added after the self argument.
Step4: The While statement
Step5: Performance
Step6: <img src="images/numba-blue-horizontal-rgb.svg" alt="numba" style="width
Step7: Examples
|
<ASSISTANT_TASK:>
Python Code:
def hello(a,b):
return a+b
# Lazy definition of function
hello(1,1)
hello('a','b')
class Person:
def __init__(self,age,salary):
self.age = age
self.salary = salary
def out(self):
print(self.age)
print(self.salary)
a = Person(30,10000)
a.out()
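# Small illustrative extension (not in the original text): a method that takes an extra
# argument after self, as described in the notes above
class Employee(Person):
    def raise_salary(self, amount):
        self.salary = self.salary + amount
b = Employee(30, 10000)
b.raise_salary(500)
b.out()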
# make a list
students = ['boy', 'boy', 'girl', 'boy', 'girl', 'girl', 'boy', 'boy', 'girl', 'girl', 'boy', 'boy']
boys = 0; girls = 0
for s in students:
if s == 'boy':
boys = boys +1
else:
girls+=1
print("boys:", boys)
print("girls:", girls)
def int_sum(n):
s=0; i=1
while i < n:
s += i*i
i += 1
return s
int_sum(1000)
%timeit int_sum(100000)
import numba
@numba.njit
def int_sum_nb(n):
s=0; i=1
while i < n:
s += i*i
i += 1
return s
int_sum_nb(1000)
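# The call above is a warm-up: it triggers numba's JIT compilation, so the %timeit below
# measures only the compiled code rather than the one-off compilation time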
%timeit int_sum_nb(100000)
import random
def monte_carlo_pi(n):
acc = 0
for i in range(n):
x = random.random()
y = random.random()
if (x**2 + y**2) < 1.0:
acc += 1
return 4.0 * acc / n
monte_carlo_pi(1000000)
%timeit monte_carlo_pi(1000000)
@numba.njit
def monte_carlo_pi_nb(n):
acc = 0
for i in range(n):
x = random.random()
y = random.random()
if (x**2 + y**2) < 1.0:
acc += 1
return 4.0 * acc / n
monte_carlo_pi_nb(1000000)
%timeit monte_carlo_pi_nb(1000000)
@numba.njit(parallel=True) # parallel=True is needed for numba.prange below to actually run multithreaded
def monte_carlo_pi_nbmt(n):
acc = 0
for i in numba.prange(n):
x = random.random()
y = random.random()
if (x**2 + y**2) < 1.0:
acc += 1
return 4.0 * acc / n
monte_carlo_pi_nbmt(1000000)
%timeit monte_carlo_pi_nbmt(1000000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='step1a'></a>
Step2: <a id='step1b'></a>
Step3: <a id='step1c'></a>
Step4: <a id='step1d'></a>
Step5: <a id='step2'></a>
Step6: <a id='step3'></a>
Step7: <a id='step4'></a>
|
<ASSISTANT_TASK:>
Python Code:
import os
from pathlib import Path
testfolder = str(Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP' / 'Tutorial_09')
if not os.path.exists(testfolder):
os.makedirs(testfolder)
print ("Your simulation will be stored in %s" % testfolder)
# VARIABLES of the simulation:
lat = 35.1 # ABQ
lon = -106.7 # ABQ
x=1
y = 2
numpanels=1
limit_angle = 45 # tracker rotation limit angle
backtrack = True
albedo = 'concrete' # ground albedo
hub_height = y*0.75 # H = 0.75
gcr = 0.35
pitch = y/gcr
#pitch = 1.0/gcr # Check from 1Axis_Shading_PVSC2018 file
cumulativesky = False # needed for set1axis and makeScene1axis so simulation is done hourly not with gencumsky.
limit_angle = 45 # tracker rotation limit angle
nMods=10
nRows=3
sensorsy = 200
module_type='test-module'
datewanted='06_24' # sunny day 6/24/1972 (index 4180 - 4195). Valid formats starting version 0.4.0 for full day sim: mm_dd
## Torque tube info
tubetype='round'
material = 'Metal_Grey'
diameter = 0.1
axisofrotationTorqueTube = False # Original PVSC version rotated around the modules like most other software.
# Variables that will get defined on each iteration below:
zgap = 0 # 0.2, 0.3 values tested. Re-defined on each simulation.
visible = False # baseline is no torque tube.
# Simulation Start.
import bifacial_radiance
import numpy as np
print(bifacial_radiance.__version__)
demo = bifacial_radiance.RadianceObj(path = testfolder)
demo.setGround(albedo)
epwfile = demo.getEPW(lat, lon)
metdata = demo.readWeatherFile(epwfile, starttime=datewanted, endtime=datewanted)
trackerdict = demo.set1axis(metdata, limit_angle = limit_angle, backtrack = backtrack, gcr = gcr, cumulativesky = cumulativesky)
trackerdict = demo.gendaylit1axis()
sceneDict = {'pitch':pitch,'hub_height':hub_height, 'nMods': nMods, 'nRows': nRows}
#CASE 0 No torque tube
# When torquetube is False, zgap is the distance from axis of torque tube to module surface, but since we are rotating from the module's axis, this Zgap doesn't matter.
# zgap = 0.1 + diameter/2.0
torquetube = False
customname = '_NoTT'
module_NoTT = demo.makeModule(name=customname,x=x,y=y, numpanels=numpanels)
module_NoTT.addTorquetube(visible=False, axisofrotation=False, diameter=0)
trackerdict = demo.makeScene1axis(trackerdict, module_NoTT, sceneDict, cumulativesky = cumulativesky)
trackerdict = demo.makeOct1axis(trackerdict)
trackerdict = demo.analysis1axis(trackerdict, sensorsy = sensorsy, customname = customname)
#ZGAP 0.1
zgap = 0.1
customname = '_zgap0.1'
tubeParams = {'tubetype':tubetype,
'diameter':diameter,
'material':material,
'axisofrotation':False,
'visible':True} # either pass this into makeModule, or separately into module.addTorquetube()
module_zgap01 = demo.makeModule(name=customname, x=x,y=y, numpanels=numpanels, zgap=zgap, tubeParams=tubeParams)
trackerdict = demo.makeScene1axis(trackerdict, module_zgap01, sceneDict, cumulativesky = cumulativesky)
trackerdict = demo.makeOct1axis(trackerdict)
trackerdict = demo.analysis1axis(trackerdict, sensorsy = sensorsy, customname = customname)
#ZGAP 0.2
zgap = 0.2
customname = '_zgap0.2'
tubeParams = {'tubetype':tubetype,
'diameter':diameter,
'material':material,
'axisofrotation':False,
'visible':True} # either pass this into makeModule, or separately into module.addTorquetube()
module_zgap02 = demo.makeModule(name=customname, x=x,y=y, numpanels=numpanels,zgap=zgap, tubeParams=tubeParams)
trackerdict = demo.makeScene1axis(trackerdict, module_zgap02, sceneDict, cumulativesky = cumulativesky)
trackerdict = demo.makeOct1axis(trackerdict)
trackerdict = demo.analysis1axis(trackerdict, sensorsy = sensorsy, customname = customname)
#ZGAP 0.3
zgap = 0.3
customname = '_zgap0.3'
tubeParams = {'tubetype':tubetype,
'diameter':diameter,
'material':material,
'axisofrotation':False,
'visible':True} # either pass this into makeModule, or separately into module.addTorquetube()
module_zgap03 = demo.makeModule(name=customname,x=x,y=y, numpanels=numpanels, zgap=zgap, tubeParams=tubeParams)
trackerdict = demo.makeScene1axis(trackerdict, module_zgap03, sceneDict, cumulativesky = cumulativesky)
trackerdict = demo.makeOct1axis(trackerdict)
trackerdict = demo.analysis1axis(trackerdict, sensorsy = sensorsy, customname = customname)
import glob
import pandas as pd
resultsfolder = os.path.join(testfolder, 'results')
print (resultsfolder)
filenames = glob.glob(os.path.join(resultsfolder,'*.csv'))
noTTlist = [k for k in filenames if 'NoTT' in k]
zgap10cmlist = [k for k in filenames if 'zgap0.1' in k]
zgap20cmlist = [k for k in filenames if 'zgap0.2' in k]
zgap30cmlist = [k for k in filenames if 'zgap0.3' in k]
# sum across all hours for each case
unsh_front = np.array([pd.read_csv(f, engine='python')['Wm2Front'] for f in noTTlist]).sum(axis = 0)
cm10_front = np.array([pd.read_csv(f, engine='python')['Wm2Front'] for f in zgap10cmlist]).sum(axis = 0)
cm20_front = np.array([pd.read_csv(f, engine='python')['Wm2Front'] for f in zgap20cmlist]).sum(axis = 0)
cm30_front = np.array([pd.read_csv(f, engine='python')['Wm2Front'] for f in zgap30cmlist]).sum(axis = 0)
unsh_back = np.array([pd.read_csv(f, engine='python')['Wm2Back'] for f in noTTlist]).sum(axis = 0)
cm10_back = np.array([pd.read_csv(f, engine='python')['Wm2Back'] for f in zgap10cmlist]).sum(axis = 0)
cm20_back = np.array([pd.read_csv(f, engine='python')['Wm2Back'] for f in zgap20cmlist]).sum(axis = 0)
cm30_back = np.array([pd.read_csv(f, engine='python')['Wm2Back'] for f in zgap30cmlist]).sum(axis = 0)
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = ['Helvetica']
plt.rcParams['axes.linewidth'] = 0.2 #set the value globally
fig = plt.figure()
fig.set_size_inches(4, 2.5)
ax = fig.add_axes((0.15,0.15,0.78,0.75))
#plt.rc('font', family='sans-serif')
plt.rc('xtick',labelsize=8)
plt.rc('ytick',labelsize=8)
plt.rc('axes',labelsize=8)
plt.plot(np.linspace(-1,1,unsh_back.__len__()),(cm30_back - unsh_back)/unsh_back*100, label = '30cm gap',color = 'black') #steelblue
plt.plot(np.linspace(-1,1,unsh_back.__len__()),(cm20_back - unsh_back)/unsh_back*100, label = '20cm gap',color = 'steelblue', linestyle = '--') #steelblue
plt.plot(np.linspace(-1,1,unsh_back.__len__()),(cm10_back - unsh_back)/unsh_back*100, label = '10cm gap',color = 'darkorange') #steelblue
#plt.ylabel('$G_{rear}$ vs unshaded [Wm-2]')#(r'$BG_E$ [%]')
plt.ylabel('$G_{rear}$ / $G_{rear,tubeless}$ -1 [%]')
plt.xlabel('Module X position [m]')
plt.legend(fontsize = 8,frameon = False,loc='best')
#plt.ylim([0, 15])
plt.title('Torque tube shading loss',fontsize=9)
#plt.annotate('South',xy=(-10,9.5),fontsize = 8); plt.annotate('North',xy=(8,9.5),fontsize = 8)
plt.show()
ShadingFactor = (1 - cm30_back.sum() / unsh_back.sum())*100
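# Illustrative output step (not in the original tutorial): report the aggregate rear-side loss
print('Rear irradiance lost to torque tube shading (30 cm gap): {:.2f}%'.format(ShadingFactor))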
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Nestly
Step2: Checking amplicon fragment BD distribution
Step3: fragments w/ diffusion + DBL
Step4: BD min/max
Step5: Plotting number of taxa in each fraction
Step6: w/ simulated data
Step7: Total sequence count
Step8: Plotting Shannon diversity for each
Step9: min/max abundances of taxa
Step10: Plotting rank-abundance of heavy fractions
Step11: BD range where an OTU is detected
Step12: Emperical
Step13: BD span of just overlapping taxa
Step14: Plotting abundance distributions
Step15: --OLD--
Step16: Plotting the abundance distribution of top 10 most abundant taxa (bulk samples)
|
<ASSISTANT_TASK:>
Python Code:
import os
import glob
import re
import nestly
%load_ext rpy2.ipython
%%R
library(ggplot2)
library(dplyr)
library(tidyr)
library(gridExtra)
library(phyloseq)
## BD for G+C of 0 or 100
BD.GCp0 = 0 * 0.098 + 1.66
BD.GCp100 = 1 * 0.098 + 1.66
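# The two lines above encode the linear GC -> buoyant density relation
# (BD = 0.098 * GC_fraction + 1.66) that is reused later to set the min/max BD window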
workDir = '/home/nick/notebook/SIPSim/dev/fullCyc/n1147_frag_norm_9_2.5_n5/'
buildDir = os.path.join(workDir, 'Day1_default_run')
R_dir = '/home/nick/notebook/SIPSim/lib/R/'
fragFile= '/home/nick/notebook/SIPSim/dev/bac_genome1147/validation/ampFrags.pkl'
targetFile = '/home/nick/notebook/SIPSim/dev/fullCyc/CD-HIT/target_taxa.txt'
physeqDir = '/var/seq_data/fullCyc/MiSeq_16SrRNA/515f-806r/lib1-7/phyloseq/'
physeq_bulkCore = 'bulk-core'
physeq_SIP_core = 'SIP-core_unk'
prefrac_comm_abundance = ['1e9']
richness = 2503 # chao1 estimate for bulk Day 1
seq_per_fraction = ['lognormal', 9.432, 0.5, 10000, 30000] # dist, mean, scale, min, max
bulk_days = [1]
nprocs = 24
# building tree structure
nest = nestly.Nest()
## varying params
nest.add('abs', prefrac_comm_abundance)
## set params
nest.add('bulk_day', bulk_days, create_dir=False)
nest.add('percIncorp', [0], create_dir=False)
nest.add('percTaxa', [0], create_dir=False)
nest.add('np', [nprocs], create_dir=False)
nest.add('richness', [richness], create_dir=False)
nest.add('subsample_dist', [seq_per_fraction[0]], create_dir=False)
nest.add('subsample_mean', [seq_per_fraction[1]], create_dir=False)
nest.add('subsample_scale', [seq_per_fraction[2]], create_dir=False)
nest.add('subsample_min', [seq_per_fraction[3]], create_dir=False)
nest.add('subsample_max', [seq_per_fraction[4]], create_dir=False)
### input/output files
nest.add('buildDir', [buildDir], create_dir=False)
nest.add('R_dir', [R_dir], create_dir=False)
nest.add('fragFile', [fragFile], create_dir=False)
nest.add('targetFile', [targetFile], create_dir=False)
nest.add('physeqDir', [physeqDir], create_dir=False)
nest.add('physeq_bulkCore', [physeq_bulkCore], create_dir=False)
# building directory tree
nest.build(buildDir)
# bash file to run
bashFile = os.path.join(buildDir, 'SIPSimRun.sh')
%%writefile $bashFile
#!/bin/bash
export PATH={R_dir}:$PATH
#-- making DNA pool similar to gradient of interest
echo '# Creating comm file from phyloseq'
phyloseq2comm.r {physeqDir}{physeq_bulkCore} -s 12C-Con -d {bulk_day} > {physeq_bulkCore}_comm.txt
printf 'Number of lines: '; wc -l {physeq_bulkCore}_comm.txt
echo '## Adding target taxa to comm file'
comm_add_target.r {physeq_bulkCore}_comm.txt {targetFile} > {physeq_bulkCore}_comm_target.txt
printf 'Number of lines: '; wc -l {physeq_bulkCore}_comm_target.txt
echo '# Adding extra richness to community file'
printf "1\t{richness}\n" > richness_needed.txt
comm_add_richness.r -s {physeq_bulkCore}_comm_target.txt richness_needed.txt > {physeq_bulkCore}_comm_all.txt
### renaming comm file for downstream pipeline
cat {physeq_bulkCore}_comm_all.txt > {physeq_bulkCore}_comm_target.txt
rm -f {physeq_bulkCore}_comm_all.txt
echo '## parsing out genome fragments to make simulated DNA pool resembling the gradient of interest'
## all OTUs without an associated reference genome will be assigned a random reference (of the reference genome pool)
### this is done through --NA-random
SIPSim fragment_KDE_parse {fragFile} {physeq_bulkCore}_comm_target.txt \
--rename taxon_name --NA-random > fragsParsed.pkl
echo '#-- SIPSim pipeline --#'
echo '# converting fragments to KDE'
SIPSim fragment_KDE \
fragsParsed.pkl \
> fragsParsed_KDE.pkl
echo '# adding diffusion'
SIPSim diffusion \
fragsParsed_KDE.pkl \
--np {np} \
> fragsParsed_KDE_dif.pkl
echo '# adding DBL contamination'
SIPSim DBL \
fragsParsed_KDE_dif.pkl \
--np {np} \
> fragsParsed_KDE_dif_DBL.pkl
echo '# making incorp file'
SIPSim incorpConfigExample \
--percTaxa {percTaxa} \
--percIncorpUnif {percIncorp} \
> {percTaxa}_{percIncorp}.config
echo '# adding isotope incorporation to BD distribution'
SIPSim isotope_incorp \
fragsParsed_KDE_dif_DBL.pkl \
{percTaxa}_{percIncorp}.config \
--comm {physeq_bulkCore}_comm_target.txt \
--np {np} \
> fragsParsed_KDE_dif_DBL_inc.pkl
#echo '# calculating BD shift from isotope incorporation'
#SIPSim BD_shift \
# fragsParsed_KDE_dif_DBL.pkl \
# fragsParsed_KDE_dif_DBL_inc.pkl \
# --np {np} \
# > fragsParsed_KDE_dif_DBL_inc_BD-shift.txt
echo '# simulating gradient fractions'
SIPSim gradient_fractions \
{physeq_bulkCore}_comm_target.txt \
> fracs.txt
echo '# simulating an OTU table'
SIPSim OTU_table \
fragsParsed_KDE_dif_DBL_inc.pkl \
{physeq_bulkCore}_comm_target.txt \
fracs.txt \
--abs {abs} \
--np {np} \
> OTU_abs{abs}.txt
#echo '# simulating PCR'
SIPSim OTU_PCR \
OTU_abs{abs}.txt \
> OTU_abs{abs}_PCR.txt
echo '# subsampling from the OTU table (simulating sequencing of the DNA pool)'
SIPSim OTU_subsample \
--dist {subsample_dist} \
--dist_params mean:{subsample_mean},sigma:{subsample_scale} \
--min_size {subsample_min} \
--max_size {subsample_max} \
OTU_abs{abs}_PCR.txt \
> OTU_abs{abs}_PCR_sub.txt
echo '# making a wide-formatted table'
SIPSim OTU_wideLong -w \
OTU_abs{abs}_PCR_sub.txt \
> OTU_abs{abs}_PCR_sub_w.txt
echo '# making metadata (phyloseq: sample_data)'
SIPSim OTU_sampleData \
OTU_abs{abs}_PCR_sub.txt \
> OTU_abs{abs}_PCR_sub_meta.txt
!chmod 777 $bashFile
!cd $workDir; \
nestrun --template-file $bashFile -d Day1_default_run --log-file log.txt -j 1
workDir1 = os.path.join(workDir, 'Day1_default_run/1e9/')
!cd $workDir1; \
SIPSim KDE_info \
-s fragsParsed_KDE.pkl \
> fragsParsed_KDE_info.txt
%%R -i workDir1
inFile = file.path(workDir1, 'fragsParsed_KDE_info.txt')
df = read.delim(inFile, sep='\t') %>%
filter(KDE_ID == 1)
df %>% head(n=3)
%%R -w 600 -h 300
ggplot(df, aes(median)) +
geom_histogram(binwidth=0.001) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16)
)
workDir1 = os.path.join(workDir, 'Day1_default_run/1e9/')
!cd $workDir1; \
SIPSim KDE_info \
-s fragsParsed_KDE_dif_DBL.pkl \
> fragsParsed_KDE_dif_DBL_info.pkl
%%R -i workDir1
inFile = file.path(workDir1, 'fragsParsed_KDE_dif_DBL_info.pkl')
df = read.delim(inFile, sep='\t') %>%
filter(KDE_ID == 1)
df %>% head(n=3)
%%R -w 600 -h 300
ggplot(df, aes(median)) +
geom_histogram(binwidth=0.001) +
labs(x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16)
)
%%R
## min G+C cutoff
min_GC = 13.5
## max G+C cutoff
max_GC = 80
## max G+C shift
max_13C_shift_in_BD = 0.036
min_BD = min_GC/100.0 * 0.098 + 1.66
max_BD = max_GC/100.0 * 0.098 + 1.66
max_BD = max_BD + max_13C_shift_in_BD
cat('Min BD:', min_BD, '\n')
cat('Max BD:', max_BD, '\n')
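# Worked values for the cutoffs above: min_BD = 0.135 * 0.098 + 1.66 ~ 1.673 and
# max_BD = 0.80 * 0.098 + 1.66 + 0.036 ~ 1.774; this is the BD window applied to the
# simulated and observed tables below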
%%R
# simulated OTU table file
OTU.table.dir = '/home/nick/notebook/SIPSim/dev/fullCyc/frag_norm_9_2.5_n5/Day1_default_run/1e9/'
OTU.table.file = 'OTU_abs1e9_PCR_sub.txt'
#OTU.table.file = 'OTU_abs1e9_sub.txt'
#OTU.table.file = 'OTU_abs1e9.txt'
%%R -i physeqDir -i physeq_SIP_core -i bulk_days
# bulk core samples
F = file.path(physeqDir, physeq_SIP_core)
physeq.SIP.core = readRDS(F)
physeq.SIP.core.m = physeq.SIP.core %>% sample_data
physeq.SIP.core = prune_samples(physeq.SIP.core.m$Substrate == '12C-Con' &
physeq.SIP.core.m$Day %in% bulk_days,
physeq.SIP.core) %>%
filter_taxa(function(x) sum(x) > 0, TRUE)
physeq.SIP.core.m = physeq.SIP.core %>% sample_data
physeq.SIP.core
%%R -w 800 -h 300
## dataframe
df.EMP = physeq.SIP.core %>% otu_table %>%
as.matrix %>% as.data.frame
df.EMP$OTU = rownames(df.EMP)
df.EMP = df.EMP %>%
gather(sample, abundance, 1:(ncol(df.EMP)-1))
df.EMP = inner_join(df.EMP, physeq.SIP.core.m, c('sample' = 'X.Sample'))
df.EMP.nt = df.EMP %>%
group_by(sample) %>%
mutate(n_taxa = sum(abundance > 0)) %>%
ungroup() %>%
distinct(sample) %>%
filter(Buoyant_density >= min_BD,
Buoyant_density <= max_BD)
## plotting
p = ggplot(df.EMP.nt, aes(Buoyant_density, n_taxa)) +
geom_point(color='blue') +
geom_line(color='blue') +
#geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density', y='Number of taxa') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p
%%R -w 800 -h 300
# loading file
F = file.path(workDir1, OTU.table.file)
df.SIM = read.delim(F, sep='\t')
## edit table
df.SIM.nt = df.SIM %>%
filter(count > 0) %>%
group_by(library, BD_mid) %>%
summarize(n_taxa = n()) %>%
filter(BD_mid >= min_BD,
BD_mid <= max_BD)
## plot
p = ggplot(df.SIM.nt, aes(BD_mid, n_taxa)) +
geom_point(color='red') +
geom_line(color='red') +
geom_point(data=df.EMP.nt, aes(x=Buoyant_density), color='blue') +
geom_line(data=df.EMP.nt, aes(x=Buoyant_density), color='blue') +
#geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density', y='Number of taxa') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p
%%R -w 800 -h 300
# normalized by max number of taxa
## edit table
df.SIM.nt = df.SIM.nt %>%
group_by() %>%
mutate(n_taxa_norm = n_taxa / max(n_taxa))
df.EMP.nt = df.EMP.nt %>%
group_by() %>%
mutate(n_taxa_norm = n_taxa / max(n_taxa))
## plot
p = ggplot(df.SIM.nt, aes(BD_mid, n_taxa_norm)) +
geom_point(color='red') +
geom_line(color='red') +
geom_point(data=df.EMP.nt, aes(x=Buoyant_density), color='blue') +
geom_line(data=df.EMP.nt, aes(x=Buoyant_density), color='blue') +
#geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
scale_y_continuous(limits=c(0, 1)) +
labs(x='Buoyant density', y='Number of taxa\n(fraction of max)') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p
%%R -w 800 -h 300
# simulated
df.SIM.s = df.SIM %>%
group_by(library, BD_mid) %>%
summarize(total_abund = sum(count)) %>%
rename('Day' = library, 'Buoyant_density' = BD_mid) %>%
ungroup() %>%
mutate(dataset='simulated')
# emperical
df.EMP.s = df.EMP %>%
group_by(Day, Buoyant_density) %>%
summarize(total_abund = sum(abundance)) %>%
ungroup() %>%
mutate(dataset='emperical')
# join
df.j = rbind(df.SIM.s, df.EMP.s) %>%
filter(Buoyant_density >= min_BD,
Buoyant_density <= max_BD)
df.SIM.s = df.EMP.s = ""
# plot
ggplot(df.j, aes(Buoyant_density, total_abund, color=dataset)) +
geom_point() +
geom_line() +
scale_color_manual(values=c('blue', 'red')) +
labs(x='Buoyant density', y='Total sequences per sample') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
%%R
shannon_index_long = function(df, abundance_col, ...){
# calculating shannon diversity index from a 'long' formated table
## community_col = name of column defining communities
## abundance_col = name of column defining taxon abundances
df = df %>% as.data.frame
cmd = paste0(abundance_col, '/sum(', abundance_col, ')')
df.s = df %>%
group_by_(...) %>%
mutate_(REL_abundance = cmd) %>%
mutate(pi__ln_pi = REL_abundance * log(REL_abundance),
shannon = -sum(pi__ln_pi, na.rm=TRUE)) %>%
ungroup() %>%
dplyr::select(-REL_abundance, -pi__ln_pi) %>%
distinct_(...)
return(df.s)
}
%%R
# calculating shannon
df.SIM.shan = shannon_index_long(df.SIM, 'count', 'library', 'fraction') %>%
filter(BD_mid >= min_BD,
BD_mid <= max_BD)
df.EMP.shan = shannon_index_long(df.EMP, 'abundance', 'sample') %>%
filter(Buoyant_density >= min_BD,
Buoyant_density <= max_BD)
%%R -w 800 -h 300
# plotting
p = ggplot(df.SIM.shan, aes(BD_mid, shannon)) +
geom_point(color='red') +
geom_line(color='red') +
geom_point(data=df.EMP.shan, aes(x=Buoyant_density), color='blue') +
geom_line(data=df.EMP.shan, aes(x=Buoyant_density), color='blue') +
scale_y_continuous(limits=c(4, 7.5)) +
labs(x='Buoyant density', y='Shannon index') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
p
%%R -h 300 -w 800
# simulated
df.SIM.s = df.SIM %>%
filter(rel_abund > 0) %>%
group_by(BD_mid) %>%
summarize(min_abund = min(rel_abund),
max_abund = max(rel_abund)) %>%
ungroup() %>%
rename('Buoyant_density' = BD_mid) %>%
mutate(dataset = 'simulated')
# emperical
df.EMP.s = df.EMP %>%
group_by(Buoyant_density) %>%
mutate(rel_abund = abundance / sum(abundance)) %>%
filter(rel_abund > 0) %>%
summarize(min_abund = min(rel_abund),
max_abund = max(rel_abund)) %>%
ungroup() %>%
mutate(dataset = 'emperical')
df.j = rbind(df.SIM.s, df.EMP.s) %>%
filter(Buoyant_density >= min_BD,
Buoyant_density <= max_BD)
# plotting
ggplot(df.j, aes(Buoyant_density, max_abund, color=dataset, group=dataset)) +
geom_point() +
geom_line() +
scale_color_manual(values=c('blue', 'red')) +
labs(x='Buoyant density', y='Maximum relative abundance') +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none'
)
%%R -w 900
# simulated
df.SIM.s = df.SIM %>%
select(BD_mid, rel_abund) %>%
rename('Buoyant_density' = BD_mid) %>%
mutate(dataset='simulated')
# emperical
df.EMP.s = df.EMP %>%
group_by(Buoyant_density) %>%
mutate(rel_abund = abundance / sum(abundance)) %>%
ungroup() %>%
filter(rel_abund > 0) %>%
select(Buoyant_density, rel_abund) %>%
mutate(dataset='emperical')
# join
df.j = rbind(df.SIM.s, df.EMP.s) %>%
filter(Buoyant_density > 1.73) %>%
mutate(Buoyant_density = round(Buoyant_density, 3),
Buoyant_density_c = as.character(Buoyant_density))
df.j$Buoyant_density_c = reorder(df.j$Buoyant_density_c, df.j$Buoyant_density)
ggplot(df.j, aes(Buoyant_density_c, rel_abund)) +
geom_boxplot() +
scale_color_manual(values=c('blue', 'red')) +
labs(x='Buoyant density', y='Maximum relative abundance') +
facet_grid(dataset ~ .) +
theme_bw() +
theme(
text = element_text(size=16),
axis.text.x = element_text(angle=60, hjust=1),
legend.position = 'none'
)
%%R
# loading comm file
F = file.path(workDir1, 'bulk-core_comm_target.txt')
df.comm = read.delim(F, sep='\t') %>%
dplyr::select(library, taxon_name, rel_abund_perc) %>%
rename('bulk_abund' = rel_abund_perc) %>%
mutate(bulk_abund = bulk_abund / 100)
## joining
df.SIM.j = inner_join(df.SIM, df.comm, c('library' = 'library',
'taxon' = 'taxon_name')) %>%
filter(BD_mid >= min_BD,
BD_mid <= max_BD)
df.SIM.j %>% head(n=3)
%%R
bulk_days = c(1)
%%R
physeq.dir = '/var/seq_data/fullCyc/MiSeq_16SrRNA/515f-806r/lib1-7/phyloseq/'
physeq.bulk = 'bulk-core'
physeq.file = file.path(physeq.dir, physeq.bulk)
physeq.bulk = readRDS(physeq.file)
physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk = prune_samples(physeq.bulk.m$Exp_type == 'microcosm_bulk' &
physeq.bulk.m$Day %in% bulk_days, physeq.bulk)
physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk
%%R
physeq.bulk.n = transform_sample_counts(physeq.bulk, function(x) x/sum(x))
physeq.bulk.n
%%R
# making long format of each bulk table
bulk.otu = physeq.bulk.n %>% otu_table %>% as.data.frame
ncol = ncol(bulk.otu)
bulk.otu$OTU = rownames(bulk.otu)
bulk.otu = bulk.otu %>%
gather(sample, abundance, 1:ncol)
bulk.otu = inner_join(physeq.bulk.m, bulk.otu, c('X.Sample' = 'sample')) %>%
dplyr::select(OTU, abundance) %>%
rename('bulk_abund' = abundance)
bulk.otu %>% head(n=3)
%%R
# joining tables
df.EMP.j = inner_join(df.EMP, bulk.otu, c('OTU' = 'OTU')) %>%
filter(Buoyant_density >= min_BD,
Buoyant_density <= max_BD)
df.EMP.j %>% head(n=3)
%%R -h 400
# filtering & combining emperical w/ simulated data
## emperical
max_BD_range = max(df.EMP.j$Buoyant_density) - min(df.EMP.j$Buoyant_density)
df.EMP.j.f = df.EMP.j %>%
filter(abundance > 0) %>%
group_by(OTU) %>%
summarize(mean_rel_abund = mean(bulk_abund),
min_BD = min(Buoyant_density),
max_BD = max(Buoyant_density),
BD_range = max_BD - min_BD,
BD_range_perc = BD_range / max_BD_range * 100) %>%
ungroup() %>%
mutate(dataset = 'emperical')
## simulated
max_BD_range = max(df.SIM.j$BD_mid) - min(df.SIM.j$BD_mid)
df.SIM.j.f = df.SIM.j %>%
filter(count > 0) %>%
group_by(taxon) %>%
summarize(mean_rel_abund = mean(bulk_abund),
min_BD = min(BD_mid),
max_BD = max(BD_mid),
BD_range = max_BD - min_BD,
BD_range_perc = BD_range / max_BD_range * 100) %>%
ungroup() %>%
rename('OTU' = taxon) %>%
mutate(dataset = 'simulated')
## join
df.j = rbind(df.EMP.j.f, df.SIM.j.f) %>%
filter(BD_range_perc > 0,
mean_rel_abund > 0)
## plotting
ggplot(df.j, aes(mean_rel_abund, BD_range_perc, color=dataset)) +
geom_point(alpha=0.5, shape='O') +
#stat_density2d() +
#scale_fill_gradient(low='white', high='red', na.value='grey50') +
#scale_x_log10(limits=c(min(df.j$mean_rel_abund, na.rm=T), 1e-2)) +
#scale_y_continuous(limits=c(90, 100)) +
scale_x_log10() +
scale_y_continuous() +
scale_color_manual(values=c('blue', 'red')) +
labs(x='Pre-fractionation abundance', y='% of total BD range') +
#geom_vline(xintercept=0.001, linetype='dashed', alpha=0.5) +
facet_grid(dataset ~ .) +
theme_bw() +
theme(
text = element_text(size=16),
panel.grid = element_blank(),
legend.position = 'none'
)
%%R -i targetFile
df.target = read.delim(targetFile, sep='\t')
df.target %>% nrow %>% print
df.target %>% head(n=3)
%%R
# filtering to just target taxa
df.j.t = df.j %>%
filter(OTU %in% df.target$OTU)
## plotting
ggplot(df.j.t, aes(mean_rel_abund, BD_range_perc, color=dataset)) +
geom_point(alpha=0.5, shape='O') +
#stat_density2d() +
#scale_fill_gradient(low='white', high='red', na.value='grey50') +
#scale_x_log10(limits=c(min(df.j$mean_rel_abund, na.rm=T), 1e-2)) +
#scale_y_continuous(limits=c(90, 100)) +
scale_x_log10() +
scale_y_continuous() +
scale_color_manual(values=c('blue', 'red')) +
labs(x='Pre-fractionation abundance', y='% of total BD range') +
#geom_vline(xintercept=0.001, linetype='dashed', alpha=0.5) +
facet_grid(dataset ~ .) +
theme_bw() +
theme(
text = element_text(size=16),
panel.grid = element_blank(),
legend.position = 'none'
)
%%R
## emperical
df.EMP.j.f = df.EMP.j %>%
filter(abundance > 0) %>%
dplyr::select(OTU, sample, abundance, Buoyant_density, bulk_abund) %>%
mutate(dataset = 'emperical')
## simulated
df.SIM.j.f = df.SIM.j %>%
filter(count > 0) %>%
dplyr::select(taxon, fraction, count, BD_mid, bulk_abund) %>%
rename('OTU' = taxon,
'sample' = fraction,
'Buoyant_density' = BD_mid,
'abundance' = count) %>%
mutate(dataset = 'simulated')
df.j = rbind(df.EMP.j.f, df.SIM.j.f) %>%
group_by(sample) %>%
mutate(rel_abund = abundance / sum(abundance))
df.j %>% head(n=3) %>% as.data.frame
%%R -w 800 -h 400
# plotting absolute abundances of subsampled
## plot
p = ggplot(df.j, aes(Buoyant_density, abundance, fill=OTU)) +
geom_area(stat='identity', position='dodge', alpha=0.5) +
#geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density', y='Subsampled community\n(absolute abundance)') +
facet_grid(dataset ~ .) +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none',
axis.title.y = element_text(vjust=1),
axis.title.x = element_blank(),
plot.margin=unit(c(0.1,1,0.1,1), "cm")
)
p
%%R -w 800 -h 400
# plotting relative abundances of subsampled
p = ggplot(df.j, aes(Buoyant_density, rel_abund, fill=OTU)) +
geom_area(stat='identity', position='dodge', alpha=0.5) +
#geom_vline(xintercept=c(BD.GCp0, BD.GCp100), linetype='dashed', alpha=0.5) +
labs(x='Buoyant density', y='Subsampled community\n(relative abundance)') +
facet_grid(dataset ~ .) +
theme_bw() +
theme(
text = element_text(size=16),
legend.position = 'none',
axis.title.y = element_text(vjust=1),
axis.title.x = element_blank(),
plot.margin=unit(c(0.1,1,0.1,1), "cm")
)
p
%%R
physeq.SIP.core.n = transform_sample_counts(physeq.SIP.core, function(x) x/sum(x))
physeq.SIP.core.n
%%R
physeq.dir = '/var/seq_data/fullCyc/MiSeq_16SrRNA/515f-806r/lib1-7/phyloseq/'
physeq.bulk = 'bulk-core'
physeq.file = file.path(physeq.dir, physeq.bulk)
physeq.bulk = readRDS(physeq.file)
physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk = prune_samples(physeq.bulk.m$Exp_type == 'microcosm_bulk' &
physeq.bulk.m$Day %in% bulk_days, physeq.bulk)
physeq.bulk.m = physeq.bulk %>% sample_data
physeq.bulk
%%R
physeq.bulk.n = transform_sample_counts(physeq.bulk, function(x) x/sum(x))
physeq.bulk.n
%%R
# making long format of SIP OTU table
SIP.otu = physeq.SIP.core.n %>% otu_table %>% as.data.frame
ncol = ncol(SIP.otu)
SIP.otu$OTU = rownames(SIP.otu)
SIP.otu = SIP.otu %>%
gather(sample, abundance, 1:ncol)
SIP.otu = inner_join(physeq.SIP.core.m, SIP.otu, c('X.Sample' = 'sample')) %>%
select(-core_dataset, -Sample_location, -Sample_date, -Sample_treatment,
-Sample_subtreatment, -library, -Sample_type)
SIP.otu %>% head(n=3)
%%R
# making long format of each bulk table
bulk.otu = physeq.bulk.n %>% otu_table %>% as.data.frame
ncol = ncol(bulk.otu)
bulk.otu$OTU = rownames(bulk.otu)
bulk.otu = bulk.otu %>%
gather(sample, abundance, 1:ncol)
bulk.otu = inner_join(physeq.bulk.m, bulk.otu, c('X.Sample' = 'sample')) %>%
select(OTU, abundance) %>%
rename('bulk_abund' = abundance)
bulk.otu %>% head(n=3)
%%R
# joining tables
SIP.otu = inner_join(SIP.otu, bulk.otu, c('OTU' = 'OTU'))
SIP.otu %>% head(n=3)
%%R -w 900 -h 900
# for each gradient, plotting gradient rel_abund vs bulk rel_abund
ggplot(SIP.otu, aes(bulk_abund, abundance)) +
geom_point(alpha=0.2) +
geom_point(shape='O', alpha=0.6) +
facet_wrap(~ Buoyant_density) +
labs(x='Pre-fractionation relative abundance',
y='Fraction relative abundance') +
theme_bw() +
theme(
text = element_text(size=16)
)
%%R -w 900 -h 900
# for each gradient, plotting gradient rel_abund vs bulk rel_abund
ggplot(SIP.otu, aes(bulk_abund, abundance)) +
geom_point(alpha=0.2) +
geom_point(shape='O', alpha=0.6) +
scale_x_continuous(limits=c(0,0.01)) +
scale_y_continuous(limits=c(0,0.01)) +
facet_wrap(~ Buoyant_density) +
labs(x='Pre-fractionation relative abundance',
y='Fraction relative abundance') +
theme_bw() +
theme(
text = element_text(size=16),
axis.text.x = element_text(angle=90, hjust=1, vjust=0.5)
)
%%R -w 500 -h 300
# checking bulk rank-abundance
tmp = bulk.otu %>%
mutate(rank = row_number(-bulk_abund))
ggplot(tmp, aes(rank, bulk_abund)) +
geom_point()
%%R -w 900
top.n = filter(tmp, rank <= 10)
SIP.otu.f = SIP.otu %>%
filter(OTU %in% top.n$OTU)
ggplot(SIP.otu.f, aes(Buoyant_density, abundance, group=OTU, fill=OTU)) +
#geom_point() +
#geom_line() +
geom_area(position='dodge', alpha=0.4) +
labs(y='Relative abundance', x='Buoyant density') +
theme_bw() +
theme(
text = element_text(size=16)
)
%%R -w 600 -h 400
# Number of gradients that each OTU is found in
max_BD_range = max(SIP.otu$Buoyant_density) - min(SIP.otu$Buoyant_density)
SIP.otu.f = SIP.otu %>%
filter(abundance > 0) %>%
group_by(OTU) %>%
summarize(bulk_abund = mean(bulk_abund),
min_BD = min(Buoyant_density),
max_BD = max(Buoyant_density),
BD_range = max_BD - min_BD,
BD_range_perc = BD_range / max_BD_range * 100) %>%
ungroup()
ggplot(SIP.otu.f, aes(bulk_abund, BD_range_perc, group=OTU)) +
geom_point() +
scale_x_log10() +
labs(x='Pre-fractionation abundance', y='% of total BD range') +
geom_vline(xintercept=0.001, linetype='dashed', alpha=0.5) +
theme_bw() +
theme(
text = element_text(size=16)
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Neural Network
Step2: If you have already trained on the dataset, there will be a pickle file with the trained network available. Underneath we test it on the test set
Step3: SVM
Step4: Test using Python definitions from other files
|
<ASSISTANT_TASK:>
Python Code:
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
#make random test and train set
from sklearn import cross_validation
from sklearn.cross_validation import train_test_split
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.3, random_state=0)
%run NeuralNetwork
import cPickle as pickle
import numpy as np
#the neural network is based on code by Riaan Zoetmulder
inputData = X
targetData = y
myNN = NN.NNetwork(len(inputData[1]) , 60, 1 , 0.1, 0.5)
myNN.backPropagation(np.asarray(inputData), np.asarray(targetData), 1000)
#saves the trained state of the network
with open('NeuralNetwork.p', 'wb') as output_file:
pickle.dump(myNN, output_file, -1)
import cPickle as pickle
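# Minimal sketch of re-loading the trained network from the pickle file written above
# (assumes the training cell has already produced 'NeuralNetwork.p')
with open('NeuralNetwork.p', 'rb') as input_file:
    myNN = pickle.load(input_file)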
#has definition accuracy, accuracy(y_target, y_predict)
%run modelSelection
#important to have this magic line in place, otherwise the notebook will not plot inline
%matplotlib inline
#this imports the file from the folder by running it; all definitions from the file will then be in the memory of the kernel
%run PCA_visualization
kPCA_visualization2d(X, y)
%run notebook_import_test
print_import()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preprocessing the Data
Step2: Accuracy Metrics
Step3: Build Models and Evaluate
Step4: Decision Tree Model - Shallow
Step5: Decision Tree Model - Full
Step6: K-Fold Cross Validation
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('fivethirtyeight')
df = pd.read_csv('data/historical_loan.csv')
df.head()
df.years = df.years.fillna(np.mean(df.years))
#Load the preprocessing module
from sklearn import preprocessing
categorical_variables = df.dtypes[df.dtypes=="object"].index.tolist()
categorical_variables
for i in categorical_variables:
lbl = preprocessing.LabelEncoder()
lbl.fit(list(df[i]))
df[i] = lbl.transform(df[i])
df.head()
X = df.iloc[:,1:8]
y = df.iloc[:,0]
from sklearn import tree
from sklearn import metrics
def model_evaluation(data, target, model, model_name):
model_fit = model.fit(data, target)
pred = model_fit.predict(data)
proba = model_fit.predict_proba(data)
fpr, tpr, thresholds = metrics.roc_curve(target, proba[:,1])
roc_auc = metrics.auc(fpr, tpr)
print("Model: %s" % model_name)
# Scores for the model
print("accuracy: %.3f" % metrics.accuracy_score(target, pred))
print("recall: %.3f" % metrics.precision_score(target, pred))
print("precision: %.3f" % metrics.recall_score(target, pred))
print("confusion_matrix:")
print(metrics.confusion_matrix(target, pred))
print("auc: %.3f" % metrics.auc(fpr, tpr))
# ROC Curve
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label='AUC = %0.2f'% roc_auc)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.1,1.2])
plt.ylim([-0.1,1.2])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
return roc_auc
benchmark = tree.DecisionTreeClassifier(max_depth = 1)
benchmark
model_evaluation(X, y, benchmark, "benchmark")
Shallow = tree.DecisionTreeClassifier(max_depth=10)
Shallow
model_evaluation(X, y, Shallow, "Shallow")
Full = tree.DecisionTreeClassifier()
Full
model_evaluation(X, y, Full, "Full")
from sklearn.model_selection import StratifiedKFold
from scipy import interp
def model_evaluation_crossval(data, target, model, model_name):
    data = np.array(data)
    target = np.array(target)
    cv = StratifiedKFold(n_splits=5)
    # Create the color options
    cmap = plt.get_cmap('viridis')
    indices = np.linspace(0, cmap.N, 5)
    colors = [cmap(int(i)) for i in indices]
    mean_tpr = 0.0
    mean_fpr = np.linspace(0, 1, 100)
    # initiate plot
    plt.figure(figsize=(8, 8))
    i = 0
    for (train, test) in cv.split(data, target):
        print(train, test)
        probas_ = model.fit(data[train], target[train]).predict_proba(data[test])
        # Compute ROC curve and area under the curve
        fpr, tpr, thresholds = metrics.roc_curve(target[test], probas_[:, 1])
        mean_tpr += interp(mean_fpr, fpr, tpr)
        mean_tpr[0] = 0.0
        roc_auc = metrics.auc(fpr, tpr)
        plt.plot(fpr, tpr, lw=2, color=colors[i],
                 label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
        i = i + 1
    # Mean ROC curve
    mean_tpr /= cv.get_n_splits(data, target)
    mean_tpr[-1] = 1.0
    mean_auc = metrics.auc(mean_fpr, mean_tpr)
    plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
             label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='k', label='random')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.xlim([-0.1,1.1])
    plt.ylim([-0.1,1.1])
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.show()
model_evaluation_crossval(X, y, Shallow, "Shallow")
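# Illustrative sketch (an assumption, not from the original notebook): the mean
# cross-validated ROC AUC can also be obtained more compactly with cross_val_score.
from sklearn.model_selection import cross_val_score
cross_val_score(Shallow, X, y, cv=5, scoring='roc_auc').mean()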
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Cross-Entropy Cost Function
Step6: The Overfitting Problem
Step10: Hyperbolic Tangent (tanh) Activation and Rectified Linear Unit (ReLU) Activation
Step11: The Rectified Linear Unit (ReLU) activation function allows activation values of unbounded magnitude and has the advantage that its gradient does not vanish to zero even when the weighted sum $a$ is large.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
sigmoid = lambda x: 1/(1+np.exp(-x))
sigmoid_prime = lambda x: sigmoid(x)*(1-sigmoid(x))
xx = np.linspace(-10, 10, 1000)
plt.plot(xx, sigmoid(xx));
plt.plot(xx, sigmoid_prime(xx));
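# Illustrative sketch (an assumption, not taken from network2): the per-example
# cross-entropy cost for a sigmoid output a and target y is
#   C = -[y*ln(a) + (1-y)*ln(1-a)]
# which stays steep when the output is badly wrong, unlike the quadratic cost.
cross_entropy = lambda y, a: -(y * np.log(a) + (1 - y) * np.log(1 - a))
aa = np.linspace(0.001, 0.999, 1000)
plt.plot(aa, cross_entropy(1.0, aa), label='y = 1')
plt.plot(aa, cross_entropy(0.0, aa), label='y = 0')
plt.legend(loc=0)
plt.show()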
%cd /home/dockeruser/neural-networks-and-deep-learning/src
%ls
import mnist_loader
import network2
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
net = network2.Network([784, 30, 10], cost=network2.QuadraticCost)
net.large_weight_initializer()
%time result1 = net.SGD(training_data, 10, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)
net = network2.Network([784, 30, 10], cost=network2.CrossEntropyCost)
net.large_weight_initializer()
%time result2 = net.SGD(training_data, 10, 10, 0.5, evaluation_data=test_data, monitor_evaluation_accuracy=True)
plt.plot(result1[1], 'bo-', label="quadratic cost")
plt.plot(result2[1], 'rs-', label="cross-entropy cost")
plt.legend(loc=0)
plt.show()
from ipywidgets import interactive
from IPython.display import Audio, display
def softmax_plot(z1=0, z2=0, z3=0, z4=0):
    exps = np.array([np.exp(z1), np.exp(z2), np.exp(z3), np.exp(z4)])
    exp_sum = exps.sum()
    plt.bar(range(len(exps)), exps/exp_sum)
    plt.xlim(-0.3, 4.1)
    plt.ylim(0, 1)
    plt.xticks([])
v = interactive(softmax_plot, z1=(-3, 5, 0.01), z2=(-3, 5, 0.01), z3=(-3, 5, 0.01), z4=(-3, 5, 0.01))
display(v)
z = np.linspace(-5, 5, 100)
a = np.tanh(z)
plt.plot(z, a)
plt.show()
z = np.linspace(-5, 5, 100)
a = np.maximum(z, 0)
plt.plot(z, a)
plt.show()
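# Illustrative sketch (not part of the original cells): comparing the gradients of
# the sigmoid and ReLU activations shows why ReLU does not saturate for large
# weighted sums a, while the sigmoid gradient vanishes.
a_vals = np.linspace(-5, 5, 100)
sig = 1 / (1 + np.exp(-a_vals))
plt.plot(a_vals, sig * (1 - sig), label='sigmoid gradient')
plt.plot(a_vals, (a_vals > 0).astype(float), label='ReLU gradient')
plt.legend(loc=0)
plt.show()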
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccr-iitm', 'sandbox-2', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: http
Step2: Total number of births in year 2010 by sex
Step3: Insert prop column for each group
Step4: Verify that the prop column sums to 1 within all the groups
Step5: Extract a subset of the data with the top 10 names for each sex
Step6: Aggregate all births by the first letter of the name column
Step7: Normalize the table
Step8: Plot the proportion of boys' and girls' names starting with each letter
|
<ASSISTANT_TASK:>
Python Code:
%pwd
import pandas as pd
names2010 = pd.read_csv('/resources/yob2010.txt', names=['name', 'sex', 'births'])
names2010
names2010.groupby('sex').births.sum()
def add_prop(group):
    # Cast births to float so the division below yields fractional proportions
    births = group.births.astype(float)
    group['prop'] = births / births.sum()
    return group
names2010 = names2010.groupby(['sex']).apply(add_prop)
names2010
import numpy as np
np.allclose(names2010.groupby(['sex']).prop.sum(), 1)
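# Illustrative alternative (an assumption, not from the original notebook): the same
# proportions can be computed without groupby().apply() by using a transform.
prop_alt = names2010.births / names2010.groupby('sex').births.transform('sum')
np.allclose(names2010.prop, prop_alt)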
def get_top10(group):
    return group.sort_values(by='births', ascending=False)[:10]
grouped = names2010.groupby(['sex'])
top10 = grouped.apply(get_top10)
top10.index = np.arange(len(top10))
top10
# extract first letter from name column
get_first_letter = lambda x: x[0]
first_letters = names2010.name.map(get_first_letter)
first_letters.name = 'first_letter'
table = names2010.pivot_table('births', index=first_letters,
                              columns=['sex'], aggfunc=sum)
table.head()
table.sum()
letter_prop = table / table.sum().astype(float)
%matplotlib inline
import matplotlib.pyplot as plt
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
letter_prop['M'].plot(kind='bar', rot=0, ax=axes[0], title='Male')
letter_prop['F'].plot(kind='bar', rot=0, ax=axes[1], title='Female',
                      legend=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Table of Contents
Step2: Back to Top
Step3: If SYSTOOLS is not part of the path, you can update it with the following SQL.
Step4: From this point on you won't need to add the SYSTOOLS schema on the front of any of your SQL
Step5: Back to Top
Step6: This is an example of a poorly formatted JSON document.
Step7: Back to Top
Step8: If you want to extract the entire contents of a JSON field, you need to use the BSON2JSON function.
Step9: One thing that you should note is that the JSON that is retrieved has been modified slightly so that
Step10: The following SQL will inject a bad value into the beginning of the JSON field to test the results from the
Step11: The BSON_VALIDATE should return a zero for this particular row since it is not a valid BSON document.
Step12: Back to Top
Step13: We can check the count of records to make sure that 42 employees were added to our table.
Step14: Additional DEPARTMENT Table
Step15: Back to Top
Step16: If the size of the field being returned is larger than the field specification,
Step17: In the case of character fields, you may need to specify a larger return
Step18: Back to Top
Step19: If you specify "
Step20: If you need to access a specific array element in a field, you can use the "dot"
Step21: If you attempt to retrieve the pay field, you will end up with a NULL value, not
Step22: The results contain 40 employees who have a middle initial, and two that do not.
Step23: If you only want to know how many employee have the middle initial field (midinit)
Step24: Back to Top
Step25: You need to ensure that the data types from both JSON functions are compatible for
Step26: The following SQL will generate a list of data types and field names found within this document.
Step27: The following sections will show how we can get atomic (non-array) types out of
Step28: Back to Top
Step29: The JSON_TYPE function will verify that this is an integer field (Type=16).
Step30: You can retrieve an integer value with either the 'i' flag or the 'l' flag.
Step31: We can ask that the value be interpreted as a BIGINT by using the 'l' flag,
Step32: The next SQL statement will create a field with a BIGINT size. Note that we don't
Step33: The JSON_TYPE function will verify that this is a big integer field (Type=18).
Step34: We can check to see that the data is stored in the document as a BIGINT by
Step35: Returning the data as an integer type 'i' will fail since the number is too big
Step36: Specifying the 'l' flag will make the data be returned properly.
Step37: Since we have an integer in the JSON field, we also have the option of returning
Step38: Back to Top
Step39: The JSON_TYPE function will verify that this is a numeric field (Type=1).
Step40: Numeric data can be retrieved in either number (n) format, integer (i - note that
Step41: You may wonder why number format (n) results in an answer that has a fractional
Step42: A floating-point number is recognized by the Exx specifier in the number. The
Step43: The JSON_TYPE function will verify that this is a floating point field (Type=1).
Step44: The floating-point value can be retrieved as a number, integer, or floating point value.
Step45: Back to Top
Step46: We will double-check what type the field is in the JSON record.
Step47: To retrieve the value, we can ask that it be formatted as an integer or number.
Step48: You can also retrieve a boolean field as a character or
Step49: Back to Top
Step50: Querying the data type of this field using JSON_VAL will return a value of 9 (date type).
Step51: If you decide to use a character string to represent a date, you can use either
Step52: Using the 'd' specification will return the value as a date.
Step53: What about timestamps? If you decide to store a timestamp into a field, you can
Step54: Retrieving it as a Date will also work, but the time portion will be removed.
Step55: You can also ask for the timestamp value by using the 'ts'
Step56: To force the value to return just the time portion, either
Step57: Back to Top
Step58: You must specify a length for the 's' parameter otherwise
Step59: Back to Top
Step60: By default, only the first number of an array is returned
Step61: The TABLE( ... ) specification in the FROM clause is used
Step62: Only a subset of the results is shown above, but you will
Step63: You can even find out how many people are sharing
Step64: Back to Top
Step65: Back to Top
Step66: An easier way is to use the JSON_GET_POS_ARR_INDEX function.
Step67: If we used quotes around the phone number, the function will not match any of
Step68: To add a new field to the record, the JSON_UPDATE function needs to specify the
Step69: Retrieving the document shows that the lastname field has now been added to the record.
Step70: If you specify a field that is an array type and do not
Step71: Running the SQL against the original phone data will work properly.
Step72: To remove the phone number field you need to use the $unset keyword and set the field to null.
Step73: Back to Top
Step74: The following command will time the select statement.
Step75: To create an index on the empno field, we use the JSON_VAL function to extract the
Step76: Rerunning the SQL results in the following performance
Step77: Db2 can now use the index to retrieve the record and the following plot shows the increased throughput.
Step78: Back to Top
Step79: To use INSTEAD OF triggers, a view needs to be created on
Step80: At this point we can create three INSTEAD OF triggers to handle insert,
Step81: On UPDATES, the sequence number remains the same, and the BSON field is updated
Step82: Finally, the DELETE trigger will just remove the row.
Step83: Applications will only deal with the EMP_TXS view. Any
Step84: Selecting from the EMP_TXS view will return the JSON in a readable format
Step85: The base table only contains the BSON but the view translates the value back into a readable format.
Step86: If you want to manipulate the BSON directly (say change the employee number),
Step87: And we can check it using our original view.
|
<ASSISTANT_TASK:>
Python Code:
%run db2.ipynb
%%sql -q
CREATE FUNCTION SYSTOOLS.JSON_TABLE(
INJSON BLOB(16M), INELEM VARCHAR(2048), RETTYPE VARCHAR(100))
RETURNS TABLE(TYPE INTEGER, VALUE VARCHAR(2048))
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
NO EXTERNAL ACTION
DISALLOW PARALLEL
SCRATCHPAD 2048
EXTERNAL NAME 'db2json!jsonTable';
CREATE FUNCTION SYSTOOLS.JSON_TYPE(
INJSON BLOB(16M), INELEM VARCHAR(2048), MAXLENGTH INTEGER)
RETURNS INTEGER
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
ALLOW PARALLEL
RETURNS NULL ON NULL INPUT
NO EXTERNAL ACTION
EXTERNAL NAME 'db2json!jsonType';
CREATE FUNCTION SYSTOOLS.JSON_LEN(
INJSON BLOB(16M), INELEM VARCHAR(2048))
RETURNS INTEGER
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
ALLOW PARALLEL
NO EXTERNAL ACTION
SCRATCHPAD 2048
EXTERNAL NAME 'db2json!jsonLen';
CREATE FUNCTION SYSTOOLS.BSON2JSON(INBSON BLOB(16M)) RETURNS CLOB(16M)
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
ALLOW PARALLEL
NO EXTERNAL ACTION
SCRATCHPAD 2048
EXTERNAL NAME 'db2json!jsonBsonToJson';
CREATE FUNCTION SYSTOOLS.JSON2BSON(INJSON CLOB(16M)) RETURNS BLOB(16M)
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
ALLOW PARALLEL
NO EXTERNAL ACTION
SCRATCHPAD 2048
EXTERNAL NAME 'db2json!jsonToBson';
CREATE FUNCTION SYSTOOLS.JSON_GET_POS_ARR_INDEX(
INJSON BLOB(16M), QUERY VARCHAR(32672) FOR BIT DATA)
RETURNS INTEGER
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
ALLOW PARALLEL
CALLED ON NULL INPUT
NO EXTERNAL ACTION
SCRATCHPAD 2048
EXTERNAL NAME 'db2json!jsonGetPosArrIndex';
CREATE FUNCTION SYSTOOLS.JSON_UPDATE(
INJSON BLOB(16M), INELEM VARCHAR(32672))
RETURNS BLOB(16M)
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
ALLOW PARALLEL
CALLED ON NULL INPUT
NO EXTERNAL ACTION
SCRATCHPAD 2048
EXTERNAL NAME 'db2json!jsonUpdate2';
CREATE FUNCTION SYSTOOLS.BSON_VALIDATE(
INJSON BLOB(16M))
RETURNS INT
LANGUAGE C
PARAMETER STYLE SQL
PARAMETER CCSID UNICODE
NO SQL
NOT FENCED
DETERMINISTIC
ALLOW PARALLEL
RETURNS NULL ON NULL INPUT
NO EXTERNAL ACTION
EXTERNAL NAME 'db2json!jsonValidate';
%sql VALUES CURRENT PATH
%sql SET CURRENT PATH = CURRENT PATH, SYSTOOLS
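%%sql
-- Hedged quick check, not in the original notebook: with SYSTOOLS on the path the
-- functions created above now resolve without the schema prefix.
VALUES CAST(BSON2JSON(JSON2BSON('{"check":"path ok"}')) AS VARCHAR(60))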
%sql -q DROP TABLE TESTJSON
%%sql
CREATE TABLE TESTJSON
(
JSON_FIELD BLOB(4000) INLINE LENGTH 4000
)
%%sql
INSERT INTO TESTJSON VALUES ( JSON2BSON('{Name:"George"}') )
%sql -j select bson2json(json_field) from testjson
%%sql
INSERT INTO TESTJSON VALUES
( JSON2BSON('{Name:, Age: 32}'))
%%sql
SELECT CAST(JSON_FIELD AS VARCHAR(60)) FROM TESTJSON
%%sql -j
SELECT BSON2JSON(JSON_FIELD) FROM TESTJSON
%%sql
SELECT BSON_VALIDATE(JSON_FIELD) FROM TESTJSON
%%sql
UPDATE TESTJSON
SET JSON_FIELD = BLOB('!') || JSON_FIELD
%%sql
SELECT BSON_VALIDATE(JSON_FIELD) FROM TESTJSON
%%sql
DROP TABLE JSON_EMP;
CREATE TABLE JSON_EMP
(
SEQ INT NOT NULL GENERATED ALWAYS AS IDENTITY,
EMP_DATA BLOB(4000) INLINE LENGTH 4000
);
INSERT INTO JSON_EMP(EMP_DATA) VALUES
JSON2BSON( '{ "empno":"000010", "firstnme":"CHRISTINE", "midinit":"I", "lastname":"HAAS", "workdept":"A00", "phoneno":[3978], "hiredate":"01/01/1995", "job":"PRES", "edlevel":18, "sex":"F", "birthdate":"08/24/1963", "pay" : { "salary":152750.00, "bonus":1000.00, "comm":4220.00} }'),
JSON2BSON( '{"empno":"000020","firstnme":"MICHAEL","lastname":"THOMPSON", "workdept":"B01","phoneno":[3476,1422],"hiredate":"10/10/2003", "job":"MANAGER","edlevel":18,"sex":"M","birthdate":"02/02/1978", "pay": {"salary":94250.00,"bonus":800.00,"comm":3300.00}}'),
JSON2BSON( '{"empno":"000030","firstnme":"SALLY","midinit":"A","lastname":"KWAN", "workdept":"C01","phoneno":[4738],"hiredate":"04/05/2005", "job":"MANAGER","edlevel":20,"sex":"F","birthdate":"05/11/1971", "pay": {"salary":98250.00,"bonus":800.00,"comm":3060.00} }'),
JSON2BSON( '{ "empno":"000050","firstnme":"JOHN","midinit":"B","lastname":"GEYER", "workdept":"E01","phoneno":[6789],"hiredate":"08/17/1979", "job":"MANAGER","edlevel":16,"sex":"M","birthdate":"09/15/1955", "pay": {"salary":80175.00,"bonus":800.00,"comm":3214.00} }'),
JSON2BSON( '{ "empno":"000060","firstnme":"IRVING","lastname":"STERN", "workdept":"D11","phoneno":[6423,2433],"hiredate":"09/14/2003", "job":"MANAGER","edlevel":16,"sex":"M","birthdate":"07/07/1975", "pay": {"salary":72250.00,"bonus":500.00,"comm":2580.00} }'),
JSON2BSON( '{ "empno":"000070","firstnme":"EVA","midinit":"D","lastname":"PULASKI", "workdept":"D21","phoneno":[7831,1422,4567],"hiredate":"09/30/2005", "job":"MANAGER","edlevel":16,"sex":"F","birthdate":"05/26/2003", "pay": {"salary":96170.00,"bonus":700.00,"comm":2893.00} }'),
JSON2BSON( '{ "empno":"000090","firstnme":"EILEEN","midinit":"W","lastname":"HENDERSON", "workdept":"E11","phoneno":[5498],"hiredate":"08/15/2000", "job":"MANAGER","edlevel":16,"sex":"F","birthdate":"05/15/1971", "pay": {"salary":89750.00,"bonus":600.00,"comm":2380.00} }'),
JSON2BSON( '{ "empno":"000100","firstnme":"THEODORE","midinit":"Q","lastname":"SPENSER", "workdept":"E21","phoneno":[0972],"hiredate":"06/19/2000", "job":"MANAGER","edlevel":14,"sex":"M","birthdate":"12/18/1980", "pay": {"salary":86150.00,"bonus":500.00,"comm":2092.00} }'),
JSON2BSON( '{ "empno":"000110","firstnme":"VINCENZO","midinit":"G","lastname":"LUCCHESSI", "workdept":"A00","phoneno":[3490,3567],"hiredate":"05/16/1988", "job":"SALESREP","edlevel":19,"sex":"M","birthdate":"11/05/1959", "pay": {"salary":66500.00,"bonus":900.00,"comm":3720.00} }'),
JSON2BSON( '{ "empno":"000120","firstnme":"SEAN","midinit":"","lastname":"O''CONNELL", "workdept":"A00","phoneno":[2167,1533],"hiredate":"12/05/1993", "job":"CLERK","edlevel":14,"sex":"M","birthdate":"10/18/1972", "pay": {"salary":49250.00,"bonus":600.00,"comm":2340.00} }'),
JSON2BSON( '{ "empno":"000130","firstnme":"DELORES","midinit":"M","lastname":"QUINTANA", "workdept":"C01","phoneno":[4578],"hiredate":"07/28/2001", "job":"ANALYST","edlevel":16,"sex":"F","birthdate":"09/15/1955", "pay": {"salary":73800.00,"bonus":500.00,"comm":1904.00} }'),
JSON2BSON( '{ "empno":"000140","firstnme":"HEATHER","midinit":"A","lastname":"NICHOLLS", "workdept":"C01","phoneno":[1793],"hiredate":"12/15/2006", "job":"ANALYST","edlevel":18,"sex":"F","birthdate":"01/19/1976", "pay": {"salary":68420.00,"bonus":600.00,"comm":2274.00} }'),
JSON2BSON( '{ "empno":"000150","firstnme":"BRUCE","midinit":"","lastname":"ADAMSON", "workdept":"D11","phoneno":[4510],"hiredate":"02/12/2002", "job":"DESIGNER","edlevel":16,"sex":"M","birthdate":"05/17/1977", "pay": {"salary":55280.00,"bonus":500.00,"comm":2022.00} }'),
JSON2BSON( '{ "empno":"000160","firstnme":"ELIZABETH","midinit":"R","lastname":"PIANKA", "workdept":"D11","phoneno":[3782,9322],"hiredate":"10/11/2006", "job":"DESIGNER","edlevel":17,"sex":"F","birthdate":"04/12/1980", "pay": {"salary":62250.00,"bonus":400.00,"comm":1780.00} }'),
JSON2BSON( '{ "empno":"000170","firstnme":"MASATOSHI","midinit":"J","lastname":"YOSHIMURA", "workdept":"D11","phoneno":[2890],"hiredate":"09/15/1999", "job":"DESIGNER","edlevel":16,"sex":"M","birthdate":"01/05/1981", "pay": {"salary":44680.00,"bonus":500.00,"comm":1974.00} }'),
JSON2BSON( '{ "empno":"000180","firstnme":"MARILYN","midinit":"S","lastname":"SCOUTTEN", "workdept":"D11","phoneno":[1682,9945],"hiredate":"07/07/2003", "job":"DESIGNER","edlevel":17,"sex":"F","birthdate":"02/21/1979", "pay": {"salary":51340.00,"bonus":500.00,"comm":1707.00} }'),
JSON2BSON( '{ "empno":"000190","firstnme":"JAMES","midinit":"H","lastname":"WALKER", "workdept":"D11","phoneno":[2986,3644],"hiredate":"07/26/2004", "job":"DESIGNER","edlevel":16,"sex":"M","birthdate":"06/25/1982", "pay": {"salary":50450.00,"bonus":400.00,"comm":1636.00} }'),
JSON2BSON( '{ "empno":"000200","firstnme":"DAVID","midinit":"","lastname":"BROWN", "workdept":"D11","phoneno":[4501,2522],"hiredate":"03/03/2002", "job":"DESIGNER","edlevel":16,"sex":"M","birthdate":"05/29/1971", "pay": {"salary":57740.00,"bonus":600.00,"comm":2217.00} }'),
JSON2BSON( '{ "empno":"000210","firstnme":"WILLIAM","midinit":"T","lastname":"JONES", "workdept":"","phoneno":[0942],"hiredate":"04/11/1998", "job":"DESIGNER","edlevel":17,"sex":"M","birthdate":"02/23/2003", "pay": {"salary":68270.00,"bonus":400.00,"comm":1462.00} }'),
JSON2BSON( '{ "empno":"000220","firstnme":"JENNIFER","midinit":"K","lastname":"LUTZ", "workdept":"D11","phoneno":[0672],"hiredate":"08/29/1998", "job":"DESIGNER","edlevel":18,"sex":"F","birthdate":"03/19/1978", "pay": {"salary":49840.00,"bonus":600.00,"comm":2387.00} }'),
JSON2BSON( '{ "empno":"000230","firstnme":"JAMES","midinit":"J","lastname":"JEFFERSON", "workdept":"D21","phoneno":[2094,8999,3756],"hiredate":"11/21/1996", "job":"CLERK","edlevel":14,"sex":"M","birthdate":"05/30/1980", "pay": {"salary":42180.00,"bonus":400.00,"comm":1774.00} }'),
JSON2BSON( '{ "empno":"000240","firstnme":"SALVATORE","midinit":"M","lastname":"MARINO", "workdept":"D21","phoneno":[3780],"hiredate":"12/05/2004", "job":"CLERK","edlevel":17,"sex":"M","birthdate":"03/31/2002", "pay": {"salary":48760.00,"bonus":600.00,"comm":2301.00} }'),
JSON2BSON( '{ "empno":"000250","firstnme":"DANIEL","midinit":"S","lastname":"SMITH", "workdept":"D21","phoneno":[0961],"hiredate":"10/30/1999", "job":"CLERK","edlevel":15,"sex":"M","birthdate":"11/12/1969", "pay": {"salary":49180.00,"bonus":400.00,"comm":1534.00} }'),
JSON2BSON( '{ "empno":"000260","firstnme":"SYBIL","midinit":"P","lastname":"JOHNSON", "workdept":"D21","phoneno":[8953,2533],"hiredate":"09/11/2005", "job":"CLERK","edlevel":16,"sex":"F","birthdate":"10/05/1976", "pay": {"salary":47250.00,"bonus":300.00,"comm":1380.00} }'),
JSON2BSON( '{ "empno":"000270","firstnme":"MARIA","midinit":"L","lastname":"PEREZ", "workdept":"D21","phoneno":[9001],"hiredate":"09/30/2006", "job":"CLERK","edlevel":15,"sex":"F","birthdate":"05/26/2003", "pay": {"salary":37380.00,"bonus":500.00,"comm":2190.00} }'),
JSON2BSON( '{ "empno":"000280","firstnme":"ETHEL","midinit":"R","lastname":"SCHNEIDER", "workdept":"E11","phoneno":[8997,1422],"hiredate":"03/24/1997", "job":"OPERATOR","edlevel":17,"sex":"F","birthdate":"03/28/1976", "pay": {"salary":36250.00,"bonus":500.00,"comm":2100.00} }'),
JSON2BSON( '{ "empno":"000290","firstnme":"JOHN","midinit":"R","lastname":"PARKER", "workdept":"E11","phoneno":[4502],"hiredate":"05/30/2006", "job":"OPERATOR","edlevel":12,"sex":"M","birthdate":"07/09/1985", "pay": {"salary":35340.00,"bonus":300.00,"comm":1227.00} }'),
JSON2BSON( '{ "empno":"000300","firstnme":"PHILIP","midinit":"X","lastname":"SMITH", "workdept":"E11","phoneno":[2095],"hiredate":"06/19/2002", "job":"OPERATOR","edlevel":14,"sex":"M","birthdate":"10/27/1976", "pay": {"salary":37750.00,"bonus":400.00,"comm":1420.00} }'),
JSON2BSON( '{ "empno":"000310","firstnme":"MAUDE","midinit":"F","lastname":"SETRIGHT", "workdept":"E11","phoneno":[3332,8005],"hiredate":"09/12/1994", "job":"OPERATOR","edlevel":12,"sex":"F","birthdate":"04/21/1961", "pay": {"salary":35900.00,"bonus":300.00,"comm":1272.00} }'),
JSON2BSON( '{ "empno":"000320","firstnme":"RAMLAL","midinit":"V","lastname":"MEHTA", "workdept":"E21","phoneno":[9990,1533],"hiredate":"07/07/1995", "job":"FIELDREP","edlevel":16,"sex":"M","birthdate":"08/11/1962", "pay": {"salary":39950.00,"bonus":400.00,"comm":1596.00} }'),
JSON2BSON( '{ "empno":"000330","firstnme":"WING","midinit":"","lastname":"LEE", "workdept":"E21","phoneno":[2103,2453],"hiredate":"02/23/2006", "job":"FIELDREP","edlevel":14,"sex":"M","birthdate":"07/18/1971", "pay": {"salary":45370.00,"bonus":500.00,"comm":2030.00} }'),
JSON2BSON( '{ "empno":"000340","firstnme":"JASON","midinit":"R","lastname":"GOUNOT", "workdept":"E21","phoneno":[5698,7744],"hiredate":"05/05/1977", "job":"FIELDREP","edlevel":16,"sex":"M","birthdate":"05/17/1956", "pay": {"salary":43840.00,"bonus":500.00,"comm":1907.00} }'),
JSON2BSON( '{ "empno":"200010","firstnme":"DIAN","midinit":"J","lastname":"HEMMINGER", "workdept":"A00","phoneno":[3978,2564],"hiredate":"01/01/1995", "job":"SALESREP","edlevel":18,"sex":"F","birthdate":"08/14/1973", "pay": {"salary":46500.00,"bonus":1000.00,"comm":4220.00} }'),
JSON2BSON( '{ "empno":"200120","firstnme":"GREG","midinit":"","lastname":"ORLANDO", "workdept":"A00","phoneno":[2167,1690],"hiredate":"05/05/2002", "job":"CLERK","edlevel":14,"sex":"M","birthdate":"10/18/1972", "pay": {"salary":39250.00,"bonus":600.00,"comm":2340.00} }'),
JSON2BSON( '{ "empno":"200140","firstnme":"KIM","midinit":"N","lastname":"NATZ", "workdept":"C01","phoneno":[1793],"hiredate":"12/15/2006", "job":"ANALYST","edlevel":18,"sex":"F","birthdate":"01/19/1976", "pay": {"salary":68420.00,"bonus":600.00,"comm":2274.00} }'),
JSON2BSON( '{ "empno":"200170","firstnme":"KIYOSHI","midinit":"","lastname":"YAMAMOTO", "workdept":"D11","phoneno":[2890],"hiredate":"09/15/2005", "job":"DESIGNER","edlevel":16,"sex":"M","birthdate":"01/05/1981", "pay": {"salary":64680.00,"bonus":500.00,"comm":1974.00} }'),
JSON2BSON( '{ "empno":"200220","firstnme":"REBA","midinit":"K","lastname":"JOHN", "workdept":"D11","phoneno":[0672],"hiredate":"08/29/2005", "job":"DESIGNER","edlevel":18,"sex":"F","birthdate":"03/19/1978", "pay": {"salary":69840.00,"bonus":600.00,"comm":2387.00} }'),
JSON2BSON( '{ "empno":"200240","firstnme":"ROBERT","midinit":"M","lastname":"MONTEVERDE", "workdept":"D21","phoneno":[3780,6823],"hiredate":"12/05/2004", "job":"CLERK","edlevel":17,"sex":"M","birthdate":"03/31/1984", "pay": {"salary":37760.00,"bonus":600.00,"comm":2301.00} }'),
JSON2BSON( '{ "empno":"200280","firstnme":"EILEEN","midinit":"R","lastname":"SCHWARTZ", "workdept":"E11","phoneno":[8997,9410],"hiredate":"03/24/1997", "job":"OPERATOR","edlevel":17,"sex":"F","birthdate":"03/28/1966", "pay": {"salary":46250.00,"bonus":500.00,"comm":2100.00} }'),
JSON2BSON( '{ "empno":"200310","firstnme":"MICHELLE","midinit":"F","lastname":"SPRINGER", "workdept":"E11","phoneno":[3332,7889],"hiredate":"09/12/1994", "job":"OPERATOR","edlevel":12,"sex":"F","birthdate":"04/21/1961", "pay": {"salary":35900.00,"bonus":300.00,"comm":1272.00} }'),
JSON2BSON( '{ "empno":"200330","firstnme":"HELENA","midinit":"","lastname":"WONG", "workdept":"E21","phoneno":[2103],"hiredate":"02/23/2006", "job":"FIELDREP","edlevel":14,"sex":"F","birthdate":"07/18/1971", "pay": {"salary":35370.00,"bonus":500.00,"comm":2030.00} }'),
JSON2BSON( '{ "empno":"200340","firstnme":"ROY","midinit":"R","lastname":"ALONZO", "workdept":"E21","phoneno":[5698,1533],"hiredate":"07/05/1997", "job":"FIELDREP","edlevel":16,"sex":"M","birthdate":"05/17/1956", "pay": {"salary":31840.00,"bonus":500.00,"comm":1907.00} }')
;
%sql SELECT COUNT(*) FROM JSON_EMP
%%sql -q
DROP TABLE JSON_DEPT;
CREATE TABLE JSON_DEPT
(
SEQ INT NOT NULL GENERATED ALWAYS AS IDENTITY,
DEPT_DATA BLOB(4000) INLINE LENGTH 4000
);
INSERT INTO JSON_DEPT(DEPT_DATA) VALUES
JSON2BSON('{"deptno":"A00", "mgrno":"000010", "admrdept":"A00", "deptname":"SPIFFY COMPUTER SERVICE DIV."}'),
JSON2BSON('{"deptno":"B01", "mgrno":"000020", "admrdept":"A00", "deptname":"PLANNING" }'),
JSON2BSON('{"deptno":"C01", "mgrno":"000030", "admrdept":"A00", "deptname":"INFORMATION CENTER" }'),
JSON2BSON('{"deptno":"D01", "admrdept":"A00", "deptname":"DEVELOPMENT CENTER" }'),
JSON2BSON('{"deptno":"D11", "mgrno":"000060", "admrdept":"D01", "deptname":"MANUFACTURING SYSTEMS" }'),
JSON2BSON('{"deptno":"D21", "mgrno":"000070", "admrdept":"D01", "deptname":"ADMINISTRATION SYSTEMS" }'),
JSON2BSON('{"deptno":"E01", "mgrno":"000050", "admrdept":"A00", "deptname":"SUPPORT SERVICES" }'),
JSON2BSON('{"deptno":"E11", "mgrno":"000090", "admrdept":"E01", "deptname":"OPERATIONS" }'),
JSON2BSON('{"deptno":"E21", "mgrno":"000100", "admrdept":"E01", "deptname":"SOFTWARE SUPPORT" }'),
JSON2BSON('{"deptno":"F22", "admrdept":"E01", "deptname":"BRANCH OFFICE F2" }'),
JSON2BSON('{"deptno":"G22", "admrdept":"E01", "deptname":"BRANCH OFFICE G2" }'),
JSON2BSON('{"deptno":"H22", "admrdept":"E01", "deptname":"BRANCH OFFICE H2" }'),
JSON2BSON('{"deptno":"I22", "admrdept":"E01", "deptname":"BRANCH OFFICE I2" }'),
JSON2BSON('{"deptno":"J22", "admrdept":"E01", "deptname":"BRANCH OFFICE J2" }')
;
%%sql
SELECT trim(JSON_VAL(EMP_DATA,'lastname','s:40')),
JSON_VAL(EMP_DATA,'pay.salary','f')
FROM JSON_EMP
WHERE
JSON_VAL(EMP_DATA,'empno','s:6') = '200170'
%%sql
SELECT JSON_VAL(EMP_DATA,'lastname','s:7')
FROM JSON_EMP
WHERE
JSON_VAL(EMP_DATA,'empno','s:6') = '000010'
%%sql
SELECT LEFT(JSON_VAL(EMP_DATA,'lastname','s:20'),7)
FROM JSON_EMP
WHERE
JSON_VAL(EMP_DATA,'empno','s:6') = '200170'
%sql SELECT JSON_VAL(EMP_DATA, 'phoneno.2', 'i') FROM JSON_EMP
%sql SELECT JSON_VAL(EMP_DATA, 'phoneno', 'i:na') FROM JSON_EMP
%%sql
SELECT JSON_VAL(EMP_DATA,'pay.salary','i'),
JSON_VAL(EMP_DATA,'pay.bonus','i'),
JSON_VAL(EMP_DATA,'pay.comm','i')
FROM JSON_EMP
WHERE
JSON_VAL(EMP_DATA,'empno','s:6') = '200170'
%%sql
SELECT JSON_VAL(EMP_DATA,'lastname','s:30'),
JSON_VAL(EMP_DATA,'midinit','u')
FROM JSON_EMP
%%sql
SELECT COUNT(*) FROM JSON_EMP
WHERE JSON_VAL(EMP_DATA,'midinit','s:40') IS NULL
%%sql
SELECT COUNT(*) FROM JSON_EMP
WHERE JSON_VAL(EMP_DATA,'midinit','s:40') IS NULL AND
JSON_VAL(EMP_DATA,'midinit','u') IS NOT NULL
%%sql
SELECT JSON_VAL(EMP_DATA,'empno','s:6') AS EMPNO,
JSON_VAL(EMP_DATA,'lastname','s:20') AS LASTNAME,
JSON_VAL(DEPT_DATA,'deptname','s:30') AS DEPTNAME
FROM JSON_EMP, JSON_DEPT
WHERE
JSON_VAL(DEPT_DATA,'deptno','s:3') =
JSON_VAL(EMP_DATA,'workdept','s:3')
FETCH FIRST 5 ROWS ONLY
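%%sql
-- Hedged extra example, not part of the original notebook: the same JSON_VAL join
-- can feed a GROUP BY, here counting employees per department name.
SELECT DEPTNAME, COUNT(*) AS HEADCOUNT
FROM (
  SELECT JSON_VAL(DEPT_DATA,'deptname','s:30') AS DEPTNAME
  FROM JSON_EMP, JSON_DEPT
  WHERE JSON_VAL(DEPT_DATA,'deptno','s:3') =
        JSON_VAL(EMP_DATA,'workdept','s:3')
) AS DEPT_EMPS
GROUP BY DEPTNAME
ORDER BY HEADCOUNT DESC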
%%sql -q
DROP TABLE TYPES;
CREATE TABLE TYPES
(DATA BLOB(4000) INLINE LENGTH 4000);
INSERT INTO TYPES VALUES
JSON2BSON(
'{
"string" : "string",
"integer" : 1,
"number" : 1.1,
"date" : {"$date": "2016-06-20T13:00:00"},
"boolean" : true,
"array" : [1,2,3],
"object" : {type: "main", phone: [1,2,3]}
}');
%%sql
SELECT 'STRING',JSON_TYPE(DATA, 'string', 2048) FROM TYPES
UNION ALL
SELECT 'INTEGER',JSON_TYPE(DATA, 'integer', 2048) FROM TYPES
UNION ALL
SELECT 'NUMBER',JSON_TYPE(DATA, 'number', 2048) FROM TYPES
UNION ALL
SELECT 'DATE',JSON_TYPE(DATA, 'date', 2048) FROM TYPES
UNION ALL
SELECT 'BOOLEAN', JSON_TYPE(DATA, 'boolean', 2048) FROM TYPES
UNION ALL
SELECT 'ARRAY', JSON_TYPE(DATA, 'array', 2048) FROM TYPES
UNION ALL
SELECT 'OBJECT', JSON_TYPE(DATA, 'object', 2048) FROM TYPES
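%%sql
-- Hedged helper, not part of the original notebook: JSON_TYPE returns the raw BSON
-- type code (per the BSON spec: 1=double, 2=string, 3=object, 4=array, 8=boolean,
-- 9=date, 16=int32, 18=int64), so a CASE expression can decode it for readability.
SELECT 'INTEGER' AS FIELD,
       CASE JSON_TYPE(DATA, 'integer', 2048)
            WHEN 16 THEN 'int32'
            WHEN 18 THEN 'int64'
            WHEN 1  THEN 'double'
            ELSE 'other'
       END AS TYPE_NAME
FROM TYPES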
%%sql -q
DROP TABLE SANDBOX;
CREATE TABLE SANDBOX (DATA BLOB(4000) INLINE LENGTH 4000);
%%sql
INSERT INTO SANDBOX VALUES
JSON2BSON('{"count":9782333}')
%%sql
SELECT JSON_TYPE(DATA,'count',2048) AS TYPE
FROM SANDBOX
%sql SELECT JSON_VAL(DATA,'count','i') FROM SANDBOX
%sql SELECT JSON_VAL(DATA,'count','l') FROM SANDBOX
%%sql
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"count":94123512223422}');
%sql SELECT JSON_TYPE(DATA,'count',2048) AS TYPE FROM SANDBOX
%sql SELECT JSON_TYPE(DATA,'count',2048) FROM SANDBOX
%sql SELECT JSON_VAL(DATA,'count','i') FROM SANDBOX
%sql SELECT JSON_VAL(DATA,'count','l') FROM SANDBOX
%%sql
SELECT JSON_VAL(DATA,'count','n') AS DECIMAL,
JSON_VAL(DATA,'count','f') AS FLOAT
FROM SANDBOX
%%sql -q
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"salary":92342.20}');
%sql SELECT JSON_TYPE(DATA,'salary',2048) AS TYPE FROM SANDBOX
%%sql
SELECT JSON_VAL(DATA,'salary','n') AS DECIMAL,
JSON_VAL(DATA,'salary','i') AS INTEGER,
JSON_VAL(DATA,'salary','f') AS FLOAT
FROM SANDBOX
%sql SELECT DEC(JSON_VAL(DATA,'salary','n'),9,2) AS DECIMAL FROM SANDBOX
%%sql
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"salary":9.2523E01}');
%sql SELECT JSON_TYPE(DATA,'salary',2048) AS TYPE FROM SANDBOX
%%sql
SELECT JSON_VAL(DATA,'salary','n') AS DECIMAL,
JSON_VAL(DATA,'salary','i') AS INTEGER,
JSON_VAL(DATA,'salary','f') AS FLOAT
FROM SANDBOX
%%sql
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"valid":true, "invalid":false}');
%sql SELECT JSON_TYPE(DATA,'valid',2048) AS TYPE FROM SANDBOX
%%sql
SELECT JSON_VAL(DATA,'valid','n') AS TRUE_DECIMAL,
JSON_VAL(DATA,'valid','i') AS TRUE_INTEGER,
JSON_VAL(DATA,'invalid','n') AS FALSE_DECIMAL,
JSON_VAL(DATA,'invalid','i') AS FALSE_INTEGER
FROM SANDBOX
%%sql
SELECT JSON_VAL(DATA,'valid','s:5') AS TRUE_STRING,
JSON_VAL(DATA,'valid','b:2') AS TRUE_BINARY,
JSON_VAL(DATA,'invalid','s:5') AS FALSE_STRING,
JSON_VAL(DATA,'invalid','b:2') AS FALSE_BINARY
FROM SANDBOX
%%sql
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"today":{"$date":"2016-07-01T12:00:00"}}');
%sql SELECT JSON_TYPE(DATA,'today',2048) FROM SANDBOX
%%sql
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"today":"2016-07-01"}');
SELECT JSON_VAL(DATA,'today','s:10') FROM SANDBOX;
%sql SELECT JSON_VAL(DATA,'today','d') FROM SANDBOX
%%sql
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"today":"' || VARCHAR(NOW()) || '"}');
SELECT JSON_VAL(DATA,'today','s:30') FROM SANDBOX;
%sql SELECT JSON_VAL(DATA,'today','d') FROM SANDBOX
%sql SELECT JSON_VAL(DATA,'today','ts') FROM SANDBOX
%sql SELECT TIME(JSON_VAL(DATA,'today','ts')) FROM SANDBOX
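%%sql
-- Hedged extra example, not in the original notebook: the same 'ts' value can be
-- split into its date and time portions in one statement with DATE() and TIME().
SELECT DATE(JSON_VAL(DATA,'today','ts')) AS DATE_PART,
       TIME(JSON_VAL(DATA,'today','ts')) AS TIME_PART
FROM SANDBOX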
%sql SELECT JSON_VAL(EMP_DATA, 'lastname', 's:10') FROM JSON_EMP
%sql SELECT JSON_VAL(EMP_DATA, 'lastname', 's:8') FROM JSON_EMP
%sql SELECT JSON_VAL(EMP_DATA, 'phoneno', 'i') FROM JSON_EMP
%%sql
SELECT PHONES.TYPE, CAST(PHONES.VALUE AS VARCHAR(10)) AS VALUE
FROM JSON_EMP E,
TABLE( JSON_TABLE(E.EMP_DATA,'phoneno','i') ) AS PHONES
WHERE JSON_VAL(E.EMP_DATA,'empno','s:6') = '000010'
%%sql -a
SELECT JSON_VAL(E.EMP_DATA, 'lastname', 's:10') AS LASTNAME,
CAST(PHONES.VALUE AS VARCHAR(10)) AS PHONE
FROM JSON_EMP E,
TABLE( JSON_TABLE(E.EMP_DATA,'phoneno','i') ) AS PHONES
%%sql
SELECT JSON_VAL(E.EMP_DATA, 'lastname', 's:10') AS LASTNAME,
CAST (PHONES.VALUE AS VARCHAR(10)) AS PHONE
FROM JSON_EMP E,
TABLE( JSON_TABLE(E.EMP_DATA,'phoneno','i') ) AS PHONES
ORDER BY PHONE
%%sql
SELECT CAST(PHONES.VALUE AS VARCHAR(10)) AS PHONE, COUNT(*) AS COUNT
FROM JSON_EMP E,
TABLE( JSON_TABLE(E.EMP_DATA,'phoneno','i') ) AS PHONES
GROUP BY PHONES.VALUE HAVING COUNT(*) > 1
ORDER BY PHONES.VALUE
%%sql
SELECT JSON_VAL(E.EMP_DATA, 'lastname', 's:10') AS LASTNAME,
JSON_LEN(E.EMP_DATA, 'phoneno') AS PHONE_COUNT
FROM JSON_EMP E
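%%sql
-- Hedged extra example, not part of the original notebook: JSON_LEN can also act as
-- a filter, here listing only the employees that carry more than two phone numbers.
SELECT JSON_VAL(E.EMP_DATA,'lastname','s:10') AS LASTNAME,
       JSON_LEN(E.EMP_DATA,'phoneno') AS PHONE_COUNT
FROM JSON_EMP E
WHERE JSON_LEN(E.EMP_DATA,'phoneno') > 2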
%%sql
SELECT JSON_VAL(E.EMP_DATA, 'lastname', 's:10') AS LASTNAME,
CAST(PHONES.VALUE AS VARCHAR(10)) AS PHONE
FROM JSON_EMP E,
TABLE( JSON_TABLE(E.EMP_DATA,'phoneno','i') ) AS PHONES
WHERE PHONES.VALUE = 1422
%%sql
SELECT JSON_VAL(EMP_DATA, 'lastname', 's:10') AS LASTNAME
FROM JSON_EMP
WHERE JSON_GET_POS_ARR_INDEX(EMP_DATA,
JSON2BSON('{"phoneno":1422}')) >= 0
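%%sql
-- Hedged extra example, not in the original notebook: the function returns the
-- matching array position itself (assumed 0-based) and a negative value when there
-- is no match, which is why the query above filters on >= 0.
SELECT JSON_VAL(EMP_DATA,'lastname','s:10') AS LASTNAME,
       JSON_GET_POS_ARR_INDEX(EMP_DATA, JSON2BSON('{"phoneno":1422}')) AS ARR_POS
FROM JSON_EMP
WHERE JSON_GET_POS_ARR_INDEX(EMP_DATA, JSON2BSON('{"phoneno":1422}')) >= 0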
%%sql
DELETE FROM SANDBOX;
INSERT INTO SANDBOX VALUES
JSON2BSON('{"phone":"[1111,2222,3333]"}');
%%sql
UPDATE SANDBOX
SET DATA =
JSON_UPDATE(DATA,'{ $set: {"lastname":"HAAS"}}')
%sql -j SELECT BSON2JSON(DATA) FROM SANDBOX
%%sql -j
UPDATE SANDBOX
SET DATA =
JSON_UPDATE(DATA,'{ $set: {"phone":"9999"}}');
SELECT BSON2JSON(DATA) FROM SANDBOX;
%sql -j SELECT BSON2JSON(DATA) FROM SANDBOX
%%sql -j
UPDATE SANDBOX
SET DATA =
JSON_UPDATE(DATA,'{ $set: {"phone.0":9999}}');
SELECT BSON2JSON(DATA) FROM SANDBOX;
%%sql -j
UPDATE SANDBOX
SET DATA =
JSON_UPDATE(DATA,'{ $unset: {"phone":null}}');
SELECT BSON2JSON(DATA) FROM SANDBOX;
%%sql -q
DROP INDEX IX_JSON;
SELECT JSON_VAL(EMP_DATA, 'lastname', 's:20') AS LASTNAME
FROM JSON_EMP
WHERE JSON_VAL(EMP_DATA, 'empno', 's:6') = '000010';
noindex = %sql -t \
SELECT JSON_VAL(EMP_DATA, 'lastname', 's:20') AS LASTNAME \
FROM JSON_EMP \
WHERE JSON_VAL(EMP_DATA, 'empno', 's:6') = '000010'
%%sql
CREATE INDEX IX_JSON ON JSON_EMP
(JSON_VAL(EMP_DATA,'empno','s:6'));
withindex = %sql -t \
    SELECT JSON_VAL(EMP_DATA, 'lastname', 's:20') AS LASTNAME \
    FROM JSON_EMP \
    WHERE JSON_VAL(EMP_DATA, 'empno', 's:6') = '000010'
%sql -pb WITH RESULTS(RUN, RESULT) AS (VALUES ('No Index',{noindex}),('With Index',{withindex})) SELECT * FROM RESULTS
%%sql -q
DROP TABLE BASE_EMP_TXS;
CREATE TABLE BASE_EMP_TXS (
SEQNO INT NOT NULL GENERATED ALWAYS AS IDENTITY,
INFO VARCHAR(4000),
BSONINFO BLOB(4000) INLINE LENGTH 4000
);
%%sql
CREATE OR REPLACE VIEW EMP_TXS AS
(SELECT SEQNO, BSON2JSON(BSONINFO) AS INFO FROM BASE_EMP_TXS)
%%sql -d
CREATE OR REPLACE TRIGGER I_EMP_TXS
INSTEAD OF INSERT ON EMP_TXS
REFERENCING NEW AS NEW_TXS
FOR EACH ROW MODE DB2SQL
BEGIN ATOMIC
INSERT INTO BASE_EMP_TXS VALUES (
DEFAULT,
NULL,
SYSTOOLS.JSON2BSON(NEW_TXS.INFO)
);
END
@
%%sql -d
CREATE OR REPLACE TRIGGER U_EMP_TXS
INSTEAD OF UPDATE ON EMP_TXS
REFERENCING NEW AS NEW_TXS OLD AS OLD_TXS
FOR EACH ROW MODE DB2SQL
BEGIN ATOMIC
UPDATE BASE_EMP_TXS
SET (INFO, BSONINFO) = (NULL,
SYSTOOLS.JSON2BSON(NEW_TXS.INFO))
WHERE
BASE_EMP_TXS.SEQNO = OLD_TXS.SEQNO;
END
@
%%sql -d
CREATE OR REPLACE TRIGGER D_EMP_TX
INSTEAD OF DELETE ON EMP_TXS
REFERENCING OLD AS OLD_TXS
FOR EACH ROW MODE DB2SQL
BEGIN ATOMIC
DELETE FROM BASE_EMP_TXS
WHERE
BASE_EMP_TXS.SEQNO = OLD_TXS.SEQNO;
END
@
%%sql
INSERT INTO EMP_TXS(INFO) VALUES (
'{
"empno":"000010",
"firstnme":"CHRISTINE",
"midinit":"I",
"lastname":"HAAS",
"workdept":"A00",
"phoneno":[3978],
"hiredate":"01/01/1995",
"job":"PRES",
"edlevel":18,
"sex":"F",
"birthdate":"08/24/1963",
"pay" : {
"salary":152750.00,
"bonus":1000.00,
"comm":4220.00}
}')
%sql -j SELECT INFO FROM EMP_TXS
%%sql -j
UPDATE EMP_TXS SET INFO = '{"empno":"000010"}' WHERE SEQNO = 1;
SELECT INFO FROM EMP_TXS;
%%sql
UPDATE BASE_EMP_TXS
SET BSONINFO = JSON_UPDATE(BSONINFO,
'{$set: {"empno":"111111"}}')
WHERE SEQNO = 1
%sql -j SELECT INFO FROM EMP_TXS
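%%sql
-- Hedged extra example, not part of the original notebook: the DELETE trigger defined
-- above can be exercised through the view as well; the INSTEAD OF trigger removes the
-- matching row from the base table by sequence number.
DELETE FROM EMP_TXS WHERE SEQNO = 1;
SELECT COUNT(*) FROM BASE_EMP_TXS;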
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Dataset Creation
Step2: The dataset stands as follows. There are more than 2.8 million entries, and all of the cell values are integers.
Step3: The device ids are now linked to the app labels so that the information about the different categories of apps is included. In this case the 20-digit identifier of the app is replaced by a category that identifies the kind of app that is active on each device.
Step4: The new dataset is cleansed by removing the empty values and changing the variable types so that the tables can be merged afterwards without disproportionately increasing the number of rows.
Step5: Information is retrieved about the size and type of the variables in the dataset. The dataset is now ready to be merged.
Step6: The dataset containing data about the usage of the apps, the activity and the device id is linked with the dataset containing the device id and information about the gender, age and age group. The age groups used are the ones provided in the dataset and have not been changed.
Step7: Information about the new datasets is retrieved to see the type of variables and to clean the data eliminating any missing cell.
Step8: Further cleansing is done dropping the empty cells and resetting the index to have a better view of the data points available.
Step9: In the Chinese market, the share of installed apps that are actually used equates to roughly 50%.
Step10: Visual inspection of the distribution of data across ages shows that the data is skewed to the right, meaning that most of the data represents the age group between 20 and 35 years old. This corresponds to the age-wise grouping that has been applied.
Step11: Each group contains 20 apps in total. From an app-usage standpoint, the most important group of apps corresponds to financial, lottery and gaming apps, followed by apps related to smart appliances and wearables in second place. Third place is taken by the apps used for banking and payment services. On the lower end, the apps that show the smallest usage are the leisure-related ones such as photography, wedding planners or trips abroad. These apps amount to 90 and present lower usage due to their characteristics and the tastes of the sample.
Step12: Dummy variables are created based on the apps that are installed on the devices. In this case 484 additional columns are created with categorical data that counts the appearance of each app category in the device. To avoid the dummy variable trap whereby collinearity exists between the variables one redundant category is dropped when transforming the categorical variables into dummies.
Step13: Before adding the data about the phone brands and models, a dictionary has been taken from Kaggle and augmented with additional brands to translate the brands from Chinese to English. Additionally, those brands that have no translation have been designated with "UBXX", where UB stands for unknown brand and XX are two digits. This has been done to ease queries on the dataframe and visualizations of the dataset. The translation of the brands from Chinese to English has been included in the phone dataset and dummy variables have been created for each brand. This results in a sparse data frame with zero if the device is not of that brand and 1 when the device is associated with a certain brand.
Step14: The phone brands dataset has been added to the previous dataset containing all the information about apps, age, gender, etc. In the instances in which the device id does not appear the NaN results have been filled in with zeros. The devices that are duplicate have been deleted to make the dataset manageable from a computing perspective reducing from two million rows to 23k data rows. The device id and device model columns have been dropped as in further research they were not adding information for the feature generation. Additionally, a dummy variable has been created for the variable gender.
Step15: As it is shown in the graph below, eight brands represent 90% of the sample in the Chinese market.
Step16: To avoid the dummy variable trap whereby collinearity exists between the variables one redundant category is dropped when transforming the categorical variables into dummies.
Step17: The new dataset has been cleansed eliminating all the columns that are zeros. In this case, 345 columns have been deleted related to brands that were not associated to devices. The output variable that groups gender and age “group” has been transformed into numbers so that supervised algorithms are used for prediction.
Step18: The gender split in the data is not even, with more men than women in the customer base of the company. This has an impact on the further segmentation by age (including gender), since the groups corresponding to women have a lower number of data points. A visual inspection of the groups shows that they are unbalanced, which will impact the accuracy of the classification models. Further resampling is required; in this case, up-sampling will be considered using the number of samples in the largest group.
Step19: As the different groups are not balanced, the minority groups are balanced resampling the data and up-sampling the minority groups. This will allow the different algorithms to achieve a better accuracy in general. Algorithms like random forest are highly sensitive to unbalanced groups in the output variable.
Step20: As part of the data cleansing actions, the device id column has been dropped and dummy variables have been created for the gender variable additionally all those columns that are constant and filled in only with zeros have been dropped. To avoid the dummy variable trap whereby collinearity exists between the variables one redundant category is dropped when transforming the categorical variables into dummies. The different groups have been mapped to 0-11 to be used in further modelling. In this case although different trials have taken place, after improving the significance of the features the twelve groups for the classification problem have been kept.
Step21: As it can be appreciated the average age of the users is 31 years with a standard deviation of nearly 10 years. The interquartile age is between 26 and 37 years. For each of the labels and brands it can be seen that all the ones included appear at least once as it was expected from the data wrangling actions. Although group appears in the table, the results obtained means that the group M23-26 grouping male between 23 and 26 is the most significant one in terms of appearance.
Step22: The dataset has been split between the predictors and the output variable. As abovementioned, the output variable is the group of age and gender and the input variables are the features created based on the app labels, gender and phone brand used by the users. To improve the quality of the features the data has been preprocessed and scaled.
Step23: The size of the final dataset is 274 variables and 40k rows of data from which features will be generated.
Step24: To have a better understanding of the correlation between variables, and due to the large amount of variables in this dataset, a correlation matrix has been created. To check the level of correlation between variables, only those with a correlation greater than 0.5 and lower than 1 (in absolute terms) have been printed.
Step25: As can be observed, only the phone brand "OPPO" and the app label 706 present the highest correlation, equal to 0.42. Further inspection shows that this brand includes a "customized" app in all the phones to access special features. As expected, there is no negative correlation higher than 0.4 (in absolute terms) due to the characteristics of the data. There are no apps that cannot be installed on certain phone brands or that decrease when the number of phones of that brand increases.
Step26: 3. Feature Generation
Step27: Due to the characteristics of the variables the use of all the features create a lot of noise and when the supervised models are run, there is overfitting in all cases. Hence, the number of variables is reduced using Kbest using only those that have an explanatory power higher than 1. A dataframe is created with this new set of features.
Step28: To select the features, Feature Importance using Random Forest is used. In this case the result given by the random forest feature selection is not meaningful therefore the result given by kbest() will be used as a first step before running PCA for generating the new set of features. From the random forest feature selection it can be observed that the number of features has to be reduced significantly as there are a lot of features that are adding noise and with low explanatory power.
Step29: The feature selection using Radnom forest shows the high contribution of age, gender and certain brands over the apps ids. It cn be seen that all of them have a contribution and after running iteratively on the 16 best features selected by Random Forest the accuracy obtained with Logistic Regression was very low. Once features from kbest were added to the feature space accuracy results improved. Hence the features obtained using kbest() will be the ones that will be transformed using PCA. As a second step, the variables selected by Kbest are transformed using PCA analysis. For this, the correlation matrix of the new set of variables is created and non existing values are filled in with zeros.
Step30: Calculate the eigen-values and vectors and determine the explained variance by each component.
Step31: From the PCA analysis abovementioned the PCA features are created. After analysing the feature generation using recursive feature analysis and feature selection using kbest(), the PCA features will be used. Additionally, there is no need to have the initial features as predictors or now which ones have been used as the main purpose of the classification process is to maximize the accuracy.
Step32: 4. Model Selection
Step33: All models' hyperparameters will be tuned in the training set using cross validation and gridsearch tuning several parameters considered in the pipeline (gridsearch). Except for specific cases that will be discussed one by one, the algorithm or variations (solver, etc) to be used has been decided in advance considering the topology of the dataset.
Step34: The tuned model is fit and run on the test set and the computational effort is measured considering the time required to fit the test set.
Step35: The model requires 3.3 min to run which will be used as a time threshold to measure the computational effort of other models. To calculate the accuracy the model is evaluated on the test set. From the classification report it can be seen that the data representing each class is evenly distributed across the classes. This reduces the probability of bias of the model when calculating the probabilities of each predicted value.
Step36: The overall accuracy of the Logistic Regression model is 95.62%. Although the overall accuracy is good and the computational effort low, the classification report and confusion matrix show overfitting as there will be some data points misclassified. The high accuracy of the model can be explained by the lack of correlation between the predictors (independent variables). Only two variables out of 120 presented a correlation of 0.5 (max correlation value in the dataset) which have been eliminated when during the feature generation process and through PCA. The PCA components are all independent from each other and by definition there is no collinearity between the 120 components that have been chosen. Additionally linearity between the independent variable and the log odds exists. Furthermore, the dataset is big enough to use this classification algorithm.
Step37: The value for "alpha" is the smallest of the values chosen to tune the parameters of the model. In this case the gridsearch has been carried out with values ranging from 0.001 to 10 before fitting the above set. In all cases, 10 was the value that was appearing as the best one in terms of overall accuracy. The tuned model is fit and run on the test set and the computational effort is measured considering the time required to fit the test set.
Step38: Once the algorithm is trained it is run on the test set. The maximum precision obtained is 70% for the first class being all of the rest lower than 50%. From an accuracy perspective this algorithm doesn't seem to be a good candidate for our product. Cross validation has been performed to avoid overfitting
Step39: In this case the low accuracy of the Naïve-Bayes classifier can be explained because of the continuous structure of the data once the scaler and PCA has been applied. Although the features are strongly independent due to the PCA transformation of them, this classifier is good when it is used for binary problems with two classes. In this case, the existence of 12 different classes makes it harder for this algorithm to classify accurately.
Step40: The value for "n_neighbors" is the smallest of the values chosen to tune the parameters of the model. In this case the gridsearch has been carried out with values ranging from 3 to 11, always odd as the number of classes is even before fitting the above set. In all cases, 5 was the value that was appearing as the best one in terms of overall accuracy. The algorithm to be used has been set to "auto" and the algorithm used is "brute force" in this case as k < n_samples/2 and no specific metrics have been given. In this case the leaf size has been set to the default value of 30 considering the number of features used. The choice fo distance as the weight instead of uniform is reasonable were the points are weighted by the inverse of their distance. In this case features are different from each other ranging from different types of apps to device brands. The tuned model is fit and run on the test set and the computational effort is measured considering the time required to fit the test set.
Step41: In this case, due to the characteristics of the algorithm (lazy algorithm) all the work is done in the previous step as the prediction is required. This algorithm goes through all the dataset comparing each data point with the instances that it has previously seen while it was trained. This could be the reason why this step requires some more time than the previous and next ones (1.1 min per fold).
Step42: As it was expected the accuracy obtained for this problem with this algorithm is lower than the values that are normally obtained with lower dimensionalities. As previously discussed this has to do with the fact that the Euclidean distance doesn´t perform accurately with high dimensionality problems and that it is used to measure the weight of each vote through the inverse of its value.
Step43: The parameters show that the smoothing continues to be loose as a first option as it is a regression with a gradient descendent algorithm. Regarding the loss, the hinge loss is used which means that the real loss, in case it is not convergent due to the sparse data used, is replaced by the upper bond forcing its convergence. Time required is significantly higher than with previous classifiers.
Step44: From a visual inspection of the classification report it can be observed how the lack of clear boundaries between data points is impacting the overall accuracy. Only in the case of group 6 the precision is over 90%, for the rest of them the maximum precision obtained is at a maximum of 60%. This will show up when the results are cross validated giving low accuracies.
Step45: As the features describing each group are scattered not having clear boundaries between each group, the result obtained from the SGD algorithm is low and it is not expected to grow with a higher number of iterations. In this case and using square hinge the decision boundary will not be able to improve the misclassification of the different points. In any case, the accuracy is too low to be used 47.88%. Although the SGD solves the same classification problems than the logistic regression and can be more efficient in this case the improvement from a computing effort perspective does not compensate the low results obtained by the classifier.
Step46: The number of trees used is 170 and the depth of each tree is 31. After several simulations, the parameters have stayed stable around these values. The random forest algorithm is an ensemble algorithm that works by bringing together different decision trees and is more powerful than an individual decision tree. In this case it introduces randomness because it chooses, from a random sample of features, the one that decreases how often a randomly chosen element would be incorrectly labelled using the gini criterion. The high number of estimators ensures the accuracy of the model while it increases the computational effort required. In this case, it is important to have the classes balanced, as in one of the 31 nodes it could happen that one of the minority classes disappears. From the random forest feature selection only "is active", "gender" and "age" have significant explanatory power, the rest of the features being noise to the model. The noise has been reduced by reducing the number of features and applying PCA to the model to gain in the explanatory power of the variance. The number of trees in the random forest classifier decreases the risk of overfitting, while the number of nodes ("depth") reduces the samples and features available in each sample, which can increase the risk of misclassification and reduce the overall accuracy. Moreover, "deep" trees can offset the lower risk of overfitting provided by increasing the number of trees, as depth increases the probability of overfitting overall.
Step47: There is a significant increase in the computational effort required by this algorithm as it was expected. The classification report and the classification matrix present overfitting as the precision in all cases is one and there are no misclassified elements. To avoid this overfitting problem, cross validation has been performed on the random forest.
Step48: In this case, the overall accuracy of the model is 81.04%, which is somewhat low for this type of algorithm. In this case, and after running the random forest feature selection, this might have happened due to the additional features that increase the noise instead of adding information to the algorithm. As there are only four, when run in a set of trees of 31 nodes it can happen that none of them remains until the end, misclassifying results based on the remaining features that the algorithm picks randomly.
Step49: Although the Linear SVC has been implemented as it is more scalable than the support vector classifier, the time required to fit the values compared to the other algorithms makes it a weak candidate to go into production. Compared to the logistic regression classifier there must be a significant accuracy increase for this classifier to be worth it in terms of computational effort. C has been tuned to 100 to control the misclassification that is allowed by this classifier. The tradeoff between the values of this parameter is the bias-variance trade-off. If the parameter is low the classifier will allow a small number of misclassifications, having a low bias but allowing a high variance. In our case, the parameter is high, which is better from a scaling perspective but might introduce a high amount of bias, underfitting the data in the classification problem. Hinge loss has been selected as the loss function to calculate the weights of the misclassifications on the training data and apply them to the test data.
Step50: Although the computational effort required when fitting the dataset is lower than initially expected, the accuracy is higher than in the case of the random forest and very close to the logistic regression classifier. The classification report and the classification matrix present overfitting as the precision in all cases is one and there are no misclassified elements. To avoid this overfitting problem, cross validation has been performed on the random forest.
Step51: In this case the support vector classifier uses a linear kernel for the kernel trick and a sparse representation of data (aligned to the features that have been generated for this problem) reducing the amount of computing effort required. In this case, there is a need to create linear hyperplanes that is able to separate one class over the rest until the 12 have been classified. Due to the characteristics of the data (PCA components are used as features and are oriented to maximize the explanation of the variance across classes) added to the C parameter the accuracy of the Linear Support Vector Classifier is higher than expected requiring less computational effort than the initially foreseen.
Step52: In this case, as it is a boosting model, the data is passed over and over again tuning the parameters every time the data is passed. This is the reason why compared to the random forest it requires so much time. The number of trees is higher than the number of trees used in random forest and the depth is nearly doubling the one previously calculated for the random forest. In this case the computational effort has grown exponentially compared to other classifiers so it is expected that the accuracy is much higher than in the case of the logistic regression classifier to be a candidate for production. In this case, as it is based on random forest a slight increase of its accuracy is expected, hence it will not be as high as the one achieved with the logistic regression for the same reasons that applied to the random forest.
Step53: In this case, the algorithm uses the gradient descendent algorithm to follow the steepest path that reduces the loss. In each step the tree is fitted to predict a negative gradient. The friedman_mse parameter used by default calculates the step after the direction has been set. As in the case of Random Forest, this classifier presents overfitting in the classification report and confusion matrix. To reduce the overfitting cross validation is applied.
Step54: The gradient boosting algorithm has a similar approach to the random forest in the classification of the classes. In this case the depth of the trees is bigger and so is the number of trees. The number of trees helps to reduce the overfitting, while the depth has a negative effect on the misclassification, reducing the accuracy. The same principle regarding the feature selection applies as in the case of random forest, so it is not strange to find in this case an overall accuracy close to the one obtained with random forest. The implementation of the descendent gradient over the random forest algorithm has not significantly improved the accuracy while it has increased the computational effort required to achieve it. Hence, this algorithm is discarded for production.
Step55: The time required to tune the parameters has been lower than expected especially for a dataset as large as ours. The use of a high C aims to classify the training examples correctly by selecting more samples as support vectors. In this case, the use of PC helps to set up the boundaries using data that has been preprocessed to explain the maximum possible variance by rotating it. The model is fit in the test set.
Step56: As expected, the confusion matrix and classification report present overfitting. To avoid the overfitting presented in both cases, cross validation is used to fit to obtain the overall accuracy.
Step57: In this case the accuracy obtained is 92.29%, which is similar to the one obtained with the logistic regression model but requires less computational effort. This is due to the transformation done when preprocessing the data with PCA, as the kernel trick requires less time than expected (the data is already on the axes that maximize the variance) and therefore less computational power. This is a strong candidate to go into production if the PCA is maintained before the algorithm is run, as otherwise the time (and complexity) will increase quadratically.
Step58: To match the input requirements of the neural network, the dependent variable is transformed into a categorical sparse matrix. This matrix will have the same number of rows but will create one row per class used in the dependent vector.
Step59: Although gridsearchCV from sklearn can be used to tune the parameters if the neural network in this case and due to computational restrictions, a trial and error approach has been selected. The process to build the model has been as follows.
Step60: After several trials the optimum batch size for the model and epochs are 1250 and 200 respectively. Once the model is trained on the train set is tested and an overall accuracy of 98.91% is achieved. This accuracy is higher than all the accuracies achieved with the previous models.
Step61: Although the accuracy obtained by this model is higher than the one achieved by any other model, the logistic regression and support vector machine models are the preferred ones as candidates for production. The neural network, although it gives a higher accuracy does not allow to distinguish the explanatory power of each of the features that are used. This is not required during image recognition but is necessary for the purpose of this exercise. Furthermore, the model is far more complex than the previous one and requires more computational power once it is in production. For all this reasons although the accuracy is higher than in the previous cases, the logistic regression and support vector machine models are still the best candidates for production.
Step62: The dataset contains information that will not be normalized and that will be used afterwards such as gender, age and group (age range).
Step63: To work with the different clustering techniques a copy of the dataset will be done and the information will be normalized. This will improve the performance of the different clustering techniques.
Step64: A dataframe containing the information about the groups, age and gender is built and the groups are codified into integers. An independent variable is created containing this information to check the accuracy of the clustering technique that is selected.
Step65: Once the affinity propagation technique has been tested, the number of clusters is excessive for the data points available reason why this technique is discarded.
Step66: Once the affinity propagation technique is discarded, the meanshift technique is tested. In this case different bandwidths have been tested to see the kernel density surfaces that better suits the data. Higher bandwidths create a smoother kernel density surface, leading to fewer peaks because smaller hills are smoothed out, whereas lower bandwidths lead to a surface with more peaks.
Step67: The silhouette score is compared between spectral clustering and kmeans. After checking and discarding the Affinity clustering technique due to the number of clusters obtained, both spectral and kmeans are compared to select the best one from a clustering perspective. The number of clusters varies from 2 to 11 (as initially preset) to see which one offers better clusters based on the silhouette score.
Step68: For lower numbers of clusters kmeans presents a higher silhouette score, while for larger numbers of clusters close to the preset number of groups spectral clustering performs better. Further analysis will be done from a kmeans clustering perspective to see the best number of clusters for this clustering technique, following the elbow technique.
Step69: The best number of clusters from both the silhouette and the elbow technique perspective is six that will be the number of clusters used following this technique to cluster the customer base. The data will be clustered using kmeans and 6 clusters to get insights about the customer base. In this case, five, six and seven clusters have been tried being 6 the most relevant one.
Step70: The predictive capacity of kmeans clustering with six clusters will be checked. In this case the predictive performance is very low 15% compared to the one obtained from the supervised models. Hence its use as a predictive tool is discarded for classification purposes.
Step71: The data is aggregated by clusters and gender to inspect the average age per cluster. In all clusters the average age is the same standard deviation so it is not representative of the groups
Step72: Information regarding the apps that people use is obtained from the clusters. It can be seen that three apps are the most used ones across clusters App 549, 706 and 548 that according to the documentation equate to Property Industry 1.0, Customized 1 and Industry tag. In the first and third cases they are construction inspection and tagging apps. The second case seems to be an app that is preinstalled in the phone. This might indicate the kind of population we are addressing although the calls are done from all around China. It could be the case that the sample is representative of a specific kind of population and the data has already being segmented reason why the average age in all clusters is nearly the same and around 30 years.
Step73: In this case and following the same clustering scheme, only information relative to the phone brands has been kept. It appears that for the population under analysis, Xiaomi, OPPO and Samsung are the most popular ones being Vivo more popular among the female than male and Meizu more popular among male compared to female.
Step74: To be able to plot the different clusters PCA is run and the 2 principal components are extracted. In this case two are preferred to three components due to computational restrictions.
Step75: Once the principal components are obtained, a plotting function is defined with the characteristics of the plot that is going to be produced.
|
<ASSISTANT_TASK:>
Python Code:
#Imports assumed by the cells below (added here because the original notebook cells rely on them but they are not shown in this excerpt)
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import mode
from sklearn import preprocessing, decomposition
from sklearn.preprocessing import normalize, StandardScaler
from sklearn.utils import resample
from sklearn.feature_selection import SelectKBest
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, KFold, GridSearchCV, cross_val_score
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.cluster import KMeans, AffinityPropagation, MeanShift, estimate_bandwidth, SpectralClustering
from sklearn.metrics import classification_report, confusion_matrix, silhouette_score, accuracy_score
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.optimizers import RMSprop
#Upload the data into the notebook and select the rows that will be used after previous visual inspection of the datasets
datadir = 'D:/Users/Borja.gonzalez/Desktop/Thinkful-DataScience-Borja'
gatrain = pd.read_csv('gender_age_train.csv',usecols=['device_id','gender','age','group'] )
gatest = pd.read_csv('gender_age_test.csv')
phone = pd.read_csv('phone_brand_device_model.csv')
app_events = pd.read_csv('app_events.csv', usecols = ['event_id', 'app_id','is_active'])
applabels = pd.read_csv('app_labels.csv')
events = pd.read_csv('events.csv', usecols = ['event_id', 'device_id'])
# Get rid of duplicate device ids in phone
phone = phone.drop_duplicates('device_id',keep='first')
#Merge app_events and events to access the app_ids associated with each device
device_apps = (
app_events
# Merge on event_id
.merge(events, how = 'left', left_on = 'event_id', right_on = 'event_id')
# event_id itself is not interesting
.drop('event_id', axis = 1)
# Because the events correspond to more than just
# being installed, there are many duplicates
.drop_duplicates())
#Get information about the new dataset
print(device_apps.info())
#Print the first five rows fo the dataset
device_apps.head()
#Translate the apps_id to apps categories to build features
apps = (
device_apps
# Merge on event_id
.merge(applabels, how = 'left', left_on = 'app_id', right_on = 'app_id')
# event_id itself is not interesting
.drop('app_id', axis = 1)
# Because the events correspond to more than just
# being installed, there are many duplicates
.drop_duplicates())
#Clean the dataset removing empty cells from the data set
apps = apps.dropna()
#Convert device_id to integer so the datasets can be merged on it
apps['device_id'] = apps['device_id'].astype(int)
#Merge the column back into the dataset
gatrain['device_id'] = gatrain['device_id'].astype(int)
#Get information on the new dataset and check that the conversion to integers has happened before
#merging datasets
print(apps.info())
#Print the first five rows of the dataframe
apps.head()
#Merge dataset about devices and apps with demographic data
apps_with_groups = (
apps
# Merge on event_id
.merge(gatrain, how = 'left', on = 'device_id')
# event_id itself is not interesting
# .drop('device_id', axis = 1)
# Because the events correspond to more than just
# being installed, there are many duplicates
.drop_duplicates())
#Get information about the dataset
print(apps_with_groups.info())
#Identify null values in the dataset
print(apps_with_groups.isnull().sum())
#Drop empty cells
apps_with_groups = apps_with_groups.dropna()
#Reset index in the new dataset
apps_with_groups.reset_index(drop= True)
#Print first five rows
apps_with_groups.head()
#Plot installed vs active apps
plt.figure(figsize=(20, 5))
sns.set_style("white")
ax = sns.countplot(x="is_active", data=apps_with_groups, palette="Set3")
ax.set_title('Active vs Installed Apps')
ax.set_ylabel('Number of Occurrences')
ax.set_xticklabels(['Installed','Active'], fontsize=10)
plt.tight_layout()
plt.show()
#Plot the distribution of age in the dataset
plt.figure(figsize=(20, 9))
apps_with_groups.age.hist(bins=50, grid = False)
plt.title('Age Distribution of the Sample')
plt.xlabel('Age')
plt.ylabel('Frequency')
plt.tight_layout()
plt.show()
#Plot grouping the apps in different bins to understand its usage.
#Bins range from 10-50 for more detailed analysis
plt.figure(figsize=(20, 9))
apps_with_groups.label_id.value_counts(bins = 50).plot(kind='bar', grid=False)
plt.title('Apps grouped by usage')
plt.xlabel('Group of Apps')
plt.ylabel('Occurrences')
plt.tight_layout()
plt.show()
#Create dummy variables for the categories found in the apps
dataset_with_dummy_variables = pd.get_dummies(apps_with_groups, columns = ['label_id'], sparse = True).reset_index(drop= True)
#Print the first 5 rows of data
dataset_with_dummy_variables.head()
#Add a placeholder brand name "UBXX" for each Chinese brand for which we don't have a translation
english_phone_brands_mapping = {"三星": "samsung","天语": "Ktouch", "海信": "hisense", "联想": "lenovo", "欧比": "obi",
"爱派尔": "ipair", "努比亚": "nubia", "优米": "youmi", "朵唯": "dowe", "黑米": "heymi",
"锤子": "hammer", "酷比魔方": "koobee", "美图": "meitu", "尼比鲁": "nibilu", "一加": "oneplus",
"优购": "yougo", "诺基亚": "nokia", "糖葫芦": "candy", "中国移动": "ccmc", "语信": "yuxin",
"基伍": "kiwu", "青橙": "greeno", "华硕": "asus", "夏新": "panasonic", "维图": "weitu",
"艾优尼": "aiyouni", "摩托罗拉": "moto", "乡米": "xiangmi", "米奇": "micky", "大可乐": "bigcola",
"沃普丰": "wpf", "神舟": "hasse", "摩乐": "mole", "飞秒": "fs", "米歌": "mige", "富可视": "fks",
"德赛": "desci", "梦米": "mengmi", "乐视": "lshi", "小杨树": "smallt", "纽曼": "newman",
"邦华": "banghua", "E派": "epai", "易派": "epai", "普耐尔": "pner", "欧新": "ouxin", "西米": "ximi",
"海尔": "haier", "波导": "bodao", "糯米": "nuomi", "唯米": "weimi", "酷珀": "kupo", "谷歌": "google",
"昂达": "ada", "聆韵": "lingyun", "小米": "Xiaomi", "华为": "Huawei", "魅族": "Meizu", "中兴": "ZTE",
"酷派": "Coolpad", "金立": "Gionee", "SUGAR": "SUGAR", "OPPO": "OPPO", "vivo": "vivo", "HTC": "HTC",
"LG": "LG", "ZUK": "ZUK", "TCL": "TCL", "LOGO": "LOGO", "SUGAR": "SUGAR", "Lovme": "Lovme",
"PPTV": "PPTV", "ZOYE": "ZOYE", "MIL": "MIL", "索尼" : "Sony", "欧博信" : "Opssom", "奇酷" : "Qiku",
"酷比" : "CUBE", "康佳" : "Konka", "亿通" : "Yitong", "金星数码" : "JXD", "至尊宝" : "Monkey King",
"百立丰" : "Hundred Li Feng", "贝尔丰" : "Bifer", "百加" : "Bacardi", "诺亚信" : "Noain",
"广信" : "Kingsun", "世纪天元" : "Ctyon", "青葱" : "Cong", "果米" : "Taobao", "斐讯" : "Phicomm",
"长虹" : "Changhong", "欧奇" : "Oukimobile", "先锋" : "XFPLAY", "台电" : "Teclast", "大Q" : "Daq",
"蓝魔" : "Ramos", "奥克斯" : "AUX", "索尼" : "Sony", "欧博信" : "Opssom", "奇酷" : "Qiku",
"酷比" : "CUBE", "康佳" : "Konka", "亿通" : "Yitong", "金星数码" : "JXD", "至尊宝" : "Monkey King",
"百立丰" : "Hundred Li Feng", "贝尔丰" : "Bifer", "百加" : "Bacardi", "诺亚信" : "Noain",
"广信" : "Kingsun", "世纪天元" : "Ctyon", "青葱" : "Cong", "果米" : "Taobao", "斐讯" : "Phicomm",
"长虹" : "Changhong", "欧奇" : "Oukimobile", "先锋" : "XFPLAY", "台电" : "Teclast", "大Q" : "Daq",
"蓝魔" : "Ramos", "奥克斯" : "AUX", "飞利浦": "Philips", "智镁": "Zhimei", "惠普": "HP",
"原点": "Origin", "戴尔": "Dell", "碟米": "Diemi", "西门子": "Siemens", "亚马逊": "Amazon",
"宏碁": "Acer",
'世纪星': "UB1", '丰米': "UB2", '优语':'UB3', '凯利通': "UB4", '唯比': "UB5", '嘉源': "UB6",
'大显': "UB7", '天宏时代': "UB8", '宝捷讯': 'UB9','帷幄': 'UB10', '德卡诺': 'UB11',
'恒宇丰': 'UB12', '本为': 'UB13', '极米': 'UB14', '欧乐迪': 'UB15', '欧乐酷': 'UB16',
'欧沃': 'UB17', '瑞米': 'UB18', '瑞高': 'UB19', '白米': 'UB20', '虾米': 'UB21', '赛博宇华': 'UB22',
'首云': 'UB23', '鲜米': 'UB24'}
#Replace the brands in Chinese for the brands in English
phone['phone_brand'].replace(english_phone_brands_mapping, inplace=True)
#Drop the device model column as it is not adding information
phone = phone.drop('device_model',axis=1)
#Retrieve information about the new dataset before getting the dummy variables
phone.info()
#Plot the distribution of brands by occurrence
plt.figure(figsize=(20, 9))
phone.phone_brand.value_counts().plot(kind='bar', grid=False)
plt.title('Distribution of Phones per Brand')
plt.xlabel('Brands')
plt.ylabel('Occurrences')
plt.tight_layout()
plt.show()
#Build the dummy variables with the phone brand
phone_dummies = pd.get_dummies(phone, columns = ['phone_brand'], sparse = True).reset_index(drop= True)
#Merge the demographic dataset with the phone dataset
final_dataset = (
dataset_with_dummy_variables
# Merge on event_id
.merge(phone_dummies, how = 'left', left_on = 'device_id', right_on = 'device_id')
# event_id itself is not interesting
.fillna(0))
#Information about the dataset
final_dataset.info()
#Drop from the final dataset duplicates for the devices keeping the first one
final_dataset = final_dataset.drop_duplicates('device_id',keep= 'first').reset_index(drop=True)
#Get information on the new dataset to see the rows that have been eliminated
print(final_dataset.info())
#Print the first five rows of the dataset
final_dataset.head()
#Visualize the number of answers by Gender and by Category
#Check the outcome variable and see if there is any imbalance
plt.figure(figsize=(20, 5))
sns.set_style("white")
plt.subplot(1, 2, 1)
ax = sns.countplot(x="gender", data=final_dataset, palette="Set2")
ax.set_title('Classification by Gender')
ax.set_ylabel('Number of Occurrences')
ax.set_xticklabels(['Male','Female'], fontsize=10)
plt.ylim(0, 17500)
plt.subplot(1, 2, 2)
ax = sns.countplot(x="group", data=final_dataset, palette="Set1")
ax.set_title('Classification by Age Range & Gender')
ax.set_ylabel('Number of Occurrences')
ax.set_xticklabels(['M39+','M32-38', 'M29-31', 'M27-28', 'M23-26','M22-','F43+','F33-42', 'F29-32', 'F27-28', 'F24-26', 'F23-'], fontsize=10)
plt.ylim(0, 4000)
plt.tight_layout()
plt.show()
#Count number of datapoints for each group to resample all of them.
print(final_dataset['group'].value_counts())
#Upsample minority classes
# Separate majority and minority classes
final_dataset_majority = final_dataset[final_dataset.group=='M32-38']
#Minorty classes
final_dataset_minority_1 = final_dataset[final_dataset.group=='M39+']
final_dataset_minority_2 = final_dataset[final_dataset.group=='M23-26']
final_dataset_minority_3 = final_dataset[final_dataset.group=='M29-31']
final_dataset_minority_4 = final_dataset[final_dataset.group=='M22-']
final_dataset_minority_5 = final_dataset[final_dataset.group=='F33-42']
final_dataset_minority_6 = final_dataset[final_dataset.group=='M27-28']
final_dataset_minority_7 = final_dataset[final_dataset.group=='F29-32']
final_dataset_minority_8 = final_dataset[final_dataset.group=='F23-']
final_dataset_minority_9 = final_dataset[final_dataset.group=='F43+']
final_dataset_minority_10 = final_dataset[final_dataset.group=='F24-26']
final_dataset_minority_11 = final_dataset[final_dataset.group=='F27-28']
# Upsample airlines minorities
final_dataset_upsampled_1 = resample(final_dataset_minority_1, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_2 = resample(final_dataset_minority_2, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_3 = resample(final_dataset_minority_3, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_4 = resample(final_dataset_minority_4, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_5 = resample(final_dataset_minority_5, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_6 = resample(final_dataset_minority_6, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_7 = resample(final_dataset_minority_7, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_8 = resample(final_dataset_minority_8, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_9 = resample(final_dataset_minority_9, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_10 = resample(final_dataset_minority_10, replace=True, n_samples=3338, random_state=123)
final_dataset_upsampled_11 = resample(final_dataset_minority_11, replace=True, n_samples=3338, random_state=123)
# Combine majority class with upsampled minority classes
final_dataset_upsampled = pd.concat([final_dataset_majority, final_dataset_upsampled_1, final_dataset_upsampled_2,
final_dataset_upsampled_3, final_dataset_upsampled_4, final_dataset_upsampled_5,
final_dataset_upsampled_6, final_dataset_upsampled_7, final_dataset_upsampled_8,
final_dataset_upsampled_9, final_dataset_upsampled_10, final_dataset_upsampled_11])
# Display new class counts
final_dataset_upsampled.group.value_counts()
#Get dummy variables for the gender variable and reset index
clean_final_dataset = pd.get_dummies(final_dataset_upsampled, columns = ['gender'], sparse = True,).reset_index(drop= True)
#Delete column with device_id
clean_final_dataset = clean_final_dataset.drop('device_id', axis = 1)
#Delete columns that are all zeros
clean_final_dataset = clean_final_dataset.drop(clean_final_dataset.columns[(clean_final_dataset == 0).all()], axis = 1)
#Substitute the categorical output variable
clean_final_dataset['group'] = clean_final_dataset['group'].map({'M39+' :0,'M32-38':1, 'M29-31':2, 'M27-28':3, 'M23-26':4,'M22-': 5,
'F43+' :6,'F33-42':7, 'F29-32':8, 'F27-28':9, 'F24-26':10, 'F23-':11 })
#Print the first five rows of the new dataset
clean_final_dataset.head()
#Describe the data using statistics
clean_final_dataset.describe()
#Build the predictors and output variables
X = clean_final_dataset.drop('group',axis = 1)
y = clean_final_dataset.group
#Preprocess and scale data
names = X.columns
X_processed = pd.DataFrame(normalize(preprocessing.scale(X)), columns = names)
#Check shape of X_processed and y
print(X_processed.shape, y.shape)
#Print the groups in y
print(y.unique())
#Build the correlation matrix between scores
correlation_mat = X_processed.corr()
#Check the correlation between values
corr_values = correlation_mat.unstack()
corr_values_sorted = corr_values.sort_values(kind="quicksort", ascending=False)
#Check high postivie correlations (between 0.4 and 1)
print(('Values presenting correlation between 0.4 and 1 (excluding 1) \n\n {}:')
.format(corr_values_sorted[corr_values_sorted.between(0.4, 0.99, inclusive=True)]))
#Check high negative correlations (between -0.4 and -1)
print(('Values presenting correlation between -0.4 and -1 (excluding -1) \n\n {}:')
.format(corr_values_sorted[corr_values_sorted.between(-0.4, -0.99, inclusive=True)]))
#Split the dataset into a training and testing dataset 70/30
X_train, X_test, y_train, y_test = train_test_split(X_processed, y,test_size=0.3, random_state=42)
#Feature Selection.
#Scores for the most relevant features (should we start with the one that has more explanatory power)
#Feature extraction. Starting and fitting the model
test = SelectKBest()
fit = test.fit(X_processed, y)
#Identify features with highest score from a predictive perspective (for all programs)
names = X_processed.columns
#Put the features and scores into a dataframe.
best_features = pd.DataFrame(fit.scores_, index = names).reset_index()
best_features.columns = ['Best Features', 'Scores']
#Show the features in descending order from those that have more explanatory power to the ones that have less.
best_features.sort_values(by=['Scores'], ascending=False)
#Select all the features that have an explanatory power higher than 1.
list_of_best_features = best_features.loc[best_features['Scores'] > 1]['Best Features'].tolist()
#Create a dataframe with the new features
columns = list_of_best_features
X_best_features = pd.DataFrame(X_processed, columns = columns).reset_index(drop= True)
#Print information of the dataset
X_best_features.info()
#Start the Random Forest Classifier
rf = RandomForestClassifier()
rf.fit(X_processed, y)
#Define feature importance
feature_importance = rf.feature_importances_
# Make importances relative to max importance.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
#Plot the features importance
plt.figure(figsize=(7, 50))
plt.subplot(1, 1, 1)
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, X_processed.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Feature Importance')
plt.show()
# Assign the value to a new variable
corr_variable = X_best_features
#Build the correlation matrix
correlation_matrix = corr_variable.corr()
#Clean the correlation matrix adding zeros to the cells that are non existent
correlation_matrix = correlation_matrix.fillna(0)
#Eigenvectores & Eigenvalues
eig_vals, eig_vecs = np.linalg.eig(correlation_matrix)
sklearn_pca = PCA(n_components=len(corr_variable.columns))
Y_sklearn = sklearn_pca.fit_transform(correlation_matrix)
print(
'The percentage of total variance in the dataset explained by each',
'component from Sklearn PCA.\n',
sklearn_pca.explained_variance_ratio_
)
#PCA features
# Create a scaler object
sc = StandardScaler()
# Fit the scaler to the features and transform
X_std = X_best_features
# Create a PCA object from Scree plot the number of components is 120
pca = decomposition.PCA(n_components=120)
# Fit the PCA and transform the data
X_std_pca = pca.fit_transform(X_std)
# View the new feature data's shape
X_std_pca.shape
# Create a new dataframe with the new features
X_pca = pd.DataFrame(X_std_pca)
#Check the shape of the dataframe containing the PCA components
X_pca.shape
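# (Added illustrative check, not in the original notebook) The 120 components kept
# above come from the scree plot; printing the cumulative explained variance ratio
# of the fitted PCA object is a quick sanity check on that choice.
print('Cumulative variance explained by 120 components: {0:.3f}'.format(np.cumsum(pca.explained_variance_ratio_)[-1]))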
#Split into test and train sets
X_train, X_test, y_train, y_test = train_test_split(X_pca, y, test_size=0.3, random_state=42)
#KFold for cross validation analysis
kf = KFold(5)
# Initialize and fit the model.
log_reg = LogisticRegression(class_weight='balanced', multi_class= 'multinomial', solver = 'lbfgs', max_iter = 1500)
#Tune parameters: C parameter
c_param = [100, 200, 1000]
parameters = {'C': c_param}
#Fit parameters
log_reg_tuned = GridSearchCV(log_reg, param_grid=parameters, n_jobs = -1, cv=kf, verbose = 1)
#Fit the tunned classifier in the training space
log_reg_tuned.fit(X_train, y_train)
#Print the best parameters
print(('Best parameters logistic regression:\n {}\n').format(log_reg_tuned.best_params_))
#Once the model has been trained test it on the test dataset
log_reg_tuned.fit(X_test, y_test)
# Predict on test set
predtest_y = log_reg_tuned.predict(X_test)
#Evaluation of the model (testing)
#Define the Target values
target_names = ['0.0', '1.0','2.0','3.0','4.0','5.0','6.0','7.0','8.0','9.0','10.0','11.0']
#Print the Classification Report
print(('Classification Report: \n {}').format(classification_report(y_test, predtest_y, target_names=target_names)))
#Calculate the confusion matrix
confusion_lr = confusion_matrix(y_test, predtest_y)
#Print the Confusion Matrix
print(('Confusion Matrix: \n\n {}\n').format(confusion_lr))
#Print the overall accuracy per class
print(('Logistic Regression set accuracy: {0:.2f} % \n').format(cross_val_score(log_reg_tuned, X_test,
y_test,cv=kf).mean()*100))
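# (Added sketch, optional) The raw 12x12 confusion matrix printed above is hard to read;
# a seaborn heatmap is one way to visualise it. This is illustrative only, and the same
# pattern works for any of the models evaluated below.
plt.figure(figsize=(8, 6))
sns.heatmap(confusion_lr, annot=True, fmt='d', cmap='Blues')
plt.xlabel('Predicted class')
plt.ylabel('True class')
plt.title('Logistic regression confusion matrix')
plt.show()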
# Initialize and fit the model.
naive_bayes_bernoulli= BernoulliNB()
#Tune hyperparameters
#Create range of values to fit parameters
alpha = [10, 100, 200, 1000]
parameters = {'alpha': alpha}
#Fit parameters using gridsearch
naive_bayes_bernoulli_tuned = GridSearchCV(naive_bayes_bernoulli, n_jobs = -1, param_grid=parameters, cv=kf, verbose = 1)
#Fit the tunned classifier in the training space
naive_bayes_bernoulli_tuned.fit(X_train, y_train)
#Print the best parameters
print(('Best parameters Naive-Bayes Bernoulli: \n{}\n').format(naive_bayes_bernoulli_tuned.best_params_))
#Once the model has been trained test it on the test dataset
naive_bayes_bernoulli_tuned.fit(X_test, y_test)
# Predict on test set
predtest_y = naive_bayes_bernoulli_tuned.predict(X_test)
#Evaluation of the model (testing)
#Define the Target values
target_names = ['0.0', '1.0','2.0','3.0','4.0','5.0','6.0','7.0','8.0','9.0','10.0','11.0']
#Print the Classification Report
print(('Classification Report: \n {}').format(classification_report(y_test, predtest_y, target_names=target_names)))
#Calculate the Confusion Matrix
confusion_ber = confusion_matrix(y_test, predtest_y)
#Print the Confusion Matrix
print(('Confusion Matrix: \n\n {}\n').format(confusion_ber))
#Print the overall accuracy
print(('Bernoulli Classifier set accuracy: {0:.2f} % \n').format(cross_val_score(naive_bayes_bernoulli_tuned,
X_test,
y_test,
cv=kf).mean()*100))
#Train model
# Initialize and fit the model.
KNN = KNeighborsClassifier()
#Tune hyperparameters
#Create range of values to fit parameters
neighbors = [5, 7,9, 11]
weights = ['uniform', 'distance']
#Fit parameters
parameters = {'n_neighbors': neighbors, 'weights': weights}
#Fit parameters using gridsearch
KNN_tuned = GridSearchCV(KNN, param_grid=parameters, n_jobs = -1, cv=kf, verbose = 1)
#Fit the tunned classifier in the training space
KNN_tuned.fit(X_train, y_train)
#Print the best parameters
print(('Best parameters KNN:\n {}\n').format(
KNN_tuned.best_params_))
#Once the model has been trained test it on the test dataset
KNN_tuned.fit(X_test, y_test)
# Predict on test set
predtest_y = KNN_tuned.predict(X_test)
#Evaluation of the model (testing)
#Define targets
target_names = ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0', '11.0']
#Print classification report
print(('Classification Report KNN: \n {}\n').format(classification_report(y_test, predtest_y, target_names=target_names)))
#Calculate confusion matrix
confusion_knn = confusion_matrix(y_test, predtest_y)
#Print confusion matrix
print(('Confusion Matrix KNN: \n\n {}\n\n').format(confusion_knn))
#Print overall accuracy
print(('KNN accuracy: {0:.2f} %\n').format(cross_val_score(KNN_tuned, X_test, y_test,cv=kf).mean()*100))
#Train model
# Initialize and fit the model.
SGD = SGDClassifier(class_weight = 'balanced', max_iter=1000)
#Tune hyperparameters
#Create range of values to fit parameters
loss_param = ['hinge', 'squared_hinge']
alpha_param = [0.0001, 0.001, 0.01, 0.1, 1]
#Fit parameters
parameters = {'loss': loss_param,'alpha': alpha_param}
#Fit parameters using gridsearch
SGD_tuned = GridSearchCV(SGD, param_grid=parameters, n_jobs = -1, cv=kf, verbose = 1)
#Fit the tunned classifier in the training space
SGD_tuned.fit(X_train, y_train)
#Print the best parameters
print(('Best parameters SGD:\n {}\n').format(SGD_tuned.best_params_))
#Once the model has been trained test it on the test dataset
SGD_tuned.fit(X_test, y_test)
# Predict on test set
predtest_y = SGD_tuned.predict(X_test)
#Evaluation of the model (testing)
#Define the Target values
target_names = ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0', '11.0']
#Print the Classification report
print(('Classification Report: \n {}\n').format(classification_report(y_test, predtest_y,target_names=target_names)))
#Calculate the confusion Matrix
confusion_sgd = confusion_matrix(y_test, predtest_y)
#Print the Confusion Matrix
print(('Confusion Matrix: \n\n {}\n\n').format(confusion_sgd))
#Print the SGD overall accuracy
print(('SGD accuracy: {0:.2f} %\n').format(cross_val_score(SGD_tuned, X_test, y_test,cv=kf).mean()*100))
# Initialize and fit the model.
rf = RandomForestClassifier()
#Tune hyperparameters
#Create range of values to fit parameters
n_estimators_param = np.arange(50,191,10)
max_depth_param = np.arange(1,40,5)
#Fit parameters
parameters = {'n_estimators': n_estimators_param, 'max_depth': max_depth_param}
#Fit parameters using gridsearch
rf_tuned = GridSearchCV(rf, param_grid=parameters, n_jobs = -1, cv=kf, verbose = 1)
#Fit the tunned classifier in the training space
rf_tuned.fit(X_train, y_train)
#Print the best parameters
print(('Best parameters Random Forest:\n {}\n').format(rf_tuned.best_params_))
#Once the model has been trained test it on the test dataset
rf_tuned.fit(X_test, y_test)
# Predict on test set
predtest_y = rf_tuned.predict(X_test)
#Evaluation of the model (testing)
#Define the targets
target_names = ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0', '11.0']
#Print the classification report
print(('Classification Report RF: \n {}\n').format(classification_report(y_test, predtest_y,target_names=target_names)))
#Calculate the confusion matrix
confusion_rf = confusion_matrix(y_test, predtest_y)
#Print the confusion matrix
print(('Confusion Matrix RF: \n\n {}\n\n').format(confusion_rf))
#Print the overall accuracy
print(('Random Forest accuracy RF: {0:.2f} %\n').format(cross_val_score(rf_tuned,
X_test,
y_test, cv=kf).mean()*100))
# Initialize and fit the model.
LSVC = LinearSVC(multi_class = 'crammer_singer')
#Tune hyperparameters
#Create range of values to fit parameters
loss_param = ['hinge','squared_hinge']
C_param = [10, 100, 1000]
#Fit parameters
parameters = {'loss': loss_param, 'C': C_param}
#Fit parameters using gridsearch
LSVC_tuned = GridSearchCV(LSVC, param_grid=parameters, n_jobs = -1, cv=kf, verbose = 1)
#Fit the tunned classifier in the training space
LSVC_tuned.fit(X_train, y_train)
#Print the best parameters
print(('Best parameters Linear SVC:\n {}\n').format(LSVC_tuned.best_params_))
#Once the model has been trained test it on the test dataset
LSVC_tuned.fit(X_test, y_test)
# Predict on test set
predtest_y = LSVC_tuned.predict(X_test)
#Evaluation of the model (testing)
#Define the targets
target_names = ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0', '11.0']
#Print the classification report
print(('Classification Report: \n {}\n').format(
classification_report(y_test, predtest_y,
target_names=target_names)))
#Calculate the confusion matrix
confusion_svc = confusion_matrix(y_test, predtest_y)
#Print the confusion matrix
print((
'Confusion Matrix: \n\n {}\n\n').format(confusion_svc))
#Print the overall accuracy
print((
'Linear SVC accuracy: {0:.2f} %\n'
).format(cross_val_score(LSVC_tuned, X_test, y_test,cv=kf).mean()*100))
# Train model
GBC = GradientBoostingClassifier()
#Tune hyperparameters
#Create range of values to fit parameters
n_estimators_param = np.arange(140,211,10)
max_depth_param = np.arange(30,71,10)
#Fit parameters
parameters = {'n_estimators': n_estimators_param, 'max_depth': max_depth_param}
#Fit parameters
GBC_tuned = GridSearchCV(GBC, param_grid=parameters, n_jobs = -1, cv=kf, verbose = 1)
#Fit the tunned model
GBC_tuned.fit(X_train, y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", GBC_tuned.best_params_)
#Fit on the test set
GBC_tuned.fit(X_test, y_test)
# Predict on test set
predtestgb_y = GBC_tuned.predict(X_test)
#Evaluation of the model (testing)
#Define the targets
target_names = ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0', '11.0']
#Print the classification report
print(('Classification Report: \n {}\n').format(classification_report(y_test, predtestgb_y,target_names=target_names)))
#Calculate the confusion matrix
confusion_GBC = confusion_matrix(y_test, predtestgb_y)
#Print the confusion matrix
print(('Confusion Matrix: \n\n {}\n\n').format(confusion_GBC))
#Print the overall accuracy
print(( 'Gradient Boosting Classifier accuracy: {0:.2f} %\n').format(cross_val_score(GBC_tuned
, X_test, y_test,cv=kf).mean()*100))
# Train model
# Initialize and fit the model.
svc = SVC(class_weight='balanced')
#Tune hyperparameters
#Create range of values to fit parameters
C_param = [10,100,1000]
#Fit parameters
parameters = {'C': C_param}
#Fit parameters using gridsearch
svc_tunned = GridSearchCV(svc, param_grid=parameters, n_jobs = -1, cv=kf, verbose = 1)
#Fit the tunned classifier in the training space
svc_tunned.fit(X_train, y_train)
#The best hyper parameters set
print("Best Hyper Parameters:", svc_tunned.best_params_)
#Fit tunned model on Test set
svc_tunned.fit(X_test, y_test)
# Predict on training set
predtestsvc_y = svc_tunned.predict(X_test)
#Evaluation of the model (testing)
#Define the targets
target_names = ['0.0', '1.0', '2.0', '3.0', '4.0', '5.0', '6.0', '7.0', '8.0', '9.0', '10.0', '11.0']
#Print the classification report
print(('Classification Report: \n {}\n').format(classification_report(y_test, predtestsvc_y, target_names=target_names)))
#Calculate the confusion matrix
confusion_SVC = confusion_matrix(y_test, predtestsvc_y)
#Print the confusion matrix
print(('Confusion Matrix: \n\n {}\n\n').format(confusion_SVC))
#Print the overall accuracy
print(( 'SVC accuracy: {0:.2f} %\n').format(cross_val_score(svc_tunned, X_test, y_test,cv=kf).mean()*100))
#Split data in a train and test set
X_tr, X_te, y_tr, y_te = train_test_split(X_processed, y,test_size=0.3, random_state=42)
#Check the size of each of the sets.
X_tr.shape, X_te.shape, y_tr.shape, y_te.shape
# Convert class vectors to binary class matrices
# So instead of one column with 10 values, create 12 binary columns
y_train_mlp = keras.utils.to_categorical(y_tr, num_classes=12)
y_test_mlp = keras.utils.to_categorical(y_te, num_classes=12)
print(y_train_mlp.shape, y_test_mlp.shape)
#Start the model
model = Sequential()
#Set up the outter layer
model.add(Dense(264, activation='relu', input_dim=274))
# Dropout layers remove features and fight overfitting
model.add(Dropout(0.5))
model.add(Dense(264, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(264, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(264, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(264, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(264, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(264, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# End with a number of units equal to the number of classes we have for our outcome
model.add(Dense(12, activation='softmax'))
#Model features
model.summary()
# Compile the model to put it all together.
RMS = RMSprop()
model.compile(loss='categorical_crossentropy',optimizer= RMS,metrics=['accuracy'])
#Run the model
history = model.fit(X_tr, y_train_mlp,
epochs=200,
verbose=1,
batch_size=1250)
#Evaluate the model
score = model.evaluate(X_te, y_test_mlp, verbose=1)
#Print results
print('Test loss:', score[0])
print('Test accuracy:', score[1])
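# (Added sketch, optional) model.fit() above returned a History object; plotting the
# stored training accuracy per epoch helps judge convergence. The metric key is
# 'acc' in older Keras versions and 'accuracy' in newer ones, so both are handled.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.figure(figsize=(10, 5))
plt.plot(history.history[acc_key])
plt.xlabel('Epoch')
plt.ylabel('Training accuracy')
plt.show()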
#Delete column with device_id
uns_final_dataset = final_dataset.drop('device_id', axis = 1)
#Delete columns that are all zeros
uns_final_dataset = uns_final_dataset.drop(uns_final_dataset.columns[(uns_final_dataset == 0).all()], axis = 1)
#Print the first five rows of the new dataset
uns_final_dataset.head()
#Drop gender, age and group for the analysis and obtain the information describing the dataset
uns_final_dataset_clean= uns_final_dataset.drop(['gender','age','group'],axis=1)
#Extract information
uns_final_dataset_clean.info()
#Make a copy of DF
X_tr = uns_final_dataset_clean
#Normalize the feature matrix (row-wise unit norm); note this is normalization rather than z-score standardization
X_tr_std = normalize(X_tr)
#Dataframe containing information about the group, age and gender
uns_final_dataset_additional_information = final_dataset[['group','age','gender']]
#Codify the groups into integers
uns_final_dataset_additional_information['group'] = uns_final_dataset_additional_information['group'].map({'M39+' :0,'M32-38':1, 'M29-31':2, 'M27-28':3, 'M23-26':4,'M22-': 5,
'F43+' :6,'F33-42':7, 'F29-32':8, 'F27-28':9, 'F24-26':10, 'F23-':11 })
#Build the reference variable (true group labels) used to check the classifying accuracy of the selected clustering technique
y = uns_final_dataset_additional_information['group']
# Declare the model and fit it.
af = AffinityPropagation().fit(X_tr_std)
# Pull the number of clusters and cluster assignments for each data point.
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
labels = af.labels_
print('Estimated number of clusters: {}'.format(n_clusters_))
# Here we set the bandwidth. This function automatically derives a bandwidth
# number based on an inspection of the distances among points in the data.
for quantile in np.linspace(0.1,1,10,endpoint=False):
bandwidth = estimate_bandwidth(X_tr_std, quantile=quantile, n_samples=5000)
# Declare and fit the model.
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X_tr_std)
# Extract cluster assignments for each data point.
labels = ms.labels_
# Coordinates of the cluster centers.
cluster_centers = ms.cluster_centers_
# Count clusters.
n_clusters_ = len(np.unique(labels))
print('Bandwidth:', quantile)
print("Number of estimated clusters: {}".format(n_clusters_))
#Compare from a silhouette_score perspective kmeans against Spectral Clustering
range_n_clusters = np.arange(10)+2
for n_clusters in range_n_clusters:
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
spec_clust = SpectralClustering(n_clusters=n_clusters)
cluster_labels1 = spec_clust.fit_predict(X_tr_std)
silhouette_avg1 = silhouette_score(X_tr_std, cluster_labels1)
kmeans = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10).fit(X_tr_std)
cluster_labels2 = kmeans.fit_predict(X_tr_std)
silhouette_avg2 = silhouette_score(X_tr_std, cluster_labels2)
print("For n_clusters =", n_clusters,
"av. sil_score for Spec. clust is :", silhouette_avg1,
"av. sil_score for kmeans is :",silhouette_avg2 )
# In order to find the optimal number of K we use elbow method.
#Iterate on the number of clusters
cluster_error = []
range_n_clusters = range(2,12, 1)
for k in range_n_clusters:
kmeanModel = KMeans(n_clusters=k, init='k-means++', n_init=11)
kmeanModel.fit(X_tr_std)
cluster_error.append(kmeanModel.inertia_)
#Build DataFrame
clusters_df = pd.DataFrame({ "num_clusters": range_n_clusters , 'cluster_error': cluster_error })
# Plot the elbow Plot
plt.figure(figsize=(10,5))
plt.plot(clusters_df.num_clusters, clusters_df.cluster_error, marker = "o")
plt.xlabel('k')
plt.ylabel('Cluster Error')
plt.title('The Elbow Method')
plt.show()
#Cluster the data
#Number of clusters
num_clusters=6
#Cluster the information
kmeans = KMeans(n_clusters=num_clusters, init='k-means++', n_init=10).fit(X_tr_std)
labels = kmeans.labels_
#Glue back to original data
X_tr['clusters'] = labels
X_tr['gender'] = uns_final_dataset.gender
X_tr['age'] = uns_final_dataset.age
X_tr['group'] = uns_final_dataset.group
clusters = kmeans.fit_predict(X_tr_std)
# Permute the labels
labels = np.zeros_like(clusters)
for i in range(num_clusters):
mask = (clusters == i)
labels[mask] = mode(y[mask])[0]
# Compute the accuracy
print(accuracy_score(y, labels))
#Determine pivot table
X_age = X_tr.pivot_table(values='age', index=["clusters","gender"], aggfunc=[np.mean,np.std])
#Print the age distribution
print(X_age)
#Show only columns containing apps
filter_col = [col for col in X_tr if col.startswith('label_id')]
#Determine pivot table
X_apps = X_tr.pivot_table(values=filter_col, index=["clusters","gender"], aggfunc=np.sum)
#Print the apps that are more relevant
print(X_apps.idxmax(axis=1))
#Show table
print(X_apps)
#Filter columns that only have the brands
filter_col = [col for col in X_tr if col.startswith('phone_brand')]
#Determine pivot table
X_phone_brand = X_tr.pivot_table(values=filter_col, index=["clusters",'gender'], aggfunc=np.sum)
#Print the brand that appears more times
print(X_phone_brand.idxmax(axis=1))
#Print the table for further analysis
print(X_phone_brand)
# mash the data down into 2 dimensions
ndimensions = 2
#Run PCA analysis and create 2 PC
pca = PCA(n_components=ndimensions, random_state=123)
pca.fit(X_tr_std)
#Transform the PC to arrays to build a dataframe
X_pca_array = pca.transform(X_tr_std)
#Build the dataframe with PCA components
X_pca = pd.DataFrame(X_pca_array, columns=['PC1','PC2']) # PC=principal component
#Build a dataframe copying the result obtained form PCA analysis
df_plot = X_pca.copy()
#Add to the dataframe the labels of the clusters and the information relative to the groups
df_plot['clusters'] = X_tr['clusters']
df_plot['groups'] = y
#Print head
df_plot.head()
#Plot the clusters using PCA components
#Define the plotting function
def plotData(df, groupby):
"make a scatterplot of the first two principal components of the data, colored by the groupby field"
# make a figure with just one subplot specifying multiple subplots
fig, ax = plt.subplots(figsize = (7,7))
# color map
cmap = mpl.cm.get_cmap('prism')
#plot each cluster on the same graph.
for i, cluster in df.groupby(groupby):
cluster.plot(ax = ax, # need to pass this so all scatterplots are on same graph
kind = 'scatter',
x = 'PC1', y = 'PC2',
color = cmap(i/(num_clusters-1)), # cmap maps a number to a color
label = "%s %i" % (groupby, i),
s=30) # dot size
ax.grid()
ax.axhline(0, color='black')
ax.axvline(0, color='black')
ax.set_title("Principal Components Analysis (PCA) of Mobile Operator Data")
# forked from https://www.kaggle.com/bburns/iris-exploration-pca-k-means-and-gmm-clustering
#Plot the clusters each datapoint was assigned to
plotData(df_plot, 'clusters')
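# (Added sketch, optional) Since both the kmeans model and the 2-component PCA were
# fitted on the same normalized matrix X_tr_std, the cluster centres can be projected
# into the same PC1/PC2 plane, e.g. to overlay them on the scatter plot above.
centers_pca = pca.transform(kmeans.cluster_centers_)
print(pd.DataFrame(centers_pca, columns=['PC1', 'PC2']))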
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the Manufactured solution
Step2: Compute the forcing function.
Step3: Build a string of the exact and forcing function to be copied to the input file (levelset_mms.i).
Step4: Demonstrate how the solution reaches steady state.
Step5: Compute the exact solution at a point (0.1).
Step6: Show the computed results together with the exact solution.
Step7: Convergence Plot
Step8: Fit line to data.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import glob
from sympy import *
import numpy
import matplotlib.pyplot as plt
import pandas
init_printing()
x,t,a,b= symbols('x t a b')
u = 1+a*exp(1/(10*t))*sin(2*pi/b*x)
u
f = diff(u, t) + diff(u, x)
f
str(u).replace('**', '^')
str(f).replace('**', '^')
filenames = glob.glob('level_set_mms_0*.csv')
print(filenames)
results = []
for fname in filenames:
results.append(pandas.DataFrame(pandas.read_csv(fname, index_col='time')))
times = results[-1]['point'].keys()
pfunc = Lambda(t, u.subs([(x, 0.1), (a, 1), (b, 8)]))
exact = pandas.Series([pfunc(i).evalf() for i in times], index=times)
fig = plt.figure(figsize=(18,9))
axes = fig.add_subplot(111)
axes.plot(exact.keys(), exact.values, '-k', linewidth=3, label='exact') # pandas.Series plot method not working
for i in range(len(results)):
x = results[i]['point'].keys()
y = results[i]['point'].values
axes.plot(x, y, label='Level ' + str(i))
plt.legend(loc='lower left')
n = len(results)
error = numpy.zeros(n)
h = numpy.zeros(n)
for i in range(n):
error[i] = results[i]['error'].iloc[-1]
h[i] = 1./results[i]['h'].iloc[-1]
coefficients = numpy.polyfit(numpy.log10(h), numpy.log10(error), 1)
coefficients
fig = plt.figure(figsize=(18,9))
axes = fig.add_subplot(111)
axes.plot(h, error, 'sk')
axes.set(xscale='log', yscale='log', xlabel='1/h', ylabel='L2 Error',)
polynomial = numpy.poly1d(coefficients)
axes.plot(h, pow(10, polynomial(numpy.log10(h))))
axes.grid(True, which='both')
plt.text(h[0], error[-1], 'Slope: ' + str(coefficients[0]), fontsize=14)
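# (Added check, optional) The fitted slope is the observed convergence rate of the
# spatial discretization; printing it rounded makes the order easy to read off.
print('Observed convergence rate: {0:.2f}'.format(coefficients[0]))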
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Creating Figures
Step2: First, create an empty figure with 2 subplots
Step3: Now let's actually plot the data using the plot method on an axis
Step4: Many of the basic formatting problems you have will be solved by the magic of tight_layout. Before you start tweaking how your figure looks, try it out.
Step5: To save your figure you can use the savefig command
Step6: TRY IT
Step7: 3.2 Tick marks
Step8: Oh no! That changed it for the last plot but not for the first plot.
Step9: 3.3 Axis limits
Step10: 3.4 Setting tick labels
Step11: 3.5 Legend
Step12: TRY IT
Step13: Many of the same formatting options as the line plot are available for these additional plots. There are also some other options. The gallery (section 5) is the best place to find all the options.
Step14: 4.2 Plotting images
Step15: TRY IT
|
<ASSISTANT_TASK:>
Python Code:
# In IPython or the IPython notebook, it's easiest to use the pylab magic, which
# imports matplotlib, numpy, and scipy.
# The matplotlib notebook flag means that plots will be shown interactively in the
# notebooks, rather than in pop-up windows.
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
# First we make some data to plot
x = np.linspace(-2*np.pi, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
fig, axes = plt.subplots(1,2)
fig, axes = plt.subplots(1,2)
# We plot one line on the first axis
axes[0].plot(x, y1)
# and both lines on the second axis
axes[1].plot(x, y1)
axes[1].plot(x, y2);
fig, axes = plt.subplots(1,2)
axes[0].plot(x, y1)
axes[1].plot(x, y1)
axes[1].plot(x, y2)
plt.tight_layout();
fig, axes = plt.subplots(1,2)
axes[0].plot(x, y1)
axes[1].plot(x, y1)
axes[1].plot(x, y2)
fig.savefig('first_plot.png', format='png');
x = np.linspace(-2*np.pi, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
fig, axes = plt.subplots(1,2)
axes[0].plot(x, y1, color='r', linewidth=5)
axes[1].plot(x, y1, color='#ffaa00', linewidth=0.5, linestyle='--')
axes[1].plot(x, y2, color='green', linestyle='-.');
x = np.linspace(-2*np.pi, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
fig, axes = plt.subplots(1,2)
axes[0].plot(x, y1, color='r', linewidth=5)
axes[1].plot(x, y1, color='#ffaa00', linewidth=0.5, linestyle='--')
axes[1].plot(x, y2, color='green', linestyle='-.')
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, 1]);
x = np.linspace(-2*np.pi, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
fig, axes = plt.subplots(1,2)
axes[0].plot(x, y1, color='r', linewidth=5)
axes[1].plot(x, y1, color='#ffaa00', linewidth=0.5, linestyle='--')
axes[1].plot(x, y2, color='green', linestyle='-.')
# Set the current axis to the first subplot
fig.sca(axes[0])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, 1])
# Set the current axis to the second subplot
fig.sca(axes[1])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, 1]);
x = np.linspace(-2*np.pi, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
fig, axes = plt.subplots(1,2)
axes[0].plot(x, y1, color='r', linewidth=5)
axes[1].plot(x, y1, color='#ffaa00', linewidth=0.5, linestyle='--')
axes[1].plot(x, y2, color='green', linestyle='-.')
# Set the current axis to the first subplot
fig.sca(axes[0])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, 1])
# set x and y limits
plt.xlim(-np.pi, np.pi)
plt.ylim(-1, 1)
# Set the current axis to the second subplot
fig.sca(axes[1])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
plt.yticks([-1, 0, 1])
# set x and y limits
plt.xlim(-np.pi, np.pi)
plt.ylim(-1, 1);
x = np.linspace(-2*np.pi, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
fig, axes = plt.subplots(1,2)
axes[0].plot(x, y1, color='r', linewidth=5)
axes[1].plot(x, y1, color='#ffaa00', linewidth=0.5, linestyle='--')
axes[1].plot(x, y2, color='green', linestyle='-.')
# Set the current axis to the first subplot
fig.sca(axes[0])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
# You probably don't want to set the labels when you just want the exact numbers.
plt.yticks([-1, 0, 1], ['-1', '0', '1'])
# set x and y limits
plt.xlim(-np.pi, np.pi)
plt.ylim(-1, 1)
# Set the current axis to the second subplot
fig.sca(axes[1])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
plt.yticks([-1, 0, 1], ['-1', '0', '1'])
# set x and y limits
plt.xlim(-np.pi, np.pi)
plt.ylim(-1, 1);
x = np.linspace(-2*np.pi, 2*np.pi)
y1 = np.sin(x)
y2 = np.cos(x)
fig, axes = plt.subplots(1,2)
# Let's set labels here
axes[0].plot(x, y1, color='r', linewidth=5, label='sin(x)')
axes[1].plot(x, y1, color='#ffaa00', linewidth=0.5, linestyle='--', label='sin(x)')
axes[1].plot(x, y2, color='green', linestyle='-.', label='cos(x)')
# Set the current axis to the first subplot
fig.sca(axes[0])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
# You probably don't want to set the labels when you just want the exact numbers.
plt.yticks([-1, 0, 1], ['-1', '0', '1'])
plt.legend(loc='best')
# set x and y limits
plt.xlim(-np.pi, np.pi)
plt.ylim(-1, 1)
# Set the current axis to the second subplot
fig.sca(axes[1])
# set x and y ticks
plt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi], ['-pi', '-pi/2', '0', 'pi/2', 'pi'])
plt.yticks([-1, 0, 1], ['-1', '0', '1'])
plt.legend(loc='upper right')
# set x and y limits
plt.xlim(-np.pi, np.pi)
plt.ylim(-1, 1);
# Make some data to plot
x = np.arange(0, 100)
y = np.random.rand(100) # 100 random numbers
# Make a figure with 6 subplots and axes
# Notice that we are doing some arguement unpacking to get six subplots. You can use indexing instead if you prefer
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2)
# Add data to each axis. Optional arguments to each method will customize each plot.
ax1.bar(x,y)
ax2.scatter(x,y)
ax3.barh(x,y)
ax4.boxplot(x)
ax5.loglog(x,y)
ax6.semilogx(x,y)
fig, ax = plt.subplots(1,1)
ax.scatter(x, y, marker='x')
ax.scatter(x, y + 2, marker='>', color='#00aaff')
# Read image from file and display it
img1 = plt.imread('astronaut.png')
# Uncomment following line to prove it still works without the alpha channel
# img1 = img1[:,:, 0:3]
fig, ax = plt.subplots(1,1)
ax.imshow(img1)
# We can plot random noise in the viridis colormap.
img2 = np.random.rand(128, 128)
fig, ax = plt.subplots(1,1)
ax = ax.imshow(img2, cmap='viridis')
# %load http://matplotlib.org/mpl_examples/pylab_examples/contour_demo.py
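# The %load magic above would pull in matplotlib's contour_demo example, whose source
# is not included here. Below is a minimal, illustrative contour plot standing in for
# it (a sketch, not the exact gallery example).
delta = 0.025
cx = np.arange(-3.0, 3.0, delta)
cy = np.arange(-2.0, 2.0, delta)
CX, CY = np.meshgrid(cx, cy)
Z = np.exp(-CX**2 - CY**2) - np.exp(-(CX - 1)**2 - (CY - 1)**2)
fig, ax = plt.subplots(1, 1)
CS = ax.contour(CX, CY, Z)
ax.clabel(CS, inline=True, fontsize=10)
ax.set_title('Simplest default with labels');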
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the Fermi distribution
Step3: In this equation, $\epsilon$ is the single-particle energy, $\mu$ is the chemical potential, and $kT$ is the temperature multiplied by Boltzmann's constant.
Step4: Write a function plot_fermidist(mu, kT) that plots the Fermi distribution $F(\epsilon)$ as a function of $\epsilon$ as a line plot for the parameters mu and kT.
Step5: Use interact with plot_fermidist to explore the distribution
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Image
from IPython.html.widgets import interact, interactive, fixed
Image('fermidist.png')
def fermidist(energy, mu, kT):
    """Compute the Fermi distribution at energy, mu and kT."""
return (np.exp((energy-mu)/kT)+1)**-1
assert np.allclose(fermidist(0.5, 1.0, 10.0), 0.51249739648421033)
assert np.allclose(fermidist(np.linspace(0.0,1.0,10), 1.0, 10.0),
np.array([ 0.52497919, 0.5222076 , 0.51943465, 0.5166605 , 0.51388532,
0.51110928, 0.50833256, 0.50555533, 0.50277775, 0.5 ]))
np.arange(0,10.01,0.01)
def plot_fermidist(mu, kT):
energy=np.arange(0,10.01,0.01)
plt.figure(figsize=(10,6))
plt.plot(energy,fermidist(energy,mu,kT))
plt.tick_params(axis='x', top='off')
plt.tick_params(axis='y', right='off')
plt.xlabel('Energy')
plt.xlim(left=0, right=10)
plt.ylim(bottom=0.0,top=1.0)
plt.ylabel('Fermi Distribution')
plot_fermidist(4.0, 1.0)
assert True # leave this for grading the plot_fermidist function
interact(plot_fermidist, mu=(0.0,5.0,0.1), kT=(0.1,10.0,0.1));
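# (Added illustration) `fixed` was imported above; it can be used to hold one
# parameter constant while exploring the other, e.g.:
interact(plot_fermidist, mu=(0.0, 5.0, 0.1), kT=fixed(1.0));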
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Please find jax implementation of this notebook here
Step4: Basics
Step6: Tokenization
Step9: Vocabulary
Step10: Here are the top 10 words (and their codes) in our corpus.
Step11: Here is a tokenization of a few sentences.
Step13: Putting it altogether
Step14: One-hot encodings
Step16: Language modeling
Step17: For example, let us generate a sequence 0,1,..,34, and then extract subsequences of length 5. Each minibatch will have 2 such subsequences, starting at random offsets. There is no ordering between the subsequences, either within or across minibatches. There are $\lfloor (35-1)/5 \rfloor = 6$ such subsequences, so the iterator will generate 3 minibatches, each of size 2.
Step19: Sequential ordering
Step20: Below we give an example. We see that the first subsequence in batch 1 is continued by the first subsequence in batch 2: with sequential partitioning, subsequences at the same position in adjacent minibatches are adjacent on the original sequence.
Step24: Data iterator
Step26: Machine translation
Step28: Preprocessing
Step30: We tokenize at the word level. The following tokenize_nmt function tokenizes the the first num_examples text sequence pairs, where each token is either a word or a punctuation mark.
Step31: Vocabulary
Step34: Truncation and padding
Step37: Data iterator
Step38: Show the first minibatch.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(seed=1)
import math
try:
import torch
except ModuleNotFoundError:
%pip install -qq torch
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import data
!mkdir figures # for saving plots
import collections
import re
import random
import os
import requests
import zipfile
import hashlib
# Required functions for downloading data
def download(name, cache_dir=os.path.join("..", "data")):
    """Download a file inserted into DATA_HUB, return the local filename."""
assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}."
url, sha1_hash = DATA_HUB[name]
os.makedirs(cache_dir, exist_ok=True)
fname = os.path.join(cache_dir, url.split("/")[-1])
if os.path.exists(fname):
sha1 = hashlib.sha1()
with open(fname, "rb") as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
if sha1.hexdigest() == sha1_hash:
return fname # Hit cache
print(f"Downloading {fname} from {url}...")
r = requests.get(url, stream=True, verify=True)
with open(fname, "wb") as f:
f.write(r.content)
return fname
def download_extract(name, folder=None):
    """Download and extract a zip/tar file."""
fname = download(name)
base_dir = os.path.dirname(fname)
data_dir, ext = os.path.splitext(fname)
if ext == ".zip":
fp = zipfile.ZipFile(fname, "r")
elif ext in (".tar", ".gz"):
fp = tarfile.open(fname, "r")
else:
assert False, "Only zip/tar files can be extracted."
fp.extractall(base_dir)
return os.path.join(base_dir, folder) if folder else data_dir
DATA_HUB = dict()
DATA_URL = "http://d2l-data.s3-accelerate.amazonaws.com/"
DATA_HUB["time_machine"] = (DATA_URL + "timemachine.txt", "090b5e7e70c295757f55df93cb0a180b9691891a")
def read_time_machine():
    """Load the time machine dataset into a list of text lines."""
with open(download("time_machine"), "r") as f:
lines = f.readlines()
return [re.sub("[^A-Za-z]+", " ", line).strip().lower() for line in lines]
lines = read_time_machine()
print(f"number of lines: {len(lines)}")
for i in range(11):
print(i, lines[i])
nchars = 0
nwords = 0
for i in range(len(lines)):
nchars += len(lines[i])
words = lines[i].split()
nwords += len(words)
print("total num characters ", nchars)
print("total num words ", nwords)
def tokenize(lines, token="word"):
    """Split text lines into word or character tokens."""
if token == "word":
return [line.split() for line in lines]
elif token == "char":
return [list(line) for line in lines]
else:
print("ERROR: unknown token type: " + token)
tokens = tokenize(lines)
for i in range(11):
print(tokens[i])
class Vocab:
    """Vocabulary for text."""
def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
if tokens is None:
tokens = []
if reserved_tokens is None:
reserved_tokens = []
# Sort according to frequencies
counter = count_corpus(tokens)
self.token_freqs = sorted(counter.items(), key=lambda x: x[1], reverse=True)
# The index for the unknown token is 0
self.unk, uniq_tokens = 0, ["<unk>"] + reserved_tokens
uniq_tokens += [token for token, freq in self.token_freqs if freq >= min_freq and token not in uniq_tokens]
self.idx_to_token, self.token_to_idx = [], dict()
for token in uniq_tokens:
self.idx_to_token.append(token)
self.token_to_idx[token] = len(self.idx_to_token) - 1
def __len__(self):
return len(self.idx_to_token)
def __getitem__(self, tokens):
if not isinstance(tokens, (list, tuple)):
return self.token_to_idx.get(tokens, self.unk)
return [self.__getitem__(token) for token in tokens]
def to_tokens(self, indices):
if not isinstance(indices, (list, tuple)):
return self.idx_to_token[indices]
return [self.idx_to_token[index] for index in indices]
def count_corpus(tokens):
    """Count token frequencies."""
# Here `tokens` is a 1D list or 2D list
if len(tokens) == 0 or isinstance(tokens[0], list):
# Flatten a list of token lists into a list of tokens
tokens = [token for line in tokens for token in line]
return collections.Counter(tokens)
vocab = Vocab(tokens)
print(list(vocab.token_to_idx.items())[:10])
for i in [0, 10]:
print("words:", tokens[i])
print("indices:", vocab[tokens[i]])
def load_corpus_time_machine(max_tokens=-1):
    """Return token indices and the vocabulary of the time machine dataset."""
lines = read_time_machine()
tokens = tokenize(lines, "char")
vocab = Vocab(tokens)
# Since each text line in the time machine dataset is not necessarily a
# sentence or a paragraph, flatten all the text lines into a single list
corpus = [vocab[token] for line in tokens for token in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
corpus, vocab = load_corpus_time_machine()
len(corpus), len(vocab)
print(corpus[:20])
print(list(vocab.token_to_idx.items())[:10])
print([vocab.idx_to_token[i] for i in corpus[:20]])
x = torch.tensor(corpus[:3])
print(x)
X = F.one_hot(x, len(vocab))
print(X.shape)
print(X)
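# (Added illustration) For a minibatch of token indices of shape (batch_size, num_steps),
# F.one_hot appends a vocabulary-size axis, giving (batch_size, num_steps, vocab_size).
Xb = torch.arange(6).reshape(2, 3)
print(F.one_hot(Xb, len(vocab)).shape)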
def seq_data_iter_random(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using random sampling."""
# Start with a random offset (inclusive of `num_steps - 1`) to partition a
# sequence
corpus = corpus[random.randint(0, num_steps - 1) :]
# Subtract 1 since we need to account for labels
num_subseqs = (len(corpus) - 1) // num_steps
# The starting indices for subsequences of length `num_steps`
initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
# In random sampling, the subsequences from two adjacent random
# minibatches during iteration are not necessarily adjacent on the
# original sequence
random.shuffle(initial_indices)
def data(pos):
# Return a sequence of length `num_steps` starting from `pos`
return corpus[pos : pos + num_steps]
num_batches = num_subseqs // batch_size
for i in range(0, batch_size * num_batches, batch_size):
# Here, `initial_indices` contains randomized starting indices for
# subsequences
initial_indices_per_batch = initial_indices[i : i + batch_size]
X = [data(j) for j in initial_indices_per_batch]
Y = [data(j + 1) for j in initial_indices_per_batch]
yield torch.tensor(X), torch.tensor(Y)
my_seq = list(range(35))
b = 0
for X, Y in seq_data_iter_random(my_seq, batch_size=2, num_steps=5):
print("batch: ", b)
print("X: ", X, "\nY:", Y)
b += 1
def seq_data_iter_sequential(corpus, batch_size, num_steps):
    """Generate a minibatch of subsequences using sequential partitioning."""
# Start with a random offset to partition a sequence
offset = random.randint(0, num_steps)
num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
Xs = torch.tensor(corpus[offset : offset + num_tokens])
Ys = torch.tensor(corpus[offset + 1 : offset + 1 + num_tokens])
Xs, Ys = Xs.reshape(batch_size, -1), Ys.reshape(batch_size, -1)
num_batches = Xs.shape[1] // num_steps
for i in range(0, num_steps * num_batches, num_steps):
X = Xs[:, i : i + num_steps]
Y = Ys[:, i : i + num_steps]
yield X, Y
for X, Y in seq_data_iter_sequential(my_seq, batch_size=2, num_steps=5):
print("X: ", X, "\nY:", Y)
def load_corpus_time_machine(max_tokens=-1):
    """Return token indices and the vocabulary of the time machine dataset."""
lines = read_time_machine()
tokens = tokenize(lines, "char")
vocab = Vocab(tokens)
# Since each text line in the time machine dataset is not necessarily a
# sentence or a paragraph, flatten all the text lines into a single list
corpus = [vocab[token] for line in tokens for token in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
class SeqDataLoader: # @save
An iterator to load sequence data.
def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
if use_random_iter:
self.data_iter_fn = seq_data_iter_random
else:
self.data_iter_fn = seq_data_iter_sequential
self.corpus, self.vocab = load_corpus_time_machine(max_tokens)
self.batch_size, self.num_steps = batch_size, num_steps
def __iter__(self):
return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
def load_data_time_machine(batch_size, num_steps, use_random_iter=False, max_tokens=10000): # @save
Return the iterator and the vocabulary of the time machine dataset.
data_iter = SeqDataLoader(batch_size, num_steps, use_random_iter, max_tokens)
return data_iter, data_iter.vocab
data_iter, vocab = load_data_time_machine(2, 5)
print(list(vocab.token_to_idx.items())[:10])
b = 0
for X, Y in data_iter:
print("batch: ", b)
print("X: ", X, "\nY:", Y)
b += 1
if b > 2:
break
DATA_HUB["fra-eng"] = (DATA_URL + "fra-eng.zip", "94646ad1522d915e7b0f9296181140edcf86a4f5")
def read_data_nmt():
Load the English-French dataset.
data_dir = download_extract("fra-eng")
with open(os.path.join(data_dir, "fra.txt"), "r") as f:
return f.read()
raw_text = read_data_nmt()
print(raw_text[:100])
def preprocess_nmt(text):
Preprocess the English-French dataset.
def no_space(char, prev_char):
return char in set(",.!?") and prev_char != " "
# Replace non-breaking space with space, and convert uppercase letters to
# lowercase ones
text = text.replace("\u202f", " ").replace("\xa0", " ").lower()
# Insert space between words and punctuation marks
out = [" " + char if i > 0 and no_space(char, text[i - 1]) else char for i, char in enumerate(text)]
return "".join(out)
text = preprocess_nmt(raw_text)
print(text[:110])
def tokenize_nmt(text, num_examples=None):
Tokenize the English-French dataset.
source, target = [], []
for i, line in enumerate(text.split("\n")):
if num_examples and i > num_examples:
break
parts = line.split("\t")
if len(parts) == 2:
source.append(parts[0].split(" "))
target.append(parts[1].split(" "))
return source, target
source, target = tokenize_nmt(text)
source[:10], target[:10]
src_vocab = Vocab(source, min_freq=2, reserved_tokens=["<pad>", "<bos>", "<eos>"])
len(src_vocab)
# French has more high frequency words than English
target_vocab = Vocab(target, min_freq=2, reserved_tokens=["<pad>", "<bos>", "<eos>"])
len(target_vocab)
def truncate_pad(line, num_steps, padding_token):
Truncate or pad sequences.
if len(line) > num_steps:
return line[:num_steps] # Truncate
return line + [padding_token] * (num_steps - len(line)) # Pad
print(truncate_pad(source[0], 10, "pad"))
print(truncate_pad(src_vocab[source[0]], 10, src_vocab["<pad>"]))
def build_array_nmt(lines, vocab, num_steps):
Transform text sequences of machine translation into minibatches.
lines = [vocab[l] for l in lines]
lines = [l + [vocab["<eos>"]] for l in lines]
array = torch.tensor([truncate_pad(l, num_steps, vocab["<pad>"]) for l in lines])
valid_len = (array != vocab["<pad>"]).type(torch.int32).sum(1)
return array, valid_len
num_steps = 10
src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
print(src_array.shape)
print(src_valid_len.shape)
print(src_array[0, :]) # go, ., eos, pad, ..., pad
print(src_valid_len[0])
def load_array(data_arrays, batch_size, is_train=True):
Construct a PyTorch data iterator.
dataset = data.TensorDataset(*data_arrays)
return data.DataLoader(dataset, batch_size, shuffle=is_train)
def load_data_nmt(batch_size, num_steps, num_examples=600):
Return the iterator and the vocabularies of the translation dataset.
text = preprocess_nmt(read_data_nmt())
source, target = tokenize_nmt(text, num_examples)
src_vocab = Vocab(source, min_freq=2, reserved_tokens=["<pad>", "<bos>", "<eos>"])
tgt_vocab = Vocab(target, min_freq=2, reserved_tokens=["<pad>", "<bos>", "<eos>"])
src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
data_iter = load_array(data_arrays, batch_size)
return data_iter, src_vocab, tgt_vocab
train_iter, src_vocab, tgt_vocab = load_data_nmt(batch_size=2, num_steps=8)
for X, X_valid_len, Y, Y_valid_len in train_iter:
print("X:", X.type(torch.int32))
print("valid lengths for X:", X_valid_len)
print("Y:", Y.type(torch.int32))
print("valid lengths for Y:", Y_valid_len)
break
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For all algorithms in baselines, summary data is saved into a folder defined by logger. By default, a folder $TMPDIR/openai-<date>-<time> is used; you can see the location of the logger directory at the beginning of training in a message like this
Step2: Notice also the usage of the OPENAI_LOG_FORMAT environment variable - this allows us to suppress printing to stdout.
Step3: will search for all folders with baselines-compatible results in ~/logs/cartpole-ppo and its subfolders and return a list of Result objects. Each Result object is a named tuple with the following fields
Step4: The raw learning curve from a single run can be very noisy. To smooth it and analyze whether any learning actually takes place, we can use the plot_util.smooth() function
Step5: A similar curve can be obtained by using logger summaries (instead of raw episode data in monitor.csv)
Step6: Note, however, that raw episode data is stored by the Monitor wrapper, and hence looks similar for all algorithms, whereas progress data is handled by the algorithm itself, and hence can vary (column names, type of data available) between algorithms.
Step7: The results of the 12 runs from the cell above can be loaded just as before, via (we discard the first result, which is actually from the very first run in the previous section)
Step8: But how do we plot all 12 of them in a sensible manner? The baselines.common.plot_util module provides the plot_results function to do just that
Step9: The results are split into two groups based on batch size and are plotted on a separate graph. More specifically, by default plot_results considers the digits after the dash at the end of the directory name to be the seed id and groups together the runs that differ only by those.
Step10: The lighter shade shows the standard deviation of the data, and the darker shade the error in the estimate of the mean (that is, standard deviation divided by the square root of the number of seeds). Note that averaging over seeds requires resampling to a common grid, which, in turn, requires smoothing (in signal-processing terms, we need to do low-pass filtering before resampling to avoid aliasing effects). You can change the amount of smoothing by adjusting the resample and smooth_step arguments to achieve the desired smoothing effect. See the docstring of the plot_util function for more info.
Step11: Option split_fn=lambda _
|
<ASSISTANT_TASK:>
Python Code:
!pip install git+https://github.com/openai/baselines > ~/pip_install_baselines.log
!OPENAI_LOGDIR=$HOME/logs/cartpole-ppo OPENAI_LOG_FORMAT=csv python -m baselines.run --alg=ppo2 --env=CartPole-v0 --num_timesteps=30000 --nsteps=128
from baselines.common import plot_util as pu
results = pu.load_results('~/logs/cartpole-ppo')
import matplotlib.pyplot as plt
import numpy as np
r = results[0]
plt.plot(np.cumsum(r.monitor.l), r.monitor.r)
plt.plot(np.cumsum(r.monitor.l), pu.smooth(r.monitor.r, radius=10))
plt.plot(r.progress.total_timesteps, r.progress.eprewmean)
!for seed in $(seq 0 5); do OPENAI_LOG_FORMAT=csv OPENAI_LOGDIR=$HOME/logs/cartpole-ppo-hp/b32-$seed python -m baselines.run --alg=ppo2 --env=CartPole-v0 --num_timesteps=3e4 --seed=$seed --nsteps=32; done
!for seed in $(seq 0 5); do OPENAI_LOG_FORMAT=csv OPENAI_LOGDIR=$HOME/logs/cartpole-ppo-hp/b128-$seed python -m baselines.run --alg=ppo2 --env=CartPole-v0 --num_timesteps=3e4 --seed=$seed --nsteps=128; done
results = pu.load_results('~/logs/cartpole-ppo-hp'); print(len(results))
pu.plot_results(results)
pu.plot_results(results, average_group=True)
pu.plot_results(results, average_group=True, split_fn=lambda _: '')
pu.plot_results(results, average_group=True, split_fn=lambda _: '', shaded_std=False)
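# Added illustration (hedged): Step10 mentions that the smoothing done before resampling can be
# tuned. If your installed baselines version exposes them, the `resample` and `smooth_step`
# keyword arguments of plot_results control this; check the plot_util docstring, since these
# names are an assumption rather than something shown in the original notebook.
pu.plot_results(results, average_group=True, resample=512, smooth_step=1.0)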
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: nltk
Step2: The data
Step3: Although already reduced compared to the original competition file, which contains more than 15M lines, the file cdiscount_test.csv.zip, with 1M lines, is still large.
Step4: The following command displays the first lines of the file.
Step5: The following command displays an example product for each level-1 category (Categorie1).
Step6: Class distribution
Step7: Q What can we say about the distribution of these classes?
Step8: Data cleaning
Step9: Removing any HTML tags from the description
Step10: Converting the text to lowercase
Step11: Replacing special characters
Step12: Removing accents
Step13: Removing characters that are not lowercase letters
Step14: Replacing the description with a list of words (tokens), removing words with fewer than 2 letters as well as stopwords
Step15: Accent removal is also applied to this list
Step16: Finally we create the tokens, the list of words in the product description, by removing the elements of the product description that appear in the stopword list.
Step17: Stemming each token
Step18: Text-cleaning function
Step19: Applying the cleaning to every row of the DataFrame and creating two new DataFrames (with and without the stemming step).
Step20: Cleaning the DataFrames
Step21: Displaying the first 5 rows of the training DataFrame after cleaning.
Step22: Size of the word dictionary for the dataset before and after stemming.
Step23: Wordcloud
Step24: Wordcloud of all the descriptions in their raw state.
Step25: Wordcloud after stemming and cleaning
Step26: You can see that the words "voir" and "present" are the most frequent. This is because most descriptions end with "Voir la présentation". These two words are therefore not informative, since they appear in many different categories. This is a good example of a stopword specific to a particular problem.
|
<ASSISTANT_TASK:>
Python Code:
#Import the libraries used
import unicodedata
import time
import pandas as pd
import numpy as np
import random
import nltk
import re
import collections
import itertools
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sb
sb.set_style("whitegrid")
import sklearn.cross_validation as scv
# nltk.download("all")
def split_dataset(input_path, nb_line, tauxValid):
data_all = pd.read_csv(input_path,sep=",", nrows=nb_line)
data_all = data_all.fillna("")
data_train, data_valid = scv.train_test_split(data_all, test_size = tauxValid)
time_end = time.time()
return data_train, data_valid
input_path = "data/cdiscount_train.csv.zip"
nb_line=100000 # total portion extracted from the initial file, already reduced here
tauxValid = 0.05
data_train, data_valid = split_dataset(input_path, nb_line, tauxValid)
# This line displays the first 5 rows of the DataFrame
N_train = data_train.shape[0]
N_valid = data_valid.shape[0]
print("Train set : %d elements, Validation set : %d elements" %(N_train, N_valid))
data_train.head(5)
data_train.groupby("Categorie1").first()[["Description","Libelle","Marque"]]
#Count occurence of each Categorie
data_count = data_train["Categorie1"].value_counts()
#Rename index to add percentage
new_index = [k+ ": %.2f%%" %(v*100/N_train) for k,v in data_count.iteritems()]
data_count.index = new_index
fig=plt.figure(figsize= (10,10))
ax = fig.add_subplot(1,1,1)
data_count.plot.barh(logx = False)
plt.show()
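# Added sketch (not in the original notebook): one quick way to answer the question about the
# class distribution is to quantify the imbalance between the largest and smallest Categorie1 class.
print("Ratio between the largest and smallest class: %.1f" % (data_count.max() / data_count.min()))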
data_valid.to_csv("data/cdiscount_valid.csv", index=False)
data_train.to_csv("data/cdiscount_train_subset.csv", index=False)
i = 0
description = data_train.Description.values[i]
print("Original Description : " + description)
from bs4 import BeautifulSoup # HTML cleanup
txt = BeautifulSoup(description,"html.parser",from_encoding='utf-8').get_text()
print(txt)
txt = txt.lower()
print(txt)
txt = txt.replace(u'\u2026','.')
txt = txt.replace(u'\u00a0',' ')
print(txt)
txt = unicodedata.normalize('NFD', txt).encode('ascii', 'ignore').decode("utf-8")
print(txt)
txt = re.sub('[^a-z_]', ' ', txt)
print(txt)
## Lists of words to remove from the product descriptions
## From NLTK
nltk_stopwords = nltk.corpus.stopwords.words('french')
## From an external file.
lucene_stopwords =open("data/lucene_stopwords.txt","r").read().split(",") # local file
## Union of the two stopword lists
stopwords = list(set(nltk_stopwords).union(set(lucene_stopwords)))
stopwords[:10]
stopwords = [unicodedata.normalize('NFD', sw).encode('ascii', 'ignore').decode("utf-8") for sw in stopwords]
stopwords[:10]
tokens = [w for w in txt.split() if (len(w)>2) and (w not in stopwords)]
remove_words = [w for w in txt.split() if (len(w)<2) or (w in stopwords)]
print(tokens)
print(remove_words)
## Stemming function used to reduce words to their root (racinisation)
stemmer=nltk.stem.SnowballStemmer('french')
tokens_stem = [stemmer.stem(token) for token in tokens]
print(tokens_stem)
# General cleaning function
def clean_txt(txt):
### remove html stuff
txt = BeautifulSoup(txt,"html.parser",from_encoding='utf-8').get_text()
### lower case
txt = txt.lower()
### special escaping character '...'
txt = txt.replace(u'\u2026','.')
txt = txt.replace(u'\u00a0',' ')
### remove accent btw
txt = unicodedata.normalize('NFD', txt).encode('ascii', 'ignore').decode("utf-8")
###txt = unidecode(txt)
### remove non alphanumeric char
txt = re.sub('[^a-z_]', ' ', txt)
### remove french stop words
tokens = [w for w in txt.split() if (len(w)>2) and (w not in stopwords)]
### french stemming
tokens_stem = [stemmer.stem(token) for token in tokens]
### tokens = stemmer.stemWords(tokens)
return ' '.join(tokens), " ".join(tokens_stem)
def clean_marque(txt):
txt = re.sub('[^a-zA-Z0-9]', '_', txt).lower()
return txt
# File-cleaning function (stemming and list of words to remove)
def clean_df(input_data, column_names= ['Description', 'Libelle', 'Marque']):
nb_line = input_data.shape[0]
print("Start Clean %d lines" %nb_line)
# Cleaning start for each columns
time_start = time.time()
clean_list=[]
clean_stem_list=[]
for column_name in column_names:
column = input_data[column_name].values
if column_name == "Marque":
array_clean = np.array(list(map(clean_marque,column)))
clean_list.append(array_clean)
clean_stem_list.append(array_clean)
else:
A = np.array(list(map(clean_txt,column)))
array_clean = A[:,0]
array_clean_stem = A[:,1]
clean_list.append(array_clean)
clean_stem_list.append(array_clean_stem)
time_end = time.time()
print("Cleaning time: %d secondes"%(time_end-time_start))
#Convert list to DataFrame
array_clean = np.array(clean_list).T
data_clean = pd.DataFrame(array_clean, columns = column_names)
array_clean_stem = np.array(clean_stem_list).T
data_clean_stem = pd.DataFrame(array_clean_stem, columns = column_names)
return data_clean, data_clean_stem
# Takes approximately 2 minutes for 100,000 rows
warnings.filterwarnings("ignore")
data_valid_clean, data_valid_clean_stem = clean_df(data_valid)
warnings.filterwarnings("ignore")
data_train_clean, data_train_clean_stem = clean_df(data_train)
data_train_clean.head(5)
data_train_clean_stem.head(5)
concatenate_text = " ".join(data_train["Description"].values)
list_of_word = concatenate_text.split(" ")
N = len(set(list_of_word))
print(N)
concatenate_text = " ".join(data_train_clean["Description"].values)
list_of_word = concatenate_text.split(" ")
N = len(set(list_of_word))
print(N)
concatenate_text = " ".join(data_train_clean_stem["Description"].values)
list_of_word_stem = concatenate_text.split(" ")
N = len(set(list_of_word_stem))
print(N)
from wordcloud import WordCloud
A=WordCloud(background_color="black")
A.generate_from_text?
all_descr = " ".join(data_valid.Description.values)
wordcloud_word = WordCloud(background_color="black", collocations=False).generate_from_text(all_descr)
plt.figure(figsize=(10,10))
plt.imshow(wordcloud_word,cmap=plt.cm.Paired)
plt.axis("off")
plt.show()
all_descr_clean_stem = " ".join(data_valid_clean_stem.Description.values)
wordcloud_word = WordCloud(background_color="black", collocations=False).generate_from_text(all_descr_clean_stem)
plt.figure(figsize=(10,10))
plt.imshow(wordcloud_word,cmap=plt.cm.Paired)
plt.axis("off")
plt.show()
data_valid_clean.to_csv("data/cdiscount_valid_clean.csv", index=False)
data_train_clean.to_csv("data/cdiscount_train_clean.csv", index=False)
data_valid_clean_stem.to_csv("data/cdiscount_valid_clean_stem.csv", index=False)
data_train_clean_stem.to_csv("data/cdiscount_train_clean_stem.csv", index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Interact basics
Step3: Use the interact function to interact with the print_sum function.
Step5: Write a function named print_string that prints a string and additionally prints the length of that string if a boolean parameter is True.
Step6: Use the interact function to interact with the print_string function.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact, interactive, fixed
from IPython.display import display
def print_sum(a, b):
Print the sum of the arguments a and b.
print(a+b)
interact(print_sum, a=(-10.0,10.0,0.1), b=(-8,8,2))
assert True # leave this for grading the print_sum exercise
def print_string(s, length=False):
Print the string s and optionally its length.
print(s)
if length:
print(len(s))
interact(print_string, s='Hello World!', length=True);
assert True # leave this for grading the print_string exercise
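# Extra, ungraded example (a hedged sketch): the `fixed` helper imported above pins an argument
# so that no widget is created for it; here the length flag is always True.
interact(print_string, s='Hello World!', length=fixed(True));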
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Historical opening, closing, high, low
Step2: Volume vs Average Volume
Step3: Convert the data collected into numpy arrays
Step4: Stack the data
Step5: Need today's data for the features selected
Step6: Collect today's data int a numpy array
Step7: Scikit-learn
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
x_axis = np.arange(0+1, len(historical)+1)
plt.plot(x_axis, historical_opening, 'b', x_axis, historical_closing, 'r')
plt.xlabel('Day')
plt.ylabel('Price ($)')
#plt.figure(figsize=(20,10))
plt.title("Stock price: Opening vs Closing")
plt.show();
plt.plot(x_axis, historical_high, 'b', x_axis, historical_low, 'g', x_axis, historical_opening, 'y', x_axis, historical_closing, 'r')
plt.xlabel('Day')
plt.ylabel('Price ($)')
#plt.figure(figsize=(20,10))
plt.show();
plt.plot(x_axis,historical_volume, 'g', x_axis, average_volume, 'b')
plt.xlabel('Day')
plt.ylabel('Volume')
plt.show()
opening = np.array(historical_opening)
volume = np.array(historical_volume)
high = np.array(historical_high)
low = np.array(historical_low)
avg_vol = np.array(average_volume)
closing = np.array(historical_closing)
data = np.vstack((opening, high, low, volume, avg_vol))
shape1, shape2 = data.shape
data = data.T # transpose so rows are samples and columns are features (reshape would scramble the values)
data.shape
opening_price = company.get_open()
todays_volume = company.get_volume()
high = company.get_days_high()
low = company.get_days_low()
avg_volume = average_volume[0]
print opening_price
print todays_volume
print high
print low
print avg_volume
#target_pre = np.asarray(closing)
#target = np.vstack(target_pre)
today =np.array((opening_price, high, low, todays_volume, avg_volume))
from sklearn import svm
clf = svm.SVR(gamma=0.00001, C=29000)
#Fit takes in data (#_samples X #_of_features array), and target(closing - 1 X #_of_Sample_size array)
fit = clf.fit(data[:-10],closing[:-10])
predict = clf.predict(today)
graph = clf.fit(data, closing).predict(data)
date = company.get_trade_datetime()
name = cn.find_name(ticker)
print name, "[" , ticker, "]" #get company name using the ticker symbol
print "\nPredicted [closing] price for ", date[:10], ": $", predict[0]
company.refresh()
#change = (company.get_price() - company.get_open())/company.get_open()
#print change
print "current price $", company.get_price()
#print "% change today ",
print data[:,0].shape
print closing.shape
print graph
plt.scatter(data[:,0], closing, c='k', label='data')
plt.hold('on')
plt.plot(data[:,0],graph, c='g', label = 'Linear')
plt.ylabel('target')
plt.xlabel('data')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Many bugs in deep learning come from matrix/vector dimension mismatches. If you keep your matrix/vector dimensions consistent, you will eliminate most errors.
Step2: The common preprocessing steps for a dataset are:
Step5: The main steps for building a neural network are: 1. Define the model structure (e.g., the number of input features) 2. Initialize the model's parameters 3. Loop:
Step7: ### Forward and backward propagation
Step9: Optimization function
Step10: Merge all the pieces into a model
Step11: The training accuracy is 100%. This is a good sign: your model is working and has enough capacity to fit the training data. The test accuracy is 68%. Given that we used a small dataset and that logistic regression is a linear classifier, that is actually not bad for such a simple model. But rest assured, next week you will build an even better classifier!
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
# from ..data.deeplearningai17761.lr_utils import load_dataset
def load_dataset():
train_dataset = h5py.File('../data/deeplearningai17761/train_catvnoncat.h5', "r")
train_set_x_orig = np.array(train_dataset["train_set_x"][:]) # your train set features
train_set_y_orig = np.array(train_dataset["train_set_y"][:]) # your train set labels
test_dataset = h5py.File('../data/deeplearningai17761/test_catvnoncat.h5', "r")
test_set_x_orig = np.array(test_dataset["test_set_x"][:]) # your test set features
test_set_y_orig = np.array(test_dataset["test_set_y"][:]) # your test set labels
classes = np.array(test_dataset["list_classes"][:]) # the list of classes
train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes
# Each row of train_set_x_orig and test_set_x_orig is an array representing an image.
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# You can visualize an example by running the following code. Feel free to change the index value and re-run to see other images.
# Example of a picture
index = 2
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
# image size
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
### Reshape the training and test datasets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num_px * num_px * 3, 1).
# X_flatten = X.reshape(X.shape[0], -1).T # where X.T is the transpose of X; the -1 tells reshape to infer the remaining dimension
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T
train_set_x_flatten.shape,test_set_x_flatten.shape
# Standardize: each pixel holds red, green and blue (RGB) channel values, so a pixel is really a vector of three numbers ranging from 0 to 255.
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
# GRADED FUNCTION: sigmoid
def sigmoid(z):
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
### START CODE HERE ### (≈ 1 line of code)
s = 1 / (1 + np.exp(-z))
### END CODE HERE ###
return s
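# Added sanity check (illustration only): sigmoid(0) should be 0.5 and sigmoid of a large
# positive number should approach 1.
print("sigmoid([0, 2]) = " + str(sigmoid(np.array([0, 2]))))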
# Initialize parameters
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros((dim, 1))
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T, X) + b) # compute activation
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A),keepdims=True) # compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = (1 / m) * np.dot(X, (A - Y).T)
db = (1 / m) * np.sum(A - Y)
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate * dw
b = b - learning_rate * db
### END CODE HERE ###
# Record the costs
if i % 200 == 0:
costs.append(cost)
# Print the cost every 100 training examples
if print_cost and i % 200 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(w.T, X) + b)
### END CODE HERE ###
# for i in range(A.shape[1]):
# # Convert probabilities A[0,i] to actual predictions p[0,i]
# ### START CODE HERE ### (≈ 4 lines of code)
# if A[0, i] <= 0.5:
# Y_prediction[0, i] = 0
# else:
# Y_prediction[0, i] = 1
### END CODE HERE ###
# An alternative way
Y_prediction[np.where(A>=0.5)]=1
assert(Y_prediction.shape == (1, m))
return Y_prediction
# Y_prediction: predictions on the test set
# Y_prediction_train: predictions on the training set
# w, costs, gradients: outputs of optimize()
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
w,b = initialize_with_zeros(X_train.shape[0])
params, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = params["w"]
b = params["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 5000, learning_rate = 0.005, print_cost = True)
# Example of a picture that was wrongly classified.
index = 2
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[int(d["Y_prediction_test"][0,index])].decode("utf-8") + "\" picture.")
# Learning rate: for gradient descent to work, you must choose the learning rate wisely.
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
# Test with your own image
## START CODE HERE ## (PUT YOUR IMAGE NAME)
#my_image = "cat_in_iran.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
from skimage.transform import resize
fname = '../data/deeplearningai17761/cat_in_iran.jpg'
image = np.array(plt.imread(fname))
image.shape
# my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_image=resize(image, output_shape=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 17. decision trees
Step3: Entropy of a partition
Step6: Building the decision tree
Step8: ~~~
Step9: Now build the actual tree from the training data!!!
Step10: Random forest
|
<ASSISTANT_TASK:>
Python Code:
from collections import Counter, defaultdict
from functools import partial
import math, random
def entropy(class_probabilities):
Given the class membership probabilities, compute the entropy
return sum(-p * math.log(p, 2) for p in class_probabilities if p)
# The probability of each class label is computed separately.
# To compute the entropy we do not need to know which probability belongs to which label;
# we only need the probability values themselves, regardless of label.
def class_probabilities(labels):
total_count = len(labels)
return [count / total_count
for count in Counter(labels).values()]
def data_entropy(labeled_data):
labels = [label for _, label in labeled_data]
probabilities = class_probabilities(labels)
return entropy(probabilities)
def partition_entropy(subsets):
subsets is a list of lists of labeled data.
Compute the partition entropy over it
total_count = sum(len(subset) for subset in subsets)
return sum( data_entropy(subset) * len(subset) / total_count
for subset in subsets )
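# Added mini example (illustration only): a perfectly balanced two-class partition carries
# 1 bit of entropy, while a pure partition carries 0.
print(entropy([0.5, 0.5])) # 1.0
print(entropy([1.0])) # 0.0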
inputs = [
({'level':'Senior','lang':'Java','tweets':'no','phd':'no'}, False),
({'level':'Senior','lang':'Java','tweets':'no','phd':'yes'}, False),
({'level':'Mid','lang':'Python','tweets':'no','phd':'no'}, True),
({'level':'Junior','lang':'Python','tweets':'no','phd':'no'}, True),
({'level':'Junior','lang':'R','tweets':'yes','phd':'no'}, True),
({'level':'Junior','lang':'R','tweets':'yes','phd':'yes'}, False),
({'level':'Mid','lang':'R','tweets':'yes','phd':'yes'}, True),
({'level':'Senior','lang':'Python','tweets':'no','phd':'no'}, False),
({'level':'Senior','lang':'R','tweets':'yes','phd':'no'}, True),
({'level':'Junior','lang':'Python','tweets':'yes','phd':'no'}, True),
({'level':'Senior','lang':'Python','tweets':'yes','phd':'yes'},True),
({'level':'Mid','lang':'Python','tweets':'no','phd':'yes'}, True),
({'level':'Mid','lang':'Java','tweets':'yes','phd':'no'}, True),
({'level':'Junior','lang':'Python','tweets':'no','phd':'yes'},False)
]
# 1. Find the partition that returns the lowest entropy.
def partition_by(inputs, attribute):
Partition the inputs according to attribute
groups = defaultdict(list)
for input in inputs:
key = input[0][attribute]
groups[key].append(input)
return groups
# 2. Compute the entropy
def partition_entropy_by(inputs,attribute):
Compute the entropy corresponding to the given partition
partitions = partition_by(inputs, attribute)
return partition_entropy(partitions.values())
# 3. Find the partition that minimizes entropy over the whole dataset.
for key in ['level','lang','tweets','phd']:
print(key, partition_entropy_by(inputs, key))
print()
# Build a subtree by branching on each possible value of level.
# When level is Mid the prediction is True.
# When level is Senior it can be True or False.
senior_inputs = [(input, label)
for input, label in inputs if input["level"] == "Senior"]
for key in ['lang', 'tweets', 'phd']:
print(key, partition_entropy_by(senior_inputs, key))
print()
junior_inputs = [(input, label)
for input, label in inputs if input["level"] == "Junior"]
for key in ['lang', 'tweets', 'phd']:
print(key, partition_entropy_by(junior_inputs, key))
print()
def classify(tree, input):
Classify the given input using the decision tree `tree`
# If this is a leaf node, return its value
if tree in [True, False]:
return tree
# Otherwise partition the data on one of its attributes:
# use a dict whose keys are attribute values and whose values are subtrees
attribute, subtree_dict = tree
subtree_key = input.get(attribute) # None if input is missing attribute
if subtree_key not in subtree_dict: # when no subtree exists for this key
subtree_key = None # use the None subtree
subtree = subtree_dict[subtree_key] # select the appropriate subtree
return classify(subtree, input) # classify the input data
def build_tree_id3(inputs, split_candidates=None):
# if this is our first pass,
# all keys of the first input are split candidates
if split_candidates is None:
split_candidates = inputs[0][0].keys()
# count Trues and Falses in the inputs
num_inputs = len(inputs)
num_trues = len([label for item, label in inputs if label])
num_falses = num_inputs - num_trues
if num_trues == 0: # if only Falses are left
return False # return a "False" leaf
if num_falses == 0: # if only Trues are left
return True # return a "True" leaf
if not split_candidates: # if no split candidates left
return num_trues >= num_falses # return the majority leaf
# otherwise, split on the best attribute
best_attribute = min(split_candidates,
key=partial(partition_entropy_by, inputs))
partitions = partition_by(inputs, best_attribute)
new_candidates = [a for a in split_candidates
if a != best_attribute]
# recursively build the subtrees
subtrees = { attribute : build_tree_id3(subset, new_candidates)
for attribute, subset in partitions.items() }
subtrees[None] = num_trues > num_falses # default case
return (best_attribute, subtrees)
print("building the tree")
tree = build_tree_id3(inputs)
print(tree)
print("Junior / Java / tweets / no phd", classify(tree,
{ "level" : "Junior",
"lang" : "Java",
"tweets" : "yes",
"phd" : "no"} ))
print("Junior / Java / tweets / phd", classify(tree,
{ "level" : "Junior",
"lang" : "Java",
"tweets" : "yes",
"phd" : "yes"} ))
# Classification still works even when an attribute takes a value never seen before, or when the attribute value is missing entirely.
print("Intern", classify(tree, { "level" : "Intern" } ))
print("Senior", classify(tree, { "level" : "Senior" } ))
# Classification still works even when an attribute takes a value never seen before, or when the attribute value is missing entirely.
def forest_classify(trees, input):
votes = [classify(tree, input) for tree in trees]
vote_counts = Counter(votes)
return vote_counts.most_common(1)[0][0]
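# Added sketch (hedged, not from the source text): one simple way to grow the trees that
# forest_classify votes over is to train each tree on a bootstrap sample of the inputs, so
# that the trees differ and their majority vote becomes more robust.
def build_forest(inputs, num_trees=5):
    forest = []
    for _ in range(num_trees):
        sample = [random.choice(inputs) for _ in inputs] # bootstrap sample with replacement
        forest.append(build_tree_id3(sample))
    return forest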
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's localize the N100m (using MEG only)
Step2: Calculate and visualise magnetic field predicted by dipole with maximum GOF
Step3: Estimate the time course of a single dipole with fixed position and
|
<ASSISTANT_TASK:>
Python Code:
from os import path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.simulation import simulate_evoked
from nilearn.plotting import plot_anat
from nilearn.datasets import load_mni152_template
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()
evoked.crop(0.07, 0.08)
# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')
# Plot the result in 3D brain with the MRI image using Nilearn
# In MRI coordinates and in MNI coordinates (template brain)
trans = mne.read_trans(fname_trans)
subject = 'sample'
mni_pos = mne.head_to_mni(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
mri_pos = mne.head_to_mri(dip.pos, mri_head_t=trans,
subject=subject, subjects_dir=subjects_dir)
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
fig_T1 = plot_anat(t1_fname, cut_coords=mri_pos[0], title='Dipole loc.')
template = load_mni152_template()
fig_template = plot_anat(template, cut_coords=mni_pos[0],
title='Dipole loc. (MNI Space)')
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4])
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False, time_unit='s')
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, -pred_evoked], weights='equal')
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2], **plot_params)
plt.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot(time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You installed the following modules with pip from requirements.txt. Or you installed Anaconda (Mac or Windows), in which case all the modules are already included.
Step2: Installing TensorFlow is done differently
Step3: Finally, to test the trickiest part, we will draw a parabola. You should see a parabola below.
|
<ASSISTANT_TASK:>
Python Code:
import logging
import time
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
import sklearn
import pandas as pd
from sklearn import datasets
from sklearn import svm
import pylab as pl
from matplotlib.colors import ListedColormap
import sklearn as sk
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model.logistic import LogisticRegression
import matplotlib.font_manager
import matplotlib
%matplotlib inline
import tensorflow
x = np.linspace(-100, 100, 201)
plt.plot(x, x * x)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run a simple callback as soon as possible
Step2: Coroutine Examples
Step3: You can use as many awaits as you like in a couroutine
Step4: All normal control structures can be used
Step5: Exceptions work just like you would expect
Step6: Multiple Coroutines can be combined and executed concurrently
Step7: Note that this only took one second, not two!
Step8: Note that nothing happens as long as the coroutine is not awaited.
Step9: Not awaiting a coroutine raises an error.
Step10: Awaiting something that is not awaitable raises an error.
Step11: Async for-loop
Step12: The async for-loop saves us the boilerplate code to await each next value. Note that it runs sequentially (i.e., the elements are fetched after each other).
Step13: Async Context Manager
Step14: One example is using locks (even though this doesn't require async exiting).
|
<ASSISTANT_TASK:>
Python Code:
import asyncio
loop = asyncio.get_event_loop()
def hello_world():
print('Hello World!')
loop.stop()
loop.call_soon(hello_world)
loop.run_forever()
async def aprint(text):
await asyncio.sleep(1)
print(text)
return 42
loop.run_until_complete(aprint('Hello world!'))
async def aprint_twice(text):
await asyncio.sleep(1)
print(text)
await asyncio.sleep(1)
print(text + ' (once more)')
return 42
loop.run_until_complete(aprint_twice('Hello world!'))
async def aprint_twice():
for i in range(1, 7):
await asyncio.sleep(0.5)
if i % 2:
print('even')
else:
print('uneven, waiting some more...')
await asyncio.sleep(1)
loop.run_until_complete(aprint_twice())
async def raiser():
await asyncio.sleep(1)
raise ValueError()
async def catcher():
try:
await raiser()
except ValueError:
print('caught something')
loop.run_until_complete(catcher())
tasks = asyncio.gather(aprint('Task 1'), aprint('Task 2'))
loop.run_until_complete(tasks)
async def remember_me():
print('I started.')
await aprint('Did I forget something?')
a = remember_me()
a = 42
a = aprint('Did I forget something?')
loop.run_until_complete(a)
del(a)
async def fail():
await aprint
loop.run_until_complete(fail())
from motor.motor_asyncio import AsyncIOMotorClient
collection = AsyncIOMotorClient().aiotest.test
loop.run_until_complete(collection.insert({'value': i} for i in range(10)))
async def f():
async for doc in collection.find():
print(doc)
loop.run_until_complete(f())
loop.run_until_complete(collection.drop())
class AsyncContextManager:
async def __aenter__(self):
await aprint('entering context')
async def __aexit__(self, exc_type, exc, tb):
await aprint('exiting context')
async def use_async_context():
async with AsyncContextManager():
print('Hello World!')
loop.run_until_complete(use_async_context())
lock = asyncio.Lock()
async def use_lock():
async with lock:
await asyncio.sleep(1)
print('one after the other...')
tasks = asyncio.gather(use_lock(), use_lock())
loop.run_until_complete(tasks)
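# Extra example (hedged addition): gather() can also collect exceptions instead of propagating
# the first one, which is handy when combining coroutines that may fail independently.
outcome = loop.run_until_complete(asyncio.gather(raiser(), aprint('still runs'), return_exceptions=True))
print(outcome) # the ValueError instance appears alongside the normal result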
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Title
Step2: This is along phi. I take the G element of the kin matrix corresponding to the second derivative.
Step3: column or rows?
|
<ASSISTANT_TASK:>
Python Code:
fileh5 = '/home/alessio/Desktop/Noise_Or_Not/m-only_IR_longer_with_nac_2_1_0000/allInput.h5'
inp = qp.readWholeH5toDict(fileh5)
wf2 = np.zeros_like(inp['potCube'],dtype=complex)
allp,allg,allt,alls = wf2.shape
wf = wf2[:,:,:,0].reshape(allp,allg,allt,1)
dime = allp*allg*allt
print(dime,allp,allg,allt)
np.empty((dime,dime))
wf[0,0,0,0] = 1
#qp.Cderivative3dMu(0,wf,inp)
wf[0,0,0,0] = 0
dime=65000
np.empty((dime,dime))
492800*492800
inp.keys()
# I need to cut all those things and create a consistent inp dictionary with all those arrays cut down to the small non-extrapolated cube
potential = data['potCube']
ground = potential[:,:,:,0]
zero_g = ground - np.amin(ground)
# find the minimum
p_min,g_min,t_min = np.unravel_index(zero_g.argmin(), zero_g.shape)
# The three potentials are
p_linear_pot = zero_g[:,g_min,t_min]
g_linear_pot = zero_g[p_min,:,t_min]
t_linear_pot = zero_g[p_min,g_min,:]
p_linear_pot = p_linear_pot
fig = plt.figure(figsize=(5,5))
plt.plot(p_linear_pot);
# plt.plot(g_linear_pot)
# plt.plot(t_linear_pot)
#
kin = data['kinCube']
p_linear_kin = kin[:,g_min,t_min]
p_linear_mu = p_linear_kin[:,0,2]
#
p_delta = -0.005
g_delta = -0.00535816080362
t_delta = 0.00390081087821
# 52 here is [len(phis)-3] -> [55-3] because I insert 3 numbers into the toeplitz matrix
from scipy.linalg import toeplitz
p_matrix = toeplitz([-(5/2), (4/3), -(1/12), *np.zeros(52)])
np.set_printoptions(linewidth=250, edgeitems=5)
p_matrix_divided = p_matrix/(dphi**2)
p_matrix_divided
# linear mu saved in kin matrix is already 1/mass... this is why here I multiply instead of dividing
new = (p_matrix_divided*p_linear_mu)
new
p_linear_pot_diagonal = np.diag(p_linear_pot)
to_diagonalize = p_linear_pot_diagonal + new
p_linear_pot_diagonal = p_linear_pot_diagonal/27.21
to_diagonalize
eigVal,eigVec = np.linalg.eig(to_diagonalize)
index_min = np.unravel_index(eigVal.argmin(), eigVal.shape)
eigVal,index_min, eigVal[index_min]
eigVec
p_gaussian = eigVec[index_min]
p_gaussian, np.linalg.norm(p_gaussian)
fig = plt.figure(figsize=(5,5))
plt.plot(phis_ext,p_gaussian);
np.sort(eigVal)
eigVal
eigVal[28]
eigVec[28]
fig = plt.figure(figsize=(5,5))
plt.plot(phis_ext,eigVec[28]);
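# Added note (hedged answer to the "column or rows?" question in the description):
# np.linalg.eig returns eigenvectors as the columns of eigVec, so the vector paired with
# eigVal[i] is eigVec[:, i], not the row eigVec[i].
i_min = int(np.argmin(eigVal))
fig = plt.figure(figsize=(5,5))
plt.plot(phis_ext, eigVec[:, i_min]);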
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We will look at an arbitrary expression $f(x, y)$
Step2: One way to evaluate above expression numerically is to invoke the subs method followed by the evalf method
Step3: However, if we need to do this repeatedly it can be quite slow
Step4: even compared to a simple lambda function
Step5: SymPy can also create a function analogous to f above. The function for doing so is called lambdify
Step6: Note how we specified modules above
Step7: NumPy's broadcasting (handling of different shapes) then works as expected
Step8: Behind the scenes lambdify constructs a string representation of the Python code and uses Python's eval function to compile the function.
Step9: Exercise
Step10: Use either the %exercise or %load magic to get the exercise / solution respecitvely
Step11: Replace ??? with the correct expression above.
|
<ASSISTANT_TASK:>
Python Code:
import sympy as sym
sym.init_printing()
x, y = sym.symbols('x y')
expr = 3*x**2 + sym.log(x**2 + y**2 + 1)
expr
expr.subs({x: 17, y: 42}).evalf()
%timeit expr.subs({x: 17, y: 42}).evalf()
import math
f = lambda x, y: 3*x**2 + math.log(x**2 + y**2 + 1)
f(17, 42)
%timeit f(17, 42)
g = sym.lambdify([x, y], expr, modules=['math'])
g(17, 42)
%timeit g(17, 42)
import numpy as np
xarr = np.linspace(17, 18, 5)
h = sym.lambdify([x, y], expr)
out = h(xarr, 42)
out.shape
yarr = np.linspace(42, 43, 7).reshape((1, 7))
out2 = h(xarr.reshape((5, 1)), yarr) # if we would try to use g() here, it would fail
out2.shape
z = z1, z2, z3 = sym.symbols('z:3')
expr2 = x*y*(z1 + z2 + z3)
func2 = sym.lambdify([x, y, z], expr2)
func2(1, 2, (3, 4, 5))
xplot = np.outer(np.linspace(-5, 5, 100), np.ones(100))
yplot = xplot.T
%load_ext scipy2017codegen.exercise
%exercise exercise_lambdify_expr.py
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(15, 13))
ax = plt.axes(projection='3d')
ax.plot_surface(xplot, yplot, zplot, cmap=plt.cm.coolwarm)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('$%s$' % sym.latex(d2fdxdy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You will notice that the output of the cell is placed immediately underneath the cell, and that a number appears to the left of the cell to indicate the order in which the cell was run
Step2: You should also notice, that if you modify the variable in one cell, it will only update in another cell when you re-run it.
Step3: Here we've assigned data to the variables text, number and pi_value,
Step4: Everything in Python has a type. To get the type of something, we can pass it
Step5: The variable text is of type str, short for "string". Strings hold
Step6: We can also use comparison and logic operators
Step7: Sequential types
Step8: A for loop can be used to access the elements in a list or other Python data
Step9: Indentation is very important in Python. Note that the second line in the
Step10: Challenge - Tuples
Step11: Dictionaries
Step12: Dictionaries work a lot like lists - except that you index them with keys.
Step13: In Python, a "Traceback" is a multi-line error block printed out for the
Step14: Using for loops with dictionaries is a little more complicated. We can do
Step15: or
Step16: Challenge - Can you do reassignment in a dictionary?
Step17: It is important to note that dictionaries are "unordered" and do not remember
Step18: Each time we call a function that's in a library, we use the syntax
Step19: We can see that there were 16,800 rows parsed. Each row has 10
Step20: Notice when you assign the imported DataFrame to a variable, Python does not
Step21: which prints contents like above.
Step22: Exploring Our Obesity Survey Data
Step23: As expected, it's a DataFrame (or, to use the full name that Python uses to refer
Step24: All the values in a column have the same type. For example, year has type
Step25: Methods are called in a similar fashion using the syntax df_object.method().
Step26: Let's get a list of all the years. The pd.unique function tells us all of
Step27: Challenge - Statistics
Step28: We can also extract one specific metric if we wish
Step29: But if we want to summarize by one or more variables, for example sex, we can
Step30: The pandas function describe will return descriptive stats including
Step31: The groupby command is powerful in that it allows us to quickly generate
Step32: Or, we can also count just the rows with the USA
Step33: Challenge - Make a list
Step34: Quick & Easy Plotting Data Using Pandas
Step35: We can also look at how many countries were measured in each year
Step36: (Perhaps a boring graph, but a good way to check that each year is consistent)
Step37: Summary Plotting Challenge
Step38: We can plot the above with
Step39: You can use the .unstack() method to transform grouped data into columns
Step40: This calculates the mean of bmis for each sex within each year as a table
Step41: Below we'll use .unstack() on our grouped data to figure out the mean BMI measured for each sex in each year.
Step42: Now, create a stacked bar plot with that data where the BMI values for each sex are stacked by year.
|
<ASSISTANT_TASK:>
Python Code:
# Like this first line, anything following a hash character (for the rest of that line) is considered a comment, and won't be run as code
text_str = "Congratulations, you've just run some Python code!"
print(text_str)
print(text_str)
text = "Weill Cornell Medicine" # An example of a string
number = 42 # An example of an integer
pi_value = 3.1415 # An example of a float
text
type(text)
type(number)
type(6.02)
2 + 2 # Addition
6 * 7 # Multiplication
2 ** 16 # Power
13 % 5 # Modulo
3 > 4
True and True
True or False
numbers = [1, 2, 3]
numbers[0]
for num in numbers:
print(num)
# Tuples use parentheses
a_tuple= (1, 2, 3)
another_tuple = ('blue', 'green', 'red')
# Note: lists use square brackets
a_list = [1, 2, 3]
# 1:
# 2:
translation = {'one': 1, 'two': 2}
translation['one']
rev = {1: 'one', 2: 'two'}
rev[1]
bad = {[1, 2, 3]: 3}
rev = {1: 'one', 2: 'two'}
rev[3] = 'three'
rev
for key, value in rev.items():
print(key, '->', value)
for key in rev.keys():
print(key, '->', rev[key])
# 1
challenge_dict =
# 2
# 3
# 4
import pandas as pd
# Note that pd.read_csv is used because we imported pandas as pd
pd.read_csv("data/NCD_RisC_bmi.csv")
df_surveys = pd.read_csv("data/NCD_RisC_bmi.csv")
df_surveys
df_surveys.head() # The head() function displays the first several lines of a file. It is discussed below.
type(df_surveys)
df_surveys.dtypes
df_surveys.columns
# Look at the column names
df_surveys.columns
pd.unique(df_surveys['year'])
df_surveys['bmi'].describe()
df_surveys['bmi'].min()
df_surveys['bmi'].max()
df_surveys['bmi'].mean()
df_surveys['bmi'].std()
df_surveys['bmi'].count()
# Group data by sex
grouped_data = df_surveys.groupby('sex')
# Summary statistics for all numeric columns by sex
grouped_data.describe()
# Provide the mean for each numeric column by sex
grouped_data.mean()
# Count the number of samples by species
year_counts = df_surveys.groupby('country')['year'].count()
print(year_counts)
df_surveys.groupby('country')['year'].count()['United States of America']
# Multiply all weight values by 2
df_surveys_doubled = df_surveys['bmi'] * 2
# Make sure figures appear inline in Jupyter Notebook
%matplotlib inline
# Create a quick bar chart
year_bmi = df_surveys.groupby("year").mean()["bmi"]
year_bmi.plot(kind='bar');
total_count = df_surveys.groupby('year')['country'].nunique()
# Let's plot that too
total_count.plot(kind='bar');
df_surveys.groupby("country").mean()['bmi'].sort_values()
d = {'one' : pd.Series([1., 2., 3.], index=['a', 'b', 'c']),'two' : pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
pd.DataFrame(d)
# Plot stacked data so columns 'one' and 'two' are stacked
my_df = pd.DataFrame(d)
my_df.plot(kind='bar',stacked=True,title="The title of my graph")
by_year_sex = df_surveys.groupby(['year','sex'])
year_sex_mean = by_year_sex['obese_pc'].mean()
year_sex_mean.head()
spc = year_sex_mean.unstack()
spc
s_plot = spc.plot(kind='bar',stacked=True,title="Mean BMI by year and sex")
s_plot.set_ylabel("Prevalence of obesity")
s_plot.set_xlabel("Year")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and check data
Step2: ## Analysis
Step3: Results
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import tabulate
import pprint
import click
import numpy as np
import pandas as pd
from ray.tune.commands import *
from nupic.research.frameworks.dynamic_sparse.common.browser import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import rcParams
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set(style="whitegrid")
sns.set_palette("colorblind")
# exps = ['replicate_hsd_test2']
# exps = ['replicate_hsd_debug1']
# exps = ['replicate_hsd_debug3_8x']
exps = ['replicate_hsd_debug6_8x']
paths = [os.path.expanduser("~/nta/results/{}".format(e)) for e in exps]
df = load_many(paths)
df.head(5)
# replace hebbian prine
df['hebbian_prune_perc'] = df['hebbian_prune_perc'].replace(np.nan, 0.0, regex=True)
df['weight_prune_perc'] = df['weight_prune_perc'].replace(np.nan, 0.0, regex=True)
df.columns
df.shape
df.iloc[1]
df.groupby('model')['model'].count()
num_epochs = 25
# Did any trials failed?
df[df["epochs"]<num_epochs]["epochs"].count()
# Removing failed or incomplete trials
df_origin = df.copy()
df = df_origin[df_origin["epochs"]>=num_epochs]
df.shape
# which ones failed?
# failed, or still ongoing?
df_origin['failed'] = df_origin["epochs"]<num_epochs
df_origin[df_origin['failed']]['epochs']
# helper functions
def mean_and_std(s):
return "{:.2f} ± {:.2f}".format(s.mean()*100, s.std()*100)
def round_mean(s):
return "{:.0f}".format(round(s.mean()))
stats = ['min', 'max', 'mean', 'std']
def agg(columns, filter=None, round=3):
if filter is None:
return (df.groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'val_acc_last': stats,
'model': ['count']})).round(round)
else:
return (df[filter].groupby(columns)
.agg({'val_acc_max_epoch': round_mean,
'val_acc_max': stats,
'val_acc_last': stats,
'model': ['count']})).round(round)
def agg_paper(columns, filter=None, round=3):
if filter is None:
return (df.groupby(columns)
.agg({'val_acc_max': mean_and_std,
'val_acc_last': mean_and_std,
'train_acc_last': mean_and_std,
'model': ['count']})).round(round)
else:
return (df[filter].groupby(columns)
.agg({'val_acc_max': mean_and_std,
'val_acc_last': mean_and_std,
'train_acc_last': mean_and_std,
'model': ['count']})).round(round)
agg(['model'])
agg_paper(['model'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Code for displaying Chinese text inside JSON
Step2: Pass in six variables and return a JSON object
Step3: url = u"http
|
<ASSISTANT_TASK:>
Python Code:
def cnv2utf8(mstr):
#print mstr
#print urllib.quote(mstr.encode(u"utf8"))
return urllib.quote(mstr.encode(u"utf8"))
class MyPrettyPrinter(pprint.PrettyPrinter):
def format(self, object, context, maxlevels, level):
if isinstance(object, unicode):
return (object.encode('utf8'), True, False)
return pprint.PrettyPrinter.format(self, object, context, maxlevels, level)
def printJson(aObj):
MyPrettyPrinter().pprint(aObj)
def getData(location, product, top, skip, EndDate, StartDate):
    ## Variable declarations
    # url is the API endpoint
    # ahash maps each URL placeholder to its replacement value
url = u"http://m.coa.gov.tw/OpenData/FarmTransData.aspx?$top={top}&$skip={skip}&Market={Market}&Crop={Crop}&EndDate={EndDate}&StartDate={StartDate}"
ahash={
u"{top}" :top,
u"{skip}" :skip,
u"{Market}" :cnv2utf8(location),
u"{Crop}" :cnv2utf8(product),
u"{EndDate}" :EndDate,
u"{StartDate}":StartDate,
}
    ## Replace every placeholder key in ahash with its value inside the URL
for abc in ahash:
url=url.replace(abc,ahash[abc])
#print url
    # Fetch the data from the API
rsps = urllib.urlopen( url.encode(u"utf8") )
np.random.seed(1337)
alist = [1, 2, 3, 4, 5]
for x in alist:
if int(np.random.random()*10)>7 :
print "X"
else:
print x
return json.loads(rsps.read())
pd.read_json(json.dumps(getData(u"", u"本島萵苣", u"10000", u"",u"105.06.30",u"103.05.01"))).to_csv(u"123.csv",encoding='utf-8')
#url = u"http://m.coa.gov.tw/OpenData/FarmTransData.aspx?$top={top}&$skip={skip}&filter={filter}"
#ahash={u"99110001":u"August",
# u"99110002":u"vicky"}
# ahash[u"99110001"]
# ahash={
#     u"{top}":u"20",
#     u"{skip}":u"100",
#     u"{filter}":u"Market=台北二&Crop=椰子",
#      }
# ahash
#ahash
# for abc in ahash:
#     print abc,ahash[abc]
#url.replace(u"gov",u"com")
#urllib.quote(u"椰子".encode(u"utf8"))
#ahash={
#     u"{top}":u"20",
#     u"{skip}":u"100",
#     u"{filter}":urllib.quote(u"Market=台北二&Crop=椰子".encode(u"utf8")),
#}
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Looking at https
Step2: sample from the dist
|
<ASSISTANT_TASK:>
Python Code:
import torch
import numpy as np
import math
import matplotlib.pyplot as plt
num_samples = 7
torch.manual_seed(123)
order = 3
W_gnd = torch.randn(order + 1)
left = -3
right = 3
variance = torch.randn(1)[0] * 10
# print('variance', variance)
# help(torch.arange)
x1 = torch.arange(left, right, (right - left) / 250)
# print(x1)
def expand_x(x1, order):
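    # Build the polynomial design matrix: column k holds x^k for k = 0..order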
N = x1.size()[0]
x2 = torch.zeros(N, order + 1)
for k in range(order + 1):
x2[:, k] = x1.pow(k)
return x2
x2 = expand_x(x1, order)
y_gnd = x2 @ W_gnd
x_samples = torch.rand(num_samples) * (right - left) + left
x_samples_aug = expand_x(x_samples, order=order)
y_samples = x_samples_aug @ W_gnd + torch.randn(num_samples) * np.sqrt(variance)
plt.plot(x1.numpy(), y_gnd.numpy())
# print('x_samples', x_samples)
# print('y_samples', y_samples)
plt.scatter(x_samples.numpy(), y_samples.numpy())
plt.show()
a_0 = 1
b_0 = 1
# prec_0 = torch.zeros(order + 1, order + 1)
prec_0 = torch.eye(order + 1)
# print('prec_0', prec_0)
mu_0 = torch.zeros(order + 1, 1)
X = x_samples_aug
Y = y_samples.view(-1, 1)
N = num_samples
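# Conjugate (normal-inverse-gamma) posterior update for Bayesian linear regression:
# prec_n = X^T X + prec_0 and mu_n = prec_n^{-1} (prec_0 mu_0 + X^T y)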
prec_n = X.transpose(0, 1) @ X + prec_0
# print('prec_n', prec_n)
mu_n = prec_n.inverse() @ ((prec_0 @ mu_0) + X.transpose(0, 1) @ Y)
# print('mu_n', mu_n)
a_n = a_0 + N / 2
b_n = b_0 + 0.5 * (Y.transpose(0, 1) @ Y + mu_0.transpose(0, 1) @ prec_0 @ mu_0 \
- mu_n.transpose(0, 1) @ prec_n @ mu_n)
# print('mu_n', mu_n)
# print('a_n', a_n)
# print('b_n', b_n)
num_line_samples = 10
for i in range(num_line_samples):
var = np.random.wald(a_n, b_n[0][0])
cov = prec_n.inverse().numpy() / var
w = np.random.multivariate_normal(mu_n.view(-1).numpy(), cov)
# print('w', w)
# print('a_n', a_n, 'b_n', b_n)
# print('var', var)
y = x2 @ torch.from_numpy(w).float()
plt.plot(x1.numpy(), y.numpy())
plt.scatter(x_samples.numpy(), y_samples.numpy())
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The general form of writing out such combined conditional statements is as follows
Step2: For very simple conditional checks, you can write the if statement on a single line as a single expression; the result will be the expression before the if when the condition is true, or the expression after the else otherwise.
Step3: Comparisons and truth
Step4: Python has two additional comparison operators: is and is not.
Step5: Another example with lists x, y, and z
Step6: In Python even expressions that do not involve an obvious boolean value can be assigned a status of "truthfulness"; the value of an item itself can be forced to be considered as either True or False inside an if statement. For the Python built-in types discussed in this chapter the following are deemed to be False in such a context
|
<ASSISTANT_TASK:>
Python Code:
x = -3
if x > 0:
print("Value is positive")
elif x < 0:
print("Value is negative")
else:
print("Value is zero")
gene = "BRCA2"
geneExpression = -1.2
if geneExpression < 0:
print(gene, "is downregulated")
elif geneExpression > 0:
print(gene, "is upregulated")
else:
pass
x = 11
if x < 10:
s = "Yes"
else:
s = "No"
print(s)
# Could also be written onto one line
s = "Yes" if x < 10 else "No"
print(s)
x = -5
if x > 0 and x < 10:
print("In range A")
elif x < 0 or x > 10:
print("In range B")
a = 500
b = 500
print(a == b) # True
print(a is b) # False
x = [123, 54, 92, 87, 33]
y = x[:] # y is a copy of x
z = x
print(x)
print(y)
print(z)
print("Are values of y and x the same?", y == x)
print("Are objects y and x the same?", y is x)
print("Are values of z and x the same?", z == x)
print("Are objects z and x the same?", z is x)
# Let's change x
x[1] = 23
print(x)
print(y)
print(z)
print("Are values of y and x the same?", y == x)
print("Are objects y and x the same?", y is x)
print("Are values of z and x the same?", z == x)
print("Are objects z and x the same?", z is x)
x = '' # An empty string
y = ['a'] # A list with one item
if x:
print("x is true")
else:
print("x is false")
if y:
print("y is true")
else:
print("y is false")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 0b. Data Location
Step2: 0c. Load Data
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
# You can add anything you need as you work
F_META = '../Day2/dsfp_ztf_meta.npy'
F_FEATS = '../Day2/dsfp_ztf_feats.npy'
D_STAMPS = '../Day2/dsfp_ztf_png_stamps'
meta_np = np.load(F_META)
feats_np = np.load(F_FEATS)
COL_NAMES = ['diffmaglim', 'magpsf', 'sigmapsf', 'chipsf', 'magap', 'sigmagap',
'distnr', 'magnr', 'sigmagnr', 'chinr', 'sharpnr', 'sky',
'magdiff', 'fwhm', 'classtar', 'mindtoedge', 'magfromlim', 'seeratio',
'aimage', 'bimage', 'aimagerat', 'bimagerat', 'elong', 'nneg',
'nbad', 'ssdistnr', 'ssmagnr', 'sumrat', 'magapbig', 'sigmagapbig',
'ndethist', 'ncovhist', 'jdstarthist', 'jdendhist', 'scorr', 'label']
# NOTE FROM Umaa: I've decided to eliminate the following features. Dropping them.
#
COL_TO_DROP = ['ndethist', 'ncovhist', 'jdstarthist', 'jdendhist',
'distnr', 'magnr', 'sigmagnr', 'chinr', 'sharpnr',
'classtar', 'ssdistnr', 'ssmagnr', 'aimagerat', 'bimagerat',
'magapbig', 'sigmagapbig', 'scorr']
feats_df = pd.DataFrame(data=feats_np, index=meta_np['candid'], columns=COL_NAMES)
print("There are {} columns left.".format(len(feats_df.columns)))
print("They are: {}".format(list(feats_df.columns)))
feats_df.drop(columns=COL_TO_DROP, inplace=True)
#feats_df.describe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To keep the calculations below manageable we specify a single nside=64 healpixel in an arbitrary location of the DESI footprint.
Step2: Specifying the random seed makes our calculations reproducible.
Step4: Define a couple wrapper routines we will use below several times.
Step5: Tracer QSOs
Step6: The various read methods return a dictionary with (hopefully self-explanatory) target- and mock-specific quantities.
Step7: Now we can generate the spectra as well as the targeting catalogs (targets) and corresponding truth table.
Step8: The truth catalog contains the target-type-agnostic, known properties of each object (including the noiseless photometry), while the objtruth catalog contains different information depending on the type of target.
Step9: Next, let's run target selection, after which point the targets catalog should look just like an imaging targeting catalog (here, using the DR7 data model).
Step10: And indeed, we can see that only a subset of the QSOs were identified as targets (the rest scattered out of the QSO color selection boxes).
Step11: Finally, let's plot some example spectra.
Step12: Generating QSO spectra with cosmological Lya skewers proceeds along similar lines.
Step13: Let's plot together some of the spectra with the old and the new continuum model
Step14: And finally we compare the colors for the two runs with the new and the old continuum
Step15: Conclusion
Step16: Demonstrate the other extragalactic target classes
Step17: LRGs
Step18: ELGs
Step19: BGS
Step20: Next, demonstrate how to generate spectra of stars...
Step21: MWS_NEARBY
Step22: White dwarfs (WDs)
Step23: Finally demonstrate how to generate (empty) SKY spectra.
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from desiutil.log import get_logger, DEBUG
log = get_logger()
import seaborn as sns
sns.set(style='white', font_scale=1.1, palette='Set2')
%matplotlib inline
healpixel = 26030
nside = 64
seed = 555
rand = np.random.RandomState(seed)
def plot_subset(wave, flux, truth, objtruth, nplot=16, ncol=4, these=None,
xlim=None, loc='right', targname='', objtype=''):
    """Plot a random sampling of spectra."""
nspec, npix = flux.shape
if nspec < nplot:
nplot = nspec
nrow = np.ceil(nplot / ncol).astype('int')
if loc == 'left':
xtxt, ytxt, ha = 0.05, 0.93, 'left'
else:
xtxt, ytxt, ha = 0.93, 0.93, 'right'
if these is None:
these = rand.choice(nspec, nplot, replace=False)
these = np.sort(these)
ww = (wave > 5500) * (wave < 5550)
fig, ax = plt.subplots(nrow, ncol, figsize=(2.5*ncol, 2*nrow), sharey=False, sharex=True)
for thisax, indx in zip(ax.flat, these):
thisax.plot(wave, flux[indx, :] / np.median(flux[indx, ww]))
if objtype == 'STAR' or objtype == 'WD':
thisax.text(xtxt, ytxt, r'$T_{{eff}}$={:.0f} K'.format(objtruth['TEFF'][indx]),
ha=ha, va='top', transform=thisax.transAxes, fontsize=13)
else:
thisax.text(xtxt, ytxt, 'z={:.3f}'.format(truth['TRUEZ'][indx]),
ha=ha, va='top', transform=thisax.transAxes, fontsize=13)
thisax.xaxis.set_major_locator(plt.MaxNLocator(3))
if xlim:
thisax.set_xlim(xlim)
for thisax in ax.flat:
thisax.yaxis.set_ticks([])
thisax.margins(0.2)
fig.suptitle(targname)
fig.subplots_adjust(wspace=0.05, hspace=0.05, top=0.93)
from desitarget.mock.mockmaker import QSOMaker
QSO = QSOMaker(seed=seed)
dir(QSOMaker)
data = QSO.read(healpixels=healpixel, nside=nside)
for key in sorted(list(data.keys())):
print('{:>20}'.format(key))
%time flux, wave, targets, truth, objtruth = QSO.make_spectra(data)
print(flux.shape, wave.shape)
truth
objtruth
QSO.select_targets(targets, truth)
targets
from desitarget.targetmask import desi_mask
isqso = (targets['DESI_TARGET'] & desi_mask.QSO) != 0
print('Identified {} / {} QSO targets.'.format(np.count_nonzero(isqso), len(targets)))
plot_subset(wave, flux, truth, objtruth, targname='QSO')
from desitarget.mock.mockmaker import LYAMaker
mockfile='/project/projectdirs/desi/mocks/lya_forest/london/v9.0/v9.0.0/master.fits'
LYA = LYAMaker(seed=seed, balprob=0.25)
lyadata = LYA.read(mockfile=mockfile,healpixels=healpixel, nside=nside)
%time lyaflux, lyawave, lyatargets, lyatruth, lyaobjtruth = LYA.make_spectra(lyadata)
lyaobjtruth
plot_subset(lyawave, lyaflux, lyatruth, lyaobjtruth, xlim=(3500, 5500), targname='LYA')
# Now let's generate the same spectra, but including the different features and the new continuum model.
# For this we need to reload the desitarget module; simply defining a different variable for the LYAMaker does not seem to be enough.
del sys.modules['desitarget.mock.mockmaker']
from desitarget.mock.mockmaker import LYAMaker
LYA = LYAMaker(seed=seed,sqmodel='lya_simqso_model_develop',balprob=0.25)
lyadata_continum = LYA.read(mockfile=mockfile,healpixels=healpixel, nside=nside)
%time lyaflux_cont, lyawave_cont, lyatargets_cont, lyatruth_cont, lyaobjtruth_cont = LYA.make_spectra(lyadata_continum)
plt.figure(figsize=(20, 10))
indx=rand.choice(len(lyaflux),9)
for i in range(9):
plt.subplot(3, 3, i+1)
plt.plot(lyawave,lyaflux[indx[i]],label="Old Continum")
plt.plot(lyawave_cont,lyaflux_cont[indx[i]],label="New Continum")
plt.legend()
plt.plot(lyatruth["FLUX_W1"],lyatruth_cont["FLUX_W1"]/lyatruth["FLUX_W1"]-1,'.')
plt.xlabel("FLUX_W1")
plt.ylabel(r"FLUX_W1$^{new}$/FLUX_W1-1")
plt.plot(lyatruth["FLUX_W2"],lyatruth_cont["FLUX_W2"]/lyatruth["FLUX_W2"]-1,'.')
plt.xlabel("FLUX_W2")
plt.ylabel(r"(FLUX_W2$^{new}$/FLUX_W2)-1")
plt.hist(lyatruth["FLUX_W1"],bins=100,label="Old Continum",alpha=0.7)
plt.hist(lyatruth_cont["FLUX_W1"],bins=100,label="New Continum",histtype='step',linestyle='--')
plt.xlim(0,100) #Limiting to 100 to see it better.
plt.xlabel("FLUX_W1")
plt.legend()
plt.hist(lyatruth["FLUX_W2"],bins=100,label="Old Continum",alpha=0.7)
plt.hist(lyatruth_cont["FLUX_W2"],bins=100,label="New Continum",histtype='step',linestyle='--')
plt.xlim(0,100) #Limiting to 100 to see it better.
plt.xlabel("FLUX_W2")
plt.legend()
del sys.modules['desitarget.mock.mockmaker']
from desitarget.mock.mockmaker import LYAMaker  # Re-imported to reload desitarget; instantiating a different LYAMaker variable does not seem to be enough.
LYA = LYAMaker(seed=seed,sqmodel='lya_simqso_model',balprob=0.25,add_dla=True,add_metals="all",add_lyb=True)
lyadata_all= LYA.read(mockfile=mockfile,healpixels=healpixel, nside=nside)
%time lyaflux_all, lyawave_all, lyatargets_all, lyatruth_all, lyaobjtruth_all = LYA.make_spectra(lyadata_all)
plot_subset(lyawave_all, lyaflux_all, lyatruth_all, lyaobjtruth_all, xlim=(3500, 5500), targname='LYA')
def demo_mockmaker(Maker, seed=None, nrand=16, loc='right'):
TARGET = Maker(seed=seed)
log.info('Reading the mock catalog for {}s'.format(TARGET.objtype))
tdata = TARGET.read(healpixels=healpixel, nside=nside)
log.info('Generating {} random spectra.'.format(nrand))
indx = rand.choice(len(tdata['RA']), np.min( (nrand, len(tdata['RA'])) ) )
tflux, twave, ttargets, ttruth, tobjtruth = TARGET.make_spectra(tdata, indx=indx)
log.info('Selecting targets')
TARGET.select_targets(ttargets, ttruth)
plot_subset(twave, tflux, ttruth, tobjtruth, loc=loc,
targname=tdata['TARGET_NAME'], objtype=TARGET.objtype)
from desitarget.mock.mockmaker import LRGMaker
%time demo_mockmaker(LRGMaker, seed=seed, loc='left')
from desitarget.mock.mockmaker import ELGMaker
%time demo_mockmaker(ELGMaker, seed=seed, loc='left')
from desitarget.mock.mockmaker import BGSMaker
%time demo_mockmaker(BGSMaker, seed=seed)
from desitarget.mock.mockmaker import MWS_MAINMaker
%time demo_mockmaker(MWS_MAINMaker, seed=seed, loc='left')
from desitarget.mock.mockmaker import MWS_NEARBYMaker
%time demo_mockmaker(MWS_NEARBYMaker, seed=seed, loc='left')
from desitarget.mock.mockmaker import WDMaker
%time demo_mockmaker(WDMaker, seed=seed, loc='right')
from desitarget.mock.mockmaker import SKYMaker
SKY = SKYMaker(seed=seed)
skydata = SKY.read(healpixels=healpixel, nside=nside)
skyflux, skywave, skytargets, skytruth, objtruth = SKY.make_spectra(skydata)
SKY.select_targets(skytargets, skytruth)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Today's lab covers
Step2: Let's load in the data
Step3: Question 1
Step4: Question 2
Step5: Question 4
Step6: Question 5
Step7: Question 6
Step8: Nice! With pipelines, we can combine any number of transformations and treat the whole thing as a single classifier.
Step9: Question 8
Step10: Question 9
Step11: Question 11
Step12: Question 12
Step13: Question 13
Step14: Now, run the cell below to find the test error of your simple linear model, your polynomial model, and your regularized polynomial model.
Step15: Nice! You've used regularization and cross-validation to fit an accurate polynomial model to the dataset.
|
<ASSISTANT_TASK:>
Python Code:
!pip install -U sklearn
import numpy as np
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as skl
import sklearn.linear_model as lm
import scipy.io as sio
!pip install -U okpy
from client.api.notebook import Notebook
ok = Notebook('lab11.ok')
!head train.csv
data = pd.read_csv('train.csv')
X = data[['X']].as_matrix()
y = data['y'].as_matrix()
X.shape, y.shape
_ = plt.plot(X[:, 0], y, '.')
plt.xlabel('Change in water level (X)')
plt.ylabel('Water flow out of dam (y)')
def plot_data_and_curve(curve_x, curve_y):
plt.plot(X[:, 0], y, '.')
plt.plot(curve_x, curve_y, '-')
plt.ylim(-20, 60)
plt.xlabel('Change in water level (X)')
plt.ylabel('Water flow out of dam (y)')
linear_clf = lm.LinearRegression() #SOLUTION
# Fit your classifier
linear_clf.fit(X, y)
# Predict a bunch of points to draw best fit line
all_x = np.linspace(-55, 55, 200).reshape(-1, 1)
line = linear_clf.predict(all_x)
plot_data_and_curve(all_x, line)
from sklearn.preprocessing import PolynomialFeatures
X_poly = PolynomialFeatures(degree=8).fit_transform(X) #SOLUTION
X_poly.shape
poly_clf = lm.LinearRegression() #SOLUTION
# Fit your classifier
poly_clf.fit(X_poly, y) #SOLUTION
# Set curve to your model's predictions on all_x
curve = poly_clf.predict(PolynomialFeatures(degree=8).fit_transform(all_x)) #SOLUTION
plot_data_and_curve(all_x, curve)
def mse(predicted_y, actual_y):
return np.mean((predicted_y - actual_y) ** 2)
line_training_error = mse(linear_clf.predict(X), y) #SOLUTION
poly_training_error = mse(poly_clf.predict(PolynomialFeatures(degree=8).fit_transform(X)), y) #SOLUTION
line_training_error, poly_training_error
from sklearn.pipeline import make_pipeline
poly_pipeline = make_pipeline(PolynomialFeatures(degree=8), lm.LinearRegression()) #SOLUTION
# Fit the pipeline on X and y
poly_pipeline.fit(X, y) #SOLUTION
# Compute the training error
pipeline_training_error = mse(poly_pipeline.predict(X), y) #SOLUTION
pipeline_training_error
from sklearn.model_selection import train_test_split
np.random.seed(42)
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.33) #SOLUTION
X_train.shape, X_valid.shape
# Fit the linear classifier
linear_clf.fit(X_train, y_train) #SOLUTION
# Fit the polynomial pipeline
poly_pipeline.fit(X_train, y_train) #SOLUTION
X_train_line_error = mse(linear_clf.predict(X_train), y_train) #SOLUTION
X_valid_line_error = mse(linear_clf.predict(X_valid), y_valid) #SOLUTION
X_train_poly_error = mse(poly_pipeline.predict(X_train), y_train) #SOLUTION
X_valid_poly_error = mse(poly_pipeline.predict(X_valid), y_valid) #SOLUTION
X_train_line_error, X_valid_line_error, X_train_poly_error, X_valid_poly_error
ridge_pipeline = make_pipeline(PolynomialFeatures(degree=8), lm.Ridge(normalize=True, alpha=1.)) #SOLUTION
# Fit your classifier
ridge_pipeline.fit(X_train, y_train) #SOLUTION
# Set curve to your model's predictions on all_x
ridge_curve = ridge_pipeline.predict(all_x) #SOLUTION
plot_data_and_curve(all_x, ridge_curve)
ridge_train_error = mse(ridge_pipeline.predict(X_train), y_train) #SOLUTION
ridge_valid_error = mse(ridge_pipeline.predict(X_valid), y_valid) #SOLUTION
ridge_train_error, ridge_valid_error
alphas = [0.0001, 0.001, 0.01, 0.1, 0.5, 1.0, 10.0]
# Your code to find the best alpha
def compute_error(alpha):
pline = make_pipeline(PolynomialFeatures(degree=8),
lm.Ridge(normalize=True, alpha=alpha))
pline.fit(X_train, y_train)
return mse(pline.predict(X_valid), y_valid)
errors = [compute_error(alpha) for alpha in alphas]
best_alpha_idx = np.argmin(errors)
best_alpha, best_error = alphas[best_alpha_idx], errors[best_alpha_idx]
best_alpha, best_error
best_pipeline = make_pipeline(PolynomialFeatures(degree=8), lm.Ridge(normalize=True, alpha=best_alpha)) #SOLUTION
best_pipeline.fit(X_train, y_train)
best_curve = best_pipeline.predict(all_x)
plot_data_and_curve(all_x, best_curve)
test_data = pd.read_csv('test.csv')
X_test = test_data[['X']].as_matrix()
y_test = test_data['y'].as_matrix()
line_test_error = mse(linear_clf.predict(X_test), y_test)
poly_test_error = mse(poly_pipeline.predict(X_test), y_test)
best_test_error = mse(best_pipeline.predict(X_test), y_test)
line_test_error, poly_test_error, best_test_error
i_finished_the_lab = False
_ = ok.grade('qcompleted')
_ = ok.backup()
_ = ok.submit()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part B
Step2: Part C
|
<ASSISTANT_TASK:>
Python Code:
truth = "This is some text.\nMore text, but on a different line!\nInsert your favorite meme here.\n"
pred = read_file_contents("q1data/file1.txt")
assert truth == pred
retval = -1
try:
retval = read_file_contents("nonexistent/path.txt")
except:
assert False
else:
assert retval is None
import os
if os.path.exists("q1data/file1_out.txt"):
os.remove("q1data/file1_out.txt")
assert count_lines("q1data/file1.txt", "q1data/file1_out.txt")
assert os.path.exists("q1data/file1_out.txt")
assert int(open("q1data/file1_out.txt", "r").read()) == 3
if os.path.exists("meaningless"):
os.remove("meaningless")
r1 = None
try:
r1 = count_lines("yet/another/nonexistent/path.txt", "meaningless")
except:
assert False
else:
assert not r1
r2 = None
try:
r2 = count_lines("q1data/file1.txt", "/this/should/throw/an/error.txt")
except:
assert False
else:
assert not r2
if os.path.exists("q1data/out_again.txt"):
os.remove("q1data/out_again.txt")
assert acount_lines("q1data/file1.txt", "q1data/out_again.txt")
assert os.path.exists("q1data/out_again.txt")
assert int(open("q1data/out_again.txt", "r").read()) == 3
assert acount_lines("q1data/file2.txt", "q1data/out_again.txt")
assert os.path.exists("q1data/out_again.txt")
assert int("".join(open("q1data/out_again.txt", "r").read().split("\n"))) == 34
r1 = None
try:
r1 = acount_lines("yet/another/nonexistent/path.txt", "meaningless")
except:
assert False
else:
assert not r1
r2 = None
try:
r2 = acount_lines("q1data/file2.txt", "/this/should/throw/an/error.txt")
except:
assert False
else:
assert not r2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting the data
Step2: These SVHN files are .mat files typically used with Matlab. However, we can load them in with scipy.io.loadmat which we imported above.
Step3: Here I'm showing a small sample of the images. Each of these is 32x32 with 3 color channels (RGB). These are the real images we'll pass to the discriminator and what the generator will eventually fake.
Step4: Here we need to do a bit of preprocessing and getting the images into a form where we can pass batches to the network. First off, we need to rescale the images to a range of -1 to 1, since the output of our generator is also in that range. We also have a set of test and validation images which could be used if we're trying to identify the numbers in the images.
Step5: Network Inputs
Step6: Generator
Step7: Discriminator
Step9: Model Loss
Step11: Optimizers
Step12: Building the model
Step13: Here is a function for displaying generated images.
Step14: And another function we can use to train our network. Notice when we call generator to create the samples to display, we set training to False. That's so the batch normalization layers will use the population statistics rather than the batch statistics. Also notice that we set the net.input_real placeholder when we run the generator's optimizer. The generator doesn't actually use it, but we'd get an error without it because of the tf.control_dependencies block we created in model_opt.
Step15: Hyperparameters
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import loadmat
import tensorflow as tf
!mkdir data
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
data_dir = 'data/'
if not isdir(data_dir):
raise Exception("Data directory doesn't exist!")
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(data_dir + "train_32x32.mat"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Training Set') as pbar:
urlretrieve(
'http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
data_dir + 'train_32x32.mat',
pbar.hook)
if not isfile(data_dir + "test_32x32.mat"):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='SVHN Testing Set') as pbar:
urlretrieve(
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
data_dir + 'test_32x32.mat',
pbar.hook)
trainset = loadmat(data_dir + 'train_32x32.mat')
testset = loadmat(data_dir + 'test_32x32.mat')
idx = np.random.randint(0, trainset['X'].shape[3], size=36)
fig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),)
for ii, ax in zip(idx, axes.flatten()):
ax.imshow(trainset['X'][:,:,:,ii], aspect='equal')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
plt.subplots_adjust(wspace=0, hspace=0)
def scale(x, feature_range=(-1, 1)):
# scale to (0, 1)
x = ((x - x.min())/(255 - x.min()))
# scale to feature_range
min, max = feature_range
x = x * (max - min) + min
return x
class Dataset:
def __init__(self, train, test, val_frac=0.5, shuffle=False, scale_func=None):
split_idx = int(len(test['y'])*(1 - val_frac))
self.test_x, self.valid_x = test['X'][:,:,:,:split_idx], test['X'][:,:,:,split_idx:]
self.test_y, self.valid_y = test['y'][:split_idx], test['y'][split_idx:]
self.train_x, self.train_y = train['X'], train['y']
self.train_x = np.rollaxis(self.train_x, 3)
self.valid_x = np.rollaxis(self.valid_x, 3)
self.test_x = np.rollaxis(self.test_x, 3)
if scale_func is None:
self.scaler = scale
else:
self.scaler = scale_func
self.shuffle = shuffle
def batches(self, batch_size):
if self.shuffle:
idx = np.arange(len(dataset.train_x))
np.random.shuffle(idx)
self.train_x = self.train_x[idx]
self.train_y = self.train_y[idx]
n_batches = len(self.train_y)//batch_size
for ii in range(0, len(self.train_y), batch_size):
x = self.train_x[ii:ii+batch_size]
y = self.train_y[ii:ii+batch_size]
yield self.scaler(x), y
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, (None, *real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
return inputs_real, inputs_z
def generator(z, output_dim, reuse=False, alpha=0.2, training=True):
with tf.variable_scope('generator', reuse=reuse):
# First fully connected layer
x1 = tf.layers.dense(z, 4*4*512)
# Reshape it to start the convolutional stack
x1 = tf.reshape(x1, (-1, 4, 4, 512))
x1 = tf.layers.batch_normalization(x1, training=training)
x1 = tf.maximum(alpha * x1, x1)
# 4x4x512 now
x2 = tf.layers.conv2d_transpose(x1, 256, 5, strides=2, padding='same')
x2 = tf.layers.batch_normalization(x2, training=training)
x2 = tf.maximum(alpha * x2, x2)
# 8x8x256 now
x3 = tf.layers.conv2d_transpose(x2, 128, 5, strides=2, padding='same')
x3 = tf.layers.batch_normalization(x3, training=training)
x3 = tf.maximum(alpha * x3, x3)
# 16x16x128 now
# Output layer
logits = tf.layers.conv2d_transpose(x3, output_dim, 5, strides=2, padding='same')
# 32x32x3 now
out = tf.tanh(logits)
return out
def discriminator(x, reuse=False, alpha=0.2):
with tf.variable_scope('discriminator', reuse=reuse):
# Input layer is 32x32x3
x1 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')
relu1 = tf.maximum(alpha * x1, x1)
# 16x16x64
x2 = tf.layers.conv2d(relu1, 128, 5, strides=2, padding='same')
bn2 = tf.layers.batch_normalization(x2, training=True)
relu2 = tf.maximum(alpha * bn2, bn2)
# 8x8x128
x3 = tf.layers.conv2d(relu2, 256, 5, strides=2, padding='same')
bn3 = tf.layers.batch_normalization(x3, training=True)
relu3 = tf.maximum(alpha * bn3, bn3)
# 4x4x256
# Flatten it
flat = tf.reshape(relu3, (-1, 4*4*256))
logits = tf.layers.dense(flat, 1)
out = tf.sigmoid(logits)
return out, logits
def model_loss(input_real, input_z, output_dim, alpha=0.2):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param output_dim: The number of channels in the output image
    :return: A tuple of (discriminator loss, generator loss)
    """
g_model = generator(input_z, output_dim, alpha=alpha)
d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, alpha=alpha)
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))
d_loss = d_loss_real + d_loss_fake
return d_loss, g_loss
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """
    Get optimization operations
    :param d_loss: Discriminator loss Tensor
    :param g_loss: Generator loss Tensor
    :param learning_rate: Learning Rate Placeholder
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :return: A tuple of (discriminator training operation, generator training operation)
    """
# Get weights and bias to update
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
g_vars = [var for var in t_vars if var.name.startswith('generator')]
# Optimize
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)
return d_train_opt, g_train_opt
class GAN:
def __init__(self, real_size, z_size, learning_rate, alpha=0.2, beta1=0.5):
tf.reset_default_graph()
self.input_real, self.input_z = model_inputs(real_size, z_size)
self.d_loss, self.g_loss = model_loss(self.input_real, self.input_z,
real_size[2], alpha=alpha)
self.d_opt, self.g_opt = model_opt(self.d_loss, self.g_loss, learning_rate, beta1)
def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):
fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,
sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.axis('off')
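        # min-max rescale the generated image (tanh output, roughly in [-1, 1]) to the 0-255 uint8 range for display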
img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)
ax.set_adjustable('box-forced')
im = ax.imshow(img, aspect='equal')
plt.subplots_adjust(wspace=0, hspace=0)
return fig, axes
def train(net, dataset, epochs, batch_size, print_every=10, show_every=100, figsize=(5,5)):
saver = tf.train.Saver()
sample_z = np.random.uniform(-1, 1, size=(72, z_size))
samples, losses = [], []
steps = 0
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for x, y in dataset.batches(batch_size):
steps += 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
_ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})
if steps % print_every == 0:
# At the end of each epoch, get the losses and print them out
train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
train_loss_g = net.g_loss.eval({net.input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
if steps % show_every == 0:
gen_samples = sess.run(
generator(net.input_z, 3, reuse=True, training=False),
feed_dict={net.input_z: sample_z})
samples.append(gen_samples)
_ = view_samples(-1, samples, 6, 12, figsize=figsize)
plt.show()
saver.save(sess, './checkpoints/generator.ckpt')
with open('samples.pkl', 'wb') as f:
pkl.dump(samples, f)
return losses, samples
real_size = (32,32,3)
z_size = 100
learning_rate = 0.0002
batch_size = 128
epochs = 25
alpha = 0.2
beta1 = 0.5
# Create the network
net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1)
dataset = Dataset(trainset, testset)
losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5))
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
plt.plot(losses.T[1], label='Generator', alpha=0.5)
plt.title("Training Losses")
plt.legend()
_ = view_samples(-1, samples, 6, 12, figsize=(10,5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Campaign Example
Step2: Identity Example
Step3: Indicator Example
Step4: If the patterns were identical the result would have been 100.
Step5: Malware Example
Step6: Threat Actor Example
Step7: Tool Example
Step8: Vulnerability Example
Step9: Other Examples
Step10: Some object types do not have a defined method for calculating semantic similarity and by default will give a warning and a result of zero.
Step11: By default, comparing objects of different spec versions will result in a ValueError.
Step12: You can optionally allow comparing across spec versions by providing a configuration dictionary using ignore_spec_version like in the next example
Step13: Detailed Results
Step14: You can also retrieve the detailed results in a dictionary so they can be accessed and used more programmatically. The object_similarity() function takes an optional third argument, called prop_scores. This argument should be a dictionary into which the detailed debugging information will be stored.
Step15: Custom Comparisons
Step16: Notice how there is a difference in the semantic similarity scores, simply due to the fact that custom weights were used.
Step17: Custom Semantic Similarity Functions
Step18: You can also customize the comparison of an entire object type instead of just how each property is compared. To do this, provide a weights dictionary to object_similarity() and in this dictionary include a key of "method" whose value is your custom semantic similarity function for that object type.
Step19: You can also write custom functions for comparing objects of your own custom types. Like in the previous example, you can use the built-in functions listed above to help with this, or write your own. In the following example we define semantic similarity for our new x-foobar object type. Notice that this time we have included support for detailed results with prop_scores.
Step20: Checking Graph Similarity and Equivalence
|
<ASSISTANT_TASK:>
Python Code:
import stix2
from stix2 import AttackPattern, Environment, MemoryStore
env = Environment(store=MemoryStore())
ap1 = AttackPattern(
name="Phishing",
external_references=[
{
"url": "https://example2",
"source_name": "some-source2",
},
],
)
ap2 = AttackPattern(
name="Spear phishing",
external_references=[
{
"url": "https://example2",
"source_name": "some-source2",
},
],
)
print(env.object_similarity(ap1, ap2))
print(env.object_equivalence(ap1, ap2, threshold=90))
from stix2 import Campaign
c1 = Campaign(
name="Someone Attacks Somebody",)
c2 = Campaign(
name="Another Campaign",)
print(env.object_similarity(c1, c2))
print(env.object_equivalence(c1, c2, threshold=90))
from stix2 import Identity
id1 = Identity(
name="John Smith",
identity_class="individual",
description="Just some guy",
)
id2 = Identity(
name="John Smith",
identity_class="individual",
description="A person",
)
print(env.object_similarity(id1, id2))
print(env.object_equivalence(id1, id2, threshold=90))
from stix2.v21 import Indicator
ind1 = Indicator(
indicator_types=['malicious-activity'],
pattern_type="stix",
pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
valid_from="2017-01-01T12:34:56Z",
)
ind2 = Indicator(
indicator_types=['malicious-activity'],
pattern_type="stix",
pattern="[file:hashes.MD5 = '79054025255fb1a26e4bc422aef54eb4']",
valid_from="2017-01-01T12:34:56Z",
)
print(env.object_similarity(ind1, ind2))
print(env.object_equivalence(ind1, ind2, threshold=90))
from stix2 import Location
loc1 = Location(
latitude=38.889,
longitude=-77.023,
)
loc2 = Location(
latitude=40.713,
longitude=-74.006,
)
print(env.object_similarity(loc1, loc2))
print(env.object_equivalence(loc1, loc2, threshold=90))
from stix2 import Malware
MALWARE_ID = "malware--9c4638ec-f1de-4ddb-abf4-1b760417654e"
mal1 = Malware(id=MALWARE_ID,
malware_types=['ransomware'],
name="Cryptolocker",
is_family=False,
)
mal2 = Malware(id=MALWARE_ID,
malware_types=['ransomware', 'dropper'],
name="Cryptolocker",
is_family=False,
)
print(env.object_similarity(mal1, mal2))
print(env.object_equivalence(mal1, mal2, threshold=90))
from stix2 import ThreatActor
THREAT_ACTOR_ID = "threat-actor--8e2e2d2b-17d4-4cbf-938f-98ee46b3cd3f"
ta1 = ThreatActor(id=THREAT_ACTOR_ID,
threat_actor_types=["crime-syndicate"],
name="Evil Org",
aliases=["super-evil"],
)
ta2 = ThreatActor(id=THREAT_ACTOR_ID,
threat_actor_types=["spy"],
name="James Bond",
aliases=["007"],
)
print(env.object_similarity(ta1, ta2))
print(env.object_equivalence(ta1, ta2, threshold=90))
from stix2 import Tool
t1 = Tool(
tool_types=["remote-access"],
name="VNC",
)
t2 = Tool(
tool_types=["remote-access"],
name="VNC",
description="This is a tool"
)
print(env.object_similarity(t1, t2))
print(env.object_equivalence(t1, t2, threshold=90))
from stix2 import Vulnerability
vuln1 = Vulnerability(
name="Heartbleed",
external_references=[
{
"url": "https://example",
"source_name": "some-source",
},
],
)
vuln2 = Vulnerability(
name="Heartbleed",
)
print(env.object_similarity(vuln1, vuln2))
print(env.object_equivalence(vuln1, vuln2, threshold=90))
print(env.object_similarity(ind1, vuln1))
from stix2 import Report
r1 = Report(
report_types=["campaign"],
name="Bad Cybercrime",
published="2016-04-06T20:03:00.000Z",
object_refs=["indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7"],
)
r2 = Report(
report_types=["campaign"],
name="Bad Cybercrime",
published="2016-04-06T20:03:00.000Z",
object_refs=["indicator--a740531e-63ff-4e49-a9e1-a0a3eed0e3e7"],
)
print(env.object_similarity(r1, r2))
from stix2.v20 import Identity as Identity20
id20 = Identity20(
name="John Smith",
identity_class="individual",
)
print(env.object_similarity(id2, id20))
from stix2.v20 import Identity as Identity20
id20 = Identity20(
name="John Smith",
identity_class="individual",
)
print(env.object_similarity(id2, id20, **{"_internal": {"ignore_spec_version": True}}))
import logging
logging.basicConfig(format='%(message)s')
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ta3 = ThreatActor(
threat_actor_types=["crime-syndicate"],
name="Evil Org",
aliases=["super-evil"],
)
ta4 = ThreatActor(
threat_actor_types=["spy"],
name="James Bond",
aliases=["007"],
)
print(env.object_similarity(ta3, ta4))
logger.setLevel(logging.ERROR)
ta5 = ThreatActor(
threat_actor_types=["crime-syndicate", "spy"],
name="Evil Org",
aliases=["super-evil"],
)
ta6 = ThreatActor(
threat_actor_types=["spy"],
name="James Bond",
aliases=["007"],
)
prop_scores = {}
print("Semantic equivalence score using standard weights: %s" % (env.object_similarity(ta5, ta6, prop_scores)))
print(prop_scores)
for prop in prop_scores:
if prop not in ["matching_score", "sum_weights"]:
print ("Prop: %s | weight: %s | contributing_score: %s" % (prop, prop_scores[prop]['weight'], prop_scores[prop]['contributing_score']))
else:
print ("%s: %s" % (prop, prop_scores[prop]))
weights = {
"threat-actor": { # You must specify the object type
"name": (30, stix2.equivalence.object.partial_string_based), # Each property's value must be a tuple
"threat_actor_types": (50, stix2.equivalence.object.partial_list_based), # The 1st component must be the weight
"aliases": (20, stix2.equivalence.object.partial_list_based) # The 2nd component must be the comparison function
}
}
print("Using standard weights: %s" % (env.object_similarity(ta5, ta6)))
print("Using custom weights: %s" % (env.object_similarity(ta5, ta6, **weights)))
prop_scores = {}
weights = {
"threat-actor": {
"name": (45, stix2.equivalence.object.partial_string_based),
"threat_actor_types": (10, stix2.equivalence.object.partial_list_based),
"aliases": (45, stix2.equivalence.object.partial_list_based),
},
}
env.object_similarity(ta5, ta6, prop_scores, **weights)
print(prop_scores)
def my_string_compare(p1, p2):
if p1 == p2:
return 1
else:
return 0
weights = {
"threat-actor": {
"name": (45, my_string_compare),
"threat_actor_types": (10, stix2.equivalence.object.partial_list_based),
"aliases": (45, stix2.equivalence.object.partial_list_based),
},
}
print("Using custom string comparison: %s" % (env.object_similarity(ta5, ta6, **weights)))
def custom_semantic_similarity_method(obj1, obj2, **weights):
sum_weights = 0
matching_score = 0
# Compare name
w = weights['name']
sum_weights += w
contributing_score = w * stix2.equivalence.object.partial_string_based(obj1['name'], obj2['name'])
matching_score += contributing_score
# Compare aliases only for spies
if 'spy' in obj1['threat_actor_types'] + obj2['threat_actor_types']:
w = weights['aliases']
sum_weights += w
contributing_score = w * stix2.equivalence.object.partial_list_based(obj1['aliases'], obj2['aliases'])
matching_score += contributing_score
return matching_score, sum_weights
weights = {
"threat-actor": {
"name": 60,
"aliases": 40,
"method": custom_semantic_similarity_method
}
}
print("Using standard weights: %s" % (env.object_similarity(ta5, ta6)))
print("Using a custom method: %s" % (env.object_similarity(ta5, ta6, **weights)))
def _x_foobar_checks(obj1, obj2, prop_scores, **weights):
matching_score = 0.0
sum_weights = 0.0
if stix2.equivalence.object.check_property_present("name", obj1, obj2):
w = weights["name"]
sum_weights += w
contributing_score = w * stix2.equivalence.object.partial_string_based(obj1["name"], obj2["name"])
matching_score += contributing_score
prop_scores["name"] = (w, contributing_score)
if stix2.equivalence.object.check_property_present("color", obj1, obj2):
w = weights["color"]
sum_weights += w
contributing_score = w * stix2.equivalence.object.partial_string_based(obj1["color"], obj2["color"])
matching_score += contributing_score
prop_scores["color"] = (w, contributing_score)
prop_scores["matching_score"] = matching_score
prop_scores["sum_weights"] = sum_weights
return matching_score, sum_weights
prop_scores = {}
weights = {
"x-foobar": {
"name": 60,
"color": 40,
"method": _x_foobar_checks,
},
"_internal": {
"ignore_spec_version": False,
},
}
foo1 = {
"type":"x-foobar",
"id":"x-foobar--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061",
"name": "Zot",
"color": "red",
}
foo2 = {
"type":"x-foobar",
"id":"x-foobar--0c7b5b88-8ff7-4a4d-aa9d-feb398cd0061",
"name": "Zot",
"color": "blue",
}
print(env.object_similarity(foo1, foo2, prop_scores, **weights))
print(prop_scores)
import json
from stix2 import Relationship
g1 = [
AttackPattern(
name="Phishing",
external_references=[
{
"url": "https://example2",
"source_name": "some-source2",
},
],
),
Campaign(name="Someone Attacks Somebody"),
Identity(
name="John Smith",
identity_class="individual",
description="Just some guy",
),
Indicator(
indicator_types=['malicious-activity'],
pattern_type="stix",
pattern="[file:hashes.MD5 = 'd41d8cd98f00b204e9800998ecf8427e']",
valid_from="2017-01-01T12:34:56Z",
),
Malware(id=MALWARE_ID,
malware_types=['ransomware'],
name="Cryptolocker",
is_family=False,
),
ThreatActor(id=THREAT_ACTOR_ID,
threat_actor_types=["crime-syndicate"],
name="Evil Org",
aliases=["super-evil"],
),
Relationship(
source_ref=THREAT_ACTOR_ID,
target_ref=MALWARE_ID,
relationship_type="uses",
),
Report(
report_types=["campaign"],
name="Bad Cybercrime",
published="2016-04-06T20:03:00.000Z",
object_refs=[THREAT_ACTOR_ID, MALWARE_ID],
),
]
g2 = [
AttackPattern(
name="Spear phishing",
external_references=[
{
"url": "https://example2",
"source_name": "some-source2",
},
],
),
Campaign(name="Another Campaign"),
Identity(
name="John Smith",
identity_class="individual",
description="A person",
),
Indicator(
indicator_types=['malicious-activity'],
pattern_type="stix",
pattern="[file:hashes.MD5 = '79054025255fb1a26e4bc422aef54eb4']",
valid_from="2017-01-01T12:34:56Z",
),
Malware(id=MALWARE_ID,
malware_types=['ransomware', 'dropper'],
name="Cryptolocker",
is_family=False,
),
ThreatActor(id=THREAT_ACTOR_ID,
threat_actor_types=["spy"],
name="James Bond",
aliases=["007"],
),
Relationship(
source_ref=THREAT_ACTOR_ID,
target_ref=MALWARE_ID,
relationship_type="uses",
),
Report(
report_types=["campaign"],
name="Bad Cybercrime",
published="2016-04-06T20:03:00.000Z",
object_refs=[THREAT_ACTOR_ID, MALWARE_ID],
),
]
memstore1 = MemoryStore(g1)
memstore2 = MemoryStore(g2)
prop_scores = {}
similarity_result = env.graph_similarity(memstore1, memstore2, prop_scores)
equivalence_result = env.graph_equivalence(memstore1, memstore2, threshold=60)
print(similarity_result)
print(equivalence_result)
print(json.dumps(prop_scores, indent=4, sort_keys=False))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: There are two other keyboard shortcuts for running code
Step2: If the Kernel dies you will be prompted to restart it. Here we call the low-level system libc.time routine with the wrong argument via
Step3: Cell menu
Step4: Output is asynchronous
Step5: Large outputs
Step6: Beyond a certain point, output will scroll automatically
|
<ASSISTANT_TASK:>
Python Code:
a = 10
print(a)
import time
time.sleep(10)
import sys
from ctypes import CDLL
# This will crash a Linux or Mac system
# equivalent calls can be made on Windows
# Uncomment these lines if you would like to see the segfault
# dll = 'dylib' if sys.platform == 'darwin' else 'so.6'
# libc = CDLL("libc.%s" % dll)
# libc.time(-1) # BOOM!!
print("hi, stdout")
from __future__ import print_function
print('hi, stderr', file=sys.stderr)
import time, sys
for i in range(8):
print(i)
time.sleep(0.5)
for i in range(50):
print(i)
for i in range(500):
print(2**i - 1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'codes':[[71020], [77085], [36415], [99213, 99287], [99234, 99233, 99233]]})
def g(df):
for i in df.index:
df.loc[i, 'codes'] = sorted(df.loc[i, 'codes'])
df = df.codes.apply(pd.Series)
cols = list(df)
for i in range(len(cols)):
cols[i]+=1
df.columns = cols
return df.add_prefix('code_')
result = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pick two isotopes, H-1 and Fe-56 and check total production
Step2: Note
Step3: SNII and SNIa
|
<ASSISTANT_TASK:>
Python Code:
#from imp import *
#s=load_source('sygma','/home/nugrid/nugrid/SYGMA/SYGMA_online/SYGMA_dev/sygma.py')
%pylab nbagg
import sygma as s
reload(s)
print s.__file__
#import matplotlib
#matplotlib.use('nbagg')
#import matplotlib.pyplot as plt
#matplotlib.use('nbagg')
#import numpy as np
from scipy.integrate import quad
from scipy.interpolate import UnivariateSpline
import os
s1=s.sygma(mgal=1e11,iniZ=0.02,yield_interp='None',imf_type='salpeter',table='yield_tables/isotope_yield_table.txt',sn1a_on=False)
Yield_tot_sim_h1=s1.history.ism_iso_yield[-1][0] #get total final H-1
Yield_tot_sim_fe56=s1.history.ism_iso_yield[-1][60] #get total final H-1
print s1.history.isotopes[0],Yield_tot_sim_h1
print s1.history.isotopes[60],Yield_tot_sim_fe56
import read_yields as ry
path = os.environ['SYGMADIR']+'/yield_tables/isotope_yield_table.txt'
ytables = ry.read_nugrid_yields(path,excludemass=[32,60])
print 'total IMF range: ',s1.imf_bdys
print 'yield IMF range: ',s1.imf_mass_ranges,
masses=[1,1.65,2,3,4,5,6,7,15,20,25] #should be conform with imf_mass_ranges
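# (I) Salpeter IMF (dN/dm ∝ m^-2.35) normalization chosen so the total stellar mass
#     formed between 0.1 and 100 Msun equals mgal = 1e11 Msun.
# (II) below: number of stars the IMF places in each yield-table mass range.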
k_N=1e11*0.35/ (0.1**-0.35 - 100**-0.35) #(I)
k=-1
ytot_h1=0
ytot_fe56=0
for mrange in s1.imf_mass_ranges:
k=k+1
N_range=k_N/1.35 * (mrange[0]**-1.35 - mrange[1]**-1.35) #(II)
y_h1=ytables.get(M=masses[k],Z=0.02,specie='H-1')
y_fe56=ytables.get(M=masses[k],Z=0.02,specie='Fe-56')
ytot_h1 = ytot_h1 + y_h1*N_range
ytot_fe56 = ytot_fe56 + y_fe56*N_range
print 'H-1, should be 1', ytot_h1/Yield_tot_sim_h1
print 'Fe-56, should be 1', ytot_fe56/Yield_tot_sim_fe56
print len(s1.history.t_m_bdys)
print len(s1.history.timesteps)
print s1.history.t_m_bdys
s7=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1e9,imf_type='salpeter',imf_bdys=[1,30],special_timesteps=-1,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=True, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn',pop3_table='yield_tables/popIII_h1.txt')
s8=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1e9,imf_type='salpeter',imf_bdys=[1,30],special_timesteps=-1,iniZ=0.0001)
s7.plot_sn_distr(marker1='o',color1='b',marker2='s',markevery=1)
s8.plot_sn_distr(marker1='d',marker2='x',color2='r',markevery=1)
s8=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1e9,imf_type='salpeter',imf_bdys=[1,30],special_timesteps=200,iniZ=0.0001)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Once you have calculated what you think x, y and z are, add print statements to the code above and run it to check. Make sure you understand the results you find.
Step2: Once you have calculated what you think x, y and z are, add print statements to the code above and run it to check.
Step3: Given a sphere of volume 14,137 cm³, calculate its radius to the closest whole number (this will involve rearranging the formula, and you may need to google how to find a cube root with Python)
Step4: 2.1 Working with Conditions
Step5: 2.2 Flow Control
Step6: 3.1 It's All Quite Logical
Step7: Assume that the code executes for a given value of x and answer the following questions. Only test the code AFTER you have worked out the answers and typed them below.
Step8: 3.3 Nested Conditions
Step9: Note how the indentation makes it easier to work out which ‘level’ the code is operating on. In the code above, lines 4 and 7 are at the same indentation meaning that both will be skipped if the initial condition (on line 1) is False.
Step10: 4.1. Alterative Execution
Step11: 4.2. Conditional Execution
Step12: 4.3. Nested Execution
Step13: 4.4a Bonus! Managing Errors
Step14: 4.4b Bonus! Managing Errors
|
<ASSISTANT_TASK:>
Python Code:
x = 12
y = 10
z = x + y
x = x + y
y = z + y
x = 1 + 2 * 3 / 4
y = (1 + 2) * (3 / 4)
z = 1 + 2 * (3 / 4)
print(x)
print(y)
print(z)
pi = 3.141592653589793
r = 12 / 2
vol = (4/3) * pi * (r**3)
print(vol)
vol=14137
r3 = vol / ((4/3) * pi)
r = r3**(1/3)
print(round(r))
hours=10
if hours > 0:
print(" ")
hours=2
if hours > 0:
print("Hours are greater than zero")
if hours < 10:
print("Hours are less than 10")
a = 1
b = 2
x = a
if x == b:
print("inside 1")
print("inside 2")
print("inside 3")
print("after condition")
x = 1
if x > 0 and x < 5:
print("Joe")
if x > 0 or x < 5:
print("Aled")
if not(x > 0):
print("Sarah")
for x in range(0,9):
    # ... do something ...
    pass
for x in range(0,9):
print(x)
if(x%2 == 0):
print("x is even")
else:
print("x is odd")
print(x)
if x != y: #line 1
print("x is not equal to y")
if(x > y): #line 4
print("x is greater than y")
else: #line 7
print("x is less than y")
else:
print("insert conclusion here")
x = 2
y = 3
if x != y: #line 1
print("x is not equal to y")
if(x > y): #line 4
print("x is greater than y")
else: #line 7
print("x is less than y")
else:
print("x is equal to y")
ageA = 20
ageB = 19
if ageA > ageB:
print("Person A is older than Person B")
else:
print("Person A is not older than Person B")
ageA = 20
ageB = 20
if ageA > ageB:
print("Person A is older than Person B")
elif ageA < ageB:
print("Person B is older than Person A")
else:
print("Person A and Person B are the same age")
ageA = 25
ageB = 30
largeD = 20
diff = ageA - ageB
if diff <= (largeD * -1):
print("Person A is much younger than Person B")
elif diff < 0 and diff > (largeD * -1):
print("Person A is a little younger than Person B")
elif diff == 0:
print("Person A is the same age as Person B")
elif diff >= (largeD * 1):
print("Person A is much older than Person B")
elif diff > 0 and diff < (largeD * 1):
print("Person A is a little older than Person B")
ageA = "five"
ageB = 30
largeD = 20
try:
diff = ageA - ageB
except:
print("Invalid Input")
else:
if diff <= (largeD * -1):
print("Person A is much younger than Person B")
elif diff < 0 and diff > (largeD * -1):
print("Person A is a little younger than Person B")
elif diff == 0:
print("Person A is the same age as Person B")
elif diff >= (largeD * 1):
print("Person A is much older than Person B")
elif diff > 0 and diff < (largeD * 1):
print("Person A is a little older than Person B")
#read more at https://docs.python.org/3/tutorial/errors.html#handling-exceptions
ageA = -1
ageB = 30
largeD = 20
diff = ageA - ageB
if ageA < 0 or ageB < 0:
raise Exception("Invalid Age")
else:
if diff <= (largeD * -1):
print("Person A is much younger than Person B")
elif diff < 0 and diff > (largeD * -1):
print("Person A is a little younger than Person B")
elif diff == 0:
print("Person A is the same age as Person B")
elif diff >= (largeD * 1):
print("Person A is much older than Person B")
elif diff > 0 and diff < (largeD * 1):
print("Person A is a little older than Person B")
#read more at https://docs.python.org/3/tutorial/errors.html#raising-exceptions
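# Illustrative follow-up (assumption): the raised Exception can itself be caught by a
# caller, turning the hard stop above into a handled message.
try:
    if ageA < 0 or ageB < 0:
        raise Exception("Invalid Age")
except Exception as err:
    print("Handled:", err)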
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Writing notes in Markdown...!
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
hello = tf.constant("Hello, TensorFlow!")  # assumed: the original constant is defined in an earlier cell not shown here
sess = tf.Session()
print(str(sess.run(hello),encoding = "utf-8"))
# print(sess.run(hello))
sess.close()
a = tf.constant(1234, dtype=tf.float32)
b = tf.constant(5000, dtype=tf.float32)
print(a)
print(b)
add_op = a + b
print(add_op)
with tf.Session() as sess:
print(sess.run(add_op))
add_op2 = tf.add(a,b)
with tf.Session() as sess:
print(sess.run(add_op2))
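# Illustrative extension (assumes the TensorFlow 1.x API used above): placeholders let
# the same graph be fed different values at run time.
c = tf.placeholder(tf.float32)
d = tf.placeholder(tf.float32)
add_op3 = c + d
with tf.Session() as sess:
    print(sess.run(add_op3, feed_dict={c: 10.0, d: 32.0}))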
%matplotlib inline
import matplotlib.pyplot as plt
plt.hist([1,2,3])
plt.show()
import numpy as np
x = np.arange(-20,20,0.1)
y = np.sin(x)
plt.plot(x,y)
a = tf.constant(100)
b = tf.constant(50)
add_op = a + b
v = tf.Variable(0)
let_op = tf.assign(v, add_op)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
_, v_val = sess.run([let_op,v])
print(v_val)
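# Illustrative check (assumption): the variable only changes when the assign op runs,
# so printing v before and after let_op makes the update explicit.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))  # before assignment: 0
    sess.run(let_op)
    print(sess.run(v))  # after assignment: 150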
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Geometry
Step2: Event data
Step3: Save for later
Step4: With old data
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.collections
import geopandas as gpd
import open_cp.network
import open_cp.sources.chicago
import open_cp.geometry
import os  # needed for the os.path.join calls below
#data_path = os.path.join("/media", "disk", "Data")
data_path = os.path.join("..", "..", "..", "..", "..", "..", "Data")
open_cp.sources.chicago.set_data_directory(data_path)
#tiger_path = os.path.join("/media", "disk", "TIGER Data")
tiger_path = os.path.join("..", "..", "..", "..", "..", "..", "Data", "TIGER Data")
filename = os.path.join(tiger_path, "tl_2016_17031_roads")
tiger_frame = gpd.GeoDataFrame.from_file(filename)
chicago = tiger_frame.to_crs({"init":"epsg:3528"})
chicago.head()
south_side = open_cp.sources.chicago.get_side("South")
mask = chicago.geometry.map(lambda x : x.intersects(south_side))
frame = chicago[mask]
frame.head()
all_nodes = []
for geo in frame.geometry:
for pt in geo.coords:
all_nodes.append(pt)
b = open_cp.network.PlanarGraphNodeOneShot(all_nodes)
for geo in frame.geometry:
path = list(geo.coords)
b.add_path(path)
b.remove_duplicate_edges()
graph = b.build()
reduced = open_cp.network.simple_reduce_graph(graph)
graph.number_edges, reduced.number_edges
filename = open_cp.sources.chicago.get_default_filename()
timed_points = open_cp.sources.chicago.load(filename, ["BURGLARY"])
timed_points.number_data_points
timed_points = open_cp.geometry.intersect_timed_points(timed_points, south_side)
timed_points.number_data_points
fig, ax = plt.subplots(figsize=(12,12))
lc = matplotlib.collections.LineCollection(graph.as_lines(), color="black", linewidth=0.5)
ax.add_collection(lc)
ax.scatter(timed_points.xcoords, timed_points.ycoords)
xmin, ymin, xmax, ymax = *timed_points.bounding_box.min, *timed_points.bounding_box.max
xd, yd = xmax - xmin, ymax - ymin
ax.set(xlim=(xmin-xd/20, xmax+xd/20), ylim=(ymin-yd/20, ymax+yd/20))
None
fig, axes = plt.subplots(ncols=2, figsize=(18,8))
for ax in axes:
lc = matplotlib.collections.LineCollection(graph.as_lines(), color="black", linewidth=0.5)
ax.add_collection(lc)
ax.scatter(timed_points.xcoords, timed_points.ycoords)
axes[0].set(xlim=[358000, 360000], ylim=[570000, 572000])
axes[1].set(xlim=[362000, 364000], ylim=[565000, 567000])
import pickle, lzma
with lzma.open("input.pic.xz", "wb") as f:
pickle.dump(timed_points, f)
with open("input.graph", "wb") as f:
f.write(graph.dump_bytes())
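# Illustrative round-trip check (standard-library calls only): re-open the pickled
# points to confirm the archive holds the same number of events.
with lzma.open("input.pic.xz", "rb") as f:
    reloaded = pickle.load(f)
print(reloaded.number_data_points)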
filename = os.path.join(data_path, "chicago_all_old.csv")
timed_points = open_cp.sources.chicago.load(filename, ["BURGLARY"], type="all")
timed_points.number_data_points
timed_points = open_cp.geometry.intersect_timed_points(timed_points, south_side)
timed_points.number_data_points
with lzma.open("input_old.pic.xz", "wb") as f:
pickle.dump(timed_points, f)
with open("input_old.graph", "wb") as f:
f.write(graph.dump_bytes())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Reprenstation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'besm-2-7', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What was the most popular type of complaint, and how many times was it filed?
Step2: Make a horizontal bar graph of the top 5 most frequent complaint types.
Step3: Which borough has the most complaints per capita? Since it's only 5 boroughs, you can do the math manually.
Step4: According to your selection of data, how many cases were filed in March? How about May?
Step5: I'd like to see all of the 311 complaints called in on April 1st.
Step6: What was the most popular type of complaint on April 1st?
Step7: What were the most popular three types of complaint on April 1st?
Step8: What month has the most reports filed? How many? Graph it.
Step9: What week of the year has the most reports filed? How many? Graph the weekly complaints.
Step10: Noise complaints are a big deal. Use .str.contains to select noise complaints, and make an chart of when they show up annually. Then make a chart about when they show up every day (cyclic).
Step11: Which were the top five days of the year for filing complaints? How many on each of those days? Graph it.
Step12: What hour of the day are the most complaints? Graph a day of complaints.
Step13: One of the hours has an odd number of complaints. What are the most common complaints at that hour, and what are the most common complaints the hour before and after?
Step14: So odd. What's the per-minute breakdown of complaints between 12am and 1am? You don't need to include 1am.
Step15: Looks like midnight is a little bit of an outlier. Why might that be? Take the 5 most common agencies and graph the times they file reports at (all day, not just midnight).
Step16: Graph those same agencies on an annual basis - make it weekly. When do people like to complain? When does the NYPD have an odd number of complaints?
Step17: Maybe the NYPD deals with different issues at different times? Check the most popular complaints in July and August vs the month of May. Also check the most common complaints for the Housing Preservation Bureau (HPD) in winter vs. summer.
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import dateutil.parser
df=pd.read_csv("311-2014.csv", nrows=200000)
dateutil.parser.parse(df['Created Date'][0])
def parse_date(str_date):
return dateutil.parser.parse(str_date)
df['created_datetime']=df['Created Date'].apply(parse_date)
df.index=df['created_datetime']
df['Complaint Type'].describe()
df.groupby(by='Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5).plot(kind='barh').invert_yaxis()
df.groupby(by='Borough')['Borough'].count()
boro_pop={
'BRONX': 1438159,
'BROOKLYN': 2621793,
'MANHATTAN': 1636268,
'QUEENS': 2321580,
'STATEN ISLAND': 473279}
boro_df=pd.Series.to_frame(df.groupby(by='Borough')['Borough'].count())
boro_df['Population']=pd.DataFrame.from_dict(boro_pop, orient='index')
boro_df['Complaints']=boro_df['Borough']
boro_df.drop('Borough', axis=1, inplace=True)
boro_df['Per Capita']=boro_df['Complaints']/boro_df['Population']
boro_df['Per Capita'].plot(kind='bar')
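# For a numeric answer to the per-capita question, sorting the column computed above
# makes the ranking explicit (illustrative addition using the boro_df built in the preceding lines).
boro_df['Per Capita'].sort_values(ascending=False)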
df['2015-03']['Created Date'].count()
df['2015-05']['Created Date'].count()
df['2015-04-01']
df['2015-04-01'].groupby(by='Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(1)
df['2015-04-01'].groupby(by='Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(3)
df.resample('M')['Unique Key'].count().sort_values(ascending=False)
df.resample('M').count().plot(y='Unique Key')
df.resample('W')['Unique Key'].count().sort_values(ascending=False).head(5)
df.resample('W').count().plot(y='Unique Key')
noise_df=df[df['Complaint Type'].str.contains('Noise')]
noise_df.resample('M').count().plot(y='Unique Key')
noise_df.groupby(by=noise_df.index.hour).count().plot(y='Unique Key')
df.resample('D')['Unique Key'].count().sort_values(ascending=False).head(5)
df.resample('D')['Unique Key'].count().sort_values().tail(5).plot(kind='barh')
df['Unique Key'].groupby(by=df.index.hour).count().sort_values(ascending=False)
df['Unique Key'].groupby(df.index.hour).count().plot()
df[df.index.hour==0].groupby(by='Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5)
df[df.index.hour==1].groupby(by='Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5)
df[df.index.hour==23].groupby(by='Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5) # hour before midnight
midnight_df = df[df.index.hour==0]
midnight_df.groupby(midnight_df.index.minute)['Unique Key'].count().sort_values(ascending=False)
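# Plotting the same per-minute counts (illustrative) makes the spike at minute 0 easy to see --
# it suggests many records are stamped with a default 00:00 time.
midnight_df.groupby(midnight_df.index.minute)['Unique Key'].count().plot()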
df.groupby('Agency')['Unique Key'].count().sort_values(ascending=False).head(5)
ax=df[df['Agency']=='NYPD'].groupby(df[df['Agency']=='NYPD'].index.hour)['Unique Key'].count().plot(legend=True, label='NYPD')
df[df['Agency']=='HPD'].groupby(df[df['Agency']=='HPD'].index.hour)['Unique Key'].count().plot(ax=ax, legend=True, label='HPD')
df[df['Agency']=='DOT'].groupby(df[df['Agency']=='DOT'].index.hour)['Unique Key'].count().plot(ax=ax, legend=True, label='DOT')
df[df['Agency']=='DPR'].groupby(df[df['Agency']=='DPR'].index.hour)['Unique Key'].count().plot(ax=ax, legend=True, label='DPR')
df[df['Agency']=='DOHMH'].groupby(df[df['Agency']=='DOHMH'].index.hour)['Unique Key'].count().plot(ax=ax, legend=True, label='DOHMH')
ax=df[df['Agency']=='NYPD'].groupby(df[df['Agency']=='NYPD'].index.week)['Unique Key'].count().plot(legend=True, label='NYPD')
df[df['Agency']=='HPD'].groupby(df[df['Agency']=='HPD'].index.week)['Unique Key'].count().plot(ax=ax, legend=True, label='HPD')
df[df['Agency']=='DOT'].groupby(df[df['Agency']=='DOT'].index.week)['Unique Key'].count().plot(ax=ax, legend=True, label='DOT')
df[df['Agency']=='DPR'].groupby(df[df['Agency']=='DPR'].index.week)['Unique Key'].count().plot(ax=ax, legend=True, label='DPR')
df[df['Agency']=='DOHMH'].groupby(df[df['Agency']=='DOHMH'].index.week)['Unique Key'].count().plot(ax=ax, legend=True, label='DOHMH')
nypd=df[df['Agency']=='NYPD']
nypd[(nypd.index.month==7) | (nypd.index.month==8)].groupby('Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5)
nypd[nypd.index.month==5].groupby('Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5)
# seems like mostly noise complaints and bad parking to me
hpd=df[df['Agency']=='HPD']
hpd[(hpd.index.month>=6) & (hpd.index.month<=8)].groupby('Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5)
# i would consider summer to be june to august.
hpd[(hpd.index.month==12) | (hpd.index.month<=2)].groupby('Complaint Type')['Complaint Type'].count().sort_values(ascending=False).head(5)
# pretty similar list, but people probably notice a draft from their bad window or door in the winter more easily than summer
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading IDH-M (municipal HDI) data from Wikipedia
Step2: Analysis
Step3: Testing the hypothesis
Step4: The results of several tests, at a 5% significance level, show strong evidence that the answer is yes.
Step5: Impact by party or by the 2000 IDH-M level
Step6: No significant difference between the parties could be observed.
Step7: GEE
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import requests as req
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.stats import ttest_ind, ttest_rel
from scipy.stats import gaussian_kde
from statsmodels.formula.api import ols, mixedlm, gee
from statsmodels.stats.outliers_influence import OLSInfluence
from statsmodels.regression.linear_model import OLSResults
from patsy import dmatrix
np.set_printoptions(precision=3)
idhm_df = pd.read_csv("../data/brazil_states_idh_2000_2010.csv", index_col=0)
idhm_df
idhm_df.describe()
f = plt.figure(14)
idhm_df[["I2000","I2010","Ratio"]].hist(bins=10)
plt.figure()
sns.kdeplot(idhm_df["I2000"], shade=True);
sns.kdeplot(idhm_df["I2010"], shade=True);
sns.kdeplot(idhm_df["Ratio"], shade=True);
ttest_rel(idhm_df['I2000'], idhm_df['I2010'])
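# Robustness check (added illustration, not part of the original analysis):
# a non-parametric paired test on the same two columns.
from scipy.stats import wilcoxon
wilcoxon(idhm_df['I2000'], idhm_df['I2010'])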
import scipy
import scikits.bootstrap as bootstrap
# compute 95% confidence intervals around the mean
CIs00 = bootstrap.ci(data=idhm_df["I2000"])
CIs10 = bootstrap.ci(data=idhm_df["I2010"])
CIsR = bootstrap.ci(data=idhm_df["Ratio"])
print("IDHM 2000 mean 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs00)))
print("IDHM 2010 mean 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs10)))
print("IDHM ratio mean 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIsR)))
CIs00 = bootstrap.ci(data=idhm_df["I2000"], statfunction=scipy.median)
CIs10 = bootstrap.ci(data=idhm_df["I2010"], statfunction=scipy.median)
CIsR = bootstrap.ci(data=idhm_df["Ratio"], statfunction=scipy.median)
print("IDHM 2000 median 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs00)))
print("IDHM 2010 median 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIs10)))
print("IDHM ratio median 95% confidence interval. Low={0:.3f}\tHigh={1:.3f}".format(*tuple(CIsR)))
state_parties_df = pd.read_csv("../data/brazil_states_parties_2000-2010.csv", index_col=0)
state_parties_df
state_regions_df = pd.read_csv("../data/brazil_states_regions.csv", index_col=0)
state_regions_df
df = idhm_df.merge(state_parties_df, on="Estado")
df = df.merge(state_regions_df, on="Estado")
df
sns.factorplot("idh_level_2000","Ratio",data=df, kind="box")
sns.factorplot("Regiao","Ratio",data=df, kind="box")
sns.set()
sns.pairplot(df, hue="idh_level_2000", size=2.5)
sns.coefplot("Ratio ~ PT + PSDB + Outros + C(idh_level_2000) - 1", df, palette="Set1");
sns.coefplot("Ratio ~ Outros==0 + Outros - 1", df, palette="Set1");
sns.set(style="whitegrid")
sns.residplot(df.Outros,df.Ratio, color="navy", lowess=True, order=1)
sns.coefplot("Ratio ~ PT==0 + PT - 1", df, palette="Set1");
sns.set(style="whitegrid")
sns.residplot(df[df.PT>0].PT, df[df.PT>0].Ratio, color="navy", order=1)
sns.coefplot("Ratio ~ PSDB==0 + PSDB + np.multiply(PSDB, PSDB) - 1", df, palette="Set1");
sns.set(style="whitegrid")
sns.residplot(df[df.PSDB>0].PSDB, df[df.PSDB>0].Ratio, color="navy", lowess=True, order=2)
sns.coefplot("Ratio ~ PT + PSDB + Outros + C(idh_level_2000) - 1", df, palette="Set1");
sns.coefplot("Ratio ~ PT + PSDB + C(idh_level_2000)", df, palette="Set1");
sns.coefplot("Ratio ~ PT + Outros + C(idh_level_2000)", df, palette="Set1");
sns.coefplot("Ratio ~ PSDB + Outros + C(idh_level_2000)", df, palette="Set1");
formula = "Ratio ~ PT + PSDB + C(idh_level_2000) + C(Regiao)"
model = ols(formula, df).fit()
model.summary()
sns.lmplot("I2000", "I2010", data=df, legend=True, size=10, n_boot=10000, ci=95)
sns.jointplot("I2000", "I2010", data=df, kind='resid',color=sns.color_palette()[2], size=10)
sns.coefplot("I2010 ~ I2000", data=df, intercept=True)
sns.coefplot("I2010 ~ I2000", data=df, groupby="idh_level_2000", intercept=True)
sns.lmplot("I2000", "I2010", data=df, hue="idh_level_2000", col="idh_level_2000", legend=True, size=6, n_boot=10000, ci=99)
sns.lmplot("I2000", "I2010", data=df, hue="Regiao", col="Regiao", col_wrap=2, legend=True, size=6, n_boot=10000, ci=99)
md = ols("I2010 ~ I2000 + C(Regiao)", df).fit()
print(md.summary())
rrr = md.get_robustcov_results()
rrp = rrr.outlier_test("fdr_bh", 0.1)
idx = rrp[rrp["fdr_bh(p)"] <= 0.1].index
print("Estados fora da média:\n",df.ix[idx.values])
rrp[rrp["fdr_bh(p)"] <= 0.1]
import statsmodels.api as sm
md = gee("Ratio ~ PT + PSDB ", df.idh_level_2000, df, cov_struct=sm.cov_struct.Exchangeable())
mdf = md.fit()
print(mdf.summary())
print(mdf.cov_struct.summary())
plt.plot(mdf.fittedvalues, mdf.resid, 'o', alpha=0.5)
plt.xlabel("Fitted values", size=17)
plt.ylabel("Residuals", size=17)
sns.jointplot(mdf.fittedvalues, mdf.resid, size=10, kind="kde")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2> Define an API calling function </h2>
Step2: <h2> Test the Sentiment Analysis </h2>
Step3: <h2>Use the Dataproc cluster to run a Spark job that uses the Machine Learning API </h2>
Step4: <h2> Working with the results in Python </h2>
Step5: <h2> Using another feature of the Natural Language API </h2>
Step6: <h2> Working with the results in Spark </h2>
Step 1
Step7: <h2> Save as text to Cloud Storage </h2>
|
<ASSISTANT_TASK:>
Python Code:
APIKEY="AIzaSyBQrrl4SZhE3QtxsnbjY2WTdgcBz0G0Rfs" # CHANGE
print APIKEY
PROJECT_ID = "qwiklabs-gcp-14067121d7b1d12c" # CHANGE
print PROJECT_ID
BUCKET = "qwiklabs-gcp-14067121d7b1d12c" # CHANGE
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT_ID
from googleapiclient.discovery import build
print("\n","Google Cloud API Client credentials established")
def SentimentAnalysis(text):
from googleapiclient.discovery import build
lservice = build('language', 'v1beta1', developerKey=APIKEY)
response = lservice.documents().analyzeSentiment(
body={
'document': {
'type': 'PLAIN_TEXT',
'content': text
}
}).execute()
return response
print("\n","Sentiment Analysis function defined.")
sampleline = u'There are places I remember, all my life though some have changed.'
results = SentimentAnalysis(sampleline)
print("\n","This is the Python object that is returned; a dictionary.")
print("\n")
print("Function returns :",type(results))
print(results)
import json
print("\n","This is the JSON formatted version of the object")
print(json.dumps(results, sort_keys=True, indent=4))
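# The overall sentiment can be pulled straight out of the same dictionary
# (key names as shown in the JSON printed above).
print("Score: ", results['documentSentiment']['score'], " Magnitude: ", results['documentSentiment']['magnitude'])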
# Working with the smaller sample file
#
lines = sc.textFile("/sampledata/road-not-taken.txt")
#
# The Spark map transformation will execute SentimentAnalysis on each element in lines and store the result in sentiment.
# Remember that due to lazy execution, this line just queues up the transformation, it does not run yet.
# So you will not see errors at this point.
#
sentiment = lines.map(SentimentAnalysis)
#
#
print (type(sentiment))
# sentiment is a pyspark.rdd.PipelinedRDD
#
# If it is properly formed then an action such as sentiment.collect() will run the job.
# If not properly formed, it will throw errors.
#
output = sentiment.collect()
#
# The sentiment rdd contains JSON returns. In python these are collected into a list of dictionaries.
#
print(type(output))
print("\n")
for line in output:
print(line)
#
# Ouput is a list of dictionaries
# When the list is iterated, each line is one dictionary
# And the dictionary is double-subscripted
#
for line in output:
print("Score: ",line['documentSentiment']['score'], " Magnitude :",line['documentSentiment']['magnitude'])
def TailoredAnalysis(text):
from googleapiclient.discovery import build
lservice = build('language', 'v1beta1', developerKey=APIKEY)
response = lservice.documents().analyzeEntities(
body={
'document': {
'type': 'PLAIN_TEXT',
'content': text
}
}).execute()
return response
print("\n","Tailored Analysis function defined.")
# [STEP 1] HDFS
#lines = sc.textFile("/sampledata/road-not-taken.txt")
#
#
# [STEP 2] Cloud Storage
#lines = sc.textFile("gs://<your-bucket>/time-machine-P1.txt")
#lines = sc.textFile("gs://<your-bucket>/time-machine-P2.txt")
#lines = sc.textFile("gs://<your-bucket>/time-machine-P3.txt")
#lines = sc.textFile("gs://<your-bucket>/time-machine-P4.txt")
lines = sc.textFile("gs://qwiklabs-gcp-14067121d7b1d12c/time-machine-P1.txt")
#
#
#
entities = lines.map(TailoredAnalysis)
from operator import add
rdd = entities.map(lambda x: x['entities'])
#
# results = rdd.flatMap(lambda x: x ).filter(lambda x: x['type']==u'PERSON').map(lambda x:(x['name'],1)).reduceByKey(add)
#
# It is common practice to use line continuation "\" to help make the chain more readable
results = rdd.flatMap(lambda x: x )\
.filter(lambda x: x['type']==u'PERSON')\
.map(lambda x:(x['name'],1))\
.reduceByKey(add)
print(sorted(results.take(25)))
# [STEP 3] Cloud Storage
#lines = sc.textFile("gs://<your-bucket>/time-machine-P1.txt")
#lines = sc.textFile("gs://<your-bucket>/time-machine-P2.txt")
#lines = sc.textFile("gs://<your-bucket>/time-machine-P3.txt")
#lines = sc.textFile("gs://<your-bucket>/time-machine-P4.txt")
#
lines = sc.textFile("gs://qwiklabs-gcp-14067121d7b1d12c/time-machine-P2.txt")
#
entities = lines.map(TailoredAnalysis)
from operator import add
rdd = entities.map(lambda x: x['entities'])
#
# results = rdd.flatMap(lambda x: x ).filter(lambda x: x['type']==u'PERSON').map(lambda x:(x['name'],1)).reduceByKey(add)
#
# It is common practice to use line continuation "\" to help make the chain more readable
results = rdd.flatMap(lambda x: x )\
.filter(lambda x: x['type']==u'LOCATION')\
.map(lambda x:(x['name'],1))\
.reduceByKey(add)
print(sorted(results.take(25)))
# Replace with your bucket
#
results.repartition(1).saveAsTextFile("gs://qwiklabs-gcp-14067121d7b1d12c/sampleoutput/")
print("Output to Cloud Storage is complete.")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below are the lists I created that will help me narrow my search. I created the list called search because the key was only allowed to contain 3 lower case letters. Next I created a list of plain text english to help me filter out unwanted messages.
Step2: Next I create a function that will check if a line of text is plain english, by comparing its components with my english list above.
Step3: Now I begin the search for the key. This computation takes a minute or two because I am searching through all 17,576 (26^3) key possibilities and matching each with all 1200 cipher entries, for a total of about 20 million computations. I print every key that returns a plain text message. In the end I get very lucky, because the program only prints one key.
Step4: Now I know the key is 'god'. I will use that key to decipher the message below. I use the same method to print the message as I did to find the key.
Step5: Now that I know the message, I can compute its ASCII sum.
|
<ASSISTANT_TASK:>
Python Code:
ciphertxt = open('cipher.txt', 'r')
cipher = ciphertxt.read().split(',') #Splits the ciphertxt into a list, splits at every ,
cipher = [int(i) for i in cipher]
ciphertxt.close()
search = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
english = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z','A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z',',','?',"'",'!',';','"','.','(',')','-','1','2','3','4','5','6','7','8','9','0',' ']
def is_plain_text(text):
result = True
for letter in text:
if letter not in english:
result = False
break
return result
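# Quick sanity check of the filter (illustrative strings only):
print(is_plain_text("Hello, world!"))  # expected True
print(is_plain_text("tab\tseparated"))  # expected False: '\t' is not in the english list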
for x in search:
for y in search:
for z in search:
message = ""
i = 0 #Counter i allows me to apply the components of key at every third entry of the message
for entry in cipher:
if i == 0 or i % 3 == 0:
message = message + chr(entry^ord(x))
elif i == 1 or (i-1) % 3 == 0:
message = message + chr(entry^ord(y))
elif i == 2 or (i-2) % 3 == 0:
message = message + chr(entry^ord(z))
i = i + 1
if is_plain_text(message) == True:
print("A potential key is: " + x + y + z)
message = ""
i = 0
for entry in cipher:
if i == 0 or i % 3 == 0:
message = message + chr(entry^ord('g'))
elif i == 1 or (i-1) % 3 == 0:
message = message + chr(entry^ord('o'))
elif i == 2 or (i-2) % 3 == 0:
message = message + chr(entry^ord('d'))
i = i + 1
print(message)
sum = 0
for char in message:
sum = sum + ord(char)
print("The ASCII sum is: " + str(sum))
# This cell will be used for grading, leave it at the end of the notebook.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You need to restart your kernel to register the new installs before running the cells below
Step3: <h2> Save the query from earlier </h2>
Step4: <h2> Create ML dataset using tf.transform and Dataflow </h2>
|
<ASSISTANT_TASK:>
Python Code:
%%bash
source activate py2env
pip uninstall -y google-cloud-dataflow
conda install -y pytz==2018.4
pip install apache-beam[gcp] tensorflow_transform==0.8.0
%%bash
pip freeze | grep -e 'flow\|beam'
import tensorflow as tf
import apache_beam as beam
print(tf.__version__)
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml' # REPLACE WITH YOUR BUCKET NAME
PROJECT = 'cloud-training-demos' # REPLACE WITH YOUR PROJECT ID
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
!gcloud config set project $PROJECT
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
query="""
SELECT
weight_pounds,
is_male,
mother_age,
mother_race,
plurality,
gestation_weeks,
mother_married,
ever_born,
cigarette_use,
alcohol_use,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE year > 2000
"""
import google.datalab.bigquery as bq
df = bq.Query(query + " LIMIT 100").execute().result().to_dataframe()
df.head()
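# Quick exploratory check (added for illustration): which sampled columns contain nulls
# that the cleanup step further below will have to handle.
df.isnull().sum()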
%%writefile requirements.txt
tensorflow-transform==0.8.0
import datetime
import apache_beam as beam
import tensorflow_transform as tft
from tensorflow_transform.beam import impl as beam_impl
def preprocess_tft(inputs):
import copy
import numpy as np
def center(x):
return x - tft.mean(x)
result = copy.copy(inputs) # shallow copy
result['mother_age_tft'] = center(inputs['mother_age'])
result['gestation_weeks_centered'] = tft.scale_to_0_1(inputs['gestation_weeks'])
result['mother_race_tft'] = tft.string_to_int(inputs['mother_race'])
return result
#return inputs
def cleanup(rowdict):
import copy, hashlib
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,mother_race,plurality,gestation_weeks,mother_married,cigarette_use,alcohol_use'.split(',')
STR_COLUMNS = 'key,is_male,mother_race,mother_married,cigarette_use,alcohol_use'.split(',')
FLT_COLUMNS = 'weight_pounds,mother_age,plurality,gestation_weeks'.split(',')
# add any missing columns, and correct the types
def tofloat(value, ifnot):
try:
return float(value)
except (ValueError, TypeError):
return ifnot
result = {
k : str(rowdict[k]) if k in rowdict else 'None' for k in STR_COLUMNS
}
result.update({
k : tofloat(rowdict[k], -99) if k in rowdict else -99 for k in FLT_COLUMNS
})
# modify opaque numeric race code into human-readable data
races = dict(zip([1,2,3,4,5,6,7,18,28,39,48],
['White', 'Black', 'American Indian', 'Chinese',
'Japanese', 'Hawaiian', 'Filipino',
                      'Asian Indian', 'Korean', 'Samoan', 'Vietnamese']))
if 'mother_race' in rowdict and rowdict['mother_race'] in races:
result['mother_race'] = races[rowdict['mother_race']]
else:
result['mother_race'] = 'Unknown'
# cleanup: write out only the data we that we want to train on
if result['weight_pounds'] > 0 and result['mother_age'] > 0 and result['gestation_weeks'] > 0 and result['plurality'] > 0:
data = ','.join([str(result[k]) for k in CSV_COLUMNS])
result['key'] = hashlib.sha224(data).hexdigest()
yield result
def preprocess(query, in_test_mode):
import os
import os.path
import tempfile
import tensorflow as tf
from apache_beam.io import tfrecordio
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
job_name = 'preprocess-babyweight-features' + '-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S')
if in_test_mode:
import shutil
print('Launching local job ... hang on')
OUTPUT_DIR = './preproc_tft'
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
else:
print('Launching Dataflow job {} ... hang on'.format(job_name))
OUTPUT_DIR = 'gs://{0}/babyweight/preproc_tft/'.format(BUCKET)
import subprocess
subprocess.call('gsutil rm -r {}'.format(OUTPUT_DIR).split())
options = {
'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
'job_name': job_name,
'project': PROJECT,
'max_num_workers': 24,
'teardown_policy': 'TEARDOWN_ALWAYS',
'no_save_main_session': True,
'requirements_file': 'requirements.txt'
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
if in_test_mode:
RUNNER = 'DirectRunner'
else:
RUNNER = 'DataflowRunner'
# set up metadata
raw_data_schema = {
colname : dataset_schema.ColumnSchema(tf.string, [], dataset_schema.FixedColumnRepresentation())
for colname in 'key,is_male,mother_race,mother_married,cigarette_use,alcohol_use'.split(',')
}
raw_data_schema.update({
colname : dataset_schema.ColumnSchema(tf.float32, [], dataset_schema.FixedColumnRepresentation())
for colname in 'weight_pounds,mother_age,plurality,gestation_weeks'.split(',')
})
raw_data_metadata = dataset_metadata.DatasetMetadata(dataset_schema.Schema(raw_data_schema))
def read_rawdata(p, step, test_mode):
if step == 'train':
selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) < 3'.format(query)
else:
selquery = 'SELECT * FROM ({}) WHERE ABS(MOD(hashmonth, 4)) = 3'.format(query)
if in_test_mode:
selquery = selquery + ' LIMIT 100'
#print('Processing {} data from {}'.format(step, selquery))
return (p
| '{}_read'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query=selquery, use_standard_sql=True))
| '{}_cleanup'.format(step) >> beam.FlatMap(cleanup)
)
# run Beam
with beam.Pipeline(RUNNER, options=opts) as p:
with beam_impl.Context(temp_dir=os.path.join(OUTPUT_DIR, 'tmp')):
# analyze and transform training
raw_data = read_rawdata(p, 'train', in_test_mode)
raw_dataset = (raw_data, raw_data_metadata)
transformed_dataset, transform_fn = (
raw_dataset | beam_impl.AnalyzeAndTransformDataset(preprocess_tft))
transformed_data, transformed_metadata = transformed_dataset
_ = transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord(
os.path.join(OUTPUT_DIR, 'train'),
coder=example_proto_coder.ExampleProtoCoder(
transformed_metadata.schema))
# transform eval data
raw_test_data = read_rawdata(p, 'eval', in_test_mode)
raw_test_dataset = (raw_test_data, raw_data_metadata)
transformed_test_dataset = (
(raw_test_dataset, transform_fn) | beam_impl.TransformDataset())
transformed_test_data, _ = transformed_test_dataset
_ = transformed_test_data | 'WriteTestData' >> tfrecordio.WriteToTFRecord(
os.path.join(OUTPUT_DIR, 'eval'),
coder=example_proto_coder.ExampleProtoCoder(
transformed_metadata.schema))
_ = (transform_fn
| 'WriteTransformFn' >>
transform_fn_io.WriteTransformFn(os.path.join(OUTPUT_DIR, 'metadata')))
job = p.run()
if in_test_mode:
job.wait_until_finish()
print("Done!")
preprocess(query, in_test_mode=False)
%%bash
gsutil ls gs://${BUCKET}/babyweight/preproc_tft/*-00000*
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction and notation
Step2: Application
Step3: For now we are only interested in years of service and salaries. We sort the data by years of service and plot everything.
Step4: We apply our batch algorithm with successive iterations
Step5: We now perform the linear regression with the stochastic algorithm
Step6: Let us now compare the convergence speed of the algorithms
Step7: Let us now look at the solution obtained with the normal equation
Step8: Female/male salary discrimination
Step9: Let us compare the results
|
<ASSISTANT_TASK:>
Python Code:
% matplotlib inline
from numpy import zeros, zeros_like, ones, vstack, mod, loadtxt
import matplotlib.pyplot as plt
from numpy.linalg import pinv
def h(theta, x):
y_estimated = 0.
for theta_i, x_i in zip(theta, x):
y_estimated += theta_i*x_i
return y_estimated
def J(theta, X, Y):
cost = 0.
m = X.shape[0]
for x_i, y_i in zip(X, Y):
cost += (h(theta, x_i) - y_i)**2
return 1./(2.*m)*cost
def batch_update(theta, X, Y, alpha=0.01):
new_theta = zeros_like(theta)
m = X.shape[0]
for j, theta_j in enumerate(theta):
sum_derivative = 0.
for i, (x_i, y_i) in enumerate(zip(X,Y)):
sum_derivative += (y_i - h(theta,x_i))*x_i[j]
new_theta[j] = theta_j + alpha*(sum_derivative)
return new_theta
def stochastic_update(theta, X, Y, alpha=0.01):
new_theta = theta.copy()
m = X.shape[0]
for i, (x_i, y_i) in enumerate(zip(X,Y)):
for j, theta_j in enumerate(new_theta):
new_theta[j] = new_theta[j] + alpha*(y_i - h(new_theta, x_i))*x_i[j]
return new_theta
def normal_equation(X, Y):
return pinv(X.T.dot(X)).dot(X.T).dot(Y)
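# Minimal sanity check of the closed-form solution on synthetic data (illustrative only):
# y = 3 + 2*x exactly, so the recovered parameters should be close to [3, 2].
X_check = ones((5, 2))
X_check[:, 1] = [0., 1., 2., 3., 4.]
Y_check = 3. + 2.*X_check[:, 1]
print(normal_equation(X_check, Y_check))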
!wget http://data.princeton.edu/wws509/datasets/salary.raw
raw_data = loadtxt('salary.raw')
print (raw_data[0:3, :].astype(int))
year_column = 2
salary_column = 5
year_min = -1; year_max = 26
salary_min = 10000; salary_max = 40000
data = raw_data[:, [year_column, salary_column]]
def plot_salaires(X, Y, color=None, label=None):
if color is not None:
plt.plot(X, Y, color+'o', alpha=0.5, label=label)
else:
plt.plot(X, Y, 'ok', alpha=0.5)
plt.title(u'Salaires universitaires à Princeton')
plt.xlabel(u'Carrière universitaire (années)')
plt.ylabel(u'Salaire annuel ($)')
plt.xlim((year_min, year_max))
plt.ylim((salary_min, salary_max))
if label is not None:
plt.legend(loc='lower right')
def plot_regression(theta, color='b-', alpha=1):
plt.plot([year_min, year_max], [h(theta, [1, year_min]), h(theta, [1, year_max])],
color, alpha=alpha)
plot_salaires(data[:,0], data[:,1])
m = data.shape[0]
n = data.shape[1]
X = ones(shape=(m, 2))
X[:,1] = data[:,0]
Y = data[:, 1]
theta = zeros(shape=(n, 1))
plot_salaires(X[:,1], Y)
alpha = 0.0002
iteration = 1000
J_batch = zeros(iteration)
for i in range(iteration):
theta = batch_update(theta, X, Y, alpha)
J_batch[i] = J(theta, X, Y)
if (mod(i, iteration/10) == 0):
plot_regression(theta, 'b-', alpha=float(i)/iteration)
plot_regression(theta, 'k-')
plot_salaires(X[:,1], Y)
theta = zeros(shape=(n, 1))
alpha = 0.001
iteration = 400
J_stochastic = zeros(iteration)
for i in range(iteration):
theta = stochastic_update(theta, X, Y, alpha)
J_stochastic[i] = J(theta, X, Y)
if (mod(i, iteration/10) == 0):
plot_regression(theta, 'r-', alpha=float(i)/iteration)
plot_regression(theta, 'k-')
plt.figure()
f = plt.plot(J_batch, 'b', label='batch')
g = plt.plot(J_stochastic, 'r', label='stochastic')
plt.title('Convergence of cost function')
plt.xlabel('iterations')
plt.ylabel('error')
plt.legend()
plot_salaires(X[:,1], Y)
theta = normal_equation(X, Y)
plot_regression(theta, 'g-')
sex = 0
men = 0
women = 1
men_index = raw_data[:, sex] == men
X_men = ones(shape=(men_index.sum(), 2))
X_men[:,1] = raw_data[men_index, year_column]
Y_men = raw_data[men_index, salary_column]
theta_men = normal_equation(X_men, Y_men)
women_index = raw_data[:, sex] == women
X_women = ones(shape=(women_index.sum(), 2))
X_women[:,1] = raw_data[women_index, year_column]
Y_women = raw_data[women_index, salary_column]
theta_women = normal_equation(X_women, Y_women)
plot_salaires(X_men[:, 1], Y_men, color='b', label='Homme')
plot_regression(theta_men, 'b-')
plot_salaires(X_women[:, 1], Y_women, color='r', label='Femme')
plot_regression(theta_women, 'r-')
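# Printing both parameter vectors (illustrative addition) makes the fitted
# intercept/slope gap between the two groups explicit.
print('men   (intercept, slope):', theta_men)
print('women (intercept, slope):', theta_women)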
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulate Tasks
Step2: 3.0.1a visualize synaptic matrix (sample subject)
Step3: 3.0.1 Visualize actual estimated 'intrinsic FC's from Pearson FC and MultRegFC
Step6: 3.1 Run Task information transfer mapping classifying each of the different rules (4-way classification)
Step8: 3.2.1 Define some basic functions
Step9: 3.2.3 Run information transfer mapping analysis on subjects using MultReg FC
Step10: 3.2.3 Statistical testing on results and plot
Step11: 3.3 Statistical testing on results and plot using FWE-correction (permutation testing)
Step12: 4.0 Compute BGC using resting-state MultRegFC
Step13: Correct for FWE instead
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy import sparse
% matplotlib inline
import scipy.stats as stats
import statsmodels.api as sm
import CompModel_v7 as cm
cm = reload(cm)
import multiprocessing as mp
import sklearn.preprocessing as preprocessing
import sklearn.svm as svm
import statsmodels.sandbox.stats.multicomp as mc
import multregressionconnectivity as mreg
import sys
sys.path.append('utils/')
import permutationTesting as pt
import os
os.environ['OMP_NUM_THREADS'] = str(1)
from matplotlib.colors import Normalize
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
# Basic simulation parameters
# Simulation parameters sampled at 10ms
Tmax = 10000 # 100 seconds per block (10 trials perblock, each trial lasts 4 seconds)
Tmaxrest = 60000 # 500 seconds of rest
nblocks = 20
# Network parameters
g = 1.0
s = 1.0
nsubjs = 30
ncommunities = 5
nodespernetwork = 50
totalnodes = ncommunities*nodespernetwork
localtasks = range(1,5)
flexhubtasks = range(5,9)
flexandlocalnets = range(9,13)
flexandlocalnet2 = range(13,17)
ntasks = len(localtasks) + len(flexhubtasks) + len(flexandlocalnets) + len(flexandlocalnet2)
nblocks = 20
## First four tasks are local tasks
localtasks = range(1,5)
localcommunity = 4 # local community to stimulate the local tasks
## Second four tasks are 'flexhub' tasks
flexhubtasks = range(5,9)
ntasks = len(flexhubtasks) + len(localtasks)
Tmax = 10000
Tmaxrest = 60000
# g = 1.0
# s = 1.0
autocorrfactor = 0
samplingrate = 1.0
TRLength=100
#### Set up subject networks ####
# Parameters for subject's networks
ncommunities = 5
innetwork_dsity = .35
outnetwork_dsity = .05
hubnetwork_dsity = .20
nodespernetwork = 50
totalnodes = nodespernetwork*ncommunities
##########
# Construct structural matrix
W = cm.generateStructuralNetwork(ncommunities=ncommunities, innetwork_dsity=innetwork_dsity,
outnetwork_dsity=outnetwork_dsity, hubnetwork_dsity=hubnetwork_dsity,
nodespernetwork=nodespernetwork, showplot=False)
# Construct synaptic matrix
G = cm.generateSynapticNetwork(W, showplot=False)
plt.figure()
# norm = MidpointNormalize(midpoint=0)
plt.imshow(G,origin='lower',interpolation='none')
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Synaptic Weight Matrix', y=1.04, fontsize=18)
plt.colorbar()
# plt.savefig('SingleSubj_SynapticWeightMatrix.pdf')
fcmat_pearson = np.zeros((totalnodes,totalnodes,nsubjs))
fcmat_multreg = np.zeros((totalnodes,totalnodes,nsubjs))
for subj in range(nsubjs):
indir = '/projects2/ModalityControl2/data/resultsMaster/Manuscript4_CompModelv7_resubmission/restfc/'
# Load in pearson FC matrix
filename1 = 'subj' + str(subj) + '_restfc_pearson.txt'
fcmat_pearson[:,:,subj] = np.loadtxt(indir + filename1, delimiter=',')
    # Load in multreg FC matrix
filename2 = 'subj' + str(subj) + '_restfc_multreg.txt'
fcmat_multreg[:,:,subj] = np.loadtxt(indir + filename2, delimiter=',')
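# Quick shape check on the loaded matrices (regions x regions x subjects); illustrative only.
print fcmat_pearson.shape, fcmat_multreg.shape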
plt.figure()
avg = np.mean(fcmat_pearson,axis=2)
np.fill_diagonal(avg,0)
plt.imshow(avg ,origin='lower',interpolation='none')#,vmin=0)
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Group Rest FC Matrix\nPearson FC', y=1.04, fontsize=18)
plt.colorbar()
#plt.savefig('Fig1a_CompModel5_GroupRestFC_Pearson.pdf')
plt.figure()
avg = np.mean(fcmat_multreg,axis=2)
np.fill_diagonal(avg,0)
# norm = MidpointNormalize(midpoint=0)
plt.imshow(avg ,origin='lower',interpolation='none')#,vmin=-.08,vmax=.08)
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Group Rest FC Matrix\nMultiple Regression FC', y=1.04, fontsize=18)
plt.colorbar()
# plt.savefig('Fig1b_CompModel5_GroupRestFC_MultReg.pdf')
plt.figure()
avg = np.mean(fcmat_multreg,axis=2)
np.fill_diagonal(avg,0)
norm = MidpointNormalize(midpoint=0)
plt.imshow(avg, origin='lower',interpolation='none', cmap='OrRd',vmin=0)
plt.xlabel('Regions')
plt.ylabel('Regions')
plt.title('Group Rest FC Matrix\nMultiple Regression FC', y=1.04, fontsize=18)
plt.colorbar()
def setUpActFlowRSAMat(subj,net,fromnet,tasks,nblocks=20,fc='multreg'):
    """
    Retrieves actflow data from subject and puts it in an SVM ready format
    tasks input -- an array or list of task numbers corresponding to which set of tasks you want to analyze
    May want only local tasks or flexhub tasks
    """
nsamples = len(tasks)*nblocks
nfeatures = nodespernetwork # regions per network
svm_mat = np.zeros((nsamples,nfeatures))
labels = np.zeros((nsamples,))
indir = '/projects2/ModalityControl2/data/resultsMaster/Manuscript4_CompModelv7_resubmission/actflow_predictions/'
indcount = 0
for task in tasks:
if fc=='multreg':
filename = 'subj'+str(subj)+'_task'+str(task)+'_net'+str(fromnet)+'tonet'+str(net)+'_multregFC.txt'
elif fc=='pearson':
filename = 'subj'+str(subj)+'_task'+str(task)+'_net'+str(fromnet)+'tonet'+str(net)+'_pearsonFC.txt'
actflowdat = np.loadtxt(indir+filename,delimiter=',')
svm_mat[indcount:(indcount+nblocks),:] = actflowdat.T
labels[indcount:(indcount+nblocks)] = task
indcount += nblocks
return svm_mat, labels
def setUpBetasRSAMat(subj,net,tasks,nblocks=20):
    """
    Retrieves in task beta from subject and puts it in an SVM ready format
    tasks input -- an array or list of task numbers corresponding to which set of tasks you want to analyze
    May want only local tasks or flexhub tasks
    """
nfeatures = nodespernetwork # Number of regions for each network
nsamples = len(tasks)*nblocks
svm_mat = np.zeros((nsamples,nfeatures))
labels =np.zeros((nsamples,))
net_ind = np.arange(net*nodespernetwork,net*nodespernetwork+nodespernetwork)
indir = '/projects2/ModalityControl2/data/resultsMaster/Manuscript4_CompModelv7_resubmission/task_betas/'
indcount = 0
for task in tasks:
filename = 'subj'+str(subj)+'_task'+str(task)+'_allblocks.txt'
betas = np.loadtxt(indir + filename, delimiter=',')
# Get relevant network data
svm_mat[indcount:(indcount+nblocks),:] = betas[net_ind,:].T # get all trials
labels[indcount:(indcount+nblocks)] = task
indcount += nblocks
return svm_mat, labels
def runActFlowRSA((subj,net,fromnet,tasks,nblocks,fc)):
    """
    Runs a leave-block-out CV style SVM analysis (leaving 4 blocks out per CV)
    Trains on predicted ActFlow data
    Tests on real data (betas)
    """
actflow_mat, labels = setUpActFlowRSAMat(subj,net,fromnet,tasks,nblocks=nblocks,fc=fc)
real_mat, labels = setUpBetasRSAMat(subj,net,tasks,nblocks=nblocks)
# actflow_mat = preprocessing.scale(actflow_mat,axis=0)
# real_mat = preprocessing.scale(real_mat,axis=0)
ncvs = nblocks
indices = np.arange(actflow_mat.shape[0])
matched_rhos = []
mismatch_rhos = []
for cv in range(ncvs):
task_ind = {}
prototype = {}
# Construct prototypes of each task
for task in tasks:
# Get indices for this particular task
task_ind[task] = np.where(labels==task)[0]
# Decide which one is your 'comparison test trial' will be
test_ind = task_ind[task][cv]
# Find the indices for the prototypes
train_ind = np.setxor1d(test_ind,task_ind[task])
prototype[task] = np.mean(real_mat[train_ind,:],axis=0)
# Now compare each pair of tasks with the prototype
for task_a in tasks:
for task_b in tasks:
test_ind = task_ind[task_a][cv] # Compare task a
rho_tmp = stats.spearmanr(prototype[task_b].T,actflow_mat[test_ind,:].T)[0] # With task b
rho_tmp = np.arctanh(rho_tmp)
if task_a==task_b:
# Match!
matched_rhos.append(rho_tmp)
else:
mismatch_rhos.append(rho_tmp)
# Get averages
matched_rhos_avg = np.mean(matched_rhos)
mismatch_rhos_avg = np.mean(mismatch_rhos)
return matched_rhos_avg, mismatch_rhos_avg
# Empty variables for FlexHub task analysis
rho_mat_match_flexhub = np.zeros((ncommunities,ncommunities,nsubjs))
rho_mat_mismatch_flexhub = np.zeros((ncommunities,ncommunities,nsubjs))
for i in range(ncommunities):
for j in range(ncommunities):
if i==j: continue
fromnet = i
net = j
nblocks = nblocks
fc='multreg'
## First run on flexhub tasks
inputs = []
for subj in range(nsubjs): inputs.append((subj,net,fromnet,flexhubtasks,nblocks,fc))
# Run multiprocessing
pool = mp.Pool(processes=15)
results_flexhub = pool.map_async(runActFlowRSA, inputs).get()
pool.close()
pool.join()
## Get results
for subj in range(nsubjs):
match, mismatch = results_flexhub[subj]
rho_mat_match_flexhub[i,j,subj],rho_mat_mismatch_flexhub[i,j,subj] = match, mismatch
# Instantiate empty result matrices
tmat_flexhub = np.zeros((ncommunities,ncommunities))
pmat_flexhub = np.ones((ncommunities,ncommunities))
for i in range(ncommunities):
for j in range(ncommunities):
if i==j: continue
t, p = stats.ttest_rel(rho_mat_match_flexhub[i,j,:],rho_mat_mismatch_flexhub[i,j,:])
tmat_flexhub[i,j] = t
# One-sided p-value
if t > 0:
p = p/2.0
elif t < 0:
p = 1.0 - p/2.0
pmat_flexhub[i,j] = p
## FlexHub Tasks
# Run FDR correction on p-values (Don't get diagonal values)
qmat_flexhub = np.ones((ncommunities,ncommunities))
triu_ind = np.triu_indices(ncommunities,k=1)
tril_ind = np.tril_indices(ncommunities,k=-1)
all_ps = np.hstack((pmat_flexhub[triu_ind],pmat_flexhub[tril_ind]))
h, all_qs = mc.fdrcorrection0(all_ps)
# the first half of all qs belong to triu, second half belongs to tril
qmat_flexhub[triu_ind] = all_qs[:len(triu_ind[0])]
qmat_flexhub[tril_ind] = all_qs[len(tril_ind[0]):]
binary_mat_flexhub = qmat_flexhub < .05
rho_diff_mat_flexhub = np.mean(rho_mat_match_flexhub,axis=2) - np.mean(rho_mat_mismatch_flexhub,axis=2)
plt.figure()
threshold_acc = np.multiply(binary_mat_flexhub,tmat_flexhub)
norm = MidpointNormalize(midpoint=0)
plt.imshow(threshold_acc,norm=norm,origin='lower',interpolation='None',cmap='bwr')
plt.title('Network-to-Network Information Transfer\n(FDR-corrected)\nFlexHub Tasks -- MultReg FC',fontsize=16, y=1.02)
plt.colorbar()
plt.yticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.xticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.ylabel('Network ActFlow FROM',fontsize=15)
plt.xlabel('Network ActFlow TO',fontsize=15)
plt.tight_layout()
# plt.savefig('SFig_CompModel_Network2Network_RSA_MultRegFC_HubNetStim_.pdf')
# Instantiate empty result matrices
tfwe_flexhub = np.zeros((ncommunities,ncommunities))
pfwe_flexhub = np.ones((ncommunities,ncommunities))
ite_flexhub = rho_mat_match_flexhub - rho_mat_mismatch_flexhub
indices = np.ones((ncommunities,ncommunities))
np.fill_diagonal(indices,0)
flatten_ind = np.where(indices==1)
## FlexHub Tasks
t, p = pt.permutationFWE(ite_flexhub[flatten_ind[0],flatten_ind[1],:], permutations=1000, nproc=15)
p = 1.0 - p
np.fill_diagonal(pfwe_flexhub,1.0)
tfwe_flexhub[flatten_ind[0],flatten_ind[1]] = t
pfwe_flexhub[flatten_ind[0],flatten_ind[1]] = p
binary_mat_flexhub = pfwe_flexhub < 0.05
# Print statistics to place in text of paper
# Compute average t-value of hub-network transfers
sig_ind = pfwe_flexhub<0.05
nonsig_ind = pfwe_flexhub>0.05
print 'Average significant T-value:', np.mean(tfwe_flexhub[sig_ind])
print 'Maximum significant P-value:', np.max(pfwe_flexhub[sig_ind])
print 'Average significant ITE:', np.mean(ite_flexhub[sig_ind])
print '\n'
print 'Average non-significant T-value:', np.mean(tfwe_flexhub[nonsig_ind])
print 'Average non-significant P-value:', np.mean(pfwe_flexhub[nonsig_ind])
print 'Average non-significant ITE:', np.mean(ite_flexhub[nonsig_ind])
ite_mat_flexhub = np.mean(rho_mat_match_flexhub,axis=2) - np.mean(rho_mat_mismatch_flexhub,axis=2)
plt.figure()
threshold_acc = np.multiply(binary_mat_flexhub,tmat_flexhub)
norm = MidpointNormalize(midpoint=0)
plt.imshow(threshold_acc,norm=norm,origin='lower',interpolation='None',cmap='bwr')
plt.title('Network-to-Network Information Transfer\n(FWE-corrected)\nFlexHub Tasks -- MultReg FC',fontsize=16, y=1.02)
plt.colorbar()
plt.yticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.xticks(range(ncommunities), ['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'])
plt.ylabel('Network ActFlow FROM',fontsize=15)
plt.xlabel('Network ActFlow TO',fontsize=15)
plt.tight_layout()
# plt.savefig('SFig_CompModel_Network2Network_RSA_MultRegFC_HubNetStim_.pdf')
outofnet_intrinsicFC = np.zeros((ncommunities,nsubjs))
indices = np.arange(nodespernetwork*ncommunities)
for subj in range(nsubjs):
for net in range(0,ncommunities):
# if net == hubnet: continue
net_ind = np.arange(nodespernetwork*net,nodespernetwork*net + nodespernetwork)
net_ind.shape = (len(net_ind),1)
outofnet_ind = np.setxor1d(net_ind,indices)
outofnet_ind.shape = (len(outofnet_ind),1)
outofnet_intrinsicFC[net,subj] = np.mean(fcmat_multreg[net_ind, outofnet_ind.T, subj])
fcmean = np.mean(outofnet_intrinsicFC,axis=1)
fcerr = np.std(outofnet_intrinsicFC,axis=1)/np.sqrt(nsubjs)
fig = plt.bar(range(len(fcmean)), fcmean, yerr=fcerr)
# fig = plt.ylim([.09,0.10])
fig = plt.xticks(np.arange(.4,5.4,1.0),['FlexHub', 'Net1', 'Net2', 'Net3', 'Net4'],fontsize=14)
fig = plt.ylabel('Multiple Regression FC', fontsize=16)
fig = plt.xlabel('Networks', fontsize=16)
fig = plt.title("Out-of-Network (BGC) Intrinsic FC", fontsize=18, y=1.02)
fig = plt.tight_layout()
# pp2 = PdfPages('Fig1_CompModel_OutNetIntrinsicFC.pdf')
# pp2.savefig(fig)
# pp2.close()
pvals = []
tvals = []
hubnet = 0
for net in range(ncommunities):
if hubnet == net: continue
t, p = stats.ttest_rel(outofnet_intrinsicFC[hubnet,:],outofnet_intrinsicFC[net,:])
tvals.append(t)
pvals.append(p)
qvals = mc.fdrcorrection0(pvals)[1]
for net in range(ncommunities):
if net == hubnet:
print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5)
else:
print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5), '\t t =', round(tvals[net-1],3), '\t p =', round(pvals[net-1],3), '\t q =', round(qvals[net-1],3)
print 'Average t-value for hub network greater than local networks:', np.mean(tvals)
contrast = np.zeros((ncommunities-1,outofnet_intrinsicFC.shape[1]))
hubnet = 0
i = 0
for net in range(ncommunities):
if hubnet == net: continue
t, p = stats.ttest_rel(outofnet_intrinsicFC[hubnet,:],outofnet_intrinsicFC[net,:])
contrast[i,:] = outofnet_intrinsicFC[hubnet,:] - outofnet_intrinsicFC[net,:]
i += 1
t, p_fwe = pt.permutationFWE(contrast, permutations=1000, nproc=15)
p_fwe = 1.0 - p_fwe
qvals = mc.fdrcorrection0(pvals)[1]
for net in range(ncommunities):
if net == hubnet:
print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5)
else:
print 'Average out-of-network GBC of network', net, ':', round(np.mean(outofnet_intrinsicFC[net,:]),5), '\t t =', round(t[net-1],3), '\t p =', round(p_fwe[net-1],3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Move up a directory for easier access
Step2: Find the other data
Step3: Which do I want to plot on the same distribution?
Step4: General plot settings
Step5: Plot as-is
Step6: Compare simulation to experiment
Step7: Try $A_{sym} = W(180)/W_{min}$
Step8: Try plotting everything together
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
import pandas as pd
os.getcwd()
os.chdir('..')
os.getcwd()
sys.path.append('../scripts')
import bicorr_plot as bicorr_plot
import bicorr_math as bicorr_math
angle_bin_edges = np.arange(10.01,181,10)
angle_bin_centers = bicorr_math.calc_centers(angle_bin_edges)
angle_bin_edges.shape
os.listdir('cgmf/datap')
Asym_df_meas = pd.read_csv(r'Cf072115_to_Cf072215b/datap/Asym_df.csv', index_col=0)
Asym_df_cgmf = pd.read_csv(r'cgmf/datap/Asym_df.csv', index_col=0)
Asym_df_freya = pd.read_csv(r'freya/datap/Asym_df.csv', index_col=0)
Asym_df_ipol = pd.read_csv(r'ipol/datap/Asym_df.csv', index_col=0)
Asym_df_ipol_noct = pd.read_csv(r'ipol_noct/datap/Asym_df.csv', index_col=0)
Asym_dfs=[Asym_df_meas,Asym_df_cgmf,Asym_df_freya,Asym_df_ipol,Asym_df_ipol_noct]
legends =['Experiment', 'CGMF', 'FREYA', 'PoliMi', 'PoliMi-No CT']
fmts = ['x', 's', 'D', 'o', '^']
colors = ['#5d269b', '#dd673b', '#80bc31', '#3cbfe0', '#4242f4']
to_plot = [0,1,2,3]
line_thickness = 1
ebar_width = 3
Asym_df_cgmf.head()
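# Quick shape check across the loaded data frames (illustrative; relies on the
# Asym_dfs and legends lists defined above being in the same order).
[(name, d.shape) for name, d in zip(legends, Asym_dfs)]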
fig = plt.figure(figsize=(4,4))
ax = plt.gca()
for i in to_plot:
Asym_df = Asym_dfs[i]
ax.errorbar(Asym_df['emin'],
Asym_df['Asym'],
yerr=Asym_df['Asym_err'],
fmt=fmts[i],
markeredgewidth=1,
markerfacecolor='none',
elinewidth=line_thickness,
capthick = line_thickness,
capsize = ebar_width,
c=colors[i])
ax.set_xlim([0.8,4])
ax.set_ylim([.5,4.5])
ax.set_xlabel('$E_{min}$ (MeV)')
ax.set_ylabel('$A_{sym}$')
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(.2))
ax.yaxis.set_minor_locator(MultipleLocator(0.2))
ax.text(1,4,'(a)', size=15, backgroundcolor='white')
leg = plt.legend([legends[i] for i in to_plot],bbox_to_anchor=(.46,.53))
leg.get_frame().set_edgecolor('w')
plt.tight_layout()
bicorr_plot.save_fig_to_folder('Asym_vs_Emin_compare',r'compare\fig')
def plot_calcs(Asym_df):
x = Asym_df['emin']
y = Asym_df['Asym']
yerr = Asym_df['Asym_err']
return x, y, yerr
Asym_df_exp = Asym_dfs[0]
to_plot = [1,2,3]
fig = plt.figure(figsize=(4,4))
ax = plt.gca()
x, y_exp, yerr_exp = plot_calcs(Asym_df_exp)
for i in to_plot:
Asym_df = Asym_dfs[i]
x_sim, y_sim, yerr_sim = plot_calcs(Asym_df)
y,yerr = bicorr_math.prop_err_division(y_sim,yerr_sim,y_exp,yerr_exp)
ax.errorbar(x, y, yerr=yerr,
fmt=fmts[i],
markeredgewidth=1,
markerfacecolor='none',
elinewidth=line_thickness,
capthick = line_thickness,
capsize = ebar_width,
c=colors[i])
leg = plt.legend([legends[i] for i in to_plot],bbox_to_anchor=(.4,.6))
leg.get_frame().set_edgecolor('w')
plt.axhline(1.0,color='gray', linewidth=1,linestyle='--')
ax.set_xlim([0.8,4])
ax.set_ylim([.5,2])
ax.set_xlabel('$E_{min}$ (MeV)')
ax.set_ylabel(r'$\left[A_{sym}\right]_{SIM} / \left[A_{sym}\right]_{EXP}$')
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(.5))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(.2))
ax.yaxis.set_minor_locator(MultipleLocator(0.1))
ax.text(1,1.83,'(b)', size=15, backgroundcolor='white')
plt.tight_layout()
bicorr_plot.save_fig_to_folder('Asym_vs_Emin_diff',r'compare\fig')
os.getcwd()
Asym_df
fig = plt.figure(figsize=(4,4))
ax = plt.gca()
for i in to_plot:
Asym_df = Asym_dfs[i]
ax.errorbar(Asym_df['emin'],
Asym_df['Asym_min'],
yerr=Asym_df['Asym_min_err'],
fmt=fmts[i],
markeredgewidth=1,
markerfacecolor='none',
elinewidth=line_thickness,
capthick = line_thickness,
capsize = ebar_width,
c=colors[i])
ax.set_xlim([0,4])
ax.set_ylim([.5,4.5])
ax.set_xlabel('$E_{min}$')
ax.set_ylabel('$A_{sym}$')
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(.2))
ax.yaxis.set_minor_locator(MultipleLocator(0.2))
plt.legend([legends[i] for i in to_plot])
plt.tight_layout()
bicorr_plot.save_fig_to_folder('Asym_min_vs_Emin_compare',r'compare\fig')
fig = plt.figure(figsize=(6,6))
ax = plt.gca()
for i in to_plot:
Asym_df = Asym_dfs[i]
ax.errorbar(Asym_df['emin'],
Asym_df['Asym'],
yerr=Asym_df['Asym_err'],
fmt=fmts[i],
markeredgewidth=1,
markerfacecolor='k',
elinewidth=line_thickness,
capthick = line_thickness,
capsize = ebar_width,
c=colors[i])
ax.errorbar(Asym_df['emin'],
Asym_df['Asym_min'],
yerr=Asym_df['Asym_min_err'],
fmt=fmts[i],
markeredgewidth=1,
markerfacecolor='none',
elinewidth=line_thickness,
capthick = line_thickness,
capsize = ebar_width,
c=colors[i])
ax.set_xlim([0,4])
ax.set_ylim([1,4])
ax.set_xlabel('$E_{min}$')
ax.set_ylabel('$A_{sym}$')
# Set up ticks
ax.tick_params(axis='both',
which='major',
direction='inout',
length=6,
color='k',
bottom=True, right=True, top=True, left=True)
ax.tick_params(axis='both',
which='minor',
direction='in',
length=3,
bottom=True, right=True, top=True, left=True)
# Major
ax.xaxis.set_major_locator(MultipleLocator(1))
ax.yaxis.set_major_locator(MultipleLocator(1))
# Minor
ax.xaxis.set_minor_locator(MultipleLocator(.2))
ax.yaxis.set_minor_locator(MultipleLocator(0.2))
plt.legend([legends[i] for i in np.repeat(to_plot,2).tolist()])
plt.tight_layout()
np.repeat(to_plot,2).tolist()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simplicity of using backward()
Step2: The simple operations define a forward path $z=(2x)^3$; $z$ is the final output Variable whose gradient we would like to compute
Step3: The gradients of both $y$ and $z$ are None, since the function returns the gradient for the leaves, which is $x$ in this case. At the very beginning, I was assuming something like this
Step4: Now let's set $x$ to be a matrix. Note that $z$ will also be a matrix.
Step5: We can clearly see that the gradients of $z$ are computed w.r.t. each dimension of $x$, because the operations are all element-wise. T.FloatTensor([1, 0]) will give the gradients for the first column of $x$.
Step6: What is retain_graph doing?
|
<ASSISTANT_TASK:>
Python Code:
import torch as T
import torch.autograd
from torch.autograd import Variable
import numpy as np
'''
Define a scalar variable, set requires_grad to be true to add it to backward path for computing gradients
It is actually very simple to use backward()
first define the computation graph, then call backward()
'''
x = Variable(T.randn(1, 1), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
print('y', y)
#define one more operation to check the chain rule
z = y ** 3
print('z', z)
#yes, it is just as simple as this to compute gradients:
z.backward()
print('z gradient', z.grad)
print('y gradient', y.grad)
print('x gradient', x.grad) # note that x.grad is also a Variable
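# Analytic check (illustrative): for z = (2x)**3 the derivative is dz/dx = 24*x**2,
# so the value below should match the printed x.grad.
print('analytic dz/dx', 24 * x.data.pow(2))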
x = Variable(T.randn(1, 1), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#define one more operation to check the chain rule
z = y ** 3
z.backward(T.FloatTensor([1]), retain_graph=True)
print('Keeping the default value of grad_variables gives')
print('z gradient', z.grad)
print('y gradient', y.grad)
print('x gradient', x.grad)
x.grad.data.zero_()
z.backward(T.FloatTensor([0.1]), retain_graph=True)
print('Modifying the default value of grad_variables to 0.1 gives')
print('z gradient', z.grad)
print('y gradient', y.grad)
print('x gradient', x.grad)
'''
Try to set x to be column vector or row vector! You'll see different behaviors.
'''
x = Variable(T.randn(2, 2), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#define one more operation to check the chain rule
z = y ** 3
print('z shape:', z.size())
z.backward(T.FloatTensor([1, 0]), retain_graph=True)
print('x gradient', x.grad)
x.grad.data.zero_() #the gradient for x will be accumulated, it needs to be cleared.
z.backward(T.FloatTensor([0, 1]), retain_graph=True)
print('x gradient', x.grad)
x.grad.data.zero_()
z.backward(T.FloatTensor([1, 1]), retain_graph=True)
print('x gradient', x.grad)
x = Variable(T.randn(2, 2), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#print('y', y)
#define one more operation to check the chain rule
z = y ** 3
out = z.mean()
print('out', out)
out.backward(T.FloatTensor([1]), retain_graph=True)
print('x gradient', x.grad)
x.grad.data.zero_()
out.backward(T.FloatTensor([0.1]), retain_graph=True)
print('x gradient', x.grad)
x = Variable(T.randn(2, 2), requires_grad=True) #x is a leaf created by user, thus grad_fn is none
print('x', x)
#define an operation on x
y = 2 * x
#print('y', y)
#define one more operation to check the chain rule
z = y ** 3
out = z.mean()
print('out', out)
out.backward(T.FloatTensor([1])) #without setting retain_graph to be true, this gives an error.
print('x gradient', x.grad)
x.grad.data.zero_()
out.backward(T.FloatTensor([0.1]))
print('x gradient', x.grad)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can also sample independently from this toy LogPDF, and add that to the visualisation
Step2: We now try to sample from the distribution with MCMC
Step3: The adaptive covariance fails to get into the funnel region.
Step4: Now check how close the result is to the expected result, using the Kullback-Leibler divergence, and compare this to the result from sampling directly.
Step5: Hamiltonian Monte Carlo fares much better on this curved density.
Step6: Hamiltonian Monte Carlo does better than adaptive but still not great.
Step7: Visualising the path of one of the chains shows that the sampler struggles to explore both the neck and the outside region efficiently.
|
<ASSISTANT_TASK:>
Python Code:
import pints
import pints.toy
import numpy as np
import matplotlib.pyplot as plt
# Create log pdf
log_pdf = pints.toy.NealsFunnelLogPDF()
# Plot marginal density
levels = np.linspace(-7, -1, 20)
x = np.linspace(-10, 10, 100)
y = np.linspace(-10, 10, 100)
X, Y = np.meshgrid(x, y)
Z = [[log_pdf.marginal_log_pdf(i, j) for i in x] for j in y]
plt.contour(X, Y, Z, levels = levels)
plt.xlabel('x_i')
plt.ylabel('nu')
plt.show()
direct = log_pdf.sample(1500)
plt.contour(X, Y, Z, levels=levels, colors='k', alpha=0.2)
plt.scatter(direct[:, 0], direct[:, 9], alpha=0.2)
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.show()
# Create an adaptive covariance MCMC routine
x0 = np.random.uniform(-25, 25, size=(3, 10))
mcmc = pints.MCMCController(log_pdf, 3, x0, method=pints.HaarioBardenetACMC)
# Stop after 10000 iterations
mcmc.set_max_iterations(3000)
# Disable logging
mcmc.set_log_to_screen(False)
# Run!
print('Running...')
chains = mcmc.run()
print('Done!')
# Discard warm-up
chains = [chain[1000:] for chain in chains]
stacked = np.vstack(chains)
plt.contour(X, Y, Z, levels=levels, colors='k', alpha=0.2)
plt.scatter(stacked[:, 0], stacked[:, 9], alpha=0.2)
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.show()
print(log_pdf.kl_divergence(stacked))
print(log_pdf.kl_divergence(direct))
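# Rough, library-free convergence check (illustrative): per-chain means of the last
# coordinate (nu) should be near 0 if the chains had explored the funnel properly.
print([np.mean(chain[:, 9]) for chain in chains])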
# Create an adaptive covariance MCMC routine
x0 = np.random.uniform(0, 10, size=(3, 10))
sigma0 = np.repeat(0.25, 10)
mcmc = pints.MCMCController(log_pdf, 3, x0, method=pints.HamiltonianMCMC, sigma0=sigma0)
# Stop after 10000 iterations
mcmc.set_max_iterations(500)
# Disable logging
mcmc.set_log_to_screen(False)
# Run!
print('Running...')
chains = mcmc.run()
print('Done!')
stacked = np.vstack(chains)
print(log_pdf.kl_divergence(stacked))
print(log_pdf.kl_divergence(direct))
divergent_transitions = mcmc.samplers()[0].divergent_iterations()
plt.contour(X, Y, Z, levels=levels, colors='k', alpha=0.2)
plt.plot(chains[2][:, 1], chains[2][:, 9], alpha=0.5)
plt.scatter(chains[0][divergent_transitions, 0], chains[0][divergent_transitions, 1], color='red')
plt.xlim(-10, 10)
plt.ylim(-10, 10)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As you can see, the images in the Kaggle data set are far from being evenly distributed. Many classes have fewer than ten observations while, on the other extreme, a couple of classes have more than forty observations.
Step2: Now let's see how many observations we have in total.
Step3: That's quite a bit of data to work with. Now, let's do a bit more analysis on the distribution using the pandas value_counts method.
Step4: How about we plot all the classes that have more than 20 examples?
Step5: If we're too lazy to count how many of these classes there are, we could just do it this way
Step6: Let's plot all the classes with less than or equal to 20 observations.
Step7: Data Selection
Step8: Now we can print out our data.
Step9: What if we wanted to use our previously created histogram variable to return a list of data frames grouped by class? This is easy, too.
Step10: Let's see what the first element of our df_list looks like.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
#the above allows us to display the seaborn plots within the IPython notebook
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("/Users/.../Machine Learning Competitions/Kaggle/Right Whale Recognition Challenge/features/rgbHistogramsTrainSet8Bins.csv", sep = ",");
plot = sns.countplot(x="WhaleID", data=data, palette = "Blues_d");
# Link: ax.xaxis.set_major_formatter(plt.NullFormatter())
plot.xaxis.set_major_formatter(plt.NullFormatter())
num_obs = data.shape[0]
print num_obs
# Make a new histogram of classes
histogram = data["WhaleID"].value_counts()
# Turn it into a dictionary for later use
# The dictionary is in the form {"Whale_ID" : num_observations}
histogram_dict = histogram.to_dict()
# This code is a little complicated, so let's break it down.
# First we are using a map expression to 'map' each row index of our data
# frame into a boolean value that tells us whether we want to include that
# row of our data frame for the indices variable.
# The first argument to the map method is a function on the indices.
# The second argument to the map is the list of our data frame indices.
# The function looks at a row of the data frame given by a particular
# index, accesses its "WhaleID" value, passes it to the histogram_dict
# we created earlier, returns the number of observations belonging to
# that class, then returns true or false dependent on whether the returned
# value is greater than 20 or not.
# Link: http://pandas.pydata.org/pandas-docs/stable/indexing.html#boolean-indexing
# Link: http://www.python-course.eu/lambda.php
indices = map(lambda x: histogram_dict[data.ix[x,]["WhaleID"]] > 20, range(num_obs))
plot = sns.countplot(x = "WhaleID", data = data[indices], palette = "Greens_d")
# The below code fails b/c it requires the labels
# Link: http://stackoverflow.com/questions/26540035/rotate-label-text-in-seaborn-factorplot
# plot.set_xticklabels(rotation=90)
# So use this label adjustment code instead
# Link: http://stackoverflow.com/questions/31859285/rotate-tick-labels-for-seaborn-barplot
for item in plot.get_xticklabels():
item.set_rotation(80)
# Link: http://stackoverflow.com/questions/12765833/counting-the-number-of-true-booleans-in-a-python-list
print sum(histogram > 20)
indices = map(lambda x: histogram_dict[data.ix[x,]["WhaleID"]] <= 20, range(num_obs))
plot = sns.countplot(x = "WhaleID", data = data[indices], palette = "Purples_d")
plot.xaxis.set_major_formatter(plt.NullFormatter())
# Link: http://stackoverflow.com/questions/7571635/fastest-way-to-check-if-a-value-exist-in-a-list
two_whales_data = data[map(lambda x: data.ix[x,]['WhaleID'] in ['whale_38681', 'whale_95370'], range(num_obs))];
print two_whales_data
def return_data_frames_by_class(data_frame, class_list, y_column_name):
df_list = [];
num_obs = len(data_frame.index)
for class_name in class_list:
bools = map(lambda x: is_row_part_of_class(x, data_frame, class_name, y_column_name), range(num_obs));
df_list.append(data_frame[bools]);
return df_list
def is_row_part_of_class(data_frame_index, data_frame, class_name, y_column_name):
return data_frame.ix[data_frame_index,][y_column_name] == class_name
class_list = histogram.axes[0].tolist()
df_list = return_data_frames_by_class(data, class_list, 'WhaleID')
print df_list[0]
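# Added sketch (not part of the original notebook): the row-by-row map/lambda
# selection above can also be written with pandas' vectorized isin() and groupby().
# Only the 'WhaleID' column used above is assumed.
two_whales_alt = data[data['WhaleID'].isin(['whale_38681', 'whale_95370'])]
# One sub-frame per class, equivalent in content to return_data_frames_by_class
# (the ordering of the groups may differ from the histogram ordering).
df_list_alt = [group for _, group in data.groupby('WhaleID')]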
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Next, let's configure modeling options.
Step3: Train the model
Step4: The next cell triggers the model training job in BigQuery, which takes some time to finish depending on dataset size and model complexity. Set verbose=True if you want to verify the training query details.
Step5: The following cell allows you to see detailed information about the input features used to train the model. It provides the following columns
Step6: Evaluate the model
|
<ASSISTANT_TASK:>
Python Code:
# Uncomment to install required python modules
# !sh ../utils/setup.sh
# Add custom utils module to Python environment
import os
import sys
sys.path.append(os.path.abspath(os.pardir))
from gps_building_blocks.cloud.utils import bigquery as bigquery_utils
from utils import model
from utils import helpers
configs = helpers.get_configs('config.yaml')
dest_configs, run_id_configs = configs.destination, configs.run_id
# GCP project ID
PROJECT_ID = dest_configs.project_id
# Name of the BigQuery dataset
DATASET_NAME = dest_configs.dataset_name
# To distinguish the separate runs of the training pipeline
RUN_ID = run_id_configs.train
# BigQuery table name containing model development dataset
FEATURES_DEV_TABLE = f'features_dev_table_{RUN_ID}'
# BigQuery table name containing model testing dataset
FEATURES_TEST_TABLE = f'features_test_table_{RUN_ID}'
# Output model name to save in BigQuery
MODEL_NAME = f'propensity_model_{RUN_ID}'
bq_utils = bigquery_utils.BigQueryUtils(project_id=PROJECT_ID)
# Read in Features table schema to select feature names for model training
sql = ("SELECT column_name "
f"FROM `{PROJECT_ID}.{DATASET_NAME}`.INFORMATION_SCHEMA.COLUMNS "
f"WHERE table_name='{FEATURES_DEV_TABLE}';")
print(sql)
features_schema = bq_utils.run_query(sql).to_dataframe()
# Columns to remove from the feature list
to_remove = ['window_start_ts', 'window_end_ts', 'snapshot_ts', 'user_id',
'label', 'key', 'data_split']
# Selected features for model training
training_features = [v for v in features_schema['column_name']
if v not in to_remove]
print('Number of training features:', len(training_features))
print(training_features)
# Set parameters for AUTOML_CLASSIFIER model
FEATURE_COLUMNS = training_features
TARGET_COLUMN = 'label'
params = {
'model_path': f'{PROJECT_ID}.{DATASET_NAME}.{MODEL_NAME}',
'features_table_path': f'{PROJECT_ID}.{DATASET_NAME}.{FEATURES_DEV_TABLE}',
'feature_columns': FEATURE_COLUMNS,
'target_column': TARGET_COLUMN,
'MODEL_TYPE': 'AUTOML_CLASSIFIER',
'BUDGET_HOURS': 1.0,
# Enable data_split_col if you want to use custom data split.
# Details on AUTOML data split column:
# https://cloud.google.com/automl-tables/docs/prepare#split
# 'DATA_SPLIT_COL': 'data_split',
'OPTIMIZATION_OBJECTIVE': 'MAXIMIZE_AU_ROC'
}
propensity_model = model.PropensityModel(bq_utils=bq_utils,
params=params)
propensity_model.train(verbose=False)
propensity_model.get_feature_info()
# Model performance on the model development dataset on which the final
# model has been trained
EVAL_TABLE_NAME = FEATURES_DEV_TABLE
eval_params = {
'eval_table_path': f'{PROJECT_ID}.{DATASET_NAME}.{EVAL_TABLE_NAME}',
'threshold': 0.5
}
propensity_model.evaluate(eval_params, verbose=False)
# Model performance on the held out test dataset
EVAL_TABLE_NAME = FEATURES_TEST_TABLE
eval_params = {
'eval_table_path': f'{PROJECT_ID}.{DATASET_NAME}.{EVAL_TABLE_NAME}',
'threshold': 0.5
}
propensity_model.evaluate(eval_params, verbose=False)
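# Optional sanity check (added sketch, not part of the original pipeline): count the
# rows available in the held-out test table before trusting the evaluation above.
# It reuses the bq_utils helper and table names defined earlier.
count_sql = (f"SELECT COUNT(*) AS n "
             f"FROM `{PROJECT_ID}.{DATASET_NAME}.{FEATURES_TEST_TABLE}`;")
print(bq_utils.run_query(count_sql).to_dataframe())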
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import the libraries created for this analysis
Step2: Generate the datasets for all the days
Step3: Process the previous lists, concatenating them per motor for each day
Step4: Measure the number of rows in the files (with and without missing values)
Step5: Visualize the data in the table
Step6: Compute the missing data per motor
Step7: Compute the ratio between the effective data per motor and the total number of rows
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import ext_datos as ext
import procesar as pro
import time_plot as tplt
dia1 = ext.extraer_data('dia1')
cd ..
dia2 = ext.extraer_data('dia2')
cd ..
dia3 = ext.extraer_data('dia3')
cd ..
dia4 = ext.extraer_data('dia4')
motoresdia1 = pro.procesar(dia1)
motoresdia2 = pro.procesar(dia2)
motoresdia3 = pro.procesar(dia3)
motoresdia4 = pro.procesar(dia4)
con1 , sin1 = len(motoresdia1), len(motoresdia1.dropna())
con2 , sin2 = len(motoresdia2), len(motoresdia2.dropna())
con3 , sin3 = len(motoresdia3), len(motoresdia3.dropna())
con4 , sin4 = len(motoresdia4), len(motoresdia4.dropna())
d = {'datos_con_valores_perdidos':[con1,con2,con3,con4],\
'datos_sin_valores_perdidos':[sin1,sin2,sin3,sin4]}
tabla1= pd.DataFrame(d,index=[1,2,3,4])
tabla1
m1d1, m2d1 = len(motoresdia1.motorRpm_m1.dropna()),\
len(motoresdia1.motorRpm_m2.dropna())
m1d2, m2d2 = len(motoresdia2.motorRpm_m1.dropna()),\
len(motoresdia2.motorRpm_m2.dropna())
m1d3, m2d3 = len(motoresdia3.motorRpm_m1.dropna()),\
len(motoresdia3.motorRpm_m2.dropna())
m1d4, m2d4 = len(motoresdia4.motorRpm_m1.dropna()),\
len(motoresdia4.motorRpm_m2.dropna())
p1d1, p2d1 = round(m1d1/float(con1),4) , round(m2d1/float(con1),4)
p1d2, p2d2 = round(m1d2/float(con2),4) , round(m2d2/float(con2),4)
p1d3 , p2d3 = round(m1d3/float(con3),4) , round(m2d3/float(con3),4)
p1d4 , p2d4 = round(m1d4/float(con4),4) , round(m2d4/float(con4),4)
labels = {'motor1':[100*p1d1, 100*p1d2, 100*p1d3, 100*p1d4],'motor2':\
[100*p2d1, 100*p2d2, 100*p2d3, 100*p2d4]}
tabla2 = pd.DataFrame(labels, index=[1,2,3,4])
tabla2
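# Added sketch (not in the original analysis): pandas can compute the percentage of
# non-missing values per column directly with notnull().mean(), giving the same
# per-motor ratios as the manual counts above.
completeness_dia1 = motoresdia1.notnull().mean() * 100
completeness_dia1[['motorRpm_m1', 'motorRpm_m2']]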
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Define Dataset Metadata
Step2: 2. Define Data Input Function
Step3: b. Data pipeline input function
Step4: 3. Define Feature Columns
Step5: 4. Define an Estimator Creation Function
Step6: b. Define the estimator
Step7: 5. Run Experiment
Step8: b. Define serving function
Step9: c. Define TrainSpec and EvalSpec
Step10: d. Run the experiment via train_and_evaluate
Step11: 6. Evaluate the Model
Step12: 7. Prediction
Step13: Serving via the Saved Model
|
<ASSISTANT_TASK:>
Python Code:
# Imports assumed by the cells below (added; the original notebook defined these in an earlier cell)
import shutil
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow import data
from tensorflow.python.feature_column import feature_column

MODEL_NAME = 'class-model-01'
TRAIN_DATA_FILES_PATTERN = 'data/train-*.tfrecords'
VALID_DATA_FILES_PATTERN = 'data/valid-*.tfrecords'
TEST_DATA_FILES_PATTERN = 'data/test-*.tfrecords'
RESUME_TRAINING = False
PROCESS_FEATURES = True
EXTEND_FEATURE_COLUMNS = True
MULTI_THREADING = True
HEADER = ['key','x','y','alpha','beta','target']
HEADER_DEFAULTS = [[0], [0.0], [0.0], ['NA'], ['NA'], [0.0]]
NUMERIC_FEATURE_NAMES = ['x', 'y']
CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY = {'alpha':['ax01', 'ax02'], 'beta':['bx01', 'bx02']}
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.keys())
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
TARGET_NAME = 'target'
TARGET_LABELS = ['positive', 'negative']
UNUSED_FEATURE_NAMES = list(set(HEADER) - set(FEATURE_NAMES) - {TARGET_NAME})
print("Header: {}".format(HEADER))
print("Numeric Features: {}".format(NUMERIC_FEATURE_NAMES))
print("Categorical Features: {}".format(CATEGORICAL_FEATURE_NAMES))
print("Target: {} - labels: {}".format(TARGET_NAME, TARGET_LABELS))
print("Unused Features: {}".format(UNUSED_FEATURE_NAMES))
def parse_tf_example(example_proto):
feature_spec = {}
for feature_name in NUMERIC_FEATURE_NAMES:
feature_spec[feature_name] = tf.FixedLenFeature(shape=(), dtype=tf.float32)
for feature_name in CATEGORICAL_FEATURE_NAMES:
feature_spec[feature_name] = tf.FixedLenFeature(shape=(), dtype=tf.string)
feature_spec[TARGET_NAME] = tf.FixedLenFeature(shape=(), dtype=tf.string)
parsed_features = tf.parse_example(serialized=example_proto, features=feature_spec)
target = parsed_features.pop(TARGET_NAME)
return parsed_features, target
def process_features(features):
features["x_2"] = tf.square(features['x'])
features["y_2"] = tf.square(features['y'])
features["xy"] = tf.multiply(features['x'], features['y']) # features['x'] * features['y']
features['dist_xy'] = tf.sqrt(tf.squared_difference(features['x'],features['y']))
return features
def tfrecods_input_fn(files_name_pattern, mode=tf.estimator.ModeKeys.EVAL,
num_epochs=None,
batch_size=200):
shuffle = True if mode == tf.estimator.ModeKeys.TRAIN else False
print("")
print("* data input_fn:")
print("================")
print("Input file(s): {}".format(files_name_pattern))
print("Batch size: {}".format(batch_size))
print("Epoch Count: {}".format(num_epochs))
print("Mode: {}".format(mode))
print("Shuffle: {}".format(shuffle))
print("================")
print("")
file_names = tf.matching_files(files_name_pattern)
dataset = data.TFRecordDataset(filenames=file_names)
if shuffle:
dataset = dataset.shuffle(buffer_size=2 * batch_size + 1)
dataset = dataset.batch(batch_size)
dataset = dataset.map(lambda tf_example: parse_tf_example(tf_example))
if PROCESS_FEATURES:
dataset = dataset.map(lambda features, target: (process_features(features), target))
dataset = dataset.repeat(num_epochs)
iterator = dataset.make_one_shot_iterator()
features, target = iterator.get_next()
return features, target
features, target = tfrecods_input_fn(files_name_pattern="")
print("Feature read from TFRecords: {}".format(list(features.keys())))
print("Target read from TFRecords: {}".format(target))
def extend_feature_columns(feature_columns, hparams):
num_buckets = hparams.num_buckets
embedding_size = hparams.embedding_size
buckets = np.linspace(-3, 3, num_buckets).tolist()
alpha_X_beta = tf.feature_column.crossed_column(
[feature_columns['alpha'], feature_columns['beta']], 4)
x_bucketized = tf.feature_column.bucketized_column(
feature_columns['x'], boundaries=buckets)
y_bucketized = tf.feature_column.bucketized_column(
feature_columns['y'], boundaries=buckets)
x_bucketized_X_y_bucketized = tf.feature_column.crossed_column(
[x_bucketized, y_bucketized], num_buckets**2)
x_bucketized_X_y_bucketized_embedded = tf.feature_column.embedding_column(
x_bucketized_X_y_bucketized, dimension=embedding_size)
feature_columns['alpha_X_beta'] = alpha_X_beta
feature_columns['x_bucketized_X_y_bucketized'] = x_bucketized_X_y_bucketized
feature_columns['x_bucketized_X_y_bucketized_embedded'] = x_bucketized_X_y_bucketized_embedded
return feature_columns
def get_feature_columns(hparams):
CONSTRUCTED_NUMERIC_FEATURES_NAMES = ['x_2', 'y_2', 'xy', 'dist_xy']
all_numeric_feature_names = NUMERIC_FEATURE_NAMES.copy()
if PROCESS_FEATURES:
all_numeric_feature_names += CONSTRUCTED_NUMERIC_FEATURES_NAMES
numeric_columns = {feature_name: tf.feature_column.numeric_column(feature_name)
for feature_name in all_numeric_feature_names}
categorical_column_with_vocabulary = \
{item[0]: tf.feature_column.categorical_column_with_vocabulary_list(item[0], item[1])
for item in CATEGORICAL_FEATURE_NAMES_WITH_VOCABULARY.items()}
feature_columns = {}
if numeric_columns is not None:
feature_columns.update(numeric_columns)
if categorical_column_with_vocabulary is not None:
feature_columns.update(categorical_column_with_vocabulary)
if EXTEND_FEATURE_COLUMNS:
feature_columns = extend_feature_columns(feature_columns, hparams)
return feature_columns
feature_columns = get_feature_columns(tf.contrib.training.HParams(num_buckets=5,embedding_size=3))
print("Feature Columns: {}".format(feature_columns))
def get_wide_deep_columns():
feature_columns = list(get_feature_columns(hparams).values())
dense_columns = list(
filter(lambda column: isinstance(column, feature_column._NumericColumn) |
isinstance(column, feature_column._EmbeddingColumn),
feature_columns
)
)
categorical_columns = list(
filter(lambda column: isinstance(column, feature_column._VocabularyListCategoricalColumn) |
isinstance(column, feature_column._BucketizedColumn),
feature_columns)
)
sparse_columns = list(
filter(lambda column: isinstance(column,feature_column._HashedCategoricalColumn) |
isinstance(column, feature_column._CrossedColumn),
feature_columns)
)
indicator_columns = list(
map(lambda column: tf.feature_column.indicator_column(column),
categorical_columns)
)
deep_feature_columns = dense_columns + indicator_columns
wide_feature_columns = categorical_columns + sparse_columns
return wide_feature_columns, deep_feature_columns
def create_estimator(run_config, hparams, print_desc=False):
wide_feature_columns, deep_feature_columns = get_wide_deep_columns()
estimator = tf.estimator.DNNLinearCombinedClassifier(
n_classes= len(TARGET_LABELS),
label_vocabulary=TARGET_LABELS,
dnn_feature_columns = deep_feature_columns,
linear_feature_columns = wide_feature_columns,
dnn_hidden_units= hparams.hidden_units,
dnn_optimizer= tf.train.AdamOptimizer(),
dnn_activation_fn= tf.nn.elu,
dnn_dropout= hparams.dropout_prob,
config= run_config
)
if print_desc:
print("")
print("*Estimator Type:")
print("================")
print(type(estimator))
print("")
print("*deep columns:")
print("==============")
print(deep_feature_columns)
print("")
print("wide columns:")
print("=============")
print(wide_feature_columns)
print("")
return estimator
TRAIN_SIZE = 12000
NUM_EPOCHS = 1000
BATCH_SIZE = 500
NUM_EVAL = 10
TOTAL_STEPS = (TRAIN_SIZE/BATCH_SIZE)*NUM_EPOCHS
CHECKPOINT_STEPS = int((TRAIN_SIZE/BATCH_SIZE) * (NUM_EPOCHS/NUM_EVAL))
hparams = tf.contrib.training.HParams(
num_epochs = NUM_EPOCHS,
batch_size = BATCH_SIZE,
hidden_units=[16, 12, 8],
num_buckets = 6,
embedding_size = 3,
max_steps = TOTAL_STEPS,
dropout_prob = 0.001)
model_dir = 'trained_models/{}'.format(MODEL_NAME)
run_config = tf.contrib.learn.RunConfig(
save_checkpoints_steps=CHECKPOINT_STEPS,
tf_random_seed=19830610,
model_dir=model_dir
)
print(hparams)
print("Model Directory:", run_config.model_dir)
print("")
print("Dataset Size:", TRAIN_SIZE)
print("Batch Size:", BATCH_SIZE)
print("Steps per Epoch:",TRAIN_SIZE/BATCH_SIZE)
print("Total Steps:", TOTAL_STEPS)
print("Required Evaluation Steps:", NUM_EVAL)
print("That is 1 evaluation step after each",NUM_EPOCHS/NUM_EVAL," epochs")
print("Save Checkpoint After",CHECKPOINT_STEPS,"steps")
def csv_serving_input_fn():
SERVING_HEADER = ['x','y','alpha','beta']
SERVING_HEADER_DEFAULTS = [[0.0], [0.0], ['NA'], ['NA']]
rows_string_tensor = tf.placeholder(dtype=tf.string,
shape=[None],
name='csv_rows')
receiver_tensor = {'csv_rows': rows_string_tensor}
row_columns = tf.expand_dims(rows_string_tensor, -1)
columns = tf.decode_csv(row_columns, record_defaults=SERVING_HEADER_DEFAULTS)
features = dict(zip(SERVING_HEADER, columns))
return tf.estimator.export.ServingInputReceiver(
process_features(features), receiver_tensor)
train_spec = tf.estimator.TrainSpec(
input_fn = lambda: tfrecods_input_fn(
TRAIN_DATA_FILES_PATTERN,
mode = tf.estimator.ModeKeys.TRAIN,
num_epochs=hparams.num_epochs,
batch_size=hparams.batch_size
),
max_steps=hparams.max_steps,
hooks=None
)
eval_spec = tf.estimator.EvalSpec(
input_fn = lambda: tfrecods_input_fn(
VALID_DATA_FILES_PATTERN,
mode=tf.estimator.ModeKeys.EVAL,
num_epochs=1,
batch_size=hparams.batch_size
),
exporters=[tf.estimator.LatestExporter(
name="predict", # the name of the folder in which the model will be exported to under export
serving_input_receiver_fn=csv_serving_input_fn,
exports_to_keep=1,
as_text=True)],
steps=None,
hooks=None
)
if not RESUME_TRAINING:
print("Removing previous artifacts...")
shutil.rmtree(model_dir, ignore_errors=True)
else:
print("Resuming training...")
tf.logging.set_verbosity(tf.logging.INFO)
time_start = datetime.utcnow()
print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
estimator = create_estimator(run_config, hparams, True)
tf.estimator.train_and_evaluate(
estimator=estimator,
train_spec=train_spec,
eval_spec=eval_spec
)
time_end = datetime.utcnow()
print(".......................................")
print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
TRAIN_SIZE = 12000
VALID_SIZE = 3000
TEST_SIZE = 5000
train_input_fn = lambda: tfrecods_input_fn(files_name_pattern= TRAIN_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.EVAL,
batch_size= TRAIN_SIZE)
valid_input_fn = lambda: tfrecods_input_fn(files_name_pattern= VALID_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.EVAL,
batch_size= VALID_SIZE)
test_input_fn = lambda: tfrecods_input_fn(files_name_pattern= TEST_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.EVAL,
batch_size= TEST_SIZE)
estimator = create_estimator(run_config, hparams)
train_results = estimator.evaluate(input_fn=train_input_fn, steps=1)
print()
print("############################################################################################")
print("# Train Measures: {}".format(train_results))
print("############################################################################################")
valid_results = estimator.evaluate(input_fn=valid_input_fn, steps=1)
print()
print("############################################################################################")
print("# Valid Measures: {}".format(valid_results))
print("############################################################################################")
test_results = estimator.evaluate(input_fn=test_input_fn, steps=1)
print()
print("############################################################################################")
print("# Test Measures: {}".format(test_results))
print("############################################################################################")
import itertools
predict_input_fn = lambda: tfrecods_input_fn(files_name_pattern= TEST_DATA_FILES_PATTERN,
mode= tf.estimator.ModeKeys.PREDICT,
batch_size= 5)
predictions = list(itertools.islice(estimator.predict(input_fn=predict_input_fn),5))
print("")
print("* Predicted Classes: {}".format(list(map(lambda item: item["class_ids"][0]
,predictions))))
print("* Predicted Probabilities: {}".format(list(map(lambda item: list(item["probabilities"])
,predictions))))
import os
export_dir = model_dir +"/export/predict/"
saved_model_dir = export_dir + "/" + os.listdir(path=export_dir)[-1]
print(saved_model_dir)
predictor_fn = tf.contrib.predictor.from_saved_model(
export_dir = saved_model_dir,
signature_def_key="predict"
)
output = predictor_fn({'csv_rows': ["0.5,1,ax01,bx02", "-0.5,-1,ax02,bx02"]})
print(output)
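# Added sketch: the loaded predictor can be reused for further CSV rows; the exact
# output keys depend on the estimator's 'predict' signature, so we just inspect them here.
more_output = predictor_fn({'csv_rows': ["1.2,-0.7,ax01,bx01"]})
print(list(more_output.keys()))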
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Download & Process Mordor Dataset
Step2: Analytic I
Step3: Analytic II
Step4: Analytic III
Step5: Analytic IV
Step6: Analytic V
Step7: Analytic VI
|
<ASSISTANT_TASK:>
Python Code:
from openhunt.mordorutils import *
spark = get_spark()
mordor_file = "https://raw.githubusercontent.com/OTRF/mordor/master/datasets/small/windows/execution/host/empire_launcher_vbs.zip"
registerMordorSQLTable(spark, mordor_file, "mordorTable")
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, Channel
FROM mordorTable
WHERE (Channel = "Microsoft-Windows-PowerShell/Operational" OR Channel = "Windows PowerShell")
AND (EventID = 400 OR EventID = 4103)
'''
)
df.show(10,False)
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, NewProcessName, ParentProcessName
FROM mordorTable
WHERE LOWER(Channel) = "security"
AND EventID = 4688
AND NewProcessName LIKE "%powershell.exe"
AND NOT ParentProcessName LIKE "%explorer.exe"
'''
)
df.show(10,False)
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, Image, ParentImage
FROM mordorTable
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 1
AND Image LIKE "%powershell.exe"
AND NOT ParentImage LIKE "%explorer.exe"
'''
)
df.show(10,False)
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, Image, ImageLoaded
FROM mordorTable
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 7
AND (lower(Description) = "system.management.automation" OR lower(ImageLoaded) LIKE "%system.management.automation%")
'''
)
df.show(10,False)
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, Image, PipeName
FROM mordorTable
WHERE Channel = "Microsoft-Windows-Sysmon/Operational"
AND EventID = 17
AND lower(PipeName) LIKE "\\\\pshost%"
'''
)
df.show(10,False)
df = spark.sql(
'''
SELECT `@timestamp`, Hostname, Message
FROM mordorTable
WHERE Channel = "Microsoft-Windows-PowerShell/Operational"
AND EventID = 53504
'''
)
df.show(10,False)
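# Optional overview (added sketch, not one of the original analytics): count events
# per Channel/EventID in the registered mordorTable to see what the dataset contains.
df = spark.sql(
    '''
    SELECT Channel, EventID, COUNT(*) AS event_count
    FROM mordorTable
    GROUP BY Channel, EventID
    ORDER BY event_count DESC
    '''
)
df.show(10,False)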
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoder Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step40: Batch and pad the source and target sequences
Step43: Train
Step45: Save Parameters
Step47: Checkpoint
Step50: Sentence to Sequence
Step52: Translate
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (5, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
Convert source and target text to proper word ids
:param source_text: String that contains all the source text.
:param target_text: String that contains all the target text.
:param source_vocab_to_int: Dictionary to go from the source words to an id
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: A tuple of lists (source_id_text, target_id_text)
#source_id_text = [[source_vocab_to_int.get(word, source_vocab_to_int['<UNK>']) for word in sentence.split(' ')] for sentence in source_text.split('\n')]
#target_id_text = [[target_vocab_to_int.get(word, target_vocab_to_int['<UNK>']) for word in sentence.split(' ')] + [target_vocab_to_int['<EOS>']] for sentence in target_text.split('\n')]
tmp_source_text = source_text.split("\n")
tmp_target_text = target_text.split("\n")
source_id_text = []
target_id_text= []
for sentence in tmp_source_text:
tmp = []
for word in sentence.split():
tmp.append(source_vocab_to_int[word])
source_id_text.append(tmp)
for sentence in tmp_target_text:
tmp = []
for word in sentence.split():
tmp.append(target_vocab_to_int[word])
tmp.append(target_vocab_to_int['<EOS>'])
target_id_text.append(tmp)
return (source_id_text, target_id_text)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, learning rate, and lengths of source and target sequences.
:return: Tuple (input, targets, learning rate, keep probability, target sequence length,
max target sequence length, source sequence length)
input_ = tf.placeholder(tf.int32, [None, None], name='input')
targets = tf.placeholder(tf.int32, [None, None], name='target')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
len_source = tf.placeholder(tf.int32, [None], name='source_sequence_length')
len_target = tf.placeholder(tf.int32, [None], name='target_sequence_length')
max_target = tf.reduce_max(len_target, name='max_target_len')
return (input_, targets, learning_rate, keep_prob, len_target, max_target, len_source)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoder_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for encoding
:param target_data: Target Placehoder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
go_id = target_vocab_to_int['<GO>']
last_word = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
preproc_target = tf.concat([tf.fill([batch_size, 1], go_id), last_word], 1)
return preproc_target
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_encoding_input(process_decoder_input)
from imp import reload
reload(tests)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
encoding_embedding_size):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:param source_sequence_length: a list of the lengths of each sequence in the batch
:param source_vocab_size: vocabulary size of source data
:param encoding_embedding_size: embedding size of source data
:return: tuple (RNN output, RNN state)
embeded_inputs = tf.contrib.layers.embed_sequence(rnn_inputs, vocab_size=source_vocab_size, embed_dim=encoding_embedding_size)
def rnn_cell():
return tf.contrib.rnn.LSTMCell(num_units=rnn_size, initializer=tf.truncated_normal_initializer(stddev=0.1))
cell = tf.contrib.rnn.MultiRNNCell([rnn_cell() for _ in range(num_layers)])
dropped_cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)
rnn_output, rnn_state = tf.nn.dynamic_rnn(dropped_cell, embeded_inputs, sequence_length=source_sequence_length, dtype=tf.float32)
return (rnn_output, rnn_state)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input,
target_sequence_length, max_summary_length,
output_layer, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_summary_length: The length of the longest sequence in the batch
:param output_layer: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing training logits and sample_id
helper = tf.contrib.seq2seq.TrainingHelper(dec_embed_input, target_sequence_length, time_major=False)
dropout_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
decoder = tf.contrib.seq2seq.BasicDecoder(dropout_cell, helper, encoder_state, output_layer)
decoder_output = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True, maximum_iterations=max_summary_length)
return decoder_output[0]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, max_target_sequence_length,
vocab_size, output_layer, batch_size, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param max_target_sequence_length: Maximum length of target sequences
:param vocab_size: Size of decoder/target vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_layer: Function to apply the output layer
:param batch_size: Batch size
:param keep_prob: Dropout keep probability
:return: BasicDecoderOutput containing inference logits and sample_id
start_token = tf.tile(tf.constant([start_of_sequence_id], dtype=tf.int32), [batch_size])
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings, start_token, end_of_sequence_id)
decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, helper, encoder_state, output_layer)
decoder_output = tf.contrib.seq2seq.dynamic_decode(decoder, impute_finished=True, maximum_iterations=max_target_sequence_length)
return decoder_output[0]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_input, encoder_state,
target_sequence_length, max_target_sequence_length,
rnn_size,
num_layers, target_vocab_to_int, target_vocab_size,
batch_size, keep_prob, decoding_embedding_size):
Create decoding layer
:param dec_input: Decoder input
:param encoder_state: Encoder state
:param target_sequence_length: The lengths of each sequence in the target batch
:param max_target_sequence_length: Maximum length of target sequences
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param target_vocab_size: Size of target vocabulary
:param batch_size: The size of the batch
:param keep_prob: Dropout keep probability
:param decoding_embedding_size: Decoding embedding size
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
#dec_embeded_inputs = tf.contrib.layers.embed_sequence(dec_input, target_vocab_size, decoding_embedding_size)
dec_embeddings = tf.Variable(tf.truncated_normal([target_vocab_size, decoding_embedding_size]))
dec_embeded_inputs = tf.nn.embedding_lookup(dec_embeddings, dec_input)
start_of_sequence_id = target_vocab_to_int['<GO>']
end_of_sequence_id = target_vocab_to_int['<EOS>']
def rnn_cell():
return tf.contrib.rnn.LSTMCell(num_units=rnn_size, initializer=tf.truncated_normal_initializer(stddev=0.1))
dec_cell = tf.contrib.rnn.MultiRNNCell([rnn_cell() for _ in range(num_layers)])
dec_rnn_output = Dense(target_vocab_size, kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
with tf.variable_scope('decoding'):
training_bdo = decoding_layer_train(encoder_state, dec_cell, dec_embeded_inputs, target_sequence_length, max_target_sequence_length, dec_rnn_output, keep_prob)
with tf.variable_scope('decoding', reuse=True):
inference_bdo = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id, max_target_sequence_length, target_vocab_size, dec_rnn_output, batch_size, keep_prob)
return (training_bdo, inference_bdo)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size,
source_sequence_length, target_sequence_length,
max_target_sentence_length,
source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size,
rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param source_sequence_length: Sequence Lengths of source sequences in the batch
:param target_sequence_length: Sequence Lengths of target sequences in the batch
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training BasicDecoderOutput, Inference BasicDecoderOutput)
encoder_output, encoder_state = encoding_layer(input_data, rnn_size, num_layers, keep_prob,
source_sequence_length, source_vocab_size,
enc_embedding_size)
decoder_input = process_decoder_input(target_data, target_vocab_to_int, batch_size)
training_bdo, inference_bdo = decoding_layer(decoder_input, encoder_state, target_sequence_length,
max_target_sentence_length, rnn_size, num_layers,
target_vocab_to_int, target_vocab_size, batch_size,
keep_prob, dec_embedding_size)
return (training_bdo, inference_bdo)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# Number of Epochs
epochs = 5
# Batch Size
batch_size = 512
# RNN Size
rnn_size = 400
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 196
decoding_embedding_size = 196
# Learning Rate
learning_rate = 0.001
# Dropout Keep Probability
keep_probability = 0.75
display_step = 100
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_target_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length = model_inputs()
#sequence_length = tf.placeholder_with_default(max_target_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
train_logits, inference_logits = seq2seq_model(tf.reverse(input_data, [-1]),
targets,
keep_prob,
batch_size,
source_sequence_length,
target_sequence_length,
max_target_sequence_length,
len(source_vocab_to_int),
len(target_vocab_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers,
target_vocab_to_int)
training_logits = tf.identity(train_logits.rnn_output, name='logits')
inference_logits = tf.identity(inference_logits.sample_id, name='predictions')
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
def pad_sentence_batch(sentence_batch, pad_int):
Pad sentences with <PAD> so that each sentence of a batch has the same length
max_sentence = max([len(sentence) for sentence in sentence_batch])
return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]
def get_batches(sources, targets, batch_size, source_pad_int, target_pad_int):
Batch targets, sources, and the lengths of their sentences together
for batch_i in range(0, len(sources)//batch_size):
start_i = batch_i * batch_size
# Slice the right amount for the batch
sources_batch = sources[start_i:start_i + batch_size]
targets_batch = targets[start_i:start_i + batch_size]
# Pad
pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
# Need the lengths for the _lengths parameters
pad_targets_lengths = []
for target in pad_targets_batch:
pad_targets_lengths.append(len(target))
pad_source_lengths = []
for source in pad_sources_batch:
pad_source_lengths.append(len(source))
yield pad_sources_batch, pad_targets_batch, pad_source_lengths, pad_targets_lengths
DON'T MODIFY ANYTHING IN THIS CELL
def get_accuracy(target, logits):
Calculate accuracy
max_seq = max(target.shape[1], logits.shape[1])
if max_seq - target.shape[1]:
target = np.pad(
target,
[(0,0),(0,max_seq - target.shape[1])],
'constant')
if max_seq - logits.shape[1]:
logits = np.pad(
logits,
[(0,0),(0,max_seq - logits.shape[1])],
'constant')
return np.mean(np.equal(target, logits))
# Split data to training and validation sets
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = source_int_text[:batch_size]
valid_target = target_int_text[:batch_size]
(valid_sources_batch, valid_targets_batch, valid_sources_lengths, valid_targets_lengths ) = next(get_batches(valid_source,
valid_target,
batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>']))
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch, sources_lengths, targets_lengths) in enumerate(
get_batches(train_source, train_target, batch_size,
source_vocab_to_int['<PAD>'],
target_vocab_to_int['<PAD>'])):
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths,
keep_prob: keep_probability})
if batch_i % display_step == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch,
source_sequence_length: sources_lengths,
target_sequence_length: targets_lengths,
keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_sources_batch,
source_sequence_length: valid_sources_lengths,
target_sequence_length: valid_targets_lengths,
keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(valid_targets_batch, batch_valid_logits)
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.4f}, Validation Accuracy: {:>6.4f}, Loss: {:>6.4f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
Convert a sentence to a sequence of ids
:param sentence: String
:param vocab_to_int: Dictionary to go from the words to an id
:return: List of word ids
sentence = sentence.lower().split(' ')
return [vocab_to_int.get(v, vocab_to_int['<UNK>']) for v in sentence]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('predictions:0')
target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
translate_logits = sess.run(logits, {input_data: [translate_sentence]*batch_size,
target_sequence_length: [len(translate_sentence)*2]*batch_size,
source_sequence_length: [len(translate_sentence)]*batch_size,
keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in translate_logits]))
print(' French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in translate_logits])))
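# Optional post-processing sketch (added, not part of the original project):
# drop <PAD>/<EOS> ids before joining the predicted words into a sentence.
pad_id = target_vocab_to_int['<PAD>']
eos_id = target_vocab_to_int['<EOS>']
clean_ids = [i for i in translate_logits if i not in (pad_id, eos_id)]
print('  Cleaned French Words: {}'.format(" ".join([target_int_to_vocab[i] for i in clean_ids])))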
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We can index elements in an array using the square bracket and indices
Step2: If we omit an index of a multidimensional array it returns the whole row (or, in general, a N-1 dimensional array)
Step3: The same thing can be achieved with using
Step4: We can assign new values to elements in an array using indexing
Step5: Index slicing
Step6: Array slices are mutable
Step7: We can omit any of the three parameters in M[lower
Step8: Negative indices counts from the end of the array (positive index from the begining)
Step9: Index slicing works exactly the same way for multidimensional arrays
Step10: Indexing and Array Memory Management
Step11: Note
Step12: <img src="images/storage_index.png" />
Step13: We can also index masks
Step14: Alternatively
Step15: This feature is very useful to conditionally select elements from an array, using for example comparison operators
Step16: Alternatively, we can use the condition (mask) array directly within brackets to index the array
Step17: Exercises on Indexing
Step18: np.repeat
Step19: np.tile
Step20: np.concatenate
Step21: np.hstack and np.vstack
Step22: Iterating over array elements
Step23: When we need to iterate over each element of an array and modify its elements, it is convenient to use the enumerate function to obtain both the element and its index in the for loop
Step25: Vectorizing functions
Step26: OK, that didn't work because we didn't write the Theta function so that it can handle vector input...
Step27: np.frompyfunc
Step28: Exercise
Step29: And we also have another vector adj of values that contains some adjusting factors that we might want to apply to each sample (row) of data in D
Step30: Naive Solution
Step31: This is a reasonable approach when coding in a low-level programming language
Step32: Nice, it's shorter too, and slightly faster! To appreciate the performance gain of our np.tile solution even more, we could try increasing the size of D to a bigger structure
Step33: The loop-in-Python method takes ~1.5 seconds, the stretching method takes ~48 milliseconds, roughly a 30x speedup.
Step34: How Broadcasting works
Step35: Element-wise array-array operations
Step36: If we multiply arrays with compatible shapes, we get an element-wise multiplication of each row
Step37: Matrix algebra
Step38: A new dedicated Infix operator for Matrix Multiplication
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(42) # Setting the random seed
# a vector: the argument to the array function is a Python list
v = np.random.rand(10)
v
# a matrix: the argument to the array function is a nested Python list
M = np.random.rand(10, 2)
M
# v is a vector, and has only one dimension, taking one index
v[0]
# M is a matrix, or a 2 dimensional array, taking two indices
M[1,1]
M[1]
M[1,:] # row 1
M[:,1] # column 1
M[0,0] = 1
M
# also works for rows and columns
M[1,:] = 0
M[:,1] = -1
M
a = np.array([1,2,3,4,5])
a
a[1:3]
a[1:3] = [-2,-3]
a
a[::] # lower, upper, step all take the default values
a[::2] # step is 2, lower and upper defaults to the beginning and end of the array
a[:3] # first three elements
a[3:] # elements from index 3
a = np.array([1,2,3,4,5])
a[-1] # the last element in the array
a[-3:] # the last three elements
A = np.array([[n+m*10 for n in range(5)]
for m in range(5)])
A
# a block from the original array
A[1:4, 1:4]
# strides
A[::2, ::2]
import numpy as np
FC = np.array([[1, 2, 3], [4, 5, 6],
[7, 8, 9], [10, 11, 12]], order='F')
CC = np.array([[1, 2, 3], [4, 5, 6],
[7, 8, 9], [10, 11, 12]], order='C')
FC[0, 1]
CC[0, 1]
FC.shape
CC.shape
row_indices = [1, 2, 3]
A[row_indices]
col_indices = [1, 2, -1] # remember, index -1 means the last element
A[row_indices, col_indices]
b = np.array([n for n in range(5)])
b
row_mask = np.array([True, False, True, False, False])
b[row_mask]
# same thing
row_mask = np.array([1,0,1,0,0], dtype=bool)
b[row_mask]
x = np.arange(0, 10, 0.5)
x
mask = (5 < x)
mask
x[mask]
x[(5 < x)]
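# Related (added): np.where converts the same boolean mask into integer index
# positions, which can then be used for fancy indexing.
indices = np.where(5 < x)
indices
x[indices]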
a = np.array([[1, 2], [3, 4]])
# repeat each element 3 times
np.repeat(a, 3)
# tile the matrix 3 times
np.tile(a, 3)
b = np.array([[5, 6]])
np.concatenate((a, b), axis=0)
np.concatenate((a, b.T), axis=1)
np.vstack((a,b))
np.hstack((a,b.T))
v = np.array([1,2,3,4])
for element in v:
print(element)
M = np.array([[1,2], [3,4]])
for row in M:
print("row", row)
for element in row:
print(element)
for row_idx, row in enumerate(M):
print("row_idx", row_idx, "row", row)
for col_idx, element in enumerate(row):
print("col_idx", col_idx, "element", element)
# update the matrix M: square each element
M[row_idx, col_idx] = element ** 2
# each element in M is now squared
M
def Theta(x):
    Scalar implementation of the Heaviside step function.
if x >= 0:
return 1
else:
return 0
Theta(np.array([-3,-2,-1,0,1,2,3]))
Theta_vec = np.vectorize(Theta)
Theta_vec(np.array([-3,-2,-1,0,1,2,3]))
Theta_ufunc = np.frompyfunc(Theta, 1, 1)
print("Result: ", Theta_ufunc(np.arange(4)))
D = np.array([ [0.3, 2.5, 3.5],
[2.9, 27.5, 0],
[0.4, 1.3, 23.9],
[14.4, 6, 2.3]])
adj = np.array([9, 4, 4])
%%timeit
# Create a new array filled with zeros, of the same shape as D.
result = np.zeros_like(D)
# Now multiply each row of D by adj. In Numpy, `*` is
# element-wise multiplication between two arrays.
for i in range(D.shape[0]):
result[i, :] = D[i, :] * adj
result
%%timeit
adj_stretch = np.tile(adj, (D.shape[0], 1))
D * adj_stretch
adj_stretch
D_large = np.random.rand(10**6, 10)
adj_large = np.random.rand(10)
D_large.shape, adj_large.shape
%%timeit
# Create a new array filled with zeros, of the same shape as D_large.
result_large = np.zeros_like(D_large)
# Now multiply each row of D_large by adj_large. In Numpy, `*` is
# element-wise multiplication between two arrays.
for i in range(D_large.shape[0]):
result_large[i, :] = D_large[i, :] * adj_large
%%timeit
adj_large_stretch = np.tile(adj_large, (D_large.shape[0], 1))
D_large * adj_large_stretch
%%timeit
## Back to our example
D * adj # Broadcasting
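# One more broadcasting sketch (added): a (4, 1) column vector broadcasts against
# D's shape (4, 3), scaling each row of D by a single factor.
row_weights = np.array([1.0, 2.0, 3.0, 4.0])[:, np.newaxis]
D * row_weights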
v1 = np.arange(0, 5)
v1 * 2
v1 + 2
A = np.array([[n+m*10 for n in range(5)] for m in range(5)])
print('A * 2: ', '\n', A * 2)
print('A + 2: ', '\n', A + 2)
A * A # element-wise multiplication
v1 * v1
A.shape, v1.shape
A * v1 #Broadcasting
np.dot(A, A)
np.dot(A, v1)
np.dot(v1, v1)
A @ v1
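# Added check: for 2-D arrays the @ operator (Python 3.5+) matches np.dot.
np.allclose(A @ A, np.dot(A, A))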
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <img src="resources/jupyter-main-logo.svg" alt="Jupyter" height="200" width="200">
Step3: Magic
Step4: Bash
Step5: HTML
Step6: Embed YouTube videos.
Step7: Configuration / Environment
Step8: Notebook specific configuration values
|
<ASSISTANT_TASK:>
Python Code:
some_global_variable = 6
def my_function(arg):
This is a docstring.
some_global_variable = 1
return some_global_variable
print(my_function(5))
some_global_variable
%time some_list = [x**x for x in range(9001)]
!sudo python3.6 -m pip install matplotlib
%matplotlib notebook
import matplotlib, numpy, pandas, seaborn
from matplotlib import pyplot as plt
ts = pandas.Series(numpy.random.randn(1000), index=pandas.date_range('1/1/2000', periods=1000))
ts = ts.cumsum()
df = pandas.DataFrame(numpy.random.randn(1000, 4), index=ts.index,
columns=['A', 'B', 'C', 'D'])
df = df.cumsum()
df.plot(); plt.legend(loc='best')
!dir
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
from IPython.core.display import HTML
css = open('style-table.css').read() + open('style-notebook.css').read()
HTML('<style>{}</style>'.format(css))
from IPython.display import YouTubeVideo
YouTubeVideo('HW29067qVWk')
!ipython profile locate
# Otherwise run: !ipython profile create
# Sample output:
# [ProfileCreate] Generating default config file: 'C:\\Users\\randall.white\\.ipython\\profile_default\\ipython_config.py'
# [ProfileCreate] Generating default config file: 'C:\\Users\\randall.white\\.ipython\\profile_default\\ipython_kernel_config.py'
import time
for i in range(5):
time.sleep(.5)
print(i)
time.sleep(10)
print("I'm waiting to see this...")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new bundle. See Building a System for more details.
Step2: Relevant Parameters
Step3: Influence on Orbits (Positions)
Step4: Influence on Light Curves (Fluxes)
Step5: To make things easier to compare, let's disable limb darkening
Step6: Since we doubled the distance from 1 to 2 m, we expect the entire light curve at 2 m to be divided by 4 (note the y-scales on the plots below).
Step7: Note that 'pblum' is defined such that a (spherical, non-eclipsed, non-limb darkened) star with a pblum of 4pi will contribute a flux of 1.0 at 1.0 m (the default distance).
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.2,<2.3"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
print(b.get_parameter(qualifier='distance', context='system'))
print(b.get_parameter(qualifier='t0', context='system'))
b.add_dataset('orb', times=np.linspace(0,3,101), dataset='orb01')
b.set_value('distance', 1.0)
b.run_compute(model='dist1')
b.set_value('distance', 2.0)
b.run_compute(model='dist2')
afig, mplfig = b['orb01'].plot(y='ws', show=True, legend=True)
b.add_dataset('lc', times=np.linspace(0,1,101), dataset='lc01')
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'logarithmic')
b.set_value_all('ld_coeffs', [0.,0.])
b.set_value('distance', 1.0)
b.run_compute(model='dist1', overwrite=True)
b.set_value('distance', 2.0)
b.run_compute(model='dist2', overwrite=True)
afig, mplfig = b['lc01'].plot(show=True, legend=True)
b.add_dataset('mesh', times=[0], dataset='mesh01', columns=['intensities@lc01', 'abs_intensities@lc01'])
b.set_value('distance', 1.0)
b.run_compute(model='dist1', overwrite=True)
b.set_value('distance', 2.0)
b.run_compute(model='dist2', overwrite=True)
print("dist1 abs_intensities: ", np.nanmean(b.get_value(qualifier='abs_intensities', component='primary', dataset='lc01', model='dist1')))
print("dist2 abs_intensities: ", np.nanmean(b.get_value(qualifier='abs_intensities', component='primary', dataset='lc01', model='dist2')))
print("dist1 intensities: ", np.nanmean(b.get_value(qualifier='intensities', component='primary', dataset='lc01', model='dist1')))
print("dist2 intensities: ", np.nanmean(b.get_value(qualifier='intensities', component='primary', dataset='lc01', model='dist2')))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Pandas
Step2: Here is a small amount of stock data for APPL
Step3: Read this as into a DataFrame
Step4: And view the HTML representation
Step5: SymPy
Step6: Vincent
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import display
import pandas
%%writefile data.csv
Date,Open,High,Low,Close,Volume,Adj Close
2012-06-01,569.16,590.00,548.50,584.00,14077000,581.50
2012-05-01,584.90,596.76,522.18,577.73,18827900,575.26
2012-04-02,601.83,644.00,555.00,583.98,28759100,581.48
2012-03-01,548.17,621.45,516.22,599.55,26486000,596.99
2012-02-01,458.41,547.61,453.98,542.44,22001000,540.12
2012-01-03,409.40,458.24,409.00,456.48,12949100,454.53
df = pandas.read_csv('data.csv')
df
from sympy.interactive.printing import init_printing
init_printing(use_latex='mathjax')
from __future__ import division
import sympy as sym
from sympy import *
x, y, z = symbols("x y z")
k, m, n = symbols("k m n", integer=True)
f, g, h = map(Function, 'fgh')
Rational(3,2)*pi + exp(I*x) / (x**2 + y)
a = 1/x + (x*sin(x) - 1)/x
a
(1/cos(x)).series(x, 0, 6)
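# Illustrative extra (assumption: not in the original demo): the same pretty-printing
# setup also renders unevaluated symbolic integrals.
Integral(exp(-x**2), (x, -oo, oo))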
import vincent
import pandas as pd
import pandas.io.data as web
import datetime
all_data = {}
date_start = datetime.datetime(2010, 1, 1)
date_end = datetime.datetime(2014, 1, 1)
for ticker in ['AAPL', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.DataReader(ticker, 'yahoo', date_start, date_end)
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
vincent.initialize_notebook()
line = vincent.Line(price[['AAPL', 'IBM', 'YHOO', 'MSFT']], width=600, height=300)
line.axis_titles(x='Date', y='Price')
line.legend(title='Ticker')
display(line)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Define kernels of neuronal response to static gratings
Step2: 2. Estimate neural response to preferred and opposite directions
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries
import numpy as np
%config InlineBackend.figure_format = 'retina'
%matplotlib inline
import matplotlib.pyplot as plt
kernel_fast = np.array([0, .5, 1, .8, .4, .2, .1, 0])
kernel_slow = np.hstack([np.arange(0,1,.2),np.arange(1,0,-.04)])
plt.figure(figsize=(5,6))
plt.subplot(2,1,1)
plt.plot(kernel_fast,'k')
plt.xlim((0,30))
plt.ylabel('Neural response\n(fast)',size=15)
plt.subplot(2,1,2)
plt.plot(kernel_slow,'k')
plt.xlim((0,30))
plt.xlabel('Time (a.u.)',size=20)
plt.ylabel('Neural response\n(slow)',size=15)
# Define times of sustained-response-inducing (slow)
# and transient-response-inducing (fast) stimuli
slow_event_times = np.arange(0,100,20)
fast_event_times = np.arange(10,110,20)
# Compute rasters of events
N = 200
slow_event_raster = np.zeros(N)
slow_event_raster[slow_event_times] = 1
fast_event_raster = np.zeros(N)
fast_event_raster[fast_event_times] = 1
# Compute trace of neural activity
slow_neural = np.convolve(slow_event_raster, kernel_slow, mode='same')
fast_neural = np.convolve(fast_event_raster, kernel_fast, mode='same')
neural = slow_neural + fast_neural
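# Illustrative plot (assumption: added for clarity, not in the original notebook):
# visualize the combined simulated neural trace.
plt.figure(figsize=(8, 3))
plt.plot(neural, 'k')
plt.xlabel('Time (a.u.)', size=15)
plt.ylabel('Simulated neural activity', size=15)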
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup
Step2: Sentences
Step3: Run the model
Step5: Semantic similarity
|
<ASSISTANT_TASK:>
Python Code:
#@title Copyright 2020 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
!pip install --quiet "tensorflow-text==2.8.*"
import seaborn as sns
from sklearn.metrics import pairwise
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text as text # Imports TF ops for preprocessing.
#@title Configure the model { run: "auto" }
BERT_MODEL = "https://tfhub.dev/google/experts/bert/wiki_books/2" # @param {type: "string"} ["https://tfhub.dev/google/experts/bert/wiki_books/2", "https://tfhub.dev/google/experts/bert/wiki_books/mnli/2", "https://tfhub.dev/google/experts/bert/wiki_books/qnli/2", "https://tfhub.dev/google/experts/bert/wiki_books/qqp/2", "https://tfhub.dev/google/experts/bert/wiki_books/squad2/2", "https://tfhub.dev/google/experts/bert/wiki_books/sst2/2", "https://tfhub.dev/google/experts/bert/pubmed/2", "https://tfhub.dev/google/experts/bert/pubmed/squad2/2"]
# Preprocessing must match the model, but all the above use the same.
PREPROCESS_MODEL = "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3"
sentences = [
"Here We Go Then, You And I is a 1999 album by Norwegian pop artist Morten Abel. It was Abel's second CD as a solo artist.",
"The album went straight to number one on the Norwegian album chart, and sold to double platinum.",
"Among the singles released from the album were the songs \"Be My Lover\" and \"Hard To Stay Awake\".",
"Riccardo Zegna is an Italian jazz musician.",
"Rajko Maksimović is a composer, writer, and music pedagogue.",
"One of the most significant Serbian composers of our time, Maksimović has been and remains active in creating works for different ensembles.",
"Ceylon spinach is a common name for several plants and may refer to: Basella alba Talinum fruticosum",
"A solar eclipse occurs when the Moon passes between Earth and the Sun, thereby totally or partly obscuring the image of the Sun for a viewer on Earth.",
"A partial solar eclipse occurs in the polar regions of the Earth when the center of the Moon's shadow misses the Earth.",
]
preprocess = hub.load(PREPROCESS_MODEL)
bert = hub.load(BERT_MODEL)
inputs = preprocess(sentences)
outputs = bert(inputs)
print("Sentences:")
print(sentences)
print("\nBERT inputs:")
print(inputs)
print("\nPooled embeddings:")
print(outputs["pooled_output"])
print("\nPer token embeddings:")
print(outputs["sequence_output"])
#@title Helper functions
def plot_similarity(features, labels):
  """Plot a similarity matrix of the embeddings."""
  cos_sim = pairwise.cosine_similarity(features)
  sns.set(font_scale=1.2)
  cbar_kws=dict(use_gridspec=False, location="left")
  g = sns.heatmap(
      cos_sim, xticklabels=labels, yticklabels=labels,
      vmin=0, vmax=1, cmap="Blues", cbar_kws=cbar_kws)
  g.tick_params(labelright=True, labelleft=False)
  g.set_yticklabels(labels, rotation=0)
  g.set_title("Semantic Textual Similarity")
plot_similarity(outputs["pooled_output"], sentences)
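# Illustrative follow-up (assumption: not in the original tutorial): print the raw
# cosine similarity between the first two sentences using the pooled embeddings.
sim_01 = pairwise.cosine_similarity(outputs["pooled_output"][:2])[0, 1]
print("Cosine similarity of sentences 0 and 1:", sim_01)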
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set a valid date frame for building the network.
Step2: Filter data according to date frame and export to .gexf file
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from bigbang.archive import Archive
from bigbang.archive import load as load_archive
import bigbang.parse as parse
import bigbang.graph as graph
import bigbang.mailman as mailman
import bigbang.process as process
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
from pprint import pprint as pp
import pytz
import os
#Insert a list of archive names
archives_names = ["6lo"]
cwd = os.getcwd()
archives_paths = list()
for archive_name in archives_names:
archives_paths.append('../../archives/'+archive_name+'.csv')
archives_list = [load_archive(archive_path).data for archive_path in archives_paths]
archives = Archive(pd.concat(archives_list))
archives_data = archives.data
# The oldest and most recent dates for the whole mailing lists are displayed, so you won't set an invalid time frame
print archives_data['Date'].min()
print archives_data['Date'].max()
#set the date frame
date_from = pd.datetime(2000,11,1,tzinfo=pytz.utc)
date_to = pd.datetime(2111,12,1,tzinfo=pytz.utc)
def filter_by_date(df,d_from,d_to):
return df[(df['Date'] > d_from) & (df['Date'] < d_to)]
#create filtered network
archives_data_filtered = filter_by_date(archives_data, date_from, date_to)
network = graph.messages_to_interaction_graph(archives_data_filtered)
#export the network in a format that you can open in Gephi.
#insert a file name
file_name = 'architecture_discuss_for_gephi.gexf'
nx.write_gexf(network, os.path.join(cwd, file_name))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercises: comparing the size of the giant component
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import networkx as nx
import seaborn as sns
%matplotlib inline
edges = np.genfromtxt('0.edges', dtype="int", delimiter=" ")
G = nx.read_edgelist('0.edges', delimiter=" ")
def total_edges(edges):
    # maximum number of possible edges in a simple undirected graph: N*(N-1)/2
    return (len(G.nodes())*(len(G.nodes())-1))/2
def p_edges(edges):
    # empirical edge probability: observed edges / possible edges
    return float(len(edges))/total_edges(edges)
N = len(G.nodes())
print("N=%d" %(N))
p = 2*len(edges)/(N*(N-1))
G_ran = nx.gnp_random_graph(N, p)
ax = sns.distplot(list(G.degree().values()))
sns.distplot(list(G_ran.degree().values()))
# Plot results
import matplotlib.pyplot as plt
import plotly.plotly as py
from plotly.graph_objs import Scatter, Figure, Layout
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
def biggest_component_size(N,p):
G_ran = nx.gnp_random_graph(N,p)
components_ran = list(nx.connected_component_subgraphs(G_ran))
components_ran_len = [len(component) for component in components_ran]
max_component_ran = 0
max_index_ran = -1
for i in range(0, len(components_ran_len)):
if components_ran_len[i] > max_component_ran:
max_component_ran = components_ran_len[i]
max_index_ran = i
random_network_biggest_component_size = (len(components_ran[max_index_ran]))
components_real = list(nx.connected_component_subgraphs(G))
components_real_len = [len(component) for component in components_real]
max_component_real = 0
max_index_real = -1
for i in range(0, len(components_real_len)):
if components_real_len[i] > max_component_real:
max_component_real = components_real_len[i]
max_index_real = i
real_network_biggest_component_size = (len(components_real[max_index_real]))
return (random_network_biggest_component_size, real_network_biggest_component_size)
def component_comparison(number_of_iterations, N, p):
rand = []
real = []
for i in range(number_of_iterations):
result = biggest_component_size(N,p)
rand.append(result[0])
real.append(result[1])
sample = list(range(1,number_of_iterations+1))
plt.plot(sample, rand, "o")
plt.plot(sample, real, "o")
component_comparison(100,N,p)
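# Illustrative summary (assumption: not part of the original exercise): report the
# giant-component fraction for one random realization vs. the real network.
rand_size, real_size = biggest_component_size(N, p)
print("Random graph giant component fraction: %.3f" % (rand_size/float(N)))
print("Real network giant component fraction: %.3f" % (real_size/float(N)))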
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import section specific modules
Step3: 2.3. Fourier Series<a id='math
Step4: That should be good enough for our purposes here. Next we create a function to sum the Fourier series.
Step5: Let's see what happens if we decompose a square wave.
Step6: As can be seen from the figure, the Fourier series approximates the square wave. However, at such a low order (i.e. $m = 10$) it doesn't do a very good job. Actually, an infinite number of Fourier series coefficients are required to fully capture a square wave. Below is an interactive demonstration that allows you to vary the parameters of the Fourier series decomposition. Note in particular what happens if we make the period too small. Also feel free to apply it to functions other than the square wave (but make sure to adjust the domain accordingly).
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import HTML
HTML('../style/course.css') #apply general CSS
from IPython.display import HTML
from ipywidgets import interact
HTML('../style/code_toggle.html')
def FS_coeffs(x, m, func, T=2.0*np.pi):
    """
    Computes Fourier series (FS) coeffs of func

    Input:
        x = input vector at which to evaluate func
        m = the order of the coefficient
        func = the function to find the FS of
        T = the period of func (defaults to 2 pi)
    """
    # Evaluate the integrand
    am_int = func(x)*np.exp(-1j*2.0*m*np.pi*x/T)
    # Use trapezoidal integration to get the coefficient
    am = np.trapz(am_int,x)
    return am/T
def FS_sum(x, m, func, period=None):
    # If no period is specified use entire domain
    if period is None:
        period = np.abs(x.max() - x.min())
    # Evaluate the coefficients and sum the series
    f_F = np.zeros(x.size, dtype=np.complex128)
    for i in range(-m, m+1):  # range works in both Python 2 and 3 (xrange is Python 2 only)
        am = FS_coeffs(x, i, func, T=period)
        f_F += am*np.exp(2.0j*np.pi*i*x/period)
    return f_F
# define square wave function
def square_wave(x):
I = np.argwhere(np.abs(x) <= 0.5)
tmp = np.zeros(x.size)
tmp[I] = 1.0
return tmp
# Set domain and compute square wave
N = 250
x = np.linspace(-1.0,1.0,N)
# Compute the FS up to order m
m = 10
sw_F = FS_sum(x, m, square_wave, period=2.0)
# Plot result
plt.figure(figsize=(15,5))
plt.plot(x, sw_F.real, 'g', label=r'$ Fourier \ series $')
plt.plot(x, square_wave(x), 'b', label=r'$ Square \ wave $')
plt.title(r"$FS \ decomp \ of \ square \ wave$",fontsize=20)
plt.xlabel(r'$x$',fontsize=18)
plt.ylim(-0.05,1.5)
plt.legend()
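# Illustrative check (assumption: not part of the original text): the square-wave
# FS coefficients decay roughly like 1/m, which is why a finite sum cannot fully
# capture the discontinuities.
for order in [1, 3, 5, 7, 9]:
    print("|a_%i| = %.4f" % (order, np.abs(FS_coeffs(x, order, square_wave, T=2.0))))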
def inter_FS(x,m,func,T):
f_F = FS_sum(x, m, func, period=T)
plt.plot(x,f_F.real,'b')
plt.plot(x,func(x),'g')
interact(lambda m,T:inter_FS(x=np.linspace(-1.0,1.0,N),m=m,func=square_wave,T=T),
m=(5,100,1),T=(0,2*np.pi,0.5)) and None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps
Step2: Inline Question #1
Step3: You should expect to see approximately 27% accuracy. Now lets try out a larger k, say k = 5
Step5: You should expect to see a slightly better performance than with k = 1.
Step6: Cross-validation
|
<ASSISTANT_TASK:>
Python Code:
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print(X_train.shape, X_test.shape)
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print(dists.shape)
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print('Difference was: %f' % (difference, ))
if difference < 0.001:
print('Good! The distance matrices are the same')
else:
print('Uh-oh! The distance matrices are different')
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print('Difference was: %f' % (difference, ))
if difference < 0.001:
print('Good! The distance matrices are the same')
else:
print('Uh-oh! The distance matrices are different')
# Let's compare how fast the implementations are
def time_function(f, *args):
    """
    Call a function f with args and return the time (in seconds) that it took to execute.
    """
    import time
    tic = time.time()
    f(*args)
    toc = time.time()
    return toc - tic
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print('Two loop version took %f seconds' % two_loop_time)
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print('One loop version took %f seconds' % one_loop_time)
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print('No loop version took %f seconds' % no_loop_time)
# you should see significantly faster performance with the fully vectorized implementation
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
################################################################################
# TODO: #
# Split up the training data into folds. After splitting, X_train_folds and #
# y_train_folds should each be lists of length num_folds, where #
# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #
# Hint: Look up the numpy array_split function. #
################################################################################
X_train_folds = np.array_split(X_train, num_folds)
y_train_folds = np.array_split(y_train, num_folds)
################################################################################
# END OF YOUR CODE #
################################################################################
# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
k_to_accuracies = {}
################################################################################
# TODO: #
# Perform k-fold cross validation to find the best value of k. For each #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #
# where in each case you use all but one of the folds as training data and the #
# last fold as a validation set. Store the accuracies for all fold and all #
# values of k in the k_to_accuracies dictionary. #
################################################################################
for k in k_choices:
accuracies = []
for i in range(num_folds):
X_train_cv = np.vstack(X_train_folds[0:i] + X_train_folds[i+1:])
y_train_cv = np.hstack(y_train_folds[0:i] + y_train_folds[i+1:])
X_valid_cv = X_train_folds[i]
y_valid_cv = y_train_folds[i]
classifier.train(X_train_cv, y_train_cv)
dists = classifier.compute_distances_no_loops(X_valid_cv)
accuracy = float(np.sum(classifier.predict_labels(dists, k) == y_valid_cv)) / y_valid_cv.shape[0]
accuracies.append(accuracy)
k_to_accuracies[k] = accuracies
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
for accuracy in k_to_accuracies[k]:
print('k = %d, accuracy = %f' % (k, accuracy))
# plot the raw observations
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
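# Optional helper (assumption: not required by the assignment): pick the k with the
# highest mean cross-validation accuracy programmatically.
best_k_auto = k_choices[int(np.argmax(accuracies_mean))]
print('k with highest mean cross-validation accuracy:', best_k_auto)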
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 1
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Write an NWB file
Step2: Read the NWB file
Step3: Using Neo NWBIO
|
<ASSISTANT_TASK:>
Python Code:
blocks = []
# Define Neo blocks
bl0 = neo.Block(name='First block')
bl1 = neo.Block(name='Second block')
bl2 = neo.Block(name='Third block')
print("bl0.segments = ", bl0.segments)
print("bl1.segments = ", bl1.segments)
print("bl2.segments = ", bl2.segments)
blocks = [bl0, bl1, bl2]
print("blocks = ", blocks)
num_seg = 4 # number of segments
for blk in blocks:
for ind in range(num_seg): # number of Segment
seg = neo.Segment(name='segment %s %d' % (blk.name, ind), index=ind)
blk.segments.append(seg)
for seg in blk.segments: # AnalogSignal objects
# 3 AnalogSignals
a = AnalogSignal(np.random.randn(num_seg, 44)*pq.nA, sampling_rate=10*kHz)
b = AnalogSignal(np.random.randn(num_seg, 64)*pq.nA, sampling_rate=10*kHz)
c = AnalogSignal(np.random.randn(num_seg, 33)*pq.nA, sampling_rate=10*kHz)
seg.analogsignals.append(a)
seg.analogsignals.append(b)
seg.analogsignals.append(c)
blocks
filename = 'My_first_dataset_neo9.nwb'
writer = NWBIO(filename, mode='w')
writer.write(blocks)
io = pynwb.NWBHDF5IO(filename, mode='r') # Open a file with NWBHDF5IO
_file = io.read()
print(_file)
_file.acquisition
reader = NWBIO(filename, mode='r')
all_blocks = reader.read()
all_blocks
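# Illustrative check (assumption: not in the original example): confirm that the
# round trip preserved the block/segment structure written above.
for blk in all_blocks:
    print(blk.name, "->", len(blk.segments), "segments")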
first_block = reader.read_block() # Read the first block
first_block
# Plotting settings
%matplotlib inline
show_bar_plot = False # Change setting to plot distribution of object sizes in the HDF5 file
plot_single_file = True # Plot all files or a single example file
output_filenames = filename
print("output_filenames = ", output_filenames)
# Select the files to plot
filenames = output_filenames
print("filenames = ", filenames)
# Changed
from nwb_docutils.doctools.render import HierarchyDescription, NXGraphHierarchyDescription
import matplotlib.pyplot as plt
# Create the plots for all files
file_hierarchy = HierarchyDescription.from_hdf5(filenames)
file_graph = NXGraphHierarchyDescription(file_hierarchy)
fig = file_graph.draw(show_plot=False,
figsize=(12,16),
label_offset=(0.0, 0.0065),
label_font_size=10)
plot_title = filenames + " \n " + "#Datasets=%i, #Attributes=%i, #Groups=%i, #Links=%i" % (len(file_hierarchy['datasets']), len(file_hierarchy['attributes']), len(file_hierarchy['groups']), len(file_hierarchy['links']))
plt.title(plot_title)
plt.show()
# Show a sorted bar plot with the sizes of all datasets in the file
if show_bar_plot:
d = {i['name']: np.prod(i['size']) for i in file_hierarchy['datasets']}
l = [w for w in sorted(d, key=d.get, reverse=True)]
s = [d[w] for w in l]
p = np.arange(len(l))
fig,ax = plt.subplots(figsize=(16,7))
ax.set_title(filename)
ax.bar(p, s, width=1, color='r')
ax.set_xticks(p+1)
ax.set_xticklabels(l)
ax.set_yscale("log", nonposy='clip')
fig.autofmt_xdate(bottom=0.2, rotation=90, ha='right')
plt.show()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define the model to train
Step2: Set up the remote executor
Step3: Run the training
|
<ASSISTANT_TASK:>
Python Code:
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
import collections
import time
import tensorflow as tf
import tensorflow_federated as tff
source, _ = tff.simulation.datasets.emnist.load_data()
def map_fn(example):
return collections.OrderedDict(
x=tf.reshape(example['pixels'], [-1, 784]), y=example['label'])
def client_data(n):
ds = source.create_tf_dataset_for_client(source.client_ids[n])
return ds.repeat(10).batch(20).map(map_fn)
train_data = [client_data(n) for n in range(10)]
input_spec = train_data[0].element_spec
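# Quick inspection (assumption: added for illustration, not in the original tutorial):
# number of simulated clients and the structure each client dataset yields.
print('Number of client datasets:', len(train_data))
print('Element spec:', input_spec)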
def model_fn():
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(784,)),
tf.keras.layers.Dense(units=10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
return tff.learning.from_keras_model(
model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
trainer = tff.learning.build_federated_averaging_process(
model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.02))
def evaluate(num_rounds=10):
state = trainer.initialize()
for round in range(num_rounds):
t1 = time.time()
state, metrics = trainer.next(state, train_data)
t2 = time.time()
print('Round {}: loss {}, round time {}'.format(round, metrics.loss, t2 - t1))
import grpc
ip_address = '0.0.0.0' #@param {type:"string"}
port = 80 #@param {type:"integer"}
channels = [grpc.insecure_channel(f'{ip_address}:{port}') for _ in range(10)]
tff.backends.native.set_remote_execution_context(channels)
evaluate()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Using 8 estimators (one per core when up to 8 cores/jobs are used)
Step2: Plot of the training time in seconds of each RFC against the number of cores used (number of jobs)
|
<ASSISTANT_TASK:>
Python Code:
# Load neccessary libraries changed pandas import for convinience
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
# creation of a dataset consisting of 250k samples
# with the following parameters
samples = 250*1000
features = 40
informative = 5
redundant=4
X, Y = make_classification(n_samples=samples,
n_features=features,
n_informative=informative,
n_redundant=4)
# Split-out validation dataset
validation_size = 0.20
seed = 7
scoring = 'accuracy'
X_train, X_validation, Y_train, Y_validation = train_test_split(X,
Y,
test_size=validation_size,
random_state=seed)
# Create Random Forest Classifier
estimators = 8 # For mapping one estimator per core in case of max 8 cores
jobs = 8
time_it_results = []
for _ in range(jobs):
rf_class = RandomForestClassifier(n_estimators=estimators, n_jobs=(_+1))
tr = %timeit -o rf_class.fit(X_train, Y_train)
time_it_results.append(tr)
# best_times are extracted
best_times = [timer.best for timer in time_it_results]
x = np.arange(1,9)
labels = ['%i. Core' % i for i in x]
fig = plt.figure()
fig.suptitle('Training Time per number of cores')
ax = fig.add_subplot(111)
ax.set_xlabel('Number of cores')
ax.set_ylabel('Training time (s)')
ax.plot(x, best_times)
plt.xticks(x, labels, rotation='vertical')
plt.show()
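# Illustrative extra (assumption: not in the original notebook): report the speedup
# of each core count relative to single-core training.
speedups = [best_times[0]/t for t in best_times]
for cores, s in zip(x, speedups):
    print("%d core(s): %.2fx speedup" % (cores, s))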
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This notebook will generate a file in the current directory with the name "changelog_VERSION.md". You can edit this file and prepend it to the front of the CHANGELOG file for the package release.
Step2: Total commits by subpackage
Step3: List Contributors
Step12: Disaggregate by PR, Issue
|
<ASSISTANT_TASK:>
Python Code:
# get date of last tag
from subprocess import Popen, PIPE
x, err = Popen('git log -1 --tags --simplify-by-decoration --pretty="%ai"| cat', stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True).communicate()
start_date = x.split()[0].decode('utf-8')
start_date
# today's date
import datetime
release_date = str(datetime.datetime.today()).split()[0]
release_date
package_name = 'giddy'
# release_date = '2019-12-20'
# start_date = '2019-12-20'
from __future__ import print_function
import os
import json
import re
import sys
import pandas
from datetime import datetime, timedelta
from time import sleep
from subprocess import check_output
try:
from urllib import urlopen
except:
from urllib.request import urlopen
import ssl
import yaml
context = ssl._create_unverified_context()
CWD = os.path.abspath(os.path.curdir)
CWD
since_date = '--since="{start}"'.format(start=start_date)
since_date
since = datetime.strptime(start_date+" 0:0:0", "%Y-%m-%d %H:%M:%S")
since
# get __version__
f = "../{package}/__init__.py".format(package=package_name)
with open(f, 'r') as initfile:
exec(initfile.readline())
cmd = ['git', 'log', '--oneline', since_date]
ncommits = len(check_output(cmd).splitlines())
ncommits
identities = {'Levi John Wolf': ('ljwolf', 'Levi John Wolf'),
'Serge Rey': ('Serge Rey', 'Sergio Rey', 'sjsrey', 'serge'),
'Wei Kang': ('Wei Kang', 'weikang9009'),
'Dani Arribas-Bel': ('Dani Arribas-Bel', 'darribas')
}
def regularize_identity(string):
string = string.decode()
for name, aliases in identities.items():
for alias in aliases:
if alias in string:
string = string.replace(alias, name)
if len(string.split(' '))>1:
string = string.title()
return string.lstrip('* ')
author_cmd = ['git', 'log', '--format=* %aN', since_date]
from collections import Counter
ncommits = len(check_output(cmd).splitlines())
all_authors = check_output(author_cmd).splitlines()
counter = Counter([regularize_identity(author) for author in all_authors])
# global_counter += counter
# counters.update({'.'.join((package,subpackage)): counter})
unique_authors = sorted(set(all_authors))
unique_authors = counter.keys()
unique_authors
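# Illustrative summary (assumption: added for clarity, not in the original notebook):
# commit counts per (regularized) author since the last tag.
for author, n in counter.most_common():
    print("%s: %d commits" % (author, n))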
from datetime import datetime, timedelta
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
element_pat = re.compile(r'<(.+?)>')
rel_pat = re.compile(r'rel=[\'"](\w+)[\'"]')
def parse_link_header(headers):
link_s = headers.get('link', '')
urls = element_pat.findall(link_s)
rels = rel_pat.findall(link_s)
d = {}
for rel,url in zip(rels, urls):
d[rel] = url
return d
def get_paged_request(url):
    """get a full list, handling APIv3's paging"""
    results = []
    while url:
        #print("fetching %s" % url, file=sys.stderr)
        f = urlopen(url)
        results.extend(json.load(f))
        links = parse_link_header(f.headers)
        url = links.get('next')
    return results
def get_issues(project="pysal/giddy", state="closed", pulls=False):
    """Get a list of the issues from the Github API."""
    which = 'pulls' if pulls else 'issues'
    url = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i" % (project, which, state, PER_PAGE)
    return get_paged_request(url)
def _parse_datetime(s):
    """Parse dates in the format returned by the Github API."""
    if s:
        return datetime.strptime(s, ISO8601)
    else:
        return datetime.fromtimestamp(0)
def issues2dict(issues):
    """Convert a list of issues to a dict, keyed by issue number."""
    idict = {}
    for i in issues:
        idict[i['number']] = i
    return idict
def is_pull_request(issue):
    """Return True if the given issue is a pull request."""
    return 'pull_request_url' in issue
def issues_closed_since(period=timedelta(days=365), project="pysal/pysal", pulls=False):
    """
    Get all issues closed since a particular point in time. period
    can either be a datetime object, or a timedelta object. In the
    latter case, it is used as a time before the present.
    """
    which = 'pulls' if pulls else 'issues'
    if isinstance(period, timedelta):
        period = datetime.now() - period
    url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, which, period.strftime(ISO8601), PER_PAGE)
    allclosed = get_paged_request(url)
    # allclosed = get_issues(project=project, state='closed', pulls=pulls, since=period)
    filtered = [i for i in allclosed if _parse_datetime(i['closed_at']) > period]
    # exclude rejected PRs
    if pulls:
        filtered = [ pr for pr in filtered if pr['merged_at'] ]
    return filtered
def sorted_by_field(issues, field='closed_at', reverse=False):
    """Return a list of issues sorted by closing date."""
    return sorted(issues, key = lambda i:i[field], reverse=reverse)
def report(issues, show_urls=False):
    """Summary report about a list of issues, printing number and title."""
    # titles may have unicode in them, so we must encode everything below
    if show_urls:
        for i in issues:
            role = 'ghpull' if 'merged_at' in i else 'ghissue'
            print('* :%s:`%d`: %s' % (role, i['number'],
                  i['title'].encode('utf-8')))
    else:
        for i in issues:
            print('* %d: %s' % (i['number'], i['title'].encode('utf-8')))
all_issues = {}
all_pulls = {}
total_commits = 0
#prj='pysal/libpysal'
prj = 'pysal/{package}'.format(package=package_name)
issues = issues_closed_since(since, project=prj,pulls=False)
pulls = issues_closed_since(since, project=prj,pulls=True)
issues = sorted_by_field(issues, reverse=True)
pulls = sorted_by_field(pulls, reverse=True)
n_issues, n_pulls = map(len, (issues, pulls))
n_total = n_issues + n_pulls
issue_listing = []
for issue in issues:
entry = "{title} (#{number})".format(title=issue['title'],number=issue['number'])
issue_listing.append(entry)
pull_listing = []
for pull in pulls:
entry = "{title} (#{number})".format(title=pull['title'],number=pull['number'])
pull_listing.append(entry)
pull_listing
message = "We closed a total of {total} issues (enhancements and bug fixes) through {pr} pull requests".format(total=n_total, pr=n_pulls)
message = "{msg}, since our last release on {previous}.".format(msg=message, previous=str(start_date))
message
message += "\n\n## Issues Closed\n"
print(message)
issues = "\n".join([" - "+issue for issue in issue_listing])
message += issues
message += "\n\n## Pull Requests\n"
pulls = "\n".join([" - "+pull for pull in pull_listing])
message += pulls
print(message)
people = "\n".join([" - "+person for person in unique_authors])
print(people)
message +="\n\nThe following individuals contributed to this release:\n\n{people}".format(people=people)
print(message)
head = "# Version {version} ({release_date})\n\n".format(version=__version__, release_date=release_date)
# new_content = head+message+"\n"
# print(new_content)
# #insert the new changes in the begining of CHANGELOG.md
# with open("../CHANGELOG.md", 'r+') as file:
# content = file.read()
# file.seek(0, 0)
# file.write(new_content+ content)
# outfile = 'changelog_{version}.md'.format(version=__version__)
outfile = 'changelog.md'
with open(outfile, 'w') as of:
of.write(head+message)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variables and data types
Step2: In Python we can use as a variable name any sequence of alphanumeric characters, as long as it starts with a letter of the alphabet and is not a word reserved by the language itself (typically, function names and keywords).
Step3: In the previous example, we declared three different variables. However, the type of data we were storing was the same
Step4: The values held by the variables can be reassigned at any time. That said, they only store the last value we assigned.
Step5: Arithmetic operations
Step6: If we go back to what we learned earlier about variables, we will see that we can declare variables from mathematical operations, for example
Step7: Working with strings
Step8: Careful! We can define as variable values strings made of sequences of digits, as long as they are declared between quotation marks. It is very important to understand the difference between
Step9: Conversion between strings and integers
Step10: To do the opposite, that is, to turn strings into integers, we can use the int() function.
Step11: Remember, any alphanumeric sequence enclosed in quotation marks will be treated as a string, even if it consists only of digits.
Step12: Lists
Step13: Lists are ordered data structures, which means we can access individual elements through an index (the position they occupy in the sequence). The index is specified by giving the list name and an integer between square brackets
Step14: We can access slices of a list using a special notation in the index brackets: nombre_de_lista[n
Step15: Strings, like lists, are ordered structures. We have not mentioned it before, but we can use this same index notation to access elements and slices of a string.
Step16: List methods
Step17: Tuples
Step18: Dictionaries
Step19: We can access the elements of the dictionary through the name of the key
Step20: Dictionary methods
Step21: Dictionaries can contain other nested dictionaries, which means we can handle fairly complex structures. As an example
Step22: Final in-class exercise
Step23: To simulate a collection of films and series watched in chronological order, I create a Python list to which I add the film records as I watch them.
|
<ASSISTANT_TASK:>
Python Code:
print('Esto es un mensaje')
# NOTA: en Python, las líneas que comienzan con # son comentarios
# El intérprete no las lee. Los humanos sí deberíamos leerlas :-)
mivariable = 34
edad = 25
year = 1992
print(mivariable)
print(year)
print('mivariable')
print('year')
print('El niño come manzanas.')
print(255666)
# esta celda dará error, al menos mientras no declaremos la variable llamada otraVariable
print(otraVariable)
# números enteros (integer)
hijos = 3
# números reales (float), siempre indicando los decimales con punto
precio = 350.25
longitud = 1.5
# cadenas de texto (string), siempre entre comillas simples o dobles
nombre = "Pedro"
apellidos = 'Sanz Hernández'
# valores booleanos (bool): solo pueden ser True o False, escritos tal cual sin comillas
animal = True
mineral = False
# imprimimos algunos de ellos por pantalla
print(hijos)
print("Me llamo", nombre, apellidos) # fíjate cómo puedo imprimir más de una cosa con la función print
print(precio)
print(mineral)
nombre = 'Ana'
apellidos = 'Serrano de la Oca'
print(nombre, apellidos)
#suma
print(5+6)
# resta
print(10-3)
# multiplicación
print(25*6)
# división
print(2558/800000)
# potencias
print(2**3)
# módulo hace referencia al resto de la división 10/2.
print(10%2)
print(6+5+12*2)
print((6+5+12)*2)
print(6+5+(12*2))
# una suma sencilla
suma = 9+5
print(suma)
# calculamos el área de un cuadrado
lado = 1.5
area = lado**2 # es equivalente a lado*lado
print(area)
# vamos a calcular el precio final de un producto cuyo valor es 19,95€
iva = 21
precio = 19.95
precio_final = precio + (precio * iva)/100
print(precio_final)
# calculamos la edad actual de una persona
ahora = 2016
fechaNacimiento = 1985
edad = ahora - fechaNacimiento
print(edad)
nombre = "Godofredo de Orléans"
oracion = 'GREEN COLORLESS IDEAS SLEEP FURIOUSLY'
parrafo = '''En un lugar de la Mancha, de cuyo nombre no quiero acordarme,
no ha mucho tiempo que vivía un "hidalgo" de los de lanza en astillero,
adarga antigua, rocín flaco y galgo corredor. Una olla de algo más vaca
que carnero, salpicón las más noches, duelos y quebrantos los sábados,
lentejas los viernes, algún palomino de añadidura los domingos, consumían
las tres partes de su hacienda.'''
numero1 = 45852236
numero2 = '45852236'
# podemos realizar operaciones aritméticas entre números
print(numero1 + 1)
# pero no entre cadenas y números: esto da un error
print(numero2 + 1)
# sin embargo, sí puedo utilizar el operador '+' para concatenar varias cadenas
print(numero2 + '1')
print('¡Hola' + ' ' + 'amigo!')
print("otra", "vez " * 3)
print("otra" + "vez" * 3)
# guardamos en numero1 un entero
numero1 = 45852236
# en numero2, guardamos el valor de numero1 convertido a cadena de texto con la función str()
numero2 = str(numero1)
# con la función type() imprimimos por pantalla el tipo de dato que guarda una variable
print(type(numero1)) # es un entero: int
print(type(numero2)) # es una cadena: str
print("\nOtros tipos de datos son:")
print(type(2.3))
print(type(True))
print(type(""))
# guardamos en numero1 una cadena
numero1 = '45852236'
# en numero2, guardamos el valor de numero1 convertido a entero con la función int()
numero2 = int(numero1)
# con la función type() imprimimos por pantalla el tipo de dato que guarda una variable
print(type(numero1)) # es una cadena: str
print(type(numero2)) # es un entero: int
# esto no va a funcionar :-/ no podemos convertir cadenas a enteros
otracosa = "hola amigo"
print(int(otracosa))
print(len(nombre))
print(len(oracion))
print(len(parrafo))
print(len(nombre)) # devuelve la longitud (en número de caracteres) de una cadena
print(oracion.lower()) # transforma una cadena a minúsculas (lowercas)
print(oracion)
print(parrafo.upper()) # transforma una cadena a mayúsculas (uppercas)
print(parrafo) #
print('AbCdEfGhIjKlMnÑoPqRsTuVwXyZ'.swapcase()) # transforma mayúsculas a minúsculas y viceversa
# incluso funciona con las letras con diacríticos
print("áéíóúüûÁÉÍÓÚÛÜ".swapcase())
numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
dias = ["lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo"]
misCosas = [23.4, True, oracion.lower(), len(nombre)]
listaVacia = []
# los imprimimos por pantalla
print(numeros)
print(dias)
print(misCosas)
print(listaVacia)
print(type(numeros))
print(len(numeros))
print(len(misCosas))
print('el primer día de la semana es', dias[0])
print('el tercer día de la semana es ' + dias[2])
print('y el último es', dias[-1])
print('el tercer día de la semana es ' + dias[2] + ' y el sexto día es ' + dias[-2])
print('el tercer día de la semana es', dias[2], 'y el sexto día es', dias[-2])
# podemos utilizar los índices también para reasignar valores a los elementos de una lista
# p. ej., cambiamos el primer valor de la lista misCosas
misCosas[0] = 44
print(misCosas)
# ¡OJO! si intentamos acceder a un índice que no existe, el intérprete nos dará error
print(misCosas[8])
# imprime los tres primeros elementos de la variable dias
print(dias[:3])
# imprime elementos del 3º al 6ª de la variable números
print(numeros[2:6])
# imprime los tres últimos números de la variable números
print(numeros[-3:-1])
print(dias[3:5])
cadena = 'perrogatorana'
print(cadena[:5])
print(cadena[5:9])
print(cadena[-4:])
# practica los índices en listas y en cadenas, es muy importante tener solutura
pal1 = 'perro'
pal2 = 'soles'
pal3 = 'azul'
pal4 = 'amigos'
pal5 = 'cafés'
pal6 = 'hola'
print(pal1[-1] == 's')
print(pal2[-1] == 's')
print(pal3[-1] == 's')
print(pal4[-1] == 's')
print(pal5[-1] == 's')
print(pal6[-1] == 's')
print("---------------------")
print(pal1.endswith('s'))
print(pal2.endswith('s'))
print(pal3.endswith('s'))
print(pal4.endswith('s'))
print(pal5.endswith('s'))
print(pal6.endswith('s'))
# creamos una lista vacía
miLista = []
# ¿está realmente vacía?
print(len(miLista))
# ¿qué posición ocupa "jueves" o "domingo" en la lista días
print(dias.index('jueves'))
print(dias.index('domingo'))
# esto, sin embargo, dará error
print(dias.index('DOMINGO'))
miLista = []
# añadimos varios elementos nuevos y vemos cómo va aumentando la lista
miLista.append(2)
print(miLista)
miLista.append('palabra')
print(miLista)
miLista.append(25**3)
print(miLista)
# añadimos como cuarto elemento otra lista de elementos: fíjate en la estructura del resultado
miLista.append(numeros)
print(miLista)
print(len(miLista))
# ¿cuántos elementos tiene miLista?
# estas dos líneas son equivalentes, pero tienen sutiles diferencias.
# aquí, concatenamos con el símbolo + tres cadenas dentro de una misma intrucción print
# uno de los elementos es originariamente un número entero, por eso hay que transformarlo antes con str
print('la lista miLista tiene ' + str(len(miLista)) + ' elementos')
# aquí, imprimimos tres elementos diferentes en una misma instrucción print
print('la lista miLista tiene', len(miLista), 'elementos')
print(miLista)
# insertamos como segundo elemento otro nuevo valor
miLista.insert(1, 'segundo elemento')
print(miLista)
miLista[1] = "second element"
print(miLista)
# más ejemplos con los métodos insert y append
l = []
print(l)
l.append('hola')
print(l)
l.append('adiós')
print(l)
l.insert(1, 'segundo elemento')
print(l)
l.insert(2, 'otra cosa')
print(l)
dias = ["lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo"]
numeros = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(dias)
# eliminarmos el elemento 2 y el elemento "palabra" de miLista
dias.pop(2)
print(dias)
dias.remove('sábado')
print(dias)
letras = ['a', 'b', 'c', 'a']
print(letras.count('aaaaa'))
letras.remove('a')
print(letras)
print(letras.count('a'))
# también puedo eliminar el último elemento de la lista
miLista.pop(-1)
print(miLista)
# más pruebas concatenando listas
dias = ["lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo"]
dias.extend(numeros)
print(dias)
# fíjate en que la lista numeros crece de manera indefinida
numeros.extend(dias)
print(numeros)
# creo tres tripletas, cada una almacenando el nombre, apellido y sexo de unas personas ficticias
tripleta1 = ("Carlos", "Pujol", "H")
tripleta2 = ("Montse", "Santos", "M")
tripleta3 = ("Ana", "Ruiz", "M")
# creo una lista de personas y añado las tripletas por orden
personas = []
personas.append(tripleta1)
personas.append(tripleta2)
personas.append(tripleta3)
print(personas)
# las tuplas son secuencias ordenadas, y como tales podemos acceder a sus elementos
# a través de índices numéricos
print(len(personas[0]))
print(personas[1][1])
# entre llaves, separando claves y valores con dos puntos, y separando pares con comas
victor = {"nombre": "Victor", "apellido": "Peinado", "sexo": "H", "edad": 38}
print(victor)
# declarando el diccionario como una estructura vacía y añadiendo los pares clave:valor después
antonio = {}
antonio["nombre"] = "Antonio"
antonio["apellido"] = "Santos"
antonio["sexo"] = "H"
antonio["edad"] = 26
print(antonio)
# creo una lista de diccionarios: OJO, la lista sí está ordenada
personas = [victor, antonio]
print(personas)
print(type(personas))
print(type(personas[-1]))
print(type(personas[-1]["edad"]))
print(victor['nombre'])
print(antonio['apellido'])
print(personas[1]['nombre'])
# ¿cuántos pares clave:valor tiene el diccionario antonio?
print(len(antonio))
# dame las claves
print(antonio.keys())
# dame los valores
print(antonio.values())
# recuerda que para crear nuevos pares clave:valor, basta con asignarles un valor
antonio['altura'] = 1.79
# podemos comprobar si el diccionario tiene determinadas claves
print('email' in antonio)
print('altura' in antonio)
print(antonio)
# eliminamos la clave altura de antonio
antonio.pop('altura')
print('altura' in antonio) # y esto ahora devuelve False
print(antonio)
# creo un diccionario de la familia Pig, en principio vacío
familiaPig = {}
# creo varios diccionarios por cada miembro de la familia
peppa = {"nombre": "Peppa", "apellido": "Pig", "email": "peppa@pig.com",}
george = {"nombre": "George", "apellido": "Pig", "email": "george@pig.com",}
daddy = {"nombre": "Daddy", "apellido": "Pig", "email": "daddy@pig.com",}
mommy = {"nombre": "Mommy", "apellido": "Pig", "email": "mommy@pig.com",}
familiaPig['hija'] = peppa
familiaPig['hijo'] = george
familiaPig['padre'] = daddy
familiaPig['madre'] = mommy
print(familiaPig)
# ¿Cuál es el email de la madre?
print(familiaPig["madre"]["email"])
# ¿Y el nombre de la hija?
print(familiaPig["hija"]["nombre"])
# en lugar de un diccionario, puedo crear también una lista con los miembros de la familia ordenados por edad
familia = [daddy, mommy, peppa, george]
# ahora solo puedo acceder por índices, así que ¿cuál es el email del primer miembro de la familia?
print(familia[0]["email"])
# ¿Y el nombre del miembro más joven?
print(familia[-1]["nombre"])
# esta celda no hace nada, salvo insertar una imagen externa
from IPython.core.display import Image
Image('http://peppapigjuegos.com/img/wallpapers/2pag/peppa_pig_family.jpg')
pelicula = {}
pelicula['titulo'] = "The Arrival"
pelicula['ano'] = 2016
pelicula['reparto'] = ["Amy Adams", "Jeremy Renner", "Forest Whitaker",]
pelicula['esBuena'] = True
print(pelicula)
print("----------------------------")
# imprime el título
print("La película se titula", pelicula['titulo'], ".")
# y más datos asociados a la peli
print("La película tiene" + str(len(pelicula["reparto"])) + "actores" + ".")
# normalmente no necesitaremos esto, pero aquí hay un ejemplo de cómo formatear
# cadenas complejas con print
print("La película se titula {} y tiene {} actores.".format(pelicula["titulo"], len(pelicula["reparto"])))
# creamos una ficha para una serie
serie = {"titulo": "Narcos", "ano":2015, "reparto":["Uno", "Otro"]}
print(serie)
print(serie["titulo"])
# añadimos más datos
serie["esBuena"] = True
print(serie)
coleccion = []
coleccion.append(serie)
print(coleccion)
coleccion.append(pelicula)
print(coleccion)
# añado una tercera serie, y la meto en la colección
serie = {"titulo": "The Expanse", "ano":2015, "reparto":["Fulano", "Mengano"]}
coleccion.append(serie)
print(coleccion)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Quick mass track loader
Step2: Preliminary definitions, including masses and file extensions.
Step3: It's quite curious as to why the GAS07 and AGSS09 tracks show opposite relative effects with respect to the GS98. Should look at the atmosphere structures at depth to determine if there are any intrinsic differences in the atmospheres that are causing their opposite behavior.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
cd /Users/grefe950/evolve/dmestar/trk/
def loadTrack(filename):
return np.genfromtxt(filename, usecols=(0, 1, 2, 3, 4, 5))
masses = [0.1, 0.5, 1.0, 1.5]
# directory extensions
gs98_dir = 'gs98/p000/a0/amlt1884'
gas07_dir = 'gas07/p000/a0/amlt2202'
agss09_dir = 'agss09/p000/a0/amlt1991'
# file name extensions
gs98_ext = '_GS98_p000_p0_y28_mlt1.884.trk'
gas07_ext = '_GAS07_p000_p0_y26_mlt2.202.trk'
agss09_ext = '_AGSS09_p000_p0_y27_mlt1.991.trk'
colors = {2:'#0094b2', 1:'#B22222', 0:'#56b4ea', 3:'#555555'}
fig, ax = plt.subplots(2, 2, figsize=(12., 12.))
for i in range(len(masses)):
mass = masses[i]
row = i/2
col = i%2
# set axis properties
axis = ax[row, col]
axis.tick_params(which='major', axis='both', length=15., labelsize=16.)
axis.set_xlabel('effective temperature (K)', fontsize=18.)
axis.set_ylabel('log(L/Lo)', fontsize=18.)
axis.invert_xaxis()
# load mass tracks
gs98 = loadTrack('{:s}/m{:04.0f}{:s}'.format(gs98_dir, mass*1000., gs98_ext))
gas07 = loadTrack('{:s}/m{:04.0f}{:s}'.format(gas07_dir, mass*1000., gas07_ext))
agss09 = loadTrack('{:s}/m{:04.0f}{:s}'.format(agss09_dir, mass*1000., agss09_ext))
axis.plot(10**gs98[:,1], gs98[:,3], lw=3, c=colors[3], label='GS98')
axis.plot(10**gas07[:,1], gas07[:,3], dashes=(2.0, 2.0), lw=3, c=colors[1], label='GAS07')
axis.plot(10**agss09[:,1], agss09[:,3], dashes=(20., 5.), lw=3, c=colors[2], label='AGSS09')
axis.legend(loc=2, fontsize=14.)
fig.tight_layout()
colors = {2:'#0094b2', 1:'#B22222', 0:'#56b4ea', 3:'#555555'}
fig, ax = plt.subplots(4, 1, figsize=(8., 16.))
for i in range(len(masses)):
mass = masses[i]
# set axis properties
axis = ax[i]
axis.tick_params(which='major', axis='both', length=15., labelsize=16.)
axis.set_ylabel('effective temperature (K)', fontsize=18.)
axis.set_xlabel('age (Gyr)', fontsize=18.)
# load mass tracks
gs98 = loadTrack('{:s}/m{:04.0f}{:s}'.format(gs98_dir, mass*1000., gs98_ext))
gas07 = loadTrack('{:s}/m{:04.0f}{:s}'.format(gas07_dir, mass*1000., gas07_ext))
agss09 = loadTrack('{:s}/m{:04.0f}{:s}'.format(agss09_dir, mass*1000., agss09_ext))
axis.semilogx(gs98[:,0]/1.0e9, 10**gs98[:,1], lw=3, c=colors[3], label='GS98')
axis.semilogx(gas07[:,0]/1.0e9, 10**gas07[:,1], dashes=(2.0, 2.0), lw=3, c=colors[1], label='GAS07')
axis.semilogx(agss09[:,0]/1.0e9, 10**agss09[:,1], dashes=(20., 5.), lw=3, c=colors[2], label='AGSS09')
axis.legend(loc=2, fontsize=14.)
fig.tight_layout()
cd /usr/local/dmestar/data/atm/
gs98_atm = np.genfromtxt('./phx/GS98/t010/Zp0d0.ap0d0_t010.dat')
gs98K_atm = np.genfromtxt('./kur/GS98/t010/kurucz_z+0.00_a+0.00_t02_tau010.sbc')
gas07_atm = np.genfromtxt('./mrc/GAS07/t010/marcs_z+0.00_a+0.00_m1.0_t02_tau010.sbc')
agss09_atm = np.genfromtxt('./phx/AGSS09/t010/Zp0d0.ap0d0_t010.dat')
fig, ax = plt.subplots(3, 1, figsize=(8., 12.), sharex=True)
for i in range(len(ax)):
col = -3*(i + 1) + i%3
axis = ax[i]
axis.grid(True)
axis.set_xlabel('effective temperature (K)', fontsize=18., family='serif')
axis.set_ylabel('temperature at $\\tau = 10$ (K)', fontsize=18., family='serif')
axis.set_xlim(2500., 8000.)
axis.set_ylim(3000., 12000.)
axis.tick_params(which='major', axis='both', length=15., labelsize=14.)
axis.plot(gs98_atm[:,0], gs98_atm[:, col], lw=7, c=colors[3], alpha=0.5, label='GS98')
#axis.plot(gs98K_atm[:,0], gs98K_atm[:, col], 'o', markersize=10., c=colors[0], label='GS98K')
axis.plot(gas07_atm[:,0], gas07_atm[:, col], lw=4, c=colors[1], alpha=0.8, label='GAS07')
axis.plot(agss09_atm[:,0], agss09_atm[:, col], lw=4, c=colors[2], alpha=0.9, label='AGSS09')
axis.legend(loc=2, fontsize=14.)
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
dict = {'abc':'1/2/2003', 'def':'1/5/2017', 'ghi':'4/10/2013'}
df = pd.DataFrame({'Member':['xyz', 'uvw', 'abc', 'def', 'ghi'], 'Group':['A', 'B', 'A', 'B', 'B'], 'Date':[np.nan, np.nan, np.nan, np.nan, np.nan]})
def g(dict, df):
df["Date"] = df["Member"].apply(lambda x: dict.get(x)).fillna(np.NAN)
for i in range(len(df)):
if df.loc[i, 'Member'] not in dict.keys():
df.loc[i, 'Date'] = '17/8/1926'
df["Date"] = pd.to_datetime(df["Date"])
df["Date"] = df["Date"].dt.strftime('%d-%b-%Y')
return df
df = g(dict.copy(),df.copy())
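# Rough expected result: 'xyz' and 'uvw' are not in the dict, so they receive the fallback
# date, formatted as '17-Aug-1926'; with pandas' default month-first parsing,
# 'abc' -> '02-Jan-2003', 'def' -> '05-Jan-2017', 'ghi' -> '10-Apr-2013'.
df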
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Functions and classes
Step2: A function can be reused to sum different kinds of arguments (numbers, text, and lists)
Step4: Note
Step7: Classes
Step8: An instance of the object can be created as follows. Note that as soon as the reference to the instance is overwritten, the instance is destroyed.
Step10: The del function (the destructor) is rarely used, but the init function (the constructor) is the most common way to initialize an instance's variables or behaviour. In the following example the instance constructor takes two arguments, which are stored so that they are accessible to the other functions of the instance as well.
Step11: The following example shows how to pass arguments to the constructor and how to call a function of the created instance.
Step12: Inheritance
Step13: Classes inherited this way have all the variables and functions of the original Food class.
|
<ASSISTANT_TASK:>
Python Code:
def my_function(a, b):
    """Sum two variables together (if they are summable)."""
return a + b
my_function(2, 5)
my_function("Spam ", "eggs")
my_function([1, 2, "A"], [5, 5.3])
def my_function(arg1, arg2, kwarg1=0, kwarg2=0):
    """This function accepts two args and two kwargs.
    The result is the sum of all args and kwargs.
    """
return arg1 + arg2 + kwarg1 + kwarg2
my_function(2, 3., kwarg1=1.5, kwarg2=2)
my_function(2, 3., 1.5, 2.)
my_function(2, 3.)
my_function(2, 3., kwarg2=3.)
class Example():
def __init__(self):
        """This is the constructor. It runs when the instance is created."""
print("Instance created.")
def __del__(self):
        """This is (roughly) the destructor.
        It runs when the last reference to the instance is lost.
        It is the last will of the instance.
        """
print("Instance deleted")
f = Example()
f = None
class Food():
def __init__(self, portion_size, unit_mass):
self.portion_size = portion_size # make it accessible from outside
self.unit_mass = unit_mass
self.UNIT = "g"
def get_portion_mass(self):
        """Return the mass of the portion, with its unit, as a string."""
return str(self.portion_size * self.unit_mass) + " " + self.UNIT
f = Food(10, 30) # create food with specific parameters
f.get_portion_mass() # get mass of a single portion
class Fruit(Food):
def __init__(self, portion_size, unit_mass, sweetness=0):
super(self.__class__, self).__init__(portion_size, unit_mass)
self.sweetness = sweetness
class Vegetable(Food):
def __init__(self, portion_size, unit_mass, is_green=False):
super(self.__class__, self).__init__(portion_size, unit_mass)
self.is_green = is_green
apple = Fruit(10, 30, 50)
apple.sweetness
apple.get_portion_mass()
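# The Vegetable subclass defined above inherits get_portion_mass() from Food as well.
# The values below are made up purely to illustrate the inheritance described in the last step:
carrot = Vegetable(5, 60, is_green=False)
carrot.is_green
carrot.get_portion_mass()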
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import torch
lens = load_data()
max_len = max(lens)
mask = torch.arange(max_len).expand(len(lens), max_len) < lens.unsqueeze(1)
mask = mask.type(torch.LongTensor)
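# Illustrative example (load_data() above is just a stub): if lens were torch.tensor([3, 1, 2]),
# the resulting mask would be
# tensor([[1, 1, 1],
#         [1, 0, 0],
#         [1, 1, 0]])
# i.e. ones over the real tokens of each sequence and zeros over the padding positions.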
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Air Quality Dataset
Step2: Insurance Dataset
|
<ASSISTANT_TASK:>
Python Code:
from feature_selector import FeatureSelector
import pandas as pd
air_quality = pd.read_csv('data/AirQualityUCI.csv')
air_quality['Date'] = pd.to_datetime(air_quality['Date'])
air_quality['Date'] = (air_quality['Date'] - air_quality['Date'].min()).dt.total_seconds()
air_quality['Time'] = [int(x[:2]) for x in air_quality['Time']]
air_quality.head()
labels = air_quality['PT08.S5(O3)']
air_quality = air_quality.drop(columns = 'PT08.S5(O3)')
fs = FeatureSelector(data = air_quality, labels = labels)
fs.identify_all(selection_params = {'missing_threshold': 0.5, 'correlation_threshold': 0.7,
'task': 'regression', 'eval_metric': 'l2',
'cumulative_importance': 0.9})
fs.plot_collinear()
fs.plot_missing()
fs.plot_feature_importances(threshold = 0.9)
fs.data_all.head()
air_quality_removed = fs.remove(methods = 'all', keep_one_hot=False)
fs.ops
fs.plot_collinear(plot_all=True)
insurance = pd.read_csv('data/caravan-insurance-challenge.csv')
insurance = insurance[insurance['ORIGIN'] == 'train']
labels = insurance['CARAVAN']
insurance = insurance.drop(columns = ['ORIGIN', 'CARAVAN'])
insurance.head()
fs = FeatureSelector(data = insurance, labels = labels)
fs.identify_all(selection_params = {'missing_threshold': 0.8, 'correlation_threshold': 0.85,
'task': 'classification', 'eval_metric': 'auc',
'cumulative_importance': 0.8})
fs.plot_feature_importances(threshold=0.8)
fs.plot_collinear()
insurance_missing_zero = fs.remove(methods = ['missing', 'zero_importance'])
to_remove = fs.check_removal()
fs.feature_importances.head()
insurance_removed = fs.remove(methods = 'all', keep_one_hot=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: read in data
Step2: repeat data fixing from previous exercise
Step3: 1a. Plot the daily temperature over the course of the year. (This should probably be a line chart.)
Step4: 1b. Create a bar chart that shows the average temperature and humidity by month.
Step5: 2. Use a scatterplot to show how the daily rental volume varies with temperature. Use a different series (with different colors) for each season.
Step6: 3. Create another scatterplot to show how daily rental volume varies with windspeed. As above, use a different series for each season.
Step7: 4. How do the rental volumes vary with geography? Compute the average daily rentals for each station and use this as the radius for a scatterplot of each station's latitude and longitude.
Step8: pull out the lat and long from 'stations' file data, turn it into a new dataframe 'stations_geo'
Step9: make a new dataframe 'station_count' that counts the occurrences of each station name in 'usage_stations'
Step10: create a new column 'rides' in 'stations_geo' which is populated with the data from 'station_count'
Step11: get rid of all the 'NaN' rows, then plot the data on a scatterplot where the radius of each point is the total number of rides at that location divided by the 366 days of the year
|
<ASSISTANT_TASK:>
Python Code:
from pandas import Series, DataFrame
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
%matplotlib inline
weather = pd.read_table('data/daily_weather.tsv')
stations = pd.read_table('data/stations.tsv')
usage = pd.read_table('data/usage_2012.tsv')
weather['date'] = pd.to_datetime(weather['date'])
weather.loc[weather['season_code'] == 1, 'season_desc'] = 'winter'
weather.loc[weather['season_code'] == 2, 'season_desc'] = 'spring'
weather.loc[weather['season_code'] == 3, 'season_desc'] = 'summer'
weather.loc[weather['season_code'] == 4, 'season_desc'] = 'fall'
weather.loc[weather['season_desc'] == 'winter', 'season_code'] = 4
weather.loc[weather['season_desc'] == 'spring', 'season_code'] = 1
weather.loc[weather['season_desc'] == 'summer', 'season_code'] = 2
weather.loc[weather['season_desc'] == 'fall', 'season_code'] = 3
weather.plot(x='date', y='temp')
plt.show()
temp_humid = weather[['temp', 'humidity']].groupby(weather['date'].dt.month).mean()
temp_humid.plot(kind='bar', width=0.75, color=['#EE4444','#4444EE'])
plt.show()
spring_daily_vol = weather.loc[weather['season_desc'] == 'spring']
summer_daily_vol = weather.loc[weather['season_desc'] == 'summer']
fall_daily_vol = weather.loc[weather['season_desc'] == 'fall']
winter_daily_vol = weather.loc[weather['season_desc'] == 'winter']
spr_ax = spring_daily_vol.plot(kind='scatter', x='temp', y='total_riders', c='yellow', s=50, alpha=.4)
sum_ax = summer_daily_vol.plot(kind='scatter', x='temp', y='total_riders', c='lightgreen', s=50, alpha=.4, ax=spr_ax)
fal_ax = fall_daily_vol.plot(kind='scatter', x='temp', y='total_riders', c='#ee5555', s=50, alpha=.4, ax=sum_ax)
win_ax = winter_daily_vol.plot(kind='scatter', x='temp', y='total_riders', c='lightblue', s=50, alpha=.4, ax=fal_ax)
plt.title('Temp vs Daily Rental Volume')
plt.show()
spr_ax = spring_daily_vol.plot(kind='scatter', x='windspeed', y='total_riders', c='yellow', s=50, alpha=.4)
sum_ax = summer_daily_vol.plot(kind='scatter', x='windspeed', y='total_riders', c='lightgreen', s=50, alpha=.4, ax=spr_ax)
fal_ax = fall_daily_vol.plot(kind='scatter', x='windspeed', y='total_riders', c='#ee5555', s=50, alpha=.4, ax=sum_ax)
win_ax = winter_daily_vol.plot(kind='scatter', x='windspeed', y='total_riders', c='lightblue', s=50, alpha=.4, ax=fal_ax)
plt.title('Windspeed vs Daily Rental Volume')
plt.show()
usage_stations = usage[['station_start']]
usage_stations.head()
stations_geo = DataFrame({'lat': stations.lat, 'long': stations.long})
stations_geo.index = stations.station.values
stations_geo.head()
station_count = DataFrame(usage_stations['station_start'].value_counts())
station_count.head()
stations_geo['rides'] = station_count
stations_geo.head()
cleared = stations_geo.dropna()
cleared.head()
cleared.plot(kind='scatter', x='long', y='lat', s=(cleared['rides'] / 366) * 5, alpha=0.6, figsize=(10, 10))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Finetuning the LM
Step3: In our current vocabulary, it is very unlikely that the ids correspond to what is in the vocabulary used to train the pretrained model. The tokens are sorted by frequency (apart from the special tokens that are all first) so that order is specific to the corpus used. For instance, the word 'house' has different ids in our current vocab and the pretrained one.
Step4: We somehow need to match our pretrained weights to the new vocabulary. This is done on the embeddings and the decoder (since the weights between embeddings and decoders are tied) by putting the rows of the embedding matrix (or decoder bias) in the right order.
Step5: Now let's check that the word "house" was properly converted.
Step6: We can load the pretrained weights in our model before beginning training.
Step7: If we want to apply discriminative learning rates, we need to split our model into different layer groups. Let's have a look at our model.
Step8: Then we split by doing two groups for each rnn/corresponding dropout, then one last group that contains the embeddings/decoder. This is the one that needs to be trained the most as we may have new embeddings vectors.
Step9: First we train with the RNNs frozen.
Step10: Then the whole model with discriminative learning rates.
Step11: We only need to save the encoder (first part of the model) for the classification, as well as the vocabulary used (we will need to use the same in the classification task).
Step12: Classifier
Step13: Ignore padding
Step14: Let's see how this works
Step15: We need to pass to the utility functions the lengths of our sentences because it's applied after the embedding, so we can't see the padding anymore.
Step16: We create a PackedSequence object that contains all of our unpadded sequences
Step17: This object can be passed to any RNN directly while retaining the speed of CuDNN.
Step18: Then we can unpad it with the following function for other modules
Step19: We need to change our model a little bit to use this.
Step20: Concat pooling
Step21: We can check we have padding with 1s at the end of each text (except the first which is the longest).
Step22: PyTorch puts 0s everywhere we had padding in the output when unpacking.
Step23: So the last hidden state isn't the last element of output. Let's check we got everything right.
Step24: Our pooling layer properly ignored the padding, so now let's group it with a classifier.
Step25: Then we just have to feed our texts to those two blocks (but we can't give them all at once to the AWD_LSTM or we might get an OOM error).
Step26: Training
Step27: Predicting on the padded batch or on the individual unpadded samples gives the same results.
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
#export
from exp.nb_12a import *
path = datasets.untar_data(datasets.URLs.IMDB)
ll = pickle.load(open(path/'ll_lm.pkl', 'rb'))
bs,bptt = 128,70
data = lm_databunchify(ll, bs, bptt)
vocab = ll.train.proc_x[1].vocab
# ! wget http://files.fast.ai/models/wt103_tiny.tgz -P {path}
# ! tar xf {path}/wt103_tiny.tgz -C {path}
dps = tensor([0.1, 0.15, 0.25, 0.02, 0.2]) * 0.5
tok_pad = vocab.index(PAD)
emb_sz, nh, nl = 300, 300, 2
model = get_language_model(len(vocab), emb_sz, nh, nl, tok_pad, *dps)
old_wgts = torch.load(path/'pretrained'/'pretrained.pth')
old_vocab = pickle.load(open(path/'pretrained'/'vocab.pkl', 'rb'))
idx_house_new, idx_house_old = vocab.index('house'),old_vocab.index('house')
house_wgt = old_wgts['0.emb.weight'][idx_house_old]
house_bias = old_wgts['1.decoder.bias'][idx_house_old]
def match_embeds(old_wgts, old_vocab, new_vocab):
wgts = old_wgts['0.emb.weight']
bias = old_wgts['1.decoder.bias']
wgts_m,bias_m = wgts.mean(dim=0),bias.mean()
new_wgts = wgts.new_zeros(len(new_vocab), wgts.size(1))
new_bias = bias.new_zeros(len(new_vocab))
otoi = {v:k for k,v in enumerate(old_vocab)}
for i,w in enumerate(new_vocab):
if w in otoi:
idx = otoi[w]
new_wgts[i],new_bias[i] = wgts[idx],bias[idx]
else: new_wgts[i],new_bias[i] = wgts_m,bias_m
old_wgts['0.emb.weight'] = new_wgts
old_wgts['0.emb_dp.emb.weight'] = new_wgts
old_wgts['1.decoder.weight'] = new_wgts
old_wgts['1.decoder.bias'] = new_bias
return old_wgts
wgts = match_embeds(old_wgts, old_vocab, vocab)
test_near(wgts['0.emb.weight'][idx_house_new],house_wgt)
test_near(wgts['1.decoder.bias'][idx_house_new],house_bias)
model.load_state_dict(wgts)
model
def lm_splitter(m):
groups = []
for i in range(len(m[0].rnns)): groups.append(nn.Sequential(m[0].rnns[i], m[0].hidden_dps[i]))
groups += [nn.Sequential(m[0].emb, m[0].emb_dp, m[0].input_dp, m[1])]
return [list(o.parameters()) for o in groups]
for rnn in model[0].rnns:
for p in rnn.parameters(): p.requires_grad_(False)
cbs = [partial(AvgStatsCallback,accuracy_flat),
CudaCallback, Recorder,
partial(GradientClipping, clip=0.1),
partial(RNNTrainer, α=2., β=1.),
ProgressCallback]
learn = Learner(model, data, cross_entropy_flat, opt_func=adam_opt(),
cb_funcs=cbs, splitter=lm_splitter)
lr = 2e-2
cbsched = sched_1cycle([lr], pct_start=0.5, mom_start=0.8, mom_mid=0.7, mom_end=0.8)
learn.fit(1, cbs=cbsched)
for rnn in model[0].rnns:
for p in rnn.parameters(): p.requires_grad_(True)
lr = 2e-3
cbsched = sched_1cycle([lr/2., lr/2., lr], pct_start=0.5, mom_start=0.8, mom_mid=0.7, mom_end=0.8)
learn.fit(10, cbs=cbsched)
torch.save(learn.model[0].state_dict(), path/'finetuned_enc.pth')
pickle.dump(vocab, open(path/'vocab_lm.pkl', 'wb'))
torch.save(learn.model.state_dict(), path/'finetuned.pth')
vocab = pickle.load(open(path/'vocab_lm.pkl', 'rb'))
proc_tok,proc_num,proc_cat = TokenizeProcessor(),NumericalizeProcessor(vocab=vocab),CategoryProcessor()
il = TextList.from_files(path, include=['train', 'test'])
sd = SplitData.split_by_func(il, partial(grandparent_splitter, valid_name='test'))
ll = label_by_func(sd, parent_labeler, proc_x = [proc_tok, proc_num], proc_y=proc_cat)
pickle.dump(ll, open(path/'ll_clas.pkl', 'wb'))
ll = pickle.load(open(path/'ll_clas.pkl', 'rb'))
vocab = pickle.load(open(path/'vocab_lm.pkl', 'rb'))
bs,bptt = 64,70
data = clas_databunchify(ll, bs)
#export
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
x,y = next(iter(data.train_dl))
x.size()
lengths = x.size(1) - (x == 1).sum(1)
lengths[:5]
tst_emb = nn.Embedding(len(vocab), 300)
tst_emb(x).shape
128*70
packed = pack_padded_sequence(tst_emb(x), lengths, batch_first=True)
packed
packed.data.shape
len(packed.batch_sizes)
8960//70
tst = nn.LSTM(300, 300, 2)
y,h = tst(packed)
unpack = pad_packed_sequence(y, batch_first=True)
unpack[0].shape
unpack[1]
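# pad_packed_sequence returns a (padded_output, lengths) pair, so unpack[0] is the re-padded
# activation tensor and unpack[1] should match the sequence lengths we computed from the
# padding tokens above.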
#export
class AWD_LSTM1(nn.Module):
"AWD-LSTM inspired by https://arxiv.org/abs/1708.02182."
initrange=0.1
def __init__(self, vocab_sz, emb_sz, n_hid, n_layers, pad_token,
hidden_p=0.2, input_p=0.6, embed_p=0.1, weight_p=0.5):
super().__init__()
self.bs,self.emb_sz,self.n_hid,self.n_layers,self.pad_token = 1,emb_sz,n_hid,n_layers,pad_token
self.emb = nn.Embedding(vocab_sz, emb_sz, padding_idx=pad_token)
self.emb_dp = EmbeddingDropout(self.emb, embed_p)
self.rnns = [nn.LSTM(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz), 1,
batch_first=True) for l in range(n_layers)]
self.rnns = nn.ModuleList([WeightDropout(rnn, weight_p) for rnn in self.rnns])
self.emb.weight.data.uniform_(-self.initrange, self.initrange)
self.input_dp = RNNDropout(input_p)
self.hidden_dps = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])
def forward(self, input):
bs,sl = input.size()
mask = (input == self.pad_token)
lengths = sl - mask.long().sum(1)
n_empty = (lengths == 0).sum()
if n_empty > 0:
input = input[:-n_empty]
lengths = lengths[:-n_empty]
self.hidden = [(h[0][:,:input.size(0)], h[1][:,:input.size(0)]) for h in self.hidden]
raw_output = self.input_dp(self.emb_dp(input))
new_hidden,raw_outputs,outputs = [],[],[]
for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
raw_output = pack_padded_sequence(raw_output, lengths, batch_first=True)
raw_output, new_h = rnn(raw_output, self.hidden[l])
raw_output = pad_packed_sequence(raw_output, batch_first=True)[0]
raw_outputs.append(raw_output)
if l != self.n_layers - 1: raw_output = hid_dp(raw_output)
outputs.append(raw_output)
new_hidden.append(new_h)
self.hidden = to_detach(new_hidden)
return raw_outputs, outputs, mask
def _one_hidden(self, l):
"Return one hidden state."
nh = self.n_hid if l != self.n_layers - 1 else self.emb_sz
return next(self.parameters()).new(1, self.bs, nh).zero_()
def reset(self):
"Reset the hidden states."
self.hidden = [(self._one_hidden(l), self._one_hidden(l)) for l in range(self.n_layers)]
class Pooling(nn.Module):
def forward(self, input):
raw_outputs,outputs,mask = input
output = outputs[-1]
lengths = output.size(1) - mask.long().sum(dim=1)
avg_pool = output.masked_fill(mask[:,:,None], 0).sum(dim=1)
avg_pool.div_(lengths.type(avg_pool.dtype)[:,None])
max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0]
x = torch.cat([output[torch.arange(0, output.size(0)),lengths-1], max_pool, avg_pool], 1) #Concat pooling.
return output,x
emb_sz, nh, nl = 300, 300, 2
tok_pad = vocab.index(PAD)
enc = AWD_LSTM1(len(vocab), emb_sz, n_hid=nh, n_layers=nl, pad_token=tok_pad)
pool = Pooling()
enc.bs = bs
enc.reset()
x,y = next(iter(data.train_dl))
output,c = pool(enc(x))
x
test_near((output.sum(dim=2) == 0).float(), (x==tok_pad).float())
for i in range(bs):
length = x.size(1) - (x[i]==1).long().sum()
out_unpad = output[i,:length]
test_near(out_unpad[-1], c[i,:300])
test_near(out_unpad.max(0)[0], c[i,300:600])
test_near(out_unpad.mean(0), c[i,600:])
def bn_drop_lin(n_in, n_out, bn=True, p=0., actn=None):
layers = [nn.BatchNorm1d(n_in)] if bn else []
if p != 0: layers.append(nn.Dropout(p))
layers.append(nn.Linear(n_in, n_out))
if actn is not None: layers.append(actn)
return layers
class PoolingLinearClassifier(nn.Module):
"Create a linear classifier with pooling."
def __init__(self, layers, drops):
super().__init__()
mod_layers = []
activs = [nn.ReLU(inplace=True)] * (len(layers) - 2) + [None]
for n_in, n_out, p, actn in zip(layers[:-1], layers[1:], drops, activs):
mod_layers += bn_drop_lin(n_in, n_out, p=p, actn=actn)
self.layers = nn.Sequential(*mod_layers)
def forward(self, input):
raw_outputs,outputs,mask = input
output = outputs[-1]
lengths = output.size(1) - mask.long().sum(dim=1)
avg_pool = output.masked_fill(mask[:,:,None], 0).sum(dim=1)
avg_pool.div_(lengths.type(avg_pool.dtype)[:,None])
max_pool = output.masked_fill(mask[:,:,None], -float('inf')).max(dim=1)[0]
x = torch.cat([output[torch.arange(0, output.size(0)),lengths-1], max_pool, avg_pool], 1) #Concat pooling.
x = self.layers(x)
return x
def pad_tensor(t, bs, val=0.):
if t.size(0) < bs:
return torch.cat([t, val + t.new_zeros(bs-t.size(0), *t.shape[1:])])
return t
class SentenceEncoder(nn.Module):
def __init__(self, module, bptt, pad_idx=1):
super().__init__()
self.bptt,self.module,self.pad_idx = bptt,module,pad_idx
def concat(self, arrs, bs):
return [torch.cat([pad_tensor(l[si],bs) for l in arrs], dim=1) for si in range(len(arrs[0]))]
def forward(self, input):
bs,sl = input.size()
self.module.bs = bs
self.module.reset()
raw_outputs,outputs,masks = [],[],[]
for i in range(0, sl, self.bptt):
r,o,m = self.module(input[:,i: min(i+self.bptt, sl)])
masks.append(pad_tensor(m, bs, 1))
raw_outputs.append(r)
outputs.append(o)
return self.concat(raw_outputs, bs),self.concat(outputs, bs),torch.cat(masks,dim=1)
def get_text_classifier(vocab_sz, emb_sz, n_hid, n_layers, n_out, pad_token, bptt, output_p=0.4, hidden_p=0.2,
input_p=0.6, embed_p=0.1, weight_p=0.5, layers=None, drops=None):
"To create a full AWD-LSTM"
rnn_enc = AWD_LSTM1(vocab_sz, emb_sz, n_hid=n_hid, n_layers=n_layers, pad_token=pad_token,
hidden_p=hidden_p, input_p=input_p, embed_p=embed_p, weight_p=weight_p)
enc = SentenceEncoder(rnn_enc, bptt)
if layers is None: layers = [50]
if drops is None: drops = [0.1] * len(layers)
layers = [3 * emb_sz] + layers + [n_out]
drops = [output_p] + drops
return SequentialRNN(enc, PoolingLinearClassifier(layers, drops))
emb_sz, nh, nl = 300, 300, 2
dps = tensor([0.4, 0.3, 0.4, 0.05, 0.5]) * 0.25
model = get_text_classifier(len(vocab), emb_sz, nh, nl, 2, 1, bptt, *dps)
def class_splitter(m):
enc = m[0].module
groups = [nn.Sequential(enc.emb, enc.emb_dp, enc.input_dp)]
for i in range(len(enc.rnns)): groups.append(nn.Sequential(enc.rnns[i], enc.hidden_dps[i]))
groups.append(m[1])
return [list(o.parameters()) for o in groups]
for p in model[0].parameters(): p.requires_grad_(False)
cbs = [partial(AvgStatsCallback,accuracy),
CudaCallback, Recorder,
partial(GradientClipping, clip=0.1),
ProgressCallback]
model[0].module.load_state_dict(torch.load(path/'finetuned_enc.pth'))
learn = Learner(model, data, F.cross_entropy, opt_func=adam_opt(), cb_funcs=cbs, splitter=class_splitter)
lr = 1e-2
cbsched = sched_1cycle([lr], mom_start=0.8, mom_mid=0.7, mom_end=0.8)
learn.fit(1, cbs=cbsched)
for p in model[0].module.rnns[-1].parameters(): p.requires_grad_(True)
lr = 5e-3
cbsched = sched_1cycle([lr/2., lr/2., lr/2., lr], mom_start=0.8, mom_mid=0.7, mom_end=0.8)
learn.fit(1, cbs=cbsched)
for p in model[0].parameters(): p.requires_grad_(True)
lr = 1e-3
cbsched = sched_1cycle([lr/8., lr/4., lr/2., lr], mom_start=0.8, mom_mid=0.7, mom_end=0.8)
learn.fit(2, cbs=cbsched)
x,y = next(iter(data.valid_dl))
pred_batch = learn.model.eval()(x.cuda())
pred_ind = []
for inp in x:
length = x.size(1) - (inp == 1).long().sum()
inp = inp[:length]
pred_ind.append(learn.model.eval()(inp[None].cuda()))
assert near(pred_batch, torch.cat(pred_ind))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: States are represented as integers. The first 9 bits encode the marks of the player X, the next 9 bits encode the marks of the player O. Initially, the board is empty and hence no bit is set.
Step2: The function set_bits takes one parameter
Step3: The function set_bit(n) returns a number that has exactly the $\texttt{n}^\textrm{th}$ bit set.
Step4: The function to_board(state) takes an integer state as its argument that represents a state of the game Tic-Tac-Toe. It converts this state into a string that represents the board corresponding to the state.
Step5: Given a state that is represented as a number, the function empty(state) returns the set of indexes of those cells such that neither player X nor player O has placed a mark in the cell. Note that there are 9 cells on the board. Each of these cells can hold either an 'X' or an 'O'. If the $i^\textrm{th}$ cell is marked with a 'X', then the $i^\textrm{th}$ bit of state is set. If instead the $i^\textrm{th}$ cell is marked with an 'O', then the $(9+i)^\textrm{th}$ bit of state is set. If the $i^\textrm{th}$ cell is not yet marked, then both the $i^\textrm{th}$ bit and the $(9+i)^\textrm{th}$ are $0$.
Step6: Given a state and the player who is next to move, the function next_states(state, player) computes the set of states that can be reached from state.
Step7: The global variable gAllLines is a list of eight bit masks. These masks can be used to test whether there are three identical marks in a row, column, or diagonal.
Step8: The function utility takes one argument
Step9: finished(state) is True if the game is over.
Step10: The function get_move asks the user to input a move in the format r,c where r is the row and c is the column where the next symbol is to be placed.
Step11: The function final_msg(state) informs the user about the result of the game once the game is finished.
Step12: Drawing the Board
Step13: This function creates the canvas for the start state. It draws an empty board which is later used for the game.
Step14: The function get_symbol(state, row, col) takes three arguments
Step15: This function takes three arguments
|
<ASSISTANT_TASK:>
Python Code:
gPlayers = [0, 1]
gStart = 0
def set_bits(Bits):
result = 0
for b in Bits:
result |= 1 << b # bitwise or 2**b
return result
"{:b}".format(set_bits([0, 1, 4]))
assert set_bits([0, 1, 4]) == 2**0 + 2**1 + 2**4
def set_bit(n):
return 1 << n
assert set_bit(7) == 2 ** 7
def to_board(state):
result = '+-+-+-+\n'
for cell in range(9):
if state & (2 ** cell) != 0:
result += '|X'
elif state & (2 ** (cell + 9)) != 0:
result += '|O'
else:
result += '| '
if (cell + 1) % 3 == 0:
result += '|\n+-+-+-+\n'
return result
print(to_board(set_bits([0,2,3,5,7,9+1,9+4,9+6])))
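# For reference, the empty start state gStart renders as a blank board:
print(to_board(gStart))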
def empty(state):
Free = { n for n in range(9) }
Free -= { n for n in range(9) if state & (1 << n) != 0 }
Free -= { n for n in range(9) if state & (1 << (9 + n)) != 0 }
return Free
state = set_bits([2, 3, 5, 9+1, 9+4, 9+8])
print(to_board(state))
empty(state)
def next_states(state, player):
Empty = empty(state)
Result = []
for n in Empty:
next_state = state | set_bit(player * 9 + n)
Result.append(next_state)
return Result
state = set_bits([2,3,5,10,13,15])
print(f'state:\n{to_board(state)}')
print('next states:')
for s in next_states(state, 0):
print(to_board(s))
gAllLines = [ set_bits([0,1,2]), # 1st row
set_bits([3,4,5]), # 2nd row
set_bits([6,7,8]), # 3rd row
set_bits([0,3,6]), # 1st column
set_bits([1,4,7]), # 2nd column
set_bits([2,5,8]), # 3rd column
set_bits([0,4,8]), # falling diagonal
set_bits([2,4,6]), # rising diagonal
]
for state in gAllLines:
print(to_board(state))
def utility(state):
for mask in gAllLines:
if state & mask == mask:
return 1 # the computer has won
if (state >> 9) & mask == mask:
return -1 # the computer has lost
# 511 == 2**9 - 1 = 0b1_1111_1111
if (state & 511) | (state >> 9) != 511: # the board is not yet filled
return None
# at this point, the board has been filled, but there is no winner hence its a draw
return 0 # it's a draw
s1 = set_bits([0, 2, 3, 6, 1+9, 4+9, 5+9]) # 'X' has won
print(to_board(s1))
utility(s1)
s2 = set_bits([0, 2, 6, 8, 1+9, 4+9, 7+9]) # 'O' has won
print(to_board(s2))
utility(s2)
s3 = set_bits([0, 2, 5, 6, 7, 1+9, 3+9, 4+9, 8+9]) # it's a draw
print(to_board(s3))
print(utility(s3))
s4 = set_bits([0, 2, 5, 6, 1+9, 3+9, 4+9]) # it ain't over yet
print(to_board(s4))
print(utility(s4))
def finished(state):
return utility(state) != None
s = set_bits([0, 2, 5, 6, 7, 1+9, 3+9, 4+9, 8+9])
print(to_board(s))
finished(s)
def get_move(state):
while True:
try:
row, col = input('Enter move here: ').split(',')
row, col = int(row), int(col)
if row not in {0, 1, 2} or col not in {0, 1, 2}:
print('Illegal input. The move has to be a pair of the form "row,col"')
print('where row and col are numbers from the set {0,1,2}.')
continue
mask = set_bit(row * 3 + col)
if state & (mask | (mask << 9)) == 0:
return state | (mask << 9)
print("Don't cheat! Please try again.")
except:
print('Illegal input.')
print('row and col are numbers from the set {0,1,2}.')
def final_msg(state):
if finished(state):
if utility(state) == -1:
print('You have won!')
elif utility(state) == 1:
print('The computer has won!')
else:
print("It's a draw.");
return True
return False
import ipycanvas as cnv
size = 150
def create_canvas():
canvas = cnv.Canvas(size=(size * 3, size * 3 + 50))
display(canvas)
return canvas
def get_symbol(state, row, col):
mask = set_bit(row * 3 + col)
if mask & state == mask:
return 'X'
if mask & (state >> 9) == mask:
return 'O'
return ' '
def draw(state, canvas, value):
canvas.clear()
n = 3
canvas.font = '90px sans-serif'
canvas.text_align = 'center'
canvas.text_baseline = 'middle'
for row in range(n):
for col in range(n):
x = col * size
y = row * size
canvas.line_width = 3.0
canvas.stroke_rect(x, y, size, size)
symbol = get_symbol(state, row, col)
if symbol != ' ':
x += size // 2
y += size // 2
if symbol == 'X':
canvas.fill_style ='red'
else:
canvas.fill_style ='blue'
canvas.fill_text(symbol, x, y)
canvas.font = '12px sans-serif'
canvas.fill_style = 'green'
for row in range(n):
for col in range(n):
x = col * size + 16
y = row * size + 141
canvas.fill_text(f'({row}, {col})', x, y)
canvas.font = '20px sans-serif'
canvas.fill_style = 'black'
x = 1.5 * size
y = 3.2 * size
canvas.fill_text(str(value), x, y)
draw(set_bits([0, 2, 5, 6, 1+9, 3+9, 4+9]), create_canvas(), -1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Channel Attribution -- Building Blocks of Interpretability
Step2: Setup (feel free to skip)
Step3: BarsWidget
Step4: Spritemaps
Step5: Attribution Code
Step6: Channel attributions from article teaser
Step7: Bigger channel attribution!!!
Step8: Channel Attribution - Path Integrated
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install --quiet lucid==0.0.5
!npm install -g svelte-cli@2.2.0
import numpy as np
import tensorflow as tf
import lucid.modelzoo.vision_models as models
from lucid.misc.io import show
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform as transform
from lucid.misc.io import show, load
from lucid.misc.io.reading import read
from lucid.misc.io.showing import _image_url, _display_html
import lucid.scratch.web.svelte as lucid_svelte
model = models.InceptionV1()
model.load_graphdef()
%%html_define_svelte ChannelAttrWidget
<div class="figure">
<div class="channel_list" >
{{#each attrsPos as attr}}
<div class="entry">
<div class="sprite" style="background-image: url({{spritemap_url}}); width: {{sprite_size}}px; height: {{sprite_size}}px; background-position: -{{sprite_size*(attr.n%sprite_n_wrap)}}px -{{sprite_size*Math.floor(attr.n/sprite_n_wrap)}}px;"></div>
<div class="value" style="background-color: hsl({{(attr.v > 0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)">{{attr.v}}</div>
</div>
{{/each}}
{{#if attrsPos.length > 5}}
<br style="clear:both;">
<br style="clear:both;">
{{/if}}
<div class="gap">...</div>
{{#each attrsNeg as attr}}
<div class="entry">
<div class="sprite" style="background-image: url({{spritemap_url}}); width: {{sprite_size}}px; height: {{sprite_size}}px; background-position: -{{sprite_size*(attr.n%sprite_n_wrap)}}px -{{sprite_size*Math.floor(attr.n/sprite_n_wrap)}}px;"></div>
<div class="value" style="background-color: hsl({{(attr.v > 0)? 210 : 0}}, {{100*Math.abs(attr.v)/1.8}}%, {{100-30*Math.abs(attr.v)/1.8}}%)">{{attr.v}}</div>
</div>
{{/each}}
</div>
<br style="clear:both">
</div>
<style>
.entry{
float: left;
margin-right: 4px;
}
.gap {
float: left;
margin: 8px;
font-size: 400%;
}
</style>
<script>
function range(n){
return Array(n).fill().map((_, i) => i);
}
export default {
data () {
return {
spritemap_url: "",
sprite_size: 110,
sprite_n_wrap: 22,
attrsPos: [],
attrsNeg: [],
};
},
computed: {
},
helpers: {range}
};
</script>
%%html_define_svelte BarsWidget
<div class="figure">
<div class="channel_list" >
{{#each vals as val}}
<div class="bar" style="height: {{15*Math.abs(val)}}px; background-color: hsl({{(val > 0)? 210 : 0}}, {{Math.max(90, 110*Math.abs(val)/1.8)}}%, {{Math.min(80, 100-40*Math.abs(val)/1.8)}}%);">
</div>
{{/each}}
</div>
<br style="clear:both">
</div>
<style>
.channel_list {
background-color: #FEFEFE;
}
.bar {
width: 1.5px;
height: 10px;
display: inline-block;
}
</style>
<script>
export default {
data () {
return {
vals: []
};
}
};
</script>
layer_spritemap_sizes = {
'mixed3a' : 16,
'mixed3b' : 21,
'mixed4a' : 22,
'mixed4b' : 22,
'mixed4c' : 22,
'mixed4d' : 22,
'mixed4e' : 28,
'mixed5a' : 28,
}
def googlenet_spritemap(layer):
assert layer in layer_spritemap_sizes
size = layer_spritemap_sizes[layer]
url = "https://storage.googleapis.com/lucid-static/building-blocks/googlenet_spritemaps/sprite_%s_channel_alpha.jpeg" % layer
return size, url
def score_f(logit, name):
if name is None:
return 0
elif name == "logsumexp":
base = tf.reduce_max(logit)
return base + tf.log(tf.reduce_sum(tf.exp(logit-base)))
elif name in model.labels:
return logit[model.labels.index(name)]
else:
raise RuntimeError("Unsupported")
def channel_attr_simple(img, layer, class1, class2, n_show=4):
# Set up a graph for doing attribution...
with tf.Graph().as_default(), tf.Session() as sess:
t_input = tf.placeholder_with_default(img, [None, None, 3])
T = render.import_model(model, t_input, t_input)
# Compute activations
acts = T(layer).eval()
# Compute gradient
logit = T("softmax2_pre_activation")[0]
score = score_f(logit, class1) - score_f(logit, class2)
t_grad = tf.gradients([score], [T(layer)])[0]
grad = t_grad.eval()
# Let's do a very simple linear approximation attribution.
# That is, we say the attribution of y to x is
# the rate at which x changes y times the value of x.
attr = (grad*acts)[0]
# Then we reduce down to channels.
channel_attr = attr.sum(0).sum(0)
# Now we just need to present the results.
# Get spritemaps
spritemap_n, spritemap_url = googlenet_spritemap(layer)
# Let's show the distribution of attributions
print "Distribution of attribution accross channels:"
print ""
lucid_svelte.BarsWidget({"vals" : [float(v) for v in np.sort(channel_attr)[::-1]]})
# Let's pick the most extreme channels to show
ns_pos = list(np.argsort(-channel_attr)[:n_show])
ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])
# ... and show them with ChannelAttrWidget
print ""
print "Top", n_show, "channels in each direction:"
print ""
lucid_svelte.ChannelAttrWidget({
"spritemap_url": spritemap_url,
"sprite_size": 110,
"sprite_n_wrap": spritemap_n,
"attrsPos": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_pos],
"attrsNeg": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_neg]
})
img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png")
channel_attr_simple(img, "mixed4d", "Labrador retriever", "tiger cat", n_show=3)
img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png")
channel_attr_simple(img, "mixed4d", "vase", "lemon", n_show=3)
img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/sunglasses_tux.png")
channel_attr_simple(img, "mixed4d", "bow tie", "sunglasses", n_show=3)
img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png")
channel_attr_simple(img, "mixed4d", "Labrador retriever", "tiger cat", n_show=30)
def channel_attr_path(img, layer, class1, class2, n_show=4, stochastic_path=False, N = 100):
# Set up a graph for doing attribution...
with tf.Graph().as_default(), tf.Session() as sess:
t_input = tf.placeholder_with_default(img, [None, None, 3])
T = render.import_model(model, t_input, t_input)
# Compute activations
acts = T(layer).eval()
# Compute gradient
logit = T("softmax2_pre_activation")[0]
score = score_f(logit, class1) - score_f(logit, class2)
t_grad = tf.gradients([score], [T(layer)])[0]
# Inegrate on a path from acts=0 to acts=acts
attr = np.zeros(acts.shape[1:])
for n in range(N):
acts_ = acts * float(n) / N
if stochastic_path:
acts_ *= (np.random.uniform(0, 1, [528])+np.random.uniform(0, 1, [528]))/1.5
grad = t_grad.eval({T(layer): acts_})
attr += 1.0 / N * (grad*acts)[0]
# Then we reduce down to channels.
channel_attr = attr.sum(0).sum(0)
# Now we just need to present the results.
# Get spritemaps
spritemap_n, spritemap_url = googlenet_spritemap(layer)
# Let's show the distribution of attributions
print "Distribution of attribution accross channels:"
print ""
lucid_svelte.BarsWidget({"vals" : [float(v) for v in np.sort(channel_attr)[::-1]]})
# Let's pick the most extreme channels to show
ns_pos = list(np.argsort(-channel_attr)[:n_show])
ns_neg = list(np.argsort(channel_attr)[:n_show][::-1])
# ... and show them with ChannelAttrWidget
print ""
print "Top", n_show, "channels in each direction:"
print ""
lucid_svelte.ChannelAttrWidget({
"spritemap_url": spritemap_url,
"sprite_size": 110,
"sprite_n_wrap": spritemap_n,
"attrsPos": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_pos],
"attrsNeg": [{"n": n, "v": str(float(channel_attr[n]))[:5]} for n in ns_neg]
})
def compare_attr_methods(img, class1, class2):
_display_html("<h2>Linear Attribution</h2>")
channel_attr_simple(img, "mixed4d", class1, class2, n_show=10)
_display_html("<br><br><h2>Path Integrated Attribution</h2>")
channel_attr_path(img, "mixed4d", class1, class2, n_show=10)
_display_html("<br><br><h2>Stochastic Path Integrated Attribution</h2>")
channel_attr_path(img, "mixed4d", class1, class2, n_show=10, stochastic_path=True)
img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/dog_cat.png")
compare_attr_methods(img, "Labrador retriever", "tiger cat")
img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/flowers.png")
compare_attr_methods(img, "vase", "lemon")
img = load("https://storage.googleapis.com/lucid-static/building-blocks/examples/pig.jpeg")
compare_attr_methods(img, "hog", "dalmatian")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Determination of $\zeta$
|
<ASSISTANT_TASK:>
Python Code:
from scipy import matrix, sqrt, pi, cos, sin, set_printoptions
p0 = 2224.0 # converted from kN to Newton
rho1 = 183E-6 ; rho2 = 368E-6 # converted from μm to m
w1 = 16.0 ; w2 = 25.0
th1 = 15.0 ; th2 = 55.0
d2r = pi/180.
cos1 = cos(d2r*th1) ; cos2 = cos(d2r*th2)
sin1 = sin(d2r*th1) ; sin2 = sin(d2r*th2)
# the unknowns are k and m
# coefficient matrix, row i is 1, omega_i^2
coeff = matrix(((1, -w1**2),(1, -w2**2)))
# kt, i.e. the known term, p_0*cos(theta_i)/rho_i
kt = matrix((cos1/rho1,cos2/rho2)).T*p0
print(coeff)
print(kt)
k_and_m = coeff.I*kt
k, m = k_and_m[0,0], k_and_m[1,0]
wn2, wn = k/m, sqrt(k/m)
print(' k m wn2 wn')
print(k, m, wn2, wn)
z1 = p0*sin1/rho1/k/2/(w1/wn)
z2 = p0*sin2/rho2/k/2/(w2/wn)
print(z1*100, z2*100)
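# Background for the two relations used above (steady-state response of a viscously damped
# SDOF system under a harmonic force of amplitude p0): writing the displacement amplitude as
# rho and the phase lag as theta,
#     (k - m*w**2) * rho = p0*cos(theta)        and        c*w*rho = p0*sin(theta).
# The first relation is the linear system solved above for k and m; combining the second with
# zeta = c / (2*m*wn) gives zeta = p0*sin(theta) / (rho * 2*k * (w/wn)), which is exactly the
# expression evaluated for z1 and z2.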
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To explore statistics that quantify effect size, we'll look at the difference in height between men and women. I used data from the Behavioral Risk Factor Surveillance System (BRFSS) to estimate the mean and standard deviation of height in cm for adult women and men in the U.S.
Step2: The following function evaluates the normal (Gaussian) probability density function (PDF) within 4 standard deviations of the mean. It takes and rv object and returns a pair of NumPy arrays.
Step3: Here's what the two distributions look like.
Step4: Let's assume for now that those are the true distributions for the population. Of course, in real life we never observe the true population distribution. We generally have to work with a random sample.
Step5: Both samples are NumPy arrays. Now we can compute sample statistics like the mean and standard deviation.
Step6: The sample mean is close to the population mean, but not exact, as expected.
Step7: And the results are similar for the female sample.
Step8: On average, men are 14--15 centimeters taller. For some applications, that would be a good way to describe the difference, but there are a few problems
Step9: But a problem with relative differences is that you have to choose which mean to express them relative to.
Step10: Part Two
Step11: A better, but slightly more complicated threshold is the place where the PDFs cross.
Step12: In this example, there's not much difference between the two thresholds.
Step13: And how many women are above it
Step14: The "overlap" is the total area under the curves that ends up on the wrong side of the threshold.
Step15: Or in more practical terms, you might report the fraction of people who would be misclassified if you tried to use height to guess sex
Step16: Another way to quantify the difference between distributions is what's called "probability of superiority", which is a problematic term, but in this context it's the probability that a randomly-chosen man is taller than a randomly-chosen woman.
Step18: Overlap (or misclassification rate) and "probability of superiority" have two good properties
Step19: Computing the denominator is a little complicated; in fact, people have proposed several ways to do it. This implementation uses the "pooled standard deviation", which is a weighted average of the standard deviations of the two groups.
Step21: Most people don't have a good sense of how big $d=1.9$ is, so let's make a visualization to get calibrated.
Step23: Here's the function that takes Cohen's $d$, plots normal distributions with the given effect size, and prints their overlap and superiority.
Step24: Here's an example that demonstrates the function
Step25: And an interactive widget you can use to visualize what different values of $d$ mean
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
import numpy
import scipy.stats
import matplotlib.pyplot as pyplot
from IPython.html.widgets import interact, fixed
from IPython.html import widgets
# seed the random number generator so we all get the same results
numpy.random.seed(17)
# some nice colors from http://colorbrewer2.org/
COLOR1 = '#7fc97f'
COLOR2 = '#beaed4'
COLOR3 = '#fdc086'
COLOR4 = '#ffff99'
COLOR5 = '#386cb0'
%matplotlib inline
mu1, sig1 = 178, 7.7
male_height = scipy.stats.norm(mu1, sig1)
mu2, sig2 = 163, 7.3
female_height = scipy.stats.norm(mu2, sig2)
def eval_pdf(rv, num=4):
mean, std = rv.mean(), rv.std()
xs = numpy.linspace(mean - num*std, mean + num*std, 100)
ys = rv.pdf(xs)
return xs, ys
xs, ys = eval_pdf(male_height)
pyplot.plot(xs, ys, label='male', linewidth=4, color=COLOR2)
xs, ys = eval_pdf(female_height)
pyplot.plot(xs, ys, label='female', linewidth=4, color=COLOR3)
pyplot.xlabel('height (cm)')
None
male_sample = male_height.rvs(1000)
female_sample = female_height.rvs(1000)
mean1, std1 = male_sample.mean(), male_sample.std()
mean1, std1
mean2, std2 = female_sample.mean(), female_sample.std()
mean2, std2
difference_in_means = male_sample.mean() - female_sample.mean()
difference_in_means # in cm
# Exercise: what is the relative difference in means, expressed as a percentage?
relative_difference = difference_in_means / male_sample.mean()
relative_difference * 100 # percent
relative_difference = difference_in_means / female_sample.mean()
relative_difference * 100 # percent
simple_thresh = (mean1 + mean2) / 2
simple_thresh
thresh = (std1 * mean2 + std2 * mean1) / (std1 + std2)
thresh
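# Aside: this weighted threshold is the point with equal z-scores in the two distributions,
# i.e. (thresh - mean2) / std2 == (mean1 - thresh) / std1; when the standard deviations are
# (nearly) equal, this is also where the two PDFs cross.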
male_below_thresh = sum(male_sample < thresh)
male_below_thresh
female_above_thresh = sum(female_sample > thresh)
female_above_thresh
overlap = male_below_thresh / len(male_sample) + female_above_thresh / len(female_sample)
overlap
misclassification_rate = overlap / 2
misclassification_rate
# Exercise: suppose I choose a man and a woman at random.
# What is the probability that the man is taller?
sum(x > y for x, y in zip(male_sample, female_sample)) / len(male_sample)
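# For reference, under the population model defined above the exact probability is
# Phi((mu1 - mu2) / sqrt(sig1**2 + sig2**2)), roughly 0.92, so the sample estimate
# should land close to that:
scipy.stats.norm.cdf((mu1 - mu2) / numpy.sqrt(sig1**2 + sig2**2))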
def CohenEffectSize(group1, group2):
    """Compute Cohen's d.
    group1: Series or NumPy array
    group2: Series or NumPy array
    returns: float
    """
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / numpy.sqrt(pooled_var)
return d
CohenEffectSize(male_sample, female_sample)
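# Sanity check: with equal sample sizes the pooled standard deviation reduces to the
# root-mean-square of the two standard deviations, so the "true" effect size implied by the
# population parameters above is (178 - 163) / sqrt((7.7**2 + 7.3**2) / 2), almost exactly 2:
(mu1 - mu2) / numpy.sqrt((sig1**2 + sig2**2) / 2)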
def overlap_superiority(control, treatment, n=1000):
    """Estimates overlap and superiority based on a sample.
    control: scipy.stats rv object
    treatment: scipy.stats rv object
    n: sample size
    """
control_sample = control.rvs(n)
treatment_sample = treatment.rvs(n)
thresh = (control.mean() + treatment.mean()) / 2
control_above = sum(control_sample > thresh)
treatment_below = sum(treatment_sample < thresh)
overlap = (control_above + treatment_below) / n
superiority = sum(x > y for x, y in zip(treatment_sample, control_sample)) / n
return overlap, superiority
def plot_pdfs(cohen_d=2):
    """Plot PDFs for distributions that differ by some number of stds.
    cohen_d: number of standard deviations between the means
    """
control = scipy.stats.norm(0, 1)
treatment = scipy.stats.norm(cohen_d, 1)
xs, ys = eval_pdf(control)
pyplot.fill_between(xs, ys, label='control', color=COLOR3, alpha=0.7)
xs, ys = eval_pdf(treatment)
pyplot.fill_between(xs, ys, label='treatment', color=COLOR2, alpha=0.7)
o, s = overlap_superiority(control, treatment)
print('overlap', o)
print('superiority', s)
plot_pdfs(2)
slider = widgets.FloatSliderWidget(min=0, max=4, value=2)
interact(plot_pdfs, cohen_d=slider)
None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading data
Step2: encoding categorical features
Step3: splitting data into test and train
Step4: separating features and class in both test and train sets
Step5: getting best parameters
Step6: creating model with best parameter combination and reporting metrics
Step7: creating a test file with predicted results to visualize
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors import KNeighborsClassifier
import xgboost as xgb
import numpy as np
kobe = pd.read_csv('data.csv', sep=',')
kobe= kobe[np.isfinite(kobe['shot_made_flag'])]
del kobe['lat']
del kobe['lon']
del kobe['game_id']
del kobe['team_id']
del kobe['team_name']
kobe_2 = pd.read_csv('data.csv', sep=',')
kobe_2= kobe_2[np.isfinite(kobe_2['shot_made_flag'])]
del kobe_2['lat']
del kobe_2['lon']
del kobe_2['game_id']
del kobe_2['team_id']
del kobe_2['team_name']
mt_up = preprocessing.LabelEncoder()
kobe.matchup = mt_up.fit_transform(kobe.matchup )
#kobe_2.matchup = mt_up.fit_transform(kobe.matchup )
opp = preprocessing.LabelEncoder()
kobe.opponent = opp.fit_transform(kobe.opponent )
#kobe_2.opponent = opp.fit_transform(kobe.opponent )
dt = preprocessing.LabelEncoder()
kobe.game_date = dt.fit_transform(kobe.game_date )
#kobe_2.game_date = dt.fit_transform(kobe.game_date )
at = preprocessing.LabelEncoder()
kobe.action_type = at.fit_transform(kobe.action_type )
#kobe_2.action_type = at.fit_transform(kobe.action_type )
cst = preprocessing.LabelEncoder()
kobe.combined_shot_type = cst.fit_transform(kobe.combined_shot_type )
#kobe_2.combined_shot_type = cst.fit_transform(kobe.combined_shot_type )
seson = preprocessing.LabelEncoder()
kobe.season = seson.fit_transform(kobe.season )
#kobe_2.season = seson.fit_transform(kobe.season )
st = preprocessing.LabelEncoder()
kobe.shot_type = st.fit_transform(kobe.shot_type )
#kobe_2.shot_type = st.fit_transform(kobe.shot_type )
sza = preprocessing.LabelEncoder()
kobe.shot_zone_area = sza.fit_transform(kobe.shot_zone_area )
#kobe_2.shot_zone_area = sza.fit_transform(kobe.shot_zone_area )
szb = preprocessing.LabelEncoder()
kobe.shot_zone_basic = szb.fit_transform(kobe.shot_zone_basic )
#kobe_2.shot_zone_basic = szb.fit_transform(kobe.shot_zone_basic )
szr = preprocessing.LabelEncoder()
kobe.shot_zone_range = szr.fit_transform(kobe.shot_zone_range )
#kobe_2.shot_zone_range = szr.fit_transform(kobe.shot_zone_range )
from sklearn.cross_validation import train_test_split
# Generate the training set. Set random_state to be able to replicate results.
train = kobe.sample(frac=0.6, random_state=1)
train_2 = kobe_2.sample(frac=0.6, random_state=1)
# Select anything not in the training set and put it in the testing set.
test = kobe.loc[~kobe.index.isin(train.index)]
test_2 = kobe_2.loc[~kobe_2.index.isin(train_2.index)]
columns = kobe.columns.tolist()
columns = [c for c in columns if c not in ["shot_made_flag","team_id","team_name"]]
kobe_train_x =train[columns]
kobe_test_x =test[columns]
kobe_train_y=train['shot_made_flag']
kobe_test_y=test['shot_made_flag']
print(kobe_train_x.shape)
print(kobe_test_x.shape)
print(kobe_train_y.shape)
print(kobe_test_y.shape)
def optimization(depth, n_est,l_r):
maxacc=0
best_depth=0
best_n_est=0
best_l_r=0
for i in range(1,depth):
for j in n_est:
for k in l_r:
gbm = xgb.XGBClassifier(max_depth=i, n_estimators=j, learning_rate=k).fit(kobe_train_x, kobe_train_y)
predicted = gbm.predict(kobe_test_x)
key=str(i)+"_"+str(j)+"_"+str(k)
accu=accuracy_score(kobe_test_y, predicted)
                if(accu>maxacc):
                    maxacc=accu
                    maxkey=key  # remember the best parameter combination for the summary print below
                    best_depth=i
                    best_n_est=j
                    best_l_r=k
print(maxkey+" "+str(maxacc))
return(best_depth,best_n_est,best_l_r)
n_est=[5,10,20,50,100,150,200,250,300,350,400,450,500,550,600,650,700,750,800,850,900,950,1000]
depth=10
l_r = [0.0001, 0.001, 0.01,0.05, 0.1, 0.2, 0.3]
best_depth,best_n_est,best_l_r=optimization(depth,n_est,l_r)
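# Note: the hand-rolled triple loop above is essentially a grid search scored on the held-out
# test set. For comparison, a (hypothetical, untested here) sketch of the same idea with
# scikit-learn's GridSearchCV, which instead cross-validates on the training set:
# from sklearn.model_selection import GridSearchCV
# param_grid = {'max_depth': list(range(1, depth)), 'n_estimators': n_est, 'learning_rate': l_r}
# search = GridSearchCV(xgb.XGBClassifier(), param_grid, scoring='accuracy', cv=3)
# search.fit(kobe_train_x, kobe_train_y)
# print(search.best_params_, search.best_score_)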
#hard coded the best features
gbm = xgb.XGBClassifier(max_depth=4, n_estimators=600, learning_rate=0.01).fit(kobe_train_x, kobe_train_y)
predicted = gbm.predict(kobe_test_x)
# summarize the fit of the model
print(metrics.classification_report(kobe_test_y, predicted))
print("Confusion Matrix")
print(metrics.confusion_matrix(kobe_test_y, predicted))
accuracy=accuracy_score(kobe_test_y, predicted)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
test_2['predicted']=predicted
test_2.to_csv(path_or_buf='test_with_predictions.csv', sep=',')
test_2.head(10)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'ec-earth3-veg-lr', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
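# Example of selecting one of the valid choices above (illustrative only -
# EC-Earth setups commonly couple via OASIS3-MCT, but confirm against the actual configuration):
# DOC.set_value("OASIS3-MCT")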
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
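# Example of setting a boolean-valued property (illustrative only - the actual
# value depends on how the coupled fluxes are computed in this configuration):
# DOC.set_value(False)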
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
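# Example of recording one of the valid provision codes listed above (illustrative only):
# DOC.set_value("Y")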
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|