| Unnamed: 0 (int64, 0–15.9k) | cleaned_code (string, 67–124k chars, ⌀ allowed) | cleaned_prompt (string, 168–30.3k chars, ⌀ allowed) |
|---|---|---|
3,400
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install "gym>=0.21.0"
!pip install tf-agents
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import numpy as np
from tf_agents.environments import py_environment
from tf_agents.environments import tf_environment
from tf_agents.environments import tf_py_environment
from tf_agents.environments import utils
from tf_agents.specs import array_spec
from tf_agents.environments import wrappers
from tf_agents.environments import suite_gym
from tf_agents.trajectories import time_step as ts
class PyEnvironment(object):
def reset(self):
        """Return initial_time_step."""
self._current_time_step = self._reset()
return self._current_time_step
def step(self, action):
        """Apply action and return new time_step."""
if self._current_time_step is None:
return self.reset()
self._current_time_step = self._step(action)
return self._current_time_step
def current_time_step(self):
return self._current_time_step
def time_step_spec(self):
        """Return time_step_spec."""
@abc.abstractmethod
def observation_spec(self):
        """Return observation_spec."""
@abc.abstractmethod
def action_spec(self):
        """Return action_spec."""
@abc.abstractmethod
def _reset(self):
        """Return initial_time_step."""
@abc.abstractmethod
def _step(self, action):
        """Apply action and return new time_step."""
environment = suite_gym.load('CartPole-v0')
print('action_spec:', environment.action_spec())
print('time_step_spec.observation:', environment.time_step_spec().observation)
print('time_step_spec.step_type:', environment.time_step_spec().step_type)
print('time_step_spec.discount:', environment.time_step_spec().discount)
print('time_step_spec.reward:', environment.time_step_spec().reward)
action = np.array(1, dtype=np.int32)
time_step = environment.reset()
print(time_step)
while not time_step.is_last():
time_step = environment.step(action)
print(time_step)
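# A minimal extra sketch: once an episode has ended (or before the first reset),
# calling step() again triggers an automatic reset and returns the FIRST
# TimeStep of a new episode, as implemented in the step() wrapper above.
first_step = environment.step(action)
print(first_step.step_type)  # expected: FIRST (0), since the reset ran implicitly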
class CardGameEnv(py_environment.PyEnvironment):
def __init__(self):
self._action_spec = array_spec.BoundedArraySpec(
shape=(), dtype=np.int32, minimum=0, maximum=1, name='action')
self._observation_spec = array_spec.BoundedArraySpec(
shape=(1,), dtype=np.int32, minimum=0, name='observation')
self._state = 0
self._episode_ended = False
def action_spec(self):
return self._action_spec
def observation_spec(self):
return self._observation_spec
def _reset(self):
self._state = 0
self._episode_ended = False
return ts.restart(np.array([self._state], dtype=np.int32))
def _step(self, action):
if self._episode_ended:
# The last action ended the episode. Ignore the current action and start
# a new episode.
return self.reset()
# Make sure episodes don't go on forever.
if action == 1:
self._episode_ended = True
elif action == 0:
new_card = np.random.randint(1, 11)
self._state += new_card
else:
raise ValueError('`action` should be 0 or 1.')
if self._episode_ended or self._state >= 21:
reward = self._state - 21 if self._state <= 21 else -21
return ts.termination(np.array([self._state], dtype=np.int32), reward)
else:
return ts.transition(
np.array([self._state], dtype=np.int32), reward=0.0, discount=1.0)
environment = CardGameEnv()
utils.validate_py_environment(environment, episodes=5)
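# A hypothetical counter-example (sketch, not part of the validated environment
# above): observations that violate the declared spec. Here
# validate_py_environment should raise a ValueError, because the observation
# dtype (int64) does not match the int32 observation_spec.
class BrokenCardGameEnv(CardGameEnv):
    def _reset(self):
        self._state = 0
        self._episode_ended = False
        return ts.restart(np.array([self._state], dtype=np.int64))  # wrong dtype
# utils.validate_py_environment(BrokenCardGameEnv(), episodes=1)  # -> ValueError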
get_new_card_action = np.array(0, dtype=np.int32)
end_round_action = np.array(1, dtype=np.int32)
environment = CardGameEnv()
time_step = environment.reset()
print(time_step)
cumulative_reward = time_step.reward
for _ in range(3):
time_step = environment.step(get_new_card_action)
print(time_step)
cumulative_reward += time_step.reward
time_step = environment.step(end_round_action)
print(time_step)
cumulative_reward += time_step.reward
print('Final Reward = ', cumulative_reward)
env = suite_gym.load('Pendulum-v1')
print('Action Spec:', env.action_spec())
discrete_action_env = wrappers.ActionDiscretizeWrapper(env, num_actions=5)
print('Discretized Action Spec:', discrete_action_env.action_spec())
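# A minimal sketch: the wrapped environment behaves like any other
# PyEnvironment, but now expects discrete action indices that the wrapper maps
# back onto the original continuous action range.
spec = discrete_action_env.action_spec()
action = np.full(spec.shape, 2, dtype=spec.dtype)  # index 2 of the 5 bins
time_step = discrete_action_env.reset()
time_step = discrete_action_env.step(action)
print(time_step.reward)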
class TFEnvironment(object):
def time_step_spec(self):
        """Describes the `TimeStep` tensors returned by `step()`."""
def observation_spec(self):
        """Defines the `TensorSpec` of observations provided by the environment."""
def action_spec(self):
        """Describes the TensorSpecs of the action expected by `step(action)`."""
def reset(self):
        """Returns the current `TimeStep` after resetting the Environment."""
return self._reset()
def current_time_step(self):
        """Returns the current `TimeStep`."""
return self._current_time_step()
def step(self, action):
        """Applies the action and returns the new `TimeStep`."""
return self._step(action)
@abc.abstractmethod
def _reset(self):
        """Returns the current `TimeStep` after resetting the Environment."""
@abc.abstractmethod
def _current_time_step(self):
        """Returns the current `TimeStep`."""
@abc.abstractmethod
def _step(self, action):
        """Applies the action and returns the new `TimeStep`."""
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
print(isinstance(tf_env, tf_environment.TFEnvironment))
print("TimeStep Specs:", tf_env.time_step_spec())
print("Action Specs:", tf_env.action_spec())
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
# reset() creates the initial time_step after resetting the environment.
time_step = tf_env.reset()
num_steps = 3
transitions = []
reward = 0
for i in range(num_steps):
action = tf.constant([i % 2])
# applies the action and returns the new TimeStep.
next_time_step = tf_env.step(action)
transitions.append([time_step, action, next_time_step])
reward += next_time_step.reward
time_step = next_time_step
np_transitions = tf.nest.map_structure(lambda x: x.numpy(), transitions)
print('\n'.join(map(str, np_transitions)))
print('Total reward:', reward.numpy())
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
time_step = tf_env.reset()
rewards = []
steps = []
num_episodes = 5
for _ in range(num_episodes):
episode_reward = 0
episode_steps = 0
while not time_step.is_last():
action = tf.random.uniform([1], 0, 2, dtype=tf.int32)
time_step = tf_env.step(action)
episode_steps += 1
episode_reward += time_step.reward.numpy()
rewards.append(episode_reward)
steps.append(episode_steps)
time_step = tf_env.reset()
num_steps = np.sum(steps)
avg_length = np.mean(steps)
avg_reward = np.mean(rewards)
print('num_episodes:', num_episodes, 'num_steps:', num_steps)
print('avg_length', avg_length, 'avg_reward:', avg_reward)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Environments
Step9: Python Environments
Step10: In addition to the step() method, environments also provide a reset() method that starts a new sequence and provides an initial TimeStep. It is not necessary to call the reset method explicitly. We assume that environments reset automatically, either when they get to the end of an episode or when step() is called the first time.
Step11: So we see that the environment expects actions of type int64 in [0, 1] and returns TimeSteps where the observations are a float32 vector of length 4 and the discount factor is a float32 in [0.0, 1.0]. Now, let's try to take a fixed action (1,) for a whole episode.
Step12: Creating your own Python Environment
Step13: Let's make sure we defined the above environment correctly. When creating your own environment, you must make sure that the generated observations and time_steps follow the correct shapes and types defined in your specs. These are used to generate the TensorFlow graph, so getting them wrong can create hard-to-debug problems.
Step14: Now that we know the environment is working as intended, let's run this environment using a fixed policy
Step15: Environment Wrappers
Step25: The wrapped discrete_action_env is an instance of py_environment.PyEnvironment and can be treated like a regular Python environment.
Step26: The current_time_step() method returns the current time_step and initializes the environment if needed.
Step27: Note the specs are now of type TensorSpec.
Step28: Whole Episodes
|
3,401
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip uninstall -y opencv-python
!pip install -U -q "tensorflow>=2.9.0" "tf-models-official"
import pprint
import tempfile
from IPython import display
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_models as tfm
# These are not in the tfm public API for v2.9. They will be available in v2.10
from official.vision.serving import export_saved_model_lib
import official.core.train_lib
exp_config = tfm.core.exp_factory.get_exp_config('resnet_imagenet')
tfds_name = 'cifar10'
ds_info = tfds.builder(tfds_name).info
ds_info
# Configure model
exp_config.task.model.num_classes = 10
exp_config.task.model.input_size = list(ds_info.features["image"].shape)
exp_config.task.model.backbone.resnet.model_id = 18
# Configure training and testing data
batch_size = 128
exp_config.task.train_data.input_path = ''
exp_config.task.train_data.tfds_name = tfds_name
exp_config.task.train_data.tfds_split = 'train'
exp_config.task.train_data.global_batch_size = batch_size
exp_config.task.validation_data.input_path = ''
exp_config.task.validation_data.tfds_name = tfds_name
exp_config.task.validation_data.tfds_split = 'test'
exp_config.task.validation_data.global_batch_size = batch_size
logical_device_names = [logical_device.name for logical_device in tf.config.list_logical_devices()]
if 'GPU' in ''.join(logical_device_names):
print('This may be broken in Colab.')
device = 'GPU'
elif 'TPU' in ''.join(logical_device_names):
print('This may be broken in Colab.')
device = 'TPU'
else:
print('Running on CPU is slow, so only train for a few steps.')
device = 'CPU'
if device=='CPU':
train_steps = 20
exp_config.trainer.steps_per_loop = 5
else:
train_steps=5000
exp_config.trainer.steps_per_loop = 100
exp_config.trainer.summary_interval = 100
exp_config.trainer.checkpoint_interval = train_steps
exp_config.trainer.validation_interval = 1000
exp_config.trainer.validation_steps = ds_info.splits['test'].num_examples // batch_size
exp_config.trainer.train_steps = train_steps
exp_config.trainer.optimizer_config.learning_rate.type = 'cosine'
exp_config.trainer.optimizer_config.learning_rate.cosine.decay_steps = train_steps
exp_config.trainer.optimizer_config.learning_rate.cosine.initial_learning_rate = 0.1
exp_config.trainer.optimizer_config.warmup.linear.warmup_steps = 100
pprint.pprint(exp_config.as_dict())
display.Javascript("google.colab.output.setIframeHeight('300px');")
logical_device_names = [logical_device.name for logical_device in tf.config.list_logical_devices()]
if exp_config.runtime.mixed_precision_dtype == tf.float16:
tf.keras.mixed_precision.set_global_policy('mixed_float16')
if 'GPU' in ''.join(logical_device_names):
distribution_strategy = tf.distribute.MirroredStrategy()
elif 'TPU' in ''.join(logical_device_names):
tf.tpu.experimental.initialize_tpu_system()
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='/device:TPU_SYSTEM:0')
distribution_strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
print('Warning: this will be really slow.')
distribution_strategy = tf.distribute.OneDeviceStrategy(logical_device_names[0])
with distribution_strategy.scope():
model_dir = tempfile.mkdtemp()
task = tfm.core.task_factory.get_task(exp_config.task, logging_dir=model_dir)
tf.keras.utils.plot_model(task.build_model(), show_shapes=True)
for images, labels in task.build_inputs(exp_config.task.train_data).take(1):
print()
print(f'images.shape: {str(images.shape):16} images.dtype: {images.dtype!r}')
print(f'labels.shape: {str(labels.shape):16} labels.dtype: {labels.dtype!r}')
plt.hist(images.numpy().flatten());
label_info = ds_info.features['label']
label_info.int2str(1)
def show_batch(images, labels, predictions=None):
plt.figure(figsize=(10, 10))
min = images.numpy().min()
max = images.numpy().max()
delta = max - min
for i in range(12):
plt.subplot(6, 6, i + 1)
plt.imshow((images[i]-min) / delta)
if predictions is None:
plt.title(label_info.int2str(labels[i]))
else:
if labels[i] == predictions[i]:
color = 'g'
else:
color = 'r'
plt.title(label_info.int2str(predictions[i]), color=color)
plt.axis("off")
plt.figure(figsize=(10, 10))
for images, labels in task.build_inputs(exp_config.task.train_data).take(1):
show_batch(images, labels)
plt.figure(figsize=(10, 10));
for images, labels in task.build_inputs(exp_config.task.validation_data).take(1):
show_batch(images, labels)
model, eval_logs = tfm.core.train_lib.run_experiment(
distribution_strategy=distribution_strategy,
task=task,
mode='train_and_eval',
params=exp_config,
model_dir=model_dir,
run_post_eval=True)
tf.keras.utils.plot_model(model, show_shapes=True)
for key, value in eval_logs.items():
print(f'{key:20}: {value.numpy():.3f}')
for images, labels in task.build_inputs(exp_config.task.train_data).take(1):
predictions = model.predict(images)
predictions = tf.argmax(predictions, axis=-1)
show_batch(images, labels, tf.cast(predictions, tf.int32))
if device=='CPU':
plt.suptitle('The model was only trained for a few steps, it is not expected to do well.')
# Saving and exporting the trained model
export_saved_model_lib.export_inference_graph(
input_type='image_tensor',
batch_size=1,
input_image_size=[32, 32],
params=exp_config,
checkpoint_path=tf.train.latest_checkpoint(model_dir),
export_dir='./export/')
# Importing SavedModel
imported = tf.saved_model.load('./export/')
model_fn = imported.signatures['serving_default']
plt.figure(figsize=(10, 10))
for data in tfds.load('cifar10', split='test').batch(12).take(1):
predictions = []
for image in data['image']:
index = tf.argmax(model_fn(image[tf.newaxis, ...])['logits'], axis=1)[0]
predictions.append(index)
show_batch(data['image'], data['label'], predictions)
if device=='CPU':
plt.suptitle('The model was only trained for a few steps, it is not expected to do better than random.')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image classification with Model Garden
Step2: Import TensorFlow, TensorFlow Datasets, and a few helper libraries.
Step3: The tensorflow_models package contains the ResNet vision model, and the official.vision.serving model contains the function to save and export the tuned model.
Step4: Configure the ResNet-18 model for the Cifar-10 dataset
Step5: Adjust the model and dataset configurations so that the model works with Cifar-10 (cifar10).
Step6: Adjust the trainer configuration.
Step7: Print the modified configuration.
Step8: Set up the distribution strategy.
Step9: Create the Task object (tfm.core.base_task.Task) from the config_definitions.TaskConfig.
Step10: Visualize the training data
Step11: Use ds_info (which is an instance of tfds.core.DatasetInfo) to look up the text descriptions of each class ID.
Step12: Visualize a batch of the data.
Step13: Visualize the testing data
Step14: Train and evaluate
Step15: Print the accuracy, top_5_accuracy, and validation_loss evaluation metrics.
Step16: Run a batch of the processed training data through the model, and view the results
Step17: Export a SavedModel
Step18: Test the exported model.
Step19: Visualize the predictions.
|
3,402
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import pastas as ps
import matplotlib.pyplot as plt
ps.set_log_level("ERROR")
ps.show_versions(numba=True, lmfit=True)
head = pd.read_csv("data_wagna/head_wagna.csv", index_col=0, parse_dates=True,
squeeze=True, skiprows=2).loc["2006":]
evap = pd.read_csv("data_wagna/evap_wagna.csv", index_col=0, parse_dates=True,
squeeze=True, skiprows=2)
rain = pd.read_csv("data_wagna/rain_wagna.csv", index_col=0, parse_dates=True,
squeeze=True, skiprows=2)
ax = head.plot(figsize=(10,3), marker=".", linestyle=" ", color="k")
ax1 = plt.axes([0.95,0.2,0.3,0.68])
ax1.semilogx(ps.stats.acf(head).values, color="k") # Plot on log-scale
ax.set_title("Groundwater level [MASL]")
ax1.set_title("Autocorrelation");
mls_ar = {}
dts = 11
# Model settings
tmin = "2007-01-01"
tmax = "2016-12-31"
solver = ps.LmfitSolve
# The two models we compare here
config = {
"Linear": [ps.FourParam, ps.rch.Linear()],
"Nonlinear": [ps.Exponential, ps.rch.FlexModel()],
}
for name, [rfunc, rch] in config.items():
for dt in range(1, dts, 2):
# Create the basic Pastas model
ml_name = f"{name}_{dt}"
ml = ps.Model(head.iloc[::dt], name=ml_name)
# Add the recharge model
sm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=rfunc,
name="rch")
ml.add_stressmodel(sm)
# Change parameter settings for non-linear recharge model
if name == "Nonlinear":
ml.set_parameter("rch_srmax", vary=False)
ml.set_parameter("rch_kv", vary=True)
ml.set_parameter("constant_d", initial=262)
# Solve the model
ml.solve(tmin=tmin, tmax=tmax, report=False, solver=solver,
method="least_squares")
mls_ar[ml_name] = ml
mls_arma = {}
for ml_name, ml in mls_ar.items():
ml = ml.copy(name=ml.name)
#Change the noise model
ml.del_noisemodel()
ml.add_noisemodel(ps.ArmaModel())
# Solve the model
ml.solve(tmin=tmin, tmax=tmax, report=False, solver=solver,
method="least_squares")
mls_arma[ml_name] = ml
data = pd.DataFrame(index=range(1, dts, 2), columns=config.keys())
for ml in mls_ar.values():
name, i = ml.name.split("_")
n = ml.noise(tmin=tmin, tmax=tmax).asfreq(f"{i}D").fillna(0.0)
data.loc[int(i), name] = ps.stats.durbin_watson(n)[0]
data2 = pd.DataFrame(index=range(1, dts, 2), columns=config.keys())
for ml in mls_arma.values():
name, i = ml.name.split("_")
n = ml.noise(tmin=tmin, tmax=tmax).asfreq(f"{i}D").fillna(0.0)
data2.loc[int(i), name] = ps.stats.durbin_watson(n)[0]
# Plot the results
fig, [ax1, ax2] = plt.subplots(2,1, sharex=True, figsize=(5, 4), sharey=True)
# AR1 Model
data.plot(ax=ax1, marker=".", legend=False)
ax1.set_ylabel("DW [-]")
ax1.axhline(2., c="k", linestyle="--", zorder=-1)
ax1.text(1, 2.07, "Line of no autocorrelation")
ax1.grid()
ax1.set_title("AR(1) Noise model")
# ArmaModel
data2.plot(ax=ax2, marker=".", legend=False)
ax2.set_ylabel("DW [-]")
ax2.set_yticks([1, 1.5, 2.])
ax2.axhline(2., c="k", linestyle="--", zorder=-10)
ax2.set_ylim(0.5, 2.3)
ax2.grid()
ax2.legend(ncol=3, loc=4)
ax2.set_xlabel("$\Delta t$ [days]")
ax2.set_title("ARMA(1,1) Noise model")
plt.tight_layout()
mls = {}
dt = 10 # Select the time interval between GWL observations
for name, [rfunc, rch] in config.items():
for start in range(0, dt, 2):
ml_name = f"{name}_{start+1}"
ml = ps.Model(head.iloc[start::dt], name=ml_name)
# Add the recharge model
sm = ps.RechargeModel(rain, evap, recharge=rch, rfunc=rfunc, name="rch")
ml.add_stressmodel(sm)
if name == "Nonlinear":
ml.set_parameter("rch_srmax", vary=False)
ml.set_parameter("rch_kv", vary=True)
ml.set_parameter("constant_d", initial=262)
# Solve the model
ml.add_noisemodel(ps.ArmaModel())
ml.solve(tmin=tmin, tmax=tmax, report=False, solver=solver,
method="least_squares")
mls[ml_name] = ml
# Extract the optimal parameters and estimated standard errors
data = {}
for name in config.keys():
ml = mls["{}_1".format(name)]
p = ml.parameters
mi = pd.MultiIndex.from_product([p.index[p.vary == True].to_list(), ["opt", "std"]])
    data[name] = pd.DataFrame(data=np.nan, index=range(1, dt, 2), columns=mi, dtype=float)
for ml in mls.values():
name, i = ml.name.split("_")
df = data[name]
for par in ml.parameters.index[ml.parameters.vary == True]:
df.loc[int(i), (par, "opt")] = ml.parameters.loc[par, "optimal"]
df.loc[int(i), (par, "std")] = ml.parameters.loc[par, "stderr"]
#df.loc[:, (slice(None), "std")] *= 1.96
df = pd.concat(data, axis=1)
# Plot the results
fig, axes = plt.subplots(8,2, sharex=True, figsize=(9,7))
axes = axes.flatten()
kwargs = dict(legend=False, color="0", capsize=2, linestyle="-", marker=".")
labels = [["$A$", "$n$", "$a$", "$b$", "$f$", "$d$", "$\\alpha$", "$\\beta$"],
["$A$", "$a$", "$k_s$", "$\\gamma$", "$k_v$", "$d$", "$\\alpha$", "$\\beta$"]]
for j, rch in enumerate(["Linear", "Nonlinear"]):
axes[j].set_title(rch)
for i, par in enumerate(df[rch].columns.get_level_values(0).unique()):
df.xs((rch, par), axis=1, level=[0, 1]).plot(ax=axes[i*2+j], yerr="std", **kwargs)
axes[i*2+j].set_ylabel(labels[j][i])
for i in range(2):
axes[-i-1].set_xlabel("Calibration")
plt.tight_layout()
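# A quick follow-up sketch: quantify how correlated the estimated parameters
# are across the calibrations, using the `df` of optimal values built above.
for rch_name in ["Linear", "Nonlinear"]:
    opt_values = df[rch_name].xs("opt", axis=1, level=1).astype(float)
    print(rch_name, "- parameter correlations across calibrations:")
    print(opt_values.corr().round(2))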
rch = {"Linear": pd.DataFrame(columns=range(dt, 1)),
"Nonlinear": pd.DataFrame(columns=range(dt, 1))}
for ml in mls.values():
name, i = ml.name.split("_")
rch[name].loc[:, i] = ml.get_stress("rch", tmin=tmin,
tmax="2019-12-31").resample("A").sum()
df1 = pd.concat(rch, axis=1)
df1.index = df1.index.year
fig, [ax1, ax2, ax3] = plt.subplots(3,1, figsize=(6,6))
for ml in mls.values():
if ml.name.split("_")[0] == "Linear":
ax = ax1
color = "C0"
else:
ax = ax2
color = "C1"
ml.oseries.plot(ax=ax, linestyle="-", marker=" ", c="k")
ml.simulate(tmax="2020").plot(ax=ax, alpha=0.5, c=color, x_compat=True)
ax.set_xticks([])
ax.set_ylabel("GWL [m]")
ax.set_xlim("2007", "2020")
df1.groupby(level=0, axis=1).mean().plot.bar(yerr=1.96 * df1.groupby(level=0, axis=1).std(), ax=ax3, width=0.7)
plt.legend(ncol=3, loc=2, bbox_to_anchor=(0, 3.7))
plt.ylabel("R [mm yr$^{-1}$]")
plt.xlabel("");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Read Data and plot autocorrelation
Step2: 2. Run models with AR(1) noise model
Step3: 3. Run models with ARMA(1,1) noise model
Step4: 4. Plot and compare the results
Step5: 5. Consistency of parameter estimates
Step6: The plot above shows the estimated optimal parameters and the 95% confidence intervals of the parameters. While most of the optimal parameters are relatively stable between calibrations, some parameters show larger variations. For the linear model these are, for example, $a$ and $n$, while for the non-linear model these are $k_s$ and $\gamma$. The values of these parameters seem correlated, and it might thus be difficult to estimate the individual parameter values.
|
3,403
|
<ASSISTANT_TASK:>
Python Code:
from jyquickhelper import add_notebook_menu
add_notebook_menu()
x = 5
y = 10
z = x + y
print (z) # displays z
x = 2
y = x + 1
print (y)
x += 5
print (x)
a = 0
for i in range (0, 10) :
    a = a + i # this line is repeated ten times
print (a)
a = 10
if a > 0 :
    print(a) # only one of the two blocks is taken into account
else :
a -= 1
print (a)
a = 10
print (a) # what is the difference
print ("a") # between the two lines
s = "texte"
s += "c"
print (s)
print("2" + "3")
print(2+3)
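# A short extra illustration: to mix numbers and strings, convert explicitly.
print(int("2") + 3)   # the string becomes an integer -> 5
print(str(2) + "3")   # the integer becomes a string -> "23"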
a = 5
a + 4
print (a) # we would like to see 9 but 5 appears
a = 0
for i in range (0, 10)
a = a + i
print (a)
a = 0
for i in range (0, 10):
a = a + i
print (a)
a = 0
s = "e"
print (a + s)
a = 0
for i in range (0, 10) :
a = (a + (i+2)*3
print (a)
14%2, 233%2
%load_ext tutormagic
%%tutor --lang python3
a = 0
for i in range (0, 10):
a = a + i
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Part 1
Step2: A program is often used to automate a calculation, such as the monthly unemployment rate, the inflation rate, or tomorrow's weather... To repeat the same calculation on different values, you must be able to describe the calculation without knowing what those values are. A simple way is to name them
Step3: When programming, you spend your time writing calculations from variables and storing the results in other variables, or even in the same ones. Writing y=x+5 means adding 5 to x and storing the result in y. Writing x += 5 means adding 5 to x when the value x held before the operation is no longer needed.
Step4: The print keyword has no effect on the program. However, it displays the state of a variable at the moment the print statement is executed.
Step5: Character strings
Step6: Every value has a type, and the type determines which operations can be applied to it. 2 + 2 makes 4 for everyone. 2 + "2" makes four for a human but is incomprehensible to the computer, because it adds two things of different kinds (apples and oranges).
Step7: Part 2
Step8: A syntax error
Step9: Another syntax error
Step10: A forbidden operation
Step11: An odd number of...
Step12: Part 3
Step13: Tutor Magic
|
3,404
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import keras
import keras.backend as K
from keras.layers import Conv2D
from keras.models import Sequential
%matplotlib inline
inputs = np.random.randint(1, 9, size=(4, 4))
inputs
def show_matrix(m, color, cmap, title=None):
rows, cols = len(m), len(m[0])
fig, ax = plt.subplots(figsize=(cols, rows))
ax.set_yticks(list(range(rows)))
ax.set_xticks(list(range(cols)))
ax.xaxis.tick_top()
if title is not None:
ax.set_title('{} {}'.format(title, m.shape), y=-0.5/rows)
plt.imshow(m, cmap=cmap, vmin=0, vmax=1)
for r in range(rows):
for c in range(cols):
text = '{:>3}'.format(int(m[r][c]))
ax.text(c-0.2, r+0.15, text, color=color, fontsize=15)
plt.show()
def show_inputs(m, title='Inputs'):
show_matrix(m, 'b', plt.cm.Vega10, title)
def show_kernel(m, title='Kernel'):
show_matrix(m, 'r', plt.cm.RdBu_r, title)
def show_output(m, title='Output'):
show_matrix(m, 'g', plt.cm.GnBu, title)
show_inputs(inputs)
show_inputs(np.random.randint(100, 255, size=(4, 4)))
kernel = np.random.randint(1, 5, size=(3, 3))
kernel
show_kernel(kernel)
def convolve(m, k):
m_rows, m_cols = len(m), len(m[0]) # matrix rows, cols
k_rows, k_cols = len(k), len(k[0]) # kernel rows, cols
rows = m_rows - k_rows + 1 # result matrix rows
    cols = m_cols - k_cols + 1 # result matrix cols
v = np.zeros((rows, cols), dtype=m.dtype) # result matrix
for r in range(rows):
for c in range(cols):
v[r][c] = np.sum(m[r:r+k_rows, c:c+k_cols] * k) # sum of the element-wise multiplication
return v
output = convolve(inputs, kernel)
output
show_output(output)
output[0][0]
inputs[0:3, 0:3]
kernel
np.sum(inputs[0:3, 0:3] * kernel) # sum of the element-wise multiplication
def convolution_matrix(m, k):
m_rows, m_cols = len(m), len(m[0]) # matrix rows, cols
k_rows, k_cols = len(k), len(k[0]) # kernel rows, cols
# output matrix rows and cols
rows = m_rows - k_rows + 1
    cols = m_cols - k_cols + 1
# convolution matrix
v = np.zeros((rows*cols, m_rows, m_cols))
for r in range(rows):
for c in range(cols):
i = r * cols + c
v[i][r:r+k_rows, c:c+k_cols] = k
v = v.reshape((rows*cols), -1)
return v, rows, cols
C, rows, cols = convolution_matrix(inputs, kernel)
show_kernel(C, 'Convolution Matrix')
def column_vector(m):
return m.flatten().reshape(-1, 1)
x = column_vector(inputs)
x
show_inputs(x)
output = C @ x
output
show_output(output)
output = output.reshape(rows, cols)
output
show_output(output)
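# A quick sanity check (sketch): the matrix form reproduces the direct
# convolution exactly.
assert np.allclose(convolve(inputs, kernel),
                   (C @ column_vector(inputs)).reshape(rows, cols))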
show_kernel(C.T, 'Transposed Convolution Matrix')
x2 = np.random.randint(1, 5, size=(4, 1))
x2
show_inputs(x2)
output2 = (C.T @ x2)
output2
show_output(output2)
output2 = output2.reshape(4, 4)
output2
show_output(output2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Convolution Operation
Step2: The matrix is visualized as below. The higher the intensity, the brighter the cell color is.
Step3: We are using small values so that the display looks simpler than with big values. If we used 0-255, just like a grayscale image, it'd look like below.
Step4: Applying a convolution operation to these values can produce big values that are hard to display nicely.
Step5: Convolution
Step6: The result of the convolution operation is as follows
Step7: One important point of such a convolution operation is that it keeps the positional connectivity between the input values and the output values.
Step8: So, 9 values in the input matrix are used to produce 1 value in the output matrix.
Step9: If we reshape the input into a column vector, we can use matrix multiplication to perform the convolution.
Step10: We reshape it into the desired shape.
Step11: This is exactly the same output as before.
Step12: Let's make a new input whose shape is 4x1.
Step13: We matrix-multiply C.T with x2 to up-sample x2 from 4 (2x2) to 16 (4x4). This operation has the same connectivity as the convolution but in the backward direction.
|
3,405
|
<ASSISTANT_TASK:>
Python Code:
def hello_world():
print('hello world')
# wrap hello world in a function that does logging
def wrap_hello():
print('Enter: hello_world')
hello_world()
print('Exit: hello_world')
wrap_hello()
# to wrap any function at all, write a generic wrapper that takes the a function as input
def logthis(func):
print('Enter: {}'.format(func.__name__))
func()
print('Exit: {}'.format(func.__name__))
logthis(hello_world)
# add the ability to handle arbitrary parameters and return types
def logthis(func):
def wrapper(*args, **kwargs):
print('Enter: {}'.format(func.__name__))
result = func(*args, **kwargs)
print('Exit: {}'.format(func.__name__))
return result
return wrapper # return the inner function
logged_hello = logthis(hello_world)
logged_hello()
# now you can replace hello_world with the wrapped function
hello_world=logged_hello
hello_world()
# the only problem is that function metadata is wrong
hello_world.__name__
#instead use functools.wraps and a decorator on your function, which otherwise is equivalent
from functools import wraps
def logthis(func):
@wraps(func)
def wrapper(*args, **kwargs):
print('Enter: {}'.format(func.__name__))
result = func(*args, **kwargs)
print('Exit: {}'.format(func.__name__))
return result
return wrapper # return the inner function
hello_world=None
@logthis
def hello_world():
print('hello world')
hello_world()
#if you use wraps, you can also access the raw undecorated function using __wrapped__
raw_hello = hello_world.__wrapped__
raw_hello()
# Decorator that takes arguments
def logged(prefix):
prefix = prefix if prefix else ''
logprefix = prefix + ':' if len(prefix) > 0 else prefix
def decorate(func):
@wraps(func)
def wrapper(*args, **kwargs):
print('{}Enter:{}'.format(logprefix, func.__name__))
result = func(*args, **kwargs)
print('{}Exit:{}'.format(logprefix, func.__name__))
return result
return wrapper
return decorate
@logged('logger')
def hello_world():
print('hello world')
hello_world()
# always declare classmethod and staticmethod decorators first (so they are applied last)
class Spam:
@logthis
def instance_method(self, n):
print(self, n)
while n > 0:
n -= 1
@classmethod
@logthis
def class_method(cls, n):
print(cls, n)
while n > 0:
n -= 1
@staticmethod
@logthis
def static_method(n):
print(n)
while n > 0:
n-=1
s = Spam()
s.instance_method(4)
Spam.class_method(10)
# Singleton metaclass
class Singleton(type):
def __init__(self, *args, **kwargs):
self.__instance = None
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
if self.__instance is None:
self.__instance = super().__call__(*args, **kwargs)
return self.__instance
else:
return self.__instance
# Example
class Spam(metaclass=Singleton):
def __init__(self):
print('Creating Spam')
a = Spam() #generates 'Creating Spam'
b=Spam() #no print message produced
b is a
# Metaclass to cache objects by string provided in ctor
import weakref
class Cached(type):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__cache = weakref.WeakValueDictionary()
def __call__(self, *args):
if args in self.__cache:
return self.__cache[args]
else:
obj = super().__call__(*args)
self.__cache[args] = obj
return obj
# Example
class Spam(metaclass=Cached):
def __init__(self, name):
print('Creating Spam({!r})'.format(name))
self.name = name
a =Spam('a')
b=Spam('b')
b2 = Spam('b')
a is b
b is b2
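# A small follow-up sketch: because the cache holds weak references, an entry
# disappears once no strong references to the instance remain.
import gc
del a, b, b2
gc.collect()
c = Spam('a')  # prints "Creating Spam('a')" again: the old instance was evicted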
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Advanced Decorators
Step2: 3. Metaclasses
|
3,406
|
<ASSISTANT_TASK:>
Python Code:
!rm -rf /tmp/ImageNetTrainTransfer
#Import
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import random
from PIL import Image
#Inception preprocessing code from https://github.com/tensorflow/models/blob/master/slim/preprocessing/inception_preprocessing.py
#useful to maintain training dimension
from utils import inception_preprocessing
import sys
#from inception import inception
'''
Use slim and nets_factory (as in TensorFlow Slim,
https://github.com/tensorflow/models/blob/master/slim/train_image_classifier.py)
to restore the network.
Networks must be registered in nets_factory (see the file structure in this notebook's directory).
'''
slim = tf.contrib.slim
from nets import nets_factory
#Global Variables
IMAGE_NET_ROOT_PATH = '/home/carnd/transfer-learning-utils/tiny-imagenet-200/'
#IMAGE_NET_ROOT_PATH = '/data/lgrazioli/'
IMAGE_NET_LABELS_PATH = IMAGE_NET_ROOT_PATH + 'words.txt'
IMAGE_NET_TRAIN_PATH = IMAGE_NET_ROOT_PATH + 'train/'
TRAINING_CHECKPOINT_DIR = '/tmp/ImageNetTrainTransfer'
#Transfer learning CHECKPOINT PATH
#The network's ckpt file
CHECKPOINT_PATH = '/home/carnd/transfer-learning-utils/inception_v4.ckpt'
#Read the label file as a pandas DataFrame
labels_df = pd.read_csv(IMAGE_NET_LABELS_PATH, sep='\\t', header=None, names=['id','labels'])
labels_df.head(5)
labels_df.count()
#new_labels = []
labels_lengths = []
for idx, row in labels_df.iterrows():
    #Convert to string because some are floats
current_labels = tuple(str(row['labels']).split(','))
#new_labels.append(current_labels)
labels_lengths.append(len(current_labels))
labels_df['labels_length'] = labels_lengths
labels_indices = [idx for idx, _ in labels_df.iterrows()]
labels_df['indices'] = labels_indices
labels_df.head(20)
train_paths = []
for idx, label_dir in enumerate(os.listdir(IMAGE_NET_TRAIN_PATH)):
image_dir_path = IMAGE_NET_TRAIN_PATH + label_dir + '/images/'
print("Processing label {0}".format(label_dir))
for image in os.listdir(image_dir_path):
        #Extract the class_id
class_id = image.split('.')[0].split('_')[0]
        #Look up in the labels df
target_label = labels_df[labels_df['id'] == class_id] #=> pass to tf.nn.one_hot
        #Extract the label
target_label = target_label['labels'].values[0]
train_paths.append((image_dir_path + image,
class_id,
image.split('.')[0].split('_')[1],
target_label
))
if idx == 10:
break
train_df = pd.DataFrame(train_paths, columns=['im_path','class', 'im_class_id', 'target_label'])
print(train_df.count())
train_df.head()
#Remove black and white images
uncorrect_images = 0
#Save the indices of images to remove
to_remove_indexes = []
for idx, record in train_df.iterrows():
    #Read the image as an np.array
im_array = np.array(Image.open(record['im_path']))
    #If it does not have 3 channels, add it to the ones to remove
if im_array.shape[-1] != 3:
uncorrect_images += 1
to_remove_indexes.append(idx)
if idx % 20 == 0:
sys.stdout.write("\rProcessed {0} images".format(idx))
sys.stdout.flush()
#Remove the identified rows
train_df = train_df.drop(train_df.index[to_remove_indexes])
print("New size: {0}".format(len(train_df)))
print("Removed {0} images".format(uncorrect_images))
#Optional sampling to pass to the input generator
example_file_list = list(train_df.im_path)
print(len(example_file_list))
labels_dict = {}
unique_labels = set(labels_df['labels'])
for idx, target in enumerate(unique_labels):
labels_dict[target] = idx
num_classes = len(labels_dict)
num_classes
example_label_list = []
for idx, value in train_df.iterrows():
example_label_list.append(labels_dict[value['target_label']])
len(example_label_list)
num_classes = len(set(example_label_list))
num_classes
reducted_label_dict = {}
for idx,value in enumerate(set(example_label_list)):
reducted_label_dict[value] = idx
for idx,label in enumerate(example_label_list):
example_label_list[idx] = reducted_label_dict[label]
'''
get_network_fn for returning the corresponding network function.
If num_classes needs to change, set is_training to True.
Returns the function defined in the network's corresponding file.
'''
model_name = 'inception_v4'
inception_net_fn = nets_factory.get_network_fn(model_name,
num_classes=1001,
is_training = False
)
'''
with tf.device('/gpu:0'):
sampl_input = tf.placeholder(tf.float32, [None, 300,300, 3], name='incpetion_input_placeholder')
#Invocation of the model fn to define the network's variables
#Uses these tensors, which are the ones the model passes through
#Necessary to restore the graph
print(inception_net_fn(sampl_input))
'''
EPOCHS = 50
BATCH_SIZE = 32
#Used to tell when the generator has moved on to batches belonging to a new epoch
BATCH_PER_EPOCH = np.ceil(len(example_file_list) / BATCH_SIZE)
def parse_single_image(filename_queue):
#Dequeue a file name from the file name queue
#filename, y = filename_queue.dequeue()
    #Do not call dequeue: the function's parameter is already the dequeued element
filename, y = filename_queue[0], filename_queue[1]
    #y only needs the one-hot encoding
y = tf.one_hot(y, num_classes)
#Read image
raw = tf.read_file(filename)
#convert in jpg (in GPU!)
jpeg_image = tf.image.decode_jpeg(raw)
#Preprocessing with inception preprocessing
jpeg_image = inception_preprocessing.preprocess_image(jpeg_image, 300, 300, is_training=True)
return jpeg_image, y
#jpeg_image = parse_single_image(filename_queue)
def get_batch(filenames, labels, batch_size, num_epochs=None):
    #File-reading queue; slice_input_producer accepts a list of lists (same size)
    #The result of the dequeue is the current element of each of the lists
    #The lists are, respectively, the list of files and the list of labels
filename_queue = tf.train.slice_input_producer([filenames, labels])
    #Read a single record
jpeg_image,y = parse_single_image(filename_queue)
# min_after_dequeue defines how big a buffer we will randomly sample
# from -- bigger means better shuffling but slower start up and more
# memory used.
# capacity must be larger than min_after_dequeue and the amount larger
# determines the maximum we will prefetch. Recommendation:
# min_after_dequeue + (num_threads + a small safety margin) * batch_size
min_after_dequeue = 10
capacity = min_after_dequeue + 3 * batch_size
    #tensors is the list of tensors of the single features and images. It runs the example and label tensors batch_size times to obtain the batch
    #num_threads actually increases CPU usage (confirmed by the throughput visible in Cloudera Manager),
    #it remains a slow throughput nonetheless ....
example_batch = tf.train.shuffle_batch(
tensors=[jpeg_image, y], batch_size=batch_size, capacity=capacity,
min_after_dequeue=min_after_dequeue, allow_smaller_final_batch=True, num_threads=2)
return example_batch
#TF graph; for now it only retrieves a batch
with tf.device('/cpu:0'):
with tf.name_scope('preprocessing') as scope:
x,y = get_batch(example_file_list, example_label_list, batch_size=BATCH_SIZE)
#x = tf.contrib.layers.flatten(x)
with tf.device('/gpu:0'):
#inception prelogits
inception_net_fn(x)
#prelogits = tf.placeholder(tf.float32, [None, 1536], name='prelogits_placeholder')
prelogits = tf.get_default_graph().get_tensor_by_name("InceptionV4/Logits/PreLogitsFlatten/Reshape:0")
with tf.device('/gpu:0'):
with tf.variable_scope('trainable'):
'''with tf.variable_scope('hidden') as scope:
hidden = tf.layers.dense(
prelogits,
units=128,
activation=tf.nn.relu
)'''
        #Kernel init None = Glorot initializer (stddev = 1/sqrt(n))
with tf.variable_scope('readout') as scope:
output = tf.layers.dense(
prelogits,
units=num_classes,
activation=None
)
with tf.variable_scope('train_op') as scope:
# Define loss and optimizer
targetvars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "trainable")
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost, var_list=targetvars)
# Accuracy
correct_pred = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
tf.summary.scalar('accuracy', accuracy)
tf.summary.scalar('loss', cost)
init = tf.global_variables_initializer()
merged_summeries = tf.summary.merge_all()
#GPU config
config = tf.ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
#Saver for restoring the Inception net
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
sess.run(init)
writer = tf.summary.FileWriter(TRAINING_CHECKPOINT_DIR,
sess.graph)
#Start populating the filename queue.
coord = tf.train.Coordinator()
    #Without this call, the threads that populate the queue (which enables the read) do not start
threads = tf.train.start_queue_runners(coord=coord)
    #Current epoch and step are used to know when to change epoch and when to stop
current_epoch = 0
current_step = 0
while current_epoch < EPOCHS:
x_batch, y_batch = sess.run([x,y])
        #Forward pass through the Inception net
#inception_pre_logits = sess.run(tf.get_default_graph().get_tensor_by_name("InceptionV4/Logits/PreLogitsFlatten/Reshape:0"),
#feed_dict={sampl_input: x_batch})
sess.run(optimizer, feed_dict={x: x_batch, y: y_batch})
#print(x_batch.shape)
if current_step % 10 == 0:
#print("Batch shape {}".format(x_batch.shape))
print("Current step: {0}".format(current_step))
train_loss, train_accuracy, train_summ = sess.run([cost,accuracy,merged_summeries],
feed_dict={x: x_batch, y: y_batch})
print("Loss: {0} accuracy {1}".format(train_loss, train_accuracy))
writer.add_summary(train_summ, current_epoch * current_step + 1)
        #Change epoch: the maximum for the current epoch has been reached
if current_step == (BATCH_PER_EPOCH - 1):
current_epoch += 1
current_step = 0
print("EPOCH {0}".format(current_epoch))
            #Epochs finished -> shut down
if current_epoch >= EPOCHS:
break
if current_step == 0 and current_epoch == 0:
writer.add_graph(sess.graph)
#train_summary = sess.run([merged_summeries], feed_dict={x: x_batch, y: y_batch})
#writer.add_summary(train_summary, current_step)
current_step += 1
#for i in range(10):
#converted_im = sess.run(jpeg_image)
#print(converted_im.shape)
    #Close the coordinator (stop the reader threads)
coord.request_stop()
coord.join(threads)
sess.close()
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "trainable")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading the ImageNet words file
Step2: Adding a column with the length of each label (how many classes each label contains).
Step3: Train DF
Step4: Cleaning out images that are not in the format expected by inception_preprocessing (3 channels).
Step5: Defining the labels dictionary
Step6: Building the list of labels (same order as the list of files)
Step7: Transfer Learning
Step8: Input pipeline
|
3,407
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
pylab.rcParams['figure.figsize'] = (6, 6)
import datajoint as dj
from pipeline.preprocess import *
(dj.ERD.from_sequence([Prepare, Sync, ExtractRaw, Spikes])-1).draw()
dj.ERD(Prepare).add_parts().draw()
dj.ERD(ExtractRaw).add_parts().draw()
Prepare.GalvoAverageFrame().heading
frames = Prepare.GalvoAverageFrame() & dict(animal_id=8623, slice=3, scan_idx=6)
frames
# fetch data
g, r = frames.fetch.order_by('channel')['frame']
# plot data
def normalize(img):
m = img.min()
return (img - m)/(img.max() - m)
plt.imshow(np.stack(
[normalize(r), normalize(g), np.zeros_like(r)], 2))
grid(False)
experiment.Fluorophore()
# select all slices imaged with GCaMP6f
slices = Prepare()*Slice() & (
experiment.Session.Fluorophore() & 'fluorophore="GCaMP6f"') & Prepare.GalvoAverageFrame()
slices
keys = list(slices.fetch.keys())
key = keys[8] # pick an arbitrary frame
key
frames = Prepare.GalvoAverageFrame() & key
g, r = frames.fetch.order_by('channel')['frame']
plt.imshow(np.stack(
[normalize(r), normalize(g), np.zeros_like(r)], 2))
grid(False)
calcium = ComputeTraces.Trace()
spikes = Spikes.RateTrace()
traces = calcium*spikes # combined raw and spike rate traces
traces.heading
Method.Galvo() # trace extraction methods for galvo imaging
SpikeMethod()
traces &= dict(extract_method=2, spike_method=3)
traces
key = np.random.choice(list(traces.fetch.keys())) # pick a random trace
key
# fetch synchronized times
times, nslices = (Sync()*Prepare.Galvo() & key).fetch1['frame_times', 'nslices']
times = times[0,::nslices] - times[0,0] # start at 0
calcium, spikes = (traces & key).fetch1['trace', 'rate_trace']
import seaborn as sns
with sns.axes_style('whitegrid'):
fig, ax = plt.subplots(2,1, sharex=True)
ax[0].plot(times, calcium.squeeze(), label='Ca Trace')
ax[1].plot(times, spikes.squeeze(), label='Spike Rate')
ax[0].set_ylabel('Fluorescence')
ax[1].set_ylabel('inferred spike rate')
ax[1].set_xlabel('time [s]')
sns.despine(fig)
fig.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The preprocess schema extracts, aligns, and synchronizes multiphoton trace data from both galvo and AOD systems.
Step2: Here are the main elements of the preprocess schema
Step3: Prepare and ExtractRaw combine the information from the galvo-based two-photon setups and the AOD-based setup. They perform all the operations that are specific to each type of imaging, so that subsequent processing no longer needs to distinguish between them.
Step4: Similarly, preprocess.ExtractRaw has several part tables.
Step5: Example
Step6: If you know the exact identity of the scan of interest, use it as a restriction
Step7: Alternatively, get a whole set of scans matching some condition.
Step8: To address each entity in the set, get its primary key values with the fetch.keys() iterator. Then each key can be used in a restriction.
Step9: Example
Step10: Select the preprocessing method
Step11: Let's plot a random trace
|
3,408
|
<ASSISTANT_TASK:>
Python Code:
raw_dataset = pd.read_csv(source_path + "Speed_Dating_Data.csv")
raw_dataset.head(3)
raw_dataset_copy = raw_dataset
#merged_datasets = raw_dataset.merge(raw_dataset_copy, left_on="pid", right_on="iid")
#merged_datasets[["iid_x","gender_x","pid_y","gender_y"]].head(5)
#same_gender = merged_datasets[merged_datasets["gender_x"] == merged_datasets["gender_y"]]
#same_gender.head()
columns_by_types = raw_dataset.columns.to_series().groupby(raw_dataset.dtypes).groups
raw_dataset.dtypes.value_counts()
raw_dataset.isnull().sum().head(3)
summary = raw_dataset.describe() #.transpose()
print(summary)
#raw_dataset.groupby("gender").agg({"iid": pd.Series.nunique})
raw_dataset.groupby('gender').iid.nunique()
raw_dataset.groupby('career').iid.nunique().sort_values(ascending=False).head(5)
raw_dataset.groupby(["gender","match"]).iid.nunique()
local_path = "/Users/sandrapietrowska/Documents/Trainings/luigi/data_source/"
local_filename = "Speed_Dating_Data.csv"
my_variables_selection = ["iid", "pid", "match","gender","date","go_out","sports","tvsports","exercise","dining",
"museums","art","hiking","gaming","clubbing","reading","tv","theater","movies",
"concerts","music","shopping","yoga"]
class RawSetProcessing(object):
    """This class aims to load and clean the dataset."""
def __init__(self,source_path,filename,features):
self.source_path = source_path
self.filename = filename
self.features = features
# Load data
def load_data(self):
raw_dataset_df = pd.read_csv(self.source_path + self.filename)
return raw_dataset_df
# Select variables to process and include in the model
def subset_features(self, df):
sel_vars_df = df[self.features]
return sel_vars_df
@staticmethod
# Remove ids with missing values
def remove_ids_with_missing_values(df):
sel_vars_filled_df = df.dropna()
return sel_vars_filled_df
@staticmethod
def drop_duplicated_values(df):
df = df.drop_duplicates()
return df
# Combine processing stages
def combiner_pipeline(self):
raw_dataset = self.load_data()
subset_df = self.subset_features(raw_dataset)
subset_no_dup_df = self.drop_duplicated_values(subset_df)
subset_filled_df = self.remove_ids_with_missing_values(subset_no_dup_df)
return subset_filled_df
raw_set = RawSetProcessing(local_path, local_filename, my_variables_selection)
dataset_df = raw_set.combiner_pipeline()
dataset_df.head(3)
# Number of unique participants
dataset_df.iid.nunique()
dataset_df.shape
def get_partner_features(df):
#print df[df["iid"] == 1]
df_partner = df.copy()
df_partner = df_partner.drop(['pid','match'], 1).drop_duplicates()
#print df_partner.shape
merged_datasets = df.merge(df_partner, how = "inner",left_on="pid", right_on="iid",suffixes=('_me','_partner'))
#print merged_datasets[merged_datasets["iid_me"] == 1]
return merged_datasets
feat_eng_df = get_partner_features(dataset_df)
feat_eng_df.head(3)
import sklearn
print(sklearn.__version__)
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
import subprocess
#features = list(["gender","age_o","race_o","goal","samerace","imprace","imprelig","date","go_out","career_c"])
features = list(["gender","date","go_out","sports","tvsports","exercise","dining","museums","art",
"hiking","gaming","clubbing","reading","tv","theater","movies","concerts","music",
"shopping","yoga"])
suffix_me = "_me"
suffix_partner = "_partner"
#add suffix to each element of list
def process_features_names(features, suffix_1, suffix_2):
features_me = [feat + suffix_1 for feat in features]
features_partner = [feat + suffix_2 for feat in features]
features_all = features_me + features_partner
return features_all
features_model = process_features_names(features, suffix_me, suffix_partner)
explanatory = feat_eng_df[features_model]
explained = feat_eng_df["match"]
clf = tree.DecisionTreeClassifier(min_samples_split=20,min_samples_leaf=10,max_depth=4)
clf = clf.fit(explanatory, explained)
# Download http://www.graphviz.org/
with open("data.dot", 'w') as f:
f = tree.export_graphviz(clf, out_file=f, feature_names= features_model, class_names="match")
import subprocess
subprocess.call(['dot', '-Tpdf', 'data.dot', '-o' 'data.pdf'])
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(explanatory, explained, test_size=0.3, random_state=0)
parameters = [
{'criterion': ['gini','entropy'], 'max_depth': [4,6,10,12,14],
'min_samples_split': [10,20,30], 'min_samples_leaf': [10,15,20]
}
]
scores = ['precision', 'recall']
dtc = tree.DecisionTreeClassifier()
clf = GridSearchCV(dtc, parameters,n_jobs=3, cv=5, refit=True)
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print("")
clf = GridSearchCV(dtc, parameters, cv=5,
scoring='%s_macro' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print("")
print(clf.best_params_)
print("")
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print("")
best_param_dtc = tree.DecisionTreeClassifier(criterion="entropy",min_samples_split=10,min_samples_leaf=10,max_depth=14)
best_param_dtc = best_param_dtc.fit(explanatory, explained)
best_param_dtc.feature_importances_
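# A small helper sketch: pair each feature name with its importance in the
# fitted tree and show the strongest predictors.
importances = pd.Series(best_param_dtc.feature_importances_, index=features_model)
print(importances.sort_values(ascending=False).head(10))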
raw_dataset.rename(columns={"age_o":"age_of_partner","race_o":"race_of_partner"},inplace=True)
raw_data = {
'subject_id': ['14', '15', '16', '17', '18'],
'first_name': ['Sue', 'Maria', 'Sandra', 'Kate', 'Aurelie'],
'last_name': ['Bonder', 'Black', 'Balwner', 'Brice', 'Btisan'],
'pid': ['4', '5', '6', '7', '8'],}
df_a = pd.DataFrame(raw_data, columns = ['subject_id', 'first_name', 'last_name','pid'])
df_a
raw_data = {
'subject_id': ['4', '5', '6', '7', '8'],
'first_name': ['Billy', 'Brian', 'Bran', 'Bryce', 'Betty'],
'last_name': ['Bonder', 'Black', 'Balwner', 'Brice', 'Btisan'],
'pid': ['14', '15', '16', '17', '18'],}
df_b = pd.DataFrame(raw_data, columns = ['subject_id', 'first_name', 'last_name','pid'])
df_b
df_a.merge(df_b, left_on='pid', right_on='subject_id', how='outer', suffixes=('_me','_partner'))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data exploration
Step3: Data processing
Step4: Feature engineering
Step5: Modelling
Step6: Variables selection
Step7: Decision Tree
Step8: Tuning Parameters
Step9: Check
|
3,409
|
<ASSISTANT_TASK:>
Python Code:
%load_ext sql
%sql mysql://studentuser:studentpw@mysqlserver/dognitiondb
%sql USE dognitiondb
%config SqlMagic.displaylimit=25
%%sql
SELECT ct.created_at, DAYOFWEEK(ct.created_at)
FROM complete_tests ct
LIMIT 49, 200
%%sql
SELECT ct.created_at, DAYOFWEEK(ct.created_at),
CASE DAYOFWEEK(ct.created_at)
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday
FROM complete_tests ct
LIMIT 49, 200
%%sql
SELECT DAYOFWEEK(ct.created_at),
CASE DAYOFWEEK(ct.created_at)
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
GROUP BY weekday
ORDER BY num_tests DESC
%%sql
SELECT DAYOFWEEK(ct.created_at),
CASE DAYOFWEEK(ct.created_at)
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN dogs d ON ct.dog_guid=d.dog_guid
WHERE
(d.exclude=0 OR d.exclude IS NULL)
GROUP BY weekday
ORDER BY num_tests DESC
%%sql
SELECT COUNT(d.dog_guid)
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
%%sql
SELECT COUNT(DISTINCT d.dog_guid)
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
%%sql
SELECT
DISTINCT d.dog_guid
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
%%sql
SELECT DAYOFWEEK(ct.created_at),
CASE DAYOFWEEK(ct.created_at)
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN
(SELECT
DISTINCT d.dog_guid
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
) AS dd
ON ct.dog_guid=dd.dog_guid
GROUP BY weekday
ORDER BY num_tests DESC
%%sql
SELECT DAYOFWEEK(c.created_at) AS dayasnum, YEAR(c.created_at) AS year,
COUNT(c.created_at) AS numtests,
(CASE
WHEN DAYOFWEEK(c.created_at)=1 THEN "Su"
WHEN DAYOFWEEK(c.created_at)=2 THEN "Mo"
WHEN DAYOFWEEK(c.created_at)=3 THEN "Tu"
WHEN DAYOFWEEK(c.created_at)=4 THEN "We"
WHEN DAYOFWEEK(c.created_at)=5 THEN "Th"
WHEN DAYOFWEEK(c.created_at)=6 THEN "Fr"
WHEN DAYOFWEEK(c.created_at)=7 THEN "Sa"
END) AS daylabel
FROM complete_tests c JOIN
(SELECT DISTINCT dog_guid
FROM dogs d JOIN users u
ON d.user_guid=u.user_guid
WHERE ((u.exclude IS NULL OR u.exclude=0)
AND (d.exclude IS NULL OR d.exclude=0))
) AS dogs_cleaned
ON c.dog_guid=dogs_cleaned.dog_guid
GROUP BY daylabel
ORDER BY numtests DESC;
%%sql
SELECT DAYOFWEEK(ct.created_at),
CASE DAYOFWEEK(ct.created_at)
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
YEAR(ct.created_at) AS year,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN
(SELECT
DISTINCT d.dog_guid
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
) AS dd
ON ct.dog_guid=dd.dog_guid
GROUP BY weekday, year
ORDER BY year ASC, num_tests DESC
%%sql
SELECT DAYOFWEEK(ct.created_at),
CASE DAYOFWEEK(ct.created_at)
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
YEAR(ct.created_at) AS year,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN
(SELECT
DISTINCT d.dog_guid
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
AND u.country='US' AND u.state NOT IN ('HI', 'AK')
) AS dd
ON ct.dog_guid=dd.dog_guid
GROUP BY weekday, year
ORDER BY year ASC, num_tests DESC
%%sql
SELECT DAYOFWEEK(DATE_SUB(ct.created_at,INTERVAL 6 HOUR)) AS weeknum,
CASE DAYOFWEEK(DATE_SUB(ct.created_at,INTERVAL 6 HOUR))
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
YEAR(ct.created_at) AS year,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN
(SELECT
DISTINCT d.dog_guid
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
AND u.country='US' AND u.state NOT IN ('HI', 'AK')
) AS dd
ON ct.dog_guid=dd.dog_guid
GROUP BY weekday, year
ORDER BY year ASC, num_tests DESC
%%sql
SELECT DAYOFWEEK(DATE_SUB(ct.created_at,INTERVAL 6 HOUR)) AS weeknum,
CASE DAYOFWEEK(DATE_SUB(ct.created_at,INTERVAL 6 HOUR))
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
YEAR(ct.created_at) AS year,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN
(SELECT
DISTINCT d.dog_guid
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
AND u.country='US' AND u.state NOT IN ('HI', 'AK')
) AS dd
ON ct.dog_guid=dd.dog_guid
GROUP BY weekday, year
ORDER BY year ASC, num_tests DESC
%%sql
SELECT DAYOFWEEK(DATE_SUB(ct.created_at,INTERVAL 6 HOUR)) AS weeknum,
CASE DAYOFWEEK(DATE_SUB(ct.created_at,INTERVAL 6 HOUR))
WHEN 1 THEN 'Sun'
WHEN 2 THEN 'Mon'
WHEN 3 THEN 'Tue'
WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu'
WHEN 6 THEN 'Fri'
WHEN 7 THEN 'Sat'
END AS weekday,
YEAR(ct.created_at) AS year,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN
(SELECT
DISTINCT d.dog_guid
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
AND u.country='US' AND u.state NOT IN ('HI', 'AK')
) AS dd
ON ct.dog_guid=dd.dog_guid
GROUP BY weekday, year
ORDER BY year ASC, FIELD(weekday, 'Mon','Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun') ASC
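%%sql
-- Hedged portability sketch (added; not from the original lesson): ORDER BY FIELD
-- is MySQL-specific. A CASE expression in the ORDER BY clause gives the same
-- Mon-Sun ordering; some platforms require repeating the full CASE DAYOFWEEK
-- expression instead of referencing the alias.
SELECT
CASE DAYOFWEEK(DATE_SUB(ct.created_at, INTERVAL 6 HOUR))
WHEN 1 THEN 'Sun' WHEN 2 THEN 'Mon' WHEN 3 THEN 'Tue' WHEN 4 THEN 'Wed'
WHEN 5 THEN 'Thu' WHEN 6 THEN 'Fri' WHEN 7 THEN 'Sat'
END AS weekday,
YEAR(ct.created_at) AS year,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
GROUP BY weekday, year
ORDER BY year ASC,
CASE weekday
WHEN 'Mon' THEN 1 WHEN 'Tue' THEN 2 WHEN 'Wed' THEN 3 WHEN 'Thu' THEN 4
WHEN 'Fri' THEN 5 WHEN 'Sat' THEN 6 WHEN 'Sun' THEN 7
END ASC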
%%sql
SELECT dd.country,
COUNT(ct.created_at) AS num_tests
FROM complete_tests ct
JOIN
(SELECT
DISTINCT d.dog_guid, u.country
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE (d.exclude=0 OR d.exclude IS NULL) AND (u.exclude=0 OR u.exclude IS NULL)
) AS dd
ON ct.dog_guid=dd.dog_guid
GROUP BY dd.country
ORDER BY num_tests DESC
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <img src="https
Step2: Of course, the results of the query in Question 1 would be much easier to interpret if the output included the name of the day of the week (or a relevant abbreviation) associated with each time stamp rather than a number index.
Step3: Now that we are confident we have the correct syntax for extracting weekday labels from the created_at time stamps, we can start building our larger query that examines the number of tests completed on each weekday.
Step4: So far these results suggest that users complete the most tests on Sunday night and the fewest tests on Friday night. We need to determine if this trend remains after flagged dog_guids and user_guids are excluded. Let's start by removing the dog_guids that have an exclude flag. We'll exclude user_guids with an exclude flag in later queries.
Step5: Now we need to exclude the user_guids that have a value of "1" in the exclude column as well. One way to do this would be to join the complete_tests, dogs, and users tables with a sequence of inner joins. However, we've seen in previous lessons that there are duplicate rows in the users table. These duplicates will get passed through the join and will affect the count calculations. To illustrate this, compare the following two queries.
Step6: Question 6
Step7: The strategy we will use to handle duplicate rows in the users table will be to, first, write a subquery that retrieves the distinct dog_guids from an inner join between the dogs and users table with the appropriate records excluded. Then, second, we will join the result of this subquery to the complete_tests table and group the results according to the day of the week.
Step8: Question 8
Step9: These results still suggest that Sunday is the day when the most tests are completed and Friday is the day when the fewest tests are completed. However, our first query suggested that more tests were completed on Tuesday than Saturday; our current query suggests that slightly more tests are completed on Saturday than Tuesday, now that flagged dog_guids and user_guids are excluded.
Step10: These results suggest that although the precise order of the weekdays with the most to fewest completed tests changes slightly from year to year, Sundays always have a lot of completed tests, and Fridays always have the fewest or close to the fewest completed tests. So far, it seems like it might be a good idea for Dognition to target reminder or encouragement messages to customers on Sundays. However, there is one more issue our analysis does not address. All of the time stamps in the created_at column are in Coordinated Universal Time (abbreviated UTC). This is a time convention that is constant around the globe. Nonetheless, as the picture below illustrates, countries and states have different time zones. The same UTC time can correspond with local times in different countries that are as much as 24 hours apart
Step11: The next step is to adjust the created_at times for differences in time zone. Most United States states (excluding Hawaii and Alaska) have a time zone of UTC time -5 hours (in the eastern-most regions) to -8 hours (in the western-most regions). To get a general idea for how much our weekday analysis is likely to change based on time zone, we will subtract 6 hours from every time stamp in the complete_tests table. Although this means our time stamps can be inaccurate by 1 or 2 hours, people are not likely to be playing Dognition games at midnight, so 1-2 hours should not affect the weekdays extracted from each time stamp too much.
Step12: Question 12
Step13: You can try re-running the query with time-zone corrections of 5, 7, or 8 hours, and the results remain essentially the same. All of these analyses suggest that customers are most likely to complete tests around Sunday and Monday, and least likely to complete tests around the end of the work week, on Thursday and Friday. This is certainly valuable information for Dognition to take advantage of.
Step14: Unfortunately other database platforms do not have the ORDER BY FIELD functionality. To achieve the same result in other platforms, you would have to use a CASE statement or a more advanced solution
|
3,410
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
class Halley:
def __init__(self, a):
self.f = lambda x: a[0] + a[1] * x + a[2] * tf.pow(x, 2) + a[3] * tf.pow(x, 3) + a[4] * tf.pow(x, 4)
self.df = lambda x: tf.gradients(self.f(x), x)[0] # TensorFlow does automatic differentiation!
self.ddf = lambda x: tf.gradients(self.df(x), x)[0]
def compute_one_iteration(self, x):
return x - ((2 * self.f(x) * self.df(x)) / (2 * tf.pow(self.df(x), 2) - self.f(x) * self.ddf(x)))
# answer is supposed to be 7.411
with tf.Session() as sess:
a = [-1.0,1.0,12.0,-4.0,1.0]
x0 = tf.constant(12.0)
halley = Halley(a)
answer = halley.compute_one_iteration(x0)
result = sess.run(answer)
print(result)
import tensorflow as tf
import numpy as np
class Halley:
def __init__(self, a):
self.f = lambda x: a[0] + a[1] * x + a[2] * tf.pow(x, 2) + a[3] * tf.pow(x, 3) + a[4] * tf.pow(x, 4)
self.df = lambda x: tf.gradients(self.f(x), x)[0]
self.ddf = lambda x: tf.gradients(self.df(x), x)[0]
def compute_one_iteration(self, x):
return x - ((2 * self.f(x) * self.df(x)) / (2 * tf.pow(self.df(x), 2) - self.f(x) * self.ddf(x)))
# answer is supposed to be [7.4111586, 4.459961, 2.2138097]
with tf.Session() as sess:
a = [-1.0,1.0,12.0,-4.0,1.0]
x0 = tf.constant(12.0)
halley = Halley(a)
x1 = halley.compute_one_iteration(x0)
x2 = halley.compute_one_iteration(x1)
x3 = halley.compute_one_iteration(x2)
result = sess.run([x1, x2, x3])
print(result)
import tensorflow as tf
import numpy as np
class Halley:
def __init__(self, a):
self.f = lambda x: a[0] + a[1] * x + a[2] * tf.pow(x, 2) + a[3] * tf.pow(x, 3) + a[4] * tf.pow(x, 4)
self.df = lambda x: tf.gradients(self.f(x), x)[0]
self.ddf = lambda x: tf.gradients(self.df(x), x)[0]
def compute_one_iteration(self, x):
return x - ((2 * self.f(x) * self.df(x)) / (2 * tf.pow(self.df(x), 2) - self.f(x) * self.ddf(x)))
def prev_and_curr(self, iterno, prev, x):
return iterno+1, x, self.compute_one_iteration(x)
def compute(self, x0, maxiter, epsilon):
return tf.while_loop(lambda i, prev, x: tf.logical_and(tf.abs(prev-x) > epsilon, i < maxiter),
self.prev_and_curr, (0, x0-2*epsilon, x0))
# init parameters
# answer is supposed to be -0.31365424 or 0.259158
with tf.Session() as sess:
a = [-1.0,1.0,12.0,-4.0,1.0]
x0 = tf.constant(12.0)
halley = Halley(a)
xn = halley.compute(x0, 100, 0.01)
result = sess.run(xn)
print(result[2])
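# Hedged sanity check (added): the same Halley iteration in plain Python,
# to verify the TensorFlow graph above; three steps should land near 2.2138.
a = [-1.0, 1.0, 12.0, -4.0, 1.0]
f = lambda x: sum(c * x**k for k, c in enumerate(a))
df = lambda x: sum(k * c * x**(k - 1) for k, c in enumerate(a) if k > 0)
ddf = lambda x: sum(k * (k - 1) * c * x**(k - 2) for k, c in enumerate(a) if k > 1)
x = 12.0
for _ in range(3):
    x = x - (2 * f(x) * df(x)) / (2 * df(x)**2 - f(x) * ddf(x))
print(x)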
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Iterate 3 times
Step2: Iterate until condition
|
3,411
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import pyther as pt
def main_eos():
print("-" * 79)
components = ["METHANE"]
MODEL = "PR"
specification = "constants"
component_eos = pt.parameters_eos_constans(components, MODEL, specification)
#print(component_eos)
#print('-' * 79)
methane = component_eos[component_eos.index==components]
#print(methane)
methane_elv = methane[["Tc", "Pc", "k", "d1"]]
#print(methane_elv)
Tc = np.array(methane["Tc"])
Pc = np.array(methane["Pc"])
Vc = np.array(methane["Vc"])
omega = np.array(methane["Omega"])
k = np.array(methane["k"])
d1 = np.array(methane["d1"])
punto_critico = np.array([Pc, Vc])
print("Tc main = ", Tc)
print("Pc main = ", Pc)
print("punto critico = ", punto_critico)
data_elv = pt.function_elv(components, Vc, Tc, Pc, omega, k, d1)
#print(data_elv)
return data_elv, Vc, Pc
envolvente = main_eos()  # run the EOS routine defined above to get (data_elv, Vc, Pc)
volumen = envolvente[0][0]
presion = envolvente[0][1]
Vc, Pc = envolvente[1], envolvente[2]
plt.plot(volumen,presion)
plt.scatter(Vc, Pc)
plt.xlabel('Volume [=] $mol/cm^3$')
plt.ylabel('Pressure [=] bar')
plt.grid(True)
plt.text(Vc * 1.4, Pc * 1.01, "Critical point")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: After importing the libraries to be used, the user-defined function main_eos() specifies the pure substance together with the equation-of-state model and the parameters required by the function "pt.function_elv(components, Vc, Tc, Pc, omega, k, d1)", which performs the calculations of the algorithm described previously.
Step2: 9.4 Results
|
3,412
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
a = np.array([1, 2, 3])
print(a)
print('')
b = np.array([(1, 2, 3), (4, 5, 6)])
print(b)
a + b
a * b
b - a
a**2
10*np.sin(a) # trigonometric sine
b<35
print('Axis 1: %s' % b[0])
print(np.average(b))
print(b.sum())
print(b.min())
print(b.max())
b.sum(axis=0) # sum of each column
b.min(axis=1) # min of each row
b.cumsum(axis=1) # cumulative sum along each row
a = np.zeros((3, 5))
print(a)
np.ones((2,3,4), dtype=np.int16) # dtype can also be specified
np.empty([2,3]) # uninitialized, output may vary
c = np.full(5, 2, dtype=float)  # np.float was removed in NumPy 1.24; use the builtin float
print(c)
d = np.full((2, 2), 10)
print(d)
np.arange(10, 30, 5)
np.arange(0, 2, 0.3) # it accepts float arguments
c = np.random.rand(2, 3)
print(c)
np.transpose(c)
d = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(d)
d[:, 0]
d[:, 1]
d[:, 0:2]
d[:, 2]
x = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) # start:stop:step
np.save('/tmp/x.npy', x)
del(x)
x = np.load('/tmp/x.npy')
print(x)
# Use memory mapping when the content does not fit in RAM
x_mm = np.memmap('x_mm.npy', dtype='float32', mode='w+', shape=(1000, 100))
x_mm[999][99] = 12.5
print(x_mm[999][99])
# Delete the memmap instance to close the file (the mmap file is not deleted).
# Deletion flushes memory changes to disk before removing the object.
del(x_mm)
# Load
x_mm = np.memmap('x_mm.npy', dtype='float32', mode='r', shape=(1000, 100))
print(x_mm[999][99])
#1
t = np.zeros((10, 10))
print('%d bytes' % (t.size * t.itemsize))
#2
f = np.arange(9).reshape(3,3)
print(f)
#3
z = np.random.random(30)
print(z)
print(z.mean())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercises
|
3,413
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
file_name_string = 'C:/Users/Charles Kelly/Desktop/Exercise Files/02_07/Final/EmployeesWithGrades.xlsx'
employees_df = pd.read_excel(file_name_string, 'Sheet1', index_col=None, na_values=['NA'])
employees_df
employees_df["Grade"] = employees_df["Grade"].astype("category")
employees_df["Grade"].cat.categories = ["excellent", "good", "acceptable", "poor", "unacceptable"]
employees_df
employees_df.groupby('Grade').count()
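# Hedged addition: value_counts on a categorical column tabulates every
# declared grade in category order (including empty ones) when sort=False.
employees_df['Grade'].value_counts(sort=False)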
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Change data type
Step2: Rename the categories
Step3: Values in data frame have not changed
Step4: tabulate Department, Name, and YearsOfService, by Grade
|
3,414
|
<ASSISTANT_TASK:>
Python Code:
from crpropa import *
## settings for MHD model (must be set according to model)
filename_bfield = "clues_primordial.dat" ## filename of the magnetic field
gridOrigin = Vector3d(0,0,0) ## origin of the 3D data, preferably at boxOrigin
gridSize = 1024 ## size of uniform grid in data points
h = 0.677 ## dimensionless Hubble parameter
size = 249.827/h *Mpc ## physical edgelength of volume in Mpc
b_factor = 1. ## global renormalization factor for the field
## settings of simulation
boxOrigin = Vector3d( 0, 0, 0,) ## origin of the full box of the simulation
boxSize = Vector3d( size, size, size ) ## end of the full box of the simulation
## settings for computation
minStep = 10.*kpc ## minimum length of single step of calculation
maxStep = 4.*Mpc ## maximum length of single step of calculation
tolerance = 1e-2 ## tolerance for error in iterative calculation of propagation step
spacing = size/(gridSize) ## resolution, physical size of single cell
m = ModuleList()
## instead of computing propagation without Lorentz deflection via
# m.add(SimplePropagation(minStep,maxStep))
## initiate grid to hold field values
vgrid = Grid3f( gridOrigin, gridSize, spacing )
## load values to the grid
loadGrid( vgrid, filename_bfield, b_factor )
## use grid as magnetic field
bField = MagneticFieldGrid( vgrid )
## add propagation module to the simulation to activate deflection in supplied field
m.add(PropagationCK( bField, tolerance, minStep, maxStep))
#m.add(DeflectionCK( bField, tolerance, minStep, maxStep)) ## this was used in older versions of CRPropa
m.add( PeriodicBox( boxOrigin, boxSize ) )
m.add( MaximumTrajectoryLength( 400*Mpc ) )
source = Source()
source.add( SourceUniformBox( boxOrigin, boxSize ))
filename_density = "mass-density_clues.dat" ## filename of the density field
source = Source()
## initialize grid to hold field values
mgrid = ScalarGrid( gridOrigin, gridSize, spacing )
## load values to grid
loadGrid( mgrid, filename_density )
## add source module to simulation
source.add( SourceDensityGrid( mgrid ) )
import numpy as np
filename_halos = 'clues_halos.dat'
# read data from file
data = np.loadtxt(filename_halos, unpack=True, skiprows=39)
sX = data[0]
sY = data[1]
sZ = data[2]
mass_halo = data[5]
## find only those mass halos inside the provided volume (see Hackstein et al. 2018 for more details)
Xdown= sX >= 0.25
Xup= sX <= 0.75
Ydown= sY >= 0.25
Yup= sY <= 0.75
Zdown= sZ >= 0.25
Zup= sZ <= 0.75
insider= Xdown*Xup*Ydown*Yup*Zdown*Zup
## transform relative positions to physical positions within given grid
sX = (sX[insider]-0.25)*2*size
sY = (sY[insider]-0.25)*2*size
sZ = (sZ[insider]-0.25)*2*size
## collect all sources in the multiple sources container
smp = SourceMultiplePositions()
for i in range(0,len(sX)):
pos = Vector3d( sX[i], sY[i], sZ[i] )
smp.add( pos, 1. )
## add collected sources
source = Source()
source.add( smp )
## use isotropic emission from all sources
source.add( SourceIsotropicEmission() )
## set particle type to be injected
A, Z = 1, 1 # proton
source.add( SourceParticleType( nucleusId(A,Z) ) )
## set injected energy spectrum
Emin, Emax = 1*EeV, 1000*EeV
specIndex = -1
source.add( SourcePowerLawSpectrum( Emin, Emax, specIndex ) )
filename_output = 'data/output_MW.txt'
obsPosition = Vector3d(0.5*size,0.5*size,0.5*size) # position of observer, MW is in center of constrained simulations
obsSize = 800*kpc ## physical size of observer sphere
## initialize observer that registers particles that enter into sphere of given size around its position
obs = Observer()
obs.add( ObserverSmallSphere( obsPosition, obsSize ) )
## write registered particles to output file
obs.onDetection( TextOutput( filename_output ) )
## choose to not further follow particles paths once detected
obs.setDeactivateOnDetection(True)
## add observer to module list
m.add(obs)
N = 1000
m.showModules() ## optional, see summary of loaded modules
m.setShowProgress(True) ## optional, see progress during runtime
m.run(source, N, True) ## perform simulation with N particles injected from source
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: to make use of periodicity of the provided data grid, use
Step2: to not follow particles forever, use
Step3: Uniform injection
Step4: Injection following density field
Step5: Mass Halo injection
Step6: additional source properties
Step7: Observer
Step8: finally run the simulation by
|
3,415
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np # library for numerical computation
import scipy as sp # scientific computing library
from scipy import stats # statistical functions
significance = 0.05
o = [17, 10, 6, 7, 15, 5] # observed values
e = [10, 10, 10, 10, 10, 10] # expected values
chi2, p = stats.chisquare(o, f_exp = e)
print('chi2 value: %(chi2)s' % locals())
print('p-value: %(p)s' % locals())
if p < significance:
    print('Significant difference at significance level %(significance)s' % locals())
else:
    print('No significant difference at significance level %(significance)s' % locals())
# Exercise 2.1
# Independent (unpaired) t-test
significance = 0.05
X = [68, 75, 80, 71, 73, 79, 69, 65]
Y = [86, 83, 76, 81, 75, 82, 87, 75]
t, p = stats.ttest_ind(X, Y)
print('t value: %(t)s' % locals())
print('p-value: %(p)s' % locals())
if p < significance:
    print('Significant difference at significance level %(significance)s' % locals())
else:
    print('No significant difference at significance level %(significance)s' % locals())
class_one = [70, 75, 70, 85, 90, 70, 80, 75]
class_two = [85, 80, 95, 70, 80, 75, 80, 90]
# Exercise 2.2
# Paired t-test
significance = 0.05
X = [68, 75, 80, 71, 73, 79, 69, 65]
Y = [86, 83, 76, 81, 75, 82, 87, 75]
t, p = stats.ttest_rel(X, Y)
print('t value: %(t)s' % locals())
print('p-value: %(p)s' % locals())
if p < significance:
    print('Significant difference at significance level %(significance)s' % locals())
else:
    print('No significant difference at significance level %(significance)s' % locals())
kokugo = [90, 75, 75, 75, 80, 65, 75, 80]
sansuu = [95, 80, 80, 80, 75, 75, 80, 85]
# Exercise 2.3
# One-way ANOVA
significance = 0.05
a = [34, 39, 50, 72, 54, 50, 58, 64, 55, 62]
b = [63, 75, 50, 54, 66, 31, 39, 45, 48, 60]
c = [49, 36, 46, 56, 52, 46, 52, 68, 49, 62]
f, p = stats.f_oneway(a, b, c)
print('F value: %(f)s' % locals())
print('p-value: %(p)s' % locals())
if p < significance:
    print('Significant difference at significance level %(significance)s' % locals())
else:
    print('No significant difference at significance level %(significance)s' % locals())
group1 = [80, 75, 80, 90, 95, 80, 80, 85, 85, 80, 90, 80, 75, 90, 85, 85, 90, 90, 85, 80]
group2 = [75, 70, 80, 85, 90, 75, 85, 80, 80, 75, 80, 75, 70, 85, 80, 75, 80, 80, 90, 80]
group3 = [80, 80, 80, 90, 95, 85, 95, 90, 85, 90, 95, 85, 98, 95, 85, 85, 90, 90, 85, 85]
# Exercise 2.4
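# Hedged sketch for Exercise 2.4, which is left blank in the source: assuming
# it mirrors Exercise 2.3 using the group1/group2/group3 data defined above.
significance = 0.05
f, p = stats.f_oneway(group1, group2, group3)
print('F value: %(f)s' % locals())
print('p-value: %(p)s' % locals())
if p < significance:
    print('Significant difference at significance level %(significance)s' % locals())
else:
    print('No significant difference at significance level %(significance)s' % locals())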
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h2 STYLE="background
Step2: <h4 style="padding
Step3: <h2 STYLE="background
Step4: <h4 style="padding
Step5: <h4 style="border-bottom
Step6: <h4 style="padding
Step7: <h2 STYLE="background
Step8: <h4 style="padding
|
3,416
|
<ASSISTANT_TASK:>
Python Code:
# ! pip install -u gremlinpython graphistry
# ! pip install -u pandas
# see https://rapids.ai/ if trying GPU dataframes
! pip show gremlinpython graphistry | grep 'Name\|Version'
import graphistry
graphistry.__version__
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
NEPTUNE_READER_PROTOCOL='wss'
NEPTUNE_READER_HOST='neptunedbcluster-abc.cluster-ro-xyz.us-east-1.neptune.amazonaws.com'
NEPTUNE_READER_PORT='8182'
endpoint = f'{NEPTUNE_READER_PROTOCOL}://{NEPTUNE_READER_HOST}:{NEPTUNE_READER_PORT}/gremlin'
endpoint
#import logging
#logging.basicConfig(level=logging.DEBUG)
# placeholder credentials -- replace with your own (see the commented example above)
GRAPHISTRY_CFG = dict(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
graphistry.register(**GRAPHISTRY_CFG)
g = graphistry.neptune(endpoint=endpoint)
g._gremlin_client
%%time
g2 = g.gremlin('g.E().limit(10000)')
print('NODES:')
g2._nodes.info()
g2._nodes.sample(3)
print('EDGES:')
print(g2._edges.info())
g2._edges.sample(3)
%%time
# Enrich nodes dataframe with any available server property data
g3 = g2.fetch_nodes()
print(g3._nodes.info())
g3._nodes.sample(3)
%%time
g3.plot()
%%time
g4 = (g3
# Add node column 'type' based on gremlin-provided column 'label'
# The legend auto-detects this column and appears
.nodes(lambda g: g._nodes.assign(type=g._nodes['label']))
.encode_point_color('type', categorical_mapping={
'website': 'blue',
'transientId': 'green'
})
.encode_point_icon('type', categorical_mapping ={
'website': 'link',
'transientId': 'barcode'
})
.addStyle(bg={'color': '#eee'}, page={'title': 'My Graph'})
# More: https://hub.graphistry.com/docs/api/1/rest/url/
.settings(url_params={'play': 2000})
)
g4.plot()
%%time
url = g4.plot(render=False)
url
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Imports
Step2: Configure
Step3: Connect
Step4: Query & plot
Step5: Customize your visuals & Embed
Step6: Generate URL for other systems
|
3,417
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
a = tf.constant([1,2,3])
b = tf.constant([4,5,6,7])
def g(a,b):
tile_a = tf.tile(tf.expand_dims(a, 1), [1, tf.shape(b)[0]])
tile_a = tf.expand_dims(tile_a, 2)
tile_b = tf.tile(tf.expand_dims(b, 0), [tf.shape(a)[0], 1])
tile_b = tf.expand_dims(tile_b, 2)
cart = tf.concat([tile_a, tile_b], axis=2)
return cart
result = g(a.__copy__(),b.__copy__())
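# Hedged addition: evaluate the graph TF1-style to inspect the cartesian
# product; the result has shape (3, 4, 2), one (a_i, b_j) pair per cell.
with tf.Session() as sess:
    print(sess.run(result))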
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
3,418
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
# %matplotlib inline
%matplotlib tk
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
os.getcwd()
os.chdir('C:\\Users\\manolis\\Desktop\\PycharmProjects\\IRAS\\IRAS\\ref IR spectra')
os.listdir()
nist = pd.read_csv( 'guaiacol IR_solution.jdx', sep=' ', skiprows=36, skipfooter=1, header=None, index_col=0, engine='python' )
nist.head()
dx = 0.806952
wave = np.concatenate(np.array(list(map(lambda x: np.array(x)+dx*np.array([0,1,2,3,4]), nist.index.values))))
wave = wave.reshape((len(wave), 1))
nist = np.ndarray.flatten(nist.values)
nist = nist.reshape((len(nist),1))
# rm nans
nist = nist[~np.isnan(nist)]
wave= wave[:len(nist)]
plt.plot(wave,nist)
nist_absorb = 2 - np.log10(nist*100)
# rm nan vals
# nist_absorb = nist_absorb[~np.isnan(nist_absorb)]
# nist_absorb = nist_absorb.reshape((len(nist_absorb),1))
# wave = wave[:len(nist_absorb)]
# rm nan values
np.sum(~np.isnan(nist_absorb))
nist_absorb = nist_absorb[~np.isnan(nist_absorb)]
# replace inf values with an absorbance of 1
nist_absorb[np.where(nist_absorb == np.inf)] = 1
nist_absorb = nist_absorb.reshape((len(nist_absorb),1))
wave = wave[:len(nist_absorb)]
# plt.figure(figsize=(12, 12))
plt.plot(wave[::-1], nist_absorb[::-1], label='nist')
plt.show()
# plt.legend()
def normalize_wrt(X, x_max_val):
    Leaving this as a function in case I need it in the future... Normalizes with respect to the value you want. Not
necessarily the max value in the column
:param X:
:param x_max_val:
:return:
# TODO may need to fix normalization
x_norm = (X - X.min(axis=0)) / float((x_max_val.values - X.min(axis=0).values))
return x_norm
np.where(nist_absorb==np.inf)
np.max(nist_absorb)
os.chdir('C:\\Users\\manolis\\Desktop\\PycharmProjects\\IRAS\\IRAS\\GUA work\\\Pt(100)\\IR chamber\\IR data\\GUA IR')
os.listdir()
fig, ax = plt.subplots()
colnames = ['Wavenumber', 'Intensity']
file = 'GUA_1E-08_140 sec_big dose.0.dpt'
IR_spectra = pd.read_csv(file, '\t', header=None, names=colnames, dtype=float)
IR_spectra.set_index(colnames[0], inplace=True)
ax.plot(IR_spectra, label='big dose')
plt.legend()
IR_spectra.idxmax()
big = normalize_wrt(X=IR_spectra,x_max_val=np.max(IR_spectra))
fig2, ax2 = plt.subplots()
ax2.plot(big, label='normalized')
os.listdir()
file1 = 'GUA_8e-09_90 sec _seq anneal_anneal to 100 K.0.dpt'
# fig3, ax3 = plt.subplots()
colnames = ['Wavenumber', 'Intensity']
IR_spectra100 = pd.read_csv(file1, '\t', header=None, names=colnames, dtype=float)
IR_spectra100.set_index(colnames[0], inplace=True)
# ax3.plot(IR_spectra, label='100 K')
K100 = normalize_wrt(X=IR_spectra100,x_max_val=np.max(IR_spectra100))
fig3, ax3 = plt.subplots()
ax3.plot(K100, label='100K')
plt.legend()
fig4, ax4 = plt.subplots()
# plot the ratio of the 100 K spectrum and the big spectrum
ax4.plot(K100/big)
# print((K100/big))
file3 = 'GUA_8e-09_90 sec _seq anneal_anneal to 191 K.0.dpt'
# fig3, ax3 = plt.subplots()
colnames = ['Wavenumber', 'Intensity']
IR_spectra191 = pd.read_csv(file3, '\t', header=None, names=colnames, dtype=float)
IR_spectra191.set_index(colnames[0], inplace=True)
# ax3.plot(IR_spectra, label='100 K')
K191 = normalize_wrt(X=IR_spectra191,x_max_val=np.max(IR_spectra191))
fig5, ax5 = plt.subplots()
ax5.plot(K191, label='191K')
plt.legend()
fig6, ax6 = plt.subplots()
# plot the ratio of the 191 K spectrum and the big spectrum
ax6.plot(K191/big)
file4 = 'GUA_8e-09_90 sec _seq anneal_anneal to 249 K.0.dpt'
colnames = ['Wavenumber', 'Intensity']
IR_spectra249 = pd.read_csv(file4, '\t', header=None, names=colnames, dtype=float)
IR_spectra249.set_index(colnames[0], inplace=True)
K249 = normalize_wrt(X=IR_spectra249,x_max_val=np.max(IR_spectra249))
fig7, ax7 = plt.subplots()
ax7.plot(K249, label='249K')
plt.legend()
bCCO_191 = K191.loc[744.4]
K191.iloc[1680:1695]
# nCC_191 = K191.loc[1508.1]
# K191.loc[1508.1]
# df.loc[df['favcount'].argmax(), 'sn']
# K191.max(axis=1).idxmax()
# K191.max(axis=0)
K191.loc[lambda df: df['Intensity'] == df['Intensity'].max()]  # row at the maximum normalized intensity
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We determined that the NIST data for Guaiacol (2-methoxyphenol) had 18 values that were zero in the transmittance data and therefore infinity when we calculated the absorbance data.
Step2: At this point, we are going to consider our data for a very large exposure of GUA on Pt(100). The exposure was 1e-8 Torr for 140s. This is a lot greater than the doses used in our IR experiments and is a good approximation of liquid GUA.
Step3: 100 K IR spectra of GUA adsorbed on Pt(100)
Step4: Let's try this again with the 191 K spectrum
|
3,419
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import mne
data_path = mne.datasets.sample.data_path()
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evokeds = mne.read_evokeds(fname, baseline=(None, 0), proj=True)
print(evokeds)
evoked = mne.read_evokeds(fname, condition='Left Auditory', baseline=(None, 0),
proj=True)
print(evoked)
print(evoked.info)
print(evoked.times)
print(evoked.nave) # Number of averaged epochs.
print(evoked.first) # First time sample.
print(evoked.last) # Last time sample.
print(evoked.comment) # Comment on dataset. Usually the condition.
print(evoked.kind) # Type of data, either average or standard_error.
data = evoked.data
print(data.shape)
print('Data from channel {0}:'.format(evoked.ch_names[10]))
print(data[10])
evoked = mne.EvokedArray(data, evoked.info, tmin=evoked.times[0])
evoked.plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The
Step2: Notice that the reader function returned a list of evoked instances. This is
Step3: If you're gone through the tutorials of raw and epochs datasets, you're
Step4: The evoked data structure also contains some new attributes easily
Step5: The data is also easily accessible. Since the evoked data arrays are usually
Step6: The data is arranged in an array of shape (n_channels, n_times). Notice
Step7: If you want to import evoked data from some other system and you have it in a
|
3,420
|
<ASSISTANT_TASK:>
Python Code:
from random import choice
from time import sleep
from rv.api import Pattern, Project, m, NOTE, NOTECMD
from sunvox.api import init, Slot
init(None, 44100, 2, 0)
slot = Slot()
project = Project()
inst = project.output << project.new_module(
m.AnalogGenerator,
sustain=False,
release=100,
)
pattern = Pattern(lines=4)
project.attach_pattern(pattern)
def randomize_notes(pdata, module):
for row in pdata:
for note in row:
note.module = choice([0, module.index + 1])
note.note = NOTECMD(choice(list(NOTE))) if note.module else NOTECMD.EMPTY
randomize_notes(pattern.data, inst)
pattern.data
slot.load(project)
slot.play_from_beginning()
def update_pdata(pdata, pattern):
width = len(pattern.data[0])
for y, line in enumerate(pattern.data):
for x, note in enumerate(line):
pnote = pdata[y * width + x]
pnote.note = note.note
pnote.vel = note.vel
pnote.module = note.module
pnote.ctl = note.ctl
pnote.val = note.val
pdata = slot.get_pattern_data(0)
slot.play_from_beginning()
for _ in range(25):
randomize_notes(pattern.data, inst)
update_pdata(pdata, pattern)
sleep(.5)
slot.stop()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize SunVox
Step2: Create a project with an Analog Generator
Step3: Create a tiny 4×4 pattern
Step4: Randomize the notes in the pattern
Step5: Load & play the project
Step6: Update pattern data in place during playback
|
3,421
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
from IPython.html.widgets import interact
def char_probs(s):
Find the probabilities of the unique characters in the string s.
Parameters
----------
s : str
A string of characters.
Returns
-------
probs : dict
A dictionary whose keys are the unique characters in s and whose values
are the probabilities of those characters.
    # probability of each unique character: count / total length
    d = {}
    for c in set(s):
        d[c] = s.count(c) / float(len(s))
    return d
test1 = char_probs('aaaa')
assert np.allclose(test1['a'], 1.0)
test2 = char_probs('aabb')
assert np.allclose(test2['a'], 0.5)
assert np.allclose(test2['b'], 0.5)
test3 = char_probs('abcd')
assert np.allclose(test3['a'], 0.25)
assert np.allclose(test3['b'], 0.25)
assert np.allclose(test3['c'], 0.25)
assert np.allclose(test3['d'], 0.25)
lst=char_probs('abcd')
a=np.array(lst['a'])
a
def entropy(d):
Compute the entropy of a dict d whose values are probabilities.
    probs = np.array(list(d.values()))
    # Shannon entropy: H = -sum(P_i * log2(P_i)), measured in bits
    return -np.sum(probs * np.log2(probs))
assert np.allclose(entropy({'a': 0.5, 'b': 0.5}), 1.0)
assert np.allclose(entropy({'a': 1.0}), 0.0)
assert True # use this for grading the pi digits histogram
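# Hedged sketch of the interact UI described in the prompt: type a string
# into a text box and see the entropy of its character probabilities.
def show_entropy(s='abcd'):
    if len(s) > 0:
        print(entropy(char_probs(s)))
interact(show_entropy, s='abcd');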
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Character counting and entropy
Step4: The entropy is a quantitative measure of the disorder of a probability distribution. It is used extensively in Physics, Statistics, Machine Learning, Computer Science and Information Science. Given a set of probabilities $P_i$, the entropy is defined as
Step5: Use IPython's interact function to create a user interface that allows you to type a string into a text box and see the entropy of the character probabilities of the string.
|
3,422
|
<ASSISTANT_TASK:>
Python Code:
from quantopian.pipeline import Pipeline
from quantopian.research import run_pipeline
from quantopian.pipeline.data.builtin import USEquityPricing
from quantopian.pipeline.factors import SimpleMovingAverage
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
mean_close_30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
percent_difference = (mean_close_10 - mean_close_30) / mean_close_30
def make_pipeline():
mean_close_10 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=10)
mean_close_30 = SimpleMovingAverage(inputs=[USEquityPricing.close], window_length=30)
percent_difference = (mean_close_10 - mean_close_30) / mean_close_30
return Pipeline(
columns={
'percent_difference': percent_difference
}
)
result = run_pipeline(make_pipeline(), '2015-05-05', '2015-05-05')
result
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For this example, we need two factors
Step2: Then, let's create a percent difference factor by combining our mean_close_30 factor with our mean_close_10 factor.
Step3: In this example, percent_difference is still a Factor even though it's composed as a combination of more primitive factors. We can add percent_difference as a column in our pipeline. Let's define make_pipeline to create a pipeline with percent_difference as a column (and not the mean close factors)
Step4: Let's see what the new output looks like
|
3,423
|
<ASSISTANT_TASK:>
Python Code:
%%file example.py
from mpi4py.MPI import COMM_WORLD as communicator
import random
# Draw one random integer between 0 and 100
i = random.randint(0, 100)
print('Rank %d' %communicator.rank + ' drew a random integer: %d' %i )
# Gather the results
integer_list = communicator.gather( i, root=0 )
if communicator.rank == 0:
print('\nRank 0 gathered the results:')
print(integer_list)
! mpirun -np 3 python example.py
%%file parallel_script.py
from classification import nearest_neighbor_prediction
import numpy as np
from mpi4py.MPI import COMM_WORLD as communicator
# Load data
train_images = np.load('./data/train_images.npy')
train_labels = np.load('./data/train_labels.npy')
test_images = np.load('./data/test_images.npy')
# Use only the data that this rank needs
N_test = len(test_images)
if communicator.rank == 0:
    i_start = 0
    i_end = N_test // 2  # integer division so the slice bounds are ints
elif communicator.rank == 1:
    i_start = N_test // 2
    i_end = N_test
small_test_images = test_images[i_start:i_end]
# Predict the results
small_test_labels = nearest_neighbor_prediction(small_test_images, train_images, train_labels)
# Assignment: gather the labels on one process and have it write them to a file.
# Hint: you can use np.hstack to merge a list of arrays into a single array,
# and np.save to save an array to a file. Minimal completion based on the hint
# (output file name matches the one loaded later in the notebook):
label_list = communicator.gather(small_test_labels, root=0)
if communicator.rank == 0:
    np.save('data/test_labels_parallel.npy', np.hstack(label_list))
%%time
! mpirun -np 2 python parallel_script.py
# Load and split the set of test images
test_images = np.load('data/test_images.npy')
split_arrays_list = np.array_split( test_images, 4 )
# Print the corresponding shape
print( 'Shape of the original array:' )
print( test_images.shape )
print('Shape of the splitted arrays:')
for array in split_arrays_list:
print( array.shape )
%%file parallel_script.py
from classification import nearest_neighbor_prediction
import numpy as np
from mpi4py.MPI import COMM_WORLD as communicator
# Load data
train_images = np.load('./data/train_images.npy')
train_labels = np.load('./data/train_labels.npy')
test_images = np.load('./data/test_images.npy')
# Assignment: use np.array_split to divide `test_images` among the processes.
# Each process then selects its own small array.
split_list = np.array_split(test_images, communicator.size)
small_test_images = split_list[communicator.rank]
# Predict the results and gather it on rank 0
small_test_labels = nearest_neighbor_prediction(small_test_images, train_images, train_labels)
# Assignment: gather the labels on one process and have it write them to a file.
# Hint: you can use np.hstack to merge a list of arrays into a single array,
# and np.save to save an array to a file. Minimal completion based on the hint:
label_list = communicator.gather(small_test_labels, root=0)
if communicator.rank == 0:
    np.save('data/test_labels_parallel.npy', np.hstack(label_list))
%%time
! mpirun -np 4 python parallel_script.py
# Load the data from the file
test_images = np.load('data/test_images.npy')
test_labels_parallel = np.load('data/test_labels_parallel.npy')
# Define function to have a look at the data
def show_random_digit( images, labels=None ):
"Show a random image out of `images`,
with the corresponding label if available
i = np.random.randint(len(images))
image = images[i].reshape((28, 28))
plt.imshow( image, cmap='Greys' )
if labels is not None:
plt.title('Label: %d' %labels[i])
show_random_digit( test_images, test_labels_parallel )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: What happened?
Step2: The code executes faster than the serial example, because each process has a smaller amount of work, and the two processes execute this work in parallel.
Step3: Assignment
Step5: Check the results
|
3,424
|
<ASSISTANT_TASK:>
Python Code:
import pandas
df = pandas.read_csv('C:\\Users\\user\\Desktop\\TRABALHO-4.csv', sep=';')
df
tempo_medio = 1.1558 / 30
print("O tempo médio entre as chegadas à fila é", tempo_medio*60, "minutos.")
taxa_de_chegada = 1 / tempo_medio
print(taxa_de_chegada)
pandas.DataFrame.mean(df['SAIDA-ENTRADA (min)'])
taxa_de_atendimento = 60 / 2.0478
print(taxa_de_atendimento)
import math
from IPython.display import display, Math, Latex
def teoriafilas(lam, u, s=1):
    # utilization factor rho
    rho = lam/(u*s)
    # P0: probability of an empty system (M/M/s); the constant second term is
    # added once, outside the summation loop
    pzero = 0
    for n in range(s):
        pzero += ((lam/u)**n)/math.factorial(n)
    pzero += ((lam/u)**s)/(math.factorial(s)*(1-(lam/(s*u))))
    pzero = pzero**(-1)
    # P: probability that all s servers are busy
    p = (((lam/u)**s)/(math.factorial(s)*(1-(lam/(s*u)))))*pzero
    # Lq: average queue length
    Lq = ((((lam/u)**s)*lam*u*s)/(math.factorial(s)*((u*s-lam)**2)))*pzero
    # L: average number of customers in the system
    L = Lq+(lam/u)
    # Wq: average waiting time in the queue
    Wq = Lq/lam
    # W: average total time in the system
    W = L/lam
return (rho, pzero, p, Lq, L, Wq*60, W*60)
teoriafilas(25.956047759127877, 29.299736302373276, 3)
teoriafilas(25.956047759127877*2, 29.299736302373276, 3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Answers
Step2: d) The arrival rate of users in the system
Step3: $$\Lambda = 25.956\ customers/hour$$
Step4: f) The service rate of users (μ)
Step5: $$\mu = 29.300\ customers/hour$$
Step6: For the values of S, λ, and μ obtained
Step7: With the arrival rate doubled
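For reference, the standard M/M/s queueing formulas implemented by the teoriafilas function above (added here for clarity):
$$\rho=\frac{\lambda}{s\mu},\qquad P_0=\left[\sum_{n=0}^{s-1}\frac{(\lambda/\mu)^n}{n!}+\frac{(\lambda/\mu)^s}{s!\left(1-\frac{\lambda}{s\mu}\right)}\right]^{-1}$$
$$L_q=\frac{(\lambda/\mu)^s\,\lambda\mu}{(s-1)!\,(s\mu-\lambda)^2}\,P_0,\qquad L=L_q+\frac{\lambda}{\mu},\qquad W_q=\frac{L_q}{\lambda},\qquad W=\frac{L}{\lambda}$$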
|
3,425
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Jeff Hanna <jeff.hanna@gmail.com>
#
# License: BSD (3-clause)
import mne
from mne import io
from mne.datasets import refmeg_noise
from mne.preprocessing import ICA
import numpy as np
print(__doc__)
data_path = refmeg_noise.data_path()
raw_fname = data_path + '/sample_reference_MEG_noise-raw.fif'
raw = io.read_raw_fif(raw_fname).crop(300, 600).load_data()
select_picks = np.concatenate(
(mne.pick_types(raw.info, meg=True)[-32:],
mne.pick_types(raw.info, meg=False, ref_meg=True)))
plot_kwargs = dict(
duration=100, order=select_picks, n_channels=len(select_picks),
scalings={"mag": 8e-13, "ref_meg": 2e-11})
raw.plot(**plot_kwargs)
raw.plot_psd(fmax=30)
raw_tog = raw.copy()
ica_kwargs = dict(
method='picard',
fit_params=dict(tol=1e-4), # use a high tol here for speed
)
all_picks = mne.pick_types(raw_tog.info, meg=True, ref_meg=True)
ica_tog = ICA(n_components=60, allow_ref_meg=True, **ica_kwargs)
ica_tog.fit(raw_tog, picks=all_picks)
# low threshold (2.0) here because of cropped data, entire recording can use
# a higher threshold (2.5)
bad_comps, scores = ica_tog.find_bads_ref(raw_tog, threshold=2.0)
# Plot scores with bad components marked.
ica_tog.plot_scores(scores, bad_comps)
# Examine the properties of removed components. It's clear from the time
# courses and topographies that these components represent external,
# intermittent noise.
ica_tog.plot_properties(raw_tog, picks=bad_comps)
# Remove the components.
raw_tog = ica_tog.apply(raw_tog, exclude=bad_comps)
raw_tog.plot_psd(fmax=30)
raw_sep = raw.copy()
# Do ICA only on the reference channels.
ref_picks = mne.pick_types(raw_sep.info, meg=False, ref_meg=True)
ica_ref = ICA(n_components=2, allow_ref_meg=True, **ica_kwargs)
ica_ref.fit(raw_sep, picks=ref_picks)
# Do ICA on both reference and standard channels. Here, we can just reuse
# ica_tog from the section above.
ica_sep = ica_tog.copy()
# Extract the time courses of these components and add them as channels
# to the raw data. Think of them the same way as EOG/EKG channels, but instead
# of giving info about eye movements/cardiac activity, they give info about
# external magnetic noise.
ref_comps = ica_ref.get_sources(raw_sep)
for c in ref_comps.ch_names: # they need to have REF_ prefix to be recognised
ref_comps.rename_channels({c: "REF_" + c})
raw_sep.add_channels([ref_comps])
# Now that we have our noise channels, we run the separate algorithm.
bad_comps, scores = ica_sep.find_bads_ref(raw_sep, method="separate")
# Plot scores with bad components marked.
ica_sep.plot_scores(scores, bad_comps)
# Examine the properties of removed components.
ica_sep.plot_properties(raw_sep, picks=bad_comps)
# Remove the components.
raw_sep = ica_sep.apply(raw_sep, exclude=bad_comps)
raw_sep.plot(**plot_kwargs)
raw_sep.plot_psd(fmax=30)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read raw data, cropping to 5 minutes to save memory
Step2: Note that even though standard noise removal has already
Step3: The PSD of these data show the noise as clear peaks.
Step4: Run the "together" algorithm.
Step5: Cleaned data
Step6: Now try the "separate" algorithm.
Step7: Cleaned raw data traces
Step8: Cleaned raw data PSD
|
3,426
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from selenium import webdriver
import os,time,json
import pandas as pd
from collections import defaultdict,Counter
import matplotlib.pyplot as plt
url = "http://www.imdb.com/list/ls061683439/"
with open('./filmfare.json',encoding="utf-8") as f:
datatbl = json.load(f)
driver = webdriver.Chrome(datatbl['data']['chromedriver'])
driver.get(url)
def ExtractText(Xpath):
textlist=[]
if(Xpath=="Movies_Runtime_Xpath"):
[textlist.append(item.text[-10:-7]) for item in driver.find_elements_by_xpath(datatbl['data'][Xpath])]
else:
[textlist.append(item.text) for item in driver.find_elements_by_xpath(datatbl['data'][Xpath])]
return textlist
#Extracting Data from Web
Movies_Votes,Movies_Name,Movies_Ratings,Movies_RunTime=[[] for i in range(4)]
datarepo = [[]]*4
Xpath_list = ['Movies_Name_Xpath','Movies_Rate_Xpath','Movies_Runtime_Xpath','Movies_Votes_Xpath']
for i in range(4):
if(i==3):
driver.find_element_by_xpath(datatbl['data']['listview']).click()
datarepo[i] = ExtractText(Xpath_list[i])
driver.quit()
# Result in a Python Dictionary
Years=range(2015,1954,-1)
result = defaultdict(dict)
for i in range(0,len(datarepo[0])):
result[i]['Movie Name']= datarepo[0][i]
result[i]['Year']= Years[i]
result[i]['Rating']= datarepo[1][i]
result[i]['Votes']= datarepo[3][i]
result[i]['RunTime']= datarepo[2][i]
# Dictionary Result
print(json.dumps(result[59], indent=2))
for key,values in result.items():
values['Votes'] = int(values['Votes'].replace(",",""))
values['Rating']= float(values['Rating'])
try:
values['RunTime'] = int(values['RunTime'])
except ValueError:
values['RunTime'] = 0
print(json.dumps(result[0], indent=2))
# Dictionary Result
print(json.dumps(result[0], indent=2))
# create dataframe
df = pd.DataFrame.from_dict(result,orient='index')
df.sort_values(by='Year',ascending=True,inplace=True)
df = df[['Year', 'Movie Name', 'Rating', 'Votes','RunTime']]
df
#Highest Rating Movies
df.sort_values('Rating',ascending=[False]).head(5)
#Movies with maximum Run Time
df.sort_values(['RunTime'],ascending=[False]).head(10)
df.plot(x='Year', y=['RunTime']);
#Rating Greater than 7
df[(df['Rating']>=7)]['Rating'].count()
#Create Rating Graph
Rating_Hist = defaultdict(dict)
Rating_Hist['Btwn 6&7'] = df[(df['Rating']>=6)&(df['Rating']<7)]['Rating'].count()
Rating_Hist['GTEQ 8'] = df[(df['Rating']>=8)]['Rating'].count()
Rating_Hist['Btwn 7 & 8'] = df[(df['Rating']>=7)&(df['Rating']<8)]['Rating'].count()
plt.bar(range(len(Rating_Hist)), Rating_Hist.values(), align='center',color='brown',width=0.4)
plt.xticks(range(len(Rating_Hist)), Rating_Hist.keys(), rotation=25);
#Histogram for average movie run timr
df['RunTime'].mean()
# Movies by Genre
Category=Counter(datatbl['data']['Genre'])
df1 = pd.DataFrame.from_dict(Category,orient='index')
df1 = df1.sort_values([0],ascending=[False]).head(5)
df1.plot(kind='barh',color=['g','c','m']);
# Vote vs rating
%matplotlib inline
import matplotlib.pyplot as plt
df.plot(x=df.Votes,y=['Rating'],kind='scatter');
# plt.scatter(df.Votes, df.Rating, s=df.Rating)
import numpy as np
# note: this replaces the scraped df with synthetic data to demo a log-scale scatter
df = pd.DataFrame(np.random.randint(100000, size=(10000, 2)),
                  columns=['Votes', 'Rating'])
df.plot(kind='scatter', x='Votes', y='Rating', logx=True, alpha=0.5, color='purple', edgecolor='')
plt.ylabel('IMDB Rating')
plt.xlabel('Number of Votes')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Getting Data
Step2: Store Data in a Python Dictionary
Step3: Data before Clean Up
Step4: Clean Data
Step5: How does data in Dictionary looks like
Step6: Data in Pandas Dataframe
Step7: Movies with Highest Ratings
Step8: Movies with Maximum Run time
Step9: Best Picture Run time
Step10: Best Picture Ratings
Step11: Best Picture by Genre
|
3,427
|
<ASSISTANT_TASK:>
Python Code:
#Compute x = 4 * 3 and print the result
x = 4 * 3
print(x)
#Compute y = 6 * 9 and print the result
y = 6 * 9
print(y)
# Import the Pandas library
import pandas as pd
kaggle_path = "http://s3.amazonaws.com/assets.datacamp.com/course/Kaggle/"
# Load the train and test datasets to create two DataFrames
train_url = kaggle_path + "train.csv"
train = pd.read_csv(train_url)
test_url = kaggle_path + "test.csv"
test = pd.read_csv(test_url)
#Print the `head` of the train and test dataframes
print(train.head())
print(test.head())
train.describe()
test.describe()
train.shape
test.shape
# absolute numbers
train['Survived'].value_counts()
# percentages
train['Survived'].value_counts(normalize=True)
train['Survived'][train['Sex']=='male'].value_counts()
train['Survived'][train['Sex'] =='female'].value_counts()
# Passengers that survived vs passengers that passed away
print(train['Survived'].value_counts())
# As proportions
print(train['Survived'].value_counts(normalize=True))
# Males that survived vs males that passed away
print(train['Survived'][train['Sex']=='male'].value_counts())
# Females that survived vs Females that passed away
print(train['Survived'][train['Sex']=='female'].value_counts())
# Normalized male survival
print(train['Survived'][train['Sex']=='male'].value_counts(normalize=True))
# Normalized female survival
print(train['Survived'][train['Sex']=='female'].value_counts(normalize=True))
# Create the column Child and assign to 'NaN'
train["Child"] = float('NaN')
# Assign 1 to passengers under 18, 0 to those 18 or older. Print the new column.
# train['Child'][train['Age'] >= 18] = 0
# train['Child'][train['Age'] < 18] = 1
train.loc[train['Age'] >= 18, 'Child'] = 0
train.loc[train['Age'] < 18, 'Child'] = 1
print(train['Child'])
# Print normalized Survival Rates for passengers under 18
print(train["Survived"][train["Child"] == 1].value_counts(normalize = True))
# Print normalized Survival Rates for passengers 18 or older
print(train["Survived"][train["Child"] == 0].value_counts(normalize = True))
# Create a copy of test: test_one
test_one = test
# Initialize a Survived column to 0
test_one['Survived'] = 0
# Set Survived to 1 if Sex equals "female" and print the `Survived` column from `test_one`
# test_one['Survived'][test_one['Sex'] == 'female'] = 1
test_one.loc[test_one['Sex'] == 'female', 'Survived'] = 1
print(test_one['Survived'])
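# Hedged addition (not part of the original exercise): sanity-check the
# gender-based rule on the training set, where the true outcomes are known.
train_pred = (train['Sex'] == 'female').astype(int)
print((train_pred == train['Survived']).mean())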
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Get the Data with Pandas
Step2: 3.Understanding your data
Step3: 4. Rose vs Jack, or Female vs Male
Step4: 5.Does age play a role?
Step5: 6.First Prediction
|
3,428
|
<ASSISTANT_TASK:>
Python Code:
x=1
print x
type(x)
x.conjugate()
type(1+2j)
z=1+2j
print z
(1,2)
t=(1,2,"text")
t
t
def foo():
return (1,2)
x,y=foo()
print x
print y
def swap(x,y):
return (y,x)
x=1;y=2
print "{0:d} {1:d}".format(x,y)
x,y=swap(x,y)
print "{:f} {:f}".format(x,y)
dir(1)
x=[]
x.append("text")
x
x.append(1)
x.pop()
x.append([1,2,3])
x
x.append(2)
x
print x[0]
print x[-2]
x.pop(-2)
x
%%timeit -n10
x=[]
for i in range(100000):
x.append(2*i+1)
%%timeit -n10
x=[]
for i in xrange(100000):
x.append(2*i+1)
range(10)
y=[2*i+1 for i in xrange(10)]
print y
type({})
x={"key":"value","foo":"bar"}
print x
key="key1"
if key in x:
print x[key]
y={ i:i*i for i in xrange(10)}
y
z=[v for (k,v) in y.iteritems()]
print z
p=[]
for i in xrange(2,100):
isprime=1
for j in p:
if(i%j==0):
isprime=0
break
if isprime:
p.append(i)
print p
for i in xrange(10):
pass
i=10
while i>0:
i=i-1
print i
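# Added illustration: `continue` and if/elif/else, named in this section's
# header but not shown in the cells above.
for i in xrange(10):
    if i % 2 == 0:
        continue  # skip even numbers
    elif i < 5:
        print 'small odd:', i
    else:
        print 'large odd:', i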
x=['text',"str",''' Hello World\\n ''']
print x
x=['a','b','c']
#access
print x[0]
#update
x[0]='d'
print x
print "size of x%d is"%len(x)
y=['x','y','z']
z=x+y
gamma=y+x
print z
print gamma
print 'a' in x
print y
y.remove('y')# remove by vavlue
print y
print y
y.pop(0)# remove by index
print y
y.insert(0,'x')
y.insert(1,'y')
print y
x=[i*i for i in xrange(10)]
print x
x[:3]
x[-3:]
x[-1:]
x[3:-3]
x[1:6]
x[::2]
print x
x.reverse()
print x
print x
print x[::-1]
print x
x={}
x={'key':'value'}
x['foo']='bar'
x
x['foo']='Hello'
x
x['m']=123
# x['foo','key'] raises a KeyError: a dict has no multi-key lookup;
# fetch several keys with a comprehension instead, as below
keys=['foo','key']
[x[k] for k in keys]
print x
del x
# print x  # raises a NameError: x was removed by `del x`
def foo(x):
x=x+1
y=2*x
return y
print foo(3)
x=3
print foo(x)
print x
def bar(x=[]):
x.append(7)
print "in loop: {}".format(x)
x=[1,2,3]
print x
bar(x)
print x
def func(x=0,y=0,z=0):#defualt input argument
return x*100+y*10+z
func(1,2)
func(y=2,z=3,x=1)#keyword input argument
f=func
f(y=2)
distance=[13,500,1370]#meter
def meter2Kilometer(d):
return d/1000.0;
meter2Kilometer(distance)
[meter2Kilometer(d) for d in distance]
d2 = map(meter2Kilometer,distance)
print d2
d3 = map(lambda x: x/1000.0,distance)
print d3
distance=[13,500,1370]#meter
time=[1,10,100]
d3 = map(lambda s,t: s/float(t)*3.6, distance,time )
print d3
d4=filter(lambda s: s<1000, distance)
print d4
total_distance=reduce(lambda i,j : i+j, distance)
total_distance
import numpy as np
x=np.arange(101)
print x
np.histogram(x,bins=[0,50,60,70,80,100])
print np.sort(x)
class Obj:
def __init__(self, _x, _y):
self.x = _x
self.y = _y
def update(self, _x, _y):
self.x += _x
self.y += _y
def __str__(self):
return "x:%d, y:%d"%(self.x,self.y)
a=Obj(5,7)#call __init__
print a#call __str__
a.update(1,2)#call update
print a
import sys
import os
path=os.getcwd()
path=os.path.join(path,'lib')
print path
sys.path.insert(0, path)
from Obj import Obj as ob
b=ob(7,9)
print b
b.update(3,7)
print b
os.getcwd()
from mylib import mymodule as mm
mm=reload(mm)
print mm.Obj2(8,9)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: if/else, for, while, pass, break, continue
Step2: List
Step3: Dictionary
Step4: Function
Step5: Module
|
3,429
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import pandas as pd
pd.options.display.max_rows = 999
pd.set_option('display.width', 1000)
import glob
header = ["evaluation_type", "dataset", "kwargs", "evaluation", "value", "num_skipped"]
similarity_dfs = []
similarity_names = []
similarity_glob = './results/similarity*'
for similarity_file in glob.glob(similarity_glob):
df = pd.read_table(similarity_file, header=None, names=header).set_index(["dataset", "kwargs"]).drop(["evaluation_type"], axis=1)
similarity_dfs.append(df)
similarity_names.append(similarity_file[len(similarity_glob):])
analogy_dfs = []
analogy_names = []
analogy_glob = './results/analogy*'
for analogy_file in glob.glob(analogy_glob):
df = pd.read_table(analogy_file, header=None, names=header).set_index(["dataset", "kwargs", "evaluation"]).drop(["evaluation_type"], axis=1)
analogy_dfs.append(df)
analogy_names.append(analogy_file[len(analogy_glob):])
similarity_df = pd.concat(similarity_dfs, keys=similarity_names, names=['embedding']).reorder_levels(["dataset", "kwargs", "embedding"]).sort_index()
analogy_df = pd.concat(analogy_dfs, keys=analogy_names, names=['embedding']).reorder_levels(["dataset", "evaluation", "kwargs", "embedding"]).sort_index()
for (dataset, kwargs), df in similarity_df.groupby(level=[0,1]):
print('Performance on', dataset, kwargs)
print(df.loc[dataset, kwargs].sort_values(by='value', ascending=False))
print()
print()
for kwargs, df in analogy_df.loc['GoogleAnalogyTestSet', 'threecosmul'].groupby(level=0):
print(kwargs)
print(df.loc[kwargs].sort_values(by='value', ascending=False))
print()
print()
for kwargs, df in analogy_df.loc['GoogleAnalogyTestSet', 'threecosadd'].groupby(level=0):
print(kwargs)
print(df.loc[kwargs].sort_values(by='value', ascending=False))
print()
print()
for kwargs, df in analogy_df.loc['BiggerAnalogyTestSet', 'threecosadd'].groupby(level=0):
print(kwargs)
print(df.loc[kwargs].sort_values(by='value', ascending=False))
print()
print()
for kwargs, df in analogy_df.loc['BiggerAnalogyTestSet', 'threecosmul'].groupby(level=0):
print(kwargs)
print(df.loc[kwargs].sort_values(by='value', ascending=False))
print()
print()
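# Hedged addition: a compact cross-embedding comparison of the similarity
# results via unstack, assuming the (dataset, kwargs, embedding) MultiIndex
# built above.
summary = similarity_df['value'].unstack('embedding')
print(summary)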
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Similarity task
Step2: Analogy task
Step3: We then present the results using the threecosadd analogy function.
Step4: Bigger Analogy Test Set
Step5: We then present the results for the threecosmul analogy function.
|
3,430
|
<ASSISTANT_TASK:>
Python Code:
# import packages
import phylogenetics as phy
import phylogenetics.tools as tools
import phylopandas as ph
import pandas as pd
from phylovega import TreeChart
# intitialize project object and create project folder
project = phy.PhylogeneticsProject(project_dir='tutorial', overwrite=True)
# read in seed sequence(s) to project object
project.read_data("md2_seed_sequence.txt", schema="fasta")
# run BLAST search with default settings, returning 100 hits
project.compute_blast(hitlist_size=100)
project.compute_clusters()
project.compute_alignment()
project.compute_gblocks()
project.compute_tree()
project.compute_reconstruction()
# Visualize tree and ancestors using phylovega
from phylovega import TreeChart
# Construct Vega Specification
chart = TreeChart.from_phylopandas(
project.data,
height_scale=300,
# Node attributes
node_size=300,
node_color="#ccc",
# Leaf attributes
leaf_labels="id",
# Edge attributes
edge_width=2,
edge_color="#000",
)
chart
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Initialize a phylogenetics project
Step2: 2. Read in your starting sequence(s)
Step3: 3. Use BLAST to search for orthologs similar to your seed sequence(s)
Step4: 4. Build a phylogenetic tree using PhyML
|
3,431
|
<ASSISTANT_TASK:>
Python Code:
# remove display of install details
%%capture --no-display
!pip install ipyparallel
import subprocess
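# launch a local ipyparallel cluster with 4 worker engines in the background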
subprocess.Popen(['ipcluster', 'start', '-n', '4'])
# authorize Google to access Google drive files
!pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
gdrive_train_link = 'https://drive.google.com/file/d/1B8fZkx-be5vvCsmCJ_kACUu51LXxbZWT/view?usp=sharing'
id = '1B8fZkx-be5vvCsmCJ_kACUu51LXxbZWT'
downloaded = drive.CreateFile({'id':id})
downloaded.GetContentFile('training_set_for_LSST_DSFP.tar.gz')
subprocess.call(['tar', '-zxf', 'training_set_for_LSST_DSFP.tar.gz'])
gdrive_test_link = 'https://drive.google.com/file/d/181zzrkU02FciaeuV714MB5QZ39N32R3k/view?usp=sharing'
id = '181zzrkU02FciaeuV714MB5QZ39N32R3k'
downloaded = drive.CreateFile({'id':id})
downloaded.GetContentFile('test_set_for_LSST_DSFP.tar.gz')
subprocess.call(['tar', '-zxf', 'test_set_for_LSST_DSFP.tar.gz'])
# first we need to construct a client that will interface with our cluster
from ipyparallel import Client, require
worker = Client()
# once we create a client, we can decide how to allocate tasks across the cluster
# we've got however many 'engines' you started in the cluster
# lets just use all of them
lview = worker[:]
# now if you want to import packages, you can import them across all the 'engines'
with lview.sync_imports():
import numpy as np
import scipy.stats as spstat
import pandas as pd
import os
# there's not much point in creating plots on the engines - we want to actually see them presumably
%matplotlib inline
import matplotlib.pyplot as plt
# If everything has worked so far, you should see a list of worker IDs, and a corresponding list of process IDs.
# You can verify it with a `ps`
ar = lview.apply_async(os.getpid)
print("Engine ID:PID mapping: ", ar.get_dict())
lc = pd.read_csv('training_set_for_LSST_DSFP/FAKE001.dat', delim_whitespace=True, comment = '#')
g_obs = np.where(lc['pb'] == 'g')
fig, ax = plt.subplots(figsize=(7,7))
ax.errorbar(lc['t'].iloc[g_obs].values,
lc['flux'].iloc[g_obs].values,
lc['dflux'].iloc[g_obs].values,
fmt = 'o', color = 'green')
ax.set_xlabel('MJD')
ax.set_ylabel('flux')
fig.tight_layout()
class Source():
'''Astronomical source object for NOAO formatted light curve
Parameters
----------
filename : str
path to space delimited file including each time,
filter, flux, and flux_uncertainty brightness
measurement on each line. A header line must be
included in the file with 't pb flux dflux'
Attributes
----------
_filename : str
the filename parameter
_lc_df : pandas DataFrame
light curve data read into a pandas dataframe
_filters : arr-like
numpy array storing each of the unique passbands
in the light curve file
'''
def __init__(self, filename):
lc_df = pd.read_csv(filename, delim_whitespace=True, comment = '#')
u_obs = np.where(lc_df['pb'] == 'u')
if len(u_obs[0]) > 0:
lc_df.drop(lc_df.index[u_obs], inplace=True)
self._lc_df = lc_df
self._filename = filename
self._filters = np.unique(self._lc_df['pb'])
def plot_multicolor_lc(self):
'''Plot the 4 band light curve'''
fig, ax = plt.subplots(figsize=(7,7))
color_dict = {'g': '#78A5A3',
'r': '#CE5A57',
'i': '#E1B16A',
'z': '#444C5C', }
for filt in self._filters:
filt_obs = np.where(self._lc_df['pb'] == filt)
color = color_dict[filt]
ax.errorbar(self._lc_df['t'].iloc[filt_obs],
self._lc_df['flux'].iloc[filt_obs],
self._lc_df['dflux'].iloc[filt_obs],
fmt = 'o', color = color_dict[filt],
label = "${0}$".format(filt))
ax.legend(fancybox = True)
ax.set_xlabel(r"$\mathrm{MJD}$")
ax.set_ylabel(r"$\mathrm{flux}$")
fig.tight_layout()
lc = Source('training_set_for_LSST_DSFP/FAKE010.dat')
lc.plot_multicolor_lc()
lc60 = Source("training_set_for_LSST_DSFP/FAKE060.dat")
lc60.plot_multicolor_lc()
lc95 = Source("training_set_for_LSST_DSFP/FAKE095.dat")
lc95.plot_multicolor_lc()
class Variable(Source):
'''Class to hold variable light curves and features
Parameters
----------
filename : str
path to space delimited file including each time,
filter, flux, and flux_uncertainty brightness
measurement on each line. A header line must be
included in the file with 't pb flux dflux'
Attributes
----------
_g_time : arr-like
times of the g-band observations
_g_flux : arr-like
flux measurements of the g-band observations
_g_flux_unc : arr-like
uncertainties of the flux measurements of the g-band observations
_r_time : arr-like
times of the r-band observations
_r_flux : arr-like
flux measurements of the r-band observations
_r_flux_unc : arr-like
uncertainties of the flux measurements of the r-band observations
_i_time : arr-like
times of the i-band observations
_i_flux : arr-like
flux measurements of the i-band observations
_i_flux_unc : arr-like
uncertainties of the flux measurements of the i-band observations
_z_time : arr-like
times of the z-band observations
_z_flux : arr-like
flux measurements of the z-band observations
_z_flux_unc : arr-like
uncertainties of the flux measurements of the z-band observations
_g_mean : float
weighted mean of the g-band observations
_r_mean : float
weighted mean of the r-band observations
_i_mean : float
weighted mean of the i-band observations
_z_mean : float
weighted mean of the z-band observations
_g_std : float
normalized standard deviation of g-band flux measurements
_r_std : float
normalized standard deviation of r-band flux measurements
_i_std : float
normalized standard deviation of i-band flux measurements
_z_std : float
normalized standard deviation of z-band flux measurements
_g_amp : float
normalized amplitude of g-band flux measurements
_r_amp : float
normalized amplitude of r-band flux measurements
_i_amp : float
normalized amplitude of i-band flux measurements
_z_amp : float
normalized amplitude of z-band flux measurements
_g_mad : float
normalized median absolute deviation of g-band flux measurements
_r_mad : float
normalized median absolute deviation of r-band flux measurements
_i_mad : float
normalized median absolute deviation of i-band flux measurements
_z_mad : float
normalized median absolute deviation of z-band flux measurements
_g_beyond : float
fraction of g-band flux measurements greater than 1 sample
standard deviation away from the weighted mean
_r_beyond : float
fraction of r-band flux measurements greater than 1 sample
standard deviation away from the weighted mean
_i_beyond : float
fraction of i-band flux measurements greater than 1 sample
standard deviation away from the weighted mean
_z_beyond : float
fraction of z-band flux measurements greater than 1 sample
standard deviation away from the weighted mean
_g_skew : float
skew of g-band flux measurements
_r_skew : float
skew of r-band flux measurements
_i_skew : float
skew of i-band flux measurements
_z_skew : float
skew of z-band flux measurements
    _g_minus_r : float
        -2.5 multiplied by the logarithmic difference of the g-
        and r-band observations
    _r_minus_i : float
        -2.5 multiplied by the logarithmic difference of the r-
        and i-band observations
    _i_minus_z : float
        -2.5 multiplied by the logarithmic difference of the i-
        and z-band observations
'''
def __init__(self, filename):
Source.__init__(self, filename)
def filter_flux(self):
'''Store individual passband fluxes as object attributes'''
for filt in self._filters:
filt_obs = np.where(self._lc_df['pb'] == filt)
setattr(self, '_{0}_time'.format(filt),
self._lc_df['t'].iloc[filt_obs])
setattr(self, '_{0}_flux'.format(filt),
self._lc_df['flux'].iloc[filt_obs])
setattr(self, '_{0}_flux_unc'.format(filt),
self._lc_df['dflux'].iloc[filt_obs])
def weighted_mean_flux(self):
'''Measure (SNR weighted) mean flux in griz'''
if not hasattr(self, '_g_flux'):
self.filter_flux()
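        # SNR-weighted mean: weight each flux point by its squared signal-to-noise ratio, (flux/dflux)**2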
weighted_mean = lambda flux, dflux: np.sum(flux*(flux/dflux)**2)/np.sum((flux/dflux)**2)
for filt in self._filters:
filt_flux = getattr(self, '_{0}_flux'.format(filt))
filt_dflux = getattr(self, '_{0}_flux_unc'.format(filt))
setattr(self, '_{0}_mean'.format(filt),
weighted_mean(filt_flux, filt_dflux))
def normalized_flux_std(self):
'''Measure standard deviation of flux in griz'''
if not hasattr(self, '_g_mean'):
self.weighted_mean_flux()
normalized_flux_std = lambda flux, w_mean_flux: np.std(flux/w_mean_flux, ddof = 1)
for filt in self._filters:
filt_flux = getattr(self, '_{0}_flux'.format(filt))
filt_mean = getattr(self, '_{0}_mean'.format(filt))
setattr(self, '_{0}_std'.format(filt),
normalized_flux_std(filt_flux, filt_mean))
def normalized_amplitude(self):
'''Measure the normalized amplitude of variations in griz'''
if not hasattr(self, '_g_mean'):
self.weighted_mean_flux()
normalized_amplitude = lambda flux, w_mean_flux: (np.max(flux) - np.min(flux))/w_mean_flux
for filt in self._filters:
filt_flux = getattr(self, '_{0}_flux'.format(filt))
filt_mean = getattr(self, '_{0}_mean'.format(filt))
setattr(self, '_{0}_amp'.format(filt),
normalized_amplitude(filt_flux, filt_mean))
def normalized_MAD(self):
'''Measure normalized Median Absolute Deviation (MAD) in griz'''
if not hasattr(self, '_g_mean'):
self.weighted_mean_flux()
normalized_mad = lambda flux, w_mean_flux: np.median(np.abs((flux - np.median(flux))/w_mean_flux))
for filt in self._filters:
filt_flux = getattr(self, '_{0}_flux'.format(filt))
filt_mean = getattr(self, '_{0}_mean'.format(filt))
setattr(self, '_{0}_mad'.format(filt),
normalized_mad(filt_flux, filt_mean))
def beyond_1std(self):
'''Measure fraction of flux measurements beyond 1 std'''
if not hasattr(self, '_g_mean'):
self.weighted_mean_flux()
beyond_1std = lambda flux, w_mean_flux: sum(np.abs(flux - w_mean_flux) > np.std(flux, ddof = 1))/len(flux)
for filt in self._filters:
filt_flux = getattr(self, '_{0}_flux'.format(filt))
filt_mean = getattr(self, '_{0}_mean'.format(filt))
setattr(self, '_{0}_beyond'.format(filt),
beyond_1std(filt_flux, filt_mean))
def skew(self):
'''Measure the skew of the flux measurements'''
if not hasattr(self, '_g_flux'):
self.filter_flux()
skew = lambda flux: spstat.skew(flux)
for filt in self._filters:
filt_flux = getattr(self, '_{0}_flux'.format(filt))
setattr(self, '_{0}_skew'.format(filt),
skew(filt_flux))
def mean_colors(self):
'''Measure the mean g-r, g-i, and g-z colors'''
if not hasattr(self, '_g_mean'):
self.weighted_mean_flux()
for filt_pair in [('g','r'), ('r','i'), ('i','z')]:
blue_mean = getattr(self, '_{0}_mean'.format(filt_pair[0]))
red_mean = getattr(self, '_{0}_mean'.format(filt_pair[1]))
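            # colors are only defined for positive mean fluxes; -999 flags an undefined color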
if blue_mean > 0 and red_mean > 0:
setattr(self, '_{0}_minus_{1}'.format(*filt_pair),
-2.5*np.log10(blue_mean/red_mean))
else:
setattr(self, '_{0}_minus_{1}'.format(*filt_pair),
-999)
var = Variable('training_set_for_LSST_DSFP/FAKE010.dat')
var.mean_colors()
print("The g'-r', r'-i', and 'i-z' colors are:\
{:.3f}, {:.3f}, and {:.3f}, respectively.". format(var._g_minus_r, var._r_minus_i, var._i_minus_z))
import sqlite3
conn = sqlite3.connect("MiniBroker.db")
cur = conn.cursor()
cur.execute("""drop table if exists rawPhot""") # drop the table if it already exists
cur.execute("""create table rawPhot(
                   id integer primary key,
                   objId int,
                   t float,
                   pb varchar(1),
                   flux float,
                   dflux float)
            """)
filename = "training_set_for_LSST_DSFP/FAKE001.dat"
var = Variable(filename)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
cur.execute("""insert into rawPhot(objId, t, pb, flux, dflux) values {}""".format((objId,) + tuple(var._lc_df.iloc[0])))
cur.execute("""insert into rawPhot(objId, t, pb, flux, dflux) values {}""".format((objId,) + tuple(var._lc_df.iloc[1])))
cur.execute("""insert into rawPhot(objId, t, pb, flux, dflux) values {}""".format((objId,) + tuple(var._lc_df.iloc[2])))
cur.execute("""drop table if exists rawPhot""") # drop the table if it already exists
cur.execute("""create table rawPhot(
                   id integer primary key,
                   objId int,
                   t float,
                   pb varchar(1),
                   flux float,
                   dflux float)
            """)
# next 3 lines are already in name space; repeated for clarity
filename = "training_set_for_LSST_DSFP/FAKE001.dat"
var = Variable(filename)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
data = [(objId,) + tuple(x) for x in var._lc_df.values] # array of tuples
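# the (?,?,?,?,?) placeholders let sqlite3 substitute each tuple safely, without manual string formatting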
cur.executemany("""insert into rawPhot(objId, t, pb, flux, dflux) values (?,?,?,?,?)""", data)
# build your list of filenames here
import glob
filenames = glob.glob("training_set_for_LSST_DSFP/FAKE*.dat")
for filename in filenames[1:]:
var = Variable(filename)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
data = [(objId,) + tuple(x) for x in var._lc_df.values] # array of tuples
    cur.executemany("""insert into rawPhot(objId, t, pb, flux, dflux) values (?,?,?,?,?)""", data)
cur.execute("""select t, flux, dflux
               from rawPhot
               where objId = 10 and pb = 'g'""")
data = cur.fetchall()
data = np.array(data)
fig, ax = plt.subplots(figsize=(7,7))
ax.errorbar(data[:,0], data[:,1], data[:,2], fmt = 'o', color = '#78A5A3')
ax.set_xlabel(r"$\mathrm{MJD}$")
ax.set_ylabel(r"$\mathrm{flux}$")
fig.tight_layout()
tmp_conn1 = sqlite3.connect("tmp1.db")
tmp_cur1 = tmp_conn1.cursor()
tmp_cur1.execute("""drop table if exists rawPhot""") # drop the table if it already exists
tmp_cur1.execute("""create table rawPhot(
                        id integer primary key,
                        objId int,
                        t float,
                        pb varchar(1),
                        flux float,
                        dflux float)
                 """)
%%timeit
# time how long it takes to load every observation individually
for filename in filenames:
var = Variable(filename)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
for obs in var._lc_df.values:
        tmp_cur1.execute("""insert into rawPhot(objId, t, pb, flux, dflux) values {}""".format((objId,) + tuple(obs)))
tmp_conn2 = sqlite3.connect("tmp2.db")
tmp_cur2 = tmp_conn2.cursor()
tmp_cur2.execute("""drop table if exists rawPhot""") # drop the table if it already exists
tmp_cur2.execute("""create table rawPhot(
                        id integer primary key,
                        objId int,
                        t float,
                        pb varchar(1),
                        flux float,
                        dflux float)
                 """)
%%timeit
# time how long it takes to load all observations of a single source at once
for filename in filenames:
var = Variable(filename)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
data = [(objId,) + tuple(x) for x in var._lc_df.values] # array of tuples
    tmp_cur2.executemany("""insert into rawPhot(objId, t, pb, flux, dflux) values (?,?,?,?,?)""", data)
# execute this
@require(Source, Variable, 'pandas as pd', 'numpy as np')
def load_data(filename):
var = Variable(filename)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
data = [(objId,) + tuple(x) for x in var._lc_df.values] # array of tuples
return data
result = lview.map(load_data, filenames)
all_data = result.get()
cur.execute("""drop table if exists rawPhot""") # drop the table if it already exists
cur.execute("""create table rawPhot(
                   id integer primary key,
                   objId int,
                   t float,
                   pb varchar(1),
                   flux float,
                   dflux float)
            """)
for data in all_data:
    cur.executemany("""insert into rawPhot(objId, t, pb, flux, dflux) values (?,?,?,?,?)""", data)
tmp_conn3 = sqlite3.connect("tmp3.db")
tmp_cur3 = tmp_conn3.cursor()
tmp_cur3.execute("""drop table if exists rawPhot""") # drop the table if it already exists
tmp_cur3.execute("""create table rawPhot(
                        id integer primary key,
                        objId int,
                        t float,
                        pb varchar(1),
                        flux float,
                        dflux float)
                 """)
%%timeit
# time how long it takes to read files in parallel
result = lview.map(load_data, filenames)
all_data = result.get()
for data in all_data:
    tmp_cur3.executemany("""insert into rawPhot(objId, t, pb, flux, dflux) values (?,?,?,?,?)""", data)
cur.execute("""select t, flux, dflux
               from rawPhot
               where objId = 10 and pb = 'g'""")
data = cur.fetchall()
data = np.array(data)
fig, ax = plt.subplots(figsize=(7,7))
ax.errorbar(data[:,0], data[:,1], data[:,2], fmt = 'o', color = '#78A5A3')
ax.set_xlabel(r"$\mathrm{MJD}$")
ax.set_ylabel(r"$\mathrm{flux}$")
fig.tight_layout()
cur.execute("""drop table if exists lcFeats""") # drop the table if it already exists
cur.execute("""create table lcFeats(
                   id integer primary key,
                   objId int,
                   gStd float,
                   rStd float,
                   iStd float,
                   zStd float,
                   gAmp float,
                   rAmp float,
                   iAmp float,
                   zAmp float,
                   gMAD float,
                   rMAD float,
                   iMAD float,
                   zMAD float,
                   gBeyond float,
                   rBeyond float,
                   iBeyond float,
                   zBeyond float,
                   gSkew float,
                   rSkew float,
                   iSkew float,
                   zSkew float,
                   gMinusR float,
                   rMinusI float,
                   iMinusZ float,
                   FOREIGN KEY(objId) REFERENCES rawPhot(objId))
            """)
# as before, define a function to calculate the features for one lightcurve
@require(Source, Variable, 'pandas as pd', 'numpy as np', 'scipy.stats as spstat')
def calculate_features(filename):
var = Variable(filename)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
var.weighted_mean_flux()
var.normalized_flux_std()
var.normalized_amplitude()
var.normalized_MAD()
var.beyond_1std()
var.skew()
var.mean_colors()
feats = (objId, var._g_std, var._r_std, var._i_std, var._z_std,
var._g_amp, var._r_amp, var._i_amp, var._z_amp,
var._g_mad, var._r_mad, var._i_mad, var._z_mad,
var._g_beyond, var._r_beyond, var._i_beyond, var._z_beyond,
var._g_skew, var._r_skew, var._i_skew, var._z_skew,
var._g_minus_r, var._r_minus_i, var._i_minus_z)
return feats
# and then lets map the function onto all the data
result = lview.map(calculate_features, filenames)
# and get the result
all_feats = result.get()
# and now load it all into our table
for feats in all_feats:
    cur.execute("""insert into lcFeats(objId,
                   gStd, rStd, iStd, zStd,
                   gAmp, rAmp, iAmp, zAmp,
                   gMAD, rMAD, iMAD, zMAD,
                   gBeyond, rBeyond, iBeyond, zBeyond,
                   gSkew, rSkew, iSkew, zSkew,
                   gMinusR, rMinusI, iMinusZ) values {}""".format(feats))
cur.execute("""select count(*) from lcFeats where gAmp > 2""")
nAmp2 = cur.fetchone()[0]
cur.execute("""select count(*) from lcFeats where gMinusR = -999""")
nNoColor = cur.fetchone()[0]
print("There are {:d} sources with gAmp > 2".format(nAmp2))
print("There are {:d} sources with no measured g' - r' color".format(nNoColor))
conn.commit()
cur.execute("""drop table if exists lcLabels""") # drop the table if it already exists
cur.execute("""create table lcLabels(
                   objId int,
                   label int,
                   foreign key(objId) references rawPhot(objId))
            """)
labels = np.zeros(100)
labels[20:60] = 1
labels[60:] = 2
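# objIds 1-20 are class 0, objIds 21-60 are class 1, objIds 61-100 are class 2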
data = np.append(np.arange(1,101)[np.newaxis].T, labels[np.newaxis].T, axis = 1)
tup_data = [tuple(x) for x in data]
cur.executemany("""insert into lcLabels(objId, label) values (?,?)""", tup_data)
cur.execute("""select label
               from lcLabels
               order by objId asc""")
y = np.array(cur.fetchall()).ravel()
cur.execute("""select gStd, rStd, iStd, zStd,
                      gAmp, rAmp, iAmp, zAmp,
                      gMAD, rMAD, iMAD, zMAD,
                      gBeyond, rBeyond, iBeyond, zBeyond,
                      gSkew, rSkew, iSkew, zSkew,
                      gMinusR, rMinusI, iMinusZ
               from lcFeats
               order by objId asc""")
X = np.array(cur.fetchall())
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(SVC(C = 1.0, gamma = 0.1, kernel = 'rbf'), X, y, cv = 5)
print("The SVM model produces a CV accuracy of {:.4f}".format(np.mean(cv_scores)))
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=25)
svm_clf = SVC(C = 1.0, gamma = 0.1, kernel = 'rbf')
%%timeit
# timing solution for RF model
rf_clf.fit(X,y)
%%timeit
# timing solution for SVM model
svm_clf.fit(X,y)
cv_scores = cross_val_score(rf_clf, X, y, cv = 5)
print("The RF model produces a CV accuracy of {:.4f}".format(np.mean(cv_scores)))
cur.execute("""drop table if exists testPhot""") # drop the table if it already exists
cur.execute("""create table testPhot(
                   id integer primary key,
                   objId int,
                   t float,
                   pb varchar(1),
                   flux float,
                   dflux float)
            """)
cur.execute("""drop table if exists testFeats""") # drop the table if it already exists
cur.execute("""create table testFeats(
                   id integer primary key,
                   objId int,
                   gStd float,
                   rStd float,
                   iStd float,
                   zStd float,
                   gAmp float,
                   rAmp float,
                   iAmp float,
                   zAmp float,
                   gMAD float,
                   rMAD float,
                   iMAD float,
                   zMAD float,
                   gBeyond float,
                   rBeyond float,
                   iBeyond float,
                   zBeyond float,
                   gSkew float,
                   rSkew float,
                   iSkew float,
                   zSkew float,
                   gMinusR float,
                   rMinusI float,
                   iMinusZ float,
                   FOREIGN KEY(objId) REFERENCES testPhot(objId))
            """)
new_obs_filenames = glob.glob("test_set_for_LSST_DSFP/FAKE*.dat")
result = lview.map(load_data, new_obs_filenames)
all_data = result.get()
for data in all_data:
    cur.executemany("""insert into testPhot(objId, t, pb, flux, dflux) values (?,?,?,?,?)""", data)
result = lview.map(calculate_features, new_obs_filenames)
all_feats = result.get()
for feats in all_feats:
    cur.execute("""insert into testFeats(objId,
                   gStd, rStd, iStd, zStd,
                   gAmp, rAmp, iAmp, zAmp,
                   gMAD, rMAD, iMAD, zMAD,
                   gBeyond, rBeyond, iBeyond, zBeyond,
                   gSkew, rSkew, iSkew, zSkew,
                   gMinusR, rMinusI, iMinusZ) values {}""".format(feats))
svm_clf = SVC(C=1.0, gamma = 0.1, kernel = 'rbf').fit(X, y)
cur.execute("""select gStd, rStd, iStd, zStd,
                      gAmp, rAmp, iAmp, zAmp,
                      gMAD, rMAD, iMAD, zMAD,
                      gBeyond, rBeyond, iBeyond, zBeyond,
                      gSkew, rSkew, iSkew, zSkew,
                      gMinusR, rMinusI, iMinusZ
               from testFeats
               order by objId asc""")
X_new = np.array(cur.fetchall())
y_preds = svm_clf.predict(X_new)
print("There are {:d}, {:d}, and {:d} sources "
      "in classes 1, 2, 3, respectively.".format(*list(np.bincount(y_preds)))) # be careful using bincount
! grep label test_set_for_LSST_DSFP/*dat | sed -e 's/label:/ /' | awk '{print $2+1}' | sort | uniq -c
import sklearn.gaussian_process as gp
# we'll create a function that just fits the g band lightcurve
def calculate_gpfit(filename):
    '''Do a quick Gaussian process regression of just the g-band lightcurve
    to get a smooth representation. This should be more robust to outliers.
    '''
var = Variable(filename)
var.filter_flux()
t = var._g_time.values
y = var._g_flux.values
dy = var._g_flux_unc.values
t = t.reshape(-1, 1)
objId = int(filename.split('FAKE')[1].split(".dat")[0])
# a Gaussian Process is defined by a correlation function that relates each point to each other
# I've chosen a simple common kernel - the Matern kernel.
# It has a nice property in that it is 'stationary' - the covariance between points
# only depends on their separation
base_kernel = gp.kernels.Matern(length_scale=10.,nu=2.5, length_scale_bounds=(1.,20.))
gkernel = 0.5*(np.median(dy)**2.)*base_kernel
    # the second thing you need to define a Gaussian Process is a mean function, which in our case is
    # implicitly zero (this is fine for transients - they spend most of eternity at the background!)
gband = gp.GaussianProcessRegressor(kernel=gkernel, alpha=dy**2.).fit(t, y)
# now lets use the Gaussian process to predict the lightcurve on an evenly sampled grid
# this is appropriate for Wavelets, or generally robust feature extraction
newtime = np.linspace(t.min(), t.max(), 200, endpoint=True)
newtime = newtime.reshape(-1, 1)
gnew = gband.predict(newtime, return_std=True)
return (t, y , dy, newtime, gnew)
ftest = filenames[0:20]
gtime, gflux, gdflux, gnewtime, gnewflux = calculate_gpfit(ftest[6])
fig, ax = plt.subplots(figsize=(7,7))
ax.errorbar(gtime, gflux, gdflux, fmt = 'o', color = '#78A5A3')
ax.plot(gnewtime, gnewflux[0], 'k-')
ax.fill(np.concatenate([gnewtime, gnewtime[::-1]]),
np.concatenate([gnewflux[0] - 1.9600 * gnewflux[1],
(gnewflux[0] + 1.9600 * gnewflux[1])[::-1]]),
alpha=.5, fc='0.8', ec='None', label='95% confidence interval')
ax.set_xlabel(r"$\mathrm{MJD}$")
ax.set_ylabel(r"$\mathrm{flux}$")
fig.tight_layout()
from sklearn.ensemble import IsolationForest
isoF_clf = IsolationForest(n_estimators = 100, contamination=0.01)
isoF_clf.fit(X_new)
anomaly_score = isoF_clf.decision_function(X_new)
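# lower decision_function values are more anomalous, so sort ascending and take the 10 smallest scores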
print("The 10 most anomalous sources are: {}".format(np.arange(1,5001)[np.argsort(anomaly_score)[:10]]))
lc491 = Variable("test_set_for_LSST_DSFP/FAKE00491.dat")
lc491.plot_multicolor_lc()
lc2047 = Variable("test_set_for_LSST_DSFP/FAKE02030.dat")
lc2047.plot_multicolor_lc()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: End of Warning
Step2: Problem 1) Light Curve Data
Step3: As we have many light curve files (in principle as many as 37 billion...), we will define a Source() class to ease our handling of the data.
Step4: Problem 1c
Step5: One thing that we brushed over previously is that the brightness measurements have units of flux, rather than the traditional use of magnitudes. The reason for this is that LSST will measure flux variations via image differencing, which will for some sources in some filters result in a measurement of negative flux. (You may have already noticed this in 1a.) Statistically there is nothing wrong with such a measurement, but it is impossible to convert a negative flux into a magnitude. Thus we will use flux measurements throughout this exercise.
Step6: Problem 2) Data Preparation
Step7: Problem 2b
Step8: Problem 3) Store the sources in a database
Step9: Following the sqlite3 import, we must first connect to the database. If we attempt a connection to a database that does not exist, then a new database is automatically created. Here we will create a new database file, called MiniBroker.db.
Step10: We now have a database connection object, conn. To interact with the database (create tables, load data, write queries) we need a cursor object.
Step13: Now that we have a cursor object, we can populate the database. As an example we will start by creating a table to hold all the raw photometry (though ultimately we will not use this table for analysis).
Step17: Let's unpack everything that happened in these two commands. First - if the table rawPhot already exists, we drop it to start over from scratch. (this is useful here, but should not be adopted as general practice)
Step21: There are two things to highlight above
Step23: Load all of the raw photometric observations into the rawPhot table in the database.
Step25: Problem 3c
Step32: While we cannot load the light curves to the database in parallel, using the concept of MapReduce we can use the iPython "cluster" that we created to map the I/O process (typically lots of overhead) to multiple machines, and then we can reduce their output to a single object to be loaded into the database.
Step33: You should have confirmed that loading a full light curve is faster than loading individual observations (not by a ton, but faster is faster and microseconds count when you are dealing with 37 billion sources).
Step34: Now, we use the previously defined lview variable to achieve DirectView access to the cluster to map the load_data function on to all the filenames, and get the results.
Step38: And now that you have results, drop the rawPhot table if it already exists, and load the observations into the database as before.
Step42: Problem 3e Confirm that reading the data in parallel is faster than the serial operations in 3d using the %%timeit function as before.
Step44: That isn't quite a factor of 4x speed up, but it represents a significant improvement, and recall that writing to the database must happen as a serial process because we are using SQLite.
Step47: Problem 3) Part 2 -- Calculate and store the features in the same database
Step49: The above procedure should look familiar to above, with one exception
Step52: Problem 3h
Step53: Finally, we close by commiting the changes we made to the database.
Step57: Problem 4) Build a Classification Model
Step60: For now - don't worry about what the labels mean (though if you inspect the light curves you may be able to figure this out...)
Step61: Problem 4b
Step62: The SVM model does a decent job of classifying the data. However - we are going to have 10 million alerts every night. Therefore, we need something that runs quickly. For most ML models the training step is slow, while predictions are (relatively) fast.
Step63: Problem 4d
Step69: Problem 4e
Step71: Problem 5b
Step74: Problem 5c
Step75: It turns out that the classification performance in this case isn't particularly good.
Step77: As we discussed in the lecture, one challenge for this problem is that individual observations are widely spaced and have very large uncertainties. The features that we have currently adopted (which are really just moments of the flux distribution) aren't robust to outliers, and are subject to the precise cadence of the observations.
Step78: Let's look at one fit to see what's happening
Step79: Not only does GP regression provide an interpolation between observations, but it returns an uncertainty about that interpolation as well. Thus, it's possible to measure features on interpolated light curves that are observed with precisely the same cadence for all sources.
Step80: Problem 6b
|
3,432
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Remi Flamary <remi.flamary@unice.fr>
# Stanislas Chambon <stan.chambon@gmail.com>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
n_source_samples = 100
n_target_samples = 100
theta = 2 * np.pi / 20
noise_level = 0.1
Xs, ys = ot.datasets.get_data_classif(
'gaussrot', n_source_samples, nz=noise_level)
Xs_new, _ = ot.datasets.get_data_classif(
'gaussrot', n_source_samples, nz=noise_level)
Xt, yt = ot.datasets.get_data_classif(
'gaussrot', n_target_samples, theta=theta, nz=noise_level)
# one of the target mode changes its variance (no linear mapping)
Xt[yt == 2] *= 3
Xt = Xt + 4
pl.figure(1, (10, 5))
pl.clf()
pl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')
pl.legend(loc=0)
pl.title('Source and target distributions')
# MappingTransport with linear kernel
ot_mapping_linear = ot.da.MappingTransport(
kernel="linear", mu=1e0, eta=1e-8, bias=True,
max_iter=20, verbose=True)
ot_mapping_linear.fit(Xs=Xs, Xt=Xt)
# for original source samples, transform applies barycentric mapping
transp_Xs_linear = ot_mapping_linear.transform(Xs=Xs)
# for out of source samples, transform applies the linear mapping
transp_Xs_linear_new = ot_mapping_linear.transform(Xs=Xs_new)
# MappingTransport with gaussian kernel
ot_mapping_gaussian = ot.da.MappingTransport(
kernel="gaussian", eta=1e-5, mu=1e-1, bias=True, sigma=1,
max_iter=10, verbose=True)
ot_mapping_gaussian.fit(Xs=Xs, Xt=Xt)
# for original source samples, transform applies barycentric mapping
transp_Xs_gaussian = ot_mapping_gaussian.transform(Xs=Xs)
# for out of source samples, transform applies the gaussian mapping
transp_Xs_gaussian_new = ot_mapping_gaussian.transform(Xs=Xs_new)
pl.figure(2)
pl.clf()
pl.subplot(2, 2, 1)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_linear[:, 0], transp_Xs_linear[:, 1], c=ys, marker='+',
label='Mapped source samples')
pl.title("Bary. mapping (linear)")
pl.legend(loc=0)
pl.subplot(2, 2, 2)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_linear_new[:, 0], transp_Xs_linear_new[:, 1],
c=ys, marker='+', label='Learned mapping')
pl.title("Estim. mapping (linear)")
pl.subplot(2, 2, 3)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_gaussian[:, 0], transp_Xs_gaussian[:, 1], c=ys,
marker='+', label='barycentric mapping')
pl.title("Bary. mapping (kernel)")
pl.subplot(2, 2, 4)
pl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',
label='Target samples', alpha=.2)
pl.scatter(transp_Xs_gaussian_new[:, 0], transp_Xs_gaussian_new[:, 1], c=ys,
marker='+', label='Learned mapping')
pl.title("Estim. mapping (kernel)")
pl.tight_layout()
pl.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate data
Step2: Plot data
Step3: Instantiate the different transport algorithms and fit them
Step4: Plot transported samples
|
3,433
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from load_utils import *
d = load_diffs(keep_diff = True)
df_events, df_blocked_user_text = load_block_events_and_users()
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(12, 6))
xa = np.sort(d['annotated']['recipient_score'])
ya = 1. * np.arange(len(xa)) / (len(xa) - 1)
xm = np.sort(d['annotated']['pred_recipient_score'])
ym = 1. * np.arange(len(xm)) / (len(xm) - 1)
ax1 = fig.add_subplot(121)
ax1.plot(xa, ya, label = 'annotators')
ax1.plot(xm, ym, label = 'model')
ax1.set_xlabel('$x$')
ax1.set_ylabel('$P(X<x)$')
ax1.legend()
ax2 = fig.add_subplot(122)
ax2.plot(xa, ya, label = 'annotators')
ax2.plot(xm, ym, label = 'model')
ax2.set_xlabel('$x$')
ax2.set_ylabel('$P(X<x)$')
ax2.set_ylim((0.90, 1))
ax2.legend()
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure(figsize=(12, 6))
xa = np.sort(d['annotated']['recipient_score'])
ya = 100 * (1 - (1. * np.arange(len(xa)) / (len(xa) - 1)))
xm = np.sort(d['annotated']['pred_recipient_score'])
ym = 100 * (1 - (1. * np.arange(len(xm)) / (len(xm) - 1)))
ax1 = fig.add_subplot(121)
ax1.plot(xa, ya, label = 'annotators')
ax1.plot(xm, ym, label = 'model')
ax1.set_xlabel('$t$')
ax1.set_ylabel('Percent of comments that are attacks')
ax1.legend()
ax2 = fig.add_subplot(122)
ax2.plot(xa, ya, label = 'annotators')
ax2.plot(xm, ym, label = 'model')
ax2.set_xlabel('$t$')
ax2.set_ylabel('Percent of comments above threshold t')
ax2.set_ylim((0.0, 3))
ax2.set_xlim((0.2, 1.0))
ax2.legend()
plt.savefig('../../paper/figs/model_vs_annotator_percent_attack_distributions.png')
def check_range(d, col, min, max, n=10):
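    '''Print n randomly sampled comments whose col score lies in [min, max].'''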
df = d.query('%s >= %f and %s <= %f' % (col, min, col, max))
for i, r in df.sample(n).iterrows():
print(r[col])
print(r.clean_diff)
print('\n')
check_range(d['sample'], 'pred_aggression_score', 0.6, 0.7)
# annotations
sns.distplot(d['annotated']['aggression_score'].dropna(), hist=False, label = 'annotator scores')
# model on annotated data
sns.distplot(d['annotated']['pred_aggression_score'].dropna(), hist=False, label = 'model on annoted data')
#model no admin sample
sns.distplot(d['sample']['pred_aggression_score'].dropna(), hist=False, label = 'model sample')
plt.xlim(-1, 1)
plt.legend()
sns.distplot(d['annotated']['recipient_score'].dropna(), kde =False, norm_hist = True)
sns.distplot(d['annotated']['pred_recipient_score'].dropna(), kde =False, norm_hist = True)
sns.distplot(d['sample']['pred_recipient_score'].dropna(), kde =False, norm_hist = True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Q
Step2: Model does not assign 0 scores, like the annotators.
Step3: Q
Step4: Q
Step5: Q
|
3,434
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import IPython
import IPython.display
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
zip_path = tf.keras.utils.get_file(
origin='https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip',
fname='jena_climate_2009_2016.csv.zip',
extract=True)
csv_path, _ = os.path.splitext(zip_path)
df = pd.read_csv(csv_path)
# slice [start:stop:step], starting from index 5 take every 6th record.
df = df[5::6]
date_time = pd.to_datetime(df.pop('Date Time'), format='%d.%m.%Y %H:%M:%S')
df.head()
plot_cols = ['T (degC)', 'p (mbar)', 'rho (g/m**3)']
plot_features = df[plot_cols]
plot_features.index = date_time
_ = plot_features.plot(subplots=True)
plot_features = df[plot_cols][:480]
plot_features.index = date_time[:480]
_ = plot_features.plot(subplots=True)
df.describe().transpose()
wv = df['wv (m/s)']
bad_wv = wv == -9999.0
wv[bad_wv] = 0.0
max_wv = df['max. wv (m/s)']
bad_max_wv = max_wv == -9999.0
max_wv[bad_max_wv] = 0.0
# The above inplace edits are reflected in the DataFrame
df['wv (m/s)'].min()
plt.hist2d(df['wd (deg)'], df['wv (m/s)'], bins=(50, 50), vmax=400)
plt.colorbar()
plt.xlabel('Wind Direction [deg]')
plt.ylabel('Wind Velocity [m/s]')
wv = df.pop('wv (m/s)')
max_wv = df.pop('max. wv (m/s)')
# Convert to radians.
wd_rad = df.pop('wd (deg)')*np.pi / 180
# Calculate the wind x and y components.
df['Wx'] = wv*np.cos(wd_rad)
df['Wy'] = wv*np.sin(wd_rad)
# Calculate the max wind x and y components.
df['max Wx'] = max_wv*np.cos(wd_rad)
df['max Wy'] = max_wv*np.sin(wd_rad)
plt.hist2d(df['Wx'], df['Wy'], bins=(50, 50), vmax=400)
plt.colorbar()
plt.xlabel('Wind X [m/s]')
plt.ylabel('Wind Y [m/s]')
ax = plt.gca()
ax.axis('tight')
timestamp_s = date_time.map(datetime.datetime.timestamp)
day = 24*60*60
year = (365.2425)*day
df['Day sin'] = np.sin(timestamp_s * (2 * np.pi / day))
df['Day cos'] = np.cos(timestamp_s * (2 * np.pi / day))
df['Year sin'] = np.sin(timestamp_s * (2 * np.pi / year))
df['Year cos'] = np.cos(timestamp_s * (2 * np.pi / year))
plt.plot(np.array(df['Day sin'])[:25])
plt.plot(np.array(df['Day cos'])[:25])
plt.xlabel('Time [h]')
plt.title('Time of day signal')
fft = tf.signal.rfft(df['T (degC)'])
f_per_dataset = np.arange(0, len(fft))
n_samples_h = len(df['T (degC)'])
hours_per_year = 24*365.2524
years_per_dataset = n_samples_h/(hours_per_year)
f_per_year = f_per_dataset/years_per_dataset
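# frequencies are now in cycles/year: expect peaks near 1 (yearly) and ~365.25 (daily)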
plt.step(f_per_year, np.abs(fft))
plt.xscale('log')
plt.ylim(0, 400000)
plt.xlim([0.1, max(plt.xlim())])
plt.xticks([1, 365.2524], labels=['1/Year', '1/day'])
_ = plt.xlabel('Frequency (log scale)')
column_indices = {name: i for i, name in enumerate(df.columns)}
n = len(df)
train_df = df[0:int(n*0.7)]
val_df = df[int(n*0.7):int(n*0.9)]
test_df = df[int(n*0.9):]
num_features = df.shape[1]
train_mean = train_df.mean()
train_std = train_df.std()
train_df = (train_df - train_mean) / train_std
val_df = (val_df - train_mean) / train_std
test_df = (test_df - train_mean) / train_std
df_std = (df - train_mean) / train_std
df_std = df_std.melt(var_name='Column', value_name='Normalized')
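# melt to long format so seaborn can draw one violin per normalized feature column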
plt.figure(figsize=(12, 6))
ax = sns.violinplot(x='Column', y='Normalized', data=df_std)
_ = ax.set_xticklabels(df.keys(), rotation=90)
class WindowGenerator():
def __init__(self, input_width, label_width, shift,
train_df=train_df, val_df=val_df, test_df=test_df,
label_columns=None):
# Store the raw data.
self.train_df = train_df
self.val_df = val_df
self.test_df = test_df
# Work out the label column indices.
self.label_columns = label_columns
if label_columns is not None:
self.label_columns_indices = {name: i for i, name in
enumerate(label_columns)}
self.column_indices = {name: i for i, name in
enumerate(train_df.columns)}
# Work out the window parameters.
self.input_width = input_width
self.label_width = label_width
self.shift = shift
self.total_window_size = input_width + shift
self.input_slice = slice(0, input_width)
self.input_indices = np.arange(self.total_window_size)[self.input_slice]
self.label_start = self.total_window_size - self.label_width
self.labels_slice = slice(self.label_start, None)
self.label_indices = np.arange(self.total_window_size)[self.labels_slice]
def __repr__(self):
return '\n'.join([
f'Total window size: {self.total_window_size}',
f'Input indices: {self.input_indices}',
f'Label indices: {self.label_indices}',
f'Label column name(s): {self.label_columns}'])
w1 = WindowGenerator(input_width=24, label_width=1, shift=24,
label_columns=['T (degC)'])
w1
w2 = WindowGenerator(input_width=6, label_width=1, shift=1,
label_columns=['T (degC)'])
w2
def split_window(self, features):
inputs = features[:, self.input_slice, :]
labels = features[:, self.labels_slice, :]
if self.label_columns is not None:
labels = tf.stack(
[labels[:, :, self.column_indices[name]] for name in self.label_columns],
axis=-1)
# Slicing doesn't preserve static shape information, so set the shapes
# manually. This way the `tf.data.Datasets` are easier to inspect.
inputs.set_shape([None, self.input_width, None])
labels.set_shape([None, self.label_width, None])
return inputs, labels
WindowGenerator.split_window = split_window
# Stack three slices, the length of the total window:
example_window = tf.stack([np.array(train_df[:w2.total_window_size]),
np.array(train_df[100:100+w2.total_window_size]),
np.array(train_df[200:200+w2.total_window_size])])
example_inputs, example_labels = w2.split_window(example_window)
print('All shapes are: (batch, time, features)')
print(f'Window shape: {example_window.shape}')
print(f'Inputs shape: {example_inputs.shape}')
print(f'labels shape: {example_labels.shape}')
w2.example = example_inputs, example_labels
def plot(self, model=None, plot_col='T (degC)', max_subplots=3):
inputs, labels = self.example
plt.figure(figsize=(12, 8))
plot_col_index = self.column_indices[plot_col]
max_n = min(max_subplots, len(inputs))
for n in range(max_n):
plt.subplot(3, 1, n+1)
plt.ylabel(f'{plot_col} [normed]')
plt.plot(self.input_indices, inputs[n, :, plot_col_index],
label='Inputs', marker='.', zorder=-10)
if self.label_columns:
label_col_index = self.label_columns_indices.get(plot_col, None)
else:
label_col_index = plot_col_index
if label_col_index is None:
continue
plt.scatter(self.label_indices, labels[n, :, label_col_index],
edgecolors='k', label='Labels', c='#2ca02c', s=64)
if model is not None:
predictions = model(inputs)
plt.scatter(self.label_indices, predictions[n, :, label_col_index],
marker='X', edgecolors='k', label='Predictions',
c='#ff7f0e', s=64)
if n == 0:
plt.legend()
plt.xlabel('Time [h]')
WindowGenerator.plot = plot
w2.plot()
w2.plot(plot_col='p (mbar)')
def make_dataset(self, data):
data = np.array(data, dtype=np.float32)
ds = tf.keras.preprocessing.timeseries_dataset_from_array(
data=data,
targets=None,
sequence_length=self.total_window_size,
sequence_stride=1,
shuffle=True,
batch_size=32,)
ds = ds.map(self.split_window)
return ds
WindowGenerator.make_dataset = make_dataset
@property
def train(self):
return self.make_dataset(self.train_df)
@property
def val(self):
return self.make_dataset(self.val_df)
@property
def test(self):
return self.make_dataset(self.test_df)
@property
def example(self):
    """Get and cache an example batch of `inputs, labels` for plotting."""
result = getattr(self, '_example', None)
if result is None:
# No example batch was found, so get one from the `.train` dataset
result = next(iter(self.train))
# And cache it for next time
self._example = result
return result
WindowGenerator.train = train
WindowGenerator.val = val
WindowGenerator.test = test
WindowGenerator.example = example
# Each element is an (inputs, label) pair
w2.train.element_spec
for example_inputs, example_labels in w2.train.take(1):
print(f'Inputs shape (batch, time, features): {example_inputs.shape}')
print(f'Labels shape (batch, time, features): {example_labels.shape}')
single_step_window = WindowGenerator(
input_width=1, label_width=1, shift=1,
label_columns=['T (degC)'])
single_step_window
for example_inputs, example_labels in single_step_window.train.take(1):
print(f'Inputs shape (batch, time, features): {example_inputs.shape}')
print(f'Labels shape (batch, time, features): {example_labels.shape}')
class Baseline(tf.keras.Model):
def __init__(self, label_index=None):
super().__init__()
self.label_index = label_index
def call(self, inputs):
if self.label_index is None:
return inputs
result = inputs[:, :, self.label_index]
return result[:, :, tf.newaxis]
baseline = Baseline(label_index=column_indices['T (degC)'])
baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
val_performance = {}
performance = {}
val_performance['Baseline'] = baseline.evaluate(single_step_window.val)
performance['Baseline'] = baseline.evaluate(single_step_window.test, verbose=0)
wide_window = WindowGenerator(
input_width=24, label_width=24, shift=1,
label_columns=['T (degC)'])
wide_window
print('Input shape:', single_step_window.example[0].shape)
print('Output shape:', baseline(single_step_window.example[0]).shape)
wide_window.plot(baseline)
linear = tf.keras.Sequential([
tf.keras.layers.Dense(units=1)
])
print('Input shape:', single_step_window.example[0].shape)
print('Output shape:', linear(single_step_window.example[0]).shape)
MAX_EPOCHS = 20
def compile_and_fit(model, window, patience=2):
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
patience=patience,
mode='min')
model.compile(loss=tf.losses.MeanSquaredError(),
optimizer=tf.optimizers.Adam(),
metrics=[tf.metrics.MeanAbsoluteError()])
history = model.fit(window.train, epochs=MAX_EPOCHS,
validation_data=window.val,
callbacks=[early_stopping])
return history
history = compile_and_fit(linear, single_step_window)
val_performance['Linear'] = linear.evaluate(single_step_window.val)
performance['Linear'] = linear.evaluate(single_step_window.test, verbose=0)
print('Input shape:', wide_window.example[0].shape)
print('Output shape:', baseline(wide_window.example[0]).shape)
wide_window.plot(linear)
plt.bar(x = range(len(train_df.columns)),
height=linear.layers[0].kernel[:,0].numpy())
axis = plt.gca()
axis.set_xticks(range(len(train_df.columns)))
_ = axis.set_xticklabels(train_df.columns, rotation=90)
dense = tf.keras.Sequential([
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=1)
])
history = compile_and_fit(dense, single_step_window)
val_performance['Dense'] = dense.evaluate(single_step_window.val)
performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
CONV_WIDTH = 3
conv_window = WindowGenerator(
input_width=CONV_WIDTH,
label_width=1,
shift=1,
label_columns=['T (degC)'])
conv_window
conv_window.plot()
plt.title("Given 3h as input, predict 1h into the future.")
multi_step_dense = tf.keras.Sequential([
# Shape: (time, features) => (time*features)
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=1),
# Add back the time dimension.
# Shape: (outputs) => (1, outputs)
tf.keras.layers.Reshape([1, -1]),
])
print('Input shape:', conv_window.example[0].shape)
print('Output shape:', multi_step_dense(conv_window.example[0]).shape)
history = compile_and_fit(multi_step_dense, conv_window)
IPython.display.clear_output()
val_performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.val)
performance['Multi step dense'] = multi_step_dense.evaluate(conv_window.test, verbose=0)
conv_window.plot(multi_step_dense)
print('Input shape:', wide_window.example[0].shape)
try:
print('Output shape:', multi_step_dense(wide_window.example[0]).shape)
except Exception as e:
print(f'\n{type(e).__name__}:{e}')
conv_model = tf.keras.Sequential([
tf.keras.layers.Conv1D(filters=32,
kernel_size=(CONV_WIDTH,),
activation='relu'),
tf.keras.layers.Dense(units=32, activation='relu'),
tf.keras.layers.Dense(units=1),
])
print("Conv model on `conv_window`")
print('Input shape:', conv_window.example[0].shape)
print('Output shape:', conv_model(conv_window.example[0]).shape)
history = compile_and_fit(conv_model, conv_window)
IPython.display.clear_output()
val_performance['Conv'] = conv_model.evaluate(conv_window.val)
performance['Conv'] = conv_model.evaluate(conv_window.test, verbose=0)
print("Wide window")
print('Input shape:', wide_window.example[0].shape)
print('Labels shape:', wide_window.example[1].shape)
print('Output shape:', conv_model(wide_window.example[0]).shape)
LABEL_WIDTH = 24
INPUT_WIDTH = LABEL_WIDTH + (CONV_WIDTH - 1)
wide_conv_window = WindowGenerator(
input_width=INPUT_WIDTH,
label_width=LABEL_WIDTH,
shift=1,
label_columns=['T (degC)'])
wide_conv_window
print("Wide conv window")
print('Input shape:', wide_conv_window.example[0].shape)
print('Labels shape:', wide_conv_window.example[1].shape)
print('Output shape:', conv_model(wide_conv_window.example[0]).shape)
wide_conv_window.plot(conv_model)
lstm_model = tf.keras.models.Sequential([
# Shape [batch, time, features] => [batch, time, lstm_units]
tf.keras.layers.LSTM(32, return_sequences=True),
# Shape => [batch, time, features]
tf.keras.layers.Dense(units=1)
])
print('Input shape:', wide_window.example[0].shape)
print('Output shape:', lstm_model(wide_window.example[0]).shape)
history = compile_and_fit(lstm_model, wide_window)
IPython.display.clear_output()
val_performance['LSTM'] = lstm_model.evaluate(wide_window.val)
performance['LSTM'] = lstm_model.evaluate(wide_window.test, verbose=0)
wide_window.plot(lstm_model)
x = np.arange(len(performance))
width = 0.3
metric_name = 'mean_absolute_error'
metric_index = lstm_model.metrics_names.index('mean_absolute_error')
val_mae = [v[metric_index] for v in val_performance.values()]
test_mae = [v[metric_index] for v in performance.values()]
plt.ylabel('mean_absolute_error [T (degC), normalized]')
plt.bar(x - 0.17, val_mae, width, label='Validation')
plt.bar(x + 0.17, test_mae, width, label='Test')
plt.xticks(ticks=x, labels=performance.keys(),
rotation=45)
_ = plt.legend()
for name, value in performance.items():
print(f'{name:12s}: {value[1]:0.4f}')
single_step_window = WindowGenerator(
# `WindowGenerator` returns all features as labels if you
# don't set the `label_columns` argument.
input_width=1, label_width=1, shift=1)
wide_window = WindowGenerator(
input_width=24, label_width=24, shift=1)
for example_inputs, example_labels in wide_window.train.take(1):
print(f'Inputs shape (batch, time, features): {example_inputs.shape}')
print(f'Labels shape (batch, time, features): {example_labels.shape}')
baseline = Baseline()
baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
val_performance = {}
performance = {}
val_performance['Baseline'] = baseline.evaluate(wide_window.val)
performance['Baseline'] = baseline.evaluate(wide_window.test, verbose=0)
dense = tf.keras.Sequential([
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=64, activation='relu'),
tf.keras.layers.Dense(units=num_features)
])
history = compile_and_fit(dense, single_step_window)
IPython.display.clear_output()
val_performance['Dense'] = dense.evaluate(single_step_window.val)
performance['Dense'] = dense.evaluate(single_step_window.test, verbose=0)
%%time
wide_window = WindowGenerator(
input_width=24, label_width=24, shift=1)
lstm_model = tf.keras.models.Sequential([
# Shape [batch, time, features] => [batch, time, lstm_units]
tf.keras.layers.LSTM(32, return_sequences=True),
# Shape => [batch, time, features]
tf.keras.layers.Dense(units=num_features)
])
history = compile_and_fit(lstm_model, wide_window)
IPython.display.clear_output()
val_performance['LSTM'] = lstm_model.evaluate( wide_window.val)
performance['LSTM'] = lstm_model.evaluate( wide_window.test, verbose=0)
print()
class ResidualWrapper(tf.keras.Model):
def __init__(self, model):
super().__init__()
self.model = model
def call(self, inputs, *args, **kwargs):
delta = self.model(inputs, *args, **kwargs)
# The prediction for each timestep is the input
# from the previous time step plus the delta
# calculated by the model.
return inputs + delta
%%time
residual_lstm = ResidualWrapper(
tf.keras.Sequential([
tf.keras.layers.LSTM(32, return_sequences=True),
tf.keras.layers.Dense(
num_features,
# The predicted deltas should start small
# So initialize the output layer with zeros
kernel_initializer=tf.initializers.zeros)
]))
history = compile_and_fit(residual_lstm, wide_window)
IPython.display.clear_output()
val_performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.val)
performance['Residual LSTM'] = residual_lstm.evaluate(wide_window.test, verbose=0)
print()
x = np.arange(len(performance))
width = 0.3
metric_name = 'mean_absolute_error'
metric_index = lstm_model.metrics_names.index('mean_absolute_error')
val_mae = [v[metric_index] for v in val_performance.values()]
test_mae = [v[metric_index] for v in performance.values()]
plt.bar(x - 0.17, val_mae, width, label='Validation')
plt.bar(x + 0.17, test_mae, width, label='Test')
plt.xticks(ticks=x, labels=performance.keys(),
rotation=45)
plt.ylabel('MAE (average over all outputs)')
_ = plt.legend()
for name, value in performance.items():
print(f'{name:15s}: {value[1]:0.4f}')
OUT_STEPS = 24
multi_window = WindowGenerator(input_width=24,
label_width=OUT_STEPS,
shift=OUT_STEPS)
multi_window.plot()
multi_window
class MultiStepLastBaseline(tf.keras.Model):
def call(self, inputs):
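    # tile the last input time step OUT_STEPS times along the time axis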
return tf.tile(inputs[:, -1:, :], [1, OUT_STEPS, 1])
last_baseline = MultiStepLastBaseline()
last_baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
multi_val_performance = {}
multi_performance = {}
multi_val_performance['Last'] = last_baseline.evaluate(multi_window.val)
multi_performance['Last'] = last_baseline.evaluate(multi_window.test, verbose=0)
multi_window.plot(last_baseline)
class RepeatBaseline(tf.keras.Model):
def call(self, inputs):
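    # predict that the next day repeats the input day: return the inputs unchanged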
return inputs
repeat_baseline = RepeatBaseline()
repeat_baseline.compile(loss=tf.losses.MeanSquaredError(),
metrics=[tf.metrics.MeanAbsoluteError()])
multi_val_performance['Repeat'] = repeat_baseline.evaluate(multi_window.val)
multi_performance['Repeat'] = repeat_baseline.evaluate(multi_window.test, verbose=0)
multi_window.plot(repeat_baseline)
multi_linear_model = tf.keras.Sequential([
# Take the last time-step.
# Shape [batch, time, features] => [batch, 1, features]
tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),
# Shape => [batch, 1, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_linear_model, multi_window)
IPython.display.clear_output()
multi_val_performance['Linear'] = multi_linear_model.evaluate(multi_window.val)
multi_performance['Linear'] = multi_linear_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_linear_model)
multi_dense_model = tf.keras.Sequential([
# Take the last time step.
# Shape [batch, time, features] => [batch, 1, features]
tf.keras.layers.Lambda(lambda x: x[:, -1:, :]),
# Shape => [batch, 1, dense_units]
tf.keras.layers.Dense(512, activation='relu'),
# Shape => [batch, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_dense_model, multi_window)
IPython.display.clear_output()
multi_val_performance['Dense'] = multi_dense_model.evaluate(multi_window.val)
multi_performance['Dense'] = multi_dense_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_dense_model)
CONV_WIDTH = 3
multi_conv_model = tf.keras.Sequential([
# Shape [batch, time, features] => [batch, CONV_WIDTH, features]
tf.keras.layers.Lambda(lambda x: x[:, -CONV_WIDTH:, :]),
# Shape => [batch, 1, conv_units]
tf.keras.layers.Conv1D(256, activation='relu', kernel_size=(CONV_WIDTH)),
# Shape => [batch, 1, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_conv_model, multi_window)
IPython.display.clear_output()
multi_val_performance['Conv'] = multi_conv_model.evaluate(multi_window.val)
multi_performance['Conv'] = multi_conv_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_conv_model)
multi_lstm_model = tf.keras.Sequential([
# Shape [batch, time, features] => [batch, lstm_units]
# Adding more `lstm_units` just overfits more quickly.
tf.keras.layers.LSTM(32, return_sequences=False),
# Shape => [batch, out_steps*features]
tf.keras.layers.Dense(OUT_STEPS*num_features,
kernel_initializer=tf.initializers.zeros),
# Shape => [batch, out_steps, features]
tf.keras.layers.Reshape([OUT_STEPS, num_features])
])
history = compile_and_fit(multi_lstm_model, multi_window)
IPython.display.clear_output()
multi_val_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.val)
multi_performance['LSTM'] = multi_lstm_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(multi_lstm_model)
class FeedBack(tf.keras.Model):
def __init__(self, units, out_steps):
super().__init__()
self.out_steps = out_steps
self.units = units
self.lstm_cell = tf.keras.layers.LSTMCell(units)
# Also wrap the LSTMCell in an RNN to simplify the `warmup` method.
self.lstm_rnn = tf.keras.layers.RNN(self.lstm_cell, return_state=True)
self.dense = tf.keras.layers.Dense(num_features)
feedback_model = FeedBack(units=32, out_steps=OUT_STEPS)
def warmup(self, inputs):
# inputs.shape => (batch, time, features)
# x.shape => (batch, lstm_units)
x, *state = self.lstm_rnn(inputs)
# prediction.shape => (batch, features)
prediction = self.dense(x)
return prediction, state
FeedBack.warmup = warmup
prediction, state = feedback_model.warmup(multi_window.example[0])
prediction.shape
def call(self, inputs, training=None):
# Collect the dynamically unrolled outputs in a Python list.
predictions = []
# Initialize the lstm state
prediction, state = self.warmup(inputs)
# Insert the first prediction
predictions.append(prediction)
# Run the rest of the prediction steps
for n in range(1, self.out_steps):
# Use the last prediction as input.
x = prediction
# Execute one lstm step.
x, state = self.lstm_cell(x, states=state,
training=training)
# Convert the lstm output to a prediction.
prediction = self.dense(x)
# Add the prediction to the output
predictions.append(prediction)
# predictions.shape => (time, batch, features)
predictions = tf.stack(predictions)
# predictions.shape => (batch, time, features)
predictions = tf.transpose(predictions, [1, 0, 2])
return predictions
FeedBack.call = call
print('Output shape (batch, time, features): ', feedback_model(multi_window.example[0]).shape)
history = compile_and_fit(feedback_model, multi_window)
IPython.display.clear_output()
multi_val_performance['AR LSTM'] = feedback_model.evaluate(multi_window.val)
multi_performance['AR LSTM'] = feedback_model.evaluate(multi_window.test, verbose=0)
multi_window.plot(feedback_model)
x = np.arange(len(multi_performance))
width = 0.3
metric_name = 'mean_absolute_error'
metric_index = lstm_model.metrics_names.index(metric_name)
val_mae = [v[metric_index] for v in multi_val_performance.values()]
test_mae = [v[metric_index] for v in multi_performance.values()]
plt.bar(x - 0.17, val_mae, width, label='Validation')
plt.bar(x + 0.17, test_mae, width, label='Test')
plt.xticks(ticks=x, labels=multi_performance.keys(),
rotation=45)
plt.ylabel('MAE (average over all times and outputs)')
_ = plt.legend()
for name, value in multi_performance.items():
print(f'{name:8s}: {value[1]:0.4f}')
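# Illustrative sketch (an assumption, not from the original tutorial): the same
# autoregressive loop as FeedBack.call, written with tf.TensorArray instead of
# a Python list, which also plays nicely with graph tracing.
def call_with_tensorarray(self, inputs, training=None):
    prediction, state = self.warmup(inputs)
    preds = tf.TensorArray(tf.float32, size=self.out_steps)
    preds = preds.write(0, prediction)
    for n in tf.range(1, self.out_steps):
        x, state = self.lstm_cell(prediction, states=state, training=training)
        prediction = self.dense(x)
        preds = preds.write(n, prediction)
    # stack() gives (time, batch, features); move the batch axis to the front
    return tf.transpose(preds.stack(), [1, 0, 2])
# FeedBack.call = call_with_tensorarray # optional swap, same output shape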
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Time series forecasting
Step2: The weather dataset
Step3: This tutorial only deals with hourly predictions, so start by sub-sampling the data from 10-minute intervals to one-hour intervals.
Step4: Let's take a glance at the data. Here are the first few rows.
Step5: Here is the evolution of a few features over time.
Step6: Inspect and cleanup
Step7: Wind velocity
Step8: Feature engineering
Step9: However, converting the wind direction and velocity columns to a wind vector makes it easier for the model to interpret.
Step10: The distribution of wind vectors is much simpler for the model to correctly interpret.
Step11: Time
Step12: Similar to the wind direction, the time in seconds is not a useful model input. Being weather data, it has clear daily and yearly periodicity. There are many ways you could deal with periodicity.
Step13: This gives the model access to the most important frequency features. In this case we knew ahead of time which frequencies were important.
Step14: Split the data
Step15: Normalize the data
Step16: Now peek at the distribution of the features. Some features do have long tails, but there are no obvious errors like the -9999 wind velocity value.
Step17: Data windowing
Step18: Here is code to create the two windows shown in the diagrams at the start of this section.
Step19: 2. Split
Step20: Try out the following.
Step21: Typically, data in TensorFlow is packed into arrays where the outermost index is across examples (the "batch" dimension). The middle indices are the "time" or "space" (width, height) dimensions. The innermost indices are the features.
Step22: This plot aligns inputs, labels, and (later) predictions based on the time the item refers to.
Step23: You can plot the other columns, but the example window w2 configuration only has labels for the T (degC) column.
Step24: 4. Create tf.data.Datasets
Step26: The WindowGenerator object holds training, validation, and test data. Add properties for accessing them as tf.data.Datasets using the make_dataset method above. Also, add a standard example batch for easy access and plotting.
Step27: Now the WindowGenerator object gives you access to tf.data.Dataset objects, so you can easily iterate over the data.
Step28: Iterating over a Dataset yields concrete batches.
Step29: Single-step models
Step30: The window object creates tf.data.Datasets from the training, validation, and test sets, allowing you to easily iterate over batches of data.
Step31: Baseline
Step32: Instantiate and evaluate this model.
Step33: That printed some performance metrics, but those don't give you a feeling for how well the model is doing.
Step34: This expanded window can be passed directly to the same baseline model without any code changes. This is possible because the inputs and labels have the same number of time steps, and the baseline just forwards the input to the output.
Step35: Plotting the baseline model's predictions shows that they are simply the labels shifted right by one hour.
Step36: In the above plots of three examples, the single-step model is run over the course of 24 hours. This deserves some explanation.
Step37: This tutorial trains many models, so package the training procedure into a function.
Step38: Train the model and evaluate its performance.
Step39: Like the baseline model, the linear model can be called on batches of wide windows. Used this way, the model makes a set of independent predictions on consecutive time steps. The time axis acts like another batch axis. There are no interactions between the predictions at each time step.
Step40: Here is the plot of its example predictions on the wide_window. Note how in many cases the prediction is clearly better than just returning the input temperature, but in a few cases it's worse.
Step41: One advantage of linear models is that they are relatively simple to interpret. You can pull out the layer's weights and visualize the weight assigned to each input.
Step42: Sometimes the model doesn't place the most weight on the input T (degC). This is one of the risks of random initialization.
Step43: Multi-step dense
Step44: You can train a dense model on a multiple-input-step window by adding layers.Flatten as the first layer of the model.
Step45: The main downside of this approach is that the resulting model can only be executed on input windows of exactly this shape.
Step46: The convolutional models in the next section fix this problem.
Step47: Run it on an example batch to check that the model produces outputs with the expected shape.
Step48: Train and evaluate it on the conv_window; it should give performance similar to the multi_step_dense model.
Step49: The difference between this conv_model and the multi_step_dense model is that the conv_model can be run on inputs of any length. The convolutional layer is applied to a sliding window of inputs.
Step50: The output is shorter than the input. To make training or plotting work, you need the labels and the predictions to have the same length, so build a WindowGenerator that produces wide windows with a few extra input time steps, making the label and prediction lengths match.
Step51: Now you can plot the model's predictions on a wider window. Note the 3 input time steps before the first prediction. Every prediction here is based on the 3 preceding time steps.
Step52: Recurrent neural network
Step53: With return_sequences=True, the model can be trained on 24 hours of data at a time.
Step54: Performance
Step55: Multi-output models
Step56: The features axis of the labels now has the same depth as the inputs, instead of 1.
Step57: Dense
Step58: RNN
Step59: <a id="residual"></a>
Step60: Performance
Step61: The above performances are averaged across all model outputs.
Step62: Baselines
Step63: Since this task is to predict 24 hours given 24 hours, another simple approach is to repeat the previous day, assuming tomorrow will be similar.
Step64: Single-shot models
Step65: Dense
Step66: CNN
Step67: RNN
Step68: Advanced
Step69: The first method this model needs is a warmup method to initialize its internal state based on the inputs. Once trained, this state will capture the relevant parts of the input history. This is equivalent to the single-step LSTM model from earlier.
Step70: This method returns a single time-step prediction and the internal state of the LSTM.
Step71: With the RNN's state and an initial prediction, you can now continue iterating the model, feeding the prediction from each step back in as the input.
Step72: Test run this model on the example inputs.
Step73: Now train the model.
Step74: Performance
Step75: The metrics for the multi-output models in the first half of this tutorial show the performance averaged across all output features. These performances are similar, but also averaged across output time steps.
|
3,435
|
<ASSISTANT_TASK:>
Python Code:
class Point():
Holds a point (x, y) in the plane
def __init__(self, x=0, y=0):
assert isinstance(x, (int, float)) and isinstance(y, (int, float))
self.x = float(x)
self.y = float(y)
p = Point(1,2)
print("point", p.x, p.y)
origin = Point()
print("origin", origin.x, origin.y)
p
class Point():
Holds a point (x, y) in the plane
def __init__(self, x=0, y=0):
assert isinstance(x, (int, float)) and isinstance(y, (int, float))
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "Point(" + str(self.x) + ", " + str(self.y) + ")"
Point(1,2)
class Point():
Holds a point (x, y) in the plane
def __init__(self, x=0, y=0):
assert isinstance(x, (int, float)) and isinstance(y, (int, float))
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "Point(" + str(self.x) + ", " + str(self.y) + ")"
def add(self, other):
assert isinstance(other, (int, Point))
if isinstance(other, Point):
return Point(self.x + other.x , self.y + other.y)
else: # other is int, taken as (int, int)
return Point(self.x + other , self.y + other)
Point(1,1).add(Point(2,2))
Point(1,1).add(2)
class Point():
Holds a point (x, y) in the plane
def __init__(self, x=0, y=0):
assert isinstance(x, (int, float)) and isinstance(y, (int, float))
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "Point(" + str(self.x) + ", " + str(self.y) + ")"
def __add__(self, other):
assert isinstance(other, (int, Point))
if isinstance(other, Point):
return Point(self.x + other.x , self.y + other.y)
else: # other is int, taken as (int, int)
return Point(self.x + other , self.y + other)
Point(1,1) + Point(2,2)
Point(1,1) + 2
Point(1,2) == Point(2,1)
Point(1,2) == Point(1,2)
p = Point()
p == p
Point(1,2) > Point(2,1)
class Point():
Holds a point (x, y) in the plane
def __init__(self, x=0, y=0):
assert isinstance(x, (int, float)) and isinstance(y, (int, float))
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "Point(" + str(self.x) + ", " + str(self.y) + ")"
def __add__(self, other):
assert isinstance(other, (int, Point))
if isinstance(other, Point):
return Point(self.x + other.x , self.y + other.y)
else: # other is int, taken as (int, int)
return Point(self.x + other , self.y + other)
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
def __gt__(self, other):
return (self.x > other.x and self.y > other.y)
Point(1,0) == Point(1,2)
Point(1,0) == Point(1,0)
Point(1,0) > Point(1,2)
class Point():
Holds a point (x, y) in the plane
def __init__(self, x=0, y=0):
assert isinstance(x, (int, float)) and isinstance(y, (int, float))
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "Point(" + str(self.x) + ", " + str(self.y) + ")"
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
def __gt__(self, other):
return (self.x > other.x and self.y > other.y)
def __add__(self, other):
assert isinstance(other, (int, Point))
if isinstance(other, Point):
return Point(self.x + other.x , self.y + other.y)
else: # other is int, taken as (int, int)
return Point(self.x + other , self.y + other)
def increment(self, other):
'''this method changes self (add "inplace")'''
assert isinstance(other,Point)
self.x += other.x
self.y += other.y
p = Point(6.5, 7)
p + Point(1,2)
print(p)
p.increment(Point(1,2))
print(p)
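# A related sketch (added illustration for the Point class above): the same
# in-place update can be wired to the += operator through __iadd__.
def __iadd__(self, other):
    assert isinstance(other, Point)
    self.x += other.x
    self.y += other.y
    return self
Point.__iadd__ = __iadd__
q = Point(6.5, 7)
q += Point(1, 2)
print(q)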
Point(5,6) > Point(1,2)
class Point():
Holds a point (x, y) in the plane
def __init__(self, x=0, y=0):
assert isinstance(x, (int, float)) and isinstance(y, (int, float))
self.x = float(x)
self.y = float(y)
def __repr__(self):
return "Point(" + str(self.x) + ", " + str(self.y) + ")"
def __eq__(self, other):
return (self.x, self.y) == (other.x, other.y)
def __lt__(self, other):
return (self.x < other.x and self.y < other.y)
def __add__(self, other):
assert isinstance(other, (int, Point))
if isinstance(other, Point):
return Point(self.x + other.x , self.y + other.y)
else: # other is int, taken as (int, int)
return Point(self.x + other , self.y + other)
def increment(self, other):
'''this method changes self (add "inplace")'''
assert isinstance(other,Point)
self.x += other.x
self.y += other.y
def is_extreme(self, *points):
for point in points:
if not self > point:
return False
return True
p = Point(5, 6)
p.is_extreme(Point(1,1))
p.is_extreme(Point(1,1), Point(2,5), Point(6,2))
Point.is_extreme(Point(7,8), Point(1,1), Point(4,5), Point(2,3))
class Rectangle1():
Holds a parallel-axes rectangle by storing two points
lower left vertex - llv
upper right vertex - urv
def __init__(self, lower_left_vertex, upper_right_vertex):
assert isinstance(lower_left_vertex, Point)
assert isinstance(upper_right_vertex, Point)
assert lower_left_vertex < upper_right_vertex
self.llv = lower_left_vertex
self.urv = upper_right_vertex
def __repr__(self):
representation = "Rectangle with lower left {0} and upper right {1}"
return representation.format(self.llv, self.urv)
def dimensions(self):
height = self.urv.y - self.llv.y
width = self.urv.x - self.llv.x
return height, width
def area(self):
height, width = self.dimensions()
area = height * width
return area
def transpose(self):
Reflection with respect to the line passing through the lower left vertex at an angle of 315 (-45) degrees
height, width = self.dimensions()
self.urv = self.llv
self.llv = Point(self.urv.x - height, self.urv.y - width)
rec = Rectangle1(Point(), Point(2,1))
print(rec)
print("Area:", rec.area())
print("Dimensions:", rec.dimensions())
rec.transpose()
print("Transposed:", rec)
class Rectangle2():
Holds a parallel-axes rectangle by storing lower left point, height and width
def __init__(self, point, height, width):
assert isinstance(point, Point)
assert isinstance(height, (int,float))
assert isinstance(width, (int,float))
assert height > 0
assert width > 0
self.point = point
self.height = float(height)
self.width = float(width)
def __repr__(self):
representation = "Rectangle with lower left {0} and upper right {1}"
return representation.format(self.point, Point(self.point.x + self.width, self.point.y + self.height))
def dimensions(self):
return self.height, self.width
def area(self):
area = self.height * self.width
return area
def transpose(self):
self.point = Point(self.point.x - self.height , self.point.y - self.width)
self.height, self.width = self.width, self.height
rec = Rectangle2(Point(), 1, 2)
print(rec)
print("Area:", rec.area())
print("Dimensions:", rec.dimensions())
rec.transpose()
print("Transposed:", rec)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classes
Step2: Notice that when we send a Point to the console we get
Step4: Which is not useful, so we will define how Point is represented in the console using __repr__.
Step6: Next up we define a method to add two points. Addition is by elements - $(x_1, y_1) + (x_2, y_2) = (x_1+x_2, y_1+y_2)$.
Step8: A nicer way to do it is to overload the addition operator + by defining the addition method name to a name Python reserves for addition - __add__ (those are double underscores)
Step9: We want to be able to compare Points
Step11: So == checks by identity and > is not defined. Let us overload both these operators
Step12: First we check if two points are equal
Step13: Then if one is strictly smaller than the other
Step15: The addition operator + returns a new instance.
Step17: We now write a method that, given many points, checks whether the current point is more extreme than all the other points.
Step18: We can also use the method via the class instead of the instance, and give the instance of interest (the one that we want to know if it is the extreme) as the first argument self. Much like this, we can either do 'hi'.upper() or str.upper('hi').
Step21: Rectangle class
Step23: Second implementation - point and dimensions
|
3,436
|
<ASSISTANT_TASK:>
Python Code:
!sequana_coverage --download-reference FN433596
! art_illumina -sam -i FN433596.fa -p -l 100 -ss HS20 -f 20 -m 500 -s 40 -o paired_dat -f 100
# no need for the *aln and *sam, let us remove them to save space
!rm -f paired*.aln paired_dat.sam
!sequana_mapping --reference FN433596.fa --file1 paired_dat1.fq --file2 paired_dat2.fq 1>out 2>err
# bioconvert FN433596.fa.sorted.bam simulated.bed -f
# or use e.g. bedtools:
!bedtools genomecov -d -ibam FN433596.fa.sorted.bam > simulated.bed
!sequana_coverage --input simulated.bed --reference FN433596.fa -w 20001 -o --level WARNING -C .5
!cp report/*/*/rois.csv rois_noise_20001.csv
# An instance of coverage signal (yours may be slightly different)
from IPython.display import Image
Image("coverage.png")
%pylab inline
# Here is a convenient function to plot the ROIs in terms of sizes
# and max zscore
def plot_results(file_roi, choice="max"):
import pandas as pd
roi = pd.read_csv(file_roi)
roi = roi.query("start>100 and end<3043210")
plot(roi["size"], roi["{}_zscore".format(choice)], "or", label="candidate ROIs")
for this in [3,4,5,-3,-4,-5]:
if this == 3: label = "thresholds"
else: label="_nolegend_"
axhline(this, ls="--", label=label)
print("{} ROIs found".format(len(roi)))
xlabel("length of the ROIs")
ylabel("z-scores")
legend()
return roi
roi = plot_results("rois_noise_20001.csv", "max")
import random
import pandas as pd
def create_deletion():
df = pd.read_csv("simulated.bed", sep="\t", header=None)
positions = []
sizes = []
for i in range(80):
# the + and -4000 shift are there to guarantee the next
# CNV does not overlap with the previous one since
# CNV length can be as much as 8000
pos = random.randint(37000*i+4000, 37000*(i+1)-4000)
size = random.randint(1,8) * 1000
positions.append(pos)
#size = 2000
df.loc[pos:pos+size,2] = 0 #deletion
sizes.append(size)
df.to_csv("cnv_deletion.bed", sep="\t", header=None, index=None)
return positions, sizes
def create_duplicated():
df = pd.read_csv("simulated.bed", sep="\t", header=None)
positions = []
sizes = []
for i in range(80):
pos = random.randint(37000*i+4000, 37000*(i+1)-4000)
size = random.randint(1,8) * 1000
positions.append(pos)
df.loc[pos:pos+size,2] += 100 #duplicated
sizes.append(size)
df.to_csv("cnv_duplicated.bed", sep="\t", header=None, index=None)
return positions, sizes
def create_cnvs_mixed():
df = pd.read_csv("simulated.bed", sep="\t", header=None)
# we will place 10% of CNV of size from 1000 to 8000
import random
positions = []
sizes = []
for i in range(80):
pos = random.randint(37000*i+4000, 37000*(i+1)-4000)
size = random.randint(1,8) * 1000
positions.append(pos)
status = random.randint(0,1)
if status == 0:
df.loc[pos:pos+size,2] -= 50
elif status == 1:
df.loc[pos:pos+size,2] += 50
sizes.append(size)
df.to_csv("cnv_mixed.bed", sep="\t", header=None, index=None)
return positions, sizes
def check_found(positions, sizes, roi, precision=200, min_size=150):
A simple function that checks, given the positions and sizes, that
the injected CNVs are detected in the ROIs.
We check that the starting or ending position of at least one injected CNV
coincides with a ROI (within the given precision) and that this ROI is at least min_size long.
Indeed, injections are at least 1000 bases while noise events are generally below 100 bases,
as shown above.
found = [False] * len(positions)
i = 0
zscores = []
for position,size in zip(positions, sizes):
for this in roi.iterrows():
this = this[1]
if (abs(this.start-position)<precision or abs(this.end-position-size)<precision )and this['size'] > min_size:
#print(this.start, this.end, position, size)
found[i] = True
zscores.append(this.mean_zscore)
continue
if found[i] is False:
print("position not found {} size={}".format(position, size))
i+=1
print("Found {}".format(sum(found)))
return zscores
# call this only once !!!!
positions_deletion, sizes_deletion = create_deletion()
!sequana_coverage --input cnv_deletion.bed -o -w 20001 --level WARNING
!cp report/*/*/rois.csv rois_cnv_deleted.csv
rois_deletion = plot_results("rois_cnv_deleted.csv")
# as precise as 2 base positions but for safety, we put precision of 10 and we can check that the detection rate is 100%
zscores = check_found(positions_deletion, sizes_deletion, rois_deletion,
precision=5)
positions_duplicated, sizes_duplicated = create_duplicated()
!sequana_coverage --input cnv_duplicated.bed -o -w 40001 --level ERROR -C .3 --no-html --no-multiqc
!cp report/*/*/rois.csv rois_cnv_duplicated_40001.csv
rois_duplicated = plot_results("rois_cnv_duplicated_40001.csv", choice="max")
rois_duplicated = plot_results("rois_cnv_duplicated_20000.csv", choice="max")
check_found(positions_duplicated, sizes_duplicated, rois_duplicated,
precision=5)
positions_mix, sizes_mix = create_cnvs_mixed()
!sequana_coverage --input cnv_mixed.bed -o -w 40001 --level ERROR --no-multiqc --no-html --cnv-clustering 1000
!cp report/*/*/rois.csv rois_cnv_mixed.csv
Image("coverage_with_cnvs.png")
rois_mixed = plot_results("rois_cnv_mixed.csv", choice="max")
# note that here we increase the precision to 100 bases. The positions
# are not as precise as in the duplication or deletion cases.
check_found(positions_mix, sizes_mix, rois_mixed, precision=20)
roi = plot_results("rois_noise_20001.csv")
What is happening here is that we detect many events close to the threshold.
For instance, all short events on the left-hand side have a z-score close to 4,
which is our threshold.
By pure chance, we get longer events of 40 or 50 bp. This is quite surprising, and we wanted to know whether those
are real false positives or due to a genuine feature in the genome (e.g. repeated regions that prevent a good mapping).
What is not shown in this plot is the position of the event. We can simulate the same data again (different seed).
If those long events appear at the same place, they can be considered as genuine; otherwise, they should be considered
as potential background appearing just by chance.
So, we generated 50 simulated data sets and reproduced the image above. We store the data in 50_simulated_rois.csv
from easydev import execute as shell
def create_data(start=0,end=10):
for i in range(start, end):
print("---------------- {}".format(i))
cmd = "art_illumina -sam -i FN433596.fa -p -l 100 -ss HS20 -f 20 -m 500 -s 40 -o paired_dat -f 100"
shell(cmd)
cmd = "rm -f paired*.aln paired_dat.sam"
shell(cmd)
cmd = "sequana_mapping --reference FN433596.fa --file1 paired_dat1.fq --file2 paired_dat2.fq 1>out 2>err"
shell(cmd)
cmd = "bedtools genomecov -d -ibam FN433596.fa.sorted.bam > simulated.bed"
shell(cmd)
cmd = "sequana_coverage --input simulated.bed --reference FN433596.fa -w 20001 -o --no-html --no-multiqc"
shell(cmd)
cmd = "cp report/*/*/rois.csv rois_{}.csv".format(i)
shell(cmd)
#create_data(0,50)
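# Hedged assumption: the combined "50_simulated_rois.csv" loaded below was
# presumably produced by concatenating the per-run rois_<i>.csv files, e.g.:
# import glob
# frames = [pd.read_csv(f) for f in sorted(glob.glob("rois_[0-9]*.csv"))]
# pd.concat(frames, ignore_index=True).to_csv("50_simulated_rois.csv", index=False)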
import pandas as pd
rois = pd.read_csv("50_simulated_rois.csv")
rois = rois.query("start>100 and end <3043210")
roi = plot_results("50_simulated_rois.csv", choice="max")
roi = plot_results("100_simulated_rois.csv", choice="mean")
roi = plot_results("100_simulated_rois.csv", choice="max")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulated FastQ data
Step2: Creating the BAM (mapping) and BED files
Step3: This uses bwa and samtools behind the scene. Then, we will convert the resulting BAM file (FN433596.fasta.sorted.bam) into a BED file once for all. To do so, we use bioconvert (http
Step4: sequana_coverage
Step5: The false positives
Step7: Most of the detected events have a zscore close to the chosen thresholds (-4 and 4). Moreover,
Step8: Deleted regions are all detected
Step9: duplicated regions
Step10: Same results with W=20000,40000,60000,100000 but recovered CN is better
Step11: Note that you may see events with negative zscore. Those are false detection due to the presence of two CNVs close to each other. This can be avoided by increasing the window size e.g. to 40000
Step12: Mixes of duplicated and deleted regions
Step13: Some events (about 1%) may be labelled as not found, but visual inspection will show that they are actually detected. This is due to a starting position being offset by noise in the data set that interferes with the injected CNVs.
Step14: With 50 simulations, we get 826 events. (100 are removed because on the edge of the origin of replication), which means about 16 events per simulation. The max length is 90.
|
3,437
|
<ASSISTANT_TASK:>
Python Code:
try:
n = int(input("Enter n: "))
if n > 0:
q = 1
elif n == 0:
q = 2
else:
q = 3
except:
q = 4
n = int(input("Enter n: "))
if n > 0:
q = 1
if n >10:
q = 2
if n > 20:
q = 3
else:
q = 4
a = 10
b = "15"
c = 10.5
w = a + c
x = a + 1.0
#y = a + b
z = float(b) + c
"10" + 10
a = 10
b = "15"
c = 10.5
w = 1.5 < b # raises TypeError in Python 3: '<' not supported between float and str
x = a + 5
y = int(c) + int(b)
z = int(b) + c
# input(), random.randint(), and int() are all functions!
import random
x = input("Enter Name: ")
y = random.randint(1,10) #random is the module, randint() is the function
z = int("9")
import random
x = random.randint(1,10)
print(x)
import random
dir(random)
help(random.randint)
# arguments are input into the function
# return is the output
# Generic anatomy of a function definition (template):
# def my_function_name(input_arguments):
#     statements-in-function
#     return output_expression
b = "a"
c = "t"
def x(a):
c = "d"
return a+b+c
print(x("p"))
print(c)
def myfunc(y):
x = 1
z = x + y + a
return z
a = 2
b = myfunc(2)
print(b)
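# A short follow-up example (illustrative): a function can only rebind a
# global name if it declares it with the `global` keyword; without that
# declaration, the augmented assignment below would raise UnboundLocalError.
counter = 0
def bump():
    global counter
    counter += 1
bump()
print(counter) # 1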
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: a. 1
Step2: a. 1
Step3: a. 4
Step4: a. 4
Step5: Functions, continued
Step6: dir() and help() built-in functions
Step7: Watch Me Code 1
Step8: Watch Me Code 3
Step9: IPython display and interact
|
3,438
|
<ASSISTANT_TASK:>
Python Code:
from IPython import display
display.Image('img/simple.jpg', width=400)
import random
import math
import numpy
from string import Template
from deap import base
from deap import creator
from deap import tools
from deap import algorithms
from lily_template import TEMPLATE
# Global Variables
OPTIONS_M = ((0,-3,5), (0,-3,5), (0,-4,5), (0,-3,6), (0,-3,5), (0,-4,5), (0,-4,5))
OPTIONS_m = ((0,-4,5), (0,-4,5), (0,-3,5), (0,-3,5), (0,-4,5), (0,-3,6), (0,5))
MOD_M = ('M','m','m','M','M','m','d')
MOD_m = ('m','d','M','m','M','M','M')
display.Image('img/ex_prog.jpg', width=400)
def setTon(line):
Return the tonality of the exercise and the bass notes of it
ton = line[:2]
notes = list(map(int, line[3:].split(' ')))
if ton[1] == '#':
ton = (int(ton[0])*7)%12
else:
ton = (int(ton[0])*5)%12
for note in notes:
if (ton+6)%12 == note%12:
ton = str((ton-3)%12)+'m'
break
else:
if ton-3 == notes[-1]%12:
ton = str((ton-3)%12)+'m'
else:
ton = str(ton)+'M'
return ton, notes
display.Image('img/all_same.jpg', width=400)
def creatChord(nameC, noteF):
Create one chord given the name of the chord and the fundamental note
num_funda = int(nameC[:-1])
if nameC[-1] == 'M':
val_notes = [num_funda, (num_funda+4)%12, (num_funda+7)%12]
elif nameC[-1] == 'm':
val_notes = [num_funda, (num_funda+3)%12, (num_funda+7)%12]
elif nameC[-1] == 'd':
val_notes = [num_funda, (num_funda+3)%12, (num_funda+6)%12]
# Tessitura of each voice
tenorR = list(range(48, 69))
contR = list(range(52, 77))
sopR = list(range(60, 86))
# Depending in the bass note this are the options for the others voices
if noteF%12 == val_notes[0]:
opc = [[1,1,1], [2,1,0], [0,1,2]]
elif noteF%12 == val_notes[1]:
opc = [[1,0,2], [3,0,0], [2,0,1]]
elif noteF%12 == val_notes[2]:
opc = [[1,1,1], [2,1,0]]
opc = random.choice(opc)
chordN = list()
for num, val in zip(opc, val_notes):
chordN += [val]*num
random.shuffle(chordN)
chord = [noteF,]
for nte, voce in zip(chordN, [tenorR, contR, sopR]):
posible_n = [x for x in voce if x%12 == nte]
chord.append(random.choice(posible_n))
return chord
def selChord(ton, notesBass):
Select the chords from all the possibilities
listaOp = OPTIONS_M if ton[-1] == 'M' else OPTIONS_m
listaMod = MOD_M if ton[-1] == 'M' else MOD_m
prog = list()
for note in notesBass:
name = note%12
grad = name-int(ton[:-1])
grad = math.ceil(((grad+12)%12) / 2)
num = (random.choice(listaOp[grad]) + name +12) % 12
grad = num-int(ton[:-1])
grad = math.ceil(((grad+12)%12) / 2)
name = '{}{}'.format(num, listaMod[grad])
prog.append([creatChord(name, note), grad])
return prog
def newChordProg(ton, notes):
Create a new individual given the tonality and the bass notes
chords = selChord(ton, notes)
for c in chords:
yield c
def check_interval(chord):
Return the number of mistakes in the distance between the notes.
res = 0
if chord[2] - chord[1] > 12 or chord[2]-chord[1] < 0:
res += 15
if chord[3] - chord[2] > 12 or chord[3]-chord[2] < 0:
res += 15
if chord[1] == chord[2] or chord[2] == chord[3]:
res += 1.4
return res
def check_2_chords(ch1, ch2):
Return the number of mistakes in the intervals between 2 chords.
res = 0
# Check for 5° and 8°
ite1 = map(lambda x,y: y-x, ch1[:-1], ch1[1:])
ite2 = map(lambda x,y: y-x, ch2[:-1], ch2[1:])
for inter1, inter2 in zip(ite1, ite2):
if inter1 == 7 and inter2 == 7:
res += 15
elif inter1 == 0 and inter2 == 0:
res += 15
elif inter1 == 12 and inter2 == 12:
res += 15
# Check for big intervals, just to make it more "human"
for note1, note2 in zip(ch1[1:], ch2[1:]):
if abs(note1-note2) >= 7: # 7 equals 5° interval
res += .7
return res
def neighborhood(iterable):
Generator that yields the previous, current, and next items.
iterator = iter(iterable)
prev = None
item = next(iterator) # throws StopIteration if empty.
for nex in iterator:
yield (prev,item,nex)
prev = item
item = nex
yield (prev,item,None)
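# Quick sanity check (illustrative addition): neighborhood() yields
# (previous, current, next) triples, with None filling in at the boundaries.
print(list(neighborhood([1, 2, 3]))) # [(None, 1, 2), (1, 2, 3), (2, 3, None)]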
def evalNumErr(ton, individual):
Evaluation function.
res = 0
for prev, item, nex in neighborhood(individual):
res += check_interval(item[0])
if prev == None:
if item[1] != 0:
res += 6
continue
else:
if prev[1] in [4, 6] and item[1] in [3, 1]:
res += 20
res += check_2_chords(prev[0], item[0])
if nex == None:
if item[1] in [1, 2, 3, 4, 5, 6]:
res += 6
return (res,)
def mutChangeNotes(ton, individual, indpb):
Mutation function.
new_ind = toolbox.clone(individual)
for x in range(len(individual[0])):
if random.random() < indpb:
listaOp = OPTIONS_M if ton[-1] == 'M' else OPTIONS_m
listaMod = MOD_M if ton[-1] == 'M' else MOD_m
note = individual[x][0][0]
name = note%12
grad = name-int(ton[:-1])
grad = math.ceil(((grad+12)%12) / 2)
num = (random.choice(listaOp[grad]) + name +12) % 12
grad = num-int(ton[:-1])
grad = math.ceil(((grad+12)%12) / 2)
name = '{}{}'.format(num, listaMod[grad])
new_ind[x] = [creatChord(name, note), grad]
del new_ind.fitness.values
return new_ind,
def transform_lilypond(ton, indiv, make_file=False):
Take one list of chords and print it in lilypond notation.
note_map = dict()
if ton[-1] == 'M':
note_map = {0: 'c',
1: 'cis',
2: 'd',
3: 'dis',
4: 'e',
5: 'f',
6: 'fis',
7: 'g',
8: 'gis',
9: 'a',
10:'ais',
11:'b'
}
gra = 'major'
else:
note_map = {0: 'c',
1: 'des',
2: 'd',
3: 'ees',
4: 'e',
5: 'f',
6: 'ges',
7: 'g',
8: 'aes',
9: 'a',
10:'bes',
11:'b'
}
gra = 'minor'
voces = [[], [], [], []]
for chord in indiv:
for note, voce in zip(chord, voces):
octave = (note // 12)-4
name_lily = note_map[note % 12]
if octave < 0:
name_lily += ',' * (octave * -1)
elif octave > 0:
name_lily += "'" * octave
voce.append(name_lily)
if make_file:
with open('lily/'+ton+'.ly', 'w') as f:
key_map = {'0': 'c',
'1': 'des',
'2': 'd',
'3': 'ees',
'4': 'e',
'5': 'f',
'6': 'ges',
'7': 'g',
'8': 'aes',
'9': 'a',
'10':'bes',
'11':'b'
}
print(ton)
f.write(Template(TEMPLATE).substitute(key=key_map[ton[:-1]], grade=gra, notes='{}|\n{}|\n{}|\n{}|\n'.format(*(' '.join(voce) for voce in reversed(voces)))))
print('{}|\n{}|\n{}|\n{}|\n'.format(*(' '.join(voce) for voce in reversed(voces))))
def main(ton):
pop = toolbox.population(n=400)
hof = tools.HallOfFame(3)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register('avg', numpy.mean)
stats.register('std', numpy.std)
stats.register('min', numpy.min)
stats.register('max', numpy.max)
pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.3, ngen=70, stats=stats, halloffame=hof, verbose=True)
while min(log.select('min')) > 15:
pop = toolbox.population(n=400)
pop, log = algorithms.eaSimple(pop, toolbox, cxpb=0.5, mutpb=0.3, ngen=70, stats=stats, halloffame=hof, verbose=True)
for best in hof:
print([x[0] for x in best], end='\n============\n')
transform_lilypond(ton, [x[0] for x in hof[0]], make_file=True)
if __name__ == '__main__':
line = input('n[#b] notas ')
ton, notes = setTon(line)
print(ton, notes)
# ========================= GA setup =========================
creator.create('FitnessMin', base.Fitness, weights=(-1.0,))
creator.create('Individual', list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
toolbox.register('creat_notes', newChordProg, ton, notes)
toolbox.register('individual', tools.initIterate, creator.Individual,
toolbox.creat_notes)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
toolbox.register('evaluate', evalNumErr, ton)
toolbox.register('mate', tools.cxOnePoint)
toolbox.register('mutate', mutChangeNotes, ton, indpb=0.4)
toolbox.register('select', tools.selTournament, tournsize=3)
# =============================================================
main(ton)
import os
os.system('python auto_trim.py {} {}'.format('lily/'+ton+'.ly', 'temp.jpg'))
display.Image('img/temp.jpg', width=600)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Will be represented as
Step2: The file lily_template has the template to create the lilypond file, and to format it I used the string.Template class.
Step3: Initialization
Step5: This individual would be represented as
Step7: The function returns a tuple with the tonality represented as a number from 0 to 11 (C to B) and a 'M' if the tonality is major or 'm' if it's minor.
Step9: After this function, I only need to select a chord for each of the notes in the bass. This process uses the MIDI representation of the notes, doing arithmetic operations on them and making a random choice between the possible options. At the end we have a complete chord progression.
Step11: Now, DEAP requires a generator to create the individuals, so I just create a simple generator that yields each chord of the progression.
Step13: Selection
Step15: Also, between two chords you have to avoid consecutive fifths and octaves, and a human composer tends to keep the intervals in a voice more "natural", rarely making jumps bigger than a fifth.
Step17: And for the evaluation of an individual I used this generator to access an element and the two neighbors
Step19: The actual evaluation function
Step21: Crossover
Step23: The next function is just to create a lilypond file using the chords in the individual to see a nice sheet.
Step24: The main function is where the actual algorithm runs; it's a simple evolutionary algorithm with a hall of fame where the best individuals are saved. The little while loop simply reruns the algorithm until an individual gets an evaluation lower than, in this case, 15.
Step25: And at the end, set up all the functions and the form of the individual in the toolbox so that DEAP can use them in the algorithm.
Step26: And just to show the result I made a little script to trim the pdf that lilypond generates.
|
3,439
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import networkx as nx
K_5=nx.complete_graph(5)
nx.draw(K_5)
def complete_deg(n):
Return the integer valued degree matrix D for the complete graph K_n.
D = np.diag([n-1 for i in range(n)])
return D
#raise NotImplementedError()
D = complete_deg(5)
assert D.shape==(5,5)
assert D.dtype==np.dtype(int)
assert np.all(D.diagonal()==4*np.ones(5))
assert np.all(D-np.diag(D.diagonal())==np.zeros((5,5),dtype=int))
def complete_adj(n):
Return the integer valued adjacency matrix A for the complete graph K_n.
D = np.ones((n,n), dtype = int) - np.diag([1 for i in range(n)])
return D
#raise NotImplementedError()
A = complete_adj(5)
assert A.shape==(5,5)
assert A.dtype==np.dtype(int)
assert np.all(A+np.eye(5,dtype=int)==np.ones((5,5),dtype=int))
def spectrum(ns, printq = False, graph = False):
for i in range(len(ns)):
L = complete_deg(ns[i]) - complete_adj(ns[i])
eigs = np.linalg.eigvals(L)
if printq:
print("Complete Graph with %s vertices (eigenvalues):" % ns[i])
print(eigs)
print("\n")
if graph:
# plot the sorted (real-valued) eigenvalues for this n
plt.plot(sorted(eigs.real), 'o', label='K_%d' % ns[i])
plt.legend()
spectrum(range(3,11), True)
#raise NotImplementedError()
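# One way to check the conjecture (sketch): since L = n*I - J for K_n and the
# all-ones matrix J has eigenvalues n (once) and 0 (n-1 times), the Laplacian
# spectrum should be 0 once and n with multiplicity n - 1.
for n in range(3, 11):
    eigs = np.sort(np.linalg.eigvals(complete_deg(n) - complete_adj(n)).real)
    assert np.allclose(eigs, [0] + [n] * (n - 1))
print("conjecture holds for n = 3..10")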
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Complete graph Laplacian
Step3: The Laplacian matrix is a matrix that is extremely important in graph theory and numerical analysis. It is defined as $L=D-A$, where $D$ is the degree matrix and $A$ is the adjacency matrix. For the purpose of this problem you don't need to understand the details of these matrices, although their definitions are relatively simple.
Step5: The adjacency matrix for $K_n$ is an $n \times n$ matrix with zeros along the diagonal and ones everywhere else. Write a function to compute the adjacency matrix for $K_n$ using NumPy.
Step6: Use NumPy to explore the eigenvalues or spectrum of the Laplacian L of $K_n$. What patterns do you notice as $n$ changes? Create a conjecture about the general Laplace spectrum of $K_n$.
|
3,440
|
<ASSISTANT_TASK:>
Python Code:
from coherence import load_coherence_dataset
coherence_ds = load_coherence_dataset()
X = coherence_ds.data
y = coherence_ds.target
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=1)
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
param_range = [0.0001, 0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0]
param_grid = [# Parameters for Linear Kernel
{'clf__C': param_range,
'clf__kernel': ['linear']},
# Parameter for RBF Kernel
{'clf__C': param_range,
'clf__gamma': param_range,
'clf__kernel': ['rbf']},
# Parameter for Polynomial Kernel
{'clf__C': param_range,
'clf__degree': [2, 3, 5, 8, 16],
'clf__coef0': [0, 1],
'clf__kernel': ['poly']},
]
# So far, the `Pipeline` is not actually needed, but
# this code is ready in case multiple (features selection)
# steps would be considered for inclusion
pipe_svc = Pipeline([('clf', SVC(random_state=1))])
from sklearn.grid_search import ParameterGrid
hyperp_grid = list(ParameterGrid(param_grid))
# Pretty Printing Grid
kernel_combinations = dict()
for hp in hyperp_grid:
kernel = hp['clf__kernel']
kernel_combinations.setdefault(kernel, list())
kernel_combinations[kernel].append(hp)
for kernel in kernel_combinations:
print('Hyperparameters for Kernel: ', kernel.title())
for hp in kernel_combinations[kernel]:
print('\t', hp)
print('-'*80)
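# A quick tally (illustrative): with 10-fold cross-validation, every grid
# point below is fit 10 times, so the search cost per scoring metric is:
print(len(hyperp_grid), 'hyperparameter combinations ->',
      10 * len(hyperp_grid), 'model fits per metric')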
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import confusion_matrix
def classify_samples(pipe_svc, param_grid, score, scoring='accuracy', **extra_score_params):
print('-'*80)
print('PERFORMING MACHINE LEARNING ANALYSIS WITH SCORING METRIC: ', scoring)
# GridSearch CV
gs = GridSearchCV(estimator=pipe_svc,
param_grid=param_grid,
scoring=scoring,
cv=10, n_jobs=-1)
gs = gs.fit(X_train, y_train) # Fit the GridSearchCV Estimator
# Printing training results
print('Best Score: ', gs.best_score_)
print('Best Parameters: ', gs.best_params_)
#print('Grid Scores: ', gs.grid_scores_)
# Predict with Best Estimator
clf = gs.best_estimator_
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('Mean Accuracy on Test set: %.3f' % (clf.score(X_test, y_test)))
print('Calculating Score on Prediction: ', score(y_test, y_pred, **extra_score_params))
#Confusion Matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
scores = cross_val_score(gs, X, y, scoring=scoring, cv=10, n_jobs=-1)
print('All CV Scores')
for i, cv_score in enumerate(scores):
print(i, '): ', cv_score)
print('CV %s : %.3f +/- %.3f' % (scoring, np.mean(scores), np.std(scores)))
print('-'*80)
# Accuracy (replication)
from sklearn.metrics import accuracy_score
classify_samples(pipe_svc, param_grid, accuracy_score, scoring='accuracy')
# F1
from sklearn.metrics import f1_score
classify_samples(pipe_svc, param_grid, f1_score, scoring='f1', average='binary')
# F1 Macro-averaged
classify_samples(pipe_svc, param_grid, f1_score, scoring='f1', average='macro')
# F1 Weighted Average
classify_samples(pipe_svc, param_grid, f1_score, scoring='f1', average='weighted')
# ROC AUC Score
from sklearn.metrics import roc_auc_score
classify_samples(pipe_svc, param_grid, roc_auc_score, scoring='roc_auc', average='macro') # default
# ROC AUC Score - Weighted Average
classify_samples(pipe_svc, param_grid, roc_auc_score, scoring='roc_auc', average='weighted') # Roc AUC Weighted
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Train-Test Split
Step2: K-fold Cross Validation and HyperParameter Tuning
Step3: Setting HyperParameters to tune
Step4: Show HyperParameters Grid
Step6: Stratified K-Fold Cross Validation
|
3,441
|
<ASSISTANT_TASK:>
Python Code:
# import the standard numerical and plotting packages
import matplotlib.pyplot as plt
import numpy as np
from skimage.io import imread
# load the images
a = imread("B005_1.tif")
b = imread("B005_2.tif")
fig, axs = plt.subplots(1, 2, figsize=(9, 4))
axs[0].imshow(a, cmap=plt.cm.gray)
axs[1].imshow(b, cmap=plt.cm.gray)
plt.show()
win_size = 32
a_win = a[:win_size, :win_size].copy()
b_win = b[:win_size, :win_size].copy()
fig, axs = plt.subplots(1, 2, figsize=(9, 4))
axs[0].imshow(a_win, cmap=plt.cm.gray)
axs[1].imshow(b_win, cmap=plt.cm.gray)
plt.show()
fig = plt.imshow(b_win - a_win, cmap=plt.cm.gray)
plt.title("Without shift")
plt.show()
plt.imshow(b_win - np.roll(a_win, (1, 0), axis=(0, 1)), cmap=plt.cm.gray)
plt.title("Difference when A has been shifted by 1 pixel")
plt.show()
def match_template(img, template, maxroll=8):
best_dist = np.inf
best_shift = (-1, -1)
for y in range(maxroll):
for x in range(maxroll):
# calculate Euclidean distance
dist = np.sqrt(np.sum((img - np.roll(template, (y, x), axis=(0, 1))) ** 2))
if dist < best_dist:
best_dist = dist
best_shift = (y, x)
return (best_dist, best_shift)
# let's test that it works by manually rolling (shifting circurlarly) the same
# image
match_template(np.roll(a_win, (2, 0), axis=(0, 1)), a_win)
# indeed, when we find the correct shift, we got zero distance. it's not so in real images:
best_dist, best_shift = match_template(b_win, a_win)
print(f"{best_dist=}")
print(f"{best_shift=}")
fig, axs = plt.subplots(1, 2, figsize=(9, 4))
axs[0].imshow(np.roll(a_win, best_shift, axis=(0, 1)), cmap=plt.cm.gray)
axs[1].imshow(b_win, cmap=plt.cm.gray)
plt.show()
from scipy.signal import correlate
cross_corr = correlate(b_win - b_win.mean(), a_win - a_win.mean(), method="fft")
# Note that it's approximately twice as large than the original windows, as we
# can shift a_win by a maximum of it's size - 1 horizontally and vertically
# while still maintaining some overlap between the two windows.
print("Size of the correlation map: %d x %d" % cross_corr.shape)
# let's see what the cross-correlation looks like
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
Y, X = np.meshgrid(np.arange(cross_corr.shape[0]), np.arange(cross_corr.shape[1]))
ax.plot_surface(Y, X, cross_corr, cmap=plt.cm.jet, linewidth=0.2)
plt.title("Correlation map — peak is the most probable shift")
plt.show()
# let's see the same correlation map, from above
plt.imshow(cross_corr, cmap=plt.cm.gray)
y, x = np.unravel_index(cross_corr.argmax(), cross_corr.shape)
print(f"{y=}, {x=}")
plt.plot(x, y, "ro")
plt.show()
# the zero-shift peak sits at (win_size - 1, win_size - 1) = (31, 31)
dy, dx = y - (win_size - 1), x - (win_size - 1)
print(f"{dy=}, {dx=}")
def vel_field(curr_frame, next_frame, win_size):
ys = np.arange(0, curr_frame.shape[0], win_size)
xs = np.arange(0, curr_frame.shape[1], win_size)
dys = np.zeros((len(ys), len(xs)))
dxs = np.zeros((len(ys), len(xs)))
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
int_win = curr_frame[y : y + win_size, x : x + win_size]
search_win = next_frame[y : y + win_size, x : x + win_size]
cross_corr = correlate(
search_win - search_win.mean(), int_win - int_win.mean(), method="fft"
)
dys[iy, ix], dxs[iy, ix] = (
np.unravel_index(np.argmax(cross_corr), cross_corr.shape)
- np.array([win_size, win_size])
+ 1
)
# draw velocity vectors from the center of each window
ys = ys + win_size / 2
xs = xs + win_size / 2
return xs, ys, dxs, dys
xs, ys, dxs, dys = vel_field(a, b, 32)
norm_drs = np.sqrt(dxs ** 2 + dys ** 2)
fig, ax = plt.subplots(figsize=(6, 6))
# we need these flips on y since quiver uses a bottom-left origin, while our
# arrays use a top-right origin
ax.quiver(
xs,
ys[::-1],
dxs,
-dys,
norm_drs,
cmap=plt.cm.plasma,
angles="xy",
scale_units="xy",
scale=0.25,
)
ax.set_aspect("equal")
plt.show()
def vel_field_asymmetric_wins(
curr_frame, next_frame, half_int_win_size, half_search_win_size
):
ys = np.arange(half_int_win_size[0], curr_frame.shape[0], 2 * half_int_win_size[0])
xs = np.arange(half_int_win_size[1], curr_frame.shape[1], 2 * half_int_win_size[1])
dys = np.zeros((len(ys), len(xs)))
dxs = np.zeros((len(ys), len(xs)))
for iy, y in enumerate(ys):
for ix, x in enumerate(xs):
int_win = curr_frame[
y - half_int_win_size[0] : y + half_int_win_size[0],
x - half_int_win_size[1] : x + half_int_win_size[1],
]
search_win_y_min = y - half_search_win_size[0]
search_win_y_max = y + half_search_win_size[0]
search_win_x_min = x - half_search_win_size[1]
search_win_x_max = x + half_search_win_size[1]
truncated_search_win = next_frame[
max(0, search_win_y_min) : min(next_frame.shape[0], search_win_y_max),
max(0, search_win_x_min) : min(next_frame.shape[1], search_win_x_max),
]
cross_corr = correlate(
truncated_search_win - np.mean(truncated_search_win),
int_win - np.mean(int_win),
mode="valid",
method="fft",
)
dy, dx = np.unravel_index(np.argmax(cross_corr), cross_corr.shape)
# if the top of the search window got truncated, shift the origin
# up to the top edge of the (non-truncated) search window
if search_win_y_min < 0:
dy += -search_win_y_min
# if the left of the search window got truncated, shift the origin
# over to the left edge of the (non-truncated) search window
if search_win_x_min < 0:
dx += -search_win_x_min
# shift origin to the center of the search window
dy -= half_search_win_size[0] - half_int_win_size[0]
dx -= half_search_win_size[1] - half_int_win_size[1]
dys[iy, ix] = dy
dxs[iy, ix] = dx
return xs, ys, dxs, dys
int_win_size = np.array([32, 32])
print(f"{int_win_size=}")
assert np.all(np.array(a.shape) % int_win_size == 0)
assert np.all(int_win_size % 2 == 0)
half_int_win_size = int_win_size // 2
search_win_size = int_win_size * 2
print(f"{search_win_size=}")
assert np.all(search_win_size % 2 == 0)
half_search_win_size = search_win_size // 2
assert np.all(search_win_size > int_win_size)
print(
"max velocity that can be detected with these window sizes: "
+ f"{half_search_win_size - half_int_win_size}"
)
xs_asym, ys_asym, dxs_asym, dys_asym = vel_field_asymmetric_wins(
a, b, half_int_win_size, half_search_win_size
)
norm_drs_asym = np.sqrt(dxs_asym ** 2 + dys_asym ** 2)
fig, axs = plt.subplots(1, 2, figsize=(12, 6))
axs[0].quiver(
xs,
ys[::-1],
dxs,
-dys,
norm_drs,
cmap=plt.cm.plasma,
angles="xy",
scale_units="xy",
scale=0.25,
)
axs[1].quiver(
xs_asym,
ys_asym[::-1],
dxs_asym,
-dys_asym,
norm_drs_asym,
cmap=plt.cm.plasma,
angles="xy",
scale_units="xy",
scale=0.25,
)
axs[0].set_title(
f"{win_size} x {win_size} int. win. + "
f"{win_size} x {win_size} 0-padded search win."
)
axs[1].set_title(
f"{int_win_size[0]} x {int_win_size[1]} int. win. + "
f"{search_win_size[0]} x {search_win_size[0]} unpadded search win."
)
ax.set_aspect("equal")
plt.show()
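# Optional refinement (a common PIV technique, sketched here as an
# illustration only): three-point Gaussian interpolation around the
# correlation peak yields sub-pixel displacement estimates. It assumes the
# peak is not on the array border and that its neighbors are positive.
def gaussian_subpixel(cross_corr, y, x):
    ln = np.log
    cy, cu, cd = cross_corr[y, x], cross_corr[y - 1, x], cross_corr[y + 1, x]
    dy = (ln(cu) - ln(cd)) / (2 * ln(cu) - 4 * ln(cy) + 2 * ln(cd))
    cx, cl, cr = cross_corr[y, x], cross_corr[y, x - 1], cross_corr[y, x + 1]
    dx = (ln(cl) - ln(cr)) / (2 * ln(cl) - 4 * ln(cx) + 2 * ln(cr))
    return dy, dx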
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We have downloaded some sample images from PIV challenge,
Step2: The two images show the positions of the particles at two different times. We can analyze small regions of interest, called interrogation windows. Typically we can start with a size of 32 x 32 pixels or smaller. Until recently, the fast algorithms used powers of 2, so the historical sizes are always powers of 2
Step3: We can see that the bright pixels moved between the two frames. We can find out the distance that all the particles moved between frame A and frame B using the principles of least squares or correlations, but let's first try to get it manually.
Step4: Let's try to find the best shift algorithmically
Step5: We can draw this as a vector of velocity
Step6: Well, maybe it's not the best match, but it is already better than nothing.
Step7: The image of the correlation map shows the same result that we got by manually looping. We need to shift a_win to give the best possible correlation between the two windows. If the best correlation came from no shift, the result would be (31, 31), the center of symmetry.
Step8: We can get the first velocity field by repeating this analysis for all small windows. Let's take 32 x 32 pixels windows from each image and do the loop
Step9: If you've followed along this far, great! Now you understand the basics.
Step10: Making the search window larger compared to the interrogation window would allow for larger velocities to be detected.
|
3,442
|
<ASSISTANT_TASK:>
Python Code:
# Python language version
from platform import python_version
print('Python version used in this Jupyter Notebook:', python_version())
# Importing a module in Python
import math
# Checking all the methods available in the module
dir(math)
# Using one of the math module's methods
math.sqrt(25)
# Importing just one method from the math module
from math import sqrt
# Using the method
sqrt(9)
# Printing all the methods of the math module
print(dir(math))
# Help for the sqrt method of the math module
help(sqrt)
import random
random.choice(['Maça', 'Banana', 'Laranja'])
random.sample(range(100), 10)
import statistics
dados = [2.75, 1.75, 1.25, 0.25, 0.5, 1.25, 3.5]
statistics.mean(dados)
statistics.median(dados)
import os
os.getcwd()
print(dir(os))
import sys
sys.stdout.write('Teste')
sys.version
print(dir(sys))
# Importing the request module from the urllib package, used to fetch
# URLs into our Python environment
import urllib.request
# The resposta variable stores the connection object for the URL passed
# as a parameter
resposta = urllib.request.urlopen('http://python.org')
# The resposta object
print(resposta)
# Calling the read() method of the resposta object and storing the HTML
# source in the html variable
html = resposta.read()
# Printing the html
print(html)
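# A small follow-up sketch (added example): read() returns bytes, so decoding
# is usually needed before text processing; a `with` block also closes the
# connection automatically.
with urllib.request.urlopen('http://python.org') as response:
    text = response.read().decode('utf-8')
print(text[:100])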
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Modules and Packages
|
3,443
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append('..')
import socnet as sn
sn.graph_width = 320
sn.graph_height = 180
g = sn.load_graph('4-dijkstra.gml', has_pos=True)
for n, m in g.edges():
g.edge[n][m]['label'] = g.edge[n][m]['c']
sn.show_graph(g, elab=True)
class Heap(object):
def __init__(self, g):
self.g = g
self.h = []
self.indices = {}
def _parent(self, i):
return (i - 1) // 2
def _left(self, i):
return 2 * i + 1
def _right(self, i):
return 2 * i + 2
def _key(self, i):
return self.g.node[self.h[i]]['d']
def _swap(self, i, j):
self.h[i], self.h[j] = self.h[j], self.h[i]
self.indices[self.h[i]] = i
self.indices[self.h[j]] = j
def empty(self):
return len(self.h) == 0
def fix(self, n):
i = self.indices[n]
p = self._parent(i)
while i > 0 and self._key(p) > self._key(i):
self._swap(i, p)
i = p
p = self._parent(i)
def put(self, n):
self.indices[n] = len(self.h)
self.h.append(n)
self.fix(n)
def get(self):
n = self.h[0]
self._swap(0, len(self.h) - 1)
del self.h[-1]
del self.indices[n]
i = 0
while True:
l = self._left(i)
r = self._right(i)
c = i
if l < len(self.h) and self._key(l) < self._key(c):
c = l
if r < len(self.h) and self._key(r) < self._key(c):
c = r
if i == c:
break
self._swap(i, c)
i = c
return n
# your answer
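# One possible sketch (illustrative; uses the Heap above, the edge cost
# attribute 'c' from this graph, and the node attribute 'd' that Heap._key
# already assumes):
def dijkstra(g, s):
    for n in g.nodes():
        g.node[n]['d'] = float('inf')
    g.node[s]['d'] = 0
    heap = Heap(g)
    for n in g.nodes():
        heap.put(n)
    while not heap.empty():
        n = heap.get()
        for m in g.neighbors(n):
            d = g.node[n]['d'] + g.edge[n][m]['c']
            if d < g.node[m]['d']:
                g.node[m]['d'] = d
                heap.fix(m)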
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, let's configure the visual properties
Step2: Finally, let's load and visualize a graph
Step3: Dijkstra's algorithm
Step4: Exercise
|
3,444
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import matplotlib.colors as colors
filename = '../facies_vectors.csv'
training_data = pd.read_csv(filename)
training_data
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')
training_data.info()
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00','#1B4F72',
'#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS','WS', 'D','PS', 'BS']
facies_counts = training_data['Facies'].value_counts().sort_index()
facies_counts.index = facies_labels
facies_counts.plot(kind='bar',color=facies_colors,title='Distribution of Training Data by Facies')
sns.heatmap(training_data.corr(), vmax=1.0, square=True)
training_data.describe()
import xgboost as xgb
import numpy as np
from sklearn.metrics import confusion_matrix, f1_score
from classification_utilities import display_cm, display_adj_cm
from sklearn.model_selection import GridSearchCV
X_train = training_data.drop(['Facies', 'Well Name','Formation','Depth'], axis = 1 )
Y_train = training_data['Facies' ] - 1
dtrain = xgb.DMatrix(X_train, Y_train)
def accuracy(conf):
total_correct = 0.
nb_classes = conf.shape[0]
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
acc = total_correct/sum(sum(conf))
return acc
adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]])
def accuracy_adjacent(conf, adjacent_facies):
nb_classes = conf.shape[0]
total_correct = 0.
for i in np.arange(0,nb_classes):
total_correct += conf[i][i]
for j in adjacent_facies[i]:
total_correct += conf[i][j]
return total_correct / sum(sum(conf))
# Proposed Initial Model
xgb1 = xgb.XGBClassifier( learning_rate =0.1, n_estimators=200, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.6,
colsample_bytree=0.6, reg_alpha=0, reg_lambda=1, objective='multi:softmax',
nthread=4, scale_pos_weight=1, seed=100)
#Fit the algorithm on the data
xgb1.fit(X_train, Y_train,eval_metric='merror')
#Predict training set:
predictions = xgb1.predict(X_train)
#Print model report
# Confusion Matrix
conf = confusion_matrix(Y_train, predictions)
# Print Results
print ("\nModel Report")
print ("-Accuracy: %.6f" % ( accuracy(conf) ))
print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) ))
print ("\nConfusion Matrix")
display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True)
# Print Feature Importance
feat_imp = pd.Series(xgb1.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
# Cross Validation parameters
cv_folds = 10
rounds = 100
xgb_param_1 = xgb1.get_xgb_params()
xgb_param_1['num_class'] = 9
# Perform cross-validation
cvresult1 = xgb.cv(xgb_param_1, dtrain, num_boost_round=xgb_param_1['n_estimators'],
stratified = True, nfold=cv_folds, metrics='merror', early_stopping_rounds=rounds)
print ("\nCross Validation Training Report Summary")
print (cvresult1.head())
print (cvresult1.tail())
print("Parameter optimization")
grid_search1 = GridSearchCV(xgb1,{'learning_rate':[0.05,0.01,0.1,0.2] , 'n_estimators':[200,400,600,800]},
scoring='accuracy' , n_jobs = 4)
grid_search1.fit(X_train,Y_train)
print("Best Set of Parameters")
grid_search1.grid_scores_, grid_search1.best_params_, grid_search1.best_score_
# Proposed Model with optimized learning rate and number of boosted trees to fit
xgb2 = xgb.XGBClassifier( learning_rate =0.01, n_estimators=400, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.6,
colsample_bytree=0.6, reg_alpha=0, reg_lambda=1, objective='multi:softmax',
nthread=4, scale_pos_weight=1, seed=100)
#Fit the algorithm on the data
xgb2.fit(X_train, Y_train,eval_metric='merror')
#Predict training set:
predictions = xgb2.predict(X_train)
#Print model report
# Confusion Matrix
conf = confusion_matrix(Y_train, predictions )
# Print Results
print ("\nModel Report")
print ("-Accuracy: %.6f" % ( accuracy(conf) ))
print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) ))
# Confusion Matrix
print ("\nConfusion Matrix")
display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True)
# Print Feature Importance
feat_imp = pd.Series(xgb2.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
# Cross Validation parameters
cv_folds = 10
rounds = 100
xgb_param_2 = xgb2.get_xgb_params()
xgb_param_2['num_class'] = 9
# Perform cross-validation
cvresult2 = xgb.cv(xgb_param_2, dtrain, num_boost_round=xgb_param_2['n_estimators'],
stratified = True, nfold=cv_folds, metrics='merror', early_stopping_rounds=rounds)
print ("\nCross Validation Training Report Summary")
print (cvresult2.head())
print (cvresult2.tail())
print("Parameter optimization")
grid_search2 = GridSearchCV(xgb2,{'reg_alpha':[0, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10], 'reg_lambda':[0, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10] },
scoring='accuracy' , n_jobs = 4)
grid_search2.fit(X_train,Y_train)
print("Best Set of Parameters")
grid_search2.cv_results_, grid_search2.best_params_, grid_search2.best_score_
# Proposed Model with optimized regularization
xgb3 = xgb.XGBClassifier( learning_rate =0.01, n_estimators=400, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.6,
colsample_bytree=0.6, reg_alpha=0.1, reg_lambda=0.5, objective='multi:softmax',
nthread=4, scale_pos_weight=1, seed=100)
#Fit the algorithm on the data
xgb3.fit(X_train, Y_train,eval_metric='merror')
#Predict training set:
predictions = xgb3.predict(X_train)
#Print model report
# Confusion Matrix
conf = confusion_matrix(Y_train, predictions )
# Print Results
print ("\nModel Report")
print ("-Accuracy: %.6f" % ( accuracy(conf) ))
print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) ))
# Confusion Matrix
print ("\nConfusion Matrix")
display_cm(conf, facies_labels, display_metrics=True, hide_zeros=True)
# Print Feature Importance
feat_imp = pd.Series(xgb3.get_booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
print("Parameter optimization")
grid_search3 = GridSearchCV(xgb3,{'max_depth':[2, 5, 8], 'gamma':[0, 1], 'subsample':[0.4, 0.6, 0.8],'colsample_bytree':[0.4, 0.6, 0.8] },
scoring='accuracy' , n_jobs = 4)
grid_search3.fit(X_train,Y_train)
print("Best Set of Parameters")
grid_search3.cv_results_, grid_search3.best_params_, grid_search3.best_score_
# Load data
filename = '../facies_vectors.csv'
data = pd.read_csv(filename)
# Change to category data type
data['Well Name'] = data['Well Name'].astype('category')
data['Formation'] = data['Formation'].astype('category')
# Leave one well out for cross validation
well_names = data['Well Name'].unique()
f1=[]
for i in range(len(well_names)):
# Split data for training and testing
X_train = data.drop(['Facies', 'Formation','Depth'], axis = 1 )
Y_train = data['Facies' ] - 1
train_X = X_train[X_train['Well Name'] != well_names[i] ]
train_Y = Y_train[X_train['Well Name'] != well_names[i] ]
test_X = X_train[X_train['Well Name'] == well_names[i] ]
test_Y = Y_train[X_train['Well Name'] == well_names[i] ]
train_X = train_X.drop(['Well Name'], axis = 1 )
test_X = test_X.drop(['Well Name'], axis = 1 )
# Final recommended model based on the extensive parameters search
model_final = xgb.XGBClassifier( learning_rate =0.01, n_estimators=400, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.6, reg_alpha=0.1, reg_lambda=0.5,
colsample_bytree=0.6, objective='multi:softmax',
nthread=4, scale_pos_weight=1, seed=100)
# Train the model based on training data
model_final.fit( train_X , train_Y , eval_metric = 'merror' )
# Predict on the test set
predictions = model_final.predict(test_X)
# Print report
print ("\n------------------------------------------------------")
print ("Validation on the leaving out well " + well_names[i])
conf = confusion_matrix( test_Y, predictions, labels = np.arange(9) )
print ("\nModel Report")
print ("-Accuracy: %.6f" % ( accuracy(conf) ))
print ("-Adjacent Accuracy: %.6f" % ( accuracy_adjacent(conf, adjacent_facies) ))
print ("-F1 Score: %.6f" % ( f1_score ( test_Y , predictions , labels = np.arange(9), average = 'weighted' ) ))
f1.append(f1_score ( test_Y , predictions , labels = np.arange(9), average = 'weighted' ))
facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
'WS', 'D','PS', 'BS']
print ("\nConfusion Matrix Results")
from classification_utilities import display_cm, display_adj_cm
display_cm(conf, facies_labels,display_metrics=True, hide_zeros=True)
print ("\n------------------------------------------------------")
print ("Final Results")
print ("-Average F1 Score: %6f" % (sum(f1)/(1.0*len(f1))))
# Load test data
test_data = pd.read_csv('../validation_data_nofacies.csv')
test_data['Well Name'] = test_data['Well Name'].astype('category')
X_test = test_data.drop(['Formation', 'Well Name', 'Depth'], axis=1)
# Predict facies of unclassified data
Y_predicted = model_final.predict(X_test)
test_data['Facies'] = Y_predicted + 1
# Store the prediction
test_data.to_csv('Prediction1.csv')
test_data
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Preparation and Model Selection
Step2: The accuracy function and accuracy_adjacent function are defined in the following to quantify the prediction correctness.
Step3: Initial model
Step4: The typical range for the learning rate is around 0.01~0.2, so we vary the learning rate a bit and, at the same time, scan over the number of boosted trees to fit. This will take a little bit of time to finish.
Step5: It seems that we need to adjust the learning rate and make it smaller, which could help to reduce overfitting in my opinion. The number of boosted trees to fit also needs to be updated.
|
3,445
|
<ASSISTANT_TASK:>
Python Code:
#INPUT
#folder = 'sample' # create this folder and place the exported file
folder = 'all'
#export_filename = 'English_Vocabulary_Sample.csv'
export_filename = 'English_Vocabulary3.csv'
Generate a dictionary from Anki export file.
Note:
Only GRE words have proper Serial No. Rest words have 99999 as Serial No.
word_db = []
with open(folder + os.sep + export_filename, mode='r') as infile:
reader = csv.reader(infile)
Serial_No = 1
for line in reader:
row = {}
row['Word'] = line[0]
row['Word audio'] = line[1]
aud_files = []
for i in range(len(row['Word audio'])):
if (row['Word audio'][i]==':'):
aud_file = ""
for j in range(i+1, len(row['Word audio'])):
if (row['Word audio'][j]==']'):
break
aud_file += row['Word audio'][j]
aud_files.append(aud_file.strip(' '))
row['Word audio2'] = aud_files # list of Word audio files
row['POS'] = line[2]
row['Other form'] = line[3]
row['Meaning'] = line[4]
row['Example'] = line[5]
row['Picture'] = line[6]
row['Picture'] = row['Picture']
img_files = []
for i in range(len(row['Picture'])):
if (row['Picture'][i]=='='):
img_file = ""
for j in range(i+1, len(row['Picture'])):
if (row['Picture'][j]=='/'):
break
img_file += row['Picture'][j]
img_files.append(img_file.strip(' '))
row['Picture2'] = img_files # list of images files
row['Mnemonic'] = line[7]
row['Synonym'] = line[8]
row['Antonym'] = line[9]
row['Note'] = line[10]
#row['Note'] = row['Note'].replace('"', '')
img_files = []
for i in range(len(row['Note'])):
if (row['Note'][i]=='='):
img_file = ""
for j in range(i+1, len(row['Note'])):
if (row['Note'][j]=='/'):
break
img_file += row['Note'][j]
img_files.append(img_file.strip(' '))
row['Note Image'] = img_files # list of images files in Note
row['Multimedia Example'] = line[11]
mul_files = []
for i in range(len(row['Multimedia Example'])):
if (row['Multimedia Example'][i]==':'):
mul_file = ""
for j in range(i+1, len(row['Multimedia Example'])):
if (row['Multimedia Example'][j]==']'):
break
mul_file += row['Multimedia Example'][j]
mul_files.append(mul_file.strip(' '))
row['Multimedia Example2'] = mul_files # list of Multimedia Example files
row['Tags'] = line[12]
row['Tags2'] = row['Tags'].split(' ') # list of tags
if 'EV_GRE' in row['Tags2']:
row['Word No'] = Serial_No
Serial_No += 1
else:
row['Word No'] = 99999 # Serial No for non GRE words
word_db.append(row)
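# A more compact alternative (a sketch) to the character-scanning loops above:
# Anki media references typically look like [sound:file.mp3] and <img src="file.jpg"/>,
# so a regular expression could extract the filenames in one call, e.g.
# import re
# aud_files = re.findall(r'\[sound:([^\]]+)\]', line[1])
# img_files = re.findall(r'src="?([^"/>]+)', line[6])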
Writes out the generated word_db
with open(folder + os.sep + 'word_db.csv','wb') as f:
w = csv.writer(f)
w.writerow(word_db[0].keys()) # keys in top row
for i in word_db:
w.writerow(i.values())
Updating word_db by Google Cloud Storage links
https://storage.googleapis.com/staging.my-first-cloud-app-gtatiya.appspot.com/FlashVocab/{filename}
for i in range(len(word_db)):
links = []
for IMA_file in word_db[i]['Picture2']:
link = "https://storage.googleapis.com/staging.my-first-cloud-app-gtatiya.appspot.com/FlashVocab/"+IMA_file
links.append(link)
word_db[i]['Picture2 GCS links'] = links
Writes out the generated word_db
with open(folder + os.sep + 'word_db_links_GCS.csv','wb') as f:
w = csv.writer(f)
w.writerow(word_db[0].keys()) # keys in top row
for i in word_db:
w.writerow(i.values())
This function sorts the dictionary based on keys and writes a CSV file
def sort_dict_column(filename, dict_db):
with open(filename,'wb') as f:
w = csv.writer(f)
temp = sorted(dict_db[0].keys())
w.writerow(temp)
for i in dict_db:
i = collections.OrderedDict(sorted(i.items()))
w.writerow((i.values()))
#sort_dict_column(folder + os.sep + 'word_db_links_export_sorted.csv', word_db)
Generate a dictionary from word_db for GRE words based on tags
word_gre_db = []
for i in word_db:
if 'EV_GRE' in i['Tags2']:
word_gre_db.append(i)
Writes out the generated word_gre_db
word_db_links_export_gre.csv
word_db_links_GCP_gre.csv
with open(folder + os.sep + 'word_db_links_GCP_gre.csv','wb') as f:
w = csv.writer(f)
w.writerow(word_gre_db[0].keys()) # keys in top row
for i in word_gre_db:
w.writerow(i.values())
Getting word_db ready for FlashVocab SQLlite export
https://stackoverflow.com/questions/46835197/python-list-of-dictionary-stores-only-last-appended-value-in-every-iteration
words_tem = copy.deepcopy(word_gre_db)
#words_tem = copy.deepcopy(word_db)
word_db2 = []
card_key = 1
for i in words_tem:
del i['Mnemonic'] # delete Mnemonic because it has '"' character
del i['Note'] # I am not going to use Note
#del i['Note Image export links'] # I am not going to use Note
del i['Other form'] # I am not going to use Other form
del i['Picture'] # I am not going to use Picture
del i['Word audio2'] # I am not going to use
del i['Tags'] # I am not going to use
del i['Tags2'] # I am not going to use
del i['Picture2'] # I am not going to use
del i['Multimedia Example'] # I am not going to use
#del i['Multimedia Example2 links'] # I am not going to use
#del i['Note Image links'] # I am not going to use
#del i['Word audio2 export links'] # I am not going to use
#del i['Picture2 links'] # I am not going to use
#del i['Multimedia Example2 export links'] # I am not going to use
del i['Note Image'] # I am not going to use
del i['Word audio'] # I am not going to use
#del i['Word audio2 links'] # I am not going to use
del i['Multimedia Example2'] # I am not going to use
#del i['Picture2 export links']
for j in range(1, 4):
#i = dict(i) # shallow copy
i = copy.deepcopy(i)
i['Card Type'] = 'Type '+str(j)
i['Card Key'] = card_key
i['Schedule Score'] = 0
word_db2.append(i)
card_key += 1
Writes out the generated word_db2
word_db2_links_GCS_sorted.csv
word_db2_links_GCS_gre_sorted.csv
sort_dict_column(folder + os.sep + 'word_db2_links_GCS_gre_sorted.csv', word_db2)
#sort_dict_column(folder + os.sep + 'word_db2_links_GCS_gre_sorted.csv', word_db2)
This it to be used in word_db.txt in MyWordDataDisplay project
words_tem = copy.deepcopy(word_db2)
java_commands = open('java_commands.txt', 'w')
java_commands.close()
for i in words_tem:
java_commands = open('java_commands.txt', 'a') # append to the file created
i = collections.OrderedDict(sorted(i.items()))
for k, v in i.items():
java_commands.write(str(v)+'\n')
java_commands.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: CSV export file manipulations
Step5: Copying media files to folders
Step7: Sort the columns
Step13: GRE words only
|
3,446
|
<ASSISTANT_TASK:>
Python Code:
import logging
from conf import LisaLogging
LisaLogging.setup()
# Execute this cell to enable verbose SSH commands
logging.getLogger('ssh').setLevel(logging.DEBUG)
# Other python modules required by this notebook
import json
import os
# Setup a target configuration
conf = {
# Target is localhost
"platform" : 'linux',
"board" : "juno",
# Login credentials
"host" : "192.168.0.1",
"username" : "root",
"password" : "",
# Binary tools required to run this experiment
# These tools must be present in the tools/ folder for the architecture
"tools" : ['rt-app', 'taskset', 'trace-cmd'],
# Comment the following line to force rt-app calibration on your target
# "rtapp-calib" : {
# "0": 355, "1": 138, "2": 138, "3": 355, "4": 354, "5": 354
# },
# FTrace events end buffer configuration
"ftrace" : {
"events" : [
"sched_switch",
"sched_wakeup",
"sched_wakeup_new",
"sched_contrib_scale_f",
"sched_load_avg_cpu",
"sched_load_avg_task",
"sched_tune_config",
"sched_tune_tasks_update",
"sched_tune_boostgroup_update",
"sched_tune_filter",
"sched_boost_cpu",
"sched_boost_task",
"sched_energy_diff",
"cpu_frequency",
"cpu_capacity",
],
"buffsize" : 10240
},
# Where results are collected
"results_dir" : "LisaInANutshell",
# Devlib module required (or not required)
'modules' : [ "cpufreq", "cgroups", "cpufreq" ],
#"exclude_modules" : [ "hwmon" ],
}
# Support to access the remote target
from env import TestEnv
# Initialize a test environment using:
# the provided target configuration (my_target_conf)
# the provided test configuration (my_test_conf)
te = TestEnv(conf)
target = te.target
print "DONE"
# Enable Energy-Aware scheduler
target.execute("echo ENERGY_AWARE > /sys/kernel/debug/sched_features");
# Check which sched_feature are enabled
sched_features = target.read_value("/sys/kernel/debug/sched_features");
print "sched_features:"
print sched_features
# It's possible also to run custom script
# my_script = target.get_installed()
# target.execute(my_script)
target.cpufreq.set_all_governors('sched');
# Check which governor is enabled on each CPU
enabled_governors = target.cpufreq.get_all_governors()
print enabled_governors
cpuset = target.cgroups.controller('cpuset')
# Configure a big partition
cpuset_bigs = cpuset.cgroup('/big')
cpuset_bigs.set(cpus=te.target.bl.bigs, mems=0)
# Configure a LITTLE partition
cpuset_littles = cpuset.cgroup('/LITTLE')
cpuset_littles.set(cpus=te.target.bl.littles, mems=0)
# Dump the configuraiton of each controller
cgroups = cpuset.list_all()
for cgname in cgroups:
cgroup = cpuset.cgroup(cgname)
attrs = cgroup.get()
cpus = attrs['cpus']
print '{}:{:<15} cpus: {}'.format(cpuset.kind, cgroup.name, cpus)
# RTApp configurator for generation of PERIODIC tasks
from wlgen import RTA, Periodic, Ramp
# Light workload
light = Periodic(
duty_cycle_pct = 10,
duration_s = 3,
period_ms = 32,
)
# Ramp workload
ramp = Ramp(
start_pct=10,
end_pct=60,
delta_pct=20,
time_s=0.5,
period_ms=16
)
# Heavy workload
heavy = Periodic(
duty_cycle_pct=60,
duration_s=3,
period_ms=16
)
# Composed workload
lrh_task = light + ramp + heavy
# Create a new RTApp workload generator using the calibration values
# reported by the TestEnv module
rtapp = RTA(target, 'test', calibration=te.calibration())
# Configure this RTApp instance to:
rtapp.conf(
# 1. generate a "profile based" set of tasks
kind = 'profile',
# 2. define the "profile" of each task
params = {
# 3. Composed task
'task_lrh': lrh_task.get(),
},
#loadref='big',
loadref='LITTLE',
run_dir=target.working_directory
);
# Inspect the JSON file used to run the application
with open('./test_00.json', 'r') as fh:
rtapp_json = json.load(fh)
logging.info('Generated RTApp JSON file:')
print json.dumps(rtapp_json, indent=4, sort_keys=True)
def execute(te, wload, res_dir):
logging.info('# Setup FTrace')
te.ftrace.start()
logging.info('## Start energy sampling')
te.emeter.reset()
logging.info('### Start RTApp execution')
wload.run(out_dir=res_dir)
logging.info('## Read energy consumption: %s/energy.json', res_dir)
nrg_report = te.emeter.report(out_dir=res_dir)
logging.info('# Stop FTrace')
te.ftrace.stop()
trace_file = os.path.join(res_dir, 'trace.dat')
logging.info('# Save FTrace: %s', trace_file)
te.ftrace.get_trace(trace_file)
logging.info('# Save platform description: %s/platform.json', res_dir)
plt, plt_file = te.platform_dump(res_dir)
logging.info('# Report collected data:')
logging.info(' %s', res_dir)
!tree {res_dir}
return nrg_report, plt, plt_file, trace_file
nrg_report, plt, plt_file, trace_file = execute(te, rtapp, te.res_dir)
import pandas as pd
df = pd.DataFrame(list(nrg_report.channels.iteritems()),
columns=['Cluster', 'Energy'])
df = df.set_index('Cluster')
df
# Show the collected platform description
with open(os.path.join(te.res_dir, 'platform.json'), 'r') as fh:
platform = json.load(fh)
print json.dumps(platform, indent=4)
logging.info('LITTLE cluster max capacity: %d',
platform['nrg_model']['little']['cpu']['cap_max'])
# Let's look at the trace using kernelshark...
trace_file = te.res_dir + '/trace.dat'
!kernelshark {trace_file} 2>/dev/null
# Suport for FTrace events parsing and visualization
import trappy
# NOTE: The interactive trace visualization is available only if you run
# the workload to generate a new trace-file
trappy.plotter.plot_trace(trace_file)
# Load the LISA::Trace parsing module
from trace import Trace
# Define which event we are interested into
trace = Trace(te.platform, te.res_dir, [
"sched_switch",
"sched_load_avg_cpu",
"sched_load_avg_task",
"sched_boost_cpu",
"sched_boost_task",
"cpu_frequency",
"cpu_capacity",
])
# Let's have a look at the set of events collected from the trace
ftrace = trace.ftrace
logging.info("List of events identified in the trace:")
for event in ftrace.class_definitions.keys():
logging.info(" %s", event)
# Trace events are converted into tables, let's have a look at one
# of such tables
df = trace.data_frame.trace_event('sched_load_avg_task')
df.head()
# Simple selection of events based on conditional values
#df[df.comm == 'task_lrh'].head()
# Simple selection of specific signals
#df[df.comm == 'task_lrh'][['util_avg']].head()
# Simple statistics reporting
#df[df.comm == 'task_lrh'][['util_avg']].describe()
# Signals can be easily plot using the ILinePlotter
trappy.ILinePlot(
# FTrace object
ftrace,
# Signals to be plotted
signals=[
'sched_load_avg_cpu:util_avg',
'sched_load_avg_task:util_avg'
],
# # Generate one plot for each value of the specified column
# pivot='cpu',
# # Generate only plots which satisfy these filters
# filters={
# 'comm': ['task_lrh'],
# 'cpu' : [0,5]
# },
# Formatting style
per_line=2,
drawstyle='steps-post',
marker = '+'
).view()
from bart.sched.SchedMultiAssert import SchedAssert
# Create an object to get/assert scheduling behaviors
sa = SchedAssert(ftrace, te.topology, execname='task_lrh')
# Check the residency of a task on the LITTLE cluster
print "Task residency [%] on LITTLE cluster:",\
sa.getResidency(
"cluster",
te.target.bl.littles,
percent=True
)
# Check on which CPU the task start its execution
print "Task initial CPU:",\
sa.getFirstCpu()
import operator
# Define the time window where we want focus our assertions
start_s = sa.getStartTime()
little_residency_window = (start_s, start_s + 10)
# Defined the expected task residency
EXPECTED_RESIDENCY_PCT=99
result = sa.assertResidency(
"cluster",
te.target.bl.littles,
EXPECTED_RESIDENCY_PCT,
operator.ge,
window=little_residency_window,
percent=True
)
print "Task running {} [%] of its time on LITTLE? {}"\
.format(EXPECTED_RESIDENCY_PCT, result)
result = sa.assertFirstCpu(te.target.bl.bigs)
print "Task starting on a big CPU? {}".format(result)
# Focus on sched_switch events
df = ftrace.sched_switch.data_frame
# # Select only interesting columns
# df = df.ix[:,'next_comm':'prev_state']
# # Group sched_switch event by task switching into the CPU
# df = df.groupby('next_pid').describe(include=['object'])
# df = df.unstack()
# # Sort sched_switch events by number of time a task switch into the CPU
# df = df['next_comm'].sort_values(by=['count'], ascending=False)
df.head()
# # Get topmost task name and PID
# most_switching_pid = df.index[1]
# most_switching_task = df.values[1][2]
# task_name = "{}:{}".format(most_switching_pid, most_switching_task)
# # Print result
# logging.info("The most swithing task is: [%s]", task_name)
# Focus on cpu_frequency events for CPU0
df = ftrace.cpu_frequency.data_frame
df = df[df.cpu == 0]
# # Compute the residency on each OPP before switching to the next one
# df.loc[:,'start'] = df.index
# df.loc[:,'delta'] = (df['start'] - df['start'].shift()).fillna(0).shift(-1)
# # Group by frequency and sum-up the deltas
# freq_residencies = df.groupby('frequency')['delta'].sum()
# logging.info("Residency time per OPP:")
# df = pd.DataFrame(freq_residencies)
df.head()
# # Compute the relative residency time
# tot = sum(freq_residencies)
# #df = df.apply(lambda delta : 100*delta/tot)
# for f in freq_residencies.index:
# logging.info("Freq %10dHz : %5.1f%%", f, 100*freq_residencies[f]/tot)
# Plot residency time
import matplotlib.pyplot as plt
# Enable generation of Notebook emebedded plots
%matplotlib inline
fig, axes = plt.subplots(1, 1, figsize=(16, 5));
df.plot(kind='bar', ax=axes);
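# A sketch re-enabling the commented-out OPP residency computation above:
# time spent at each frequency before the next switch, then the relative share.
df = df.copy()
df.loc[:, 'start'] = df.index
df.loc[:, 'delta'] = (df['start'] - df['start'].shift()).fillna(0).shift(-1)
freq_residencies = df.groupby('frequency')['delta'].sum()
print 100.0 * freq_residencies / freq_residencies.sum()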
from perf_analysis import PerfAnalysis
# Full analysis function
def analysis(t_min=None, t_max=None):
test_dir = te.res_dir
platform_json = '{}/platform.json'.format(test_dir)
trace_file = '{}/trace.dat'.format(test_dir)
# Load platform description data
with open(platform_json, 'r') as fh:
platform = json.load(fh)
# Load RTApp Performance data
pa = PerfAnalysis(test_dir)
logging.info("Loaded performance data for tasks: %s", pa.tasks())
# Load Trace data
#events = my_tests_conf['ftrace']['events']
events = [
"sched_switch",
"sched_contrib_scale_f",
"sched_load_avg_cpu",
"sched_load_avg_task",
"cpu_frequency",
"cpu_capacity",
]
trace = Trace(platform, test_dir, events)
# Define time ranges for all the temporal plots
trace.setXTimeRange(t_min, t_max)
# Tasks performances plots
for task in pa.tasks():
pa.plotPerf(task)
# Tasks plots
trace.analysis.tasks.plotTasks(pa.tasks())
# Cluster and CPUs plots
trace.analysis.frequency.plotClusterFrequencies()
analysis()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2: Commands execution on remote target
Step3: Example of frameworks configuration on remote target
Step4: Create a big/LITTLE partition using CGroups
Step5:
Step6:
Step7: Example of energy collected data
Step8: Example of platform description
Step9:
Step10: Using the TRAPpy Trace Plotter
Step11: Example of Trace Analysis
Step12:
Step13: Example of Behavioral Analysis
Step14: Get tasks behaviors
Step15: Check for expected behaviors
Step16: Examples of Data analysis
Step17: What are the relative residency on different OPPs?
Step18: Example of Custom Plotting
|
3,447
|
<ASSISTANT_TASK:>
Python Code:
fifteen_factorial = 15*14*13*12*11*10*9*8*7*6*5*4*3*2*1
print(fifteen_factorial)
import math
print(math.factorial(15))
print("Result correct?", math.factorial(15) == fifteen_factorial)
print(math.factorial(5), math.sqrt(2*math.pi)*5**(5+0.5)*math.exp(-5))
print(math.factorial(10), math.sqrt(2*math.pi)*10**(10+0.5)*math.exp(-10))
print(math.factorial(15), math.sqrt(2*math.pi)*15**(15+0.5)*math.exp(-15))
print(math.factorial(20), math.sqrt(2*math.pi)*20**(20+0.5)*math.exp(-20))
print("Absolute differences:")
print(math.factorial(5) - math.sqrt(2*math.pi)*5**(5+0.5)*math.exp(-5))
print(math.factorial(10) - math.sqrt(2*math.pi)*10**(10+0.5)*math.exp(-10))
print(math.factorial(15) - math.sqrt(2*math.pi)*15**(15+0.5)*math.exp(-15))
print(math.factorial(20) - math.sqrt(2*math.pi)*20**(20+0.5)*math.exp(-20))
print("Relative differences:")
print((math.factorial(5) - math.sqrt(2*math.pi)*5**(5+0.5)*math.exp(-5)) / math.factorial(5))
print((math.factorial(10) - math.sqrt(2*math.pi)*10**(10+0.5)*math.exp(-10)) / math.factorial(10))
print((math.factorial(15) - math.sqrt(2*math.pi)*15**(15+0.5)*math.exp(-15)) / math.factorial(15))
print((math.factorial(20) - math.sqrt(2*math.pi)*20**(20+0.5)*math.exp(-20)) / math.factorial(20))
def cuboid_volume(a, b, c):
Compute the volume of a cuboid with edge lengths a, b, c.
Volume is abc. Only makes sense if all are non-negative.
Parameters
----------
a : float
Edge length 1
b : float
Edge length 2
c : float
Edge length 3
Returns
-------
volume : float
The volume a*b*c
if (a < 0.0) or (b < 0.0) or (c < 0.0):
print("Negative edge length makes no sense!")
return 0
return a*b*c
print(cuboid_volume(1,1,1))
print(cuboid_volume(1,2,3.5))
print(cuboid_volume(0,1,1))
print(cuboid_volume(2,-1,1))
def fall_time(H):
Give the time in seconds for an object to fall to the ground
from H metres.
Parameters
----------
H : float
Starting height (metres)
Returns
-------
T : float
Fall time (seconds)
from math import sqrt
from scipy.constants import g
if (H < 0):
print("Negative height makes no sense!")
return 0
return sqrt(2.0*H/g)
print(fall_time(1))
print(fall_time(10))
print(fall_time(0))
print(fall_time(-1))
def triangle_area(a, b, c):
Compute the area of a triangle with edge lengths a, b, c.
Area is sqrt(s (s-a) (s-b) (s-c)).
s is (a+b+c)/2.
Only makes sense if all are non-negative.
Parameters
----------
a : float
Edge length 1
b : float
Edge length 2
c : float
Edge length 3
Returns
-------
area : float
The triangle area.
from math import sqrt
if (a < 0.0) or (b < 0.0) or (c < 0.0):
print("Negative edge length makes no sense!")
return 0
s = 0.5 * (a + b + c)
return sqrt(s * (s-a) * (s-b) * (s-c))
print(triangle_area(1,1,1)) # Equilateral; answer sqrt(3)/4 ~ 0.433
print(triangle_area(3,4,5)) # Right triangle; answer 6
print(triangle_area(1,1,0)) # Not a triangle; answer 0
print(triangle_area(-1,1,1)) # Not a triangle; exception or 0.
from math import sqrt
x = 1.0
y = 1.0 + 1e-14 * sqrt(3.0)
print("The calculation gives {}".format(1e14*(y-x)))
print("The result should be {}".format(sqrt(3.0)))
a = 1e-3
b = 1e3
c = a
formula1_n3_plus = (-b + sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula1_n3_minus = (-b - sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula2_n3_plus = (2.0*c)/(-b + sqrt(b**2 - 4.0*a*c))
formula2_n3_minus = (2.0*c)/(-b - sqrt(b**2 - 4.0*a*c))
print("For n=3, first formula, solutions are {} and {}.".format(formula1_n3_plus,
formula1_n3_minus))
print("For n=3, second formula, solutions are {} and {}.".format(formula2_n3_plus,
formula2_n3_minus))
a = 1e-4
b = 1e4
c = a
formula1_n4_plus = (-b + sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula1_n4_minus = (-b - sqrt(b**2 - 4.0*a*c))/(2.0*a)
formula2_n4_plus = (2.0*c)/(-b + sqrt(b**2 - 4.0*a*c))
formula2_n4_minus = (2.0*c)/(-b - sqrt(b**2 - 4.0*a*c))
print("For n=4, first formula, solutions are {} and {}.".format(formula1_n4_plus,
formula1_n4_minus))
print("For n=4, second formula, solutions are {} and {}.".format(formula2_n4_plus,
formula2_n4_minus))
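# A numerically stable variant (a sketch): take the root formula that adds
# quantities of the same sign, then recover the other root from the product
# relation x_plus * x_minus = c/a, avoiding the cancellation seen above.
def stable_roots(a, b, c):
    from math import sqrt, copysign
    q = -0.5 * (b + copysign(sqrt(b**2 - 4.0*a*c), b))
    return q / a, c / q
print("Stable n=4 roots: {} and {}".format(*stable_roots(1e-4, 1e4, 1e-4)))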
def g(f, X, delta):
Approximate the derivative of a given function at a point.
Parameters
----------
f : function
Function to be differentiated
X : real
Point at which the derivative is evaluated
delta : real
Step length
Returns
-------
g : real
Approximation to the derivative
return (f(X+delta) - f(X)) / delta
from math import exp
for n in range(1, 8):
print("For n={}, the approx derivative is {}.".format(n, g(exp, 0.0, 10**(-2.0*n))))
def isprime(n):
Checks to see if an integer is prime.
Parameters
----------
n : integer
Number to check
Returns
-------
isprime : Boolean
If n is prime
# No number less than 2 can be prime
if n < 2:
return False
# We only need to check for divisors up to sqrt(n)
for m in range(2, int(n**0.5)+1):
if n%m == 0:
return False
# If we've got this far, there are no divisors.
return True
for n in range(50):
if isprime(n):
print("Function says that {} is prime.".format(n))
n = 2
while (not isprime(n)) or (isprime(2**n-1)):
n += 1
print("The first n such that 2^n-1 is not prime is {}.".format(n))
for n in range(2, 41):
if isprime(n) and isprime(2**n-1):
print("n={} is such that 2^n-1 is prime.".format(n))
def prime_factors(n):
Generate all the prime factors of n.
Parameters
----------
n : integer
Number to be checked
Returns
-------
factors : dict
Prime factors (keys) and multiplicities (values)
factors = {}
m = 2
while m <= n:
if n%m == 0:
factors[m] = 1
n //= m
while n%m == 0:
factors[m] += 1
n //= m
m += 1
return factors
for n in range(17, 21):
print("Prime factors of {} are {}.".format(n, prime_factors(n).keys()))
print("Multiplicities of prime factors of 48 are {}.".format(prime_factors(48).values()))
def divisors(n):
Generate all integer divisors of n.
Parameters
----------
n : integer
Number to be checked
Returns
-------
divs : list
All integer divisors, including 1.
divs = [1]
m = 2
while m <= n/2:
if n%m == 0:
divs.append(m)
m += 1
return divs
for n in range(16, 21):
print("The divisors of {} are {}.".format(n, divisors(n)))
def isperfect(n):
Check if a number is perfect.
Parameters
----------
n : integer
Number to check
Returns
-------
isperfect : Boolean
Whether it is perfect or not.
divs = divisors(n)
sum_divs = 0
for d in divs:
sum_divs += d
return n == sum_divs
for n in range(2,10000):
if (isperfect(n)):
factors = prime_factors(n)
print("{} is perfect.\n"
"Divisors are {}.\n"
"Prime factors {} (multiplicities {}).".format(
n, divisors(n), factors.keys(), factors.values()))
%timeit isperfect(2**(3-1)*(2**3-1))
%timeit isperfect(2**(5-1)*(2**5-1))
%timeit isperfect(2**(7-1)*(2**7-1))
%timeit isperfect(2**(13-1)*(2**13-1))
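# A faster divisors variant (a sketch of the efficiency point made in the
# notes): collect divisor pairs up to sqrt(n), so the cost scales like
# sqrt(n) rather than n/2.
def divisors_fast(n):
    small, large = [], []
    m = 1
    while m * m <= n:
        if n % m == 0:
            small.append(m)
            if m != n // m and m != 1:
                large.append(n // m)
        m += 1
    return small + large[::-1]
print(divisors_fast(496) == divisors(496))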
def logistic(x0, r, N = 1000):
sequence = [x0]
xn = x0
for n in range(N):
xnew = r*xn*(1.0-xn)
sequence.append(xnew)
xn = xnew
return sequence
import numpy
from matplotlib import pyplot
%matplotlib inline
x0 = 0.5
N = 2000
sequence1 = logistic(x0, 1.5, N)
sequence2 = logistic(x0, 3.5, N)
pyplot.plot(sequence1[-100:], 'b-', label = r'$r=1.5$')
pyplot.plot(sequence2[-100:], 'k-', label = r'$r=3.5$')
pyplot.xlabel(r'$n$')
pyplot.ylabel(r'$x$')
pyplot.show()
import numpy
from matplotlib import pyplot
%matplotlib inline
# This is the "best" way of doing it, but numpy hasn't been introduced yet
# r_values = numpy.arange(1.0, 4.0, 0.01)
r_values = []
for i in range(300): # match arange(1.0, 4.0, 0.01); r > 4 makes the map diverge
r_values.append(1.0 + 0.01 * i)
x0 = 0.5
N = 2000
for r in r_values:
sequence = logistic(x0, r, N)
pyplot.plot(r*numpy.ones_like(sequence[1000:]), sequence[1000:], 'k.')
pyplot.xlabel(r'$r$')
pyplot.ylabel(r'$x$')
pyplot.show()
def in_Mandelbrot(c, n_iterations = 100):
z0 = 0.0 + 0j
in_set = True
n = 0
zn = z0
while in_set and (n < n_iterations):
n += 1
znew = zn**2 + c
in_set = abs(znew) < 2.0
zn = znew
return in_set
c_values = [0.0, 2+2j, 2-2j, -2+2j, -2-2j]
for c in c_values:
print("Is {} in the Mandelbrot set? {}.".format(c, in_Mandelbrot(c)))
import numpy
def grid_Mandelbrot(N):
x = numpy.linspace(-2.0, 2.0, N)
X, Y = numpy.meshgrid(x, x)
C = X + 1j*Y
grid = numpy.zeros((N, N), int)
for nx in range(N):
for ny in range(N):
grid[nx, ny] = int(in_Mandelbrot(C[nx, ny]))
return grid
from matplotlib import pyplot
%matplotlib inline
pyplot.imshow(grid_Mandelbrot(100))
from math import log
def log_Mandelbrot(c, n_iterations = 100):
z0 = 0.0 + 0j
in_set = True
n = 0
zn = z0
while in_set and (n < n_iterations):
n += 1
znew = zn**2 + c
in_set = abs(znew) < 2.0
zn = znew
return log(n)
def log_grid_Mandelbrot(N):
x = numpy.linspace(-2.0, 2.0, N)
X, Y = numpy.meshgrid(x, x)
C = X + 1j*Y
grid = numpy.zeros((N, N)) # float grid: an int grid would truncate the log values
for nx in range(N):
for ny in range(N):
grid[nx, ny] = log_Mandelbrot(C[nx, ny])
return grid
from matplotlib import pyplot
%matplotlib inline
pyplot.imshow(log_grid_Mandelbrot(100))
pyplot.imshow(log_grid_Mandelbrot(1000)[600:800,400:600])
class Eqint(object):
def __init__(self, sequence):
self.sequence = sequence
def __repr__(self):
return str(len(self.sequence))
def __eq__(self, other):
return len(self.sequence)==len(other.sequence)
zero = Eqint([])
one_list = Eqint([1])
one_tuple = Eqint((1,))
one_string = Eqint('1')
print("Is zero equivalent to one? {}, {}, {}".format(zero == one_list,
zero == one_tuple,
zero == one_string))
print("Is one equivalent to one? {}, {}, {}.".format(one_list == one_tuple,
one_list == one_string,
one_tuple == one_string))
print(zero)
print(one_list)
print(one_tuple)
print(one_string)
class Eqint(object):
def __init__(self, sequence):
self.sequence = sequence
def __repr__(self):
return str(len(self.sequence))
def __eq__(self, other):
return len(self.sequence)==len(other.sequence)
def __add__(a, b):
return Eqint(tuple(a.sequence) + tuple(b.sequence))
zero = Eqint([])
one_list = Eqint([1])
one_tuple = Eqint((1,))
one_string = Eqint('1')
sum_eqint = zero + one_list + one_tuple + one_string
print("The sum is {}.".format(sum_eqint))
print("The internal sequence is {}.".format(sum_eqint.sequence))
positive_integers = []
zero = Eqint([])
positive_integers.append(zero)
N = 10
for n in range(1,N+1):
positive_integers.append(Eqint(list(positive_integers)))
print("The 'final' Eqint is {}".format(positive_integers[-1]))
print("Its sequence is {}".format(positive_integers[-1].sequence))
print("That is, it contains all Eqints with length less than 10.")
def normal_form(numerator, denominator):
from math import gcd # fractions.gcd was removed in Python 3.9
factor = gcd(numerator, denominator)
return numerator//factor, denominator//factor
print(normal_form(3, 2))
print(normal_form(15, 3))
print(normal_form(20, 42))
class Rational(object):
A rational number.
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
q1 = Rational(3, 2)
print(q1)
q2 = Rational(15, 3)
print(q2)
q3 = Rational(20, 42)
print(q3)
class Rational(object):
A rational number.
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
print(Rational(1,2) + Rational(1,3) + Rational(1,6))
class Rational(object):
A rational number.
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
print(Rational(1,3)*Rational(15,2)*Rational(2,5))
class Rational(object):
A rational number.
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __rmul__(self, other):
numerator = self.numerator * other
return Rational(numerator, self.denominator)
def __sub__(a, b):
return a + (-1)*b
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
half = Rational(1,2)
print(2*half)
print(half+(-1)*half)
print(half-half)
class Rational(object):
A rational number.
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __rmul__(self, other):
numerator = self.numerator * other
return Rational(numerator, self.denominator)
def __sub__(a, b):
return a + (-1)*b
def __float__(a):
return float(a.numerator) / float(a.denominator)
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
print(float(Rational(1,2)))
print(float(Rational(1,3)))
print(float(Rational(1,11)))
class Rational(object):
A rational number.
def __init__(self, numerator, denominator):
n, d = normal_form(numerator, denominator)
self.numerator = n
self.denominator = d
return None
def __add__(a, b):
numerator = a.numerator * b.denominator + b.numerator * a.denominator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __mul__(a, b):
numerator = a.numerator * b.numerator
denominator = a.denominator * b.denominator
return Rational(numerator, denominator)
def __rmul__(self, other):
numerator = self.numerator * other
return Rational(numerator, self.denominator)
def __sub__(a, b):
return a + (-1)*b
def __float__(a):
return float(a.numerator) / float(a.denominator)
def __lt__(a, b):
return a.numerator * b.denominator < a.denominator * b.numerator
def __repr__(self):
max_length = max(len(str(self.numerator)), len(str(self.denominator)))
if self.denominator == 1:
frac = str(self.numerator)
else:
numerator = '\n'+str(self.numerator)+'\n'
bar = max_length*'-'+'\n'
denominator = str(self.denominator)
frac = numerator+bar+denominator
return frac
q_list = [Rational(n//2, n) for n in range(2, 12)]
print(sorted(q_list))
def wallis_rational(N):
The partial product approximation to pi using the first N terms of Wallis' formula.
Parameters
----------
N : int
Number of terms in product
Returns
-------
partial : Rational
A rational number approximation to pi
partial = Rational(2,1)
for n in range(1, N+1):
partial = partial * Rational((2*n)**2, (2*n-1)*(2*n+1))
return partial
pi_list = [wallis_rational(n) for n in range(1, 21)]
print(pi_list)
print(sorted(pi_list))
import numpy
print(numpy.pi-numpy.array(list(map(float, pi_list))))
lhs = 27**5 + 84**5 + 110**5 + 133**5
rhs = 144**5
print("Does the LHS {} equal the RHS {}? {}".format(lhs, rhs, lhs==rhs))
import numpy
import itertools
input_list = numpy.arange(1, 7)
combinations = list(itertools.combinations(input_list, 4))
print(combinations)
n_combinations = 144*143*142*141//24
print("Number of combinations of 4 objects from 144 is {}".format(n_combinations))
from matplotlib import pyplot
%matplotlib inline
n = numpy.arange(5, 51)
N = numpy.zeros_like(n)
for i, n_c in enumerate(n):
combinations = list(itertools.combinations(numpy.arange(1,n_c+1), 4))
N[i] = len(combinations)
pyplot.figure(figsize=(12,6))
pyplot.loglog(n, N, linestyle='None', marker='x', color='k', label='Combinations')
pyplot.loglog(n, n**4, color='b', label=r'$n^4$')
pyplot.xlabel(r'$n$')
pyplot.ylabel(r'$N$')
pyplot.legend(loc='upper left')
pyplot.show()
nmax=145
range_to_power = numpy.arange(1, nmax)**5.0
lhs_combinations = list(itertools.combinations(range_to_power, 4))
lhs_sums = []
for lhs_terms in lhs_combinations:
lhs_sums.append(numpy.sum(numpy.array(lhs_terms)))
for i, lhs in enumerate(lhs_sums):
if lhs in range_to_power:
rhs_primitive = int(lhs**(0.2))
lhs_primitive = (numpy.array(lhs_combinations[i])**(0.2)).astype(int)
print("The LHS terms are {}.".format(lhs_primitive))
print("The RHS term is {}.".format(rhs_primitive))
def dvdt(v, t, sigma, rho, beta):
Define the Lorenz system.
Parameters
----------
v : list
State vector
t : float
Time
sigma : float
Parameter
rho : float
Parameter
beta : float
Parameter
Returns
-------
dvdt : list
RHS defining the Lorenz system
x, y, z = v
return [sigma*(y-x), x*(rho-z)-y, x*y-beta*z]
import numpy
from scipy.integrate import odeint
v0 = [1.0, 1.0, 1.0]
sigma = 10.0
beta = 8.0/3.0
t_values = numpy.linspace(0.0, 100.0, 5000)
rho_values = [13.0, 14.0, 15.0, 28.0]
v_values = []
for rho in rho_values:
params = (sigma, rho, beta)
v = odeint(dvdt, v0, t_values, args=params)
v_values.append(v)
%matplotlib inline
from matplotlib import pyplot
from mpl_toolkits.mplot3d.axes3d import Axes3D
fig = pyplot.figure(figsize=(12,6))
for i, v in enumerate(v_values):
ax = fig.add_subplot(2,2,i+1,projection='3d')
ax.plot(v[:,0], v[:,1], v[:,2])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
ax.set_title(r"$\rho={}$".format(rho_values[i]))
pyplot.show()
t_values = numpy.linspace(0.0, 40.0, 4000)
rho = 28.0
params = (sigma, rho, beta)
v_values = []
v0_values = [[1.0,1.0,1.0],
[1.0+1e-5,1.0+1e-5,1.0+1e-5]]
for v0 in v0_values:
v = odeint(dvdt, v0, t_values, args=params)
v_values.append(v)
fig = pyplot.figure(figsize=(12,6))
line_colours = 'by'
for tstart in range(4):
ax = fig.add_subplot(2,2,tstart+1,projection='3d')
for i, v in enumerate(v_values):
ax.plot(v[tstart*1000:(tstart+1)*1000,0],
v[tstart*1000:(tstart+1)*1000,1],
v[tstart*1000:(tstart+1)*1000,2],
color=line_colours[i])
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
ax.set_zlabel(r'$z$')
ax.set_title(r"$t \in [{},{}]$".format(tstart*10, (tstart+1)*10))
pyplot.show()
import sympy
sympy.init_printing()
t = sympy.symbols('t')
y = sympy.Function('y') # dsolve needs y as an undefined Function, not a Symbol
sympy.dsolve(sympy.diff(y(t), t) + y(t)**2 - sympy.exp(-t), y(t))
for n in range(2, 11):
ode_solution = sympy.dsolve(sympy.diff(y(t), t) + y(t)**n - sympy.exp(-t), y(t),
ics = {y(0) : 1})
print(ode_solution)
%matplotlib inline
for n in range(2, 11):
ode_solution = sympy.dsolve(sympy.diff(y(t), t) + y(t)**n - sympy.exp(-t), y(t),
ics = {y(0) : 1})
sympy.plot(ode_solution.rhs.removeO(), (t, 0, 1));
def all_primes(N):
Return all primes less than or equal to N.
Parameters
----------
N : int
Maximum number
Returns
-------
prime : generator
Prime numbers
primes = []
for n in range(2, N+1):
is_n_prime = True
for p in primes:
if n%p == 0:
is_n_prime = False
break
if is_n_prime:
primes.append(n)
yield n
from itertools import tee
def pair_primes(N):
"Generate consecutive prime pairs, using the itertools recipe"
a, b = tee(all_primes(N))
next(b, None)
return zip(a, b)
def check_twin(pair):
Take in a pair of integers, check if they differ by 2.
p1, p2 = pair
return p2-p1 == 2
def twin_primes(N):
Return all twin primes
return filter(check_twin, pair_primes(N))
for tp in twin_primes(20):
print(tp)
def pi_N(N):
Use the quantify pattern from itertools to count the number of twin primes.
return sum(map(check_twin, pair_primes(N)))
pi_N(1000)
import numpy
from matplotlib import pyplot
%matplotlib inline
N = numpy.array([2**k for k in range(4, 17)])
twin_prime_fraction = numpy.array(list(map(pi_N, N))) / N
pyplot.semilogx(N, twin_prime_fraction)
pyplot.xlabel(r"$N$")
pyplot.ylabel(r"$\pi_N / N$")
pyplot.show()
pyplot.semilogx(N, twin_prime_fraction * numpy.log(N)**2)
pyplot.xlabel(r"$N$")
pyplot.ylabel(r"$\pi_N \times \log(N)^2 / N$")
pyplot.show()
class Polynomial(object):
Representing a polynomial.
explanation = "I am a polynomial"
def __init__(self, roots, leading_term):
self.roots = roots
self.leading_term = leading_term
self.order = len(roots)
def __repr__(self):
string = str(self.leading_term)
for root in self.roots:
if root == 0:
string = string + "x"
elif root > 0:
string = string + "(x - {})".format(root)
else:
string = string + "(x + {})".format(-root)
return string
def __mul__(self, other):
roots = self.roots + other.roots
leading_term = self.leading_term * other.leading_term
return Polynomial(roots, leading_term)
def explain_to(self, caller):
print("Hello, {}. {}.".format(caller,self.explanation))
print("My roots are {}.".format(self.roots))
return None
class Monomial(Polynomial):
Representing a monomial, which is a polynomial with leading term 1.
explanation = "I am a monomial"
def __init__(self, roots):
Polynomial.__init__(self, roots, 1)
def __repr__(self):
string = ""
for root in self.roots:
if root == 0:
string = string + "x"
elif root > 0:
string = string + "(x - {})".format(root)
else:
string = string + "(x + {})".format(-root)
return string
def basis_pN(N):
A generator for the simplest basis of P^N.
for n in range(N+1):
yield Monomial(n*[0])
for poly in basis_pN(3):
print(poly)
class Monomial(Polynomial):
Representing a monomial, which is a polynomial with leading term 1.
explanation = "I am a monomial"
def __init__(self, roots):
Polynomial.__init__(self, roots, 1)
def __repr__(self):
if len(self.roots):
string = ""
n_zero_roots = len(self.roots) - numpy.count_nonzero(self.roots)
if n_zero_roots == 1:
string = "x"
elif n_zero_roots > 1:
string = "x^{}".format(n_zero_roots)
else: # Monomial degree 0.
string = "1"
for root in self.roots:
if root > 0:
string = string + "(x - {})".format(root)
elif root < 0:
string = string + "(x + {})".format(-root)
return string
for poly in basis_pN(3):
print(poly)
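# A sketch of the numpy.unique-based variant mentioned in the notes: count the
# multiplicity of every root in one call instead of special-casing zeros.
def monomial_repr_unique(roots):
    if not len(roots):
        return "1"
    values, counts = numpy.unique(roots, return_counts=True)
    terms = []
    for root, count in zip(values, counts):
        if root == 0:
            term = "x"
        elif root > 0:
            term = "(x - {})".format(root)
        else:
            term = "(x + {})".format(-root)
        if count > 1:
            term = term + "^{}".format(count)
        terms.append(term)
    return "".join(terms)
print(monomial_repr_unique([0, 0, 1, 1, -2]))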
def basis_pN_variant(N):
A generator for the 'sum' basis of P^N.
for n in range(N+1):
yield Monomial(range(n+1))
for poly in basis_pN_variant(4):
print(poly)
from itertools import product
def basis_product():
Basis of the product space
yield from product(basis_pN(3), basis_pN_variant(4))
for p1, p2 in basis_product():
print("Basis element is ({}) X ({}).".format(p1, p2))
def basis_product_long_form():
Basis of the product space (without using yield_from)
prod = product(basis_pN(3), basis_pN_variant(4))
for p in prod:
    yield p
for p1, p2 in basis_product_long_form():
print("Basis element is ({}) X ({}).".format(p1, p2))
import numpy
set1_x = numpy.array([10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0])
set1_y = numpy.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
set2_x = numpy.array([10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0])
set2_y = numpy.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
set3_x = numpy.array([10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0])
set3_y = numpy.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
set4_x = numpy.array([8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 19.0, 8.0, 8.0, 8.0])
set4_y = numpy.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
data_x = set1_x, set2_x, set3_x, set4_x
data_y = set1_y, set2_y, set3_y, set4_y
print("Results for x:")
for x in data_x:
print("Mean: {:.2f}. Variance {:.2f}. Standard deviation {:.2f}.".format(numpy.mean(x),
numpy.var(x),
numpy.std(x)))
print("Results for y:")
for data in data_y:
print("Mean: {:.2f}. Variance {:.2f}. Standard deviation {:.2f}.".format(numpy.mean(data),
numpy.var(data),
numpy.std(data)))
from scipy import stats
for x, y in zip(data_x, data_y):
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
print("Slope: {:.2f}. Correlation: {:.2f}.".format(slope, r_value))
%matplotlib inline
from matplotlib import pyplot
fit_x = numpy.linspace(2.0, 20.0)
fig = pyplot.figure(figsize=(12,6))
for i in range(4):
slope, intercept, r_value, p_value, std_err = stats.linregress(data_x[i], data_y[i])
ax = fig.add_subplot(2,2,i+1)
ax.scatter(data_x[i], data_y[i])
ax.plot(fit_x, intercept + slope*fit_x)
ax.set_xlim(2.0, 20.0)
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$y$')
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 2
Step2: Exercise 3
Step4: We see that the relative error decreases, whilst the absolute error grows (significantly).
Step6: In later cases, after having covered exceptions, I would suggest raising a NotImplementedError for negative edge lengths.
Step8: Exercise 3
Step9: Floating point numbers
Step10: We see that the first three digits are correct. This isn't too surprising
Step12: There is a difference in the fifth significant figure in both solutions in the first case, which gets to the third (arguably the second) significant figure in the second case. Comparing to the limiting solutions above, we see that the larger root is definitely more accurately captured with the first formula than the second (as the result should be bigger than $10^{-2n}$).
Step13: Exercise 6
Step15: We have a combination of floating point inaccuracies
Step16: Exercise 2
Step17: Exercise 3
Step19: Exercise 4
Step21: Exercise 5
Step23: Exercise 6
Step24: Exercise 7
Step25: It's worth thinking about the operation counts of the various functions implemented here. The implementations are inefficient, but even in the best case you see how the number of operations (and hence computing time required) rapidly increases.
Step26: Exercise 2
Step27: This suggests that, for $r=1.5$, the sequence has settled down to a fixed point. In the $r=3.5$ case it seems to be moving between four points repeatedly.
Step28: Exercise 4
Step29: Exercise 2
Step30: Exercise 3
Step31: Exercise 4
Step32: Exercise 5
Step33: Exercise 6
Step34: Equivalence classes
Step35: Exercise 2
Step36: Exercise 3
Step37: Exercise 4
Step38: Exercise 5
Step39: Rational numbers
Step41: Exercise 2
Step43: Exercise 3
Step45: Exercise 4
Step47: Exercise 5
Step49: Exercise 6
Step51: Exercise 7
Step53: Exercise 8
Step54: The shortest published Mathematical paper
Step55: Exercise 2
Step56: The combinations function returns all the combinations (ignoring order) of r elements from a given list. For example, take a list of length 6, [1, 2, 3, 4, 5, 6] and compute all the combinations of length 4
Step57: We can already see that the number of terms to consider is large.
Step58: Exercise 2a
Step59: With 17 million combinations to work with, we'll need to be a little careful how we compute.
Step60: Then calculate the sums
Step61: Finally, loop through the sums and check to see if it matches any possible term on the RHS
Step63: Lorenz attractor
Step64: Exercise 2
Step65: Exercise 3
Step66: This shows the sensitive dependence on initial conditions that is characteristic of chaotic behaviour.
Step67: Exercise 2
Step68: Exercise 3
Step70: Twin primes
Step71: Now we can generate pairs using the pairwise recipe
Step73: We could examine the results of the two primes directly. But an efficient solution is to use python's filter function. To do this, first define a function checking if the pair are twin primes
Step75: Then use the filter function to define another generator
Step76: Now check by finding the twin primes with $N<20$
Step78: Exercise 2
Step79: Exercise 3
Step80: For those that have checked Wikipedia, you'll see Brun's theorem which suggests a specific scaling, that $\pi_N$ is bounded by $C N / \log(N)^2$. Checking this numerically on this data
Step83: A basis for the polynomials
Step85: Now we can define the first basis
Step86: Then test it on $\mathbb{P}^N$
Step88: This looks horrible, but is correct. To really make this look good, we need to improve the output. If we use
Step89: then we can deal with the uglier cases, and re-running the test we get
Step91: An even better solution would be to use the numpy.unique function as in this stackoverflow answer (the second one!) to get the frequency of all the roots.
Step93: I am too lazy to work back through the definitions and flip all the signs; it should be clear how to do this!
Step95: I've cheated here as I haven't introduced the yield from syntax (which delegates to another iterator from inside a generator). We could write this out instead as
Step96: Anscombe's quartet
Step97: Exercise 2
Step98: Exercise 3
|
3,448
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
a_true = 0.5
b_true = 2.0
c_true = -4.0
def quad(x,a,b,c):
return a*x**2 + b*x + c
N = 30
xdata = np.linspace(-5,5,N)
dy = 2.0
np.random.seed(0)
ydata = quad(xdata,a_true,b_true,c_true) + np.random.normal(0.0, dy, N)
plt.errorbar(xdata,ydata,dy,fmt='.k',ecolor='lightgrey')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(-5,5);
assert True # leave this cell for grading the raw data generation and plot
theta_best, theta_cov = opt.curve_fit(quad, xdata, ydata, sigma=dy)
a_fit = theta_best[0]
b_fit = theta_best[1]
c_fit = theta_best[2]
print('a = {0:.3f} +/- {1:.3f}'.format(a_fit, np.sqrt(theta_cov[0,0])))
print('b = {0:.3f} +/- {1:.3f}'.format(b_fit, np.sqrt(theta_cov[1,1])))
print('c = {0:.3f} +/- {1:.3f}'.format(c_fit, np.sqrt(theta_cov[2,2])))
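# A hedged aside: curve_fit rescales the covariance by the reduced chi-square
# unless absolute_sigma=True, so to treat dy as an absolute error one could use
# theta_best, theta_cov = opt.curve_fit(quad, xdata, ydata,
#                                       sigma=dy*np.ones(N), absolute_sigma=True)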
x_fit = np.linspace(-5,5,30)
y_fit = quad(x_fit,a_fit,b_fit,c_fit)
plt.errorbar(xdata,ydata,dy,fmt='.k',ecolor='lightgrey')
plt.plot(x_fit,y_fit)
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(-5,5);
assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting a quadratic curve
Step2: First, generate a dataset from this model with these parameters and the following characteristics
Step3: Now fit the model to the dataset to recover estimates for the model's parameters
|
3,449
|
<ASSISTANT_TASK:>
Python Code:
import glob
from IPython.display import Image
import numpy as np
import openmc
# 1.6 enriched fuel
fuel = openmc.Material(name='1.6% Fuel')
fuel.set_density('g/cm3', 10.31341)
fuel.add_nuclide('U235', 3.7503e-4)
fuel.add_nuclide('U238', 2.2625e-2)
fuel.add_nuclide('O16', 4.6007e-2)
# borated water
water = openmc.Material(name='Borated Water')
water.set_density('g/cm3', 0.740582)
water.add_nuclide('H1', 4.9457e-2)
water.add_nuclide('O16', 2.4732e-2)
water.add_nuclide('B10', 8.0042e-6)
# zircaloy
zircaloy = openmc.Material(name='Zircaloy')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_nuclide('Zr90', 7.2758e-3)
# Instantiate a Materials collection
materials_file = openmc.Materials([fuel, water, zircaloy])
# Export to "materials.xml"
materials_file.export_to_xml()
# Create cylinders for the fuel and clad
fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.39218)
clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, r=0.45720)
# Create boundary planes to surround the geometry
# Use both reflective and vacuum boundaries to make life interesting
min_x = openmc.XPlane(x0=-0.63, boundary_type='reflective')
max_x = openmc.XPlane(x0=+0.63, boundary_type='reflective')
min_y = openmc.YPlane(y0=-0.63, boundary_type='reflective')
max_y = openmc.YPlane(y0=+0.63, boundary_type='reflective')
min_z = openmc.ZPlane(z0=-100., boundary_type='vacuum')
max_z = openmc.ZPlane(z0=+100., boundary_type='vacuum')
# Create a Universe to encapsulate a fuel pin
pin_cell_universe = openmc.Universe(name='1.6% Fuel Pin')
# Create fuel Cell
fuel_cell = openmc.Cell(name='1.6% Fuel')
fuel_cell.fill = fuel
fuel_cell.region = -fuel_outer_radius
pin_cell_universe.add_cell(fuel_cell)
# Create a clad Cell
clad_cell = openmc.Cell(name='1.6% Clad')
clad_cell.fill = zircaloy
clad_cell.region = +fuel_outer_radius & -clad_outer_radius
pin_cell_universe.add_cell(clad_cell)
# Create a moderator Cell
moderator_cell = openmc.Cell(name='1.6% Moderator')
moderator_cell.fill = water
moderator_cell.region = +clad_outer_radius
pin_cell_universe.add_cell(moderator_cell)
# Create root Cell
root_cell = openmc.Cell(name='root cell')
root_cell.fill = pin_cell_universe
# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z
# Create root Universe
root_universe = openmc.Universe(universe_id=0, name='root universe')
root_universe.add_cell(root_cell)
# Create Geometry and set root Universe
geometry = openmc.Geometry(root_universe)
# Export to "geometry.xml"
geometry.export_to_xml()
# OpenMC simulation parameters
batches = 20
inactive = 5
particles = 2500
# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
settings_file.output = {'tallies': True}
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.63, -0.63, -100., 0.63, 0.63, 100.]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.Source(space=uniform_dist)
# Export to "settings.xml"
settings_file.export_to_xml()
# Instantiate a Plot
plot = openmc.Plot(plot_id=1)
plot.filename = 'materials-xy'
plot.origin = [0, 0, 0]
plot.width = [1.26, 1.26]
plot.pixels = [250, 250]
plot.color_by = 'material'
# Show plot
openmc.plot_inline(plot)
# Instantiate an empty Tallies object
tallies_file = openmc.Tallies()
# Create Tallies to compute microscopic multi-group cross-sections
# Instantiate energy filter for multi-group cross-section Tallies
energy_filter = openmc.EnergyFilter([0., 0.625, 20.0e6])
# Instantiate flux Tally in moderator and fuel
tally = openmc.Tally(name='flux')
tally.filters = [openmc.CellFilter([fuel_cell, moderator_cell])]
tally.filters.append(energy_filter)
tally.scores = ['flux']
tallies_file.append(tally)
# Instantiate reaction rate Tally in fuel
tally = openmc.Tally(name='fuel rxn rates')
tally.filters = [openmc.CellFilter(fuel_cell)]
tally.filters.append(energy_filter)
tally.scores = ['nu-fission', 'scatter']
tally.nuclides = ['U238', 'U235']
tallies_file.append(tally)
# Instantiate reaction rate Tally in moderator
tally = openmc.Tally(name='moderator rxn rates')
tally.filters = [openmc.CellFilter(moderator_cell)]
tally.filters.append(energy_filter)
tally.scores = ['absorption', 'total']
tally.nuclides = ['O16', 'H1']
tallies_file.append(tally)
# Instantiate a tally mesh
mesh = openmc.RegularMesh(mesh_id=1)
mesh.dimension = [1, 1, 1]
mesh.lower_left = [-0.63, -0.63, -100.]
mesh.width = [1.26, 1.26, 200.]
meshsurface_filter = openmc.MeshSurfaceFilter(mesh)
# Instantiate thermal, fast, and total leakage tallies
leak = openmc.Tally(name='leakage')
leak.filters = [meshsurface_filter]
leak.scores = ['current']
tallies_file.append(leak)
thermal_leak = openmc.Tally(name='thermal leakage')
thermal_leak.filters = [meshsurface_filter, openmc.EnergyFilter([0., 0.625])]
thermal_leak.scores = ['current']
tallies_file.append(thermal_leak)
fast_leak = openmc.Tally(name='fast leakage')
fast_leak.filters = [meshsurface_filter, openmc.EnergyFilter([0.625, 20.0e6])]
fast_leak.scores = ['current']
tallies_file.append(fast_leak)
# K-Eigenvalue (infinity) tallies
fiss_rate = openmc.Tally(name='fiss. rate')
abs_rate = openmc.Tally(name='abs. rate')
fiss_rate.scores = ['nu-fission']
abs_rate.scores = ['absorption']
tallies_file += (fiss_rate, abs_rate)
# Resonance Escape Probability tallies
therm_abs_rate = openmc.Tally(name='therm. abs. rate')
therm_abs_rate.scores = ['absorption']
therm_abs_rate.filters = [openmc.EnergyFilter([0., 0.625])]
tallies_file.append(therm_abs_rate)
# Thermal Flux Utilization tallies
fuel_therm_abs_rate = openmc.Tally(name='fuel therm. abs. rate')
fuel_therm_abs_rate.scores = ['absorption']
fuel_therm_abs_rate.filters = [openmc.EnergyFilter([0., 0.625]),
openmc.CellFilter([fuel_cell])]
tallies_file.append(fuel_therm_abs_rate)
# Fast Fission Factor tallies
therm_fiss_rate = openmc.Tally(name='therm. fiss. rate')
therm_fiss_rate.scores = ['nu-fission']
therm_fiss_rate.filters = [openmc.EnergyFilter([0., 0.625])]
tallies_file.append(therm_fiss_rate)
# Instantiate energy filter to illustrate Tally slicing
fine_energy_filter = openmc.EnergyFilter(np.logspace(np.log10(1e-2), np.log10(20.0e6), 10))
# Instantiate flux Tally in moderator and fuel
tally = openmc.Tally(name='need-to-slice')
tally.filters = [openmc.CellFilter([fuel_cell, moderator_cell])]
tally.filters.append(fine_energy_filter)
tally.scores = ['nu-fission', 'scatter']
tally.nuclides = ['H1', 'U238']
tallies_file.append(tally)
# Export to "tallies.xml"
tallies_file.export_to_xml()
# Run OpenMC!
openmc.run()
# Load the statepoint file
sp = openmc.StatePoint('statepoint.20.h5')
# Get the fission and absorption rate tallies
fiss_rate = sp.get_tally(name='fiss. rate')
abs_rate = sp.get_tally(name='abs. rate')
# Get the leakage tally
leak = sp.get_tally(name='leakage')
leak = leak.summation(filter_type=openmc.MeshSurfaceFilter, remove_filter=True)
# Compute k-infinity using tally arithmetic
keff = fiss_rate / (abs_rate + leak)
keff.get_pandas_dataframe()
# Compute resonance escape probability using tally arithmetic
therm_abs_rate = sp.get_tally(name='therm. abs. rate')
thermal_leak = sp.get_tally(name='thermal leakage')
thermal_leak = thermal_leak.summation(filter_type=openmc.MeshSurfaceFilter, remove_filter=True)
res_esc = (therm_abs_rate + thermal_leak) / (abs_rate + thermal_leak)
res_esc.get_pandas_dataframe()
# Compute fast fission factor factor using tally arithmetic
therm_fiss_rate = sp.get_tally(name='therm. fiss. rate')
fast_fiss = fiss_rate / therm_fiss_rate
fast_fiss.get_pandas_dataframe()
# Compute thermal flux utilization factor using tally arithmetic
fuel_therm_abs_rate = sp.get_tally(name='fuel therm. abs. rate')
therm_util = fuel_therm_abs_rate / therm_abs_rate
therm_util.get_pandas_dataframe()
# Compute neutrons produced per absorption (eta) using tally arithmetic
eta = therm_fiss_rate / fuel_therm_abs_rate
eta.get_pandas_dataframe()
p_fnl = (abs_rate + thermal_leak) / (abs_rate + leak)
p_fnl.get_pandas_dataframe()
p_tnl = therm_abs_rate / (therm_abs_rate + thermal_leak)
p_tnl.get_pandas_dataframe()
keff = res_esc * fast_fiss * therm_util * eta * p_fnl * p_tnl
keff.get_pandas_dataframe()
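# (Illustrative sketch, not part of the original notebook.) The four-factor
# k-eff is a derived tally, so its dataframe exposes 'mean' and 'std. dev.'
# columns for a quick comparison against the direct estimate printed earlier:
four_factor_df = keff.get_pandas_dataframe()
print(four_factor_df[['mean', 'std. dev.']])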
# Compute microscopic multi-group cross-sections
flux = sp.get_tally(name='flux')
flux = flux.get_slice(filters=[openmc.CellFilter], filter_bins=[(fuel_cell.id,)])
fuel_rxn_rates = sp.get_tally(name='fuel rxn rates')
mod_rxn_rates = sp.get_tally(name='moderator rxn rates')
fuel_xs = fuel_rxn_rates / flux
fuel_xs.get_pandas_dataframe()
# Show how to use Tally.get_values(...) with a CrossScore
nu_fiss_xs = fuel_xs.get_values(scores=['(nu-fission / flux)'])
print(nu_fiss_xs)
# Show how to use Tally.get_values(...) with a CrossScore and CrossNuclide
u235_scatter_xs = fuel_xs.get_values(nuclides=['(U235 / total)'],
scores=['(scatter / flux)'])
print(u235_scatter_xs)
# Show how to use Tally.get_values(...) with a CrossFilter and CrossScore
fast_scatter_xs = fuel_xs.get_values(filters=[openmc.EnergyFilter],
filter_bins=[((0.625, 20.0e6),)],
scores=['(scatter / flux)'])
print(fast_scatter_xs)
# "Slice" the nu-fission data into a new derived Tally
nu_fission_rates = fuel_rxn_rates.get_slice(scores=['nu-fission'])
nu_fission_rates.get_pandas_dataframe()
# "Slice" the H-1 scatter data in the moderator Cell into a new derived Tally
need_to_slice = sp.get_tally(name='need-to-slice')
slice_test = need_to_slice.get_slice(scores=['scatter'], nuclides=['H1'],
filters=[openmc.CellFilter], filter_bins=[(moderator_cell.id,)])
slice_test.get_pandas_dataframe()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Generate Input Files
Step2: With our three materials, we can now create a materials file object that can be exported to an actual XML file.
Step3: Now let's move on to the geometry. Our problem will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces -- in this case two cylinders and six planes.
Step4: With the surfaces defined, we can now create cells that are defined by intersections of half-spaces created by the surfaces.
Step5: OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the pin cell universe and then assign it to the root universe.
Step6: We now must create a geometry that is assigned a root universe, put the geometry into a geometry file, and export it to XML.
Step7: With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 5 inactive batches and 15 active batches each with 2500 particles.
Step8: Let us also create a plot file that we can use to verify that our pin cell geometry was created successfully.
Step9: As we can see from the plot, we have a nice pin cell with fuel, cladding, and water! Before we run our simulation, we need to tell the code what we want to tally. The following code shows how to create a variety of tallies.
Step10: Now we have a complete set of inputs, so we can go ahead and run our simulation.
Step11: Tally Data Processing
Step12: We have a tally of the total fission rate and the total absorption rate, so we can calculate k-eff as
Step13: Notice that even though the neutron production rate, absorption rate, and current are separate tallies, we still get a first-order estimate of the uncertainty on the quotient of them automatically!
Step14: The fast fission factor can be calculated as
Step15: The thermal flux utilization is calculated as
Step16: The next factor is the number of fission neutrons produced per absorption in fuel, calculated as $$\eta = \frac{\langle \nu\Sigma_f\phi \rangle_T}{\langle \Sigma_a \phi \rangle^F_T}$$
Step17: There are two leakage factors to account for fast and thermal leakage. The fast non-leakage probability is computed as $$P_{FNL} = \frac{\langle \Sigma_a\phi \rangle + \langle L \rangle_T}{\langle \Sigma_a \phi \rangle + \langle L \rangle}$$
Step18: The final factor is the thermal non-leakage probability and is computed as $$P_{TNL} = \frac{\langle \Sigma_a\phi \rangle_T}{\langle \Sigma_a \phi \rangle_T + \langle L \rangle_T}$$
Step19: Now we can calculate $k_{eff}$ using the product of the factors form the four-factor formula.
Step20: We see that the value we've obtained here has exactly the same mean as before. However, because of the way it was calculated, the standard deviation appears to be larger.
Step21: We see that when the two tallies with multiple bins were divided, the derived tally contains the outer product of the combinations. If the filters/scores are the same, no outer product is needed. The get_values(...) method allows us to obtain a subset of tally scores. In the following example, we obtain just the neutron production microscopic cross sections.
Step22: The same idea can be used not only for scores but also for filters and nuclides.
Step23: A more advanced method is to use get_slice(...) to create a new derived tally that is a subset of an existing tally. This has the benefit that we can use get_pandas_dataframe() to see the tallies in a more human-readable format.
|
3,450
|
<ASSISTANT_TASK:>
Python Code:
PROJECT = 'cloud-training-demos' # Replace with your PROJECT
BUCKET = 'cloud-training-bucket' # Replace with your BUCKET
REGION = 'us-central1' # Choose an available region for Cloud MLE
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%load_ext google.cloud.bigquery
%%bigquery --project $PROJECT
SELECT
*
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
LIMIT 10
%%bigquery --project $PROJECT
SELECT
weight_pounds, -- this is the label; because it is continuous, we need to use regression
CAST(is_male AS STRING) AS is_male,
mother_age,
CAST(plurality AS STRING) AS plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
LIMIT 10
%%bash
bq --location=US mk -d demo
%%bigquery --project $PROJECT
# TODO: Your code goes here
# Completed for runnability; the model name below matches the
# ML.TRAINING_INFO call later in this notebook.
CREATE OR REPLACE MODEL demo.babyweight_model_asis
OPTIONS(input_label_cols=['weight_pounds'], model_type='linear_reg') AS
WITH natality_data AS (
SELECT
weight_pounds,-- this is the label; because it is continuous, we need to use regression
CAST(is_male AS STRING) AS is_male,
mother_age,
CAST(plurality AS STRING) AS plurality,
gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
)
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
natality_data
WHERE
ABS(MOD(hashmonth, 4)) < 3 -- select 75% of the data as training
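-- (Illustrative sketch, not part of the original lab.) The complementary 25%
-- evaluation split would select the remaining hash bucket:
--   WHERE ABS(MOD(hashmonth, 4)) = 3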
%%bigquery --project $PROJECT
# TODO: Your code goes here
SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model_asis)
from google.cloud import bigquery
bq = bigquery.Client(project=PROJECT)
df = bq.query("SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model_asis)").to_dataframe()
# plot both lines in same graph
import matplotlib.pyplot as plt
plt.plot( 'iteration', 'loss', data=df, marker='o', color='orange', linewidth=2)
plt.plot( 'iteration', 'eval_loss', data=df, marker='', color='green', linewidth=2, linestyle='dashed')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.legend();
%%bigquery --project $PROJECT
SELECT
*
FROM
# TODO: Your code goes here
LIMIT 100
%%bigquery --project $PROJECT
SELECT
weight_pounds,
CAST(is_male AS STRING) AS is_male,
IF(mother_age < 18, 'LOW',
IF(mother_age > 45, 'HIGH',
CAST(mother_age AS STRING))) AS mother_age,
CAST(plurality AS STRING) AS plurality,
CAST(gestation_weeks AS STRING) AS gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
LIMIT 25
%%bigquery --project $PROJECT
SELECT
weight_pounds,
'Unknown' AS is_male,
IF(mother_age < 18, 'LOW',
IF(mother_age > 45, 'HIGH',
CAST(mother_age AS STRING))) AS mother_age,
IF(plurality > 1, 'Multiple', 'Single') AS plurality,
CAST(gestation_weeks AS STRING) AS gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
LIMIT 25
%%bigquery --project $PROJECT
WITH with_ultrasound AS (
SELECT
weight_pounds,
CAST(is_male AS STRING) AS is_male,
IF(mother_age < 18, 'LOW',
IF(mother_age > 45, 'HIGH',
CAST(mother_age AS STRING))) AS mother_age,
CAST(plurality AS STRING) AS plurality,
CAST(gestation_weeks AS STRING) AS gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
),
without_ultrasound AS (
SELECT
weight_pounds,
'Unknown' AS is_male,
IF(mother_age < 18, 'LOW',
IF(mother_age > 45, 'HIGH',
CAST(mother_age AS STRING))) AS mother_age,
IF(plurality > 1, 'Multiple', 'Single') AS plurality,
CAST(gestation_weeks AS STRING) AS gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
),
preprocessed AS (
SELECT * from with_ultrasound
UNION ALL
SELECT * from without_ultrasound
)
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
preprocessed
WHERE
ABS(MOD(hashmonth, 4)) < 3
LIMIT 25
%%bigquery --project $PROJECT
# TODO: Your code goes here
# Completed for runnability; the model name demo.babyweight_model_fc is an
# assumption and not taken from the original lab.
CREATE OR REPLACE MODEL demo.babyweight_model_fc
OPTIONS(input_label_cols=['weight_pounds'], model_type='linear_reg') AS
WITH with_ultrasound AS (
SELECT
weight_pounds,
CAST(is_male AS STRING) AS is_male,
IF(mother_age < 18, 'LOW',
IF(mother_age > 45, 'HIGH',
CAST(mother_age AS STRING))) AS mother_age,
CAST(plurality AS STRING) AS plurality,
CAST(gestation_weeks AS STRING) AS gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
),
without_ultrasound AS (
SELECT
weight_pounds,
'Unknown' AS is_male,
IF(mother_age < 18, 'LOW',
IF(mother_age > 45, 'HIGH',
CAST(mother_age AS STRING))) AS mother_age,
IF(plurality > 1, 'Multiple', 'Single') AS plurality,
CAST(gestation_weeks AS STRING) AS gestation_weeks,
FARM_FINGERPRINT(CONCAT(CAST(YEAR AS STRING), CAST(month AS STRING))) AS hashmonth
FROM
publicdata.samples.natality
WHERE
year > 2000
AND gestation_weeks > 0
AND mother_age > 0
AND plurality > 0
AND weight_pounds > 0
),
preprocessed AS (
SELECT * from with_ultrasound
UNION ALL
SELECT * from without_ultrasound
)
SELECT
weight_pounds,
is_male,
mother_age,
plurality,
gestation_weeks
FROM
preprocessed
WHERE
ABS(MOD(hashmonth, 4)) < 3
bq = bigquery.Client(project=PROJECT)
df = bq.query("SELECT * FROM ML.TRAINING_INFO(MODEL demo.babyweight_model_fc)").to_dataframe() # TODO completed; the model name is an assumption, following the pattern of the earlier cell
# plot both lines in same graph
import matplotlib.pyplot as plt
plt.plot( 'iteration', 'loss', data=df, marker='o', color='orange', linewidth=2)
plt.plot( 'iteration', 'eval_loss', data=df, marker='', color='green', linewidth=2, linestyle='dashed')
plt.xlabel('iteration')
plt.ylabel('loss')
plt.legend();
%%bigquery --project $PROJECT
SELECT
*
FROM
# TODO completed for runnability; the model name and example input row are
# assumptions:
ML.PREDICT(MODEL demo.babyweight_model_fc,
    (SELECT
        'Unknown' AS is_male,
        '28' AS mother_age,
        'Single' AS plurality,
        '38' AS gestation_weeks))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the data
Step2: Define features
Step3: Train a model in BigQuery
Step4: With the demo dataset ready, it is possible to create and train a linear regression model.
Step5: Explore the training statistics
Step6: Some of these columns are obvious, but what do the non-obvious, BQML-specific ML columns mean?
Step7: As you can see, the training loss and evaluation loss are essentially identical. We do not seem to be overfitting.
Step8: More advanced...
Step9: On the same dataset, we will also suppose that it is unknown whether the child is male or female, to simulate that an ultrasound was not performed.
Step10: Bringing these two separate datasets together, we now have a dataset covering children whose sex was determined by ultrasound as well as children for whom it is unknown.
Step11: Create a new model
Step12: Training Statistics
Step13: Make a prediction with the new model
|
3,451
|
<ASSISTANT_TASK:>
Python Code:
%%bash
export PROJECT=$(gcloud config list project --format "value(core.project)")
echo "Your current GCP Project Name is: "$PROJECT
%%bash
pip install tensorflow==2.6.0 --user
import os
PROJECT = "qwiklabs-gcp-bdc77450c97b4bf6" # REPLACE WITH YOUR PROJECT NAME
REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
import tensorflow as tf
print("TensorFlow version: ",tf.version.VERSION)
# Do not change these
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
os.environ["BUCKET"] = PROJECT # DEFAULT BUCKET WILL BE PROJECT ID
if PROJECT == "your-gcp-project-here":
print("Don't forget to update your PROJECT name! Currently:", PROJECT)
%%bash
## Create a BigQuery dataset for serverlessml if it doesn't exist
datasetexists=$(bq ls -d | grep -w serverlessml)
if [ -n "$datasetexists" ]; then
echo -e "BigQuery dataset already exists, let's not recreate it."
else
echo "Creating BigQuery dataset titled: serverlessml"
bq --location=US mk --dataset \
--description 'Taxi Fare' \
$PROJECT:serverlessml
echo "\nHere are your current datasets:"
bq ls
fi
## Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${PROJECT}/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${PROJECT}
echo "\nHere are your current buckets:"
gsutil ls
fi
%%bigquery
CREATE OR REPLACE MODEL serverlessml.model1_rawdata
OPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg') AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
%%bigquery
SELECT * FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata)
%%bigquery
SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata)
%%bigquery
SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model1_rawdata,
(
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 2
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
))
%%bigquery
CREATE OR REPLACE TABLE serverlessml.cleaned_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
%%bigquery
-- LIMIT 0 is a free query; this allows us to check that the table exists.
SELECT * FROM serverlessml.cleaned_training_data
LIMIT 0
%%bigquery
CREATE OR REPLACE MODEL serverlessml.model2_cleanup
OPTIONS(input_label_cols=['fare_amount'], model_type='linear_reg') AS
SELECT
*
FROM
serverlessml.cleaned_training_data
%%bigquery
SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model2_cleanup)
%%bigquery
-- This model type is in alpha, so it may not work for you yet. This training takes on the order of 15 minutes.
CREATE OR REPLACE MODEL serverlessml.model3b_dnn
OPTIONS(input_label_cols=['fare_amount'], model_type='dnn_regressor', hidden_units=[32, 8]) AS
SELECT
*
FROM
serverlessml.cleaned_training_data
%%bigquery
SELECT SQRT(mean_squared_error) AS rmse FROM ML.EVALUATE(MODEL serverlessml.model3b_dnn)
%%bigquery
SELECT SQRT(mean_squared_error) AS rmse
FROM ML.EVALUATE(MODEL serverlessml.model3b_dnn,
(
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
))
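-- (Illustrative sketch, not part of the original notebook.) Once the RMSE is
-- acceptable, BigQuery ML can serve predictions with ML.PREDICT; the input
-- row below is a made-up example trip:
--   SELECT * FROM ML.PREDICT(MODEL serverlessml.model2_cleanup, (
--     SELECT -73.98 AS pickuplon, 40.75 AS pickuplat,
--            -73.99 AS dropofflon, 40.74 AS dropofflat,
--            1.0 AS passengers))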
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's make sure we install the necessary version of tensorflow. After doing the pip install above, click Restart the kernel on the notebook so that the Python environment picks up the new packages.
Step2: Create a BigQuery Dataset and Google Cloud Storage Bucket
Step3: Model 1
Step4: Once the training is done, visit the BigQuery Cloud Console and look at the model that has been trained. Then, come back to this notebook.
Step5: Let's report just the error we care about, the Root Mean Squared Error (RMSE)
Step6: We told you it was not going to be good! Recall that our heuristic got 8.13, and our target is $6.
Step7: Model 2
Step8: Model 3
Step9: Nice!
|
3,452
|
<ASSISTANT_TASK:>
Python Code:
from collections import defaultdict # needed for the defaultdict calls below
import pandas
# NOTE: qidnames (a QID -> English-name mapping used further down) is assumed
# to be loaded elsewhere in the original notebook.
bigdf = pandas.read_csv('/media/notconfusing/9d9b45fc-55f7-428c-a228-1c4c4a1b728c/home/maximilianklein/snapshot_data/2016-01-03/gender-index-data-2016-01-03.csv')
gender_qid_df = bigdf[['qid','gender']]
def map_gender(x):
if isinstance(x,float):
return 'no gender'
else:
gen = x.split('|')[0]
if gen == 'Q6581072':
return 'female'
elif gen == 'Q6581097':
return 'male'
else:
return 'nonbin'
gender_qid_df['gender'] = gender_qid_df['gender'].apply(map_gender)
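# (Illustrative check, not in the original notebook.) Inspect the resulting
# gender distribution with standard pandas value_counts():
print(gender_qid_df['gender'].value_counts())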
def qid2enname(x):
try:
return qidnames[x]
except KeyError:
return None
gender_qid_df['enname'] = gender_qid_df['qid'].apply(qid2enname)
enname_id = pandas.read_csv('/home/notconfusing/workspace/wikidumpparse/wikidump/mediawiki-utilities/enname_id.txt',sep='\t',names=['enname','pageid'])
gender_page_id = pandas.merge(gender_qid_df, enname_id, how='inner',on='enname')
pah_gender = pandas.merge(pah, gender_page_id, how='left', on='pageid')
pah_gender
len(pah), len(gender_page_id), len(pah_gender)
pah_gender['gender'] = pah_gender['gender'].fillna('nonbio')
SE = pah_gender[(pah_gender['dissonance'] == 'Moderate negative') | (pah_gender['dissonance'] == 'High negative')]
NI = pah_gender[(pah_gender['dissonance'] == 'Moderate positive') | (pah_gender['dissonance'] == 'High positive')]
rel_risk = defaultdict(dict)
for risk, risk_name in [(SE,'Spent Effort'), (NI,'Needs Improvement')]:
for gender in ['female','male','nonbin','nonbio']:
gen_mis = len(risk[risk['gender'] == gender])
p_gen_mis = gen_mis/len(risk) #p(gender|misalignment)
p_gen = len(pah_gender[pah_gender['gender'] == gender]) / len(pah_gender) #p(gender)
print(p_gen_mis, p_gen)
        rel_risk[gender][risk_name] = p_gen_mis / p_gen # relative risk
java_min_int = -2147483648
allrecs = pandas.read_csv('/media/notconfusing/9d9b45fc-55f7-428c-a228-1c4c4a1b728c/home/maximilianklein/snapshot_data/2016-01-03/gender-index-data-2016-01-03.csv',na_values=[java_min_int])
def sum_column(q_str):
if type(q_str) is str:
qs = q_str.split('|')
return len(qs) #cos the format will always end with a |
for col in ['site_links']:
allrecs[col] = allrecs[col].apply(sum_column)
allrecs['site_links'].head(20)
allrecs['gender'] = allrecs['gender'].apply(map_gender)
sl_risk = defaultdict(dict)
sl_risk['nonbio']['Sitelink Ratio'] = 1
for gender in ['female','male','nonbin']:
gend_df = allrecs[allrecs['gender']==gender]
gend_df_size = len(gend_df)
avg_sl = (gend_df['site_links'].sum() / gend_df_size) / 2.6
sl_risk[gender]['Sitelink Ratio'] = avg_sl
sl_risk_df = pandas.DataFrame.from_dict(sl_risk, orient='index')
rel_risk_df = pandas.DataFrame.from_dict(rel_risk,orient="index")
risk_df = pandas.DataFrame.join(sl_risk_df,rel_risk_df)
risk_df.index = ['Female','Male','Non-binary','Non-biography']
print(risk_df.to_latex(columns = ['Needs Improvement','Spent Effort', 'Sitelink Ratio'],float_format=lambda n:'%.2f' %n))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Relative risk: P(gender|misaligned) / P(gender)
|
3,453
|
<ASSISTANT_TASK:>
Python Code:
# Python language version
from platform import python_version
print('Python version used in this Jupyter Notebook:', python_version())
# Exercise 1 - Print the numbers from 1 to 10 on the screen. Use a list to store the numbers.
# Exercise 2 - Create a list of 5 objects and print it on the screen
# Exercise 3 - Create two strings and concatenate them into a third string
# Exercise 4 - Create a tuple with the following elements: 1, 2, 2, 3, 4, 4, 4, 5 and then use the tuple
# object's count function to check how many times the number 4 appears in the tuple
# Exercise 5 - Create an empty dictionary and print it on the screen
# Exercise 6 - Create a dictionary with 3 keys and 3 values and print it on the screen
# Exercise 7 - Add one more element to the dictionary created in the previous exercise and print it on the screen
# Exercise 8 - Create a dictionary with 3 keys and 3 values. One of the values must be a list of 2 numeric elements.
# Print the dictionary on the screen.
# Exercise 9 - Create a list with 4 elements. The first element must be a string,
# the second a tuple of 2 elements, the third a dictionary with 2 keys and 2 values and
# the fourth element a float value.
# Print the list on the screen.
# Exercise 10 - Consider the string below. Print on the screen only the characters from position 1 to 18.
frase = 'Cientista de Dados é o profissional mais sexy do século XXI'
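# (One possible solution sketch for Exercise 1, shown only as an illustration;
# the exercises above are intentionally left open for the reader.)
numbers = list(range(1, 11))
print(numbers)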
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercises Cap02
|
3,454
|
<ASSISTANT_TASK:>
Python Code:
import gensim, logging, os
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class Corpus(object):
    '''Corpus class that allows reading a directory of text documents sequentially'''
def __init__(self, directorio):
self.directory = directorio
def __iter__(self):
for fichero in os.listdir(self.directory):
for linea in open(os.path.join(self.directory, fichero)):
yield linea.split()
CORPUSDIR = '/opt/textos/efe/txt/'
oraciones = Corpus(CORPUSDIR)
#model = gensim.models.Word2Vec(oraciones, min_count=10, size=150, workers=2)
# the model can also be trained in two successive but separate steps
#model = gensim.models.Word2Vec() # empty model
#model.build_vocab(oraciones) # first pass to build the vocabulary list
#model.train(other_sentences) # second pass to compute the vectors
#model.save('/opt/textos/efe/efe.model.w2v')
model = gensim.models.Word2Vec.load('/opt/textos/efe/efe.model.w2v')
print(model.corpus_count)
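# (Illustrative check, assuming the pre-4.0 gensim API used above.) The size
# of the learned vocabulary is exposed through model.wv.vocab:
print(len(model.wv.vocab))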
print(model['azul'], '\n')
print(model['verde'], '\n')
print(model['microsoft'])
print('hombre - mujer', model.similarity('hombre', 'mujer'))
print('madrid - parís', model.similarity('madrid', 'parís'))
print('perro - gato', model.similarity('perro', 'gato'))
print('gato - periódico', model.similarity('gato', 'periódico'))
lista1 = 'madrid barcelona gonzález washington'.split()
print('in the list', ' '.join(lista1), 'the odd one out is:', model.doesnt_match(lista1))
lista2 = 'psoe pp ciu epi'.split()
print('in the list', ' '.join(lista2), 'the odd one out is:', model.doesnt_match(lista2))
lista3 = 'publicaron declararon soy negaron'.split()
print('in the list', ' '.join(lista3), 'the odd one out is:', model.doesnt_match(lista3))
lista3 = 'homero saturno cervantes shakespeare cela'.split()
print('in the list', ' '.join(lista3), 'the odd one out is:', model.doesnt_match(lista3))
terminos = 'psoe chicago sevilla aznar podemos estuvieron'.split()
for t in terminos:
print(t, '==>', model.most_similar(t), '\n')
print('==> alcalde + mujer - hombre')
most_similar = model.most_similar(positive=['alcalde', 'mujer'], negative=['hombre'], topn=3)
for item in most_similar:
print(item)
print('==> madrid + filipinas - españa')
most_similar = model.most_similar(positive=['madrid', 'filipinas'], negative=['españa'], topn=3)
for item in most_similar:
print(item)
print('==> michel + fútbol + argentina - españa')
most_similar = model.most_similar(positive=['michel', 'fútbol', 'argentina'], negative=['españa'], topn=3)
for item in most_similar:
print(item)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Training a model
Step2: CORPUSDIR contains a collection of news articles in Spanish (previously normalized to lowercase and stripped of punctuation) with around 150 million words. We train a model in a single step, ignoring tokens that appear fewer than 10 times in order to discard typos.
Step3: Once training is complete (after almost 30 minutes), we save the model to disk.
Step4: In the future, we will be able to reuse this model by loading it into memory with the instruction
Step5: Testing our model
Step6: Each term in the vocabulary is represented as a vector with 150 dimensions
Step7: These vectors do not tell us much, except that they contain very small numbers
Step8: We can pick out the term that does not fit in a given list of terms using the doesnt_match method
Step9: We can look up the most similar terms using our model's most_similar method
Step10: With the same most_similar method we can combine word vectors, playing with the semantic features of each word to discover new relationships.
|
3,455
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
from scipy.stats import norm
from clintrials.dosefinding.efftox import EffTox, LpNormCurve, efftox_dtp_detail
from clintrials.dosefinding.efficacytoxicity import dose_transition_pathways, print_dtps
real_doses = [7.5, 15, 30, 45]
trial_size = 30
cohort_size = 3
first_dose = 3
prior_tox_probs = (0.025, 0.05, 0.1, 0.25)
prior_eff_probs = (0.2, 0.3, 0.5, 0.6)
tox_cutoff = 0.40
eff_cutoff = 0.45
tox_certainty = 0.05
eff_certainty = 0.03
mu_t_mean, mu_t_sd = -5.4317, 2.7643
beta_t_mean, beta_t_sd = 3.1761, 2.7703
mu_e_mean, mu_e_sd = -0.8442, 1.9786
beta_e_1_mean, beta_e_1_sd = 1.9857, 1.9820
beta_e_2_mean, beta_e_2_sd = 0, 0.2
psi_mean, psi_sd = 0, 1
efftox_priors = [
norm(loc=mu_t_mean, scale=mu_t_sd),
norm(loc=beta_t_mean, scale=beta_t_sd),
norm(loc=mu_e_mean, scale=mu_e_sd),
norm(loc=beta_e_1_mean, scale=beta_e_1_sd),
norm(loc=beta_e_2_mean, scale=beta_e_2_sd),
norm(loc=psi_mean, scale=psi_sd),
]
hinge_points = [(0.4, 0), (1, 0.7), (0.5, 0.4)]
metric = LpNormCurve(hinge_points[0][0], hinge_points[1][1], hinge_points[2][0], hinge_points[2][1])
et = EffTox(real_doses, efftox_priors, tox_cutoff, eff_cutoff, tox_certainty, eff_certainty, metric, trial_size,
first_dose)
outcomes = [(3, 1, 0), (3, 1, 0), (3, 1, 0)]
np.random.seed(123)
et.update(outcomes)
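# (Illustrative, assuming the clintrials dose-finding API exposes a
# next_dose() method.) After updating with the first cohort, the design's
# current dose recommendation can be queried directly:
print(et.next_dose())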
np.random.seed(123)
dtps = dose_transition_pathways(et, next_dose=2, cohort_sizes=[3], cohort_number=2,
cases_already_observed=outcomes, custom_output_func=efftox_dtp_detail,
n=10**5)
print_dtps(dtps)
dtps2 = dose_transition_pathways(et, next_dose=2, cohort_sizes=[3], cohort_number=2,
cases_already_observed=outcomes, custom_output_func=efftox_dtp_detail,
n=10**6)
print_dtps(dtps2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above parameters are explained in the manuscript.
Step2: The EffTox class is an object-oriented implementation of the trial design by Thall & Cook (Thall, P. F., & Cook, J. D. (2004). Dose-Finding Based on Efficacy-Toxicity Trade-Offs. Biometrics, 60(3), 684–693.)
Step3: The next dose recommended is 2.
Step4: These match Table 3 in the publication.
|
3,456
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
from terminaltables import AsciiTable
import inet
inet.__version__
from inet import DataLoader
mydataset = DataLoader('../../data/PV') # create an object with information of all connections
len(mydataset.experiment)
mydataset.nIN, mydataset.nPC # number of PV cells and GC cells recorded
mydataset.configuration # number of recording configurations
print(mydataset.motif) # number of connections tested and found for every type
mydataset.experiment[0] # example of the data from the first experiment
mydataset.experiment[12]['fname'] # mydataset.filename(12)
mydataset.filename(12)
mydataset.experiment[12]['matrix']
mydataset.matrix(12)
print(mydataset.experiment[12]['motif'])
mydataset.motifs(12)
mydataset.IN[2]
y = mydataset.stats()
print(AsciiTable(y).table)
mymotifs = mydataset.motif
info = [
['Connection type', 'Value'],
['PV-PV chemical synapses', mymotifs.ii_chem_found],
['PV-PV electrical synapses', mymotifs.ii_elec_found],
[' ',' '],
['PV-PV bidirectional chemical', mymotifs.ii_c2_found],
['PV-PV divergent chemical', mymotifs.ii_div_found],
['PV-PV convergent chemical', mymotifs.ii_con_found],
['PV-PV linear chemical', mymotifs.ii_lin_found],
[''],
['PV-PV one chemical with electrical', mymotifs.ii_c1e_found],
['PV-PV bidirectional chemical with electrical', mymotifs.ii_c2e_found],
[' ',' '],
['P(PV-PV) chemical synapse', mymotifs.ii_chem_found/mymotifs.ii_chem_tested],
['P(PV-PV) electrical synapse', mymotifs.ii_elec_found/mymotifs.ii_elec_tested],
[''],
['P(PV-PV) bidirectional chemical synapse', mymotifs.ii_c2_found/mymotifs.ii_c2_tested],
['P(div) divergent chemical motifs', mymotifs.ii_div_found/mymotifs.ii_div_tested],
['P(div) convergent chemical motifs', mymotifs.ii_con_found/mymotifs.ii_con_tested],
['P(chain) linear chain motifs', mymotifs.ii_lin_found/mymotifs.ii_lin_tested],
[' ',' '],
['P(PV-PV) one chemical with electrical', mymotifs.ii_c1e_found/mymotifs.ii_c1e_tested],
['P(PV-PV) bidirectional chemical with electrical', mymotifs.ii_c2e_found/mymotifs.ii_c2e_tested],
[' ',' '],
['PV-GC chemical synapses', mymotifs.ie_found],
['GC-PC chemical synapses', mymotifs.ei_found],
[' ',' '],
['P(PV-GC) chemical synapse',mymotifs.ie_found/mymotifs.ie_tested],
['P(GC-PC) chemical synapse', mymotifs.ei_found/mymotifs.ei_tested],
[' ',' '],
]
table = AsciiTable(info)
print (table.table)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <H2>Object creation</H2>
Step2: <H2>Object attributes</H2>
Step3: Details of every experiment are given in a list
Step4: and details of the recording configurations are provided
Step5: as well as the number of configurations in which two PV-positive cells were recorded
Step6: <H2> Descriptive statistics </H2>
|
3,457
|
<ASSISTANT_TASK:>
Python Code:
from owslib.iso import namespaces
# Append gmi namespace to namespaces dictionary.
namespaces.update({"gmi": "http://www.isotc211.org/2005/gmi"})
namespaces.update({"gml": "http://www.opengis.net/gml/3.2"})
# Select RA
RAs = {
"GLOS": "Great Lakes Observing System",
"SCCOOS": "Southern California Coastal Ocean Observing System",
"SECOORA": "Southeast Coastal Ocean Observing Regional Association",
"PacIOOS": "Pacific Islands Ocean Observing System",
"NANOOS": "Northwest Association of Networked Ocean Observing Systems",
}
ra = RAs["SCCOOS"]
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
# Generate geoportal query and georss feed.
# Base geoportal url.
baseurl = "https://data.nodc.noaa.gov/" "geoportal/rest/find/document" "?searchText="
# Identify the project.
project = (
"dataThemeprojects:"
'"Integrated Ocean Observing System '
'Data Assembly Centers Data Stewardship Program"'
)
# Identify the Regional Association
ra = ' AND "{}" '.format(ra)
# Identify the platform.
platform = 'AND "FIXED PLATFORM"'
# Identify the amount of records and format of the response: 1 to 1010 records.
records = "&start=1&max=1010"
# Identify the format of the response: georss.
response_format = "&f=georss"
# Combine the URL.
url = "{}{}".format(baseurl, quote(project + ra + platform) + records + response_format)
print("Identified response format:\n{}".format(url))
print(
"\nSearch page response:\n{}".format(url.replace(response_format, "&f=searchPage"))
)
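# (Side note, an assumption about the Esri Geoportal REST API.) Other response
# formats such as JSON can be requested by swapping the format parameter:
# url_json = url.replace(response_format, "&f=json")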
# Query the NCEI Geoportal and parse the georss response.
from lxml import etree
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
f = urlopen(url) # Open georss response.
url_string = f.read() # Read response into string.
# Create etree object from georss response.
url_root = etree.fromstring(url_string)
# Find all iso record links.
iso_record = url_root.findall("channel/item/link")
print("Found %i records" % len(iso_record))
for item in iso_record:
print(item.text) # URL to ISO19115-2 record.
# Process each iso record.
%matplotlib inline
import re
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from owslib import util
fig, ax = plt.subplots(figsize=(15, 12))
i = 0
accenos = []
# For each accession in geo.rss response.
for item in iso_record:
# Opens the iso xml web reference.
iso_url = urlopen(item.text)
# Creates tree element.
iso_tree = etree.ElementTree(file=urlopen(item.text))
# Gets the root from tree element.
root = iso_tree.getroot()
# Pulls out identifier string.
ident = root.find(
util.nspath_eval("gmd:fileIdentifier/gco:CharacterString", namespaces)
)
# Pulls out 7-digit accession number from identifier.
acce = re.search("[0-9]{7}", util.testXMLValue(ident))
# Adds accession number to accenos list.
accenos.append(acce.group(0))
print("Accession Number = %s" % acce.group(0))
# Collect Publication date information.
date_path = (
"gmd:identificationInfo/"
"gmd:MD_DataIdentification/"
"gmd:citation/"
"gmd:CI_Citation/"
"gmd:date/"
"gmd:CI_Date/"
"gmd:date/gco:Date"
)
# First published date.
pubdate = root.find(util.nspath_eval(date_path, namespaces))
print("First published date = %s" % util.testXMLValue(pubdate))
# Collect Provider Platform Codes (if it has it).
for tag in root.getiterator(util.nspath_eval("gco:CharacterString", namespaces)):
if tag.text == "Provider Platform Codes":
# Backs up to the MD_keywords element.
node = tag.getparent().getparent().getparent().getparent()
for item in node.findall(
util.nspath_eval("gmd:keyword/gco:CharacterString", namespaces)
):
print("Provider Platform Code = %s" % item.text)
# Pull out the version information.
# Iterate through each processing step which is an NCEI version.
for tag in root.getiterator(util.nspath_eval("gmd:processStep", namespaces)):
# Only parse gco:DateTime and gmd:title/gco:CharacterString.
vers_title = (
"gmi:LE_ProcessStep/"
"gmi:output/"
"gmi:LE_Source/"
"gmd:sourceCitation/"
"gmd:CI_Citation/"
"gmd:title/gco:CharacterString"
)
vers_date = "gmi:LE_ProcessStep/" "gmd:dateTime/gco:DateTime"
if tag.findall(util.nspath_eval(vers_date, namespaces)) and tag.findall(
util.nspath_eval(vers_title, namespaces)
):
# Extract dateTime for each version.
datetimes = tag.findall(util.nspath_eval(vers_date, namespaces))
# Extract title string (contains version number).
titles = tag.findall(util.nspath_eval(vers_title, namespaces))
            print(
                "{} = {}".format(
                    util.testXMLValue(titles[0]), util.testXMLValue(datetimes[0])
                )
            )
# Collect package size information.
# Iterate through transfersize nodes.
for tag in root.getiterator(util.nspath_eval("gmd:transferSize", namespaces)):
# Only go into first gco:Real (where size exists).
if tag.find(util.nspath_eval("gco:Real", namespaces)).text:
# Extract size.
sizes = tag.find(util.nspath_eval("gco:Real", namespaces))
print("Current AIP Size = %s MB" % sizes.text)
break
# Only use first size instance, all gco:Real attributes are the same.
break
# Bounding time for AIP.
for tag in root.getiterator(util.nspath_eval("gml:TimePeriod", namespaces)):
# If text exists in begin or end position nodes.
if (
tag.find(util.nspath_eval("gml:beginPosition", namespaces)).text
and tag.find(util.nspath_eval("gml:endPosition", namespaces)).text
):
start_date = tag.find(
util.nspath_eval("gml:beginPosition", namespaces)
).text
end_date = tag.find(util.nspath_eval("gml:endPosition", namespaces)).text
print("Bounding Time = %s TO %s\n" % (start_date, end_date))
# Plotting routine for each accession, plot start-end as timeseries for each accession.
# Create datetime objects for start_date and end_date.
date1 = datetime(
int(start_date.split("-")[0]),
int(start_date.split("-")[1]),
int(start_date.split("-")[2]),
)
date2 = datetime(
int(end_date.split("-")[0]),
int(end_date.split("-")[1]),
int(end_date.split("-")[2]),
)
dates = [date1, date2]
i += 1 # Counter for plotting.
y = [i, i]
# Plot the timeseries.
ax.plot_date(x=dates, y=y, fmt="-", color="b", linewidth=6.0)
# Clean up the plot.
ax.set_ylim([0, i + 1])
years = mdates.YearLocator()
months = mdates.MonthLocator()
yearsFmt = mdates.DateFormatter("%Y")
ax.xaxis.grid(True)
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt) # Format the xaxis labels.
ax.xaxis.set_minor_locator(months)
ax.xaxis.set_ticks_position("bottom")
ax.yaxis.grid(True)
ax.set(yticks=np.arange(1, len(accenos) + 1))
ax.tick_params(which="both", direction="out")
ax.set_yticklabels(accenos)
plt.ylabel("NCEI Accession Number")
title = ax.set_title("%s Data Archived at NCEI" % ra)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we select a Regional Association
Step2: Next we generate a geoportal query and georss feed
Step3: Time to query the portal and parse out the georss response
Step4: Let's plot what we have found
|
3,458
|
<ASSISTANT_TASK:>
Python Code:
from keras.layers import Conv2D, MaxPooling2D, Input, Dense, Flatten, Activation, add, Lambda
from keras.layers.normalization import BatchNormalization
from keras.layers.pooling import GlobalAveragePooling2D
from keras.optimizers import RMSprop
from keras.backend import tf as ktf
from keras.models import Model, Sequential, load_model
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.applications.resnet50 import ResNet50
from lib.data_utils import get_MNIST_data
data = get_MNIST_data(num_validation=0, fit=True)
# see if we get the data correctly
print('image size: ', data['X_train'].shape)
# build the model
# preprocess to (28,28,3), then build a resize layer using tf.resize_images() to (224,224,3) as input
inputs = Input(shape=(28,28,3))
inputs_resize = Lambda(lambda img: ktf.image.resize_images(img, (224,224)))(inputs) # resize layer
resnet50 = ResNet50(include_top=False, input_tensor=inputs_resize, input_shape=(224,224,3), pooling='avg')
x = resnet50.output
#x = Dense(units=1024, activation='relu')(x)
predictions = Dense(units=10, activation='softmax')(x)
# connect the model
freezemodel = Model(inputs=inputs, outputs=predictions)
#freezemodel.summary()
# freeze all ResNet50 layers
for layer in resnet50.layers:
layer.trainable = False
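# (Illustrative check, not in the original.) With the backbone frozen, only
# the final Dense layer's kernel and bias should remain trainable:
print(len(freezemodel.trainable_weights)) # expected: 2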
# set the loss and optimizer
freezemodel.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# fit the model
checkpoint = ModelCheckpoint('../models/freezeResNet_{epoch:02d}-{loss:.2f}.h5',
monitor='loss',
save_best_only=True)
freezemodel.fit(data['X_train'], data['y_train'].reshape(-1,1),
batch_size=16, epochs=10, callbacks=[checkpoint], initial_epoch=1)
# test the model and see accuracy
score = freezemodel.evaluate(data['X_test'], data['y_test'].reshape(-1, 1), batch_size=32)
print(score)
# save the model: 0.96
freezemodel.save('ResNet50_freeze.h5')
# continue the model training
freezemodel = load_model('../models/ResNet50_freeze.h5', custom_objects={'ktf': ktf})
# set the loss and optimizer
rmsprop = RMSprop(lr=0.0001)
freezemodel.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# fit the model
checkpoint = ModelCheckpoint('../models/freezeResNet_{epoch:02d}-{loss:.2f}.h5',
monitor='loss',
save_best_only=True)
freezemodel.fit(data['X_train'], data['y_train'].reshape(-1, 1),
batch_size=16, epochs=10, callbacks=[checkpoint], initial_epoch=4)
# build the model
# preprocess to (28,28,3), then build a resize layer using tf.resize_images() to (224,224,3) as input
inputs = Input(shape=(28,28,3))
inputs_resize = Lambda(lambda img: ktf.image.resize_images(img, (224,224)))(inputs) # resize layer
resnet50 = ResNet50(include_top=False, input_tensor=inputs_resize, input_shape=(224,224,3), pooling='avg')
x = resnet50.output
#x = Dense(units=1024, activation='relu')(x)
predictions = Dense(units=10, activation='softmax')(x)
# connect the model
tunemodel = Model(inputs=inputs, outputs=predictions)
#freezemodel.summary()
# set the loss and optimizer
rmsprop = RMSprop(lr=0.0001)
tunemodel.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# fit the model
checkpoint = ModelCheckpoint('../models/tuneResNet_{epoch:02d}-{loss:.2f}.h5',
monitor='loss',
save_best_only=True)
tunemodel.fit(data['X_train'], data['y_train'].reshape(-1, 1),
batch_size=16, epochs=10, callbacks=[checkpoint], initial_epoch=0)
# test the model and see accuracy
score = tunemodel.evaluate(data['X_test'], data['y_test'].reshape(-1, 1), batch_size=32)
print(score)
# build the model
# preprocess to (28,28,3), then build a resize layer using tf.resize_images() to (224,224,3) as input
inputs = Input(shape=(28,28,3))
inputs_resize = Lambda(lambda img: ktf.image.resize_images(img, (224,224)))(inputs) # resize layer
resnet50 = ResNet50(include_top=False, input_tensor=inputs_resize, input_shape=(224,224,3), pooling='avg')
x = resnet50.output
predictions = Dense(units=10, activation='softmax')(x)
# connect the model
tunemodel = Model(inputs=inputs, outputs=predictions)
# set the loss and optimizer
rmsprop = RMSprop(lr=0.0001)
tunemodel.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# fit the model
checkpoint = ModelCheckpoint('../models/tuneResNet_early_{epoch:02d}-{loss:.2f}.h5',
monitor='loss',
save_best_only=True)
earlystop = EarlyStopping(min_delta=0.0001, patience=1)
tunemodel.fit(data['X_train'], data['y_train'].reshape(-1, 1),
batch_size=16, epochs=10, callbacks=[checkpoint, earlystop], initial_epoch=0)
# test the model and see accuracy
# Note: Keras evaluate() does not accept a validation_data argument, so it is omitted here.
score = tunemodel.evaluate(data['X_test'], data['y_test'].reshape(-1, 1),
                           batch_size=16)
print(score)
from lib.data_utils import create_submission
from keras.models import load_model
# for freeze ResNet50 model (3 epochs)
simple_CNN = load_model('../models/freezeResNet_03-0.09.h5', custom_objects={'ktf': ktf})
print('Load model successfully.')
create_submission(simple_CNN, '../data/test.csv', '../submission/submission_freezeResNet_03.csv', 16, fit=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Read the MNIST data. Notice that we assume it's 'kaggle-DigitRecognizer/data/train.csv', and we use a helper function to read it into a dictionary.
Step2: Freeze-weights transfer
Step3: Fine-tune transfer
Step4: Fine-tune transfer with early stopping
Step5: Create submissions
|
3,459
|
<ASSISTANT_TASK:>
Python Code:
def net_force(mass, acceleration):
return mass * acceleration
# Function as above, but with DOCUMENTATION
def net_force(mass, acceleration):
    """Calculates f=ma, returns force.

    We assume mass, acceleration are of type int/float.
    """
    return mass * acceleration
def add(a, b):
    """returns a + b, a and b should be integers"""
    return a + b
def add2(a, b):
    # btw, the "==" symbol is asking the question "is this equal to that". More on this later.
    assert type(a) == int, "'{}' is not an integer".format(a)
    assert type(b) == int, "'{}' is not an integer".format(b)
    return a + b
add2("hello", "world")
print( "✓" + "✓") # strings (rememeber strings support *any* unicode character)
print( 1 + 2) # integers
print( [1] + [2]) # lists
print( (1,) + (2,)) # tuples
def net_force(mass, acceleration):
    """Returns mass * acceleration

    I trust you guys will use this function responsibly.
    """
    return mass * acceleration
print(net_force(10, 10))
print(net_force("2", 10))
print(net_force([1], 10 ))
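# (Illustrative sketch, not from the original text.) A middle ground between
# "trust the user" and hard asserts is an explicit isinstance() check with a
# clear error message:
def net_force_checked(mass, acceleration):
    if not isinstance(mass, (int, float)) or not isinstance(acceleration, (int, float)):
        raise TypeError("mass and acceleration must be numbers")
    return mass * acceleration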
# one more use of the '*' operator, this is called "unpacking"...
x = "12345"
print( *x, sep="_Batman_")
def chemotherapy_radiation_dose(patient_weight, cancer_stage):
radiation_dose = patient_weight * cancer_stage
return radiation_dose
print(chemotherapy_radiation_dose(65, 2))
print(chemotherapy_radiation_dose("65", 2))
def parse_results(number):
return "The correct dose for the patient is " + number
parse_results(130)
def parse_results(number):
return "The correct dose for the patient is " + str(number)
# ^ The change is right here
print(parse_results(130))
print(parse_results("6565"))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: I argued that one of the main drawbacks of this code is that "[it] implicitly assumes the user knows to pass in numbers". In this lecture I aim to do two things. Firstly, I want you to understand what the problem is from a technical point of view. Secondly, I want you to grasp the importance of what is potentially at stake.
Step4: In the documentation above we mention clearly what our function does and how it is to be used. Take particular note to the part where we say mass and acceleration are supposed to be floats or integers. Telling our end-user about how we expect our code to be used helps them avoid bugs and other software defects that could result from improper use. BUT in Python (unlike some other statically-typed languages) the 'python way' is to allow the user to do whatever they want; we just trust that they will be responsible.
Step5: 'add' and 'add2' are very similar functions, the main difference is that 'add' has a docstring that explains what the function does and also explains what the expected input is. 'add2' doesn't have documentation and it 'forces' the user to pass in integers. We haven't seen the assert statement yet, but basically what it does is it tells Python to throw an error whenever the condition is not met. In this case, if we give this function a string it will not work.
Step6: Of these two functions 'add' is more Pythonic, the code is more flexible and it trusts the user to be responsible. Meanwhile the function 'add2' is much more rigid and demands to be used in a certain way. Overall you should try to write code more like the 'add' function as opposed to 'add2'.
Step8: The salient point is to notice is that the ‘+’ operator works not just on integers, but also tuples, lists, strings, class objects and so on. This is what we call 'operator overloading'.
Step9: Notice that, just like the '+' operator '*' can mean different things in different contexts, thus we we give our net_force function strings, lists, and integers it behaves differently in each case.
Step10: Because all of these uses are well-defined Python does not throw an error when you try to multiply an integer by a string; In each case the code "worked", and I'd argue that’s the problem!
Step11: Okay, we have called the chemo function twice and we get the number 130 and the string 6565 returned to us. We shall use those values in a jiffy. Okay, now lets suppose that part of our program has a 'parse results' function. The task of this function is to get the results ready for printing.
Step12: So in the above code I called parse_results with the number 130 and the function returns an error. Now, since are expecting to be given numbers as input and we want to return a string it makes sense to use str() to convert the number. Afterall, a program that spins errors when we give it correct input is of no use at all.
|
3,460
|
<ASSISTANT_TASK:>
Python Code:
import os
import pandas as pd
import requests
from windpowerlib import ModelChain, WindTurbine, create_power_curve
from windpowerlib import data as wt
import logging
logging.getLogger().setLevel(logging.DEBUG)
def get_weather_data(filename='weather.csv', **kwargs):
    r"""
Imports weather data from a file.
The data include wind speed at two different heights in m/s, air
temperature in two different heights in K, surface roughness length in m
and air pressure in Pa. The height in m for which the data applies is
specified in the second row.
In case no weather data file exists, an example weather data file is
automatically downloaded and stored in the same directory as this example.
Parameters
----------
filename : str
Filename of the weather data file. Default: 'weather.csv'.
Other Parameters
----------------
datapath : str, optional
Path where the weather data file is stored.
Default is the same directory this example is stored in.
Returns
-------
:pandas:`pandas.DataFrame<frame>`
DataFrame with time series for wind speed `wind_speed` in m/s,
temperature `temperature` in K, roughness length `roughness_length`
in m, and pressure `pressure` in Pa.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name as string (e.g. 'wind_speed') and the
second level contains the height as integer at which it applies
(e.g. 10, if it was measured at a height of 10 m). The index is a
    DateTimeIndex.

    """
if 'datapath' not in kwargs:
kwargs['datapath'] = os.path.dirname(__file__)
file = os.path.join(kwargs['datapath'], filename)
# download example weather data file in case it does not yet exist
if not os.path.isfile(file):
logging.debug("Download weather data for example.")
req = requests.get("https://osf.io/59bqn/download")
with open(file, "wb") as fout:
fout.write(req.content)
# read csv file
weather_df = pd.read_csv(
file,
index_col=0,
header=[0, 1],
date_parser=lambda idx: pd.to_datetime(idx, utc=True))
# change time zone
weather_df.index = weather_df.index.tz_convert(
'Europe/Berlin')
return weather_df
# Read weather data from csv
weather = get_weather_data(filename='weather.csv', datapath='')
print(weather[['wind_speed', 'temperature', 'pressure']][0:3])
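# (Illustrative check, not in the original example.) The weather DataFrame
# columns are a MultiIndex of (variable, height in m):
print(weather.columns.tolist())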
# get power curves
# get names of wind turbines for which power curves and/or are provided
# set print_out=True to see the list of all available wind turbines
df = wt.get_turbine_types(print_out=False)
# find all Enercons
print(df[df["manufacturer"].str.contains("Enercon")])
# find all Enercon 101 turbines
print(df[df["turbine_type"].str.contains("E-101")])
# specification of wind turbine where power curve is provided in the
# oedb turbine library
enercon_e126 = {
'turbine_type': 'E-126/4200', # turbine type as in oedb turbine library
'hub_height': 135 # in m
}
# initialize WindTurbine object
e126 = WindTurbine(**enercon_e126)
# specification of own wind turbine (Note: power curve values and
# nominal power have to be in Watt)
my_turbine = {
'nominal_power': 3e6, # in W
'hub_height': 105, # in m
'power_curve': pd.DataFrame(
data={'value': [p * 1000 for p in [
0.0, 26.0, 180.0, 1500.0, 3000.0, 3000.0]], # in W
'wind_speed': [0.0, 3.0, 5.0, 10.0, 15.0, 25.0]}) # in m/s
}
# initialize WindTurbine object
my_turbine = WindTurbine(**my_turbine)
# specification of wind turbine where power coefficient curve and nominal
# power is provided in an own csv file
# Read your turbine data from your data file using functions like
# pandas.read_csv().
# >>> import pandas as pd
# >>> my_data = pd.read_csv("path/to/my/data/file")
# >>> my_power = my_data["my_power"]
# >>> my_wind_speed = my_data["my_wind_speed"]
my_power = pd.Series(
[0.0, 39000.0, 270000.0, 2250000.0, 4500000.0, 4500000.0]
)
my_wind_speed = (0.0, 3.0, 5.0, 10.0, 15.0, 25.0)
my_turbine2 = {
"nominal_power": 6e6, # in W
"hub_height": 115, # in m
"power_curve": create_power_curve(
wind_speed=my_wind_speed, power=my_power
),
}
my_turbine2 = WindTurbine(**my_turbine2)
# power output calculation for e126
# own specifications for ModelChain setup
modelchain_data = {
'wind_speed_model': 'logarithmic', # 'logarithmic' (default),
# 'hellman' or
# 'interpolation_extrapolation'
'density_model': 'ideal_gas', # 'barometric' (default), 'ideal_gas'
# or 'interpolation_extrapolation'
'temperature_model': 'linear_gradient', # 'linear_gradient' (def.) or
# 'interpolation_extrapolation'
'power_output_model':
'power_coefficient_curve', # 'power_curve' (default) or
# 'power_coefficient_curve'
'density_correction': True, # False (default) or True
'obstacle_height': 0, # default: 0
    'hellman_exp': None}  # None (default) or float
# initialize ModelChain with own specifications and use run_model method to
# calculate power output
mc_e126 = ModelChain(e126, **modelchain_data).run_model(
weather)
# write power output time series to WindTurbine object
e126.power_output = mc_e126.power_output
# power output calculation for my_turbine
# initialize ModelChain with default parameters and use run_model
# method to calculate power output
mc_my_turbine = ModelChain(my_turbine).run_model(weather)
# write power output time series to WindTurbine object
my_turbine.power_output = mc_my_turbine.power_output
# power output calculation for example_turbine
# own specification for 'power_output_model'
mc_example_turbine = ModelChain(
my_turbine2,
wind_speed_model='hellman').run_model(weather)
my_turbine2.power_output = mc_example_turbine.power_output
# try to import matplotlib
logging.getLogger().setLevel(logging.WARNING)
try:
from matplotlib import pyplot as plt
# matplotlib inline needed in notebook to plot inline
%matplotlib inline
except ImportError:
plt = None
# plot turbine power output
if plt:
e126.power_output.plot(legend=True, label='Enercon E126')
my_turbine.power_output.plot(legend=True, label='myTurbine')
my_turbine2.power_output.plot(legend=True, label='myTurbine2')
plt.xlabel('Time')
plt.ylabel('Power in W')
plt.show()
# plot power (coefficient) curves
if plt:
if e126.power_coefficient_curve is not None:
e126.power_coefficient_curve.plot(
x='wind_speed', y='value', style='*',
title='Enercon E126 power coefficient curve')
plt.xlabel('Wind speed in m/s')
plt.ylabel('Power in W')
plt.show()
if e126.power_curve is not None:
e126.power_curve.plot(x='wind_speed', y='value', style='*',
title='Enercon E126 power curve')
plt.xlabel('Wind speed in m/s')
plt.ylabel('Power in W')
plt.show()
if my_turbine.power_coefficient_curve is not None:
my_turbine.power_coefficient_curve.plot(
x='wind_speed', y='value', style='*',
title='myTurbine power coefficient curve')
plt.xlabel('Wind speed in m/s')
plt.ylabel('Power in W')
plt.show()
if my_turbine.power_curve is not None:
my_turbine.power_curve.plot(x='wind_speed', y='value', style='*',
title='myTurbine power curve')
plt.xlabel('Wind speed in m/s')
plt.ylabel('Power in W')
plt.show()
if my_turbine2.power_coefficient_curve is not None:
my_turbine2.power_coefficient_curve.plot(
x='wind_speed', y='value', style='*',
title='myTurbine2 power coefficient curve')
plt.xlabel('Wind speed in m/s')
plt.ylabel('Power in W')
plt.show()
if my_turbine2.power_curve is not None:
my_turbine2.power_curve.plot(x='wind_speed', y='value', style='*',
title='myTurbine2 power curve')
plt.xlabel('Wind speed in m/s')
plt.ylabel('Power in W')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can use the logging package to get logging messages from the windpowerlib. Change the logging level if you want more or less messages.
Step3: Import weather data
Step4: Initialize wind turbine
Step5: Use the ModelChain to calculate turbine power output
Step6: Plot results
|
3,461
|
<ASSISTANT_TASK:>
Python Code:
!pip install --upgrade --user pixiedust
!pip install --upgrade --user pixiedust-flightpredict
import pixiedust_flightpredict
pixiedust_flightpredict.configure()
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.linalg import Vectors
from numpy import array
import numpy as np
import math
from datetime import datetime
from dateutil import parser
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
logRegModel = LogisticRegressionWithLBFGS.train(labeledTrainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, iterations=1000, validateData=False, intercept=False)
print(logRegModel)
from pyspark.mllib.classification import NaiveBayes
# NaiveBayes requires non-negative features, set them to 0 for now
modelNaiveBayes = NaiveBayes.train(labeledTrainingData.map(lambda lp: LabeledPoint(lp.label, \
np.fromiter(map(lambda x: x if x>0.0 else 0.0,lp.features.toArray()),dtype=np.int)\
))\
)
print(modelNaiveBayes)
from pyspark.mllib.tree import DecisionTree
modelDecisionTree = DecisionTree.trainClassifier(labeledTrainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, numClasses=training.getNumClasses(), categoricalFeaturesInfo={})
print(modelDecisionTree)
from pyspark.mllib.tree import RandomForest
modelRandomForest = RandomForest.trainClassifier(labeledTrainingData.map(lambda lp: LabeledPoint(lp.label,\
np.fromiter(map(lambda x: 0.0 if np.isnan(x) else x,lp.features.toArray()),dtype=np.double )))\
, numClasses=training.getNumClasses(), categoricalFeaturesInfo={},numTrees=100)
print(modelRandomForest)
display(testData)
import pixiedust_flightpredict
from pixiedust_flightpredict import *
pixiedust_flightpredict.flightPredict("LAS")
import pixiedust_flightpredict
pixiedust_flightpredict.displayMapResults()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: If PixieDust was just installed or upgraded, restart the kernel before continuing.
Step2: Train multiple classification models
Step3: Evaluate the models
Step4: Run the predictive model application
Step5: Get aggregated results for all the flights that have been predicted.
|
3,462
|
<ASSISTANT_TASK:>
Python Code:
record_f = open("Sample_Data/Swim_Records/record_list.txt")
record = record_f.read().decode('utf-8').split('\n')
record_f.close()
for line in record:
print(line)
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
time_only = []
for line in record:
(player, p_record) = line.split()
time_only.append(float(p_record))
record_f.close()
time_only.sort()
for i in range(3):
print(str(i+1) + "등", time_only[i])
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
time_only = []
name_only = []
for line in record:
(p_name, p_record) = line.split()
time_only.append(float(p_record))
name_only.append(p_name)
record_f.close()
print(name_only)
print(time_only)
city_temperature = {}
city_temperature['Pyongtaek'] = 22
city_temperature
city_temperature['Suwon'] = 18
city_temperature['Jeju'] = 25
city_temperature
city_temperature['Pyongtaek']
city_temperature['Jeju']
key_list = city_temperature.keys()
key_list
value_list = city_temperature.values()
value_list
item_list = city_temperature.items()
item_list
for key in city_temperature.keys():
print(key,"의 온도는", city_temperature[key], "도 이다.")
for key in city_temperature:
print(key,"의 온도는", city_temperature[key], "도 이다.")
dir(city_temperature)
city_temperature.pop("Suwon")
print(city_temperature)
city_temperature.has_key("Suwon")
city_temperature.has_key("Jeju")
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
(player, p_record) = line.split()
record_dict[p_record] = player
record_f.close()
for item_key in record_dict:
print(item_key, ":", record_dict[item_key])
sorted(record_dict.keys())
for each_record in sorted(record_dict.keys()):
print(each_record, record_dict[each_record])
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
(player, p_record) = line.split()
record_dict[p_record] = player
record_f.close()
ranking = 1
for each_record in sorted(record_dict.keys()):
print(str(ranking) + "등", record_dict[each_record], each_record)
ranking += 1
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
(player, p_record) = line.split()
record_dict[p_record] = player
record_f.close()
ranking = 1
for each_record in sorted(record_dict.keys()):
print(str(ranking) + "등", record_dict[each_record], each_record)
if ranking < 3:
ranking += 1
else:
break
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
(player, p_record) = line.split()
record_dict[p_record] = player
record_f.close()
record_list = record_dict.keys()
record_list.sort()
for i in range(3):
item = record_list[i]
print(str(i+1) + "등", record_dict[item], item)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Review
Step2: Now let's modify the code above to get the result below.
Step3: The two lists currently hold their items in the same order as the original table.
Step4: Now enter the desired pairs of data.
Step5: We can now confirm that the Pyongtaek entry has been added.
Step6: Now let's add the Suwon and Jeju entries and check them.
Step7: Caution
Step8: A list containing only the keys
Step9: Caution
Step10: Grouping each (key, value) pair as an item of a list
Step11: Looping over a dictionary
Step12: In fact, you do not even need to use the keys method.
Step13: Dictionaries do not have that many methods.
Step14: Of these, it is good to know pop and has_key at a minimum.
Step15: The has_key() method checks whether a given key exists.
Step16: Linking player names and records
Step17: Now we just need to sort record_dict in ascending order by record.
Step18: Putting the code together gives the following.
Step19: Exercise
Step20: Caution
|
3,463
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
plt.ion()
from astropy import time
from poliastro.twobody.orbit import Orbit
from poliastro.bodies import Earth
from poliastro.plotting import OrbitPlotter
from poliastro.neos import neows
eros = neows.orbit_from_name('Eros')
frame = OrbitPlotter()
frame.plot(eros, label='Eros')
ganymed = neows.orbit_from_name('1036') # Ganymed IAU number
amor = neows.orbit_from_name('2001221') # Amor SPK-ID
eros = neows.orbit_from_spk_id('2000433') # Eros SPK-ID
frame = OrbitPlotter()
frame.plot(ganymed, label='Ganymed')
frame.plot(amor, label='Amor')
frame.plot(eros, label='Eros')
neows.orbit_from_name('*alley')
eros.epoch.iso
epoch = time.Time(2458000.0, scale='tdb', format='jd')
eros_november = eros.propagate(epoch)
eros_november.epoch.iso
neows.orbit_from_name('Toutatis', api_key='DEMO_KEY')
from poliastro.neos import dastcom5
atira = dastcom5.orbit_from_name('atira')[0] # NEO
wikipedia = dastcom5.orbit_from_name('wikipedia')[0] # Asteroid, but not NEO.
frame = OrbitPlotter()
frame.plot(atira, label='Atira (NEO)')
frame.plot(wikipedia, label='Wikipedia (asteroid)')
halleys = dastcom5.orbit_from_name('1P')
frame = OrbitPlotter()
frame.plot(halleys[0], label='Halley')
frame.plot(halleys[5], label='Halley')
frame.plot(halleys[10], label='Halley')
frame.plot(halleys[20], label='Halley')
frame.plot(halleys[-1], label='Halley')
ast_db = dastcom5.asteroid_db()
comet_db = dastcom5.comet_db()
ast_db.dtype.names[:20] # There are more than 100, but that would be too many lines in this notebook :P
aphelion_condition = 2 * ast_db['A'] - ast_db['QR'] < 0.983
axis_condition = ast_db['A'] < 1.3
atiras = ast_db[aphelion_condition & axis_condition]
len(atiras)
from poliastro.twobody.orbit import Orbit
from poliastro.bodies import Earth
earth = Orbit.from_body_ephem(Earth)
frame = OrbitPlotter()
frame.plot(earth, label='Earth')
for record in atiras['NO']:
ss = dastcom5.orbit_from_record(record)
frame.plot(ss, color="#666666")
frame = OrbitPlotter()
frame.plot(earth, label='Earth')
for i in range(len(atiras)):
record = atiras['NO'][i]
label = atiras['ASTNAM'][i].decode().strip() # DASTCOM5 strings are binary
ss = dastcom5.orbit_from_record(record)
frame.plot(ss, label=label)
db = dastcom5.entire_db()
db.columns
db[db.NAME == 'Halley'] # As you can see, Halley is the name of an asteroid too, did you know that?
aphelion_condition = (2 * db['A'] - db['QR']) < 0.983
axis_condition = db['A'] < 1.3
atiras = db[aphelion_condition & axis_condition]
len(atiras)
len(atiras[atiras.A < 0])
axis_condition = (db['A'] < 1.3) & (db['A'] > 0)
atiras = db[aphelion_condition & axis_condition]
len(atiras)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NeoWS module
Step2: You can also search by IAU number or SPK-ID (there is a faster neows.orbit_from_spk_id() function in that case, although)
Step3: Since neows relies on Small-Body Database browser to get the SPK-ID given a body name, you can use the wildcards from that browser
Step4: <div class="alert alert-info">Note that epoch is provided by the Web Service itself, so if you need orbit on another epoch, you have to propagate it
Step5: Given that we are using NASA APIs, there is a maximum number of requests. If you want to make many requests, it is recommended getting a NASA API key. You can use your API key adding the api_key parameter to the function
Step6: DASTCOM5 module
Step7: Keep in mind that this function returns a list of orbits matching your string. This is made on purpose given that there are comets which have several records in the database (one for each orbit determination in history) what allow plots like this one
Step8: While neows can only be used to get Orbit objects, dastcom5 can also provide asteroid and comet complete database.
Step9: <div class="alert alert-info">Asteroid and comet parameters are not exactly the same (although they are very close)
Step10: The number of Atira NEOs we find using this method is
Step11: Which is consistent with the stats published by CNEOS
Step12: We only need to get the 16 orbits from these 16 ndarrays.
Step13: <div class="alert alert-warning">This slightly incorrect, given that Earth coordinates are in a different frame from asteroids. However, for the purpose of this notebook, the effect is barely noticeable.</div>
Step14: <div class="alert alert-info">We knew beforehand that there are no `Atira` comets, only asteroids (comet orbits are usually more eccentric), but we could use the same method with `com_db` if we wanted.</div>
Step15: Also, in this function, DASTCOM5 data (specially strings) is ready to use (decoded and improved strings, etc)
Step16: Panda offers many functionalities, and can also be used in the same way as the ast_db and comet_db functions
Step17: What? I said they can be used in the same way!
Step18: So, rewriting our condition
|
3,464
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-2', 'toplevel')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 2. Key Properties --> Flux Correction
Step7: 3. Key Properties --> Genealogy
Step8: 3.2. CMIP3 Parent
Step9: 3.3. CMIP5 Parent
Step10: 3.4. Previous Name
Step11: 4. Key Properties --> Software Properties
Step12: 4.2. Code Version
Step13: 4.3. Code Languages
Step14: 4.4. Components Structure
Step15: 4.5. Coupler
Step16: 5. Key Properties --> Coupling
Step17: 5.2. Atmosphere Double Flux
Step18: 5.3. Atmosphere Fluxes Calculation Grid
Step19: 5.4. Atmosphere Relative Winds
Step20: 6. Key Properties --> Tuning Applied
Step21: 6.2. Global Mean Metrics Used
Step22: 6.3. Regional Metrics Used
Step23: 6.4. Trend Metrics Used
Step24: 6.5. Energy Balance
Step25: 6.6. Fresh Water Balance
Step26: 7. Key Properties --> Conservation --> Heat
Step27: 7.2. Atmos Ocean Interface
Step28: 7.3. Atmos Land Interface
Step29: 7.4. Atmos Sea-ice Interface
Step30: 7.5. Ocean Seaice Interface
Step31: 7.6. Land Ocean Interface
Step32: 8. Key Properties --> Conservation --> Fresh Water
Step33: 8.2. Atmos Ocean Interface
Step34: 8.3. Atmos Land Interface
Step35: 8.4. Atmos Sea-ice Interface
Step36: 8.5. Ocean Seaice Interface
Step37: 8.6. Runoff
Step38: 8.7. Iceberg Calving
Step39: 8.8. Endoreic Basins
Step40: 8.9. Snow Accumulation
Step41: 9. Key Properties --> Conservation --> Salt
Step42: 10. Key Properties --> Conservation --> Momentum
Step43: 11. Radiative Forcings
Step44: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Step45: 12.2. Additional Information
Step46: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Step47: 13.2. Additional Information
Step48: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Step49: 14.2. Additional Information
Step50: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Step51: 15.2. Additional Information
Step52: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Step53: 16.2. Additional Information
Step54: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Step55: 17.2. Equivalence Concentration
Step56: 17.3. Additional Information
Step57: 18. Radiative Forcings --> Aerosols --> SO4
Step58: 18.2. Additional Information
Step59: 19. Radiative Forcings --> Aerosols --> Black Carbon
Step60: 19.2. Additional Information
Step61: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Step62: 20.2. Additional Information
Step63: 21. Radiative Forcings --> Aerosols --> Nitrate
Step64: 21.2. Additional Information
Step65: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Step66: 22.2. Aerosol Effect On Ice Clouds
Step67: 22.3. Additional Information
Step68: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Step69: 23.2. Aerosol Effect On Ice Clouds
Step70: 23.3. RFaci From Sulfate Only
Step71: 23.4. Additional Information
Step72: 24. Radiative Forcings --> Aerosols --> Dust
Step73: 24.2. Additional Information
Step74: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Step75: 25.2. Historical Explosive Volcanic Aerosol Implementation
Step76: 25.3. Future Explosive Volcanic Aerosol Implementation
Step77: 25.4. Additional Information
Step78: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Step79: 26.2. Historical Explosive Volcanic Aerosol Implementation
Step80: 26.3. Future Explosive Volcanic Aerosol Implementation
Step81: 26.4. Additional Information
Step82: 27. Radiative Forcings --> Aerosols --> Sea Salt
Step83: 27.2. Additional Information
Step84: 28. Radiative Forcings --> Other --> Land Use
Step85: 28.2. Crop Change Only
Step86: 28.3. Additional Information
Step87: 29. Radiative Forcings --> Other --> Solar
Step88: 29.2. Additional Information
|
3,465
|
<ASSISTANT_TASK:>
Python Code:
import os
os.chdir('..')
os.getcwd()
!{'dpp'}
!{'dpp run --verbose ./committees/kns_committee'}
KNS_COMMITTEE_DATAPACKAGE_PATH = './data/committees/kns_committee/datapackage.json'
from datapackage import Package
kns_committee_package = Package(KNS_COMMITTEE_DATAPACKAGE_PATH)
kns_committee_package.resource_names
KNS_COMMITTEE_RESOURCE_NAME = 'kns_committee'
import yaml
print(yaml.dump(kns_committee_package.get_resource(KNS_COMMITTEE_RESOURCE_NAME).descriptor,
allow_unicode=True, default_flow_style=False))
for i, row in enumerate(kns_committee_package.get_resource(KNS_COMMITTEE_RESOURCE_NAME).iter(keyed=True), 1):
if i > 5: continue
print(f'-- row {i} --')
print(yaml.dump(row, allow_unicode=True, default_flow_style=False))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List the available pipelines
Step2: Run a pipeline
Step3: Inspect the output datapackage descriptor
Step4: Each package may contain multiple resources, let's see which resource names are available for the kns_committee package
Step5: Inspect the kns_committee resource descriptor which includes metadata and field descriptions
Step6: Print the first 5 rows of data
|
3,466
|
<ASSISTANT_TASK:>
Python Code:
qubits = []
for i in range(3):
q = qubit.Qubit('Transmon')
q.C_g = 3.87e-15
q.C_q = 75.1e-15
q.C_resToGnd = 79.1e-15
qubits.append(q)
q = qubit.Qubit('OCSQubit')
q.C_g = 2.94e-15
q.C_q = 48.5e-15
q.C_resToGnd = 51.5e-15
qubits.append(q)
cpw = cpwtools.CPW(material='al', w=10., s=7.)
print cpw
l_curve = 2*pi*50/4
coupling_length = 287
tot_length = l_curve*(1+1+2+2+2+2) + 2*1000 + 1156 + 350 + 500 + coupling_length # this coupling length ranges from 45-150 depending on desired Qc.
# Plan for 45, can always trombone down
L4 = cpwtools.QuarterLResonator(cpw, tot_length)
print('The frequency is brought down significantly by the capacitance through to ground through the qubit, ' +
'as well as the self-capacitance of the coupling cap to ground. These capacitances pull down the transmon ' +
'frequency more, so we will set Q3 to have no extension, and set the other qubit frequencies around it.')
print('Bare resonator frequency = {:.3f} GHz'.format(L4.fl()/1e9))
print
def L4FromQubit(q):
L4 = cpwtools.QuarterLResonator(cpw, tot_length)
seriesCap = q.C_g*q.C_q/(q.C_g+q.C_q)
L4.addCapacitiveCoupling('g', seriesCap, Z0 = 0)
L4.addCapacitiveCoupling('c_coupler', q.C_resToGnd, Z0 = 0)
return L4
L4 = L4FromQubit(qubits[2])
f0 = L4.fl()
for i,q in enumerate(qubits):
L4 = L4FromQubit(q)
length = L4.setLengthFromFreq(f0 + 0.04e9*[-2, -1, 0, 1][i])
q.C_r = L4.C()
q.omega_r = L4.wl()
q.omega_q = 2*pi*(f0-1e9)
print("{}: l = {:.2f}um f_l = {:.3f}GHz C_r = {:.2f}fF extension = {:.2f}um".format(
q.name, 1e6*q.res_length, L4.fl()/1e9, 1e15*L4.C(), (1e6*length - tot_length)/2))
qb = deepcopy(qubits[2])
g = 2*pi*50e6 # qubit-resonator coupling in Hz
print('Range of C_q on the mask:')
print "C_q = 30fF: E_c = {:.2f}MHz".format( qb.E_c(30e6)/(2*pi*hbar)*1e15 )
print "C_q = 95fF: E_c = {:.2f}MHz".format( qb.E_c(95e6)/(2*pi*hbar)*1e15 )
print
print('Ideal:')
print "Transmon: E_c = 250MHz: C_sigma = C_q + C_g = {:.2f}fF".format( e**2/2/250e6/(2*pi*hbar)*1e15 )
print "Charge Sensitive: E_c = 385MHz: C_sigma = C_q + C_g = {:.2f}fF".format( e**2/2/385e6/(2*pi*hbar)*1e15 )
# With caps chosen from the mask:
for q in qubits:
print "{}: C_q = {:.2f}fF E_c = {:.2f}MHz E_j = {:.2f}GHz alpha = {:.2f}MHz g = {:.2f}MHz C_g = {:.2f}fF".format(
q.name, 1e15*q.C_q, -q.E_c()/(2*pi*hbar)/1e6, q.E_j()/2/pi/hbar/1e9, q.alpha(q.E_c(),q.E_j())/(2*pi)/1e6, g/2/pi/1e6, 1e15*q.cap_g(g))
# We choose the closest g capacitance from the mask
for q in qubits:
print "{}: C_g = {:.2f}fF g = {:.2f}MHz Chi_0/2pi = {:.2f}MHz Chi/2pi = {:.2f}MHz Q_r = {:.0f} kappa = {:.2f}MHz 1/kappa = {:.0f}ns I_c={:.2f}nA n_crit={:.0f}".format(
q.name, 1e15*q.cap_g(q.g()), q.g()/2/pi/1e6, 1e-6*q.Chi_0()/2/pi, 1e-6*q.Chi()/2/pi, q.Q_r(), q.omega_r/q.Q_r()*1e-6/2/pi, q.Q_r()/q.omega_r*1e9, q.I_c()*1e9, ((q.omega_q-q.omega_r)/2/q.g())**2)
delta = 380e-6; #2\Delta/e in V
Jc = 1e8*673e-9 # A/cm^2
nJJs = [2,1,1,2]
for i,q in enumerate(qubits):
print("{}: I_c = {:.2f}nA R_N = {:.2f}k width = {} x {:.3f}nm".format(q.name, q.I_c()*1e9, 1e-3*pi/4*delta/q.I_c(), nJJs[i], 1e9*q.I_c()/(1e4*Jc)/100e-9/nJJs[i] ))
for q in qubits:
print "{}: Ej/Ec = {:.3f} Charge dispersion = {:.3f}MHz".format(q.name, q.E_j()/q.E_c(), q.charge_dispersion()/2/pi/hbar/1e6)
# What variation in C_g should be included on mask for the C_q variation we have?
for C_q_ in [85e-15, 29e-15, e**2/2/250e6]:
for g_ in [2*pi*25e6, 2*pi*50e6, 2*pi*200e6]:
qb.C_q = C_q_
print "C_q = {:.2f}fF g = {:.2f}MHz C_g = {:.2f}fF".format(
1e15*C_q_, g_/2/pi/1e6, 1e15*qb.cap_g(g_))
cpw.setKineticInductanceCorrection(False)
print cpw
cpwx = cpwtools.CPWWithBridges(material='al', w=1e6*cpw.w, s=1e6*cpw.s, bridgeSpacing = 250, bridgeWidth = 3, t_oxide=0.16)
cpwx.setKineticInductanceCorrection(False)
print cpwx
d = 5
MperL = inductiveCoupling.inductiveCoupling.CalcMutual(cpw.w*1e6, cpw.w*1e6, cpw.s*1e6, cpw.s*1e6, d, 10*cpw.w*1e6)[0]
for q in qubits:
M = 1/(np.sqrt(q.Q_r()*pi/8/cpw.z0()**2)*q.omega_r)
print "{} M = {:.2f}pH coupling length = {:.2f}um".format(q.name, M*1e12, M/MperL*1e6)
for q in [3000,6000,9000,15000,21000,27000,33000]:
print "Q_c={} l_c={:.2f}".format(q,1/(np.sqrt(q*pi/8/cpw.z0()**2)*qubits[2].omega_r)/MperL*1e6)
l_curve = 2*pi*50/4
tot_length = l_curve*(1+2+2+2+1)*2 + 4*750 + 2569 + 4*450 + 2*106
purcell = cpwtools.HalfLResonator(cpw,tot_length)
purcell.addCapacitiveCoupling('in', 40e-15)
purcell.addCapacitiveCoupling('out', 130e-15)
print( "f_max = {:.3f}GHz Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcell.fl(), purcell.Qc('in'), purcell.Qc('out') ) )
purcell.l = (tot_length + 503*4)*1e-6
print( "f_min = {:.3f}GHz Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcell.fl(), purcell.Qc('in'), purcell.Qc('out') ) )
print
print('The measured purcell filter (no crossovers) seems to be 150-200MHz below expected. This has been accounted for below.')
f0 = (qubits[1].omega_r + qubits[2].omega_r)/2/2/pi
purcell.setLengthFromFreq(f0 + 175e6) # The measured purcell filter (no crossovers) seems to be 150-200MHz below expected.
print "f = {:.2f}GHz l = {:.3f}um offset = {:.3f}um Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcell.fl(), purcell.l*1e6, (purcell.l*1e6-tot_length)/4, purcell.Qc('in'), purcell.Qc('out') )
print "V_out/V_in =", (purcell.Qc('in')/purcell.Qc('out'))**0.5
print "{:.2f}% power lost through input".format( 100*purcell.Ql()/purcell.Qc('in') )
print "{:.2f}% power lost through output".format( 100*purcell.Ql()/purcell.Qc('out') )
print "{:.2f}% power lost internally".format( 100*purcell.Ql()/purcell.Qint() )
print
print "The purcell filter frequency goes up by 310MHz when crossovers are added:"
purcellx = deepcopy(purcell)
purcellx.cpw = cpwx
print "f = {:.2f}GHz l = {:.3f}um Q_in = {:.2f} Q_out = {:.2f}".format( 1e-9*purcellx.fl(), purcellx.l*1e6, purcellx.Qc('in'), purcellx.Qc('out') )
print "Purcell Filter FWHM = {:.2f}MHz".format(2*pi*f0/purcell.Ql()/2/pi/1e6)
print "Purcell Filter Q_l = {:.2f}".format(purcell.Ql())
print
for q in qubits:
kappa_r = q.omega_r/q.Q_r()
Delta = q.omega_q - q.omega_r
print "{}: T1 limit (no purcell) = {:.2f}us T1 limit (purcell) = {:.2f}us".format(
q.name, (Delta/q.g())**2/kappa_r * 1e6, (Delta/q.g())**2 * (q.omega_r/q.omega_q) * (2*Delta/q.omega_r*purcell.Ql())**2/kappa_r * 1e6 )
C_q = qubits[2].C_q
L_q = 1/(qubits[2].omega_q**2 * C_q)
R_s = 50
C_s = 0.1e-15
Q_s = 1/(qubits[2].omega_q * R_s * C_s)
R_p = R_s*(1 + Q_s**2)
C_p = C_s * Q_s**2/(1 + Q_s**2)
omega = 1/np.sqrt((C_q+C_p)*L_q)
Q_xy = omega*R_p*(C_q+C_p)
print("f: {:.3f}GHz --> {:.3f}GHz".format( 1e-9/np.sqrt(C_q*L_q)/2/pi, 1e-9*omega/2/pi))
print("Q = {:.2f}".format(Q_xy))
print("1/kappa = {:.2f}us".format(1e6*Q_xy/omega))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: CPW
Step2: $\lambda/4$ readout resonators
Step3: Qubit parameters
Step4: Feedline with and without crossovers
Step5: Inductive Coupling
Step6: Purcell Filter
Step7: Loss from XY line
|
3,467
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
# License: BSD-3-Clause
from functools import partial
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa, analysis:ignore
import mne
from mne.stats import (ttest_1samp_no_p, bonferroni_correction, fdr_correction,
permutation_t_test, permutation_cluster_1samp_test)
print(__doc__)
width = 40
n_subjects = 10
signal_mean = 100
signal_sd = 100
noise_sd = 0.01
gaussian_sd = 5
sigma = 1e-3 # sigma for the "hat" method
n_permutations = 'all' # run an exact test
n_src = width * width
# For each "subject", make a smoothed noisy signal with a centered peak
rng = np.random.RandomState(2)
X = noise_sd * rng.randn(n_subjects, width, width)
# Add a signal at the center
X[:, width // 2, width // 2] = signal_mean + rng.randn(n_subjects) * signal_sd
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(gaussian_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
fig, ax = plt.subplots()
ax.imshow(X.mean(0), cmap='inferno')
ax.set(xticks=[], yticks=[], title="Data averaged over subjects")
titles = ['t']
out = stats.ttest_1samp(X, 0, axis=0)
ts = [out[0]]
ps = [out[1]]
mccs = [False] # these are not multiple-comparisons corrected
def plot_t_p(t, p, title, mcc, axes=None):
if axes is None:
fig = plt.figure(figsize=(6, 3))
axes = [fig.add_subplot(121, projection='3d'), fig.add_subplot(122)]
show = True
else:
show = False
p_lims = [0.1, 0.001]
t_lims = -stats.distributions.t.ppf(p_lims, n_subjects - 1)
p_lims = [-np.log10(p) for p in p_lims]
# t plot
x, y = np.mgrid[0:width, 0:width]
surf = axes[0].plot_surface(x, y, np.reshape(t, (width, width)),
rstride=1, cstride=1, linewidth=0,
vmin=t_lims[0], vmax=t_lims[1], cmap='viridis')
axes[0].set(xticks=[], yticks=[], zticks=[],
xlim=[0, width - 1], ylim=[0, width - 1])
axes[0].view_init(30, 15)
cbar = plt.colorbar(ax=axes[0], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=surf)
cbar.set_ticks(t_lims)
cbar.set_ticklabels(['%0.1f' % t_lim for t_lim in t_lims])
cbar.set_label('t-value')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if not show:
axes[0].set(title=title)
if mcc:
axes[0].title.set_weight('bold')
# p plot
use_p = -np.log10(np.reshape(np.maximum(p, 1e-5), (width, width)))
img = axes[1].imshow(use_p, cmap='inferno', vmin=p_lims[0], vmax=p_lims[1],
interpolation='nearest')
axes[1].set(xticks=[], yticks=[])
cbar = plt.colorbar(ax=axes[1], shrink=0.75, orientation='horizontal',
fraction=0.1, pad=0.025, mappable=img)
cbar.set_ticks(p_lims)
cbar.set_ticklabels(['%0.1f' % p_lim for p_lim in p_lims])
cbar.set_label(r'$-\log_{10}(p)$')
cbar.ax.get_xaxis().set_label_coords(0.5, -0.3)
if show:
text = fig.suptitle(title)
if mcc:
text.set_weight('bold')
plt.subplots_adjust(0, 0.05, 1, 0.9, wspace=0, hspace=0)
mne.viz.utils.plt_show()
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
ts.append(ttest_1samp_no_p(X, sigma=sigma))
ps.append(stats.distributions.t.sf(np.abs(ts[-1]), len(X) - 1) * 2)
titles.append(r'$\mathrm{t_{hat}}$')
mccs.append(False)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
# Here we have to do a bit of gymnastics to get our function to do
# a permutation test without correcting for multiple comparisons:
X.shape = (n_subjects, n_src) # flatten the array for simplicity
titles.append('Permutation')
ts.append(np.zeros(width * width))
ps.append(np.zeros(width * width))
mccs.append(False)
for ii in range(n_src):
ts[-1][ii], ps[-1][ii] = permutation_t_test(X[:, [ii]], verbose=False)[:2]
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
N = np.arange(1, 80)
alpha = 0.05
p_type_I = 1 - (1 - alpha) ** N
fig, ax = plt.subplots(figsize=(4, 3))
ax.scatter(N, p_type_I, 3)
ax.set(xlim=N[[0, -1]], ylim=[0, 1], xlabel=r'$N_{\mathrm{test}}$',
ylabel=u'Probability of at least\none type I error')
ax.grid(True)
fig.tight_layout()
fig.show()
titles.append('Bonferroni')
ts.append(ts[-1])
ps.append(bonferroni_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
titles.append('FDR')
ts.append(ts[-1])
ps.append(fdr_correction(ps[0])[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
titles.append(r'$\mathbf{Perm_{max}}$')
out = permutation_t_test(X, verbose=False)[:2]
ts.append(out[0])
ps.append(out[1])
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
from sklearn.feature_extraction.image import grid_to_graph # noqa: E402
mini_adjacency = grid_to_graph(3, 3).toarray()
assert mini_adjacency.shape == (9, 9)
print(mini_adjacency[0])
titles.append('Clustering')
# Reshape data to what is equivalent to (n_samples, n_space, n_time)
X.shape = (n_subjects, width, width)
# Compute threshold from t distribution (this is also the default)
threshold = stats.distributions.t.ppf(1 - alpha, n_subjects - 1)
t_clust, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, adjacency=None,
n_permutations=n_permutations, out_type='mask')
# Put the cluster data in a viewable format
p_clust = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_clust[cl] = p
ts.append(t_clust)
ps.append(p_clust)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
titles.append(r'$\mathbf{C_{hat}}$')
stat_fun_hat = partial(ttest_1samp_no_p, sigma=sigma)
t_hat, clusters, p_values, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold, adjacency=None, out_type='mask',
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
p_hat = np.ones((width, width))
for cl, p in zip(clusters, p_values):
p_hat[cl] = p
ts.append(t_hat)
ps.append(p_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
titles.append(r'$\mathbf{C_{TFCE}}$')
threshold_tfce = dict(start=0, step=0.2)
t_tfce, _, p_tfce, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, adjacency=None,
n_permutations=n_permutations, out_type='mask')
ts.append(t_tfce)
ps.append(p_tfce)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
titles.append(r'$\mathbf{C_{hat,TFCE}}$')
t_tfce_hat, _, p_tfce_hat, H0 = permutation_cluster_1samp_test(
X, n_jobs=1, threshold=threshold_tfce, adjacency=None, out_type='mask',
n_permutations=n_permutations, stat_fun=stat_fun_hat, buffer_size=None)
ts.append(t_tfce_hat)
ps.append(p_tfce_hat)
mccs.append(True)
plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1])
fig = plt.figure(facecolor='w', figsize=(14, 3))
assert len(ts) == len(titles) == len(ps)
for ii in range(len(ts)):
ax = [fig.add_subplot(2, 10, ii + 1, projection='3d'),
fig.add_subplot(2, 10, 11 + ii)]
plot_t_p(ts[ii], ps[ii], titles[ii], mccs[ii], ax)
fig.tight_layout(pad=0, w_pad=0.05, h_pad=0.1)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hypothesis testing
Step2: The data averaged over all subjects looks like this
Step3: In this case, a null hypothesis we could test for each voxel is
Step4: "Hat" variance adjustment
Step5: Non-parametric tests
Step6: Multiple comparisons
Step7: To combat this problem, several methods exist. Typically these control either the family-wise error rate (FWER) or the false discovery rate (FDR).
Step8: False discovery rate (FDR) correction
Step9: Non-parametric resampling test with a maximum statistic
Step10: Clustering
Step11: In general the adjacency between voxels can be more complex, such as
Step12: "Hat" variance adjustment
Step13: Threshold-free cluster enhancement (TFCE)
Step14: We can also combine TFCE and the "hat" correction
Step15: Visualize and compare methods
|
3,468
|
<ASSISTANT_TASK:>
Python Code:
import math
print("The square root of 3 is:", math.sqrt(3))
print("π is:", math.pi)
print("The sin of 90 degrees is:", math.sin(math.radians(90)))
dir(math)  # or type "math." and press <tab> to explore the module's contents
math.erf?
s = "Hello!"
s.join?  # or press <shift>+<tab> inside the brackets to see the signature
from IPython.display import Image
Image('imgs/leonardo.jpg')
# HTML
from IPython.display import HTML
HTML('<b>Hello world!</b>')
# Artsy videos
from IPython.display import VimeoVideo
VimeoVideo("150594088")
# LaTeX!
from IPython.display import Latex
Latex(r"""\begin{eqnarray}
\nabla \times \vec{\mathbf{B}} -\, \frac1c\,
\frac{\partial\vec{\mathbf{E}}}{\partial t} & = & \frac{4\pi}{c}\vec{\mathbf{j}} \\
\nabla \cdot \vec{\mathbf{E}} & = & 4 \pi \rho \\
\nabla \times \vec{\mathbf{E}}\, +\, \frac1c\,
\frac{\partial\vec{\mathbf{B}}}{\partial t} & = & \vec{\mathbf{0}} \\
\nabla \cdot \vec{\mathbf{B}} & = & 0
\end{eqnarray}""")
class Apple:
def __init__(self, colour):
self.colour = colour
def _repr_html_(self):
if self.colour == 'green':
url = ("https://www.infusioneliquid.com/wp-content"
"/uploads/2013/12/Green-Apples.jpg")
elif self.colour == 'red':
url = ("http://boutonrougedesigns.com/wp-content"
"/uploads/red-apple.jpg")
else:
url = ("http://static1.squarespace.com/static"
"/51623c20e4b01df404d682ae/t/542d5d05e4b09d76c5f801d8"
"/1412259082640/Be+the+job+candidate+that+stands+out+"
"from+the+rest+and+the+one+recruitment+consultants+choose+jpeg")
return "<img src=%s />"%url
Apple("red")
# Matplotlib
%config InlineBackend.figure_format='retina'
# gives you interactive plots
%matplotlib notebook
# rendered inline plots
#%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (6, 6)
plt.rcParams["figure.max_open_warning"] = -1
# Taken from https://github.com/fperez/talk-1504-boulder/blob/master/QuickTour.ipynb
from scipy import special
import numpy as np
def plot_bessel(xmax=20, nmin=0, nmax=10, nstep=3):
x = np.linspace(0, xmax, 200)
f, ax = plt.subplots()
for n in range(nmin, nmax+1, nstep):
        plt.plot(x, special.jn(n, x), label=r'$J_{%i}(x)$' % n)
plt.grid()
plt.legend()
plt.title('Bessel Functions')
plot_bessel()
from IPython.html.widgets import interact
interact(plot_bessel, xmax=(10,100.), nmin=(0, 10),
nmax=(10, 30), nstep=(1,5));
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring modules -- getting help
Step2: To get help with a particular function, type math.sin?
Step4: A notebook can display (nearly) anything
Step5: Custom objects
Step6: Inline plots
Step7: Interactive interactive plots (twice as interactive!)
|
3,469
|
<ASSISTANT_TASK:>
Python Code:
a = {
"x" : 1,
"y" : 2,
"z" : 3
}
b = {
"w" : 10,
"x" : 11,
"y" : 2
}
# Find keys in common
a.keys() & b.keys()
# Find keys in a that are not in b
a.keys() - b.keys()
# Find (key,value) pairs in common
a.items() & b.items()
# Make a new dictionary with certain keys removed
c = {key:a[key] for key in a.keys() - {"z", "w"}}
c
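# Extra illustration (an addition, not part of the original recipe): the
# common (key, value) pairs found above can also be materialized as a dict.
common = dict(a.items() & b.items())
common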
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To find commonalities between two dictionaries, you can simply perform set operations on the results returned by their keys() or items() methods. For example:
Step2: These operations can also be used to modify or filter dictionary elements. For example, suppose you want to construct a new dictionary from an existing one while excluding a few specified keys. The following uses a dict comprehension to accomplish this:
|
3,470
|
<ASSISTANT_TASK:>
Python Code:
index_of_users = mkUserIndex(df=apps, user_col='uid')
index_of_items = mkItemIndex(df=apps, item_col='job_title')
print('# users: %d' %len(index_of_users))
print('# job titles: %d' %len(index_of_items))
from scipy.io import mmread
user_apply_job = mmread(DATA_DIR + 'user_apply_job.mtx')
printInfo(user_apply_job)
apps_by_job_emp = pd.read_csv(AGG_DIR + 'apps_by_job_emp.csv')
apps_by_job_emp.shape
apps_by_job_emp['job_emp'] = apps_by_job_emp['job_title'] + ' at ' + apps_by_job_emp['organisation_name_ep']
apps_by_job_emp.head()
index_of_items = mkItemIndex(df=apps_by_job_emp, item_col='job_emp')
user_apply_job_emp = buildUserItemMat(df=apps_by_job_emp,
index_of_users=index_of_users, index_of_items=index_of_items,
user_col='uid', item_col='job_emp', rating_col='n_apply')
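# The helpers mkUserIndex, mkItemIndex and buildUserItemMat are defined
# elsewhere in the source notebook (elided here). As a rough sketch (an
# assumption, not the actual implementation), buildUserItemMat could be
# realized with scipy like so:
from scipy.sparse import coo_matrix
def build_user_item_mat_sketch(df, index_of_users, index_of_items,
                               user_col, item_col, rating_col):
    # Map raw ids to row/column positions, then build a sparse ratings matrix
    rows = df[user_col].map(index_of_users)
    cols = df[item_col].map(index_of_items)
    return coo_matrix((df[rating_col], (rows, cols)),
                      shape=(len(index_of_users), len(index_of_items)))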
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Applicant-apply-(Job, Employer) matrix
|
3,471
|
<ASSISTANT_TASK:>
Python Code:
indexfile = "./datafiles/index_latest.txt"
import numpy as np
dataindex = np.genfromtxt(indexfile, skip_header=6, unpack=True, delimiter=',', dtype=None, \
names=['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',
'geospatial_lon_min', 'geospatial_lon_max',
'time_coverage_start', 'time_coverage_end',
'provider', 'date_update', 'data_mode', 'parameters'])
lon_min = dataindex['geospatial_lon_min']
lon_max = dataindex['geospatial_lon_max']
lat_min = dataindex['geospatial_lat_min']
lat_max = dataindex['geospatial_lat_max']
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
m = Basemap(projection='merc', llcrnrlat=30., urcrnrlat=46.,
llcrnrlon=-10, urcrnrlon=40., lat_ts=38., resolution='i')
lonmean, latmean = 0.5*(lon_min + lon_max), 0.5*(lat_min + lat_max)
lon2plot, lat2plot = m(lonmean, latmean)
fig = plt.figure(figsize=(10,8))
m.plot(lon2plot, lat2plot, 'ko', markersize=2)
m.drawcoastlines(linewidth=0.5, zorder=3)
m.fillcontinents(zorder=2)
m.drawparallels(np.arange(-90.,91.,2.), labels=[1,0,0,0], linewidth=0.5, zorder=1)
m.drawmeridians(np.arange(-180.,181.,3.), labels=[0,0,1,0], linewidth=0.5, zorder=1)
plt.show()
box = [12, 15, 32, 34]
import numpy as np
goodcoordinates = np.where( (lonmean>=box[0]) & (lonmean<=box[1]) & (latmean>=box[2]) & (latmean<=box[3]))
print goodcoordinates
goodfilelist = dataindex['file_name'][goodcoordinates]
print goodfilelist
m2 = Basemap(projection='merc', llcrnrlat=32., urcrnrlat=34.,
llcrnrlon=12, urcrnrlon=15., lat_ts=38., resolution='h')
lon2plot, lat2plot = m2(lonmean[goodcoordinates], latmean[goodcoordinates])
fig = plt.figure(figsize=(10,8))
m2.plot(lon2plot, lat2plot, 'ko', markersize=4)
m2.drawcoastlines(linewidth=0.5, zorder=3)
m2.fillcontinents(zorder=2)
m2.drawparallels(np.arange(-90.,91.,0.5), labels=[1,0,0,0], linewidth=0.5, zorder=1)
m2.drawmeridians(np.arange(-180.,181.,0.5), labels=[0,0,1,0], linewidth=0.5, zorder=1)
plt.show()
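# Added helper (not in the original example): a reusable version of the
# bounding-box selection performed above.
def files_in_box(index, lon_lat_box):
    # Mean position of each platform, then a vectorized inside-box test
    lonc = 0.5 * (index['geospatial_lon_min'] + index['geospatial_lon_max'])
    latc = 0.5 * (index['geospatial_lat_min'] + index['geospatial_lat_max'])
    inside = ((lonc >= lon_lat_box[0]) & (lonc <= lon_lat_box[1]) &
              (latc >= lon_lat_box[2]) & (latc <= lon_lat_box[3]))
    return index['file_name'][np.where(inside)]

print files_in_box(dataindex, box)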
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: To read the index file (comma separated values), we will try with the genfromtxt function.
Step2: Map of observations
Step3: We import the modules necessary for the plot.
Step4: We create the projection, centered on the Mediterranean Sea in this case.
Step5: And we create a plot showing all the data locations.
Step6: Selection of a data file based on coordinates
Step7: then we look for the observations within this box
Step8: The generation of the file list is direct
Step9: According to the file names, we have 7 profiling drifters available in the area. <br/>
|
3,472
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
raw_inputs = [
[711, 632, 71],
[73, 8, 3215, 55, 927],
[83, 91, 1, 645, 1253, 927],
]
# By default, this will pad using 0s; it is configurable via the
# "value" parameter.
# Note that you could use "pre" padding (at the beginning) or
# "post" padding (at the end).
# We recommend using "post" padding when working with RNN layers
# (in order to be able to use the
# CuDNN implementation of the layers).
padded_inputs = tf.keras.preprocessing.sequence.pad_sequences(
raw_inputs, padding="post"
)
print(padded_inputs)
embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
masked_output = embedding(padded_inputs)
print(masked_output._keras_mask)
masking_layer = layers.Masking()
# Simulate the embedding lookup by expanding the 2D input to 3D,
# with embedding dimension of 10.
unmasked_embedding = tf.cast(
tf.tile(tf.expand_dims(padded_inputs, axis=-1), [1, 1, 10]), tf.float32
)
masked_embedding = masking_layer(unmasked_embedding)
print(masked_embedding._keras_mask)
model = keras.Sequential(
[layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True), layers.LSTM(32),]
)
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
outputs = layers.LSTM(32)(x)
model = keras.Model(inputs, outputs)
class MyLayer(layers.Layer):
def __init__(self, **kwargs):
super(MyLayer, self).__init__(**kwargs)
self.embedding = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)
self.lstm = layers.LSTM(32)
def call(self, inputs):
x = self.embedding(inputs)
# Note that you could also prepare a `mask` tensor manually.
# It only needs to be a boolean tensor
# with the right shape, i.e. (batch_size, timesteps).
mask = self.embedding.compute_mask(inputs)
output = self.lstm(x, mask=mask) # The layer will ignore the masked values
return output
layer = MyLayer()
x = np.random.random((32, 10)) * 100
x = x.astype("int32")
layer(x)
class TemporalSplit(keras.layers.Layer):
    """Split the input tensor into 2 tensors along the time dimension."""
def call(self, inputs):
# Expect the input to be 3D and mask to be 2D, split the input tensor into 2
# subtensors along the time axis (axis 1).
return tf.split(inputs, 2, axis=1)
def compute_mask(self, inputs, mask=None):
# Also split the mask into 2 if it presents.
if mask is None:
return None
return tf.split(mask, 2, axis=1)
first_half, second_half = TemporalSplit()(masked_embedding)
print(first_half._keras_mask)
print(second_half._keras_mask)
class CustomEmbedding(keras.layers.Layer):
def __init__(self, input_dim, output_dim, mask_zero=False, **kwargs):
super(CustomEmbedding, self).__init__(**kwargs)
self.input_dim = input_dim
self.output_dim = output_dim
self.mask_zero = mask_zero
def build(self, input_shape):
self.embeddings = self.add_weight(
shape=(self.input_dim, self.output_dim),
initializer="random_normal",
dtype="float32",
)
def call(self, inputs):
return tf.nn.embedding_lookup(self.embeddings, inputs)
def compute_mask(self, inputs, mask=None):
if not self.mask_zero:
return None
return tf.not_equal(inputs, 0)
layer = CustomEmbedding(10, 32, mask_zero=True)
x = np.random.random((3, 10)) * 9
x = x.astype("int32")
y = layer(x)
mask = layer.compute_mask(x)
print(mask)
class MyActivation(keras.layers.Layer):
def __init__(self, **kwargs):
super(MyActivation, self).__init__(**kwargs)
# Signal that the layer is safe for mask propagation
self.supports_masking = True
def call(self, inputs):
return tf.nn.relu(inputs)
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=5000, output_dim=16, mask_zero=True)(inputs)
x = MyActivation()(x) # Will pass the mask along
print("Mask found:", x._keras_mask)
outputs = layers.LSTM(32)(x) # Will receive the mask
model = keras.Model(inputs, outputs)
class TemporalSoftmax(keras.layers.Layer):
def call(self, inputs, mask=None):
broadcast_float_mask = tf.expand_dims(tf.cast(mask, "float32"), -1)
inputs_exp = tf.exp(inputs) * broadcast_float_mask
        inputs_sum = tf.reduce_sum(inputs_exp * broadcast_float_mask, axis=1, keepdims=True)
return inputs_exp / inputs_sum
inputs = keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(input_dim=10, output_dim=32, mask_zero=True)(inputs)
x = layers.Dense(1)(x)
outputs = TemporalSoftmax()(x)
model = keras.Model(inputs, outputs)
y = model(np.random.randint(0, 10, size=(32, 100)), np.random.random((32, 100, 1)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Masking
Step3: As you can see from the printed result, the mask is a 2D boolean tensor with shape
Step4: This is also the case for the following Functional API model
Step5: Passing mask tensors directly to layers
Step7: Supporting masking in your custom layers
Step8: Here is another example of a CustomEmbedding layer that is capable of generating a
Step9: Opting-in to mask propagation on compatible layers
Step10: You can now use this custom layer in-between a mask-generating layer (like Embedding)
Step11: Writing layers that need mask information
|
3,473
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import openfermion
except ImportError:
!pip install git+https://github.com/quantumlib/OpenFermion.git@master#egg=openfermion
from openfermion.ops import FermionOperator
my_term = FermionOperator(((3, 1), (1, 0)))
print(my_term)
my_term = FermionOperator('3^ 1')
print(my_term)
good_way_to_initialize = FermionOperator('3^ 1', -1.7)
print(good_way_to_initialize)
bad_way_to_initialize = -1.7 * FermionOperator('3^ 1')
print(bad_way_to_initialize)
identity = FermionOperator('')
print(identity)
zero_operator = FermionOperator()
print(zero_operator)
my_operator = FermionOperator('4^ 1^ 3 9', 1. + 2.j)
print(my_operator)
print(my_operator.terms)
from openfermion.ops import FermionOperator
term_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j)
term_2 = FermionOperator('3^ 1', -1.7)
my_operator = term_1 + term_2
print(my_operator)
my_operator = FermionOperator('4^ 3^ 9 1', 1. + 2.j)
term_2 = FermionOperator('3^ 1', -1.7)
my_operator += term_2
print('')
print(my_operator)
term_1 = FermionOperator('4^ 3^ 9 1', 1. + 2.j)
term_2 = FermionOperator('3^ 1', -1.7)
my_operator = term_1 - 33. * term_2
print(my_operator)
my_operator *= 3.17 * (term_2 + term_1) ** 2
print('')
print(my_operator)
print('')
print(term_2 ** 3)
print('')
print(term_1 == 2.*term_1 - term_1)
print(term_1 == my_operator)
from openfermion.utils import commutator, count_qubits, hermitian_conjugated
from openfermion.transforms import normal_ordered
# Get the Hermitian conjugate of a FermionOperator, count its qubit, check if it is normal-ordered.
term_1 = FermionOperator('4^ 3 3^', 1. + 2.j)
print(hermitian_conjugated(term_1))
print(term_1.is_normal_ordered())
print(count_qubits(term_1))
# Normal order the term.
term_2 = normal_ordered(term_1)
print('')
print(term_2)
print(term_2.is_normal_ordered())
# Compute a commutator of the terms.
print('')
print(commutator(term_1, term_2))
from openfermion.ops import QubitOperator
my_first_qubit_operator = QubitOperator('X1 Y2 Z3')
print(my_first_qubit_operator)
print(my_first_qubit_operator.terms)
operator_2 = QubitOperator('X3 Z4', 3.17)
operator_2 -= 77. * my_first_qubit_operator
print('')
print(operator_2)
from openfermion.ops import FermionOperator
from openfermion.transforms import jordan_wigner, bravyi_kitaev
from openfermion.utils import hermitian_conjugated
from openfermion.linalg import eigenspectrum
# Initialize an operator.
fermion_operator = FermionOperator('2^ 0', 3.17)
fermion_operator += hermitian_conjugated(fermion_operator)
print(fermion_operator)
# Transform to qubits under the Jordan-Wigner transformation and print its spectrum.
jw_operator = jordan_wigner(fermion_operator)
print('')
print(jw_operator)
jw_spectrum = eigenspectrum(jw_operator)
print(jw_spectrum)
# Transform to qubits under the Bravyi-Kitaev transformation and print its spectrum.
bk_operator = bravyi_kitaev(fermion_operator)
print('')
print(bk_operator)
bk_spectrum = eigenspectrum(bk_operator)
print(bk_spectrum)
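# Added sanity check (not in the original tutorial): the two encodings
# should agree spectrally up to numerical precision.
import numpy
print(numpy.allclose(jw_spectrum, bk_spectrum))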
from openfermion.transforms import reverse_jordan_wigner
# Initialize QubitOperator.
my_operator = QubitOperator('X0 Y1 Z2', 88.)
my_operator += QubitOperator('Z1 Z4', 3.17)
print(my_operator)
# Map QubitOperator to a FermionOperator.
mapped_operator = reverse_jordan_wigner(my_operator)
print('')
print(mapped_operator)
# Map the operator back to qubits and make sure it is the same.
back_to_normal = jordan_wigner(mapped_operator)
back_to_normal.compress()
print('')
print(back_to_normal)
from openfermion.hamiltonians import fermi_hubbard
from openfermion.linalg import get_sparse_operator, get_ground_state
from openfermion.transforms import jordan_wigner
# Set model.
x_dimension = 2
y_dimension = 2
tunneling = 2.
coulomb = 1.
magnetic_field = 0.5
chemical_potential = 0.25
periodic = 1
spinless = 1
# Get fermion operator.
hubbard_model = fermi_hubbard(
x_dimension, y_dimension, tunneling, coulomb, chemical_potential,
magnetic_field, periodic, spinless)
print(hubbard_model)
# Get qubit operator under Jordan-Wigner.
jw_hamiltonian = jordan_wigner(hubbard_model)
jw_hamiltonian.compress()
print('')
print(jw_hamiltonian)
# Get scipy.sparse.csc representation.
sparse_operator = get_sparse_operator(hubbard_model)
print('')
print(sparse_operator)
print('\nEnergy of the model is {} in units of T and J.'.format(
get_ground_state(sparse_operator)[0]))
from openfermion.hamiltonians import jellium_model
from openfermion.utils import Grid
from openfermion.linalg import eigenspectrum
from openfermion.transforms import jordan_wigner, fourier_transform
# Let's look at a very small model of jellium in 1D.
grid = Grid(dimensions=1, length=3, scale=1.0)
spinless = True
# Get the momentum Hamiltonian.
momentum_hamiltonian = jellium_model(grid, spinless)
momentum_qubit_operator = jordan_wigner(momentum_hamiltonian)
momentum_qubit_operator.compress()
print(momentum_qubit_operator)
# Fourier transform the Hamiltonian to the position basis.
position_hamiltonian = fourier_transform(momentum_hamiltonian, grid, spinless)
position_qubit_operator = jordan_wigner(position_hamiltonian)
position_qubit_operator.compress()
print('')
print (position_qubit_operator)
# Check the spectra to make sure these representations are iso-spectral.
spectral_difference = eigenspectrum(momentum_qubit_operator) - eigenspectrum(position_qubit_operator)
print('')
print(spectral_difference)
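# Added check: the largest deviation between the two spectra should be at
# the level of numerical precision.
print(abs(spectral_difference).max())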
from openfermion.chem import MolecularData
# Set parameters to make a simple molecule.
diatomic_bond_length = .7414
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]
basis = 'sto-3g'
multiplicity = 1
charge = 0
description = str(diatomic_bond_length)
# Make molecule and print out a few interesting facts about it.
molecule = MolecularData(geometry, basis, multiplicity,
charge, description)
print('Molecule has automatically generated name {}'.format(
molecule.name))
print('Information about this molecule would be saved at:\n{}\n'.format(
molecule.filename))
print('This molecule has {} atoms and {} electrons.'.format(
molecule.n_atoms, molecule.n_electrons))
for atom, atomic_number in zip(molecule.atoms, molecule.protons):
print('Contains {} atom, which has {} protons.'.format(
atom, atomic_number))
# Set molecule parameters.
basis = 'sto-3g'
multiplicity = 1
bond_length_interval = 0.1
n_points = 25
# Generate molecule at different bond lengths.
hf_energies = []
fci_energies = []
bond_lengths = []
for point in range(3, n_points + 1):
bond_length = bond_length_interval * point
bond_lengths += [bond_length]
description = str(round(bond_length,2))
print(description)
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., bond_length))]
molecule = MolecularData(
geometry, basis, multiplicity, description=description)
# Load data.
molecule.load()
# Print out some results of calculation.
print('\nAt bond length of {} angstrom, molecular hydrogen has:'.format(
bond_length))
print('Hartree-Fock energy of {} Hartree.'.format(molecule.hf_energy))
print('MP2 energy of {} Hartree.'.format(molecule.mp2_energy))
print('FCI energy of {} Hartree.'.format(molecule.fci_energy))
print('Nuclear repulsion energy between protons is {} Hartree.'.format(
molecule.nuclear_repulsion))
for orbital in range(molecule.n_orbitals):
print('Spatial orbital {} has energy of {} Hartree.'.format(
orbital, molecule.orbital_energies[orbital]))
hf_energies += [molecule.hf_energy]
fci_energies += [molecule.fci_energy]
# Plot.
import matplotlib.pyplot as plt
%matplotlib inline
plt.figure(0)
plt.plot(bond_lengths, fci_energies, 'x-')
plt.plot(bond_lengths, hf_energies, 'o-')
plt.ylabel('Energy in Hartree')
plt.xlabel('Bond length in angstrom')
plt.show()
from openfermion.chem import geometry_from_pubchem
methane_geometry = geometry_from_pubchem('methane')
print(methane_geometry)
from openfermion.chem import MolecularData
from openfermion.transforms import get_fermion_operator, jordan_wigner
from openfermion.linalg import get_ground_state, get_sparse_operator
import numpy
import scipy
import scipy.linalg
# Load saved file for LiH.
diatomic_bond_length = 1.45
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]
basis = 'sto-3g'
multiplicity = 1
# Set Hamiltonian parameters.
active_space_start = 1
active_space_stop = 3
# Generate and populate instance of MolecularData.
molecule = MolecularData(geometry, basis, multiplicity, description="1.45")
molecule.load()
# Get the Hamiltonian in an active space.
molecular_hamiltonian = molecule.get_molecular_hamiltonian(
occupied_indices=range(active_space_start),
active_indices=range(active_space_start, active_space_stop))
# Map operator to fermions and qubits.
fermion_hamiltonian = get_fermion_operator(molecular_hamiltonian)
qubit_hamiltonian = jordan_wigner(fermion_hamiltonian)
qubit_hamiltonian.compress()
print('The Jordan-Wigner Hamiltonian in canonical basis follows:\n{}'.format(qubit_hamiltonian))
# Get sparse operator and ground state energy.
sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)
energy, state = get_ground_state(sparse_hamiltonian)
print('Ground state energy before rotation is {} Hartree.\n'.format(energy))
# Randomly rotate.
n_orbitals = molecular_hamiltonian.n_qubits // 2
n_variables = int(n_orbitals * (n_orbitals - 1) / 2)
numpy.random.seed(1)
random_angles = numpy.pi * (1. - 2. * numpy.random.rand(n_variables))
kappa = numpy.zeros((n_orbitals, n_orbitals))
index = 0
for p in range(n_orbitals):
for q in range(p + 1, n_orbitals):
kappa[p, q] = random_angles[index]
kappa[q, p] = -numpy.conjugate(random_angles[index])
index += 1
# Build the unitary rotation matrix.
difference_matrix = kappa + kappa.transpose()
rotation_matrix = scipy.linalg.expm(kappa)
# Apply the unitary.
molecular_hamiltonian.rotate_basis(rotation_matrix)
# Get qubit Hamiltonian in rotated basis.
qubit_hamiltonian = jordan_wigner(molecular_hamiltonian)
qubit_hamiltonian.compress()
print('The Jordan-Wigner Hamiltonian in rotated basis follows:\n{}'.format(qubit_hamiltonian))
# Get sparse Hamiltonian and energy in rotated basis.
sparse_hamiltonian = get_sparse_operator(qubit_hamiltonian)
energy, state = get_ground_state(sparse_hamiltonian)
print('Ground state energy after rotation is {} Hartree.'.format(energy))
from openfermion.hamiltonians import mean_field_dwave
from openfermion.transforms import get_quadratic_hamiltonian
# Set model.
x_dimension = 2
y_dimension = 2
tunneling = 2.
sc_gap = 1.
periodic = True
# Get FermionOperator.
mean_field_model = mean_field_dwave(
x_dimension, y_dimension, tunneling, sc_gap, periodic=periodic)
# Convert to QuadraticHamiltonian
quadratic_hamiltonian = get_quadratic_hamiltonian(mean_field_model)
# Compute the ground energy
ground_energy = quadratic_hamiltonian.ground_energy()
print(ground_energy)
orbital_energies, constant = quadratic_hamiltonian.orbital_energies()
print(orbital_energies)
print()
print(constant)
from openfermion.circuits import gaussian_state_preparation_circuit
circuit_description, start_orbitals = gaussian_state_preparation_circuit(quadratic_hamiltonian)
for parallel_ops in circuit_description:
print(parallel_ops)
print('')
print(start_orbitals)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction to OpenFermion
Step2: Initializing the FermionOperator data structure
Step3: The preferred way to specify the coefficient in openfermion is to provide an optional coefficient argument. If not provided, the coefficient defaults to 1. In the code below, the first method is preferred. The multiplication in the second method actually creates a copy of the term, which introduces some additional cost. All inplace operators (such as +=) modify classes whereas binary operators such as + create copies. Important caveats are that the empty tuple FermionOperator(()) and the empty string FermionOperator('') initialize the identity. The empty initializer FermionOperator() initializes the zero operator.
Step4: Note that FermionOperator has only one attribute
Step5: Manipulating the FermionOperator data structure
Step6: The print function prints each term in the operator on a different line. Note that the line my_operator = term_1 + term_2 creates a new object, which involves a copy of term_1 and term_2. The second block of code uses the inplace method +=, which is more efficient. This is especially important when trying to construct a very large FermionOperator. FermionOperators also support a wide range of builtins including, str(), repr(), ==, !=, =, , /, /=, +, +=, -, -=, - and **. Note that since FermionOperators involve floats, == and != check for (in)equality up to numerical precision. We demonstrate some of these methods below.
Step7: Additionally, there are a variety of methods that act on the FermionOperator data structure. We demonstrate a small subset of those methods here.
Step8: The QubitOperator data structure
Step9: Jordan-Wigner and Bravyi-Kitaev
Step10: We see that despite the different representation, these operators are iso-spectral. We can also apply the Jordan-Wigner transform in reverse to map arbitrary QubitOperators to FermionOperators. Note that we also demonstrate the .compress() method (a method on both FermionOperators and QubitOperators) which removes zero entries.
Step11: Sparse matrices and the Hubbard model
Step12: Hamiltonians in the plane wave basis
Step13: Basics of MolecularData class
Step14: If we had previously computed this molecule using an electronic structure package, we can call molecule.load() to populate all sorts of interesting fields in the data structure. Though we make no assumptions about what electronic structure packages users might install, we assume that the calculations are saved in OpenFermion's MolecularData objects. Currently plugins are available for Psi4 (OpenFermion-Psi4) and PySCF (OpenFermion-PySCF), and there may be more in the future. For the purposes of this example, we will load data that ships with OpenFermion to make a plot of the energy surface of hydrogen. Note that helper functions to initialize some interesting chemical benchmarks are found in openfermion.utils.
Step15: The geometry data needed to generate MolecularData can also be retrieved from the PubChem online database by inputting the molecule's name.
Step16: InteractionOperator and InteractionRDM for efficient numerical representations
Step17: Quadratic Hamiltonians and Slater determinants
Step18: Any quadratic Hamiltonian may be rewritten in the form
Step19: Eigenstates of quadratic hamiltonians are also known as fermionic Gaussian states, and they can be prepared efficiently on a quantum computer. One can use OpenFermion to obtain circuits for preparing these states. The following code obtains the description of a circuit which prepares the ground state (operations that can be performed in parallel are grouped together), along with a description of the starting state to which the circuit should be applied
|
3,474
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import control as ct
import control.optimal as opt
ct.use_fbs_defaults()
def vehicle_update(t, x, u, params):
# Get the parameters for the model
a = params.get('refoffset', 1.5) # offset to vehicle reference point
b = params.get('wheelbase', 3.) # vehicle wheelbase
maxsteer = params.get('maxsteer', 0.5) # max steering angle (rad)
# Saturate the steering input
delta = np.clip(u[1], -maxsteer, maxsteer)
alpha = np.arctan2(a * np.tan(delta), b)
# Return the derivative of the state
return np.array([
u[0] * np.cos(x[2] + alpha), # xdot = cos(theta + alpha) v
u[0] * np.sin(x[2] + alpha), # ydot = sin(theta + alpha) v
(u[0] / b) * np.tan(delta) # thdot = v/l tan(phi)
])
def vehicle_output(t, x, u, params):
return x[0:2]
# Default vehicle parameters (including nominal velocity)
vehicle_params={'refoffset': 1.5, 'wheelbase': 3, 'velocity': 15,
'maxsteer': 0.5}
# Define the vehicle steering dynamics as an input/output system
vehicle = ct.NonlinearIOSystem(
vehicle_update, vehicle_output, states=3, name='vehicle',
inputs=('v', 'delta'), outputs=('x', 'y'), params=vehicle_params)
# System parameters
wheelbase = vehicle_params['wheelbase']
v0 = vehicle_params['velocity']
# Control inputs
T_curvy = np.linspace(0, 7, 500)
v_curvy = v0*np.ones(T_curvy.shape)
delta_curvy = 0.1*np.sin(T_curvy)*np.cos(4*T_curvy) + 0.0025*np.sin(T_curvy*np.pi/7)
u_curvy = [v_curvy, delta_curvy]
X0_curvy = [0, 0.8, 0]
# Simulate the system + estimator
t_curvy, y_curvy, x_curvy = ct.input_output_response(
vehicle, T_curvy, u_curvy, X0_curvy, params=vehicle_params, return_x=True)
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the resulting trajectory (and some road boundaries)
plt.subplot(1, 4, 2)
plt.plot(y_curvy[1], y_curvy[0])
plt.plot(y_curvy[1] - 9/np.cos(x_curvy[2]), y_curvy[0], 'k-', linewidth=1)
plt.plot(y_curvy[1] - 3/np.cos(x_curvy[2]), y_curvy[0], 'k--', linewidth=1)
plt.plot(y_curvy[1] + 3/np.cos(x_curvy[2]), y_curvy[0], 'k-', linewidth=1)
plt.xlabel('y [m]')
plt.ylabel('x [m]');
plt.axis('equal')
# Plot the lateral position
plt.subplot(2, 2, 2)
plt.plot(t_curvy, y_curvy[1])
plt.ylabel('Lateral position $y$ [m]')
# Plot the steering angle
plt.subplot(2, 2, 4)
plt.plot(t_curvy, delta_curvy)
plt.ylabel('Steering angle $\\delta$ [rad]')
plt.xlabel('Time t [sec]')
plt.tight_layout()
# Define the lateral dynamics as a subset of the full vehicle steering dynamics
lateral = ct.NonlinearIOSystem(
lambda t, x, u, params: vehicle_update(
t, [0., x[0], x[1]], [params.get('velocity', 1), u[0]], params)[1:],
lambda t, x, u, params: vehicle_output(
t, [0., x[0], x[1]], [params.get('velocity', 1), u[0]], params)[1:],
states=2, name='lateral', inputs=('phi'), outputs=('y')
)
# Compute the linearization at velocity v0 = 15 m/sec
lateral_linearized = ct.linearize(lateral, [0, 0], [0], params=vehicle_params)
# Normalize dynamics using state [x1/b, x2] and timescale v0 t / b
b = vehicle_params['wheelbase']
v0 = vehicle_params['velocity']
lateral_transformed = ct.similarity_transform(
lateral_linearized, [[1/b, 0], [0, 1]], timescale=v0/b)
# Set the output to be the normalized state x1/b
lateral_normalized = lateral_transformed * (1/b)
print("Linearized system dynamics:\n")
print(lateral_normalized)
# Save the system matrices for later use
A = lateral_normalized.A
B = lateral_normalized.B
C = lateral_normalized.C
# Utility function to place poles for the normalized vehicle steering system
def normalized_place(wc, zc):
# Get the dynamics and input matrices, for later use
A, B = lateral_normalized.A, lateral_normalized.B
# Compute the eigenvalues from the characteristic polynomial
eigs = np.roots([1, 2*zc*wc, wc**2])
# Compute the feedback gain using eigenvalue placement
K = ct.place_varga(A, B, eigs)
# Create a new system representing the closed loop response
clsys = ct.StateSpace(A - B @ K, B, lateral_normalized.C, 0)
# Compute the feedforward gain based on the zero frequency gain of the closed loop
kf = np.real(1/clsys(0))
# Scale the input by the feedforward gain
clsys *= kf
# Return gains and closed loop system dynamics
return K, kf, clsys
# Utility function to plot simulation results for normalized vehicle steering system
def normalized_plot(t, y, u, inpfig, outfig):
plt.sca(outfig)
plt.plot(t, y)
plt.sca(inpfig)
plt.plot(t, u[0])
# Utility function to label plots of normalized vehicle steering system
def normalized_label(inpfig, outfig):
plt.sca(inpfig)
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Steering angle $\delta$ [rad]')
plt.sca(outfig)
plt.ylabel('Lateral position $y/b$')
plt.plot([0, 20], [0.95, 0.95], 'k--')
plt.plot([0, 20], [1.05, 1.05], 'k--')
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Explore range of values for omega_c, with zeta_c = 0.7
outfig = plt.subplot(2, 2, 1)
inpfig = plt.subplot(2, 2, 3)
zc = 0.7
for wc in [0.5, 0.7, 1]:
# Place the poles of the system
K, kf, clsys = normalized_place(wc, zc)
# Compute the step response
t, y, x = ct.step_response(clsys, np.linspace(0, 20, 100), return_x=True)
# Compute the input used to generate the control response
u = -K @ x + kf * 1
# Plot the results
normalized_plot(t, y, u, inpfig, outfig)
# Add labels to the figure
normalized_label(inpfig, outfig)
plt.legend(('$\omega_c = 0.5$', '$\omega_c = 0.7$', '$\omega_c = 1$'))
# Explore range of values for zeta_c, with omega_c = 0.07
outfig = plt.subplot(2, 2, 2)
inpfig = plt.subplot(2, 2, 4)
wc = 0.7
for zc in [0.5, 0.7, 1]:
# Place the poles of the system
K, kf, clsys = normalized_place(wc, zc)
# Compute the step response
t, y, x = ct.step_response(clsys, np.linspace(0, 20, 100), return_x=True)
# Compute the input used to generate the control response
u = -K @ x + kf * 1
# Plot the results
normalized_plot(t, y, u, inpfig, outfig)
# Add labels to the figure
normalized_label(inpfig, outfig)
plt.legend(('$\zeta_c = 0.5$', '$\zeta_c = 0.7$', '$\zeta_c = 1$'))
plt.tight_layout()
# Find the eigenvalue from the characteristic polynomial
wo = 1 # bandwidth for the observer
zo = 0.7 # damping ratio for the observer
eigs = np.roots([1, 2*zo*wo, wo**2])
# Compute the estimator gain using eigenvalue placement
L = np.transpose(
ct.place(np.transpose(A), np.transpose(C), eigs))
print("L = ", L)
# Create a linear model of the lateral dynamics driving the estimator
est = ct.StateSpace(A - L @ C, np.block([[B, L]]), np.eye(2), np.zeros((2,2)))
# Convert the curvy trajectory into normalized coordinates
x_ref = x_curvy[0] / wheelbase
y_ref = x_curvy[1] / wheelbase
theta_ref = x_curvy[2]
tau = v0 * T_curvy / b
# Simulate the estimator, with a small initial error in y position
t, y_est, x_est = ct.forced_response(est, tau, [delta_curvy, y_ref], [0.5, 0], return_x=True)
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the actual and estimated states
ax = plt.subplot(2, 2, 1)
plt.plot(t, y_ref)
plt.plot(t, x_est[0])
ax.set(xlim=[0, 10])
plt.legend(['actual', 'estimated'])
plt.ylabel('Lateral position $y/b$')
ax = plt.subplot(2, 2, 2)
plt.plot(t, x_est[0] - y_ref)
ax.set(xlim=[0, 10])
plt.ylabel('Lateral error')
ax = plt.subplot(2, 2, 3)
plt.plot(t, theta_ref)
plt.plot(t, x_est[1])
ax.set(xlim=[0, 10])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Vehicle angle $\\theta$')
ax = plt.subplot(2, 2, 4)
plt.plot(t, x_est[1] - theta_ref)
ax.set(xlim=[0, 10])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Angle error')
plt.tight_layout()
# Compute the feedback gains
# K, kf, clsys = normalized_place(1, 0.707) # Gains from MATLAB
# K, kf, clsys = normalized_place(0.07, 0.707) # Original gains
K, kf, clsys = normalized_place(0.7, 0.707) # Final gains
# Print out the gains
print("K = ", K)
print("kf = ", kf)
# Construct an output-based controller for the system
clsys = ct.StateSpace(
np.block([[A, -B@K], [L@C, A - B@K - L@C]]),
np.block([[B], [B]]) * kf,
np.block([[C, np.zeros(C.shape)], [np.zeros(C.shape), C]]),
np.zeros((2,1)))
# Simulate the system
t, y, x = ct.forced_response(clsys, tau, y_ref, [0.4, 0, 0.0, 0], return_x=True)
# Calculate the input used to generate the control response
u_sfb = kf * y_ref - K @ x[0:2]
u_ofb = kf * y_ref - K @ x[2:4]
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the actual and estimated states
ax = plt.subplot(1, 2, 1)
plt.plot(t, x[0])
plt.plot(t, x[2])
plt.plot(t, y_ref, 'k-.')
ax.set(xlim=[0, 30])
plt.legend(['state feedback', 'output feedback', 'reference'])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Lateral position $y/b$')
ax = plt.subplot(2, 2, 2)
plt.plot(t, x[1])
plt.plot(t, x[3])
plt.plot(t, theta_ref, 'k-.')
ax.set(xlim=[0, 15])
plt.ylabel('Vehicle angle $\\theta$')
ax = plt.subplot(2, 2, 4)
plt.plot(t, u_sfb[0])
plt.plot(t, u_ofb[0])
plt.plot(t, delta_curvy, 'k-.')
ax.set(xlim=[0, 15])
plt.xlabel('Normalized time $v_0 t / b$')
plt.ylabel('Steering angle $\\delta$')
plt.tight_layout()
import control.flatsys as fs
# Function to take states, inputs and return the flat flag
def vehicle_flat_forward(x, u, params={}):
# Get the parameter values
b = params.get('wheelbase', 3.)
# Create a list of arrays to store the flat output and its derivatives
zflag = [np.zeros(3), np.zeros(3)]
# Flat output is the x, y position of the rear wheels
zflag[0][0] = x[0]
zflag[1][0] = x[1]
# First derivatives of the flat output
zflag[0][1] = u[0] * np.cos(x[2]) # dx/dt
zflag[1][1] = u[0] * np.sin(x[2]) # dy/dt
# First derivative of the angle
thdot = (u[0]/b) * np.tan(u[1])
# Second derivatives of the flat output (setting vdot = 0)
zflag[0][2] = -u[0] * thdot * np.sin(x[2])
zflag[1][2] = u[0] * thdot * np.cos(x[2])
return zflag
# Function to take the flat flag and return states, inputs
def vehicle_flat_reverse(zflag, params={}):
# Get the parameter values
b = params.get('wheelbase', 3.)
# Create a vector to store the state and inputs
x = np.zeros(3)
u = np.zeros(2)
# Given the flat variables, solve for the state
x[0] = zflag[0][0] # x position
x[1] = zflag[1][0] # y position
x[2] = np.arctan2(zflag[1][1], zflag[0][1]) # tan(theta) = ydot/xdot
# And next solve for the inputs
u[0] = zflag[0][1] * np.cos(x[2]) + zflag[1][1] * np.sin(x[2])
thdot_v = zflag[1][2] * np.cos(x[2]) - zflag[0][2] * np.sin(x[2])
u[1] = np.arctan2(thdot_v, u[0]**2 / b)
return x, u
vehicle_flat = fs.FlatSystem(vehicle_flat_forward, vehicle_flat_reverse, inputs=2, states=3)
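# Added consistency check (an addition, using a zero-curvature state/input
# pair): mapping a state and input through the flat output and back should
# be the identity for a differentially flat system.
x_chk, u_chk = vehicle_flat_reverse(vehicle_flat_forward([0., 2., 0.], [15., 0.]))
print(x_chk, u_chk)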
# Utility function to plot lane change trajectory
def plot_vehicle_lanechange(traj):
# Create the trajectory
t = np.linspace(0, Tf, 100)
x, u = traj.eval(t)
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure(figsize=[9, 4.5])
# Plot the trajectory in xy coordinate
plt.subplot(1, 4, 2)
plt.plot(x[1], x[0])
plt.xlabel('y [m]')
plt.ylabel('x [m]')
# Add lane lines and scale the axis
plt.plot([-4, -4], [0, x[0, -1]], 'k-', linewidth=1)
plt.plot([0, 0], [0, x[0, -1]], 'k--', linewidth=1)
plt.plot([4, 4], [0, x[0, -1]], 'k-', linewidth=1)
plt.axis([-10, 10, -5, x[0, -1] + 5])
# Time traces of the state and input
plt.subplot(2, 4, 3)
plt.plot(t, x[1])
plt.ylabel('y [m]')
plt.subplot(2, 4, 4)
plt.plot(t, x[2])
plt.ylabel('theta [rad]')
plt.subplot(2, 4, 7)
plt.plot(t, u[0])
plt.xlabel('Time t [sec]')
plt.ylabel('v [m/s]')
# plt.axis([0, t[-1], u0[0] - 1, uf[0] + 1])
plt.subplot(2, 4, 8)
plt.plot(t, u[1]);
plt.xlabel('Time t [sec]')
plt.ylabel('$\delta$ [rad]')
plt.tight_layout()
# Define the endpoints of the trajectory
x0 = [0., 2., 0.]; u0 = [15, 0.]
xf = [75, -2., 0.]; uf = [15, 0.]
Tf = xf[0] / uf[0]
# Define a set of basis functions to use for the trajectories
poly = fs.PolyFamily(8)
# Find a trajectory between the initial condition and the final condition
traj1 = fs.point_to_point(vehicle_flat, Tf, x0, u0, xf, uf, basis=poly)
plot_vehicle_lanechange(traj1)
bezier = fs.BezierFamily(8)
traj2 = fs.point_to_point(vehicle_flat, Tf, x0, u0, xf, uf, basis=bezier)
plot_vehicle_lanechange(traj2)
timepts = np.linspace(0, Tf, 12)
poly = fs.PolyFamily(8)
traj_cost = opt.quadratic_cost(
vehicle_flat, np.diag([0, 0.1, 0]), np.diag([0.1, 10]), x0=xf, u0=uf)
constraints = [
opt.input_range_constraint(vehicle_flat, [8, -0.1], [12, 0.1]) ]
traj3 = fs.point_to_point(
    vehicle_flat, timepts, x0, u0, xf, uf, cost=traj_cost,
    constraints=constraints, basis=poly
)
plot_vehicle_lanechange(traj3)
# Magnitude of the steering input (half maximum)
Msteer = vehicle_params['maxsteer'] / 2
# Create a linearized model of the system going forward at 2 m/s
forward_lateral = ct.linearize(lateral, [0, 0], [0], params={'velocity': 2})
forward_tf = ct.ss2tf(forward_lateral)[0, 0]
print("Forward TF = ", forward_tf)
# Create a linearized model of the system going in reverse at 2 m/s
reverse_lateral = ct.linearize(lateral, [0, 0], [0], params={'velocity': -2})
reverse_tf = ct.ss2tf(reverse_lateral)[0, 0]
print("Reverse TF = ", reverse_tf)
# Configure matplotlib plots to be a bit bigger and optimize layout
plt.figure()
# Forward motion
t, y = ct.step_response(forward_tf * Msteer, np.linspace(0, 4, 500))
plt.plot(t, y, 'b--')
# Reverse motion
t, y = ct.step_response(reverse_tf * Msteer, np.linspace(0, 4, 500))
plt.plot(t, y, 'b-')
# Add labels and reference lines
plt.axis([0, 4, -0.5, 2.5])
plt.legend(['forward', 'reverse'], loc='upper left')
plt.xlabel('Time $t$ [s]')
plt.ylabel('Lateral position [m]')
plt.plot([0, 4], [0, 0], 'k-', linewidth=1)
# Plot the Bode plots
plt.figure()
plt.subplot(1, 2, 2)
ct.bode_plot(forward_tf[0, 0], np.logspace(-1, 1, 100), color='b', linestyle='--')
ct.bode_plot(reverse_tf[0, 0], np.logspace(-1, 1, 100), color='b', linestyle='-')
plt.legend(('forward', 'reverse'));
# Define the desired response of the system
a = 0.2
P = ct.ss2tf(lateral_normalized)
Fm = ct.TransferFunction([a**2], [1, 2*a, a**2])
Fr = Fm / P
# Compute the step response of the feedforward components
t, y_ffwd = ct.step_response(Fm, np.linspace(0, 25, 100))
t, delta_ffwd = ct.step_response(Fr, np.linspace(0, 25, 100))
# Scale and shift to correspond to lane change (-2 to +2)
y_ffwd = 0.5 - 1 * y_ffwd
delta_ffwd *= 1
# Overhead view
plt.subplot(1, 2, 1)
plt.plot(y_ffwd, t)
plt.plot(-1*np.ones(t.shape), t, 'k-', linewidth=1)
plt.plot(0*np.ones(t.shape), t, 'k--', linewidth=1)
plt.plot(1*np.ones(t.shape), t, 'k-', linewidth=1)
plt.axis([-5, 5, -2, 27])
# Plot the response
plt.subplot(2, 2, 2)
plt.plot(t, y_ffwd)
# plt.axis([0, 10, -5, 5])
plt.ylabel('Normalized position y/b')
plt.subplot(2, 2, 4)
plt.plot(t, delta_ffwd)
# plt.axis([0, 10, -1, 1])
plt.ylabel('$\\delta$ [rad]')
plt.xlabel('Normalized time $v_0 t / b$');
plt.tight_layout()
# Compute the feedback gain using eigenvalue placement
wc = 10
zc = 0.707
eigs = np.roots([1, 2*zc*wc, wc**2])
K = ct.place(A, B, eigs)
kr = np.real(1/clsys(0))
print("K = ", np.squeeze(K))
# Compute the estimator gain using eigenvalue placement
wo = 20
zo = 0.707
eigs = np.roots([1, 2*zo*wo, wo**2])
L = np.transpose(
ct.place(np.transpose(A), np.transpose(C), eigs))
print("L = ", np.squeeze(L))
# Construct an output-based controller for the system
C1 = ct.ss2tf(ct.StateSpace(A - B@K - L@C, L, K, 0))
print("C(s) = ", C1)
# Compute the loop transfer function and plot Nyquist, Bode
L1 = P * C1
plt.figure(); ct.nyquist_plot(L1, np.logspace(0.5, 3, 500))
plt.figure(); ct.bode_plot(L1, np.logspace(-1, 3, 500));
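# Added robustness summary; ct.margin is the standard python-control API
# for gain and phase margins of a loop transfer function.
gm, pm, wcg, wcp = ct.margin(L1)
print("Gain margin: %g, phase margin: %g deg" % (gm, pm))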
# Modified control law
wc = 10
zc = 2.6
eigs = np.roots([1, 2*zc*wc, wc**2])
K = ct.place(A, B, eigs)
kr = np.real(1/clsys(0))
print("K = ", np.squeeze(K))
# Construct an output-based controller for the system
C2 = ct.ss2tf(ct.StateSpace(A - B@K - L@C, L, K, 0))
print("C(s) = ", C2)
# Plot the gang of four for the two designs
ct.gangof4(P, C1, np.logspace(-1, 3, 100))
ct.gangof4(P, C2, np.logspace(-1, 3, 100))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Vehicle steering dynamics (Example 3.11)
Step2: Vehicle driving on a curvy road (Figure 8.6a)
Step3: Linearization of lateral steering dynamics (Example 6.13)
Step4: Eigenvalue placement controller design (Example 7.4)
Step5: Eigenvalue placement observer design (Example 8.3)
Step6: Linear observer applied to nonlinear system output
Step7: Output Feedback Controller (Example 8.4)
Step8: Trajectory Generation (Example 8.8)
Step9: To find a trajectory from an initial state $x_0$ to a final state $x_\text{f}$ in time $T_\text{f}$ we solve a point-to-point trajectory generation problem. We also set the initial and final inputs, which sets the vehicle velocity $v$ and steering wheel angle $\delta$ at the endpoints.
Step10: Change of basis function
Step11: Added cost function
Step12: Vehicle transfer functions for forward and reverse driving (Example 10.11)
Step13: Feedforward Compensation (Example 12.6)
Step14: Fundamental Limits (Example 14.13)
|
3,475
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Luke Bloy <luke.bloy@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from mne.filter import next_fast_len
import mne
print(__doc__)
data_path = mne.datasets.opm.data_path()
subject = 'OPM_sample'
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
bem_fname = op.join(subjects_dir, subject, 'bem',
subject + '-5120-5120-5120-bem-sol.fif')
src_fname = op.join(bem_dir, '%s-oct6-src.fif' % subject)
vv_fname = data_path + '/MEG/SQUID/SQUID_resting_state.fif'
vv_erm_fname = data_path + '/MEG/SQUID/SQUID_empty_room.fif'
vv_trans_fname = data_path + '/MEG/SQUID/SQUID-trans.fif'
opm_fname = data_path + '/MEG/OPM/OPM_resting_state_raw.fif'
opm_erm_fname = data_path + '/MEG/OPM/OPM_empty_room_raw.fif'
opm_trans_fname = None
opm_coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')
raws = dict()
raw_erms = dict()
new_sfreq = 90. # Nyquist frequency (45 Hz) < line noise freq (50 Hz)
raws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error') # ignore naming
raws['vv'].load_data().resample(new_sfreq)
raws['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error')
raw_erms['vv'].load_data().resample(new_sfreq)
raw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raws['opm'] = mne.io.read_raw_fif(opm_fname)
raws['opm'].load_data().resample(new_sfreq)
raw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname)
raw_erms['opm'].load_data().resample(new_sfreq)
# Make sure our assumptions later hold
assert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq']
titles = dict(vv='VectorView', opm='OPM')
ssp_ecg, _ = mne.preprocessing.compute_proj_ecg(
raws['vv'], tmin=-0.1, tmax=0.1, n_grad=1, n_mag=1)
raws['vv'].add_proj(ssp_ecg, remove_existing=True)
# due to how compute_proj_eog works, it keeps the old projectors, so
# the output contains both projector types (and also the original empty-room
# projectors)
ssp_ecg_eog, _ = mne.preprocessing.compute_proj_eog(
raws['vv'], n_grad=1, n_mag=1, ch_name='MEG0112')
raws['vv'].add_proj(ssp_ecg_eog, remove_existing=True)
raw_erms['vv'].add_proj(ssp_ecg_eog)
fig = mne.viz.plot_projs_topomap(raws['vv'].info['projs'][-4:],
info=raws['vv'].info)
fig.suptitle(titles['vv'])
fig.subplots_adjust(0.05, 0.05, 0.95, 0.85)
kinds = ('vv', 'opm')
n_fft = next_fast_len(int(round(4 * new_sfreq)))
print('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raws['vv'].info['sfreq']))
for kind in kinds:
fig = raws[kind].plot_psd(n_fft=n_fft, proj=True)
fig.suptitle(titles[kind])
fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)
src = mne.read_source_spaces(src_fname)
# This line removes source-to-source distances that we will not need.
# We only do it here to save a bit of memory, in general this is not required.
del src[0]['dist'], src[1]['dist']
bem = mne.read_bem_solution(bem_fname)
fwd = dict()
trans = dict(vv=vv_trans_fname, opm=opm_trans_fname)
# check alignment and generate forward
with mne.use_coil_def(opm_coil_def_fname):
for kind in kinds:
dig = True if kind == 'vv' else False
fig = mne.viz.plot_alignment(
raws[kind].info, trans=trans[kind], subject=subject,
subjects_dir=subjects_dir, dig=dig, coord_frame='mri',
surfaces=('head', 'white'))
mne.viz.set_3d_view(figure=fig, azimuth=0, elevation=90,
distance=0.6, focalpoint=(0., 0., 0.))
fwd[kind] = mne.make_forward_solution(
raws[kind].info, trans[kind], src, bem, eeg=False, verbose=True)
del trans, src, bem
freq_bands = dict(
delta=(2, 4), theta=(5, 7), alpha=(8, 12), beta=(15, 29), gamma=(30, 45))
topos = dict(vv=dict(), opm=dict())
stcs = dict(vv=dict(), opm=dict())
snr = 3.
lambda2 = 1. / snr ** 2
for kind in kinds:
noise_cov = mne.compute_raw_covariance(raw_erms[kind])
inverse_operator = mne.minimum_norm.make_inverse_operator(
raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True)
stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd(
raws[kind], inverse_operator, lambda2=lambda2,
n_fft=n_fft, dB=False, return_sensor=True, verbose=True)
topo_norm = sensor_psd.data.sum(axis=1, keepdims=True)
stc_norm = stc_psd.sum() # same operation on MNE object, sum across freqs
# Normalize each source point by the total power across freqs
for band, limits in freq_bands.items():
data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True)
topos[kind][band] = mne.EvokedArray(
100 * data / topo_norm, sensor_psd.info)
stcs[kind][band] = \
100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data
del inverse_operator
del fwd, raws, raw_erms
def plot_band(kind, band):
    """Plot activity within a frequency band on the subject's brain."""
title = "%s %s\n(%d-%d Hz)" % ((titles[kind], band,) + freq_bands[band])
    fig = topos[kind][band].plot_topomap(
        times=0., scalings=1., cbar_fmt='%0.1f', vmin=0, cmap='inferno',
        time_format=title)
brain = stcs[kind][band].plot(
subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both',
time_label=title, title=title, colormap='inferno',
clim=dict(kind='percent', lims=(70, 85, 99)))
brain.show_view(dict(azimuth=0, elevation=0), roll=0)
return fig, brain
fig_theta, brain_theta = plot_band('vv', 'theta')
fig_alpha, brain_alpha = plot_band('vv', 'alpha')
fig_beta, brain_beta = plot_band('vv', 'beta')
fig_beta_opm, brain_beta_opm = plot_band('opm', 'beta')
fig_gamma, brain_gamma = plot_band('vv', 'gamma')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data, resample. We will store the raw objects in dicts with entries
Step2: Do some minimal artifact rejection just for VectorView data
Step3: Explore data
Step4: Alignment and forward
Step5: Compute and apply inverse to PSD estimated using multitaper + Welch.
Step7: Now we can make some plots of each frequency band. Note that the OPM head
Step8: Alpha
Step9: Beta
Step10: Gamma
|
3,476
|
<ASSISTANT_TASK:>
Python Code:
from IPython.parallel import Client, error
cluster = Client(profile='mpi')
view = cluster[:]
%%px
from mpi4py import MPI
mpi = MPI.COMM_WORLD
bcast = mpi.bcast
barrier = mpi.barrier
rank = mpi.rank
print "MPI rank: %i/%i" % (mpi.rank,mpi.size)
%%px
import sys
from proteus.iproteus import *
%%px
from proteus import default_n, default_s, default_so
Profiling.logLevel=5
Profiling.verbose=False
#!tar czvf mydir.tar.gz ls_p.py tank_so.py twp_navier_stokes_n.py redist_n.py twp_navier_stokes_p.py redist_p.py vof_n.py vof_p.py ls_consrv_n.py ls_consrv_p.py ls_n.py tank.py
#!scp mydir.tar.gz spirit01.afrl.hpc.mil:
#%%px
#import subprocess
#if rank == 0:
# status_gz = subprocess.call("gunzip " + "mydir.tar.gz", shell=True)
#status_tar = subprocess.call("tar "+"xvf mydir.tar",shell=True)
# os.system('tar xzvf mydir.tgz')
%%px
import floating_bar,floating_bar_so
%%px
from proteus import Comm
from petsc4py import PETSc
so = floating_bar_so
so.tnList = so.tnList
pList=[]
nList=[]
so.sList=[]
OptDB = PETSc.Options()
for (p,n) in so.pnList:
so.sList.append(default_s)
pList.append(__import__(p))
nList.append(__import__(n))
pList[-1].name = p
nList[-1].multilevelLinearSolver = default_n.KSP_petsc4py
nList[-1].levelLinearSolver = default_n.KSP_petsc4py
OptDB.setValue(nList[-1].linear_solver_options_prefix+"ksp_type", "preonly")
OptDB.setValue(nList[-1].linear_solver_options_prefix+"pc_type", "lu")
OptDB.setValue(nList[-1].linear_solver_options_prefix+"pc_factor_mat_solver_package","superlu_dist")
opts.save_dof = True
opts.dataDir='.'
opts.probDir='.'
opts.logLevel=7
opts.verbose=True
%%px
ns = NumericalSolution.NS_base(so, pList, nList, so.sList, opts)
%%px --noblock
from threading import Thread
# Create a thread wrapper for the simulation. The target must be an argument-less
# function so we wrap the call to `calculateSolution` in a simple lambda:
simulation_thread = Thread(target = lambda : ns.calculateSolution('run1'))
import numpy as np
import numpy
from pythreejs import *
from IPython.core.display import clear_output
def plot_current_results(in_place=True):
import numpy as np
#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.pyplot as plt
    """Makes a blocking call to retrieve remote data and displays the solution mesh
    as a contour plot.

    Parameters
    ----------
    in_place : bool
      By default it calls clear_output so that new plots replace old ones. Set
      to False to allow keeping of all previous outputs.
    """
global nn,x,y,u,vertices,triangles,domain,rigid_bar
# We make a blocking call to load the remote data from the simulation into simple named
# variables we can read from the engine namespaces
#load_simulation_globals()
view.apply_sync(load_simulation_globals)
# And now we can use the view to read these variables from all the engines. Then we
# concatenate all of them into single arrays for local plotting
#x = np.concatenate(view['x'])
#y = np.concatenate(view['y'])
#z = np.concatenate(view['z'])
vertices = np.concatenate(view['vertices'])
shifts = np.cumsum([0]+view['nn'][:-1])
flat_triangles = np.concatenate([ tri + shift for tri,shift in zip(view['triangles'], shifts) ])
#flat_triangles=triangles
# We can now call the matplotlib plotting function we need
#fig, ax = plt.subplots(subplot_kw=dict(aspect='equal'))
#fig = plt.figure()
#ax = fig.gca(projection='3d')
#print x.shape,y.shape,flat_triangles.shape
#print flat_triangles.flat[:].max(),flat_triangles.flat[:].min()
#ax.plot_trisurf(X=x, Y=y, TRIANGLES=flat_triangles)#, Z=z)
#ax.plot_trisurf(x,y,z)
widget_surface.geometry = FaceGeometry(vertices=list(vertices.flatten()),
face3=list(flat_triangles.flatten()))
#from matplotlib import tri
#mesh = tri.Triangulation(x,y,flat_triangles)
#help(mesh)
#ip = tri.LinearTriInterpolator(triangulation, u, trifinder=None)
#X = np.linspace(0,tank.domain.L[0],40)
#Z = np.linspace(0,tank.domain.L[1],40)
#U = np.zeros((40,40),'d')
#W = np.zeros((40,40),'d')
#U = ip(X,Z)
# We clear the notebook output before plotting this if in-place plot updating is requested
#if in_place:
# clear_output()
#display(fig)
#return fig
return 0
def load_simulation_globals():
    """Put some variables we need in engine namespace.

    These can then be retrieved by clients for inspection, visualization, etc.
    """
global nn, vertices,x, y, z, triangles,domain,rigid_bar
isosurface = ns.auxiliaryVariables[ns.modelList[2].name][0]
rigid_bar = ns.auxiliaryVariables[ns.modelList[0].name][0]
domain = ns.pList[0].domain
nodes = isosurface.nodes_array
triangles = isosurface.elements_array
x = nodes[:,0]
y = nodes[:,1]
z = nodes[:,2]
vertices = nodes
nn = len(x)
def simulation_alive():
    """Return True if the simulation thread is still running on any engine."""
#return simulation_thread.is_alive()
return any(view.apply_sync(lambda : simulation_thread.is_alive()))
def monitor_simulation(refresh=5.0, plots_in_place=True):
    """Monitor the simulation progress and call plotting routine.

    Suppress KeyboardInterrupt exception if interrupted, ensure that the last
    figure is always displayed and provide basic timing and simulation status.

    Parameters
    ----------
    refresh : float
      Refresh interval between calls to retrieve and plot data. The default
      is 5s, adjust depending on the desired refresh rate, but be aware that
      very short intervals will start having a significant impact.
    plots_in_place : bool
      If true, every new figure replaces the last one, producing a (slow)
      animation effect in the notebook. If false, all frames are plotted
      in sequence and appended in the output area.
    """
import datetime as dt, time
if not simulation_alive():
plot_current_results(in_place=plots_in_place)
        #plt.close('all')  # matplotlib path disabled; rendering is via pythreejs
print 'Simulation has already finished, no monitoring to do.'
return
t0 = dt.datetime.now()
fig = None
try:
while simulation_alive():
plot_current_results(in_place=plots_in_place)
#plt.close('all') # prevent re-plot of old figures
time.sleep(refresh) # so we don't hammer the server too fast
except (KeyboardInterrupt):#, error.TimeoutError):
msg = 'Monitoring interrupted, simulation is ongoing!'
else:
msg = 'Simulation completed!'
tmon = dt.datetime.now() - t0
#if plots_in_place and fig is not None:
# clear_output()
# display(fig)
print msg
print 'Monitored for: %s.' % tmon
%px simulation_thread.start()
view.apply_sync(load_simulation_globals)
vertices = np.concatenate(view['vertices'])
shifts = np.cumsum([0]+view['nn'][:-1])
flat_triangles = np.concatenate([ tri + shift for tri,shift in zip(view['triangles'], shifts) ])
from pythreejs import *
from IPython.display import display
domain = view['domain'][0]
triangles = flat_triangles
#rigid_bar = view['rigid_bar'][0]
#L = view['ns'][0].pList[0].domain.L
#x_ll = view['ns'][0].pList[0].domain.x_ll
#b = view['ns'][0].nList[0].auxiliaryVariables[0].bar
#sx,sy,sz = bar.boxsize
sx,sy,sz = (0.5,0.5,0.5)
widget_bar = Mesh(geometry=BoxGeometry(width=sx,height=sy,depth=sz),
material=LambertMaterial(color='black'),
position=(0.5,0.5,0.5))#b.getPosition())
#widget.quaternion_from_rotation(b.getRotation())
center = (0.5*(domain.x[0]+domain.L[0]),
0.5*(domain.x[1]+domain.L[1]),
0.5*(domain.x[2]+domain.L[2]))
widget_surface = Mesh(geometry=FaceGeometry(vertices=list(vertices.flatten()),face3=list(flat_triangles.flatten())),
material=LambertMaterial(color='aqua'))
children=[widget_bar,widget_surface]
children.append(Mesh(geometry=BoxGeometry(width=domain.L[0],height=domain.L[1],depth=domain.L[2]),
material=LambertMaterial(color=0xffffff, #color=0xccccff,
opacity=0.7,
refractionRatio=0.985,
reflectivity= 0.9,
transparent=True),
position=center))
#children.append(Mesh(geometry=PlaneGeometry(width=10,height=10),material=BasicMaterial(color='blue')))
children.append(AmbientLight(color=0x777777))
scene = Scene(children=children)
c = PerspectiveCamera(target=center,
position=(center[0],center[1]+2*domain.L[1],center[2]),
up=[0,0,1],
children=[DirectionalLight(color='white',
position=(center[0],center[1],center[2]+2*domain.L[2]),
intensity=0.5)])
renderer = Renderer(camera=c, scene = scene, controls=[OrbitControls(controlling=c)])
renderer.background='gray'
ar = float(renderer.height)/(renderer.width)
renderer.width = 600
renderer.height = int(ar*renderer.width)
display(renderer)
monitor_simulation(refresh=5.0)
%%px
from tables import openFile
archive = openFile('tank_p%d.h5' % (rank,),'r')
def load_post_simulation_globals(it):
Put some variables we need in engine namespace.
These can then be retrieved by clients for inspection, visualization, etc.
global phi
print it
phi=archive.getNode("/phi"+`it`)[:];
x = np.concatenate(view['x'])
y = np.concatenate(view['y'])
shifts = np.cumsum([0]+view['nn'][:-1])
flat_triangles = np.concatenate([ tri + shift for tri,shift in zip(view['triangles'], shifts) ])
triplot(x,y,flat_triangles)
!rm phi*png
import tank
for it,t in enumerate(view['tank_so.tnList'][0]):
view.apply_sync(load_post_simulation_globals,it)
phi = np.concatenate(view['phi'])
pyplot.clf()
plt.xlabel(r'z[m]')
plt.ylabel(r'x[m]')
colors = ['b','g','r','c','m','y','k','w']
pylab.xlim(tank.domain.x[0]-0.1*tank.domain.L[0],tank.domain.x[0]+tank.domain.L[0]+0.1*tank.domain.L[0])
pyplot.axis('equal')
for si,s in enumerate(tank.segments):
pyplot.plot([tank.domain.vertices[s[0]][0],
tank.domain.vertices[s[1]][0]],
[tank.domain.vertices[s[0]][1],
tank.domain.vertices[s[1]][1]],
color=colors[tank.domain.segmentFlags[si]-1],
linewidth=2,
marker='o')
pyplot.tricontour(x,y,flat_triangles,phi,[0])
pyplot.title('T=%2.2f' % (t,))
pyplot.savefig('phi%4.4d.png' % (it,))
!avconv -y -i phi%04d.png -c libx264 -qscale 1 tankPhi.mp4
from IPython.core.display import HTML
data_uri_mp4 = open("tankPhi.mp4", "rb").read().encode("base64").replace("\n", "")
video_tag = """<video controls>
<source type="video/mp4" src="data:video/mp4;base64,{mp4}"/>
Your browser does not support the video tag
</video>""".format(mp4=data_uri_mp4)
HTML(data=video_tag)
from IPython.display import FileLink,FileLinks
FileLink('tankPhi.mp4')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: IPython Parallel "Magics"
Step2: Load Proteus
Step3: Define the tank geometry
Step4: Physics and Numerics
Step5: Numerical Solution Object
Step10: Define some functions to help monitor the calculation
Step11: Run the tank
Step14: Post-process the numerical solution
|
3,477
|
<ASSISTANT_TASK:>
Python Code:
import re # Regular Expressions
import pandas as pd # DataFrames & Manipulation
import nltk.data # Sentence tokenizer
from nltk.corpus import stopwords # Import the stop word list
from bs4 import BeautifulSoup # HTML processing
from gensim.models.word2vec import Word2Vec
train_input = "../data/recipes.tsv.bz2"
# keep empty strings (http://pandas-docs.github.io/pandas-docs-travis/io.html#na-values)
train = pd.read_csv(train_input, delimiter="\t", quoting=3, encoding="utf-8", keep_default_na=False)
# load sentence tokenizer model and initialize for german language
nltk.download("punkt")
tokenizer = nltk.data.load('tokenizers/punkt/german.pickle')
def clean_text( input, remove_stopwords = False ):
Remove HTML, non-letter characters, and convert to lower case.
Return list of words.
# remove HTML markup with BeautifulSoup (and keep spaces after removal)
plainText = " ".join(BeautifulSoup(input, 'html.parser').strings)
# retain only letters (include umlauts)
onlyLetters = re.sub(u"[^a-zA-ZäöüÄÖÜß]", " ", plainText)
# get lower case words
words = onlyLetters.lower().split()
# use set of stop words
if remove_stopwords:
stops = set(stopwords.words("german")) # the recipe corpus is German
words = [w for w in words if not w in stops]
return words
def clean_sentences( text, remove_stopwords = False ):
Split text by sentences and clean each sentence.
sentences = tokenizer.tokenize(text)
return filter(None, [clean_text(line, remove_stopwords) for line in sentences])
sentences = []
size = train['instructions'].size
for i in xrange ( 0, size ):
if (i+1) % 1000 == 0:
print "Processing %d of %d recipies." % ( i+1, size )
sentences += clean_sentences(train['instructions'][i])
print "Total: %d sentences.\n" % len(sentences)
print "Example Sentences:\n", "\n".join([",".join(sentence) for sentence in sentences[0:3]])
# Set values for various parameters
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Import the built-in logging module and configure it so that Word2Vec creates nice output messages
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
print "Training model..."
model = Word2Vec(sentences, workers=num_workers, \
size=num_features, min_count = min_word_count, \
window = context, sample = downsampling)
print "%d words in vocabular." % len(model.wv.vocab)
# make the model much more memory-efficient.
model.init_sims(replace=True)
model_name = "recipes-words_%dfeatures_%dminwords_%dcontext" % (num_features, min_word_count, context)
model.save(model_name)
model.doesnt_match("milch brot eier".split())
model.most_similar("braten")
model.most_similar("pasta")
model.most_similar("brownies")
model.most_similar("lasagne")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare text processing
Step4: Define functions for cleaning the text data.
Step5: Initialize and train the model
Step6: Save the model for later use or continued training
Step7: Some model evaluation examples
|
3,478
|
<ASSISTANT_TASK:>
Python Code:
### Load in necessary libraries for data input and normalization
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
%load_ext autoreload
%autoreload 2
from my_answers import *
### load in and normalize the dataset
dataset = np.loadtxt('datasets/normalized_apple_prices.csv')
# lets take a look at our time series
plt.plot(dataset)
plt.xlabel('time period')
plt.ylabel('normalized series value')
odd_nums = np.array([1,3,5,7,9,11,13])
# run a window of size 2 over the odd number sequence and display the results
window_size = 2
X = []
X.append(odd_nums[0:2])
X.append(odd_nums[1:3])
X.append(odd_nums[2:4])
X.append(odd_nums[3:5])
X.append(odd_nums[4:6])
y = odd_nums[2:]
X = np.asarray(X)
y = np.asarray(y)
y = np.reshape(y, (len(y),1)) #optional
assert(type(X).__name__ == 'ndarray')
assert(type(y).__name__ == 'ndarray')
assert(X.shape == (5,2))
assert(y.shape in [(5,1), (5,)])
# print out input/output pairs --> here input = X, corresponding output = y
print ('--- the input X will look like ----')
print (X)
print ('--- the associated output y will look like ----')
print (y)
### TODO: implement the function window_transform_series in the file my_answers.py
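# A minimal sketch of one possible implementation (an assumption -- the graded
# version lives in my_answers.py, and the import below replaces this one):
def window_transform_series(series, window_size):
    # each window of length window_size is an input; the value immediately
    # after the window is the corresponding output
    X = [series[i:i + window_size] for i in range(len(series) - window_size)]
    y = series[window_size:]
    return np.asarray(X), np.asarray(y).reshape(-1, 1)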
from my_answers import window_transform_series
# window the data using your windowing function
window_size = 7
X,y = window_transform_series(series = dataset,window_size = window_size)
# split our dataset into training / testing sets
train_test_split = int(np.ceil(2*len(y)/float(3))) # set the split point
# partition the training set
X_train = X[:train_test_split,:]
y_train = y[:train_test_split]
# keep the last chunk for testing
X_test = X[train_test_split:,:]
y_test = y[train_test_split:]
# NOTE: to use keras's RNN LSTM module our input must be reshaped to [samples, window size, stepsize]
X_train = np.asarray(np.reshape(X_train, (X_train.shape[0], window_size, 1)))
X_test = np.asarray(np.reshape(X_test, (X_test.shape[0], window_size, 1)))
### TODO: create required RNN model
# import keras network libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import keras
# given - fix random seed - so we can all reproduce the same results on our default time series
np.random.seed(0)
# TODO: implement build_part1_RNN in my_answers.py
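# A hedged sketch of one possible architecture (an assumption -- the graded
# model is defined in my_answers.py, and the import below takes precedence):
def build_part1_RNN(window_size):
    model = Sequential()
    model.add(LSTM(5, input_shape=(window_size, 1)))  # small recurrent layer
    model.add(Dense(1))  # single regression output
    return model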
from my_answers import build_part1_RNN
model = build_part1_RNN(window_size)
# build model using keras documentation recommended optimizer initialization
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile the model
model.compile(loss='mean_squared_error', optimizer=optimizer)
# run your model!
model.fit(X_train, y_train, epochs=1000, batch_size=50, verbose=0)
# generate predictions for training
train_predict = model.predict(X_train)
test_predict = model.predict(X_test)
# print out training and testing errors
training_error = model.evaluate(X_train, y_train, verbose=0)
print('training error = ' + str(training_error))
testing_error = model.evaluate(X_test, y_test, verbose=0)
print('testing error = ' + str(testing_error))
### Plot everything - the original series as well as predictions on training and testing sets
import matplotlib.pyplot as plt
%matplotlib inline
# plot original series
plt.plot(dataset,color = 'k')
# plot training set prediction
split_pt = train_test_split + window_size
plt.plot(np.arange(window_size,split_pt,1),train_predict,color = 'b')
# plot testing set prediction
plt.plot(np.arange(split_pt,split_pt + len(test_predict),1),test_predict,color = 'r')
# pretty up graph
plt.xlabel('day')
plt.ylabel('(normalized) price of Apple stock')
plt.legend(['original series','training fit','testing fit'],loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# read in the text, transforming everything to lower case
text = open('datasets/holmes.txt').read().lower()
print('our original text has ' + str(len(text)) + ' characters')
### print out the first 1000 characters of the raw text to get a sense of what we need to throw out
text[:2000]
### find and replace '\n' and '\r' symbols - replacing them
text = text[1302:]
text = text.replace('\n',' ') # replace newline characters with spaces
text = text.replace('\r',' ')
### print out the first 1000 characters of the raw text to get a sense of what we need to throw out
text[:1000]
### TODO: implement cleaned_text in my_answers.py
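# One possible implementation (an assumption -- the graded version is in
# my_answers.py and the import below replaces this one): keep lowercase ascii
# letters, spaces and basic punctuation, and map everything else to a space.
def cleaned_text(text):
    punctuation = ['!', ',', '.', ':', ';', '?']
    allowed = set('abcdefghijklmnopqrstuvwxyz ') | set(punctuation)
    return ''.join(c if c in allowed else ' ' for c in text)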
from my_answers import cleaned_text
text = cleaned_text(text)
# shorten any extra dead space created above
text = text.replace(' ',' ')
### print out the first 2000 characters of the raw text to get a sense of what we need to throw out
text[:2000]
# count the number of unique characters in the text
chars = sorted(list(set(text)))
# print some of the text, as well as statistics
print ("this corpus has " + str(len(text)) + " total number of characters")
print ("this corpus has " + str(len(chars)) + " unique characters")
### TODO: implement window_transform_text in my_answers.py
from my_answers import window_transform_text
# run your text window-ing function
window_size = 100
step_size = 5
inputs, outputs = window_transform_text(text,window_size,step_size)
# print out a few of the input/output pairs to verify that we've made the right kind of stuff to learn from
print('input = ' + inputs[2])
print('output = ' + outputs[2])
print('--------------')
print('input = ' + inputs[100])
print('output = ' + outputs[100])
# print out the number of unique characters in the dataset
chars = sorted(list(set(text)))
print ("this corpus has " + str(len(chars)) + " unique characters")
print ('and these characters are ')
print (chars)
# this dictionary is a function mapping each unique character to a unique integer
chars_to_indices = dict((c, i) for i, c in enumerate(chars)) # map each unique character to unique integer
# this dictionary is a function mapping each unique integer back to a unique character
indices_to_chars = dict((i, c) for i, c in enumerate(chars)) # map each unique integer back to unique character
# transform character-based input/output into equivalent numerical versions
def encode_io_pairs(text,window_size,step_size):
# number of unique chars
chars = sorted(list(set(text)))
num_chars = len(chars)
# cut up text into character input/output pairs
inputs, outputs = window_transform_text(text,window_size,step_size)
# create empty vessels for one-hot encoded input/output
X = np.zeros((len(inputs), window_size, num_chars), dtype=np.bool)
y = np.zeros((len(inputs), num_chars), dtype=np.bool)
# loop over inputs/outputs and transform and store in X/y
for i, sentence in enumerate(inputs):
for t, char in enumerate(sentence):
X[i, t, chars_to_indices[char]] = 1
y[i, chars_to_indices[outputs[i]]] = 1
return X,y
# use your function
window_size = 100
step_size = 5
X,y = encode_io_pairs(text,window_size,step_size)
### necessary functions from the keras library
from keras.models import Sequential
from keras.layers import Dense, Activation, LSTM
from keras.optimizers import RMSprop
from keras.utils.data_utils import get_file
import keras
import random
# TODO implement build_part2_RNN in my_answers.py
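# A hedged sketch (an assumption -- the graded model lives in my_answers.py
# and the import below takes precedence): one LSTM layer over the one-hot
# encoded windows, followed by a softmax over the character set.
def build_part2_RNN(window_size, num_chars):
    model = Sequential()
    model.add(LSTM(200, input_shape=(window_size, num_chars)))
    model.add(Dense(num_chars, activation='softmax'))
    return model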
from my_answers import build_part2_RNN
model = build_part2_RNN(window_size, len(chars))
# initialize optimizer
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
# compile model --> make sure initialized optimizer and callbacks - as defined above - are used
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
# a small subset of our input/output pairs
Xsmall = X[:10000,:,:]
ysmall = y[:10000,:]
# train the model
model.fit(Xsmall, ysmall, batch_size=500, epochs=40,verbose = 1)
# save weights
model.save_weights('model_weights/best_RNN_small_textdata_weights.hdf5')
# function that uses trained model to predict a desired number of future characters
def predict_next_chars(model,input_chars,num_to_predict):
# create output
predicted_chars = ''
for i in range(num_to_predict):
# convert this round's predicted characters to numerical input
x_test = np.zeros((1, window_size, len(chars)))
for t, char in enumerate(input_chars):
x_test[0, t, chars_to_indices[char]] = 1.
# make this round's prediction
test_predict = model.predict(x_test,verbose = 0)[0]
# translate numerical prediction back to characters
r = np.argmax(test_predict) # predict class of each test input
d = indices_to_chars[r]
# update predicted_chars and input
predicted_chars+=d
input_chars+=d
input_chars = input_chars[1:]
return predicted_chars
# TODO: choose an input sequence and use the prediction function in the previous Python cell to predict 100 characters following it
# get an appropriately sized chunk of characters from the text
start_inds = [10, 100, 1000, 10000]
# load in weights
model.load_weights('model_weights/best_RNN_small_textdata_weights.hdf5')
for s in start_inds:
start_index = s
input_chars = text[start_index: start_index + window_size]
# use the prediction function
predict_input = predict_next_chars(model,input_chars,num_to_predict = 100)
# print out input characters
print('------------------')
input_line = 'input chars = ' + '\n' + input_chars + '"' + '\n'
print(input_line)
# print out predicted characters
line = 'predicted chars = ' + '\n' + predict_input + '"' + '\n'
print(line)
### A simple way to write output to file
f = open('my_test_output.txt', 'w') # create an output file to write too
f.write('this is only a test ' + '\n') # print some output text
x = 2
f.write('the value of x is ' + str(x) + '\n') # record a variable value
f.close()
# print out the contents of my_test_output.txt
f = open('my_test_output.txt', 'r') # create an output file to write too
f.read()
# a small subset of our input/output pairs
Xlarge = X[:100000,:,:]
ylarge = y[:100000,:]
# TODO: fit to our larger dataset
model.fit(Xlarge, ylarge, batch_size=500, epochs=30, verbose=1)
# save weights
model.save_weights('model_weights/best_RNN_large_textdata_weights.hdf5')
# TODO: choose an input sequence and use the prediction function in the previous Python cell to predict 100 characters following it
# get an appropriately sized chunk of characters from the text
start_inds = [10, 100, 1000, 10000]
# save output
f = open('text_gen_output/RNN_large_textdata_output.txt', 'w') # create an output file to write too
# load weights
model.load_weights('model_weights/best_RNN_large_textdata_weights.hdf5')
for s in start_inds:
start_index = s
input_chars = text[start_index: start_index + window_size]
# use the prediction function
predict_input = predict_next_chars(model,input_chars,num_to_predict = 100)
# print out input characters
line = '-------------------' + '\n'
print(line)
f.write(line)
input_line = 'input chars = ' + '\n' + input_chars + '"' + '\n'
print(input_line)
f.write(input_line)
# print out predicted characters
predict_line = 'predicted chars = ' + '\n' + predict_input + '"' + '\n'
print(predict_line)
f.write(predict_line)
f.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lets take a quick look at the (normalized) time series we'll be performing predictions on.
Step2: 1.2 Cutting our time series into sequences
Step3: Here is a hard-coded solution for odd_nums. You can compare its results with what you get from your window_transform_series implementation.
Step4: Again - you can check that your completed window_transform_series function works correctly by trying it on the odd_nums sequence - you should get the above output.
Step5: With this function in place apply it to the series in the Python cell below. We use a window_size = 7 for these experiments.
Step6: 1.3 Splitting into training and testing sets
Step7: <a id='TODO_2'></a>
Step8: With your model built you can now fit the model by activating the cell below! Note
Step9: 1.5 Checking model performance
Step10: In the next cell we compute training and testing errors using our trained model - you should be able to achieve at least
Step11: Activating the next cell plots the original data, as well as both predictions on the training and testing sets.
Step12: Note
Step13: Next, lets examine a bit of the raw text. Because we are interested in creating sentences of English words automatically by building up each word character-by-character, we only want to train on valid English words. In other words - we need to remove all of the other characters that are not part of English words.
Step14: Wow - there's a lot of junk here (i.e., weird uncommon character combinations - as this first character chunk contains the title and author page, as well as table of contents)! To keep things simple, we want to train our RNN on a large chunk of more typical English sentences - we don't want it to start thinking non-english words or strange characters are valid! - so lets clean up the data a bit.
Step15: Lets see how the first 1000 characters of our text looks now!
Step16: <a id='TODO_3'></a>
Step17: With your chosen characters removed print out the first few hundred lines again just to double check that everything looks good.
Step18: Now that we have thrown out a good number of non-English characters/character sequences lets print out some statistics about the dataset - including number of total characters and number of unique characters.
Step19: 2.3 Cutting data into input/output pairs
Step20: With our function complete we can now use it to produce input/output pairs! We employ the function in the next cell, where the window_size = 100 and step_size = 5.
Step21: Lets print out a few input/output pairs to verify that we have made the right sort of stuff!
Step22: Looks good!
Step23: Rockin' - so we have a multiclass classification problem on our hands!
Step24: Now we can transform our input/output pairs - consisting of characters - to equivalent input/output pairs made up of one-hot encoded vectors. In the next cell we provide a function for doing just this
Step25: Now run the one-hot encoding function by activating the cell below and transform our input/output pairs!
Step26: <a id='TODO_5'></a>
Step27: 2.7 Training our RNN model for text generation
Step28: Now lets fit our model!
Step29: How do we make a given number of predictions (characters) based on this fitted model?
Step30: <a id='TODO_6'></a>
Step31: This looks ok, but not great. Now lets try the same experiment with a larger chunk of the data - with the first 100,000 input/output pairs.
Step32: With this recording devices we can now more safely perform experiments on larger portions of the text. In the next cell we will use the first 100,000 input/output pairs to train our RNN model.
|
3,479
|
<ASSISTANT_TASK:>
Python Code:
import stripy as stripy
import numpy as np
xmin = 0.0
xmax = 10.0
ymin = 0.0
ymax = 10.0
extent = [xmin, xmax, ymin, ymax]
spacingX = 0.5
spacingY = 0.5
ellip0 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=0)
ellip1 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=1)
ellip2 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=2)
ellip3 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=3)
ellip4 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=4)
ellip5 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=5)
ellip6 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=6)
ellip7 = stripy.cartesian_meshes.elliptical_mesh(extent, spacingX, spacingY, refinement_levels=7)
print("Size of mesh - 1 {}".format(ellip1.points.shape[0]))
print("Size of mesh - 2 {}".format(ellip2.points.shape[0]))
print("Size of mesh - 3 {}".format(ellip3.points.shape[0]))
print("Size of mesh - 4 {}".format(ellip4.points.shape[0]))
print("Size of mesh - 5 {}".format(ellip5.points.shape[0]))
print("Size of mesh - 6 {}".format(ellip6.points.shape[0]))
print("Size of mesh - 7 {}".format(ellip7.points.shape[0]))
mx, my = ellip2.midpoint_refine_triangulation_by_vertices(vertices=[1,2,3,4,5,6,7,8,9,10])
ellip2mv = stripy.Triangulation(mx, my)
mx, my = ellip2.edge_refine_triangulation_by_vertices(vertices=[1,2,3,4,5,6,7,8,9,10])
ellip2ev = stripy.Triangulation(mx, my)
mx, my = ellip2.centroid_refine_triangulation_by_vertices(vertices=[1,2,3,4,5,6,7,8,9,10])
ellip2cv = stripy.Triangulation(mx, my)
mx, my = ellip2.edge_refine_triangulation_by_triangles(triangles=[1,2,3,4,5,6,7,8,9,10])
ellip2et = stripy.Triangulation(mx, my)
mx, my = ellip2.centroid_refine_triangulation_by_triangles(triangles=[1,2,3,4,5,6,7,8,9,10])
ellip2ct = stripy.Triangulation(mx, my)
str_fmt = "refinement --- {} points, {} simplices"
print(str_fmt.format(ellip2mv.npoints, ellip2mv.simplices.shape[0]))
print(str_fmt.format(ellip2ev.npoints, ellip2ev.simplices.shape[0]))
print(str_fmt.format(ellip2cv.npoints, ellip2cv.simplices.shape[0]))
print(str_fmt.format(ellip2et.npoints, ellip2et.simplices.shape[0]))
print(str_fmt.format(ellip2ct.npoints, ellip2ct.simplices.shape[0]))
%matplotlib inline
import matplotlib.pyplot as plt
def mesh_fig(mesh, meshR, name):
fig = plt.figure(figsize=(10, 10), facecolor="none")
ax = plt.subplot(111)
ax.axis('off')
generator = mesh
refined = meshR
x0 = generator.x
y0 = generator.y
xR = refined.x
yR = refined.y
ax.scatter(x0, y0, color="Red", marker="o", s=50)
ax.scatter(xR, yR, color="DarkBlue", marker="o", s=10)
ax.triplot(xR, yR, refined.simplices, color="black", linewidth=0.5)
fig.savefig(name, dpi=250, transparent=True)
return
mesh_fig(ellip2, ellip2mv, "MidpointByVertex1to10" )
mesh_fig(ellip2, ellip2ev, "EdgeByVertex1to10" )
mesh_fig(ellip2, ellip2cv, "CentroidByVertex1to10" )
mesh_fig(ellip2, ellip2et, "EdgeByTriangle1to10" )
mesh_fig(ellip2, ellip2ct, "CentroidByTriangle1to10" )
points = np.array([[ 3.33, 3.33], [7.77, 7.77]]).T
triangulations = [ellip1]
nearest, distances = triangulations[-1].nearest_vertex(points[:,0], points[:,1])
max_depth = 10
while nearest[0] == nearest[1] and max_depth > 0:
xs, ys = triangulations[-1].centroid_refine_triangulation_by_vertices(vertices=nearest[0])
new_triangulation = stripy.Triangulation(xs, ys)
nearest, distances = new_triangulation.nearest_vertex(points[:,0], points[:,1])
triangulations.append(new_triangulation)
max_depth -= 1
print("refinement_steps = {}".format(len(triangulations)))
centroid_triangulations = triangulations[:]
triangulations = [ellip1]
nearest, distances = triangulations[-1].nearest_vertex(points[:,0], points[:,1])
max_depth = 10
while nearest[0] == nearest[1] and max_depth > 0:
xs, ys = triangulations[-1].edge_refine_triangulation_by_vertices(vertices=nearest[0])
new_triangulation = stripy.Triangulation(xs, ys)
nearest, distances = new_triangulation.nearest_vertex(points[:,0], points[:,1])
triangulations.append(new_triangulation)
max_depth -= 1
print("refinement_steps = {}".format(len(triangulations)))
edge_triangulations = triangulations[:]
triangulations = [ellip1]
in_triangle = triangulations[-1].containing_triangle(points[:,0], points[:,1])
max_depth = 10
while in_triangle[0] == in_triangle[1] and max_depth > 0:
xs, ys = triangulations[-1].edge_refine_triangulation_by_triangles(in_triangle[0])
new_triangulation = stripy.Triangulation(xs, ys)
in_triangle = new_triangulation.containing_triangle(points[:,0], points[:,1])
triangulations.append(new_triangulation)
print(in_triangle)
if in_triangle.shape[0] == 0:
break
max_depth -= 1
print("refinement_steps = {}".format(len(triangulations)))
edge_t_triangulations = triangulations[:]
triangulations = [ellip1]
in_triangle = triangulations[-1].containing_triangle(points[:,0], points[:,1])
max_depth = 10
while in_triangle[0] == in_triangle[1] and max_depth > 0:
xs, ys = triangulations[-1].centroid_refine_triangulation_by_triangles(in_triangle[0])
new_triangulation = stripy.Triangulation(xs, ys)
in_triangle = new_triangulation.containing_triangle(points[:,0], points[:,1])
triangulations.append(new_triangulation)
print(in_triangle)
if in_triangle.shape[0] == 0:
break
max_depth -= 1
print("refinement_steps = {}".format(len(triangulations)))
centroid_t_triangulations = triangulations[:]
import matplotlib.pyplot as plt
%matplotlib inline
str_fmt = "{:18} --- {} simplices, equant max = {:.2f}, equant min = {:.2f}, size ratio = {:.2f}"
mesh_fig(edge_triangulations[0], edge_triangulations[-1], "EdgeByVertex" )
T = edge_triangulations[-1]
E = np.array(T.edge_lengths()).T
A = np.array(T.areas()).T
equant = np.max(E, axis=1) / np.min(E, axis=1)
size_ratio = np.sqrt(np.max(A) / np.min(A))
print(str_fmt.format("EdgeByVertex", T.simplices.shape[0], equant.max(), equant.min(), size_ratio))
mesh_fig(edge_t_triangulations[0], edge_t_triangulations[-1], "EdgeByTriangle" )
T = edge_t_triangulations[-1]
E = np.array(T.edge_lengths()).T
A = np.array(T.areas()).T
equant = np.max(E, axis=1) / np.min(E, axis=1)
size_ratio = np.sqrt(np.max(A) / np.min(A))
print(str_fmt.format("EdgeByTriangle", T.simplices.shape[0], equant.max(), equant.min(), size_ratio))
mesh_fig(centroid_triangulations[0], centroid_triangulations[-1], "CentroidByVertex" )
T = centroid_triangulations[-1]
E = np.array(T.edge_lengths()).T
A = np.array(T.areas()).T
equant = np.max(E, axis=1) / np.min(E, axis=1)
size_ratio = np.sqrt(np.max(A) / np.min(A))
print(str_fmt.format("CentroidByVertex", T.simplices.shape[0], equant.max(), equant.min(), size_ratio))
mesh_fig(centroid_t_triangulations[0], centroid_t_triangulations[-1], "CentroidByTriangle" )
T = centroid_t_triangulations[-1]
E = np.array(T.edge_lengths()).T
A = np.array(T.areas()).T
equant = np.max(E, axis=1) / np.min(E, axis=1)
size_ratio = np.sqrt(np.max(A) / np.min(A))
print(str_fmt.format("CentroidByTriangle", T.simplices.shape[0], equant.max(), equant.min(), size_ratio))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Uniform meshes by refinement
Step2: Refinement strategies
Step3: Visualisation of refinement strategies
Step4: Targetted refinement
Step5: Visualisation of targetted refinement
|
3,480
|
<ASSISTANT_TASK:>
Python Code:
import warnings
warnings.filterwarnings('ignore')
import os
import pathlib
import matplotlib.pyplot as plt
import numpy as np
from landlab.components import FlowDirectorSteepest, NetworkSedimentTransporter
from landlab.data_record import DataRecord
from landlab.grid.network import NetworkModelGrid
from landlab.plot import graph
from landlab.io import read_shapefile
from landlab import ExampleData
from landlab.plot import plot_network_and_parcels
%matplotlib inline
datadir = ExampleData("io/shapefile", case="methow").base
shp_file = datadir / "MethowSubBasin.shp"
points_shapefile = datadir / "MethowSubBasin_Nodes_4.shp"
grid = read_shapefile(
shp_file,
points_shapefile=points_shapefile,
node_fields=["usarea_km2", "Elev_m"],
link_fields=["usarea_km2", "Length_m"],
link_field_conversion={"usarea_km2": "drainage_area", "Slope":"channel_slope", "Length_m":"reach_length"},
node_field_conversion={
"usarea_km2": "drainage_area",
"Elev_m": "topographic__elevation",
},
threshold=0.01,
)
grid.at_link.keys()
grid.at_node.keys()
graph.plot_graph(grid, at="node,link")
grid.number_of_links
grid.number_of_nodes
grid.at_node["bedrock__elevation"] = grid.at_node["topographic__elevation"].copy()
grid.at_link["channel_width"] = 1 * np.ones(grid.number_of_links) # m
grid.at_link["flow_depth"] = 0.5 * np.ones(grid.number_of_links) # m
# element_id is the link on which the parcel begins.
element_id = np.repeat(np.arange(grid.number_of_links), 50)
element_id = np.expand_dims(element_id, axis=1)
volume = 1*np.ones(np.shape(element_id)) # (m3)
active_layer = np.ones(np.shape(element_id)) # 1= active, 0 = inactive
density = 2650 * np.ones(np.size(element_id)) # (kg/m3)
abrasion_rate = 0 * np.ones(np.size(element_id)) # (mass loss /m)
# Lognormal GSD
medianD = 0.15 # m
mu = np.log(medianD)
sigma = np.log(2) #assume that D84 = sigma*D50
np.random.seed(0)
D = np.random.lognormal(
mu,
sigma,
np.shape(element_id)
) # (m) the diameter of grains in each parcel
time_arrival_in_link = np.random.rand(np.size(element_id), 1)
location_in_link = np.random.rand(np.size(element_id), 1)
lithology = ["quartzite"] * np.size(element_id)
variables = {
"abrasion_rate": (["item_id"], abrasion_rate),
"density": (["item_id"], density),
"lithology": (["item_id"], lithology),
"time_arrival_in_link": (["item_id", "time"], time_arrival_in_link),
"active_layer": (["item_id", "time"], active_layer),
"location_in_link": (["item_id", "time"], location_in_link),
"D": (["item_id", "time"], D),
"volume": (["item_id", "time"], volume)
}
items = {"grid_element": "link", "element_id": element_id}
parcels = DataRecord(
grid,
items=items,
time=[0.0],
data_vars=variables,
dummy_elements={"link": [NetworkSedimentTransporter.OUT_OF_NETWORK]},
)
timesteps = 10 # total number of timesteps
dt = 60 * 60 * 24 *2 # length of timestep (seconds)
fd = FlowDirectorSteepest(grid, "topographic__elevation")
fd.run_one_step()
nst = NetworkSedimentTransporter(
grid,
parcels,
fd,
bed_porosity=0.3,
g=9.81,
fluid_density=1000,
transport_method="WilcockCrowe",
)
for t in range(0, (timesteps * dt), dt):
nst.run_one_step(dt)
print("Model time: ", t/(60*60*24), "days passed")
timestep_of_interest = 6
originating_link = 27
#filter the parcels to calculate total volumes of only the parcels that originated in the chosen link
parcelfilter = np.zeros_like(
parcels.dataset.element_id, dtype=bool
)
parcelfilter[:, timestep_of_interest] = (parcels.dataset.element_id[:,0] == originating_link)
vol_orig_link = parcels.calc_aggregate_value(
np.sum,
"volume",
at="link",
filter_array=parcelfilter,
fill_value=0.0
)
fig = plot_network_and_parcels(
grid, parcels,
link_attribute=vol_orig_link,
link_attribute_title = "Vol of sed originating on link x",
network_linewidth = 5,
parcel_alpha = 0
)
parcel_vol_on_grid = parcels.dataset["volume"].values
parcel_vol_on_grid[parcels.dataset["element_id"].values==-2]=0
#plt.figure(figsize=(8,6))
plt.plot(np.asarray(parcels.time_coordinates)/(60*60*24),
np.sum(parcel_vol_on_grid, axis=0),
'-',
linewidth=3,
alpha=0.5
)
plt.ylabel('Total volume of parcels on grid $[m^3]$')
plt.xlabel('Time [days]')
plt.show()
plt.loglog(parcels.dataset.D[:,-1],
nst._distance_traveled_cumulative,
'.'
)
plt.xlabel('Parcel grain size (m)')
plt.ylabel('Cumulative parcel travel distance (m)')
# Note: some of the smallest grain travel distances can exceed the length of the
# grid by "overshooting" during a single timestep of high transport rate
plt.plot(grid.at_link["channel_slope"],
nst.d_mean_active,
'.')
plt.xlabel('Channel slope (m/m)')
plt.ylabel('Mean grain size of active layer (m)')
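# Optional diagnostic (a sketch; assumes calc_aggregate_value accepts np.mean
# the same way it accepted np.sum above): mean parcel grain size per link.
mean_D_per_link = parcels.calc_aggregate_value(np.mean, "D", at="link")
print(mean_D_per_link[:5])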
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Load a shapefile that represents the river network
Step2: Alright, let's see what fields we read in with this shapefile
Step3: Great! Looks like we have length (reach length), upstream drainage area (drainage area), x and y vertices of each link/reach (x and y of polyline), and bed elevation (topographic elevation).
Step4: Our network consists of 29 links between 30 nodes. In the plot above, X and Y represent the plan-view coordinates of the node locations.
Step5: We must distinguish between topographic elevation (the top surface of the bed sediment) and bedrock elevation (the surface of the river in the absence of modeled sediment).
Step6: In order to track sediment motion, we classify parcels as either active (representing mobile surface sediment) or inactive (immobile subsurface) during each timestep. The active parcels are the most recent parcels to arrive in the link. During a timestep, active parcels are transported downstream (increasing their location_in_link, which is a normalized value ranging from 0 to 1) according to a sediment transport formula.
Step7: In addition to the required parcel attributes listed above, you can designate optional parcel characteristics, depending on your needs. For example
Step8: We now collect the arrays into a dictionary of variables, some of which will be tracked through time (["item_id", "time"]), and others of which will remain constant through time
Step9: With all of the required attributes collected, we can create the parcels DataRecord. Often, parcels will eventually transport off of the downstream-most link. To track these parcels, we have designated a "dummy_element" here, which has index value -2.
Step10: 3. Run the NetworkSedimentTransporter
Step11: Before running the NST, we need to determine flow direction on the grid (upstream and downstream for each link). To do so, we initalize and run a Landlab flow director component
Step12: Then, we initialize the network sediment transporter
Step13: Now we are ready to run the model forward in time
Step14: 4. Plot the model results
Step15: Non-network plotting
Step16: We can also plot individual parcel characteristics. The plot below shows the total transport distance of each parcel through the whole model run as a function of the parcel's grain size (during the final timestep).
Step17: The plot below is an example of accessing variables associated with the grid (grid.at_link.X, or grid.at_node.X), as well as a variable associated with this instance of NetworkModelGrid (nmg.X)
|
3,481
|
<ASSISTANT_TASK:>
Python Code:
#!/usr/bin/python
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from stats import parse_results, get_percentage, get_avg_per_seed, draw_pie, draw_bars, draw_bars_comparison, draw_avgs
pr, eigen, bet = parse_results('test_ws.txt')
draw_pie(get_percentage(pr))
draw_bars_comparison('Avg adopters per seeds', 'Avg adopters', np.array(get_avg_per_seed(pr)+[(0, np.mean(pr[:,1]))]))
draw_pie(get_percentage(eigen))
draw_bars_comparison('Avg adopters per seeds', 'Avg adopters', np.array(get_avg_per_seed(eigen)+[(0, np.mean(eigen[:,1]))]))
draw_pie(get_percentage(bet))
draw_bars_comparison('Avg adopters per seeds', 'Avg adopters', np.array(get_avg_per_seed(bet)+[(0, np.mean(bet[:,1]))]))
draw_bars(np.sort(pr.view('i8,i8'), order=['f0'], axis=0).view(np.int),
np.sort(eigen.view('i8,i8'), order=['f0'], axis=0).view(np.int),
np.sort(bet.view('i8,i8'), order=['f0'], axis=0).view(np.int))
pr_mean = np.mean(pr[:,1])
pr_mean_seed = np.mean(pr[:,0])
print 'Avg Seed:',pr_mean_seed, 'Avg adopters:', pr_mean
eigen_mean = np.mean(eigen[:,1])
eigen_mean_seed = np.mean(eigen[:,0])
print 'Avg Seed:',eigen_mean_seed, 'Avg adopters:',eigen_mean
bet_mean = np.mean(bet[:,1])
bet_mean_seed = np.mean(bet[:,0])
print 'Avg Seed:',bet_mean_seed, 'Avg adopters:',bet_mean
draw_avgs([pr_mean, eigen_mean, bet_mean])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Parse results
Step2: PageRank Seeds Percentage
Step3: Avg adopters per seed comparison
Step4: Eigenvector Seeds Percentage
Step5: Avg adopters per seed comparison
Step6: Betweenness Seeds Percentage
Step7: Avg adopters per seed comparison
Step8: 100 runs adopters comparison
Step9: Centrality Measures Averages
Step10: Eigenv avg adopters and seed
Step11: Betweenness avg adopters and seed
|
3,482
|
<ASSISTANT_TASK:>
Python Code:
import csv as csv
csv_file_object = csv.reader(open('titanic_train.csv', 'rb'))
header = csv_file_object.next()
print(header)
lines = [line for line in csv_file_object]
print(lines[0])
print(lines[1])
import pandas as pd
titanic_train = pd.read_csv("titanic_train.csv")
titanic_train
community_leading_cause = pd.read_csv("LEADINGCAUSESOFDEATH.csv")
community_leading_cause
users = pd.read_csv("users.csv")
users
print("Number of users: %d" % len(users))
print("Unique locations: %d" % len(users.location.value_counts()))
users.location.value_counts()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use pandas!
Step2: Community health status from data.gov
Step3: Values of -9999 indicate N.A. in the source data for the Unemployed column on the VUNERABLEPOPSANDENVHEALTH page
|
3,483
|
<ASSISTANT_TASK:>
Python Code::
from numpy import argmax
from keras.preprocessing.sequence import pad_sequences

# map an integer to a word
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
# seed the generation process
in_text = 'startseq'
# iterate over the whole length of the sequence
for i in range(max_length):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0]
# pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# predict next word
yhat = model.predict([photo,sequence], verbose=0)
# convert probability to integer
yhat = argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# stop if we predict the end of the sequence
if word == 'endseq':
break
return in_text
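# Hypothetical usage sketch (names are assumptions -- `model`, `tokenizer`,
# `photo` and `max_length` come from a training pipeline not shown here):
# photo = extract_features('example.jpg')  # pre-extracted CNN feature vector
# description = generate_desc(model, tokenizer, photo, max_length)
# print(description)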
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
3,484
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
rng = np.random.RandomState(42)
x = rng.rand(1000000)
y = rng.rand(1000000)
%timeit x + y
%timeit np.fromiter((xi + yi for xi, yi in zip(x, y)), dtype=x.dtype, count=len(x))
mask = (x > 0.5) & (y < 0.5)
tmp1 = (x > 0.5)
tmp2 = (y < 0.5)
mask = tmp1 & tmp2
import numexpr
mask_numexpr = numexpr.evaluate('(x > 0.5) & (y < 0.5)')
np.allclose(mask, mask_numexpr)
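# A quick, machine-dependent timing comparison: numexpr evaluates the whole
# expression in one pass and skips the tmp1/tmp2 temporaries described above.
%timeit (x > 0.5) & (y < 0.5)
%timeit numexpr.evaluate('(x > 0.5) & (y < 0.5)')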
import pandas as pd
nrows, ncols = 100000, 100
rng = np.random.RandomState(42)
df1, df2, df3, df4 = (pd.DataFrame(rng.rand(nrows, ncols))
for i in range(4))
%timeit df1 + df2 + df3 + df4
%timeit pd.eval('df1 + df2 + df3 + df4')
np.allclose(df1 + df2 + df3 + df4,
pd.eval('df1 + df2 + df3 + df4'))
df1, df2, df3, df4, df5 = (pd.DataFrame(rng.randint(0, 1000, (100, 3)))
for i in range(5))
result1 = -df1 * df2 / (df3 + df4) - df5
result2 = pd.eval('-df1 * df2 / (df3 + df4) - df5')
np.allclose(result1, result2)
result1 = (df1 < df2) & (df2 <= df3) & (df3 != df4)
result2 = pd.eval('df1 < df2 <= df3 != df4')
np.allclose(result1, result2)
result1 = (df1 < 0.5) & (df2 < 0.5) | (df3 < df4)
result2 = pd.eval('(df1 < 0.5) & (df2 < 0.5) | (df3 < df4)')
np.allclose(result1, result2)
result3 = pd.eval('(df1 < 0.5) and (df2 < 0.5) or (df3 < df4)')
np.allclose(result1, result3)
result1 = df2.T[0] + df3.iloc[1]
result2 = pd.eval('df2.T[0] + df3.iloc[1]')
np.allclose(result1, result2)
df = pd.DataFrame(rng.rand(1000, 3), columns=['A', 'B', 'C'])
df.head()
result1 = (df['A'] + df['B']) / (df['C'] - 1)
result2 = pd.eval("(df.A + df.B) / (df.C - 1)")
np.allclose(result1, result2)
result3 = df.eval('(A + B) / (C - 1)')
np.allclose(result1, result3)
df.head()
df.eval('D = (A + B) / C', inplace=True)
df.head()
df.eval('D = (A - B) / C', inplace=True)
df.head()
column_mean = df.mean(1)
result1 = df['A'] + column_mean
result2 = df.eval('A + @column_mean')
np.allclose(result1, result2)
result1 = df[(df.A < 0.5) & (df.B < 0.5)]
result2 = pd.eval('df[(df.A < 0.5) & (df.B < 0.5)]')
np.allclose(result1, result2)
result2 = df.query('A < 0.5 and B < 0.5')
np.allclose(result1, result2)
Cmean = df['C'].mean()
result1 = df[(df.A < Cmean) & (df.B < Cmean)]
result2 = df.query('A < @Cmean and B < @Cmean')
np.allclose(result1, result2)
x = df[(df.A < 0.5) & (df.B < 0.5)]
tmp1 = df.A < 0.5
tmp2 = df.B < 0.5
tmp3 = tmp1 & tmp2
x = df[tmp3]
df.values.nbytes
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As discussed in Computation on NumPy Arrays
Step2: But this abstraction can become less efficient when computing compound expressions.
Step3: Because NumPy evaluates each subexpression, this is roughly equivalent to the following
Step4: In other words, every intermediate step is explicitly allocated in memory. If the x and y arrays are very large, this can lead to significant memory and computational overhead.
Step5: The benefit here is that Numexpr evaluates the expression in a way that does not use full-sized temporary arrays, and thus can be much more efficient than NumPy, especially for large arrays.
Step6: To compute the sum of all four DataFrames using the typical Pandas approach, we can just write the sum
Step7: The same result can be computed via pd.eval by constructing the expression as a string
Step8: The eval() version of this expression is about 50% faster (and uses much less memory), while giving the same result
Step9: Operations supported by pd.eval()
Step10: Arithmetic operators
Step11: Comparison operators
Step12: Bitwise operators
Step13: In addition, it supports the use of the literal and and or in Boolean expressions
Step14: Object attributes and indices
Step15: Other operations
Step16: Using pd.eval() as above, we can compute expressions with the three columns like this
Step17: The DataFrame.eval() method allows much more succinct evaluation of expressions with the columns
Step18: Notice here that we treat column names as variables within the evaluated expression, and the result is what we would wish.
Step19: We can use df.eval() to create a new column 'D' and assign to it a value computed from the other columns
Step20: In the same way, any existing column can be modified
Step21: Local variables in DataFrame.eval()
Step22: The @ character here marks a variable name rather than a column name, and lets you efficiently evaluate expressions involving the two "namespaces"
Step23: As with the example used in our discussion of DataFrame.eval(), this is an expression involving columns of the DataFrame.
Step24: In addition to being a more efficient computation, compared to the masking expression this is much easier to read and understand.
Step25: Performance
Step26: Is roughly equivalent to this
Step27: If the size of the temporary DataFrames is significant compared to your available system memory (typically several gigabytes) then it's a good idea to use an eval() or query() expression.
|
3,485
|
<ASSISTANT_TASK:>
Python Code:
df['Total day minutes'].hist();
sns.boxplot(df['Total day minutes']);
df.hist();
df['State'].value_counts().head()
df['Churn'].value_counts()
sns.countplot(df['Churn']);
sns.countplot(df['State']);
sns.countplot(df[df['State'].\
isin(df['State'].value_counts().head().index)]['State']);
feat = [f for f in df.columns if 'charge' in f]
df[feat].hist();
sns.pairplot(df[feat]);
df['Churn'].map({False: 'blue', True: 'orange'}).head()
df[~df['Churn']].head()
plt.scatter(df[df['Churn']]['Total eve charge'],
df[df['Churn']]['Total intl charge'],
color='orange', label='churn');
plt.scatter(df[~df['Churn']]['Total eve charge'],
df[~df['Churn']]['Total intl charge'],
color='blue', label='loyal');
plt.xlabel('Total evening charge');
plt.ylabel('Total international charge');
plt.title('Charge distribution for loyal vs. churned customers');
plt.legend();
sns.heatmap(df.corr());
df.drop(feat, axis=1, inplace=True)
sns.heatmap(df.corr());
sns.boxplot(x='Churn', y='Total day minutes', data=df);
sns.boxplot(x='State', y='Total day minutes', data=df);
sns.violinplot(x='Churn', y='Total day minutes', data=df);
df.groupby('International plan')['Total day minutes'].mean()
sns.boxplot(x='International plan', y='Total day minutes', data=df);
pd.crosstab(df['Churn'], df['International plan'])
sns.countplot(x='International plan', hue='Churn', data=df);
sns.countplot(x='Customer service calls', hue='Churn', data=df);
from sklearn.manifold import TSNE
tsne = TSNE(random_state=0)
df2 = df.drop(['State', 'Churn'], axis=1)
df2['International plan'] = df2['International plan'].map({'Yes': 1,
'No': 0})
df2['Voice mail plan'] = df2['Voice mail plan'].map({'Yes': 1,
'No': 0})
df2.info()
%%time
tsne.fit(df2)
plt.scatter(tsne.embedding_[df['Churn'].values, 0],
tsne.embedding_[df['Churn'].values, 1],
color='orange', alpha=.7);
plt.scatter(tsne.embedding_[~df['Churn'].values, 0],
tsne.embedding_[~df['Churn'].values, 1],
color='blue', alpha=.7);
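# Label the two clusters (assumes the orange 'churn' scatter was drawn first,
# as in the lines above):
plt.legend(['churn', 'loyal']);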
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.2. Categorical features
Step2: 2. Feature interactions
Step3: 2.2. Quantitative with categorical
Step4: 2.3. Categorical with categorical
Step5: 3. Miscellaneous
|
3,486
|
<ASSISTANT_TASK:>
Python Code:
import psyplot.project as psy
import xarray as xr
%matplotlib inline
%config InlineBackend.close_figures = False
import numpy as np
x = np.linspace(-1, 1.)
y = np.linspace(-1, 1.)
x2d, y2d = np.meshgrid(x, y)
z = - x2d**2 - y2d**2
ds = xr.Dataset(
{'z': xr.Variable(('x', 'y'), z)},
{'x': xr.Variable(('x', ), x), 'y': xr.Variable(('y', ), y)})
p = psy.plot.plot2d(ds, cmap='Reds', name='z')
p.update(plot='contourf', levels=5)
p.show()
p.keys('colors')
psy.close('all')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we create some sample data in the form of a 2D parabola
Step2: For a simple 2D plot of a scalar field, we can use the
Step3: The plot formatoption controls, how the plot is made. The default is a
Step4: The plot2d method has several formatoptions controlling the color coding of your plot
Step5: The most important ones are
|
3,487
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
people = graphlab.SFrame('people_wiki.gl/')
people.head()
len(people)
obama = people[people['name'] == 'Barack Obama']
obama
obama['text']
clooney = people[people['name'] == 'George Clooney']
clooney['text']
obama['word_count'] = graphlab.text_analytics.count_words(obama['text'])
print obama['word_count']
obama_word_count_table = obama[['word_count']].stack('word_count', new_column_name = ['word','count'])
obama_word_count_table.head()
obama_word_count_table.sort('count',ascending=False)
people['word_count'] = graphlab.text_analytics.count_words(people['text'])
people.head()
tfidf = graphlab.text_analytics.tf_idf(people['word_count'])
tfidf
people['tfidf'] = tfidf['docs']
obama = people[people['name'] == 'Barack Obama']
obama[['tfidf']].stack('tfidf',new_column_name=['word','tfidf']).sort('tfidf',ascending=False)
clinton = people[people['name'] == 'Bill Clinton']
beckham = people[people['name'] == 'David Beckham']
graphlab.distances.cosine(obama['tfidf'][0],clinton['tfidf'][0])
graphlab.distances.cosine(obama['tfidf'][0],beckham['tfidf'][0])
knn_model = graphlab.nearest_neighbors.create(people,features=['tfidf'],label='name')
knn_model.query(obama)
swift = people[people['name'] == 'Taylor Swift']
knn_model.query(swift)
jolie = people[people['name'] == 'Angelina Jolie']
knn_model.query(jolie)
arnold = people[people['name'] == 'Arnold Schwarzenegger']
knn_model.query(arnold)
people.head(2)
elton_john = people[people['name'] == 'Elton John']
elton_john.head()
elton_john[['word_count']].stack('word_count', new_column_name = ['word','count']).sort('count',ascending=False)
elton_john[['tfidf']].stack('tfidf', new_column_name = ['word','tfidf']).sort('tfidf',ascending=False)
victoria_beckham = people[people['name'] == 'Victoria Beckham']
paul_mccartney = people[people['name'] == 'Paul McCartney']
graphlab.distances.cosine(elton_john['tfidf'][0],victoria_beckham['tfidf'][0])
graphlab.distances.cosine(elton_john['tfidf'][0], paul_mccartney['tfidf'][0])
word_count_cosine_model = graphlab.nearest_neighbors.create(people,features=['word_count'],label='name',distance='cosine')
tfidf_cosine_model = graphlab.nearest_neighbors.create(people,features=['tfidf'],label='name',distance='cosine')
word_count_cosine_model.query(elton_john)
tfidf_cosine_model.query(elton_john)
word_count_cosine_model.query(victoria_beckham)
tfidf_cosine_model.query(victoria_beckham)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load some text data - from wikipedia, pages on people
Step2: Data contains
Step3: Explore the dataset and checkout the text it contains
Step4: Exploring the entry for actor George Clooney
Step5: Get the word counts for the Obama article
Step6: Sort the word counts for the Obama article
Step7: Sorting the word counts to show most common words at the top
Step8: Most common words include uninformative words like "the", "in", "and",...
Step9: Examine the TF-IDF for the Obama article
Step10: Words with highest TF-IDF are much more informative.
Step11: Is Obama closer to Clinton than to Beckham?
Step12: Build a nearest neighbor model for document retrieval
Step13: Applying the nearest-neighbors model for retrieval
Step14: As we can see, president Obama's article is closest to the one about his vice-president Biden, and those of other politicians.
Step15: Exercise
Step16: 2) Measuring distance
Step17: 3) Building nearest neighbors models with different input features and setting the distance metric
Step18: Now we are ready to use our model to retrieve documents. Use these two models to collect the following results
Step19: What’s the most similar article, other than itself, to the one on ‘Elton John’ using TF-IDF features?
Step20: What’s the most similar article, other than itself, to the one on ‘Victoria Beckham’ using word count features?
Step21: What’s the most similar article, other than itself, to the one on ‘Victoria Beckham’ using TF-IDF features?
|
3,488
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# This code translates a binary number (written with the "0b" prefix) into base 10.
int('0b11', 2)
# This code translates from base 10 to base 2.
bin(9)
# Just looking a large binary number
bin(2**53)
import bitstring as bs
def next_float(f):
#packing the double-precision float
b = bs.pack('>d', f)
#extracting the mantissa as an unsigned int and adding 1.
# There are two cases: (1) the bits of the mantissa are all 1,
# (2) all the other cases.
m = b[12:].uint
if m==4503599627370495:
# Case (1)
m=0
b[12:] = m
exp=b[1:12].uint
exp +=1
b[1:12] = exp
else:
# Case (2)
m += 1
# putting the result in his place
b[12:] = m
return b.float
def gap(f):
next_f = next_float(f)
return next_f - f
gap(1)
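# Cross-check: the gap at 1.0 is exactly double-precision machine epsilon,
# i.e. 2**-52, which NumPy also reports:
print(gap(1.) == np.finfo(float).eps == 2.**-52)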
# What happens when we increase the value?
gap(2**40)
# When will the gap be greater that 1?
print(gap(2.**52))
print(gap(2.**53))
# What does it mean to have a gap larger than 1?
values = np.array([2**i for i in range(-5,60)]).astype(float)
# Corresponding gaps:
# The Numpy function "vectorize" is very useful to be use one wants to apply
# a scalar function to each element of an array.
vgap = np.vectorize(gap)
gaps = vgap(values)
fig = plt.figure(figsize=(10,5))
plt.subplot(121)
plt.plot(values, gaps,'.',markersize=20)
plt.xlabel('float(x)')
plt.ylabel('Gap between next representable number')
plt.title('Linear scale')
plt.grid(True)
plt.subplot(122)
plt.loglog(values, gaps,'.')
plt.xlabel('float(x)')
plt.ylabel('Gap between next representable number')
plt.title('Log scale')
plt.grid(True)
fig.tight_layout()
plt.show()
# This function shows the bits used for the sign, exponent and mantissa of a 64-bit double-precision number.
# fps: Floating Point Standard
# Double: Double precision IEEE 754
def to_fps_double(f):
b = bs.pack('>d', f)
b = b.bin
#show sign + exponent + mantisa
print(b[0]+' '+b[1:12]+ ' '+b[12:])
to_fps_double(1.)
int('0b01111111111', 2)
to_fps_double(1.+gap(1.))
to_fps_double(+0.)
to_fps_double(-0.)
to_fps_double(np.inf)
to_fps_double(-np.inf)
to_fps_double(np.nan)
to_fps_double(-np.nan)
to_fps_double(2.**-1074)
print(2.**-1074)
to_fps_double(2.**-1075)
print(2.**-1075)
to_fps_double(9.4)
to_fps_double(1)
to_fps_double(1+2**-52)
for i in np.arange(1,11):
to_fps_double(1+i*2**-55)
for i in np.arange(11):
to_fps_double((1+i*2**-55)*2**52)
to_fps_double(2**52)
to_fps_double(2**52+1)
to_fps_double(2**53)
to_fps_double(2**53+1)
x = np.logspace(-200,800,1000, base=2, dtype=np.dtype(float))
plt.figure()
plt.loglog(x,x,'b',label='fl$(x)$: value stored')
plt.loglog(x,np.power(2.,-52)*np.abs(x)/2,'r',label='Upper bound of $|$fl$(x)-x|$ estimated')
plt.legend(loc='best')
plt.grid(True)
plt.show()
a = 1.
b = 2.**(-52) #emach
result_1 = a + b # arithmetic result is 1.0000000000000002220446049250313080847263336181640625
result_1b = result_1-1.0
print("{0:.1000}".format(result_1))
print(result_1b)
print(b)
c = 2.**(-53)
result_2 = a + c # arithmetic result is 1.00000000000000011102230246251565404236316680908203125
np.set_printoptions(precision=16)
print("{0:.1000}".format(result_2))
print(result_2-a)
to_fps_double(result_2)
to_fps_double(result_2-a)
d = 2.**(-53) + 2.**(-54)
result_3 = a + d # arithmetic result is 1.000000000000000166533453693773481063544750213623046875
print("{0:.1000}".format(result_3))
to_fps_double(result_3)
to_fps_double(d)
# What does it mean to store 0.5+delta?
e = 2.**(-1)
f = b/2. # emach/2
result_4 = e + f # 0.50000000000000011102230246251565404236316680908203125
print("{0:.100}".format(result_4))
result_5 = e + b # 0.5000000000000002220446049250313080847263336181640625
print("{0:.100}".format(result_5))
g = b/4.
result_5 = e + g # 0.500000000000000055511151231257827021181583404541015625
print("{0:.100}".format(result_5))
num_1 = a
num_2 = b
result = a + b
print("{0:.100}".format(result))
f1 = lambda x: (1.-np.cos(x))/(np.power(np.sin(x),2))
f2 = lambda x: 1./(1+np.cos(x))
x = np.arange(-10,10,0.1)
plt.figure()
plt.plot(x,f1(x),'-')
plt.grid(True)
plt.show()
plt.figure()
plt.plot(x,f2(x),'-')
plt.grid(True)
plt.show()
x = np.arange(-1,1,0.01)
plt.figure()
plt.plot(x,f1(x),'.',markersize=10)
plt.grid(True)
plt.show()
plt.figure()
plt.plot(x,f2(x),'.',markersize=10)
plt.grid(True)
plt.show()
# This function corresponds to the numerator of f1(x)
f3 = lambda x: (1.-np.cos(x))
# This function corresponds to the denominator of f2(x)
f4 = lambda x: np.power(np.sin(x),2)
x = np.flip(np.logspace(-19,0,20))
o1 = f1(x)
o2 = f2(x)
o3 = f3(x)
o4 = f4(x)
print("x, f1(x), f2(x), f3(x), f4(x)")
for i in np.arange(len(x)):
print("%1.10f, %1.10f, %1.10f, %1.25f, %1.25f" % (x[i],o1[i],o2[i],o3[i],o4[i]))
x = np.logspace(-20,0,40)
plt.figure()
plt.loglog(x,1-np.cos(x),'.',label='$1-\cos(x)$')
plt.loglog(x,0*x+1e-20,'.', label='$10^{-20}$')
plt.grid(True)
plt.xlabel('$x$')
plt.legend(loc='best')
plt.show()
# For this value of x=1e-7 we obtain an outcome greater than 0
to_fps_double(1-np.cos(1.e-7))
# But for x=1e-8 we actually get 0. This explains why in the previous plot the blue dots stop appearing for values less than or equal to about 1e-8.
to_fps_double(1-np.cos(1.e-8))
xp = lambda a,b,c: (-b+np.sqrt((b**2)-4*a*c))/(2*a)
print(xp(1,np.power(10.,10),-1))
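# The 0.0 printed above is catastrophic cancellation: -b + sqrt(b**2 - 4ac)
# subtracts two nearly equal numbers. A sketch of the standard remedy
# (rationalizing the numerator) follows; the exact root is ~1e-10.
xp_stable = lambda a,b,c: (2.*c)/(-b-np.sqrt((b**2)-4*a*c))
print(xp_stable(1,np.power(10.,10),-1)) # ~1e-10 instead of 0.0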
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <div id='intro' />
Step2: <div id='nature' />
Step3: The next two functions are self-explanatory
Step4: So if we compute gap(1) we should get machine epsilon.
Step5: In order to prove our hypothesis (that floating point numbers are not uniformly distributed), we will create an array of values
Step6: We include now a comparison between a linear scale plot and a loglog scale plot. Which one is more useful here?
Step7: As you can see, the hypothesis was right. In other words
Step8: Let's see some interesting examples
Step9: <div id='firstinteger' />
Step10: This means that if we want to store any number in the interval $[1,1+\epsilon_{\text{mach}}]$, only the numbers $1$ and $1+\epsilon_{\text{mach}}$ will be stored. For example, compare the exponent and the mantissa in the previous cell with the following outputs
Step11: Now, we can scale this difference such that the scaling factor multiplied by $\epsilon_{\text{mach}}$ is one. The factor will be $2^{52}$. This means $2^{52}\,\epsilon_{\text{mach}}=1$. Repeating the same example as before but with the scaling factor we obtain
Step12: Which means we can only store exactly the numbers
Step13: This means the distance from $2^{52}$ to the next representable number is $1$! So, what would happen if we tried to store $2^{53}+1$?
Step14: We can't store the integer $2^{53}+1$! Thus, the first integer not representable is $2^{53}+1$.
Step15: As you may have expected, the error grows proportionally with the value of $x$.
Step16: As you can see, if you try to save a number between $1$ and $1 + \epsilon_{\text{mach}}$, it will have to be rounded (according to IEEE rounding criteria) to a representable number before being stored, thus creating a difference between the real number and the stored number.
Step17: We'll let the students find some representable numbers and some non-representable numbers.
Step18: <div id='func' />
Step19: The first plot shows some spikes; are these expected, or are they an artifact?
Step20: The second function also shows the spikes! It seems they are real.
Step21: In the previous function, we see an outlier at $x=0$; this point tells us that $f_1(x)$ at $x=0$ seems to be $0$.
Step22: In this case we see a different behavior. Is this the correct one?
Step23: From the previous table, we see that the numerator of $f_1(x)$ becomes $0$. What is happening with $1-\cos(x)$ near $x=0$?
Step24: Another example
|
3,489
|
<ASSISTANT_TASK:>
Python Code:
%pdb on
%pdb
import numpy # needed for numpy.random below
def pick_and_take():
picked = numpy.random.randint(0, 1000)
raise NotImplementedError()
pick_and_take()
import pdb;pdb.set_trace()
def func(x):
return x + 1
for i in range(100):
print(func(i))
if i == 10 or i == 20:
import pdb;pdb.set_trace()
raise Exception
def test_debug(y):
x = 10
# One-liner to start the debugger here.
from IPython.core.debugger import Tracer; Tracer()()
x = x + y
for i in range(10):
x = x+i
return x
test_debug(10)
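# Note: `Tracer` is deprecated in newer IPython releases. A sketch of the
# modern equivalents (assumption: IPython >= 5.1 and Python >= 3.7):
from IPython.core.debugger import set_trace # call set_trace() where Tracer()() was used
# or simply call the built-in breakpoint() at the desired line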
from IPython import embed; embed()
import pixiedust
%%pixie_debugger
import random
def find_max (values):
max = 0
for val in values:
if val > max:
max = val
return max
find_max(random.sample(range(100), 10))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tracer
Step2: Interactive Python Console
Step3: Pixie Debugger
|
3,490
|
<ASSISTANT_TASK:>
Python Code:
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
!pip install empiricaldist
# Get utils.py
from os.path import basename, exists
def download(url):
filename = basename(url)
if not exists(filename):
from urllib.request import urlretrieve
local, _ = urlretrieve(url, filename)
print('Downloaded ' + local)
download('https://github.com/AllenDowney/ThinkBayes2/raw/master/soln/utils.py')
import numpy as np
import pandas as pd
n = 60
t1 = 30
t2 = n-t1
lam1 = 4
lam2 = 2
from scipy.stats import poisson
before = poisson(lam1).rvs(t1)
before
after = poisson(lam2).rvs(t2)
after
data = np.concatenate([before, after])
n = len(data)
lam = 2
lams = np.linspace(0, 10, 51)
D, L = np.meshgrid(data, lams)
like1 = poisson.pmf(D, L).prod(axis=1)
like1 /= like1.sum()
like2 = poisson.pmf(np.sum(data), n*lams)
like2 /= like2.sum()
import matplotlib.pyplot as plt
plt.plot(lams, like1)
plt.plot(lams, like2)
np.sum(lams * like1), np.sum(lams * like2)
poisson.pmf(before, 4)
poisson.pmf(after, 2)
t = 7
def likelihood1(data, t, lam1, lam2):
before = data[:t]
after = data[t:]
like1 = poisson.pmf(before, lam1).prod()
like2 = poisson.pmf(after, lam2).prod()
return like1
like1 = likelihood1(data, t, 4, 2)
like1
from scipy.special import binom
def combos(data):
data = np.asarray(data)
n = data.sum()
k = len(data)
print(n, k)
ns = n - np.cumsum(data) + data
print(ns)
print(data)
cs = binom(ns, data)
print(cs)
return cs.prod() / k**n
combos(data[:t])
from scipy.special import binom
def likelihood2(data, t, lam1, lam2):
before = data[:t].sum()
like1 = poisson.pmf(before, lam1*t) * combos(data[:t])
after = data[t:].sum()
t2 = len(data) - t
n, k = after, t2
like2 = poisson.pmf(after, lam2*t2)
return like1
like2 = likelihood2(data, t, 4, 2)
like2
like2 / like1
from empiricaldist import Pmf
ts = range(1, len(data))
prior_t = Pmf(1, ts)
lams1 = np.linspace(0, 10, 51)
prior_lam1 = Pmf(1, lams1)
lams2 = np.linspace(0, 10, 41)
prior_lam2 = Pmf(1, lams2)
from utils import make_joint
def make_joint3(pmf1, pmf2, pmf3):
"""Make a joint distribution with three parameters."""
joint2 = make_joint(pmf2, pmf1).stack()
joint3 = make_joint(pmf3, joint2).stack()
return Pmf(joint3)
joint_prior = make_joint3(prior_t, prior_lam1, prior_lam2)
joint_prior.head()
ts
lams1
T, L = np.meshgrid(ts, lams1)
M = T * L
M.shape
C = np.cumsum(data)[:-1]
C.shape
from scipy.special import binom
like1 = poisson.pmf(C, M) / binom(C+T-1, T-1)
like1.shape
ts2 = len(data) - np.array(ts)
ts2
T2, L2 = np.meshgrid(ts2, lams2)
M2 = T2 * L2
M2.shape
C2 = sum(data) - C
C2.shape
like2 = poisson.pmf(C2, M2) / binom(C2+T2-1, T2-1)
like2.shape
like = like1.T[:, :, None] * like2.T[:, None, :]
like.shape
like.flatten().shape
from utils import normalize
joint_posterior = joint_prior * like.reshape(-1)
normalize(joint_posterior)
from utils import pmf_marginal
posterior_t = pmf_marginal(joint_posterior, 0)
posterior_t.head(3)
posterior_t.plot()
posterior_lam1 = pmf_marginal(joint_posterior, 1)
posterior_lam2 = pmf_marginal(joint_posterior, 2)
posterior_lam1.plot()
posterior_lam2.plot()
likelihood = joint_prior.copy().unstack().unstack()
likelihood.head()
t = 30
row = likelihood.loc[t].unstack()
row.head()
lams = row.columns
lams.shape
lam_mesh, data_mesh = np.meshgrid(lams, data[:t])
probs = poisson.pmf(data_mesh, lam_mesh)
probs.shape
likelihood1 = probs.prod(axis=0)
likelihood1.shape
lams = row.index
lams.shape
lam_mesh, data_mesh = np.meshgrid(lams, data[t:])
probs = poisson.pmf(data_mesh, lam_mesh)
probs.shape
likelihood2 = probs.prod(axis=0)
likelihood2.shape
likelihood_row = np.multiply.outer(likelihood2, likelihood1)
likelihood_row.shape
likelihood.loc[t] = likelihood_row.flatten()
likelihood.loc[t]
likelihood = joint_prior.copy().unstack().unstack()
likelihood.head()
for t in likelihood.index:
row = likelihood.loc[t].unstack()
lams = row.columns
lam_mesh, data_mesh = np.meshgrid(lams, data[:t])
probs = poisson.pmf(data_mesh, lam_mesh)
likelihood1 = probs.prod(axis=0)
lams = row.index
lam_mesh, data_mesh = np.meshgrid(lams, data[t:])
probs = poisson.pmf(data_mesh, lam_mesh)
likelihood2 = probs.prod(axis=0)
likelihood_row = np.multiply.outer(likelihood2, likelihood1)
likelihood.loc[t] = likelihood_row.flatten()
from utils import normalize
def update(prior, data):
"""prior: Pmf representing the joint prior
data: sequence of counts
returns: Pmf representing the joint posterior"""
likelihood = joint_prior.copy().unstack().unstack()
for t in likelihood.index:
row = likelihood.loc[t].unstack()
lams = row.columns
lam_mesh, data_mesh = np.meshgrid(lams, data[:t])
probs = poisson.pmf(data_mesh, lam_mesh)
likelihood1 = probs.prod(axis=0)
lams = row.index
lam_mesh, data_mesh = np.meshgrid(lams, data[t:])
probs = poisson.pmf(data_mesh, lam_mesh)
likelihood2 = probs.prod(axis=0)
likelihood_row = np.multiply.outer(likelihood2, likelihood1)
likelihood.loc[t] = likelihood_row.flatten()
posterior = prior * likelihood.stack().stack()
normalize(posterior)
return posterior
posterior = update(joint_prior, data)
from utils import pmf_marginal
posterior_t = pmf_marginal(posterior, 0)
posterior_t.head(3)
posterior_t.plot()
posterior_lam1 = pmf_marginal(posterior, 1)
posterior_lam2 = pmf_marginal(posterior, 2)
posterior_lam1.plot()
posterior_lam2.plot()
try:
import emcee
except ImportError:
!pip install emcee
import emcee
print(emcee.__version__)
try:
import corner
except ImportError:
!pip install corner
try:
import tqdm
except ImportError:
!pip install tqdm
from scipy.stats import poisson
from scipy.stats import gamma
alpha, beta = 3, 1
def log_prior(theta):
t, lam1, lam2 = theta
return gamma.logpdf([lam1, lam2], alpha, beta).sum()
def log_likelihood(theta, data):
t, lam1, lam2 = theta
t = int(t)
k1 = data[:t]
k2 = data[t:]
like1 = poisson.logpmf(k1, lam1).sum()
like2 = poisson.logpmf(k2, lam2).sum()
return like1 + like2
def log_posterior(theta, data):
t, lam1, lam2 = theta
if t < 1 or t >= len(data):
return -np.inf
if lam1 < 0 or lam2 < 0:
return -np.inf
return log_likelihood(theta, data)
ndim = 3 # number of parameters in the model
nwalkers = 50 # number of MCMC walkers
nburn = 500 # "burn-in" period to let chains stabilize
nsteps = 2500 # number of MCMC steps to take
np.random.seed(0)
com = 30, 3, 3
starting_guesses = com + np.random.random((nwalkers, ndim))
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[data])
state = sampler.run_mcmc(starting_guesses, nsteps, progress=True)
flat_samples = sampler.get_chain(discard=100, thin=15, flat=True)
flat_samples.shape
import corner
truths = [30, 4, 2]
labels = ['t', 'lam1', 'lam2']
fig = corner.corner(flat_samples, labels=labels, truths=truths);
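# A sketch of posterior summaries from the emcee samples (assumes the
# flat_samples and labels defined above): medians with 90% credible intervals.
for i, label in enumerate(labels):
    lo, med, hi = np.percentile(flat_samples[:, i], [5, 50, 95])
    print("{}: {:.2f} [{:.2f}, {:.2f}]".format(label, med, lo, hi))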
stop
import pymc3 as pm
n = len(data)
t = range(n)
alpha = 1.0 / np.mean(data)
import theano.tensor as T
with pm.Model() as model:
tau = pm.DiscreteUniform('tau', lower=0, upper=n)
lam1 = pm.Exponential('lam1', alpha)
lam2 = pm.Exponential('lam2', alpha)
lam = T.switch(t < tau, lam1, lam2)
Y_obs = pm.Poisson('Y_obs', lam, observed=data)
trace = pm.sample(10000, tune=2000)
pm.traceplot(trace);
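# Note: `Cdf` and `thinkplot` used below are utilities from the ThinkBayes2
# repository; this cell assumes they were imported elsewhere (assumption).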
tau_sample = trace['tau']
cdf_tau = Cdf(tau_sample)
thinkplot.Cdf(cdf_tau)
lam1_sample = trace['lam1']
cdf_lam1 = Cdf(lam1_sample)
thinkplot.Cdf(cdf_lam1)
lam2_sample = trace['lam2']
cdf_lam2 = Cdf(lam2_sample)
thinkplot.Cdf(cdf_lam2)
stop
# !wget https://raw.githubusercontent.com/baltimore-sun-data/2018-shootings-analysis/master/BPD_Part_1_Victim_Based_Crime_Data.csv
df = pd.read_csv('BPD_Part_1_Victim_Based_Crime_Data.csv', parse_dates=[0])
df.head()
df.shape
shootings = df[df.Description.isin(['HOMICIDE', 'SHOOTING']) & (df.Weapon == 'FIREARM')]
shootings.shape
grouped = shootings.groupby('CrimeDate')
counts = grouped['Total Incidents'].sum()
counts.head()
index = pd.date_range(counts.index[0], counts.index[-1])
counts = counts.reindex(index, fill_value=0)
counts.head()
counts.plot()
thinkplot.decorate(xlabel='Date',
ylabel='Number of shootings')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Changepoint Detection
Step2: Likelihood
Step4: Doing it the long way
Step5: Using emcee
Step6: Based on an example from Chapter 1 of Bayesian Methods for Hackers
|
3,491
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from sympy.vector import CoordSys3D
N = CoordSys3D('N')
x1, x2, x3 = symbols("x_1 x_2 x_3")
alpha1, alpha2, alpha3 = symbols("alpha_1 alpha_2 alpha_3")
L, ga, gv, c = symbols("L g_a g_v c")
init_printing()
x = alpha1
y = alpha2
a1=gv *2*pi/L
z = ga * cos(c * alpha1)
r = x*N.i + y*N.j + z*N.k
r
r1 = r.diff(alpha1)
r2 = r.diff(alpha2)
k1 = trigsimp(r1.magnitude())
k2 = trigsimp(r2.magnitude())
r1 = r1/k1
r2 = r2/k2
r1
r2
n = r1.cross(r2)
n = trigsimp(n.normalize())
n
dr1=r1.diff(alpha1)
k1 = trigsimp(r1.cross(dr1).magnitude()/k1**3)
k1
dr2=r2.diff(alpha2)
k2 = trigsimp(r2.cross(dr2).magnitude()/k2**3)
k2
n.diff(alpha1)
r1.diff(alpha1)
R_alpha=r+alpha3*n
R_alpha
R1=R_alpha.diff(alpha1)
R2=R_alpha.diff(alpha2)
R3=R_alpha.diff(alpha3)
R1
R1x = R1.dot(N.i)
simplify(R1x)
R1z = R1.dot(N.k)
simplify(R1z)
R2
R3
eps=trigsimp(R1.dot(R2.cross(R3)))
R_1=simplify(trigsimp(R2.cross(R3)/eps))
R_2=simplify(trigsimp(R3.cross(R1)/eps))
R_3=simplify(trigsimp(R1.cross(R2)/eps))
R_1
R_2
R_3
dx1da1=R1.dot(N.i)
dx1da2=R2.dot(N.i)
dx1da3=R3.dot(N.i)
dx2da1=R1.dot(N.j)
dx2da2=R2.dot(N.j)
dx2da3=R3.dot(N.j)
dx3da1=R1.dot(N.k)
dx3da2=R2.dot(N.k)
dx3da3=R3.dot(N.k)
A=Matrix([[dx1da1, dx1da2, dx1da3], [dx2da1, dx2da2, dx2da3], [dx3da1, dx3da2, dx3da3]])
simplify(A)
A_inv = trigsimp(A**-1)
simplify(trigsimp(A_inv))
trigsimp(A.det())
g11=R1.dot(R1)
g12=R1.dot(R2)
g13=R1.dot(R3)
g21=R2.dot(R1)
g22=R2.dot(R2)
g23=R2.dot(R3)
g31=R3.dot(R1)
g32=R3.dot(R2)
g33=R3.dot(R3)
G=Matrix([[g11, g12, g13],[g21, g22, g23], [g31, g32, g33]])
G=trigsimp(G)
G
simplify(G)
g_11=R_1.dot(R_1)
g_12=R_1.dot(R_2)
g_13=R_1.dot(R_3)
g_21=R_2.dot(R_1)
g_22=R_2.dot(R_2)
g_23=R_2.dot(R_3)
g_31=R_3.dot(R_1)
g_32=R_3.dot(R_2)
g_33=R_3.dot(R_3)
G_con=Matrix([[g_11, g_12, g_13],[g_21, g_22, g_23], [g_31, g_32, g_33]])
G_con=trigsimp(G_con)
G_con
simplify(G_con)
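# Sanity-check sketch (assumption): the covariant and contravariant metric
# tensors are mutually inverse, so their product should simplify to eye(3).
trigsimp(G*G_con)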
G_inv = G**-1
G_inv
dR1dalpha1 = trigsimp(R1.diff(alpha1))
dR1dalpha1
dR1dalpha2 = trigsimp(R1.diff(alpha2))
dR1dalpha2
dR1dalpha3 = trigsimp(R1.diff(alpha3))
dR1dalpha3
dR2dalpha1 = trigsimp(R2.diff(alpha1))
dR2dalpha1
dR2dalpha2 = trigsimp(R2.diff(alpha2))
dR2dalpha2
dR2dalpha3 = trigsimp(R2.diff(alpha3))
dR2dalpha3
dR3dalpha1 = trigsimp(R3.diff(alpha1))
dR3dalpha1
dR3dalpha2 = trigsimp(R3.diff(alpha2))
dR3dalpha2
dR3dalpha3 = trigsimp(R3.diff(alpha3))
dR3dalpha3
u1=Function('u^1')
u2=Function('u^2')
u3=Function('u^3')
q=Function('q') # q(alpha3) = 1+alpha3/R
K = Symbol('K') # K = 1/R
u1_nabla1 = u1(alpha1, alpha2, alpha3).diff(alpha1) + u3(alpha1, alpha2, alpha3) * K / q(alpha3)
u2_nabla1 = u2(alpha1, alpha2, alpha3).diff(alpha1)
u3_nabla1 = u3(alpha1, alpha2, alpha3).diff(alpha1) - u1(alpha1, alpha2, alpha3) * K * q(alpha3)
u1_nabla2 = u1(alpha1, alpha2, alpha3).diff(alpha2)
u2_nabla2 = u2(alpha1, alpha2, alpha3).diff(alpha2)
u3_nabla2 = u3(alpha1, alpha2, alpha3).diff(alpha2)
u1_nabla3 = u1(alpha1, alpha2, alpha3).diff(alpha3) + u1(alpha1, alpha2, alpha3) * K / q(alpha3)
u2_nabla3 = u2(alpha1, alpha2, alpha3).diff(alpha3)
u3_nabla3 = u3(alpha1, alpha2, alpha3).diff(alpha3)
# $\nabla_2 u^2 = \frac { \partial u^2 } { \partial \alpha_2}$
grad_u = Matrix([[u1_nabla1, u2_nabla1, u3_nabla1],[u1_nabla2, u2_nabla2, u3_nabla2], [u1_nabla3, u2_nabla3, u3_nabla3]])
grad_u
G_s = Matrix([[q(alpha3)**2, 0, 0],[0, 1, 0], [0, 0, 1]])
grad_u_down=grad_u*G_s
expand(simplify(grad_u_down))
B = zeros(9, 12)
B[0,1] = (1+alpha3/R)**2
B[0,8] = (1+alpha3/R)/R
B[1,2] = (1+alpha3/R)**2
B[2,0] = (1+alpha3/R)/R
B[2,3] = (1+alpha3/R)**2
B[3,5] = S(1)
B[4,6] = S(1)
B[5,7] = S(1)
B[6,9] = S(1)
B[6,0] = -(1+alpha3/R)/R
B[7,10] = S(1)
B[8,11] = S(1)
B
E=zeros(6,9)
E[0,0]=1
E[1,4]=1
E[2,8]=1
E[3,1]=1
E[3,3]=1
E[4,2]=1
E[4,6]=1
E[5,5]=1
E[5,7]=1
E
Q=E*B
Q=simplify(Q)
Q
T=zeros(12,6)
T[0,0]=1
T[0,2]=alpha3
T[1,1]=1
T[1,3]=alpha3
T[3,2]=1
T[8,4]=1
T[9,5]=1
T
Q=E*B*T
Q=simplify(Q)
Q
from sympy import MutableDenseNDimArray
C_x = MutableDenseNDimArray.zeros(3, 3, 3, 3)
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
elem_index = 'C^{{{}{}{}{}}}'.format(i+1, j+1, k+1, l+1)
el = Symbol(elem_index)
C_x[i,j,k,l] = el
C_x
C_x_symmetry = MutableDenseNDimArray.zeros(3, 3, 3, 3)
def getCIndecies(index):
if (index == 0):
return 0, 0
elif (index == 1):
return 1, 1
elif (index == 2):
return 2, 2
elif (index == 3):
return 0, 1
elif (index == 4):
return 0, 2
elif (index == 5):
return 1, 2
for s in range(6):
for t in range(s, 6):
i,j = getCIndecies(s)
k,l = getCIndecies(t)
elem_index = 'C^{{{}{}{}{}}}'.format(i+1, j+1, k+1, l+1)
el = Symbol(elem_index)
C_x_symmetry[i,j,k,l] = el
C_x_symmetry[i,j,l,k] = el
C_x_symmetry[j,i,k,l] = el
C_x_symmetry[j,i,l,k] = el
C_x_symmetry[k,l,i,j] = el
C_x_symmetry[k,l,j,i] = el
C_x_symmetry[l,k,i,j] = el
C_x_symmetry[l,k,j,i] = el
C_x_symmetry
C_isotropic = MutableDenseNDimArray.zeros(3, 3, 3, 3)
C_isotropic_matrix = zeros(6)
mu = Symbol('mu')
la = Symbol('lambda')
for s in range(6):
for t in range(s, 6):
if (s < 3 and t < 3):
if(t != s):
C_isotropic_matrix[s,t] = la
C_isotropic_matrix[t,s] = la
else:
C_isotropic_matrix[s,t] = 2*mu+la
C_isotropic_matrix[t,s] = 2*mu+la
elif (s == t):
C_isotropic_matrix[s,t] = mu
C_isotropic_matrix[t,s] = mu
for s in range(6):
for t in range(s, 6):
i,j = getCIndecies(s)
k,l = getCIndecies(t)
el = C_isotropic_matrix[s, t]
C_isotropic[i,j,k,l] = el
C_isotropic[i,j,l,k] = el
C_isotropic[j,i,k,l] = el
C_isotropic[j,i,l,k] = el
C_isotropic[k,l,i,j] = el
C_isotropic[k,l,j,i] = el
C_isotropic[l,k,i,j] = el
C_isotropic[l,k,j,i] = el
C_isotropic
def getCalpha(C, A, q, p, s, t):
res = S(0)
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
res += C[i,j,k,l]*A[q,i]*A[p,j]*A[s,k]*A[t,l]
return simplify(trigsimp(res))
C_isotropic_alpha = MutableDenseNDimArray.zeros(3, 3, 3, 3)
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
c = getCalpha(C_isotropic, A_inv, i, j, k, l)
C_isotropic_alpha[i,j,k,l] = c
C_isotropic_alpha[0,0,0,0]
C_isotropic_matrix_alpha = zeros(6)
for s in range(6):
for t in range(6):
i,j = getCIndecies(s)
k,l = getCIndecies(t)
C_isotropic_matrix_alpha[s,t] = C_isotropic_alpha[i,j,k,l]
C_isotropic_matrix_alpha
C_orthotropic = MutableDenseNDimArray.zeros(3, 3, 3, 3)
C_orthotropic_matrix = zeros(6)
for s in range(6):
for t in range(s, 6):
elem_index = 'C^{{{}{}}}'.format(s+1, t+1)
el = Symbol(elem_index)
if ((s < 3 and t < 3) or t == s):
C_orthotropic_matrix[s,t] = el
C_orthotropic_matrix[t,s] = el
for s in range(6):
for t in range(s, 6):
i,j = getCIndecies(s)
k,l = getCIndecies(t)
el = C_orthotropic_matrix[s, t]
C_orthotropic[i,j,k,l] = el
C_orthotropic[i,j,l,k] = el
C_orthotropic[j,i,k,l] = el
C_orthotropic[j,i,l,k] = el
C_orthotropic[k,l,i,j] = el
C_orthotropic[k,l,j,i] = el
C_orthotropic[l,k,i,j] = el
C_orthotropic[l,k,j,i] = el
C_orthotropic
def getCalpha(C, A, q, p, s, t):
res = S(0)
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
res += C[i,j,k,l]*A[q,i]*A[p,j]*A[s,k]*A[t,l]
return simplify(trigsimp(res))
C_orthotropic_alpha = MutableDenseNDimArray.zeros(3, 3, 3, 3)
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
c = getCalpha(C_orthotropic, A_inv, i, j, k, l)
C_orthotropic_alpha[i,j,k,l] = c
C_orthotropic_alpha[0,0,0,0]
C_orthotropic_matrix_alpha = zeros(6)
for s in range(6):
for t in range(6):
i,j = getCIndecies(s)
k,l = getCIndecies(t)
C_orthotropic_matrix_alpha[s,t] = C_orthotropic_alpha[i,j,k,l]
C_orthotropic_matrix_alpha
P=eye(12,12)
P[0,0]=1/(1+alpha3/R)
P[1,1]=1/(1+alpha3/R)
P[2,2]=1/(1+alpha3/R)
P[3,0]=-1/(R*(1+alpha3/R)**2)
P[3,3]=1/(1+alpha3/R)
P
Def=simplify(E*B*P)
Def
rows, cols = Def.shape
D_p=zeros(rows, cols)
q = 1+alpha3/R
for i in range(rows):
ratio = 1
if (i==0):
ratio = q*q
elif (i==3 or i == 4):
ratio = q
for j in range(cols):
D_p[i,j] = Def[i,j] / ratio
D_p = simplify(D_p)
D_p
C_isotropic_alpha_p = MutableDenseNDimArray.zeros(3, 3, 3, 3)
q=1+alpha3/R
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
fact = 1
if (i==0):
fact = fact*q
if (j==0):
fact = fact*q
if (k==0):
fact = fact*q
if (l==0):
fact = fact*q
C_isotropic_alpha_p[i,j,k,l] = simplify(C_isotropic_alpha[i,j,k,l]*fact)
C_isotropic_matrix_alpha_p = zeros(6)
for s in range(6):
for t in range(6):
i,j = getCIndecies(s)
k,l = getCIndecies(t)
C_isotropic_matrix_alpha_p[s,t] = C_isotropic_alpha_p[i,j,k,l]
C_isotropic_matrix_alpha_p
C_orthotropic_alpha_p = MutableDenseNDimArray.zeros(3, 3, 3, 3)
q=1+alpha3/R
for i in range(3):
for j in range(3):
for k in range(3):
for l in range(3):
fact = 1
if (i==0):
fact = fact*q
if (j==0):
fact = fact*q
if (k==0):
fact = fact*q
if (l==0):
fact = fact*q
C_orthotropic_alpha_p[i,j,k,l] = simplify(C_orthotropic_alpha[i,j,k,l]*fact)
C_orthotropic_matrix_alpha_p = zeros(6)
for s in range(6):
for t in range(6):
i,j = getCIndecies(s)
k,l = getCIndecies(t)
C_orthotropic_matrix_alpha_p[s,t] = C_orthotropic_alpha_p[i,j,k,l]
C_orthotropic_matrix_alpha_p
D_p_T = D_p*T
K = Symbol('K')
D_p_T = D_p_T.subs(R, 1/K)
simplify(D_p_T)
theta, h1, h2=symbols('theta h_1 h_2')
square_geom=theta/2*(R+h2)**2-theta/2*(R+h1)**2
expand(simplify(square_geom))
square_int=integrate(integrate(1+alpha3/R, (alpha3, h1, h2)), (alpha1, 0, theta*R))
expand(simplify(square_int))
simplify(D_p.T*C_isotropic_matrix_alpha_p*D_p)
W = simplify(D_p_T.T*C_isotropic_matrix_alpha_p*D_p_T*(1+alpha3*K)**2)
W
h=Symbol('h')
E=Symbol('E')
v=Symbol('nu')
W_a3 = integrate(W, (alpha3, -h/2, h/2))
W_a3 = simplify(W_a3)
W_a3.subs(la, E*v/((1+v)*(1-2*v))).subs(mu, E/((1+v)*2))
A_M = zeros(3)
A_M[0,0] = E*h/(1-v**2)
A_M[1,1] = 5*E*h/(12*(1+v))
A_M[2,2] = E*h**3/(12*(1-v**2))
Q_M = zeros(3,6)
Q_M[0,1] = 1
Q_M[0,4] = K
Q_M[1,0] = -K
Q_M[1,2] = 1
Q_M[1,5] = 1
Q_M[2,3] = 1
W_M=Q_M.T*A_M*Q_M
W_M
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Coordinates
Step2: The mid-surface is defined by the following vector $\vec{r}=\vec{r}(\alpha_1, \alpha_2)$
Step3: Tangent to curve
Step4: Normal to curve
Step5: Curvature
Step6: Derivative of base vectors
Step7: $ \frac { d\vec{n} } { d\alpha_1} = -\frac {1}{R} \vec{v} = -k \vec{v} $
Step8: $ \frac { d\vec{v} } { d\alpha_1} = \frac {1}{R} \vec{n} = k \vec{n} $
Step9: Base Vectors $\vec{R}^1, \vec{R}^2, \vec{R}^3$
Step10: Jacobi matrix
Step11: Metric tensor
Step12: ${\displaystyle \hat{G}=\sum_{i,j} g_{ij}\vec{R}^i\vec{R}^j}$
Step13: Derivatives of vectors
Step14: $ \frac { d\vec{R_1} } { d\alpha_1} = -\frac {1}{R} \left( 1+\frac{\alpha_3}{R} \right) \vec{R_3} $
Step15: $ \frac { d\vec{R_1} } { d\alpha_3} = \frac {1}{R} \frac {1}{1+\frac{\alpha_3}{R}} \vec{R_1} $
Step16: $ \frac { d\vec{R_3} } { d\alpha_1} = \frac {1}{R} \frac {1}{1+\frac{\alpha_3}{R}} \vec{R_1} $
Step17: $ \frac { d\vec{R_3} } { d\alpha_3} = \vec{0} $
Step18: $
Step19: Deformations tensor
Step20: Tymoshenko theory
Step21: Elasticity tensor(stiffness tensor)
Step22: Include symmetry
Step23: Isotropic material
Step24: Orthotropic material
Step25: Orthotropic material in shell coordinates
Step26: Physical coordinates
Step27: Stiffness tensor
Step28: Tymoshenko
Step29: Square of segment
Step30: ${\displaystyle A=\int_{0}^{L}\int_{h_1}^{h_2} \left( 1+\frac{\alpha_3}{R} \right) d \alpha_1 d \alpha_3}, L=R \theta$
Step31: Virtual work
Step32: Isotropic material physical coordinates - Tymoshenko
|
3,492
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy.io as sio
import math
n=100 # number of iterations
#initial
B=0.15 # Model Noise
R=0.02 # Observation Noise
A = 0.5 # Model Matrix
# creation of numpy arrays for variables
z= np.zeros(n)
m= np.zeros(n)
y= np.zeros(n)
C= np.zeros(n)
K= np.zeros(n)
# initial values for covariance and mean of the posterior
C[0]=0.4
m[0]=0
# initial values of model (truth) and observation data
z[0]=np.random.normal(loc=0.0, scale=0.4, size=None)
y[0]=z[0] + np.random.normal(loc=0.0, scale=R, size=None)
# iteration through kalman filter
for i in range(0,n-1,1):
z[i+1]=A*z[i]+np.random.normal(loc=0.0, scale=B, size=None) # evolution of model
y[i+1]=z[i+1] + np.random.normal(loc=0.0, scale=R, size=None) # evolution of observ.
#forecast formulas
m[i+1]=A*m[i]
C[i+1]=A*A*C[i]+B
K[i+1]=C[i+1]/(R+C[i+1]) # kalman gain
# analysis formulas
m[i+1]=m[i+1]-K[i+1]*(m[i+1]-y[i+1])
C[i+1]=C[i+1]-K[i+1]*C[i+1]
print(m[-1])
print(C[-1])
print(np.mean(z))
print(np.cov(z))
# plot initial cond.
mu = 0
variance = 0.4
sigma = math.sqrt(variance)
x = np.linspace(mu-3*sigma,mu+3*sigma, 100)
plt.plot(x,mlab.normpdf(x, mu, sigma),"b", label="initial")
# plot posterior
mu_1 = m[-1]
variance_1 = C[-1]
sigma_1 = math.sqrt(variance_1)
x_1 = np.linspace(mu_1-3*sigma_1,mu_1+3*sigma_1, 100)
plt.plot(x_1,mlab.normpdf(x_1, mu_1, sigma_1),"r--", label="posterior")
plt.title("B= "+ str(B)+ ", R= "+ str(R) + "\n"+" number of steps= " + str(n))
plt.legend()
plt.grid()
plt.show()
plt.plot(range(0,n), m, "r--", label= "posterior est.")
plt.plot(range(0,n), z, "b--", label= "model (truth)")
plt.plot(range(0,n), y, "g--", label="Observations")
plt.grid()
plt.legend()
plt.show()
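# A quick accuracy sketch (assumption: RMSE against the true state is a fair
# summary) comparing the filtered mean and the raw observations to the truth.
rmse_post = np.sqrt(np.mean((m - z)**2))
rmse_obs = np.sqrt(np.mean((y - z)**2))
print("RMSE of posterior mean vs truth:", rmse_post)
print("RMSE of observations vs truth:", rmse_obs)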
mat_contents = sio.loadmat('octave_a.mat')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lorenz equations
|
3,493
|
<ASSISTANT_TASK:>
Python Code:
# Import the financial library.
# The import only needs to be executed once.
import cashflows as cf
import cashflows as cf
##
## There are four sources of capital with different costs;
## their data will be stored in the following lists:
##
monto = [0] * 4
interes = [0] * 4
## stock issuance
## --------------------------------------
monto[0] = 4000
interes[0] = 25.0 / 1.0 # discount rate of the stock
## loan 1.
## -------------------------------------------------------
##
nrate = cf.interest_rate(const_value=[20]*5, start=2018)
credito1 = cf.fixed_ppal_loan(amount = 2000, # amount
nrate = nrate, # interest rate
orgpoints = 50/2000) # origination costs
credito1
## cash flow for the loan before taxes
tax_rate = cf.interest_rate(const_value=[30]*5, start=2018)
credito1.tocashflow(tax_rate=tax_rate)
## the effective rate paid on the loan is the one
## that makes the present value of the previous
## cash flow zero (before or after taxes)
credito1.true_rate(tax_rate = tax_rate)
## store the data for this loan
monto[1] = 2000
interes[1] = credito1.true_rate(tax_rate = tax_rate)
## loan 2.
## -------------------------------------------------------
##
credito2 = cf.fixed_rate_loan(amount = 1000, # amount
nrate = 20, # interest rate
start = 2018,
grace = 0,
life = 4, # number of installments
dispoints = 0.24) # setup costs
credito2
credito2.tocashflow(tax_rate = tax_rate)
credito2.true_rate(tax_rate = tax_rate)
## store the data for this loan
monto[2] = 1000
interes[2] = credito2.true_rate(tax_rate = tax_rate)
## loan 3.
## -------------------------------------------------------
##
nrate = cf.interest_rate(const_value=[7]*5, start=2018)
credito3 = cf.bullet_loan(amount = 5000, # amount
nrate = nrate, # interest rate
orgpoints = 0.01, # origination costs
dispoints = 0.20) # discount points
credito3
credito3.tocashflow(tax_rate=tax_rate)
credito3.true_rate(tax_rate=tax_rate)
## store the data for this loan
monto[3] = 5000
interes[3] = credito3.true_rate(tax_rate=tax_rate)
## amounts
monto
## rates
interes
## Weighted average cost of capital (WACC)
## -------------------------------------------------------------
## it is the weighted average of the rates, weighted by
## the share of total capital coming from each source
##
s = sum(monto) # total capital
wacc = sum([x*r/s for x, r in zip(monto, interes)])
wacc
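## Cross-check sketch (assumption: numpy is available and the rates are plain
## floats): the same WACC as a weighted average of the rates by the amounts.
import numpy as np
print(np.average(interes, weights=monto)) # should match `wacc` above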
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The cost of capital problem
Step2: In modeling loans with cashflows, two types of costs are considered
|
3,494
|
<ASSISTANT_TASK:>
Python Code:
from collections import OrderedDict
# JSON to store all the information.
jsonsOccupants = []
#Number of occupants
N = 3
#Definition of the states
states = OrderedDict([('Leaving','out'), ('Resting', 'sofa'), ('Working in my laboratory', 'wp')])
#Definition of the schedule
schedule = {'t1': "08:01:00", 't2': "08:10:00", 't3': "08:20:00"}
#Possible Variation on the schedule
variation = {'t1': "00:01:00", 't2': "00:01:00", 't3': "00:01:00"}
# Probability of state change in the Markov chain as a function of the time period
markovActivity = {
'-t1': [[100, 0, 0], [0, 0, 0], [0, 0, 0]],
't1-t2': [[0, 0, 100], [0, 50, 50], [0, 50, 50]],
't2-t3': [[100, 0, 0], [0, 50, 50], [0, 50, 50]],
't3-': [[0, 0, 100], [100, 0, 0], [0, 0, 100]]
}
#Time associated to each state (minutes)
timeActivity = {
'-t1': [3, 0, 0], 't1-t2': [3, 3, 3], 't2-t3': [3, 3, 3], 't3-': [3, 3, 3]
}
#Time variation associated to each state (minutes)
timeActivityVariation = {
'-t1': [1, 0, 0], 't1-t2': [1, 1, 1], 't2-t3': [1, 1, 1], 't3-': [1, 1, 1]
}
#Store the information
jsonOccupant = {'type': 'example' , 'N': N, 'states': states , 'schedule': schedule, 'variation': variation,
'markovActivity': markovActivity, 'timeActivity': timeActivity, "timeActivityVariation": timeActivityVariation}
jsonsOccupants.append(jsonOccupant)
import soba.visualization.ramen.mapGenerator as ramen
with open('labgsi.blueprint3d') as data_file:
jsonMap = ramen.returnMap(data_file)
from soba.models.continuousModel import ContinuousModel
from time import time
class ModelExample(ContinuousModel):
def __init__(self, width, height, jsonMap, jsonsOccupants, seed = int(time())):
super().__init__(width, height, jsonMap, jsonsOccupants, seed = seed, timeByStep = 60)
self.createOccupants(jsonsOccupants)
def step(self):
if self.clock.clock.hour > 17:
self.finishSimulation = True
super().step()
import soba.run
import sys
from optparse import OptionParser
parameters = {'width': 40, 'height': 40, 'jsonMap': jsonMap, 'jsonsOccupants': jsonsOccupants}
sys.argv = []
sys.argv.append("-1")
sys.argv.append("-v")
soba.run.run(ModelExample, parameters, visualJS="example.js")
import soba.run
import sys
#Fixed parameters during iterations
fixed_params = {"width": 40, "height": 40, "jsonMap": jsonMap, "jsonsOccupants": jsonsOccupants}
#Variable parameters to each iteration
variable_params = {"seed": range(10, 500, 10)}
sys.argv = []
sys.argv.append("-1")
sys.argv.append("-b")
soba.run.run(ModelExample, fixed_params, variable_params)
from collections import OrderedDict
# JSON to store all the information.
jsonsOccupants = []
#Number of occupants
N = 3
#Definition of the states
states = OrderedDict([('out','Pos1'), ('Working in my laboratory', {'Pos2': 1, 'Pos3': 2})])
#Definition of the schedule
schedule = {'t1': "08:01:00", 't2': "08:10:00", 't3': "08:20:00"}
#Possible Variation on the schedule
variation = {'t1': "00:01:00", 't2': "00:01:00", 't3': "00:01:00"}
#Probability of state change associated with the Markovian chain as a function of the temporal period
markovActivity = {
'-t1': [[100, 0, 0], [0, 0, 0], [0, 0, 0]],
't1-t2': [[0, 0, 100], [0, 50, 50], [0, 50, 50]],
't2-t3': [[100, 0, 0], [0, 50, 50], [0, 50, 50]],
't3-': [[0, 0, 100], [0, 100, 0], [0, 100, 0]]
}
#Time associated to each state (minutes)
timeActivity = {
'-t1': [3, 0, 0], 't1-t2': [3, 3, 3], 't2-t3': [3, 3, 3], 't3-': [3, 3, 3]
}
#Time variation associated to each state (minutes)
timeActivityVariation = {
'-t1': [1, 0, 0], 't1-t2': [1, 1, 1], 't2-t3': [1, 1, 1], 't3-': [1, 1, 1]
}
#Store the information
jsonOccupant = {'type': 'example' , 'N': N, 'states': states , 'schedule': schedule, 'variation': variation,
'markovActivity': markovActivity, 'timeActivity': timeActivity}
jsonsOccupants.append(jsonOccupant)
jsonMap = {
'Pos1': {'entrance':'', 'conectedTo': {'U':'Pos2'}, 'measures': {'dx':2, 'dy':2}},
'Pos2': {'measures': {'dx':3, 'dy':3.5}, 'conectedTo': {'R':'Pos3'}},
'Pos3': {'measures': {'dx':3, 'dy':3.5}}
}
from soba.models.roomsModel import RoomsModel
import datetime as dt
class ModelExample(RoomsModel):
def __init__(self, width, height, jsonMap, jsonsOccupants, seed = int(time())):
super().__init__(width, height, jsonMap, jsonsOccupants, seed = seed)
def step(self):
if self.clock.clock.day > 3:
self.finishSimulation = True
super().step()
import soba.run
import sys
cellW = 4
cellH = 4
sys.argv = []
sys.argv.append("-1")
sys.argv.append("-v")
parameters = {'width': cellW, 'height': cellH, 'jsonMap': jsonMap, 'jsonsOccupants': jsonsOccupants}
soba.run.run(ModelExample, parameters, visualJS="example.js")
#Fixed parameters during iterations
fixed_params = {"width": cellW, "height": cellH, "jsonMap": jsonMap, "jsonsOccupants": jsonsOccupants}
#Variable parameters to each iteration
variable_params = {"seed": range(10, 500, 10)}
sys.argv = []
sys.argv.append("-1")
sys.argv.append("-b")
soba.run.run(ModelExample, fixed_params, variable_params)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2.- We define the building plan or the distribution of the space.
Step2: 3.- We implement a Model inheriting a base class of SOBA.
Step3: 4.- We call the execution methods.
Step4: 4.1- Batch mode.
Step5: Implementing a sample model with simplified space
Step6: 2.- We define the building plan or the distribution of the space.
Step7: 3.- We implement a Model inheriting a base class of SOBA.
Step8: 4.- We call the execution methods.
Step9: 4.1- Batch mode.
|
3,495
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import HTML
input_form =
<div style="background-color:gainsboro; border:solid black; width:300px; padding:20px;">
Variable Name: <input type="text" id="var_name" value="foo"><br>
Variable Value: <input type="text" id="var_value" value="bar"><br>
<button onclick="set_value()">Set Value</button>
</div>
javascript =
<script type="text/Javascript">
function set_value(){
var var_name = document.getElementById('var_name').value;
var var_value = document.getElementById('var_value').value;
var command = var_name + " = '" + var_value + "'";
console.log("Executing Command: " + command);
var kernel = IPython.notebook.kernel;
kernel.execute(command);
}
</script>
HTML(input_form + javascript)
print(foo)
from math import pi, sin
# Add an input form similar to what we saw above
input_form =
<div style="background-color:gainsboro; border:solid black; width:600px; padding:20px;">
Code: <input type="text" id="code_input" size="50" height="2" value="sin(pi / 2)"><br>
Result: <input type="text" id="result_output" size="50" value="1.0"><br>
<button onclick="exec_code()">Execute</button>
</div>
# here the javascript has a function to execute the code
# within the input box, and a callback to handle the output.
javascript =
<script type="text/Javascript">
function handle_output(out_type, out){
console.log(out_type);
console.log(out);
var res = null;
// if output is a print statement
if(out_type == "stream"){
res = out.data;
}
// if output is a python object
else if(out_type === "pyout"){
res = out.data["text/plain"];
}
// if output is a python error
else if(out_type == "pyerr"){
res = out.ename + ": " + out.evalue;
}
// if output is something we haven't thought of
else{
res = "[out type not implemented]";
}
document.getElementById("result_output").value = res;
}
function exec_code(){
var code_input = document.getElementById('code_input').value;
var kernel = IPython.notebook.kernel;
var callbacks = {'output' : handle_output};
document.getElementById("result_output").value = ""; // clear output box
var msg_id = kernel.execute(code_input, callbacks, {silent:false});
console.log("button pressed");
}
</script>
"""
HTML(input_form + javascript)
%pylab inline
from IPython.display import HTML
from cStringIO import StringIO
# We'll use HTML to create a control panel with an
# empty image and a number of navigation buttons.
disp_html = """
<div class="animation" align="center">
<img id="anim_frame" src=""><br>
<button onclick="prevFrame()">Prev Frame</button>
<button onclick="reverse()">Reverse</button>
<button onclick="pause()">Pause</button>
<button onclick="play()">Play</button>
<button onclick="nextFrame()">Next Frame</button>
</div>
"""
# now the javascript to drive it. The nextFrame() and prevFrame()
# functions will call the kernel and pull-down the frame which
# is generated. The play() and reverse() functions use timeouts
# to repeatedly call nextFrame() and prevFrame().
javascript = """
<script type="text/Javascript">
var count = -1; // keep track of frame number
var animating = 0; // keep track of animation direction
var timer = null;
var kernel = IPython.notebook.kernel;
function output(out_type, out){
data = out.data["text/plain"];
document.getElementById("anim_frame").src = data.substring(1, data.length - 1);
if(animating > 0){
timer = setTimeout(nextFrame, 0);
}
else if(animating < 0){
timer = setTimeout(prevFrame, 0);
}
}
var callbacks = {'output' : output};
function pause(){
animating = 0;
if(timer){
clearInterval(timer);
timer = null;
}
}
function play(){
pause();
animating = +1;
nextFrame();
}
function reverse(){
pause();
animating = -1;
prevFrame();
}
function nextFrame(){
count += 1;
var msg_id = kernel.execute("disp._get_frame_data(" + count + ")", callbacks, {silent:false});
}
function prevFrame(){
count -= 1;
var msg_id = kernel.execute("disp._get_frame_data(" + count + ")", callbacks, {silent:false});
}
// display the first frame
setTimeout(nextFrame, 0);
</script>
"""
# Here we create a class whose HTML representation is the above
# HTML and javascript. Note that we've hard-coded the global
# variable name `disp` in the Javascript, so you'll have to assign
# the resulting object to this name in order to view it.
class DisplayAnimation(object):
def __init__(self, anim):
self.anim = anim
self.fig = anim._fig
plt.close(self.fig)
def _get_frame_data(self, i):
self.anim._draw_frame(i) # use the stored animation rather than a global
buffer = StringIO()
self.fig.savefig(buffer, format='png') # use the stored figure rather than a global
buffer.reset()
data = buffer.read().encode('base64')
return "data:image/png;base64,{0}".format(data.replace('\n', ''))
def _repr_html_(self):
return disp_html + javascript
from matplotlib import animation
fig = plt.figure()
ax = plt.axes(xlim=(0, 10), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
def init():
line.set_data([], [])
return line,
def animate(i):
x = np.linspace(0, 10, 1000)
y = np.cos(i * 0.02 * np.pi) * np.sin(x - i * 0.02 * np.pi)
line.set_data(x, y)
return line,
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=100, interval=30)
# For now, we need to name this `disp` for it to work
disp = DisplayAnimation(anim)
disp
def save_to_mem(fig):
buffer = StringIO()
fig.savefig(buffer, format='png')
buffer.reset()
data = buffer.read().encode('base64')
return "data:image/png;base64,{0}".format(data.replace('\n', ''))
fig, ax = plt.subplots()
ax.plot(rand(200))
%timeit save_to_mem(fig)
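# A Python 3 sketch of the same helper (assumption: the cell above targets
# Python 2, where cStringIO and str.encode('base64') exist):
import io, base64
def save_to_mem_py3(fig):
    buffer = io.BytesIO()
    fig.savefig(buffer, format='png')
    data = base64.b64encode(buffer.getvalue()).decode('ascii')
    return "data:image/png;base64,{0}".format(data)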
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: IPython Notebook
Step3: After pressing <button>Set Value</button> above
Step4: Examining the code, we see that
Step7: And then we'll manipulate this value via Javascript
Step10: Pressing <button>Execute</button> above will call kernel.execute
Step11: This code should be considered a proof-of-concept
Step12: Once again, if you're viewing this statically, you'll see nothing above
|
3,496
|
<ASSISTANT_TASK:>
Python Code:
import os
import sys
sys.path.insert(0, "../..")
import importlib
import numpy as np
import pandas as pd
import yellowbrick
import yellowbrick as yb
from yellowbrick.features.importances import FeatureImportances
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import manifold, datasets
from sklearn.linear_model import LogisticRegression, LinearRegression
mpl.rcParams["figure.figsize"] = (9,6)
X_iris, y_iris = datasets.load_iris(True)
X_iris_pd = pd.DataFrame(X_iris, columns=['f1', 'f2', 'f3', 'f4'])
viz = FeatureImportances(LogisticRegression())
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LogisticRegression(), relative=False)
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LogisticRegression(), absolute=True)
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LogisticRegression(), relative=False, absolute=True)
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LogisticRegression(), stack=True)
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LogisticRegression(), stack=True, relative=False)
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LogisticRegression(), stack=True, absolute=True)
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LogisticRegression(), stack=True, relative=False, absolute=True)
viz.fit(X_iris, y_iris)
viz.show()
X_digits, y_digits = datasets.load_digits(return_X_y=True)
viz = FeatureImportances(LogisticRegression(), stack=True, relative=True)
viz.fit(X_digits, y_digits)
viz.show()
viz = FeatureImportances(LinearRegression())
viz.fit(X_iris, y_iris)
viz.show()
viz = FeatureImportances(LinearRegression(), stack=True)
viz.fit(X_iris, y_iris)
viz.show()
importlib.reload(yellowbrick.features.importances)
from yellowbrick.features.importances import FeatureImportances
viz = FeatureImportances(LogisticRegression(), relative=False, absolute=False, stack=True)
viz.fit(X_iris_pd, y_iris) # use the DataFrame version defined above
viz.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Iris Datasets for Example Code
Step2: Logistic Regression with Mean of Feature Importances
Step3: Logistic Regression with Stacked Feature Importances
Step4: Load Digits Datasets for Example Code
Step5: Linear Regression
Step6: Playground
|
3,497
|
<ASSISTANT_TASK:>
Python Code:
class vector_math:
'''
This is the base class for vector math - which allows for initialization with two vectors.
'''
def __init__(self, vectors = [[1,2,2],[3,4,3]]):
self.vect1 = vectors[0]
self.vect2 = vectors[1]
def set_vects(self, vectors):
self.vect1 = vectors[0]
self.vect2 = vectors[1]
def sum_vects(self):
return [x + y for x, y in zip(self.vect1, self.vect2)]
def sub_vects(self):
# default should be [-2,-2,-1]
return [x - y for x, y in zip(self.vect1, self.vect2)]
# Can be expanded into an explicit "for x, y in zip(...)" loop to show what the comprehension and sum do
def multi_vects(self):
#default should be [3,8,6]
return [x * y for x, y in zip(self.vect1, self.vect2)]
def multi_scalar(self, scalar, vect):
return [e * scalar for e in vect]
# Shows the difference between the list-comprehension version above and a lambda/map version below
def multi_scalar_l(self, scalar, vect):
# the original returned a tuple (lambda, vect) by mistake; map actually applies the lambda
return list(map(lambda e: e * scalar, vect))
def mean_vects(self):
mean_vect = self.sum_vects()
# divide by the number of vectors (two here), not by the vector's length
return self.multi_scalar(1/2, mean_vect)
def dot_product(self):
return sum(self.multi_vects())
vect = vector_math()
sum_vect = vect.sum_vects()
print("Sum of vectors = {}".format(sum_vect))
print("Subtraction of vectors = {}".format(vect.sub_vects()))
print("Product of vectors = {}".format(vect.multi_vects()))
print("Product of Sum of vectors and 2 = {}\n".format(vect.multi_scalar(2, sum_vect)))
# Yep can still use character returns and others in format
print("Average of vectors = {}".format(["{:.2f}".format(e) for e in vect.mean_vects()]))
# Now there are other ways to reduce the decimal places but this was just to show a nested format call
# TODO: Consider adding timeit to show difference between calling multi_scalar directly and calling mean_vect:
#print("Average of vectors through calling scalar = {}".format(
# ["{:.2f}".format(e) for e in vect.multi_scalar(1/len(sum_vect), sum_vect)]))
print("The Dot Product is {}".format(vect.dot_product()))
from math import sqrt
# Using the vect variables showing without functions
sum_of_squares = sum([x * y for x, y in zip(vect.vect1, vect.vect1)])
magnitude = sqrt(sum_of_squares)
distance = sqrt(sum([(x - y) ** 2 for x, y in zip(vect.vect1, vect.vect2)]))
print("Sum of Squares is {}".format(sum_of_squares))
print("Magnitude is {:.2f}".format(magnitude))
print("Distance is {}".format(distance))
import dis
import time
# For instruction - shows the disassembly of each approach and times the compile + disassembly step
vect = [2,3,3,3,4,5,6,6,4,3,2,1,3,4,5,6,4,3,2,1,3,4,5,6,4,3,2]
t1 = time.time()
print("list comp")
dis.dis(compile("[e * 2 for e in vect]", '<stdin>', 'exec'))
d_l = time.time() - t1
print(d_l)
t2 = time.time()
print("\n\n\nlambda")
dis.dis(compile("lambda e: e * 2, vect", '<stdin>', 'exec'))
d_lam = time.time() - t2
print(d_lam)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Other vector operations that could be done
Step2: List Comprehensions are Powerful tools in Python
|
3,498
|
<ASSISTANT_TASK:>
Python Code:
user = '' # type your CMEMS user name within the quotes
password = '' # type your CMEMS password within the quotes
product_name = 'INSITU_BAL_NRT_OBSERVATIONS_013_032' # the targeted CMEMS in situ product
distribution_unit = 'cmems.smhi.se' # the hosting institution
index_file = 'index_latest.txt' # the targeted index file name
import ftplib
ftp=ftplib.FTP(distribution_unit,user,password)
ftp.cwd("Core")
ftp.cwd(product_name)
remote_filename= index_file
local_filename = remote_filename
local_file = open(local_filename, 'wb')
ftp.retrbinary('RETR ' + remote_filename, local_file.write)
local_file.close()
ftp.quit()
# the transfer is done when you see '221 Goodbye.'
import numpy as np
import pandas as pd
from random import randint
index = np.genfromtxt(index_file, skip_header=6, unpack=False, delimiter=',', dtype=None,
names=['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',
'geospatial_lon_min', 'geospatial_lon_max',
'time_coverage_start', 'time_coverage_end',
'provider', 'date_update', 'data_mode', 'parameters'])
dataset = randint(0,len(index)-1) # random line of the index file (randint is inclusive at both ends)
values = [index[dataset]['catalog_id'], '<a href='+index[dataset]['file_name']+'>'+index[dataset]['file_name']+'</a>', index[dataset]['geospatial_lat_min'], index[dataset]['geospatial_lat_max'],
index[dataset]['geospatial_lon_min'], index[dataset]['geospatial_lon_max'], index[dataset]['time_coverage_start'],
index[dataset]['time_coverage_end'], index[dataset]['provider'], index[dataset]['date_update'], index[dataset]['data_mode'],
index[dataset]['parameters']]
headers = ['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',
'geospatial_lon_min', 'geospatial_lon_max',
'time_coverage_start', 'time_coverage_end',
'provider', 'date_update', 'data_mode', 'parameters']
df = pd.DataFrame(values, index=headers, columns=[dataset])
df.style
from shapely.geometry import box, multipoint
import shapely
targeted_geospatial_lat_min = 55.0 # enter min latitude of your bounding box
targeted_geospatial_lat_max = 70.0 # enter max latitude of your bounding box
targeted_geospatial_lon_min = 12.0 # enter min longitude of your bounding box
targeted_geospatial_lon_max = 26.00 # enter max longitude of your bounding box
targeted_bounding_box = box(targeted_geospatial_lon_min, targeted_geospatial_lat_min, targeted_geospatial_lon_max, targeted_geospatial_lat_max)
selected_netCDFs = [];
for netCDF in index:
file_name = netCDF['file_name']
geospatial_lat_min = float(netCDF['geospatial_lat_min'])
geospatial_lat_max = float(netCDF['geospatial_lat_max'])
geospatial_lon_min = float(netCDF['geospatial_lon_min'])
geospatial_lon_max = float(netCDF['geospatial_lon_max'])
bounding_box = shapely.geometry.box(geospatial_lon_min, geospatial_lat_min, geospatial_lon_max, geospatial_lat_max)
bounding_box_centroid = bounding_box.centroid
if (targeted_bounding_box.contains(bounding_box_centroid)):
selected_netCDFs.append(file_name)
print("total: " +str(len(selected_netCDFs)))
for nc in selected_netCDFs:
last_idx_slash = nc.rfind('/')
ncdf_file_name = nc[last_idx_slash+1:]
folders = nc.split('/')[3:len(nc.split('/'))-1]
host = nc.split('/')[2] #or distribution unit
ftp=ftplib.FTP(host,user,password)
for folder in folders:
ftp.cwd(folder)
local_file = open(ncdf_file_name, 'wb')
ftp.retrbinary('RETR '+ncdf_file_name, local_file.write)
local_file.close()
ftp.quit()
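# A sketch for inspecting one of the downloaded files (assumption: the
# netCDF4 library is installed; xarray.open_dataset would work equally well).
import netCDF4
ds = netCDF4.Dataset(ncdf_file_name) # last file downloaded in the loop above
print(ds.variables.keys())
ds.close()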
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <h3>DOWNLOAD</h3>
Step2: <h3>QUICK VIEW</h3>
Step3: <h3>FILTERING CRITERIA</h3>
Step4: Regarding the above glimpse, it is posible to filter by 12 criteria. As example we will setup next a filter to only download those files that contains data within a defined boundingbox.
Step5: 2. netCDF filtering/selection
Step6: <h3> SELECTION DOWNLOAD </h3>
|
3,499
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from sklearn.svm import SVR
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split
# input data reading
df = pd.read_csv("winequality-white.csv", sep=";")
df.head()
# independent variables and dependent variable
x = df.loc[:, : "alcohol"]
y = df["quality"]
# features have to be scaled before model learning
scaler = StandardScaler()
x = scaler.fit_transform(x)
# spliting data to train and test parts
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1)
# create and fit LASSO and SVR models
lasso = Lasso(alpha=0.001)
svr = SVR(C=8, epsilon=0.2, gamma=0.5)
lasso.fit(x_train, y_train)
svr.fit(x_train, y_train)
# make prediction on test data and rounding results
y_pred_lasso = np.round(np.clip(lasso.predict(x_test), 1, 10)).astype(int)
y_pred_svr = np.round(np.clip(svr.predict(x_test), 1, 10)).astype(int)
np.round(1 - mean_squared_error(y_test, y_pred_lasso) / y_test.std(), 2)
np.round(1 - mean_squared_error(y_test, y_pred_svr) / y_test.std(), 2)
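# Note: 1 - MSE/std is an ad-hoc score. A common alternative sketch uses
# scikit-learn's r2_score (1.0 is a perfect fit):
from sklearn.metrics import r2_score
print(round(r2_score(y_test, y_pred_lasso), 2))
print(round(r2_score(y_test, y_pred_svr), 2))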
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Regression sample
|