| text_prompt (string, 168–30.3k chars) | code_prompt (string, 67–124k chars) |
|---|---|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1:
Step2: Now, we can create an
Step3: Epochs behave similarly to
Step4: You can select subsets of epochs by indexing the
Step5: It is also possible to iterate through
Step6: You can manually remove epochs from the Epochs object by using
Step7: If you wish to save the epochs as a file, you can do it with
Step8: Later on you can read the epochs with
Step9: If you wish to look at the average across trial types, then you may do so,
|
<ASSISTANT_TASK:>
Python Code:
import mne
import os.path as op
import numpy as np
from matplotlib import pyplot as plt
data_path = mne.datasets.sample.data_path()
# Load a dataset that contains events
raw = mne.io.read_raw_fif(
op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif'))
# If your raw object has a stim channel, you can construct an event array
# easily
events = mne.find_events(raw, stim_channel='STI 014')
# Show the number of events (number of rows)
print('Number of events:', len(events))
# Show all unique event codes (3rd column)
print('Unique event codes:', np.unique(events[:, 2]))
# Specify event codes of interest with descriptive labels.
# This dataset also has visual left (3) and right (4) events, but
# to save time and memory we'll just look at the auditory conditions
# for now.
event_id = {'Auditory/Left': 1, 'Auditory/Right': 2}
epochs = mne.Epochs(raw, events, event_id, tmin=-0.1, tmax=1,
baseline=(None, 0), preload=True)
print(epochs)
print(epochs.events[:3])
print(epochs.event_id)
print(epochs[1:5])
print(epochs['Auditory/Right'])
# These will be epochs objects
for i in range(3):
print(epochs[i])
# These will be arrays
for ep in epochs[:2]:
print(ep)
epochs.drop([0], reason='User reason')
epochs.drop_bad(reject=dict(grad=2500e-13, mag=4e-12, eog=200e-6), flat=None)
print(epochs.drop_log)
epochs.plot_drop_log()
print('Selection from original events:\n%s' % epochs.selection)
print('Removed events (from numpy setdiff1d):\n%s'
% (np.setdiff1d(np.arange(len(events)), epochs.selection).tolist(),))
print('Removed events (from list comprehension -- should match!):\n%s'
% ([li for li, log in enumerate(epochs.drop_log) if len(log) > 0]))
epochs_fname = op.join(data_path, 'MEG', 'sample', 'sample-epo.fif')
epochs.save(epochs_fname)
epochs = mne.read_epochs(epochs_fname, preload=False)
ev_left = epochs['Auditory/Left'].average()
ev_right = epochs['Auditory/Right'].average()
f, axs = plt.subplots(3, 2, figsize=(10, 5))
_ = f.suptitle('Left / Right auditory', fontsize=20)
_ = ev_left.plot(axes=axs[:, 0], show=False)
_ = ev_right.plot(axes=axs[:, 1], show=False)
plt.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem 3
Step2: Problem 6
Step3: Reliability
Step4: If you want to invert matrices with small determinant, the solution is to ensure the tolerances are low enough so that the inv() function can invert the matrix.
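As a quick sketch of one way to check this numerically before calling inv() (np.linalg.cond is not used elsewhere in this notebook; it is shown here purely for illustration), you can inspect the determinant and the condition number of the matrix:
import numpy as np
A = np.array([[5, -3, -1, 0],
              [-2, 1, 1, 1],
              [3, -5, 1, 2],
              [0, 8, -4, -3]])
# A determinant near zero or a very large condition number warns that inv(A)
# may be numerically unreliable.
print("Determinant:", np.linalg.det(A))
print("Condition number:", np.linalg.cond(A))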
|
<ASSISTANT_TASK:>
Python Code:
# Initial import statements
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.pyplot import *
from numpy import *
from numpy.linalg import *
from scipy.linalg import lu_factor, lu_solve
import math  # needed for math.sqrt() used in gaussSeidel() below
# Create a function which can be used later if needed
def lu_decomp1(A, b):
# Solve by using lu_factor and lu_solve
PLU = lu_factor(A)
x = lu_solve(PLU, b)
return x
# Create variables
A = np.matrix(((1, 4, 1),
(1, 6, -1),
(2, -1, 2)))
b = np.array(([7, 13, 5]))
x = lu_decomp1(A, b)
print(dot(inv(A), b))
print("Result = {}".format(x))
A = np.array([[5, -3, -1, 0],
[-2, 1, 1, 1],
[3, -5, 1, 2],
[0, 8, -4, -3]])
B = np.array(([1, 3, -9, 6, 4],
[2, -1, 6, 7, 1],
[3, 2, -3, 15, 5],
[8, -1, 1, 4, 2],
[11, 1, -2, 18, 7]))
ainv = inv(A)
binv = inv(B)
print("Inverse of A:\n {}".format(ainv))
print("\nInverse of B:\n {}".format(binv))
print("Determinant of A: {}".format(np.linalg.det(A)))
print("Determinant of B: {}".format(np.linalg.det(B)))
def gaussSeidel(A, b):
omega = 1.1
# Amount of iterations
p = 1000
# Define tolerance
tol = 1.0e-9
n = len(b)
x = np.zeros(n)
# Generate array based on starting vector
for y in range(n):
x[y] = b[y]/A[y, y]
# Iterate p times
for k in range(p):
xOld = x.copy()
for i in range(n):
s = 0
for j in range(n):
if j != i:
s = s + A[i, j] * x[j]
x[i] = omega/A[i, i] * (b[i] - s) + (1 - omega)*x[i]
# Break execution if we are within the tolerance needed
dx = math.sqrt(np.dot(x-xOld,x-xOld))
if dx < tol: return x
return x
A = np.array(([4.0, -1, 0, 0],
[-1, 4, -1, 0],
[0, -1, 4, -1],
[0, 0, -1, 3]))
b = np.array(([15.0, 10, 10, 10]))
x = gaussSeidel(A, b)
print("Result = {}".format(x))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setting up data paths and loading raw data (skip some data for speed)
Step2: Since downsampling reduces the timing precision of events, we recommend
Step3: When resampling epochs is unwanted or impossible, for example when the data
Step4: Because resampling also affects the stim channels, some trigger onsets might
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
from matplotlib import pyplot as plt
import mne
from mne.datasets import sample
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
raw = mne.io.read_raw_fif(raw_fname).crop(120, 240).load_data()
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=2, tmin=-0.1, tmax=0.8, preload=True)
# Downsample to 100 Hz
print('Original sampling rate:', epochs.info['sfreq'], 'Hz')
epochs_resampled = epochs.copy().resample(100, npad='auto')
print('New sampling rate:', epochs_resampled.info['sfreq'], 'Hz')
# Plot a piece of data to see the effects of downsampling
plt.figure(figsize=(7, 3))
n_samples_to_plot = int(0.5 * epochs.info['sfreq']) # plot 0.5 seconds of data
plt.plot(epochs.times[:n_samples_to_plot],
epochs.get_data()[0, 0, :n_samples_to_plot], color='black')
n_samples_to_plot = int(0.5 * epochs_resampled.info['sfreq'])
plt.plot(epochs_resampled.times[:n_samples_to_plot],
epochs_resampled.get_data()[0, 0, :n_samples_to_plot],
'-o', color='red')
plt.xlabel('time (s)')
plt.legend(['original', 'downsampled'], loc='best')
plt.title('Effect of downsampling')
mne.viz.tight_layout()
# Resample to 300 Hz
raw_resampled = raw.copy().resample(300, npad='auto')
print('Number of events before resampling:', len(mne.find_events(raw)))
# Resample to 100 Hz (suppress the warning that would be emitted)
raw_resampled = raw.copy().resample(100, npad='auto', verbose='error')
print('Number of events after resampling:',
len(mne.find_events(raw_resampled)))
# To avoid losing events, jointly resample the data and event matrix
events = mne.find_events(raw)
raw_resampled, events_resampled = raw.copy().resample(
100, npad='auto', events=events)
print('Number of events after resampling:', len(events_resampled))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Enter your gateway and the auth token
Step2: Set the Log bucket and Tensorboard Image
Step3: Set the client and create the experiment
Step4: Set the Inference parameters
Step5: Load the the components yaml files for setting up the components
Step10: Define the pipeline
Step11: Compile the pipeline
Step12: Execute the pipeline
Step13: Wait for inference service below to go to READY True state
Step14: Get the Inference service name
Step15: Use the deployed model for prediction request and save the output into a json
Step16: Use the deployed model for explain request and save the output into a json
Step17: Model Interpretation using Captum Vis and Insights
Step18: import the necessary packages
Step19: Read the prediction, explanation, and the class mapping file which saved during the prediction and expalain requests.
Step20: Captum Insights can also be used for visualization
Step21: Load the downloaded model pth file and classifer
Step22: Captum Insights output image
|
<ASSISTANT_TASK:>
Python Code:
! pip uninstall -y kfp
! pip install kfp
import kfp
import json
import os
from kfp.onprem import use_k8s_secret
from kfp import components
from kfp.components import load_component_from_file, load_component_from_url
from kfp import dsl
from kfp import compiler
import numpy as np
import logging
kfp.__version__
INGRESS_GATEWAY='http://istio-ingressgateway.istio-system.svc.cluster.local'
AUTH="<enter your auth token>"
NAMESPACE="kubeflow-user-example-com"
COOKIE="authservice_session="+AUTH
EXPERIMENT="Default"
MINIO_ENDPOINT="http://minio-service.kubeflow:9000"
LOG_BUCKET="mlpipeline"
TENSORBOARD_IMAGE="public.ecr.aws/pytorch-samples/tboard:latest"
client = kfp.Client(host=INGRESS_GATEWAY+"/pipeline", cookies=COOKIE)
client.create_experiment(EXPERIMENT)
experiments = client.list_experiments(namespace=NAMESPACE)
my_experiment = experiments.experiments[0]
my_experiment
DEPLOY_NAME="torchserve"
MODEL_NAME="cifar10"
ISVC_NAME=DEPLOY_NAME+"."+NAMESPACE+"."+"example.com"
INPUT_REQUEST="https://raw.githubusercontent.com/kubeflow/pipelines/master/samples/contrib/pytorch-samples/cifar10/input.json"
! python utils/generate_templates.py cifar10/template_mapping.json
prepare_tensorboard_op = load_component_from_file("yaml/tensorboard_component.yaml")
prep_op = components.load_component_from_file(
"yaml/preprocess_component.yaml"
)
train_op = components.load_component_from_file(
"yaml/train_component.yaml"
)
deploy_op = load_component_from_file("yaml/deploy_component.yaml")
pred_op = load_component_from_file("yaml/prediction_component.yaml")
minio_op = components.load_component_from_file(
"yaml/minio_component.yaml"
)
@dsl.pipeline(
name="Training Cifar10 pipeline", description="Cifar 10 dataset pipeline"
)
def pytorch_cifar10( # pylint: disable=too-many-arguments
minio_endpoint=MINIO_ENDPOINT,
log_bucket=LOG_BUCKET,
log_dir=f"tensorboard/logs/{dsl.RUN_ID_PLACEHOLDER}",
mar_path=f"mar/{dsl.RUN_ID_PLACEHOLDER}/model-store",
config_prop_path=f"mar/{dsl.RUN_ID_PLACEHOLDER}/config",
model_uri=f"s3://mlpipeline/mar/{dsl.RUN_ID_PLACEHOLDER}",
tf_image=TENSORBOARD_IMAGE,
deploy=DEPLOY_NAME,
isvc_name=ISVC_NAME,
model=MODEL_NAME,
namespace=NAMESPACE,
confusion_matrix_log_dir=f"confusion_matrix/{dsl.RUN_ID_PLACEHOLDER}/",
checkpoint_dir="checkpoint_dir/cifar10",
input_req=INPUT_REQUEST,
cookie=COOKIE,
ingress_gateway=INGRESS_GATEWAY,
):
def sleep_op(seconds):
        """Sleep for a while."""
return dsl.ContainerOp(
name="Sleep " + str(seconds) + " seconds",
image="python:alpine3.6",
command=["sh", "-c"],
arguments=[
'python -c "import time; time.sleep($0)"',
str(seconds)
],
)
    # This method defines the pipeline tasks and operations
pod_template_spec = json.dumps({
"spec": {
"containers": [{
"env": [
{
"name": "AWS_ACCESS_KEY_ID",
"valueFrom": {
"secretKeyRef": {
"name": "mlpipeline-minio-artifact",
"key": "accesskey",
}
},
},
{
"name": "AWS_SECRET_ACCESS_KEY",
"valueFrom": {
"secretKeyRef": {
"name": "mlpipeline-minio-artifact",
"key": "secretkey",
}
},
},
{
"name": "AWS_REGION",
"value": "minio"
},
{
"name": "S3_ENDPOINT",
"value": f"{minio_endpoint}",
},
{
"name": "S3_USE_HTTPS",
"value": "0"
},
{
"name": "S3_VERIFY_SSL",
"value": "0"
},
]
}]
}
})
prepare_tb_task = prepare_tensorboard_op(
log_dir_uri=f"s3://{log_bucket}/{log_dir}",
image=tf_image,
pod_template_spec=pod_template_spec,
).set_display_name("Visualization")
prep_task = (
prep_op().after(prepare_tb_task
).set_display_name("Preprocess & Transform")
)
confusion_matrix_url = f"minio://{log_bucket}/{confusion_matrix_log_dir}"
script_args = f"model_name=resnet.pth," \
f"confusion_matrix_url={confusion_matrix_url}"
# For GPU, set number of gpus and accelerator type
ptl_args = f"max_epochs=1, gpus=0, accelerator=None, profiler=pytorch"
train_task = (
train_op(
input_data=prep_task.outputs["output_data"],
script_args=script_args,
ptl_arguments=ptl_args
).after(prep_task).set_display_name("Training")
)
# For GPU uncomment below line and set GPU limit and node selector
# ).set_gpu_limit(1).add_node_selector_constraint
# ('cloud.google.com/gke-accelerator','nvidia-tesla-p4')
(
minio_op(
bucket_name="mlpipeline",
folder_name=log_dir,
input_path=train_task.outputs["tensorboard_root"],
filename="",
).after(train_task).set_display_name("Tensorboard Events Pusher")
)
(
minio_op(
bucket_name="mlpipeline",
folder_name=checkpoint_dir,
input_path=train_task.outputs["checkpoint_dir"],
filename="",
).after(train_task).set_display_name("checkpoint_dir Pusher")
)
minio_mar_upload = (
minio_op(
bucket_name="mlpipeline",
folder_name=mar_path,
input_path=train_task.outputs["checkpoint_dir"],
filename="cifar10_test.mar",
).after(train_task).set_display_name("Mar Pusher")
)
(
minio_op(
bucket_name="mlpipeline",
folder_name=config_prop_path,
input_path=train_task.outputs["checkpoint_dir"],
filename="config.properties",
).after(train_task).set_display_name("Conifg Pusher")
)
model_uri = str(model_uri)
# pylint: disable=unused-variable
    isvc_yaml = """
apiVersion: "serving.kubeflow.org/v1beta1"
kind: "InferenceService"
metadata:
name: {}
namespace: {}
spec:
predictor:
serviceAccountName: sa
pytorch:
storageUri: {}
resources:
requests:
cpu: 4
memory: 16Gi
limits:
cpu: 4
memory: 16Gi
    """.format(
deploy, namespace, model_uri
)
# For GPU inference use below yaml with gpu count and accelerator
gpu_count = "1"
accelerator = "nvidia-tesla-p4"
    # pylint: disable=unused-variable
    isvc_gpu_yaml = """
apiVersion: "serving.kubeflow.org/v1beta1"
kind: "InferenceService"
metadata:
name: {}
namespace: {}
spec:
predictor:
serviceAccountName: sa
pytorch:
storageUri: {}
resources:
requests:
cpu: 4
memory: 16Gi
limits:
cpu: 4
memory: 16Gi
nvidia.com/gpu: {}
nodeSelector:
cloud.google.com/gke-accelerator: {}
    """.format(deploy, namespace, model_uri, gpu_count, accelerator)
# Update inferenceservice_yaml for GPU inference
deploy_task = (
deploy_op(action="apply", inferenceservice_yaml=isvc_yaml
).after(minio_mar_upload).set_display_name("Deployer")
)
# Wait here for model to be loaded in torchserve for inference
sleep_task = sleep_op(5).after(deploy_task).set_display_name("Sleep")
# Make Inference request
pred_task = (
pred_op(
host_name=isvc_name,
input_request=input_req,
cookie=cookie,
url=ingress_gateway,
model=model,
inference_type="predict",
).after(sleep_task).set_display_name("Prediction")
)
(
pred_op(
host_name=isvc_name,
input_request=input_req,
cookie=cookie,
url=ingress_gateway,
model=model,
inference_type="explain",
).after(pred_task).set_display_name("Explanation")
)
dsl.get_pipeline_conf().add_op_transformer(
use_k8s_secret(
secret_name="mlpipeline-minio-artifact",
k8s_secret_key_to_env={
"secretkey": "MINIO_SECRET_KEY",
"accesskey": "MINIO_ACCESS_KEY",
},
)
)
compiler.Compiler().compile(pytorch_cifar10, 'pytorch.tar.gz', type_check=True)
run = client.run_pipeline(my_experiment.id, 'pytorch-cifar10', 'pytorch.tar.gz')
!kubectl get isvc $DEPLOY_NAME -n $NAMESPACE
INFERENCE_SERVICE_LIST = ! kubectl get isvc {DEPLOY_NAME} -n {NAMESPACE} -o json | python3 -c "import sys, json; print(json.load(sys.stdin)['status']['url'])"| tr -d '"' | cut -d "/" -f 3
INFERENCE_SERVICE_NAME = INFERENCE_SERVICE_LIST[0]
INFERENCE_SERVICE_NAME
!curl -v -H "Host: $INFERENCE_SERVICE_NAME" -H "Cookie: $COOKIE" "$INGRESS_GATEWAY/v1/models/$MODEL_NAME:predict" -d @./cifar10/input.json > cifar10_prediction_output.json
! cat cifar10_prediction_output.json
!curl -v -H "Host: $INFERENCE_SERVICE_NAME" -H "Cookie: $COOKIE" "$INGRESS_GATEWAY/v1/models/$MODEL_NAME:explain" -d @./cifar10/input.json > cifar10_explanation_output.json
!./install-dependencies.sh
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import torchvision.transforms as transforms
import torch
import torch.nn.functional as F
import json
import captum
from captum.attr import LayerAttribution
from captum.attr import visualization as viz
import base64
import os
import io
prediction_json = json.loads(open("./cifar10_prediction_output.json", "r").read())
explainations_json = json.loads(open("./cifar10_explanation_output.json", "r").read())
labels_path = './cifar10/class_mapping.json'
with open(labels_path) as json_data:
idx_to_labels = json.load(json_data)
count = 0
for i in range(0, len(explainations_json["explanations"])):
image = base64.b64decode(explainations_json["explanations"][i]["b64"])
fileName = 'captum_kitten_{}.jpeg'.format(count)
imagePath = ( os.getcwd() +"/" + fileName)
img = Image.open(io.BytesIO(image))
img = img.convert('RGB')
img.save(imagePath, 'jpeg', quality=100)
print("Saving ", imagePath)
count += 1
from IPython.display import Image
Image(filename='captum_kitten_0.jpeg')
Image(filename='captum_kitten_1.jpeg')
Image(filename='captum_kitten_2.jpeg')
from minio import Minio
from kubernetes import client, config
import base64
config.load_incluster_config()
v1 = client.CoreV1Api()
sec = v1.read_namespaced_secret("mlpipeline-minio-artifact", NAMESPACE).data
minio_accesskey = base64.b64decode(sec["accesskey"]).decode('UTF-8')
minio_secretkey = base64.b64decode(sec["secretkey"]).decode('UTF-8')
minio_config = {
"HOST": "minio-service.kubeflow:9000",
"ACCESS_KEY": minio_accesskey,
"SECRET_KEY": minio_secretkey,
"BUCKET": "mlpipeline",
"FOLDER": "checkpoint_dir/cifar10"}
def _initiate_minio_client(minio_config):
minio_host = minio_config["HOST"]
access_key = minio_config["ACCESS_KEY"]
secret_key = minio_config["SECRET_KEY"]
client = Minio(minio_host, access_key=access_key, secret_key=secret_key, secure=False)
return client
client= _initiate_minio_client(minio_config)
client
def download_artifact_from_minio(folder: str, artifact: str):
artifact_name = artifact.split("/")[-1]
result = client.fget_object(
minio_config["BUCKET"],
os.path.join(folder, artifact_name),
artifact,
)
download_artifact_from_minio(minio_config["FOLDER"],"resnet.pth")
print("[INFO] Downloaded the Model Pth File.....")
download_artifact_from_minio(minio_config["FOLDER"],"cifar10_train.py")
print("[INFO] Downloaded the Model Classifier File.....")
from cifar10_train import CIFAR10Classifier
model = CIFAR10Classifier()
model_pt_path ="./resnet.pth"
model.load_state_dict(torch.load(model_pt_path,map_location=torch.device('cpu')))
model.eval()
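# NOTE: 'transform' and 'transform_normalize' are used below but are never defined
# in this notebook as shown. The definitions here are an assumption -- a plain
# resize/to-tensor pipeline with commonly used CIFAR-10 normalization constants --
# added so the prediction loop can run; adjust them to match the preprocessing
# that was actually used during training.
transform = transforms.Compose([
    transforms.Resize(32),
    transforms.CenterCrop(32),
    transforms.ToTensor(),
])
transform_normalize = transforms.Normalize(
    mean=[0.4914, 0.4822, 0.4465],
    std=[0.2470, 0.2435, 0.2616],
)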
#Lets read two test images and make the prediction and use these images for captum Insights.
imgs = ['./cifar10/kitten.png',"./cifar10/horse.png"]
for img in imgs:
img = Image.open(img)
transformed_img = transform(img)
input_img = transform_normalize(transformed_img)
input_img = input_img.unsqueeze(0) # the model requires a dummy batch dimension
output = model(input_img)
output = F.softmax(output, dim=1)
prediction_score, pred_label_idx = torch.topk(output, 1)
pred_label_idx.squeeze_()
predicted_label = idx_to_labels[str(pred_label_idx.squeeze_().item())]
print('Predicted:', predicted_label, '/', pred_label_idx.item(), ' (', prediction_score.squeeze().item(), ')')
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.features import ImageFeature
# Baseline is all-zeros input - this may differ depending on your data
def baseline_func(input):
return input * 0
# merging our image transforms from above
def full_img_transform(input):
i = Image.open(input)
i = transform(i)
i = transform_normalize(i)
i = i.unsqueeze(0)
i.requires_grad = True
return i
input_imgs = torch.cat(list(map(lambda i: full_img_transform(i), imgs)), 0)
visualizer = AttributionVisualizer(
models=[model],
score_func=lambda o: torch.nn.functional.softmax(o, 1),
classes=list(map(lambda k: idx_to_labels[k], idx_to_labels.keys())),
features=[
ImageFeature(
"Photo",
baseline_transforms=[baseline_func],
input_transforms=[],
)
],
dataset=[Batch(input_imgs, labels=[3,7])]
)
visualizer.serve(debug=True,port=6080)
! kubectl delete --all isvc -n $NAMESPACE
! kubectl delete pod --field-selector=status.phase==Succeeded -n $NAMESPACE
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Widget Styling
Step2: Parent/child relationships
Step3: After the parent is displayed
Step4: Fancy boxes
Step5: TabWidget
Step6: Alignment
Step7: If a label is longer than the minimum width, the widget is shifted to the right
Step8: If a description is not set for the widget, the label is not displayed
Step9: Flex boxes
Step10: Using hbox
Step11: By setting the width of the container to 100% and its pack to center, you can center the buttons.
Step12: Visibility
Step13: Another example
|
<ASSISTANT_TASK:>
Python Code:
%%html
<style>
.example-container { background: #999999; padding: 2px; min-height: 100px; }
.example-container.sm { min-height: 50px; }
.example-box { background: #9999FF; width: 50px; height: 50px; text-align: center; vertical-align: middle; color: white; font-weight: bold; margin: 2px;}
.example-box.med { width: 65px; height: 65px; }
.example-box.lrg { width: 80px; height: 80px; }
</style>
from IPython.html import widgets
from IPython.display import display
button = widgets.Button(
description='Hello World!',
width=100, # Integers are interpreted as pixel measurements.
height='2em', # em is valid HTML unit of measurement.
color='lime', # Colors can be set by name,
background_color='#0022FF', # and also by color code.
border_color='red')
display(button)
from IPython.display import display
float_range = widgets.FloatSlider()
string = widgets.Text(value='hi')
container = widgets.Box(children=[float_range, string])
container.border_color = 'red'
container.border_style = 'dotted'
container.border_width = 3
display(container) # Displays the `container` and all of it's children.
container = widgets.Box()
container.border_color = 'red'
container.border_style = 'dotted'
container.border_width = 3
display(container)
int_range = widgets.IntSlider()
container.children=[int_range]
name1 = widgets.Text(description='Location:')
zip1 = widgets.BoundedIntText(description='Zip:', min=0, max=99999)
page1 = widgets.Box(children=[name1, zip1])
name2 = widgets.Text(description='Location:')
zip2 = widgets.BoundedIntText(description='Zip:', min=0, max=99999)
page2 = widgets.Box(children=[name2, zip2])
accord = widgets.Accordion(children=[page1, page2])
display(accord)
accord.set_title(0, 'From')
accord.set_title(1, 'To')
name = widgets.Text(description='Name:')
color = widgets.Dropdown(description='Color:', options=['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet'])
page1 = widgets.Box(children=[name, color])
age = widgets.IntSlider(description='Age:', min=0, max=120, value=50)
gender = widgets.RadioButtons(description='Gender:', options=['male', 'female'])
page2 = widgets.Box(children=[age, gender])
tabs = widgets.Tab(children=[page1, page2])
display(tabs)
tabs.set_title(0, 'Name')
tabs.set_title(1, 'Details')
display(widgets.Text(description="a:"))
display(widgets.Text(description="aa:"))
display(widgets.Text(description="aaa:"))
display(widgets.Text(description="a:"))
display(widgets.Text(description="aa:"))
display(widgets.Text(description="aaa:"))
display(widgets.Text(description="aaaaaaaaaaaaaaaaaa:"))
display(widgets.Text(description="a:"))
display(widgets.Text(description="aa:"))
display(widgets.Text(description="aaa:"))
display(widgets.Text())
buttons = [widgets.Button(description=str(i)) for i in range(3)]
display(*buttons)
container = widgets.HBox(children=buttons)
display(container)
container.width = '100%'
container.pack = 'center'
w1 = widgets.Latex(value="First line")
w2 = widgets.Latex(value="Second line")
w3 = widgets.Latex(value="Third line")
display(w1, w2, w3)
w2.visible=None
w2.visible=False
w2.visible=True
form = widgets.VBox()
first = widgets.Text(description="First Name:")
last = widgets.Text(description="Last Name:")
student = widgets.Checkbox(description="Student:", value=False)
school_info = widgets.VBox(visible=False, children=[
widgets.Text(description="School:"),
widgets.IntText(description="Grade:", min=0, max=12)
])
pet = widgets.Text(description="Pet's Name:")
form.children = [first, last, student, school_info, pet]
display(form)
def on_student_toggle(name, value):
if value:
school_info.visible = True
else:
school_info.visible = False
student.on_trait_change(on_student_toggle, 'value')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Hypothesis testing
Step2: And here's an example that uses it to compute the p-value of an experiment where we toss a coin 250 times and get 140 heads.
Step3: The p-value turns out to be about 7%, which is considered on the border of statistical significance.
Step4: Permutation test
Step5: Here's an example where we test the observed difference in pregnancy length for first babies and others.
Step6: The p-value is about 17%, which means it is plausible that the observed difference is just the result of random sampling, and might not be generally true in the population.
Step7: Here's the distribution of the test statistic (the difference in means) over many simulated samples.
Step8: Under the null hypothesis, we often see differences bigger than the observed difference.
Step9: If the hypothesis under test is that first babies come late, the appropriate test statistic is the raw difference between first babies and others, rather than the absolute value of the difference. In that case, the p-value is smaller, because we are testing a more specific hypothesis.
Step10: But in this example, the result is still not statistically significant.
Step11: But that's not statistically significant either.
Step12: Here's an example testing the correlation between birth weight and mother's age.
Step13: The reported p-value is 0, which means that in 1000 trials we didn't see a correlation, under the null hypothesis, that exceeded the observed correlation. That means that the p-value is probably smaller than $1/1000$, but it is not actually 0.
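As a sketch of one way to tighten that bound, re-run the same CorrelationPermute test (defined in the code below) with more iterations, so the smallest reportable non-zero p-value becomes 1/10000:
# Assumes the CorrelationPermute class and the (agepreg, totalwgt_lb) data pair
# from the code later in this notebook; only the number of iterations changes.
ht = CorrelationPermute(data)
pvalue = ht.PValue(iters=10000)
pvalue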
Step14: Testing proportions
Step15: Here's an example using the data from the book
Step16: The observed deviance from the expected values is not statistically significant.
Step17: Using this test, we get a smaller p-value
Step18: Taking this result at face value, we might consider the data statistically significant, but considering the results of both tests, I would not draw any strong conclusions.
Step19: If we specifically test the deviations of first babies and others from the expected number of births in each week of pregnancy, the results are statistically significant with a very small p-value. But at this point we have run so many tests, we should not be surprised to find at least one that seems significant.
Step21: Power
Step22: In this example, the false negative rate is 70%, which means that the power of the test (probability of statistical significance if the actual difference is 0.078 weeks) is only 30%.
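As a quick sketch using the variable computed in the code below, the power is simply the complement of the false negative rate:
# neg_rate comes from FalseNegRate(data) in the code below; power is its complement.
power = 1 - neg_rate
print('power =', power)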
Step23: Exercise
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import random
import thinkstats2
import thinkplot
class HypothesisTest(object):
def __init__(self, data):
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
def PValue(self, iters=1000):
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def TestStatistic(self, data):
raise UnimplementedMethodException()
def MakeModel(self):
pass
def RunModel(self):
raise UnimplementedMethodException()
class CoinTest(HypothesisTest):
def TestStatistic(self, data):
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
ct = CoinTest((140, 110))
pvalue = ct.PValue()
pvalue
class DiffMeansPermute(thinkstats2.HypothesisTest):
def TestStatistic(self, data):
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
import first
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
pvalue = ht.PValue()
pvalue
ht.PlotCdf()
thinkplot.Config(xlabel='test statistic',
ylabel='CDF')
class DiffMeansOneSided(DiffMeansPermute):
def TestStatistic(self, data):
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
ht = DiffMeansOneSided(data)
pvalue = ht.PValue()
pvalue
class DiffStdPermute(DiffMeansPermute):
def TestStatistic(self, data):
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
ht = DiffStdPermute(data)
pvalue = ht.PValue()
pvalue
class CorrelationPermute(thinkstats2.HypothesisTest):
def TestStatistic(self, data):
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
cleaned = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = cleaned.agepreg.values, cleaned.totalwgt_lb.values
ht = CorrelationPermute(data)
pvalue = ht.PValue()
pvalue
ht.actual, ht.MaxTestStat()
class DiceTest(thinkstats2.HypothesisTest):
def TestStatistic(self, data):
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
n = sum(self.data)
values = [1, 2, 3, 4, 5, 6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
pvalue = dt.PValue(iters=10000)
pvalue
class DiceChiTest(DiceTest):
def TestStatistic(self, data):
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
dt = DiceChiTest(data)
pvalue = dt.PValue(iters=10000)
pvalue
class PregLengthTest(thinkstats2.HypothesisTest):
def MakeModel(self):
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def TestStatistic(self, data):
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def FalseNegRate(data, num_runs=1000):
    """Computes the chance of a false negative based on resampling.
    data: pair of sequences
    num_runs: how many experiments to simulate
    returns: float false negative rate
    """
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
neg_rate = FalseNegRate(data)
neg_rate
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: p
Step2: Train - Test
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import pandas as pd
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot as plt
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.arima_model import ARIMA
from pandas import DataFrame
from sklearn.metrics import mean_squared_error
from statsmodels.graphics.tsaplots import plot_pacf
%matplotlib inline
def date_parser(x):
return datetime.strptime('190'+x, '%Y-%m')
series = read_csv('shampoo_sales.csv',
header=0,
index_col=0, #time
squeeze=True, #this returns a series
parse_dates=[0], #allows for automatic parsing of dates!
date_parser = date_parser
)
series.head()
series.plot()
plt.show()
autocorrelation_plot(series)
plt.show()
plot_pacf(series)
plt.show()
#fit entire model
model = ARIMA(series, order=(5,1,0))
model_fit = model.fit(disp=0)
model_fit.summary()
residuals = DataFrame(model_fit.resid)
residuals.plot()
plt.show()
residuals.plot(kind='kde')
plt.show()
residuals.describe()
XX = series.values
XX.shape
#splitting
train_size = int(len(XX)*2/3)
train_size
train_set = XX[:train_size]
test_set = XX[train_size:]
train_set.shape, test_set.shape
train_set[0]
history = list(train_set)
history[0] = 3333
history = list(train_set)
len(history)
predictions = []
test_len = len(test_set)
test_len
%%time
for tt in range(len(test_set)):
output = ARIMA(history, order=(5,1,0)).fit(method='css', disp=0).forecast()
y_hat = output[0] #0th is the index of the prediction
predictions.append(y_hat)
observation = test_set[tt]
#history.append(observation)
history.append(y_hat)
print "predicted: {}, expected: {}".format(y_hat, observation)
error = mean_squared_error(predictions, test_set)
error
plt.figure(figsize=(15,6))
plt.plot(predictions, label='preds')
plt.plot(test_set, label='test set')
plt.legend()
plt.show()
fitted = ARIMA(list(train_set), order=(5,1,0)).fit(disp=0)
fitted
autocorrelation_plot(fitted.resid)
plot_pacf(fitted.resid)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Post-training integer quantization
Step2: Build an MNIST model
Step3: Convert to a TensorFlow Lite model
Step4: It is now a TensorFlow Lite model, but all of the parameter data still uses 32-bit floating-point values.
Step5: The model is somewhat smaller because the weights have been quantized, but the other variable data is still in floating-point format.
Step6: Because all of the weights and variable data are now quantized, the model is much smaller than the original TensorFlow Lite model.
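As a quick sketch (using the tflite_model and tflite_model_quant byte strings returned by the converter calls below), you can compare the serialized sizes directly:
# Both objects are the bytes returned by converter.convert(), so len() gives the
# serialized model size.
print('Float model size: %d bytes' % len(tflite_model))
print('Quantized model size: %d bytes' % len(tflite_model_quant))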
Step7: In terms of compatibility this is usually a good thing, but it is not compatible with devices that perform only integer-based operations, such as the Edge TPU.
Step8: The internal quantization remains the same as above, but you can see that the input and output tensors are now in integer format.
Step9: You now have an integer-quantized model that enforces integer data for the model's input and output tensors, making it compatible with integer-only hardware such as the Edge TPU.
Step10: Run the TensorFlow Lite models
Step11: Test the models on one image
Step12: Now, test the float model.
Step13: Next, test the quantized model (which uses uint8 data).
Step14: Evaluate the models
Step15: Evaluate the float model.
Step16: Repeat the evaluation with the fully quantized model that uses uint8 data.
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
logging.getLogger("tensorflow").setLevel(logging.DEBUG)
import tensorflow as tf
import numpy as np
assert float(tf.__version__[:3]) >= 2.3
# Load MNIST dataset
mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images.astype(np.float32) / 255.0
test_images = test_images.astype(np.float32) / 255.0
# Define the model architecture
model = tf.keras.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28)),
tf.keras.layers.Reshape(target_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True),
metrics=['accuracy'])
model.fit(
train_images,
train_labels,
epochs=5,
validation_data=(test_images, test_labels)
)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model_quant = converter.convert()
def representative_data_gen():
for input_value in tf.data.Dataset.from_tensor_slices(train_images).batch(1).take(100):
# Model has only one input so each data point has one element.
yield [input_value]
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
tflite_model_quant = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite_model_quant)
input_type = interpreter.get_input_details()[0]['dtype']
print('input: ', input_type)
output_type = interpreter.get_output_details()[0]['dtype']
print('output: ', output_type)
def representative_data_gen():
for input_value in tf.data.Dataset.from_tensor_slices(train_images).batch(1).take(100):
yield [input_value]
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Ensure that if any ops can't be quantized, the converter throws an error
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Set the input and output tensors to uint8 (APIs added in r2.3)
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_model_quant = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite_model_quant)
input_type = interpreter.get_input_details()[0]['dtype']
print('input: ', input_type)
output_type = interpreter.get_output_details()[0]['dtype']
print('output: ', output_type)
import pathlib
tflite_models_dir = pathlib.Path("/tmp/mnist_tflite_models/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
# Save the unquantized/float model:
tflite_model_file = tflite_models_dir/"mnist_model.tflite"
tflite_model_file.write_bytes(tflite_model)
# Save the quantized model:
tflite_model_quant_file = tflite_models_dir/"mnist_model_quant.tflite"
tflite_model_quant_file.write_bytes(tflite_model_quant)
# Helper function to run inference on a TFLite model
def run_tflite_model(tflite_file, test_image_indices):
global test_images
# Initialize the interpreter
interpreter = tf.lite.Interpreter(model_path=str(tflite_file))
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
predictions = np.zeros((len(test_image_indices),), dtype=int)
for i, test_image_index in enumerate(test_image_indices):
test_image = test_images[test_image_index]
test_label = test_labels[test_image_index]
# Check if the input type is quantized, then rescale input data to uint8
if input_details['dtype'] == np.uint8:
input_scale, input_zero_point = input_details["quantization"]
test_image = test_image / input_scale + input_zero_point
test_image = np.expand_dims(test_image, axis=0).astype(input_details["dtype"])
interpreter.set_tensor(input_details["index"], test_image)
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
predictions[i] = output.argmax()
return predictions
import matplotlib.pylab as plt
# Change this to test a different image
test_image_index = 1
## Helper function to test the models on one image
def test_model(tflite_file, test_image_index, model_type):
global test_labels
predictions = run_tflite_model(tflite_file, [test_image_index])
plt.imshow(test_images[test_image_index])
template = model_type + " Model \n True:{true}, Predicted:{predict}"
_ = plt.title(template.format(true= str(test_labels[test_image_index]), predict=str(predictions[0])))
plt.grid(False)
test_model(tflite_model_file, test_image_index, model_type="Float")
test_model(tflite_model_quant_file, test_image_index, model_type="Quantized")
# Helper function to evaluate a TFLite model on all images
def evaluate_model(tflite_file, model_type):
global test_images
global test_labels
test_image_indices = range(test_images.shape[0])
predictions = run_tflite_model(tflite_file, test_image_indices)
accuracy = (np.sum(test_labels== predictions) * 100) / len(test_images)
print('%s model accuracy is %.4f%% (Number of test samples=%d)' % (
model_type, accuracy, len(test_images)))
evaluate_model(tflite_model_file, model_type="Float")
evaluate_model(tflite_model_quant_file, model_type="Quantized")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Model Inputs
Step2: Generator network
Step3: Discriminator
Step4: Hyperparameters
Step5: Build network
Step6: Discriminator and Generator Losses
Step7: Optimizers
Step8: Training
Step9: Training loss
Step10: Generator samples from training
Step11: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 5, 7, 3, 0, 9. Since this is just a sample, it isn't representative of the full range of images this generator can make.
Step12: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
Step13: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise. Looks like 1, 9, and 8 show up first. Then, it learns 5 and 3.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data')
def model_inputs(real_dim, z_dim):
inputs_real = tf.placeholder(tf.float32, shape=(None, real_dim), name='input_real')
inputs_z = tf.placeholder(tf.float32, shape=(None, z_dim), name='input_z')
return inputs_real, inputs_z
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
''' Build the generator network.
Arguments
---------
z : Input tensor for the generator
out_dim : Shape of the generator output
n_units : Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('generator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(z, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
# Logits and tanh output
logits = tf.layers.dense(h1, out_dim, activation=None)
out = tf.tanh(logits)
return out
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
''' Build the discriminator network.
Arguments
---------
x : Input tensor for the discriminator
n_units: Number of units in hidden layer
reuse : Reuse the variables with tf.variable_scope
alpha : leak parameter for leaky ReLU
Returns
-------
out, logits:
'''
with tf.variable_scope('discriminator', reuse=reuse): # finish this
# Hidden layer
h1 = tf.layers.dense(x, n_units, activation=None)
# Leaky ReLU
h1 = tf.maximum(alpha * h1, h1)
logits = tf.layers.dense(h1, 1, activation=None)
out = tf.sigmoid(logits)
return out, logits
# Size of input image to discriminator
input_size = 784 # 28x28 MNIST images flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Label smoothing
smooth = 0.1
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Generator network here
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
# Disriminator network here
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, n_units=d_hidden_size, reuse=True, alpha=alpha)
# Calculate losses
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
labels=tf.ones_like(d_logits_real) * (1 - smooth)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.zeros_like(d_logits_real)))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
labels=tf.ones_like(d_logits_fake)))
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
batch_size = 100
epochs = 100
samples = []
losses = []
saver = tf.train.Saver(var_list = g_vars)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
# Get images, reshape and rescale to pass to D
batch_images = batch[0].reshape((batch_size, 784))
batch_images = batch_images*2 - 1
# Sample random noise for G
batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
# Run optimizers
_ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
_ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
# At the end of each epoch, get the losses and print them out
train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
train_loss_g = g_loss.eval({input_z: batch_z})
print("Epoch {}/{}...".format(e+1, epochs),
"Discriminator Loss: {:.4f}...".format(train_loss_d),
"Generator Loss: {:.4f}".format(train_loss_g))
# Save losses to view after training
losses.append((train_loss_d, train_loss_g))
# Sample from generator as we're training for viewing afterwards
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
samples.append(gen_samples)
saver.save(sess, './checkpoints/generator.ckpt')
# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
pkl.dump(samples, f)
%matplotlib inline
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
def view_samples(epoch, samples):
fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
for ax, img in zip(axes.flatten(), samples[epoch]):
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
return fig, axes
# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
samples = pkl.load(f)
_ = view_samples(-1, samples)
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
ax.imshow(img.reshape((28,28)), cmap='Greys_r')
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
sample_z = np.random.uniform(-1, 1, size=(16, z_size))
gen_samples = sess.run(
generator(input_z, input_size, reuse=True),
feed_dict={input_z: sample_z})
view_samples(0, [gen_samples])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Db2 Extensions
Step2: <a id='top'></a>
Step3: If you connected to the SAMPLE database, you will have the EMPLOYEE and DEPARTMENT tables available to you. However, if you are connecting to a different database, you will need to execute the next command to populate the tables for you. Note, if you run this command and the two tables already exist, the tables will not be replaced. So don't worry if you execute this command by mistake.
Step4: Requesting data from Db2 using the standard %sql (ibm_db) interface is relatively straight-forward. We just need to place the SQL in the command and execute it to get the results.
Step5: Now that we have a working Db2 connection, we will need to set up an OData service to talk to Db2.
Step6: Back to Top
Step7: Back to Top
Step8: The results will show the URL service command used (http
Step9: One drawback of OData is that we don't get the actual error text returned. We know what the error code is, but the message isn't that descriptive. Using the %sql (Db2) command, we can find out that the table doesn't exist.
Step10: Back to Top
Step11: To limit the results from a OData request, you must add the \$top=x modifier at the end of the service request. The format then becomes
Step12: The last example illustrates two additional features of the %odata command. First, you can span statements over multiple lines by using the backslash character ('\'). You could also use the %%odata command to do this without backslashes, but it unfortunately will not allow for variable substitution. The current settings being used by OData can be found by issuing the SETTINGS command.
Step13: You can also refer to these values by using the settings['name'] variable. So the DROP statement just took the current DATABASE and SCHEMA settings and deleted the definition for the EMPLOYEE table. You could have done this directly with
Step14: And this command will show the connection service being created for us.
Step15: Back to Top
Step16: You can use this URL to directly access the results through a browser, or any application that can read the results returned by the OData gateway. The print statement below will display the URL as an active link. Click on that to see the results in another browser window.
Step17: When a URL is generated, we need to append the \$format=json tag at the end to tell the OData service and the browser how to handle the results. When we run OData and RESTful calls from a programming language (like Python), we are able to send information in the header which tells the API how to handle the results and parameters. All of the RESTful calls to the OData gateway use the following header information
Step18: Back to Top
Step19: You will notice that not all of the rows have been displayed. The output has been limited to 10 lines. 5 lines from the start of the answer set and 5 lines from the bottom of the answer set are displayed. If you want to change the maximum number of rows to be displayed, use the MAXROWS setting.
Step20: If you want an unlimited number of rows returned, set maxrows to -1.
Step21: It is better to limit the results from the answer set by using the LIMIT clause in the SELECT statement. LIMIT will force Db2 to stop retrieving rows after "x" number have been read, while the MAXROWS setting will retrieve all rows and then only display a portion of them. The one advantage of MAXROWS is that you see the bottom 5 rows while you would only be able to do that with Db2 if you could reverse sort the output. The current OData implementation does not have the ability to $orderby, so sorting to reverse the output is not possible.
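As a side-by-side sketch using commands that appear later in this notebook, the first form limits how many rows Db2 retrieves, while the second only limits how many of the retrieved rows are displayed:
# Db2 stops retrieving after five rows:
%odata SELECT * FROM EMPLOYEE LIMIT 5
# All rows are retrieved, but only ten are displayed:
%odata set maxrows 10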
Step22: Example
Step23: Back to Top
Step24: The COUNT(*) function is available as part of a SELECT list and it cannot include any other column names. If you do include other column names they will be ignored.
Step25: One of the unusual behaviors of the COUNT(*) function is that it will actually return the entire answer set under the covers. The %odata command strips the count out from the results and doesn't display the rows returned. That is probably not what you would expect from this syntax! The COUNT function is better described as the count of physical rows returned. Here is the same example with 5 rows returned and the JSON records.
Step26: One of the recommendations would be not to use the COUNT(*) function to determine the number of rows that will be retrieved, especially if you expect there to be a large number of rows. To minimize the data returned, you can use the form COUNT(column), which will modify the OData request to return the count and ONLY that column in the result set. This is a compromise in terms of the amount of data returned. This example uses the -r (raw) flag, which results in all of the JSON headers and data being displayed. The JSON flag (-j) will not display any records.
Step27: Back to Top
Step28: This actually can cause some issues if you try to reuse the connection information that was created with the UNKNOWN_TBL. Since the service could not determine the structure of the table, the service will not return any column information with a select statement. The next SQL statement will create the UNKNOWN_TBL.
Step29: Retrying the SELECT statement will result in 43 rows with no columns returned!
Step30: To correct this situation, you need to DROP the connect that the %odata program is using and reissue the SELECT statement.
Step31: Now you can try the SQL statement again.
Step32: Back to Top
Step33: The datatypes are not the same as what one would expect from a relational database. You get generic information on the character columns (String) and on the numbers (Int16, Decimal). The Decimal specification actually contains the number of digits and decimal places, but that isn't returned when using the table display.
Step34: Example
Step35: Example
Step36: Example
Step37: Example
Step38: Example
Step39: Converting to OData will mean that the search will look across the entire string, not just the beginning.
Step40: Back to Top
Step41: Back to Top
Step42: We also need to remove the connection information from the system in the event we've run this example before.
Step43: A couple of things about the table design. The salary is NOT NULL, while the BONUS allows for nulls. Unfortunately, the DESCRIBE command only tells us about the columns in the table and their OData data type, and no indication of whether table.
Step44: The initial INSERT will populate the table with valid data. The echo option will show the json document that is sent via the POST command to OData to insert the row.
Step45: Just to make sure things were inserted properly, we retrieve the contents of the table.
Step46: OData (and Db2) will return an error message about our missing SALARY column which requires a value.
Step47: We can try this on the Db2 side as well to get the details of the error.
Step48: Back to Top
Step49: A primary key is required to issue a DELETE command. You also need to make sure that the primary key column does not contain NULLs because a primary key must always contain a value. The following SQL tries to fix the primary key issue.
Step50: Check to see if we can delete the row yet.
Step51: Adding a primary key after the fact won't help because the service URL would have already recorded the information about the table (and the fact it didn't have a primary key at the time). We need to drop our SERVICE URL and generate another one.
Step52: We do a describe on the table and this will force another service URL to be generated for us.
Step53: Trying the DELETE this time will work.
Step54: Deleting the record again still gives you a successful return code. The call always returns a successful status even if the record doesn't exist.
Step55: Back to Top
Step56: At this point we can update their salary.
Step57: We doublecheck the results to make sure we got it right!
Step58: Back to Top
Step59: We also need to drop any service connection you may have created in the past with this table name.
Step60: Now that we have created the view, we can retrieve rows from it just like a standard table.
Step61: You can also create sophisticated VIEWS that can take parameters to adjust the results returned. For instance, consider the following SQL statement which gives me count of employees that work in SYSTEMS departments.
Step62: There are two departments with the name SYSTEMS in them, but there is no easy way to create a view for every possible combination of searches that you may want. Instead what we do is create a table that contains the pattern we want to look for and create the view so that it references this table.
Step63: Now we create a view that access this PATTERN table to do the actual search. Note that values that are inserted into the PATTERN table must have the SQL special characters like % to make sure patterns can be anywhere in the string.
Step64: In order for our view to work properly, we must populate our PATTERN table with a value. To test the view we will use %SYSTEMS% as our first example.
Step65: And now we can test our view by selecting from it.
Step66: Now that we have it working, we can try exactly the same thing but with OData. Our first transaction will update the search key to SERVICE.
Step67: The next OData statement should select the count of employees working in service departments.
|
<ASSISTANT_TASK:>
Python Code:
%run db2odata.ipynb
%run db2.ipynb
%sql connect reset
%sql connect
%sql -sampledata
%sql SELECT * FROM EMPLOYEE
%odata register
%odata RESET TABLE EMPLOYEE
s = %odata -e SELECT lastname, salary from employee where salary > 50000
s = %odata -e SELECT * FROM EMPLOYEE
%odata select * from unknown_table
%sql select * from unknown_table
s = %odata -e -j SELECT * FROM EMPLOYEE LIMIT 1
%odata \
RESET \
DATABASE {odata_settings['database']} \
SCHEMA {odata_settings['schema']} \
TABLE EMPLOYEE
%odata settings
%odata set DATABASE {odata_settings['database']} SCHEMA {odata_settings['schema']}
u = %odata -e select * from employee limit 1
url = %odata -e select * from employee limit 1
print(url)
%odata delete
s = %odata -e SELECT * FROM EMPLOYEE
%odata set maxrows 10
%odata set maxrows -1
%odata select * from employee
%odata set maxrows 10
s = %odata -e SELECT * FROM EMPLOYEE LIMIT 5
s = %odata -e SELECT FIRSTNME, LASTNAME FROM EMPLOYEE LIMIT 5
s = %odata -e SELECT COUNT(*) FROM EMPLOYEE LIMIT 1
s = %odata -e -r SELECT COUNT(*) FROM EMPLOYEE LIMIT 5
s = %odata -e -r SELECT COUNT(EMPNO) FROM EMPLOYEE LIMIT 5
%sql -q DROP TABLE UNKNOWN_TBL
%odata RESET TABLE UNKNOWN_TBL
s = %odata -e SELECT * FROM UNKNOWN_TBL
%sql CREATE TABLE UNKNOWN_TBL AS (SELECT * FROM EMPLOYEE) WITH DATA
s = %odata -e SELECT * FROM UNKNOWN_TBL
%odata RESET TABLE UNKNOWN_TBL
s = %odata -e SELECT * FROM UNKNOWN_TBL
%odata DESCRIBE EMPLOYEE
s = %odata -e SELECT EMPNO, WORKDEPT, SALARY FROM EMPLOYEE WHERE SALARY < 40000
s = %odata -e SELECT EMPNO, WORKDEPT, SALARY FROM EMPLOYEE WHERE SALARY < 40000 AND WORKDEPT = 'E21'
s = %odata -e \
SELECT EMPNO, WORKDEPT, SALARY \
FROM EMPLOYEE \
WHERE SALARY < 40000 AND WORKDEPT = 'E21' OR WORKDEPT = 'E11'
s = %odata -e \
SELECT EMPNO, WORKDEPT, SALARY \
FROM EMPLOYEE \
WHERE SALARY < 40000 AND (WORKDEPT = 'E21' OR WORKDEPT = 'E11')
s = %odata -e SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%AA%'
%sql SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%ON'
s = %odata -e SELECT LASTNAME FROM EMPLOYEE WHERE LASTNAME LIKE '%ON'
s = %odata -e SELECT * FROM EMPLOYEE LIMIT 5
%%sql -q
DROP TABLE TESTODATA;
CREATE TABLE TESTODATA
(
EMPNO INT NOT NULL,
LASTNAME VARCHAR(10) NOT NULL,
SALARY INT NOT NULL,
BONUS INT
);
%sql select * from testodata
%odata -e select * from testodata
%odata RESET TABLE TESTODATA
%odata -e DESCRIBE TESTODATA
%odata -e INSERT INTO TESTODATA(EMPNO, LASTNAME, SALARY, BONUS) VALUES (1,'Fred',10000,1000)
%odata SELECT * FROM TESTODATA
%odata -e INSERT INTO TESTODATA(EMPNO, LASTNAME, BONUS) VALUES (2,'Wilma',50000)
%sql INSERT INTO TESTODATA(EMPNO, LASTNAME, BONUS) VALUES (2,'Wilma',50000)
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%sql ALTER TABLE TESTODATA ADD CONSTRAINT PKTD PRIMARY KEY (EMPNO)
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%odata RESET TABLE TESTODATA
%odata DESCRIBE TESTODATA
%odata -e DELETE FROM TESTODATA WHERE EMPNO=1
%odata -e DELETE FROM TESTODATA WHERE EMPNO=2
%odata -e \
INSERT INTO TESTODATA(EMPNO, LASTNAME, SALARY, BONUS) \
VALUES (1,'Fred',10000,1000)
%odata -e UPDATE TESTODATA SET BONUS=2000 WHERE EMPNO=1
%odata SELECT * FROM TESTODATA
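# Optional cross-check (mirrors the earlier pattern of comparing OData and Db2 results):
# run the same query through the Db2 %sql magic
%sql SELECT * FROM TESTODATA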
%%sql
CREATE OR REPLACE VIEW EMPDEPT AS
(
SELECT LASTNAME, DEPTNAME
FROM EMPLOYEE E, DEPARTMENT D
WHERE E.WORKDEPT = D.DEPTNO
)
%odata RESET TABLE EMPDEPT
%odata SELECT LASTNAME, DEPTNAME FROM EMPDEPT LIMIT 5
%%sql
SELECT
COUNT(*)
FROM
EMPLOYEE E, DEPARTMENT D
WHERE
E.WORKDEPT = D.DEPTNO
AND D.DEPTNAME LIKE '%SYSTEMS%'
%%sql -q
DROP TABLE PATTERN;
CREATE TABLE PATTERN
(
PATTERN_NUMBER INT NOT NULL PRIMARY KEY,
SEARCH VARCHAR(16)
);
%odata RESET TABLE EMPDEPT
%odata RESET TABLE PATTERN
%%sql
CREATE OR REPLACE VIEW EMPDEPT AS
(
SELECT
COUNT(*) AS COUNT
FROM
EMPLOYEE E, DEPARTMENT D
WHERE
E.WORKDEPT = D.DEPTNO
AND D.DEPTNAME LIKE
(
SELECT SEARCH FROM PATTERN WHERE PATTERN_NUMBER=1
)
);
%sql INSERT INTO PATTERN VALUES(1,'%SYSTEMS%')
%sql SELECT * FROM EMPDEPT
%odata UPDATE PATTERN SET SEARCH = '%SERVICE%' WHERE PATTERN_NUMBER = 1
%odata SELECT * FROM EMPDEPT
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load sample dataset
Step2: Distribution plots
Step3: It is often useful to overlay the mean and SD on the histogram; below is one way to do it.
Step4: You can change arguments such as bins and the kde flag to customize the plot
Step5: Plotting dist of 2 variables
Step6: You can use the kind argument to change the scatter to hex, reg etc
Step7: Plotting dist of all variables
Step8: Plotting data frequency
|
<ASSISTANT_TASK:>
Python Code:
import seaborn as sns
%matplotlib inline
tips = sns.load_dataset('tips')
tips.head(5)
#find dist of total bills
sns.distplot(tips['total_bill'])
tips.total_bill.mean()
tips_mean = tips.total_bill.mean()
tips_sd = tips.total_bill.std()
ax = sns.distplot(tips['total_bill'])
# plot mean in black
ax.axvline(x=tips_mean, color='black', linestyle='dashed')
# plot mean +- 1SD in red, dotted
ax.axvline(x=tips_mean + tips_sd, color='red', linestyle='dotted')
ax.axvline(x=tips_mean - tips_sd, color='red', linestyle='dotted')
# title
ax.set_title('$\mu = {}$ | $\sigma = {}$'.format(round(tips_mean, 2), round(tips_sd, 2)))
sns.distplot(tips['total_bill'], kde=False, bins=35)
sns.jointplot(x=tips['total_bill'], y=tips['tip'])
# Annotated regression jointplot. Note: this cell assumes a `hurricanes_ipl` DataFrame
# loaded elsewhere (it is not defined in this notebook) and uses scipy.stats for the
# Pearson correlation annotation.
from scipy import stats
jgrid = sns.jointplot(x='min_season', y='max_wind_merged', data=hurricanes_ipl,
                      kind='reg', joint_kws={'line_kws':{'color':'green'}}, height=7, space=0.5)
j = jgrid.annotate(stats.pearsonr)
j = jgrid.ax_joint.set_title('Does hurricane wind speed increase over time?')
sns.jointplot(x=tips['total_bill'], y=tips['tip'], kind='hex')
sns.jointplot(x=tips['total_bill'], y=tips['tip'], kind='reg') #regression
sns.pairplot(tips, hue='sex')
sns.rugplot(tips['total_bill'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1.2. Tuple definition
Step2: 1.3. Counting occurrences in tuples
Step3: 1.4. Searching for an item in a tuple
Step4: 1.5. Slicing in tuples
Step5: 1.6. Functions can return tuples
Step6: 1.7. Swapping pairs with tuples is fun!
Step7: 1.8. Tuples are immutable
Step8: ... or be changed
Step9: Tuples are immutable!
Step10: 2. [0] Lists
Step11: 2.1 [1] (Of course) lists are objects
Step12: (Tuples are about three times faster)
Step13: 2.2 [0] Appending items to a list (O(1))
Step14: Python lists can be "promiscuous"
Step15: 2.3 [0] Inserting items (O(n))
Step16: 2.4 [0] Deleting items from a list by content (O(n))
Step17: 2.4 [0] Deleting items from the beginning of the list (O(1))
Step18: 2.4 [0] Deleting items from the end of the list (O(1))
Step19: 2.5 [0] Sorting the elements of a list (O(n log n))
Step20: 2.6 [1] Erasing all list items (O(1))
Step21: 2.7 [0] List slicing (O(s))
Step22: 2.8 [1] Defining lists with list comprehensions
Step23: 2.9 [1] Lists are mutable objects
Step24: 3. [0] Sets
Step25: 3.2. [0] Sets can grow (O(1))
Step26: 3.3. [0] Sets cannot contain duplicate objects
Step27: 3.4. [1] Sets cannot contain mutable objects
Step28: 3.5 [0] Intersection of sets (O(min(len(s), len(t))))
Step29: 3.6 [0] Union of sets (O(len(s)+len(t)))
Step30: 3.7. [0] Sets are MUCH more efficient for searching by content than lists
Step31: 4 [0] Dictionaries
Step32: 4.1 [0] Static definition of a dictionary
Step33: 4.2 [0] Indexing of a dictionary by a key (O(1))
Step34: 4.3 [0] Testing if a key is in the dictionary (O(1))
Step35: 4.4 [1] Getting the keys (O(n))
Step36: 4.5 [1] Getting the values (O(n))
Step37: 4.4 [1] Determining the position of a key in a dictionary (O(n))
Step38: 4.6 [0] Inserting a new entry (O(1))
Step39: [0] 4.7 Deleting an entry (O(1))
Step40: 4.8 [1] Dictionaries are mutable
Step41: 4.9 [0] Looping a dictionary (O(n))
Step42: 5. Bytes
Step43: 5.2. Indexing in a bytes sequence
Step44: 5.3. Concatenation of bytes sequences
Step45: 5.4. Bytes are immutable
Step46: 6. Bytearray
Step47: 7. Arrays
Step48: Element access
Step49: Slice access
Step50: Appending elements
Step51: Concatenating arrays
Step52: Deleting elements
|
<ASSISTANT_TASK:>
Python Code:
print(type(()))
help(())
!python -m timeit "x = (1, 'a', 'b', 'a')"
a = (1, 'a', 'b', 'a')
a.count('a')
a.index('b')
a
a[2] # The 3-rd item
a[2:1] # Extract the tuple from the 2-nd item to the 1-st one
a[2:2] # Extract from the 2-nd item to the 2-nd item
a[2:3] # Extract from the 2-nd item to the 3-rd one
a[2:4] # Extract one item more
a[1:] # Extract from the 1-st to the end
a[:] # Extract all items (a==a[:])
def return_tuple():
return (1, 'a', 2)
print(return_tuple())
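# Extra illustration (not part of the original notebook): a returned tuple can be
# unpacked directly into separate variables in a single assignment
x, y, z = return_tuple()
print(x, y, z)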
a = 1; b = 2
print(a, b)
(a, b) = (b, a)
print(a, b)
a = (1, 'a')
print(id(a),a)
a += (2,) # This creates a new instance of 'a'
print(id(a),a)
a[1] = 2
a = 1; b = 2
print('"a" is in', id(a))
t = (a, b)
print('"t" is in', id(t), 'and contains', t)
a = 3
print('"a" is in', id(a))
print('"t" is in', id(t), 'and contains', t)
help([])
print(type([]));
!python -m timeit "x = [1, 'a', 'b', 'a']" # List creation is more expensive than tuple creation
a = [] # Empty list definition
a.append('Hello')
a.append('world!')
a
a.append(100)
a
a.insert(1,'wave!')
a
a.remove('Hello')
a
b = ['a', 'b', 'a']; b.remove('a'); print(b)
a.pop(0)
print(a)
a.pop(len(a)-1) # By index, equivalent to "del a[-1]"
print(a)
a = []
a.append('c')
a.append('b')
a.append('a')
a
a.sort()
a
a.reverse()
a
# Indexing
print(a[1]) # Second element
print(a[-1]) # Last element
print(a[100]) # Error
a+a
a*3
a[::-1]
x = [1,2,3,2,1] # Palindrome?
x[::-1] == x
a.clear()
a
a.append('Hello')
print(a, a[0])
a.append(1)
a.append(('a',2))
a.append('world!')
a
print(a[1:1], a[1:2], a[1:3], a[1:], a[:], a[1:3:2])
[x**2 for x in range(10)]
# http://stackoverflow.com/questions/31045518/finding-prime-numbers-using-list-comprehention
[x for x in range(2, 2000) if all(x % y != 0 for y in range(2, int(x ** 0.5) + 1))]
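# A further illustrative example (demo only): a conditional expression inside a
# comprehension, clipping negative values to zero
[x if x >= 0 else 0 for x in range(-3, 4)]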
l = [[x+y for x in range(10)] for y in range(10)]
l
l[1][2]
10 in l
10 in l[1]
# Lists of lists
x = [[1,2],[2,3,4],['a']]
x
l = [2,3]
id(l)
l[1] = 4
l
id(l)
a = {1, 2, 'a', (1, 2)}
a
print(type(a))
help(a)
a.add('a')
print(a)
a.add('a')
print(a)
a = set()
a.add([1,2]) # Sets can not contain lists
a = set() # Empty set
a.add({1,2,3}) # Sets can not contain sets
a = {1,2,3}
b = {2,3,4}
a.intersection(b)
a.union(b)
a = set(range(1000))
print(a)
%timeit '0' in a
a = list(range(1000))
print(a)
%timeit '0' in a
help({})
a = {'Macinstosh':'OSX', 'PC':'Windows', 'Macintosh-Linux':'Linux', 'PC-Linux':'Linux'}
a
a['PC']
'PC-Linux' in a
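# Related safe-lookup illustration (not part of the original notebook): dict.get()
# returns a default value instead of raising KeyError when the key is missing
a.get('Celullar', 'not found')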
a.keys()
a.values()
list(a.keys()).index("Macintosh-Linux")
for i in a.keys():
print(i)
a['Celullar'] = "Android"
a
del a['Celullar']
a
# Modifiying an entry
a.update({"PC": "Windows 10"})
a
id(a)
a['Macintosh-Linux'] = 'Linux for the Mac'
a
id(a)
for i in a:
print(i, a[i])
for i in a.values():
print(i)
for i in a.items():
print(i)
a = b'hello'
print(type(a))
print(a)
chr(a[1])
b = b'world!'
print('"b" is in', id(b))
c = a + b' ' + b
print(c)
a = b'abc'
print(id(a))
a += b'efg'
print(id(a))
%%timeit x = b''
x += b'x'
%%timeit x = bytearray()
x.extend(b'x')
# Array of bytes = 0
x = bytearray(10)
x
len(x)
for i in range(len(x)):
x[i] += 1
x
# A byte in Python is a 0 <= value <= 255.
x[1] = -1
# A bytearray can be created from a list
x = bytearray([1,2,3])
x
import sys
x = bytearray(sys.stdin.read(5).encode())
x
import array as arr
a = arr.array('d', [1.1, 3.5, 4.5])
print(a)
a[1]
a[1] = 4.0
a
a[1:2]
a.append(5.0)
a
a.extend([5.0, 6.0, 7.0])
a
a += arr.array('d', [1.1, 4.0, 4.5])
a
# By value
a.remove(4.0)
a
# By index
a.pop(3)
a
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Utility functions
Step2: Read images
Step3: Initial Alignment
Step4: Registration
Step5: Post registration analysis
Step6: Now visually inspect the results.
Step7: If we are satisfied with the results, save them to file.
|
<ASSISTANT_TASK:>
Python Code:
import SimpleITK as sitk
# Utility method that either downloads data from the Girder repository or
# if already downloaded returns the file name for reading from disk (cached data).
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
# Always write output to a separate directory, we don't want to pollute the source directory.
import os
OUTPUT_DIR = "Output"
%matplotlib inline
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed
from IPython.display import clear_output
# Callback invoked by the interact IPython method for scrolling through the image stacks of
# the two images (moving and fixed).
def display_images(fixed_image_z, moving_image_z, fixed_npa, moving_npa):
# Create a figure with two subplots and the specified size.
plt.subplots(1, 2, figsize=(10, 8))
# Draw the fixed image in the first subplot.
plt.subplot(1, 2, 1)
plt.imshow(fixed_npa[fixed_image_z, :, :], cmap=plt.cm.Greys_r)
plt.title("fixed image")
plt.axis("off")
# Draw the moving image in the second subplot.
plt.subplot(1, 2, 2)
plt.imshow(moving_npa[moving_image_z, :, :], cmap=plt.cm.Greys_r)
plt.title("moving image")
plt.axis("off")
plt.show()
# Callback invoked by the IPython interact method for scrolling and modifying the alpha blending
# of an image stack of two images that occupy the same physical space.
def display_images_with_alpha(image_z, alpha, fixed, moving):
img = (1.0 - alpha) * fixed[:, :, image_z] + alpha * moving[:, :, image_z]
plt.imshow(sitk.GetArrayViewFromImage(img), cmap=plt.cm.Greys_r)
plt.axis("off")
plt.show()
# Callback invoked when the StartEvent happens, sets up our new data.
def start_plot():
global metric_values, multires_iterations
metric_values = []
multires_iterations = []
# Callback invoked when the EndEvent happens, do cleanup of data and figure.
def end_plot():
global metric_values, multires_iterations
del metric_values
del multires_iterations
# Close figure, we don't want to get a duplicate of the plot latter on.
plt.close()
# Callback invoked when the IterationEvent happens, update our data and display new figure.
def plot_values(registration_method):
global metric_values, multires_iterations
metric_values.append(registration_method.GetMetricValue())
# Clear the output area (wait=True, to reduce flickering), and plot current data
clear_output(wait=True)
# Plot the similarity metric values
plt.plot(metric_values, "r")
plt.plot(
multires_iterations,
[metric_values[index] for index in multires_iterations],
"b*",
)
plt.xlabel("Iteration Number", fontsize=12)
plt.ylabel("Metric Value", fontsize=12)
plt.show()
# Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the
# metric_values list.
def update_multires_iterations():
global metric_values, multires_iterations
multires_iterations.append(len(metric_values))
fixed_image = sitk.ReadImage(fdata("training_001_ct.mha"), sitk.sitkFloat32)
moving_image = sitk.ReadImage(fdata("training_001_mr_T1.mha"), sitk.sitkFloat32)
interact(
display_images,
fixed_image_z=(0, fixed_image.GetSize()[2] - 1),
moving_image_z=(0, moving_image.GetSize()[2] - 1),
fixed_npa=fixed(sitk.GetArrayViewFromImage(fixed_image)),
moving_npa=fixed(sitk.GetArrayViewFromImage(moving_image)),
);
initial_transform = sitk.CenteredTransformInitializer(
fixed_image,
moving_image,
sitk.Euler3DTransform(),
sitk.CenteredTransformInitializerFilter.GEOMETRY,
)
moving_resampled = sitk.Resample(
moving_image,
fixed_image,
initial_transform,
sitk.sitkLinear,
0.0,
moving_image.GetPixelID(),
)
interact(
display_images_with_alpha,
image_z=(0, fixed_image.GetSize()[2] - 1),
alpha=(0.0, 1.0, 0.05),
fixed=fixed(fixed_image),
moving=fixed(moving_resampled),
);
registration_method = sitk.ImageRegistrationMethod()
# Similarity metric settings.
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)
# Optimizer settings.
registration_method.SetOptimizerAsGradientDescent(
learningRate=1.0,
numberOfIterations=100,
convergenceMinimumValue=1e-6,
convergenceWindowSize=10,
)
registration_method.SetOptimizerScalesFromPhysicalShift()
# Setup for the multi-resolution framework.
registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])
registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])
registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()
# Don't optimize in-place, we would possibly like to run this cell multiple times.
registration_method.SetInitialTransform(initial_transform, inPlace=False)
# Connect all of the observers so that we can perform plotting during registration.
registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
registration_method.AddCommand(
sitk.sitkMultiResolutionIterationEvent, update_multires_iterations
)
registration_method.AddCommand(
sitk.sitkIterationEvent, lambda: plot_values(registration_method)
)
final_transform = registration_method.Execute(
sitk.Cast(fixed_image, sitk.sitkFloat32), sitk.Cast(moving_image, sitk.sitkFloat32)
)
print(f"Final metric value: {registration_method.GetMetricValue()}")
print(
f"Optimizer's stopping condition, {registration_method.GetOptimizerStopConditionDescription()}"
)
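# Optional: inspect the optimised transform directly; printing the transform object
# reports its type and parameter values
print(final_transform)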
moving_resampled = sitk.Resample(
moving_image,
fixed_image,
final_transform,
sitk.sitkLinear,
0.0,
moving_image.GetPixelID(),
)
interact(
display_images_with_alpha,
image_z=(0, fixed_image.GetSize()[2] - 1),
alpha=(0.0, 1.0, 0.05),
fixed=fixed(fixed_image),
moving=fixed(moving_resampled),
);
sitk.WriteImage(
moving_resampled, os.path.join(OUTPUT_DIR, "RIRE_training_001_mr_T1_resampled.mha")
)
sitk.WriteTransform(
final_transform, os.path.join(OUTPUT_DIR, "RIRE_training_001_CT_2_mr_T1.tfm")
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Chained Conditional
Step2: The condition need not directly involve a logical operator. For example
Step3: Iterations
Step4: enumerate
Step5: For Loop
Step6: Tuples
Step7: You cannot change any element of a tuple to a new value since tuples are immutable types. For example
Step8: Short in-class assignment
|
<ASSISTANT_TASK:>
Python Code:
# Greater than ( > )
if 1 > 0:
print("One is more than zero")
else:
print("BITS Pilani Goa Campus is better than IIT Kanpur")
# Less than ( < )
if 12 < 42:
print("Yes, 12 is less than 42")
else:
print("Everyone registered in CTE Python will pass with distinction (90%+ marks)")
# Equal to ( == )
if 2 + 2 == 4:
print("Two plus Two equals Four")
else:
print("Lite......")
# Not equal to ( != )
if 1 != 0:
print("Sachin")
else:
print("Kohli")
# Inversion operator
if not 2 + 2 == 4:
print("Lite....")
else:
print("CTE Python....")
if 1 < 0:
print(1, end='')
elif 2 == 3:
print(2, end='')
elif not 3 == 4:
print(3, end='')
print(".....Lite")
a = [1, 2, 3]
if isinstance(a, list):
print(a, "is a list !")
if 2 in a:
print("Yes, 2 is in ", a)
if 5 not in a:
print("No, 5 is not in ", a)
list(range(0, 10))
list(range(-4, 10))
list(range(0, 10, 3))
list(range(0, 5, -6))
list(range(-10, 10, -5))
list(range(10, 2, 3))
list(range(10, 1, -2))
list(enumerate(range(0,5)))
list(enumerate(range(0,5), 10))
a = [1, 2, 3, 4, 5]
for value in a:
print(value, end=' ')
b = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
for sublist in b:
for value in sublist:
print(value, end=' ')
a = (2, 3, 4)
print(a)
a[1] = 10
print(a)
a = ["One", "Two", "Three"]
for i, value in enumerate(a):
print("Value at index <", i, "> of list<a> is : ", value)
for i in range(0, 10):
for j in range(0, i):
print("*", end='')
print("")
for i in range(0, 10):
for j in range(10, i, -1):
print(" ", end='')
for k in range(0, i):
print("*", end='')
print("")
for i in range(0, 5):
for j in range(0, 5 - i):
print("*", end='')
for k in range(0, 2*i):
print("-", end='')
for j in range(0, 5 - i):
print("*", end='')
print("")
for i in range(3, -1, -1):
for j in range(0, 5 - i):
print("*", end='')
for k in range(0, 2*i):
print("-", end='')
for j in range(0, 5 - i):
print("*", end='')
print("")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task
|
<ASSISTANT_TASK:>
Python Code:
shps = !ls /home/user/data/north_carolina/shape/*shp
td = {}
for shp in shps:
with fiona.open( shp, 'r') as inp:
td[ inp.name ] = inp.bounds
## Fiona inp.bounds => ( lower_lng, lower_lat, upper_lng, upper_lat)
## Create shapely geometry from the coords
## shapely/geometry/geo.py
## box(minx, miny, maxx, maxy, ccw=True)
nboxes = []
for k,v in iter(td.iteritems()):
nboxes.append( box( v[0], v[1], v[2], v[3]) )
print 'Found BBOXs: ',len(nboxes)
## create a single cascaded UNION too
dst_poly = cascaded_union(nboxes)
dst_poly.bounds
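## Extra sanity check (illustrative only): the union's area is smaller than the summed
## box areas whenever bounding boxes overlap
print('union area:', dst_poly.area, ' sum of box areas:', sum(b.area for b in nboxes))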
## Draw every BBOX for all files, transparently
## use matplotlib.Polygon to draw; let autoscale calculate the area
fig, ax = plt.subplots(figsize=(12,12))
for polygon in nboxes:
mpl_poly = Polygon(np.array(polygon.exterior), facecolor="y", alpha=0.02)
ax.add_patch(mpl_poly)
## Indicate the exterior of the study area with a heavy line
ax.add_patch( Polygon(np.array(dst_poly.exterior), fill=False, lw=4, ec="b", alpha=0.9) )
ax.relim()
ax.autoscale()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Understand how paths are joined
Step2: numpy.c_ function
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
np.random.seed(20)
runningtimes=0
while (runningtimes <=10):
x = np.random.randn(5)
print(x)
print('**')
runningtimes=runningtimes+1
import numpy as np
runningtimes=0
while (runningtimes <=10):
np.random.seed(5)
x = np.random.randn(5)
print(x)
print('**')
runningtimes=runningtimes+1
import os
PROJECT_ROOT_DIR = '.'
CHAPTER_ID = '001'
fig_id = '300'
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print(path)
import numpy as np
arr1 = np.c_[np.array([1,2,3]), np.array([4,5,6])]
print(arr1)
print('*******')
arr2 = np.c_[np.array([[1,2,3]]), np.array([[4,5,6]])]
print(arr2)
print('*******')
arr3 = np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
print(arr3)
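# Illustrative comparison (demo only): for 1-D inputs, np.column_stack produces the
# same result as np.c_
arr4 = np.column_stack((np.array([1,2,3]), np.array([4,5,6])))
print(arr4)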
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First we generate the image
Step2: 2. Next re-partition the image into a polar grid
Step3: Now regenerate the image to test construct_rphi_avg_image
Step4: But we know we have 10 fold symmetry, so let's do something more interesting
Step5: Plot the results
|
<ASSISTANT_TASK:>
Python Code:
# imports
import matplotlib.pyplot as plt
import numpy as np
from skbeam.core.image import construct_rphi_avg_image
%matplotlib inline
# first generate some random scattering pattern
# There are missing regions
from skbeam.core.utils import angle_grid, radial_grid
shape = 800,800
x0,y0 = 401, 401
ANGLES = angle_grid((y0, x0), shape)
RADII = radial_grid((y0, x0), shape)
img = np.cos(ANGLES*5)**2*RADII**2
mask = np.ones_like((ANGLES))
mask[100:200] = 0
mask[:,100:200] = 0
mask[:,500:643] = 0
mask[70:130] = 0
img*=mask
plt.figure(0);plt.clf();
plt.imshow(mask*img)
# reconstruct this into a polar grid
from skbeam.core.accumulators.binned_statistic import RPhiBinnedStatistic
rphibinstat = RPhiBinnedStatistic(shape, bins=(400,360), mask=mask, origin=(y0,x0))
rphi_img = rphibinstat(img)
# create mask from np.nans since RPhiBinnedStatistic fills masked regions with np.nans
rphimask = np.ones_like(rphi_img)
rphimask[np.where(np.isnan(rphi_img))] = 0
#plotting partition and mask
plt.figure(2);plt.clf();
plt.subplot(1,2,1)
plt.imshow(rphi_img)
plt.subplot(1,2,2)
plt.imshow(rphimask)
# get angles and radii from (q, phi) polar coordinate system
angles = rphibinstat.bin_centers[1]
radii = rphibinstat.bin_centers[0]
# reproject
Zproj = construct_rphi_avg_image(radii, angles, rphi_img, shape=(800,800))
sym = int(10)
polar_shape = 500, 360
origin = x0, y0
def reconstruct_nfold(img, sym, polar_shape, mask=None, origin=None):
''' Reconstruct an image assuming a certain symmetry.
Parameters
----------
        img : the image
        sym : the symmetry of the sample
        polar_shape : the shape of the new polar coordinate image
        mask : optional mask of valid pixels (1 = valid, 0 = masked)
        origin : optional origin (centre) of the polar grid
Returns
-------
reconstructed_image : the reconstructed image
'''
shape = img.shape
rphibinstat = RPhiBinnedStatistic(shape, bins=polar_shape, mask=mask, origin=origin)
angles = rphibinstat.bin_centers[1]
radii = rphibinstat.bin_centers[0]
rphi_img = rphibinstat(img)
# create mask from np.nans since RPhiBinnedStatistic fills masked regions with np.nans
rphimask = np.ones_like(rphi_img)
rphimask[np.where(np.isnan(rphi_img))] = 0
reconstructed_image = np.zeros_like(img)
reconstructed_image_mask = np.zeros_like(img,dtype=int)
# symmetry
dphi = 2*np.pi/float(sym)
for i in range(sym):
anglesp = angles + dphi*i
imgtmp = construct_rphi_avg_image(radii, anglesp, rphi_img,
shape=shape, center=origin, mask=rphimask)
w = np.where(~np.isnan(imgtmp))
reconstructed_image[w] += imgtmp[w]
reconstructed_image_mask += (~np.isnan(imgtmp)).astype(int)
# the mask keeps count of included pixels. Average by this amount
reconstructed_image /= reconstructed_image_mask
return reconstructed_image
reconstructed_image = reconstruct_nfold(img, sym, polar_shape, mask=mask, origin=origin)
# And finally we have the reconstructed image assuming 5 fold symmetry
plt.figure(4);plt.clf()
plt.subplot(2,2,1).set_aspect("auto", adjustable="box");
plt.title("Image")
plt.imshow(img)
plt.subplot(2,2,2).set_aspect("equal")
plt.title("Reprojection no symmetry")
plt.imshow(Zproj)
plt.subplot(2,2,3).set_aspect("auto", adjustable="box")
plt.title("Reprojection assuming 10 fold symmetry")
plt.imshow(reconstructed_image)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tutorial - How to work with the OpenEnergy Platform (OEP)
Step2: 0.1 About the Database and used Packages
Step3: 1. Create a table / Table Architecture
Step4: <div class="alert alert-block alert-info">
Step5: <div class="alert alert-block alert-info">
Step6: <div class="alert alert-block alert-info">
Step7: <div class="alert alert-block alert-info">
Step8: 4. Insert Data into a table
Step9: <div class="alert alert-block alert-info">
Step10: <div class="alert alert-block alert-info">
Step11: If you want to insert multiple lines of data like two power plants in this case, you can insert a list of dictionaries with as many entries as the table has columns.
Step12: <div class="alert alert-block alert-info">
Step13: <div class="alert alert-block alert-info">
Step14: 6. Alter data in a table
Step15: <div class="alert alert-block alert-info">
|
<ASSISTANT_TASK:>
Python Code:
__copyright__ = "Reiner Lemoine Institut, Zentrum für nachhaltige Energiesysteme Flensburg"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE"
__author__ = "wolfbunke, Ludee"
import requests
import pandas as pd
from IPython.core.display import HTML
#
#
# oedb
oep_url= 'http://oep.iks.cs.ovgu.de/'
# token
your_token = ''
data = {"query":
{"name": "Windpark, Am Speller Sand",
"type": "wind_onshore",
"capacity": 20000,
"lat": 52.40731,
"lon": 7.46118
}}
# create table
schema = 'model_draft'
table = 'example_api_table_test'
data = { "query":
{ "columns": [{ "name":"id", "data_type": "bigserial", "is_nullable": "NO" },
{ "name":"name", "data_type": "varchar", "character_maximum_length": "50" },
{ "name":"type", "data_type": "varchar", "character_maximum_length": "20" },
{ "name":"capacity", "data_type": "decimal" },
{ "name":"lat", "data_type": "numeric" },
{ "name":"lon", "data_type": "numeric" } ],
"constraints": [ { "constraint_type": "PRIMARY KEY", "constraint_parameter": "id" } ]
} }
requests.put(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/',
json=data, headers={'Authorization': 'Token %s'%your_token} )
# check if exists
schema = 'model_draft'
table = 'example_api_table_test'
requests.get(oep_url+'/api/v0/schema/'+schema+'/tables/'+table)
# Delete your table
schema = 'model_draft'
table = 'example_api_table_test'
requests.delete(oep_url+'/api/v0/schema/'+schema+'/tables/'+table, headers={'Authorization': 'Token %s'%your_token} )
# check if exists
schema = 'model_draft'
table = 'example_api_table_test'
requests.get(oep_url+'/api/v0/schema/'+schema+'/tables/'+table)
# table columns
schema = 'model_draft'
table = 'example_api_table_test'
result = requests.get(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/columns')
json_result = result.json()
json_result['capacity']
# all:
#json_result
# insert data
schema = 'model_draft'
table = 'example_api_table_test'
data = {"query":
{"name": "Windpark, Am Speller Sand",
"type": "wind_onshore",
"capacity": 20000,
"lat": 52.40731,
"lon": 7.46118
}}
result = requests.post(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/rows/new',
json=data, headers={'Authorization': 'Token %s'%your_token} )
result.status_code
# show the id of the new row
json_result = result.json()
json_result['data']
# insert data
schema = 'model_draft'
table = 'example_api_table_test'
data = {"query": [{"name": "Anlage 2",
"type": "photovoltaics",
"capacity": 10,
"lat": 51.804783,
"lon": 11.686346},
{"name": "Anlage 3",
"type": "photovoltaic",
"capacity": 5.5,
"lat": 51.804783,
"lon": 11.6341573
}]}
result = requests.post(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/rows/new',
json=data, headers={'Authorization': 'Token %s'%your_token} )
result.status_code
# show the id of the new rows
json_result = result.json()
json_result['data']
# insert data to row 11
schema = 'model_draft'
table = 'example_api_table_test'
rownumber = '11'
data = {"query":
{"name": "Anlage 11",
"type": "photovoltaic",
"capacity": 5.5,
"lat": 51.804783,
"lon": 10.41573
}}
result = requests.put(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/rows/'+rownumber,
json=data, headers={'Authorization': 'Token %s'%your_token} )
result.status_code
# insert some data
# alter data
schema = 'model_draft'
table = 'example_api_table_test'
result = requests.get(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/rows/') # Load the names via GET
result.status_code
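# Optional: load the returned rows into a pandas DataFrame for easier inspection.
# Assumption: the rows endpoint returns a JSON list of row objects.
df = pd.DataFrame(result.json())
df.head()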
row = "1"
data = {"query": {"capacity": "2", "type": "pv"}}
result = requests.post(oep_url+'/api/v0/schema/'+schema+'/tables/'+table+'/rows/{id}'.format(id=row), json=data, headers={'Authorization': 'Token %s'%your_token})
result.status_code
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The length of a list is acquired by the len function
Step2: Lists can be initialised if their values are known at run time
Step3: Appending and extending lists
Step4: Note that the + operator does not modify the lists in place; rather, it returns a new list
Step5: Items can be removed from the lists too, with the pop function.
Step6: List comprehension
Step7: Generators
Step8: Anonymous inline functions (lambdas) can be used if the is_positive function isn't available
Step9: Mapping functions to lists
Step10: map and filter commands may be nested
Step11: The map function is very powerful, and makes type conversion very easy
Step12: Iterating through multiple lists
Step13: Note, if more than one iterable is passed into map.
Step14: Retrieving the position in the list
Step15: This can also be used in conjunction with the zip function
Step16: In many applications, it is helpful for the index variable to be offset
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
l1 = list()
l2 = []
print(l1)
print(l2)
print(len(l1))
print(len(l2))
l3 = [1, 2, 3]
print(l3)
print(len(l3))
l1.append(1)
print(l1)
l1.append(10)
print(l1)
l2.append(100)
print(l2)
print(l1)
print(l2)
print(l1 + l2)
l1.extend(l2)
print(l1)
print(l2)
l2 = [0, 1, 2, 3, 4, 5, 6]
l2.pop(1)
print(l2)
l2.pop()
print(l2)
print(l1)
l3 = [-2, -1, 0, 1, 2]
print(l3)
print(len(l3))
print([el for el in l3])
# Return the positive elements
print([el for el in l3 if el > 0])
# Return the negative elements
print([el for el in l3 if el < 0])
# Multiply the elements by two
print([el * 2 for el in l3])
# Multiply filtered elements by two
print([el * 2 for el in l3 if el <= 1])
def is_positive(el):
return el > 0
print(l3)
print(filter(is_positive, l3))
print(list(filter(is_positive, l3))) # python 3
# Return the positive elements
print(list(filter(lambda el: el > 0, l3)))
# Return the non-positive elements
print(list(filter(lambda el: el <= 0, l3)))
# Return elements outside of a range
print(list(filter(lambda el: el < -1 or el > 1, l3)))
# Return the elements found within a range (note the mathematical notation)
print(list(filter(lambda el: -1 <= el <= 1, l3)))
print([abs(el) for el in l3])
print(list(map(abs, l3)))
def add_one(item):
return item + 1
print(list(map(add_one, l3)))
print(list(map(lambda el: el * 2, filter(lambda el: el <= 1, l3))))
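# The same chained map/filter written as a single list comprehension (extra example)
print([el * 2 for el in l3 if el <= 1])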
print('Integer array:', list(map(int, l3)))
print(' Float array:', list(map(float, l3)))
print('Boolean array:', list(map(bool, l3)))
l4 = [1, 2, 3]
print('l3:', l3)
print('l4:', l4)
for el3, el4 in zip(l3, l4):
print(el3, el4)
l5 = l3 + l4
print(l5)
for el3, el4, el5 in zip(l3, l4, l5):
print(el3, el4, el5)
def add(l, r):
try:
        return l + r
except TypeError:
# Addition of `None` type is not defined
return None
def is_None(l, r):
return l is None or r is None
l5 = [5, 4, 3, 2, 1]
print(list(map(add, l4, l5)))
print(list(map(is_None, l4, l5)))
for index, value in enumerate(l1):
print(index, value)
for index, (el3, el4, el5) in enumerate(zip(l3, l4, l5)):
print(index, (el3, el4, el5))
for index, (el3, el4, el5) in enumerate(zip(l3, l4, l5), start=100):
print(index, (el3, el4, el5))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Aod Plus Ccn
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 13.3. External Mixture
Step59: 14. Optical Radiative Properties --> Radiative Scheme
Step60: 14.2. Shortwave Bands
Step61: 14.3. Longwave Bands
Step62: 15. Optical Radiative Properties --> Cloud Interactions
Step63: 15.2. Twomey
Step64: 15.3. Twomey Minimum Ccn
Step65: 15.4. Drizzle
Step66: 15.5. Cloud Lifetime
Step67: 15.6. Longwave Bands
Step68: 16. Model
Step69: 16.2. Processes
Step70: 16.3. Coupling
Step71: 16.4. Gas Phase Precursors
Step72: 16.5. Scheme Type
Step73: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ipsl', 'sandbox-1', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod_plus_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.external_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the data
Step2: Counting word frequency
Step3: Let's keep the first 10000 most frequent words. As Andrew noted, most of the words in the vocabulary are rarely used so they will have little effect on our predictions. Below, we'll sort vocab by the count value and keep the 10000 most frequent words.
Step4: What's the last word in our vocabulary? We can use this to judge if 10000 is too few. If the last word is pretty common, we probably need to keep more words.
Step5: The last word in our vocabulary shows up in 30 reviews out of 25000. I think it's fair to say this is a tiny proportion of reviews. We are probably fine with this number of words.
Step6: Text to vector function
Step7: If you do this right, the following code should return
Step8: Now, run through our entire review data set and convert each review to a word vector.
Step9: Train, Validation, Test sets
Step10: Building the network
Step11: Intializing the model
Step12: Training the network
Step13: Testing
Step14: Try out your own text!
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
reviews = pd.read_csv('reviews.txt', header=None)
labels = pd.read_csv('labels.txt', header=None)
reviews.head()
from collections import Counter
#Create the counter
total_counts = Counter()
#iter every row
for idx, row in reviews.iterrows():
#Review is contained in 0 position of the row (first column)
for word in row[0].split(' '):
total_counts[word] += 1
print("Total words in data set: ", len(total_counts))
total_counts.most_common()
vocab = sorted(total_counts, key=total_counts.get, reverse=True)[:10000]
print(vocab[:60])
print(vocab[-1], ': ', total_counts[vocab[-1]])
word2idx = {} ## create the word-to-index dictionary here
i = 0
for w in vocab:
word2idx[w] = i
i += 1
word2idx['the']
def text_to_vector(text):
word_vector = np.zeros(len(word2idx))
for word in text.split(' '):
index = word2idx.get(word, None)
if index != None:
word_vector[index] = 1
return word_vector
text_to_vector('The tea is for a party to celebrate '
'the movie so she has no time for a cake')[:65]
word_vectors = np.zeros((len(reviews), len(vocab)), dtype=np.int_)
for ii, (_, text) in enumerate(reviews.iterrows()):
word_vectors[ii] = text_to_vector(text[0])
# Printing out the first 5 word vectors
word_vectors[:5, :23]
Y = (labels=='positive').astype(np.int_)
records = len(labels)
shuffle = np.arange(records)
np.random.shuffle(shuffle)
test_fraction = 0.9
train_split, test_split = shuffle[:int(records*test_fraction)], shuffle[int(records*test_fraction):]
trainX, trainY = word_vectors[train_split,:], to_categorical(Y.values[train_split].reshape((len(Y.values[train_split]),)), 2)
testX, testY = word_vectors[test_split,:], to_categorical(Y.values[test_split].reshape((len(Y.values[test_split]),)), 2)
trainY
# Network building
def build_model():
# This resets all parameters and variables, leave this here
tf.reset_default_graph()
#### Your code ####
#input layer
net = tflearn.input_data([None, len(word2idx)])
#hidden layer
net = tflearn.fully_connected(net, n_units=200, activation='ReLU')
net = tflearn.fully_connected(net, n_units=25, activation='ReLU')
#output layer
net = tflearn.fully_connected(net, n_units=2, activation='softmax')
#training
net = tflearn.regression(net,
optimizer='sgd',
learning_rate=0.1,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=3, tensorboard_dir='model_dir')
return model
model = build_model()
# Training
model.fit(trainX, trainY, validation_set=0.1, show_metric=True, batch_size=128, n_epoch=20)
predictions = (np.array(model.predict(testX))[:,0] >= 0.5).astype(np.int_)
test_accuracy = np.mean(predictions == testY[:,0], axis=0)
print("Test accuracy: ", test_accuracy)
# Helper function that uses your model to predict sentiment
def test_sentence(sentence):
positive_prob = model.predict([text_to_vector(sentence.lower())])[0][1]
print('Sentence: {}'.format(sentence))
print('P(positive) = {:.3f} :'.format(positive_prob),
'Positive' if positive_prob > 0.5 else 'Negative')
sentence = "Moonlight is by far the best movie of 2016."
test_sentence(sentence)
sentence = "It's amazing anyone could be talented enough to make something this spectacularly awful"
test_sentence(sentence)
sentence = "Ace Ventura is the best movie ever! I wonder why Jim Carrey didn't won the Oscar"
test_sentence(sentence=sentence)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And here we have the standard deviations
Step2: If we maintain our key assumption from the Quantifying Scaling Accuracy notebook, namely that the single log-likelihood integrals are independent and identically distributed Gaussians, then with 100 samples the mean and variance are -8.46 and 0.06 respectively. Importance sampling is precise.
Step3: Single Likelihood Weights
Step4: These look very similar. A natural next thing to check is the correlation between the weight sequences. We see extremely high correlation.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib import rc
rc('text', usetex=True)
!head -n 5 likelihoodvariancetest.txt
multi = np.loadtxt('likelihoodvariancetest.txt')
multi1000 = np.loadtxt('likelihoodvariancetest1000samples.txt')
multi10000 = np.loadtxt('likelihoodvariancetest10000samples.txt')
plt.title('Log-Likelihood Distribution For Fixed Hypersample')
plt.ylabel('Density')
plt.xlabel('Log-Likelihood')
plt.hist(multi[:,4], bins=20, normed=True, alpha=0.5, label='100 samples')
plt.hist(multi1000[:,4], bins=20, normed=True, alpha=0.5, label='1000 samples')
plt.hist(multi10000[:,4], bins=20, normed=True, alpha=0.5, label='10000 samples')
plt.legend(loc=2);
print(np.std(multi[:,4]))
print(np.std(multi1000[:,4]))
print(np.std(multi10000[:,4]))
print(51.0639314938 / len(multi))
print(np.mean(multi[:,4]) / 115919)
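# Added sketch (assumes each row of column 4 is an independent repetition of the
# log-likelihood estimate): the Monte Carlo mean and its standard error,
# std / sqrt(N), for the three sample sizes compared above.
for name, arr in [('100', multi), ('1000', multi1000), ('10000', multi10000)]:
    ll = arr[:, 4]
    print('%s samples: mean = %.3f, std err = %.4f' % (name, ll.mean(), ll.std() / np.sqrt(len(ll))))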
single = np.zeros((115919,4))
i = j = 0
with open('singleintegralweightvariancetest.txt', 'r') as f:
for line in f:
if 'likelihood' in line:
j += 1
i = 0
else:
single[i,j] = float(line)
i += 1
f.close()
plt.hist(single[:,0], bins=50, alpha=0.4, label='l1')
plt.hist(single[:,1], bins=50, alpha=0.4, label='l2')
plt.hist(single[:,2], bins=50, alpha=0.4, label='l3')
plt.hist(single[:,3], bins=50, alpha=0.4, label='l4');
plt.title('Single Log-Likelihood Weight Histograms')
plt.ylabel('Count')
plt.xlabel('Log-Likelihood');
np.corrcoef(single.transpose())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Change data type
Step2: Rename the categories
Step3: Values in data frame have not changed
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
file_name_string = 'C:/Users/Charles Kelly/Desktop/Exercise Files/02_07/Begin/EmployeesWithGrades.xlsx'
employees_df = pd.read_excel(file_name_string, 'Sheet1', index_col=None, na_values=['NA'])
employees_df["Grade"] = employees_df["Grade"].astype("category")
employees_df["Grade"].cat.categories = ["excellent", "good", "acceptable", "poor", "unacceptable"]
employees_df.groupby('Grade').count()
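# Added sketch: confirm the column is categorical and that renaming the categories
# only relabelled the existing values rather than changing them.
print(employees_df["Grade"].dtype)
print(employees_df["Grade"].cat.categories)
employees_df.head()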
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a point source theory RVT motion
Step2: Create site profile
Step3: Create the site response calculator
Step4: Initialize the variations
Step5: Specify the output
Step6: Perform the calculation
Step7: Plot the outputs
Step8: The statistics of the output can be also retrieved and returned as either a dict or pandas.DataFrame.
Step9: Repeat using a Generic Depth-Dependent Model
Step10: Repeat using a Specific Depth-Dependent Model
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
import pysra
%matplotlib inline
# Increased figure sizes
plt.rcParams["figure.dpi"] = 120
m = pysra.motion.SourceTheoryRvtMotion(6.0, 30, "wna")
m.calc_fourier_amps()
profile = pysra.site.Profile(
[
pysra.site.Layer(
pysra.site.DarendeliSoilType(18.0, plas_index=0, ocr=1, stress_mean=100),
10,
400,
),
pysra.site.Layer(
pysra.site.DarendeliSoilType(18.0, plas_index=0, ocr=1, stress_mean=200),
10,
450,
),
pysra.site.Layer(
pysra.site.DarendeliSoilType(18.0, plas_index=0, ocr=1, stress_mean=400),
30,
600,
),
pysra.site.Layer(pysra.site.SoilType("Rock", 24.0, None, 0.01), 0, 1200),
]
)
calc = pysra.propagation.EquivalentLinearCalculator()
var_thickness = pysra.variation.ToroThicknessVariation()
var_velocity = pysra.variation.ToroVelocityVariation.generic_model("USGS C")
var_soiltypes = pysra.variation.SpidVariation(
-0.5, std_mod_reduc=0.15, std_damping=0.30
)
freqs = np.logspace(-1, 2, num=500)
outputs = pysra.output.OutputCollection(
[
pysra.output.ResponseSpectrumOutput(
# Frequency
freqs,
# Location of the output
pysra.output.OutputLocation("outcrop", index=0),
# Damping
0.05,
),
pysra.output.ResponseSpectrumRatioOutput(
# Frequency
freqs,
# Location in (denominator),
pysra.output.OutputLocation("outcrop", index=-1),
# Location out (numerator)
pysra.output.OutputLocation("outcrop", index=0),
# Damping
0.05,
),
pysra.output.InitialVelProfile(),
]
)
count = 20
for p in pysra.variation.iter_varied_profiles(
profile,
count,
var_thickness=var_thickness,
var_velocity=var_velocity,
var_soiltypes=var_soiltypes,
):
# Here we auto-descretize the profile for wave propagation purposes
calc(m, p.auto_discretize(), p.location("outcrop", index=-1))
outputs(calc)
for o in outputs[:-1]:
ax = o.plot(style="stats")
ax = outputs[-1].plot(style="stats")
outputs[-1].calc_stats()
outputs[-1].calc_stats(as_dataframe=True)
var_velocity_dd = pysra.variation.DepthDependToroVelVariation.generic_model("USGS C")
outputs.reset()
count = 20
for p in pysra.variation.iter_varied_profiles(
profile,
count,
var_thickness=var_thickness,
var_velocity=var_velocity_dd,
var_soiltypes=var_soiltypes,
):
calc(m, p.auto_discretize(), p.location("outcrop", index=-1))
outputs(calc)
for o in outputs[:-1]:
ax = o.plot(style="stats")
ax = outputs[-1].plot(style="stats")
outputs[-1].calc_stats(as_dataframe=True)
var_velocity_dd = pysra.variation.DepthDependToroVelVariation.generic_model(
"USGS C", depth=[0, 10, 20], ln_std=[0.25, 0.15, 0.10]
)
outputs.reset()
count = 20
for p in pysra.variation.iter_varied_profiles(
profile,
count,
var_thickness=var_thickness,
var_velocity=var_velocity_dd,
var_soiltypes=var_soiltypes,
):
calc(m, p.auto_discretize(), p.location("outcrop", index=-1))
outputs(calc)
for o in outputs[:-1]:
ax = o.plot(style="stats")
ax = outputs[-1].plot(style="stats")
outputs[-1].calc_stats(as_dataframe=True)
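# Added sketch (not from the original example): calc_stats(as_dataframe=True) returns a
# pandas DataFrame, so the profile statistics can be written out for later comparison of
# the three velocity-variation models. The output filename here is arbitrary.
stats_df = outputs[-1].calc_stats(as_dataframe=True)
stats_df.to_csv("velocity_profile_stats_depth_specific.csv")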
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For those of you unfamiliar with the MNIST dataset, it is a set of 70,000 28x28-pixel images depicting handwritten digits from 0-9. It is a commonly used dataset within image classification tutorials due to its clean nature and uniform structure, meaning that very little time must be spent wrangling the data.
Step2: This may actually be a harder observation for our network to classify as it is likely a 7; however, it could in fact be a 2 or 3 with the lower segment cropped. In any case, this gives us a feel for the type of images that we are modeling.
Step3: With the data reshaped, the only remaining steps are to standardise the data to mean 0 and standard deviation 1 and to encode the output variable. Whilst not entirely necessary, as each column holds values within the same range (0 to 255), it will nonetheless make backpropagation more efficient and avoid saturation. If you're interested in reading more on this then I recommend these two articles here and here. Whilst there is a preprocessing() function available in sklearn, we'll implement our own here just to demonstrate the point more verbosely.
Step4: The argument for a multiplier has been included here as you may wish to divide through by twice the standard deviation, as is the recommendation here. As for encoding the labels, we're going to apply one-hot encoding to them. What this means is that a k-leveled variable is transformed into k individual boolean columns; in our case k=10, so 10 new columns are created, the first indicating whether the image is a zero, the second whether it is a one, and so forth. You may be wondering why we must do this additional step, and the reason is simply that if we do not, the model will assume the outputs are ordered, so 1 is greater than 0 and so forth, often leading to poor predictions. This one-hot encoding can be achieved through the Keras function to_categorical().
Step5: Building The Model
Step6: The kernel_initializer argument initialises the sets of weights between each layer to be random draws from the normal distribution. When you build your own model you should not be too prescriptive in your choices of units and activation functions in your first two layers; instead, try out different sizes and functions. The input_dim and units arguments in the first and final layers respectively should be kept constant, however, as these are dictated by the shape of the data.
Step7: You'll notice in the above code snippet that verbose=0; I have done this to keep the tutorial clean. However, you may want to set verbose=1 when running yours, as you'll get useful output regarding the model's accuracy. Should you re-run this exact guide, you'll notice that the model's accuracy on the training data begins to plateau around 40 epochs.
Step8: An accuracy as high as this is by all standards very good; however, some tuning of our network can increase it further, although that is beyond the scope of this guide.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
# Load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(X_train.shape)
print(y_train.shape)
plt.imshow(X_train[123], cmap=plt.get_cmap('gray'))
def array_reshape(X, output_size):
X = X.reshape(output_size[0],output_size[1])
return X
X_train = array_reshape(X_train, [60000, 28**2])
X_test = array_reshape(X_test, [10000, 28**2])
def standardise_array(X, multiplier=1):
X = (X-np.mean(X))/(multiplier*np.std(X))
return X
X_train = standardise_array(X_train)
X_test = standardise_array(X_test)
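# Added sanity check: after standardisation the training data should have
# mean close to 0 and standard deviation close to 1.
print("train mean: {:.3f}, train std: {:.3f}".format(X_train.mean(), X_train.std()))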
y_train_enc = to_categorical(y_train)
y_test_enc = to_categorical(y_test)
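# Added illustration of the one-hot encoding described above: the first training
# label and its 10-column encoded form.
print("original label:", y_train[0])
print("one-hot encoded:", y_train_enc[0])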
model = Sequential()
model.add(Dense(units = 32, activation = "relu", input_dim = 28**2, kernel_initializer = "normal"))
model.add(Dense(units = 12, activation = "relu", kernel_initializer='normal'))
model.add(Dense(units = 10, activation = "softmax", kernel_initializer='normal'))
model.compile(loss = "categorical_crossentropy",
optimizer = "adam",
metrics = ["accuracy"])
np.random.seed(123)
model.fit(X_train, y_train_enc, epochs=50, batch_size=32, verbose=0)
accuracy = model.evaluate(X_test, y_test_enc, verbose=0)
print("The model has {}% accuracy on unseen testing data".format(np.round(accuracy[1]*100, 1)))
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.wrappers.scikit_learn import KerasClassifier
# Set the seed again for reproducabilty
np.random.seed(123)
# Create our entire arrays
X = np.vstack((X_train, X_test))
y = np.vstack((y_train_enc, y_test_enc))
# Wrap our model inside a function
def mnist_nn():
model = Sequential()
model.add(Dense(units = 32, activation = "relu", input_dim = 28**2, kernel_initializer = "normal"))
model.add(Dense(units = 12, activation = "relu", kernel_initializer='normal'))
model.add(Dense(units = 10, activation = "softmax", kernel_initializer='normal'))
model.compile(loss = "categorical_crossentropy",
optimizer = "adam",
metrics = ["accuracy"])
return model
# Place an Sklearn wrapper around our Keras Network
clf = KerasClassifier(build_fn=mnist_nn, epochs = 50, batch_size=32, verbose=0)
# Define the number of folds to be made
folds = KFold(n_splits=10, random_state=123)
# Run Cross-validation
accuracies = cross_val_score(clf, X, y, cv=folds)
# Obtain our final model's metrics
final_accuracy = np.mean(accuracies)
s_error = np.std(accuracies*100)/np.sqrt(10)
print("Final Accuracy: {}, with standard error: {}".format(np.round(final_accuracy*100,1), np.round(s_error, 1)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Unit Test
|
<ASSISTANT_TASK:>
Python Code:
def list_of_chars(list_chars):
    # Return None for None input, otherwise the reversed list of characters
    if list_chars is None:
        return None
    return list_chars[::-1]
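# Added quick manual check before running the unit tests below.
print(list_of_chars(['f', 'o', 'o']))  # expected: ['o', 'o', 'f']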
# %load test_reverse_string.py
from nose.tools import assert_equal
class TestReverse(object):
def test_reverse(self):
assert_equal(list_of_chars(None), None)
assert_equal(list_of_chars(['']), [''])
assert_equal(list_of_chars(
['f', 'o', 'o', ' ', 'b', 'a', 'r']),
['r', 'a', 'b', ' ', 'o', 'o', 'f'])
print('Success: test_reverse')
def main():
test = TestReverse()
test.test_reverse()
if __name__ == '__main__':
main()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Plotting
Step2: Spectrogram
Step3: Symbolic Math
|
<ASSISTANT_TASK:>
Python Code:
import soundfile as sf
sig, fs = sf.read('data/singing.wav')
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
t = np.arange(len(sig)) / fs
plt.plot(t, sig)
plt.xlabel('time / seconds')
plt.grid()
plt.specgram(sig, Fs=fs)
plt.ylabel('frequency / Hz')
plt.xlabel('time / seconds')
plt.ylim(0, 10000);
%matplotlib inline
import sympy as sp
sp.init_printing()
t, sigma, omega = sp.symbols(('t', 'sigma', 'omega'))
sigma = -2
omega = 10
s = sigma + sp.I * omega
x = sp.exp(s * t)
x
sp.plotting.plot(sp.re(x),(t, 0, 2 * sp.pi), ylim=[-2, 2], ylabel='Re{$e^{st}$}')
sp.plotting.plot(sp.im(x),(t, 0, 2 * sp.pi), ylim=[-2, 2], ylabel='Im{$e^{st}$}');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: torchvision for loading input images
Step2: Visualize the transforms done before model prediction
Step3: plot_waveform function from torchaudio
Step4: Using torchaudio to load audio and display waveform using plot_waveform
|
<ASSISTANT_TASK:>
Python Code:
import torchaudio
import torchvision
import torch
import matplotlib.pyplot as plt
from IPython.display import Audio
img = torchvision.io.read_image("data/birdie2.jpg")
img = torchvision.transforms.ToPILImage()(img)
display(img)
img = torchvision.io.read_image("data/birdie2.jpg")
normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
img = torchvision.transforms.Resize(256)(img)
img = torchvision.transforms.CenterCrop(224)(img).float()/255.
img = normalize(img)
img = (img * 255).to(torch.uint8)
img = torchvision.transforms.ToPILImage()(img)
display(img)
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f'Channel {c+1}')
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
wav_path = "data/ljspeech.wav"
metadata = torchaudio.info(wav_path)
print(metadata)
wav, sample_rate = torchaudio.load(wav_path)
display(Audio(wav, rate=sample_rate))
plot_waveform(wav, sample_rate)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load software and filenames definitions
Step2: Data folder
Step3: List of data files
Step4: Data load
Step5: Load the leakage coefficient from disk
Step6: Load the direct excitation coefficient ($d_{exAA}$) from disk
Step7: Load the gamma-factor ($\gamma$) from disk
Step8: Update d with the correction coefficients
Step9: Laser alternation selection
Step10: We need to define some parameters
Step11: We should check if everything is OK with an alternation histogram
Step12: If the plot looks good we can apply the parameters with
Step13: Measurement info
Step14: Or check the measurement duration
Step15: Compute background
Step16: Burst search and selection
Step17: Donor Leakage fit
Step18: Burst sizes
Step19: Fret fit
Step20: Weighted mean of $E$ of each burst
Step21: Gaussian fit (no weights)
Step22: Gaussian fit (using burst size as weights)
Step23: Stoichiometry fit
Step24: The Maximum likelihood fit for a Gaussian population is the mean
Step25: Computing the weighted mean and weighted standard deviation we get
Step26: Save data to file
Step27: The following string contains the list of variables to be saved. When saving, the order of the variables is preserved.
Step28: This is just a trick to format the different variables
|
<ASSISTANT_TASK:>
Python Code:
data_id = "7d"  # measurement to analyze; in batch runs this value is injected externally
from fretbursts import *
init_notebook()
from IPython.display import display
data_dir = './data/singlespot/'
import os
data_dir = os.path.abspath(data_dir) + '/'
assert os.path.exists(data_dir), "Path '%s' does not exist." % data_dir
from glob import glob
file_list = sorted(f for f in glob(data_dir + '*.hdf5') if '_BKG' not in f)
## Selection for POLIMI 2012-11-26 datatset
labels = ['17d', '27d', '7d', '12d', '22d']
files_dict = {lab: fname for lab, fname in zip(labels, file_list)}
files_dict
data_id
d = loader.photon_hdf5(filename=files_dict[data_id])
leakage_coeff_fname = 'results/usALEX - leakage coefficient DexDem.csv'
leakage = np.loadtxt(leakage_coeff_fname)
print('Leakage coefficient:', leakage)
dir_ex_coeff_fname = 'results/usALEX - direct excitation coefficient dir_ex_aa.csv'
dir_ex_aa = np.loadtxt(dir_ex_coeff_fname)
print('Direct excitation coefficient (dir_ex_aa):', dir_ex_aa)
gamma_fname = 'results/usALEX - gamma factor - all-ph.csv'
gamma = np.loadtxt(gamma_fname)
print('Gamma-factor:', gamma)
d.leakage = leakage
d.dir_ex = dir_ex_aa
d.gamma = gamma
d.ph_times_t[0][:3], d.ph_times_t[0][-3:]#, d.det_t
print('First and last timestamps: {:10,} {:10,}'.format(d.ph_times_t[0][0], d.ph_times_t[0][-1]))
print('Total number of timestamps: {:10,}'.format(d.ph_times_t[0].size))
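# Note on the us-ALEX parameters set below: det_donor_accept maps detector channels to
# the donor and acceptor, alex_period is the laser alternation period in timestamp units,
# and D_ON/A_ON are the donor- and acceptor-excitation windows within that period
# (the numerical values are specific to this measurement setup).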
d.add(det_donor_accept=(0, 1), alex_period=4000, D_ON=(2850, 580), A_ON=(900, 2580), offset=0)
plot_alternation_hist(d)
loader.alex_apply_period(d)
print('D+A photons in D-excitation period: {:10,}'.format(d.D_ex[0].sum()))
print('D+A photons in A-excitation period: {:10,}'.format(d.A_ex[0].sum()))
d
d.time_max
d.calc_bg(bg.exp_fit, time_s=60, tail_min_us='auto', F_bg=1.7)
dplot(d, timetrace_bg)
d.rate_m, d.rate_dd, d.rate_ad, d.rate_aa
d.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel('all'))
print(d.ph_sel)
dplot(d, hist_fret);
# if data_id in ['7d', '27d']:
# ds = d.select_bursts(select_bursts.size, th1=20)
# else:
# ds = d.select_bursts(select_bursts.size, th1=30)
ds = d.select_bursts(select_bursts.size, add_naa=False, th1=30)
n_bursts_all = ds.num_bursts[0]
def select_and_plot_ES(fret_sel, do_sel):
ds_fret= ds.select_bursts(select_bursts.ES, **fret_sel)
ds_do = ds.select_bursts(select_bursts.ES, **do_sel)
bpl.plot_ES_selection(ax, **fret_sel)
bpl.plot_ES_selection(ax, **do_sel)
return ds_fret, ds_do
ax = dplot(ds, hist2d_alex, S_max_norm=2, scatter_alpha=0.1)
if data_id == '7d':
fret_sel = dict(E1=0.60, E2=1.2, S1=0.2, S2=0.9, rect=False)
do_sel = dict(E1=-0.2, E2=0.5, S1=0.8, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '12d':
fret_sel = dict(E1=0.30,E2=1.2,S1=0.131,S2=0.9, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.8, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '17d':
fret_sel = dict(E1=0.01, E2=0.98, S1=0.14, S2=0.88, rect=False)
do_sel = dict(E1=-0.4, E2=0.4, S1=0.80, S2=2, rect=False)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '22d':
fret_sel = dict(E1=-0.16, E2=0.6, S1=0.2, S2=0.80, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.85, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
elif data_id == '27d':
fret_sel = dict(E1=-0.1, E2=0.5, S1=0.2, S2=0.82, rect=False)
do_sel = dict(E1=-0.2, E2=0.4, S1=0.88, S2=2, rect=True)
ds_fret, ds_do = select_and_plot_ES(fret_sel, do_sel)
n_bursts_do = ds_do.num_bursts[0]
n_bursts_fret = ds_fret.num_bursts[0]
n_bursts_do, n_bursts_fret
d_only_frac = 1.*n_bursts_do/(n_bursts_do + n_bursts_fret)
print('D-only fraction:', d_only_frac)
dplot(ds_fret, hist2d_alex, scatter_alpha=0.1);
dplot(ds_do, hist2d_alex, S_max_norm=2, scatter=False);
bandwidth = 0.03
E_range_do = (-0.1, 0.15)
E_ax = np.r_[-0.2:0.401:0.0002]
E_pr_do_kde = bext.fit_bursts_kde_peak(ds_do, bandwidth=bandwidth, weights='size',
x_range=E_range_do, x_ax=E_ax, save_fitter=True)
mfit.plot_mfit(ds_do.E_fitter, plot_kde=True, bins=np.r_[E_ax.min(): E_ax.max(): bandwidth])
plt.xlim(-0.3, 0.5)
print("%s: E_peak = %.2f%%" % (ds.ph_sel, E_pr_do_kde*100))
nt_th1 = 50
dplot(ds_fret, hist_size, which='all', add_naa=False)
xlim(-0, 250)
plt.axvline(nt_th1)
Th_nt = np.arange(35, 120)
nt_th = np.zeros(Th_nt.size)
for i, th in enumerate(Th_nt):
ds_nt = ds_fret.select_bursts(select_bursts.size, th1=th)
nt_th[i] = (ds_nt.nd[0] + ds_nt.na[0]).mean() - th
plt.figure()
plot(Th_nt, nt_th)
plt.axvline(nt_th1)
nt_mean = nt_th[np.where(Th_nt == nt_th1)][0]
nt_mean
E_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, bandwidth=bandwidth, weights='size')
E_fitter = ds_fret.E_fitter
E_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
E_fitter.fit_histogram(mfit.factory_gaussian(center=0.5))
E_fitter.fit_res[0].params.pretty_print()
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(E_fitter, ax=ax[0])
mfit.plot_mfit(E_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, E_pr_fret_kde*100))
display(E_fitter.params*100)
ds_fret.fit_E_m(weights='size')
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.03], weights=None)
ds_fret.fit_E_generic(fit_fun=bl.gaussian_fit_hist, bins=np.r_[-0.1:1.1:0.005], weights='size')
E_kde_w = E_fitter.kde_max_pos[0]
E_gauss_w = E_fitter.params.loc[0, 'center']
E_gauss_w_sig = E_fitter.params.loc[0, 'sigma']
E_gauss_w_err = float(E_gauss_w_sig/np.sqrt(ds_fret.num_bursts[0]))
E_gauss_w_fiterr = E_fitter.fit_res[0].params['center'].stderr
E_kde_w, E_gauss_w, E_gauss_w_sig, E_gauss_w_err, E_gauss_w_fiterr
S_pr_fret_kde = bext.fit_bursts_kde_peak(ds_fret, burst_data='S', bandwidth=0.03) #weights='size', add_naa=True)
S_fitter = ds_fret.S_fitter
S_fitter.histogram(bins=np.r_[-0.1:1.1:0.03])
S_fitter.fit_histogram(mfit.factory_gaussian(), center=0.5)
fig, ax = plt.subplots(1, 2, figsize=(14, 4.5))
mfit.plot_mfit(S_fitter, ax=ax[0])
mfit.plot_mfit(S_fitter, plot_model=False, plot_kde=True, ax=ax[1])
print('%s\nKDE peak %.2f ' % (ds_fret.ph_sel, S_pr_fret_kde*100))
display(S_fitter.params*100)
S_kde = S_fitter.kde_max_pos[0]
S_gauss = S_fitter.params.loc[0, 'center']
S_gauss_sig = S_fitter.params.loc[0, 'sigma']
S_gauss_err = float(S_gauss_sig/np.sqrt(ds_fret.num_bursts[0]))
S_gauss_fiterr = S_fitter.fit_res[0].params['center'].stderr
S_kde, S_gauss, S_gauss_sig, S_gauss_err, S_gauss_fiterr
S = ds_fret.S[0]
S_ml_fit = (S.mean(), S.std())
S_ml_fit
weights = bl.fret_fit.get_weights(ds_fret.nd[0], ds_fret.na[0], weights='size', naa=ds_fret.naa[0], gamma=1.)
S_mean = np.dot(weights, S)/weights.sum()
S_std_dev = np.sqrt(
np.dot(weights, (S - S_mean)**2)/weights.sum())
S_wmean_fit = [S_mean, S_std_dev]
S_wmean_fit
sample = data_id
variables = ('sample n_bursts_all n_bursts_do n_bursts_fret '
'E_kde_w E_gauss_w E_gauss_w_sig E_gauss_w_err E_gauss_w_fiterr '
'S_kde S_gauss S_gauss_sig S_gauss_err S_gauss_fiterr '
'E_pr_do_kde nt_mean\n')
variables_csv = variables.replace(' ', ',')
fmt_float = '{%s:.6f}'
fmt_int = '{%s:d}'
fmt_str = '{%s}'
fmt_dict = {**{'sample': fmt_str},
**{k: fmt_int for k in variables.split() if k.startswith('n_bursts')}}
var_dict = {name: eval(name) for name in variables.split()}
var_fmt = ', '.join([fmt_dict.get(name, fmt_float) % name for name in variables.split()]) + '\n'
data_str = var_fmt.format(**var_dict)
print(variables_csv)
print(data_str)
# NOTE: The file name should be the notebook name but with .csv extension
with open('results/usALEX-5samples-E-corrected-all-ph.csv', 'a') as f:
f.seek(0, 2)
if f.tell() == 0:
f.write(variables_csv)
f.write(data_str)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: QUESTIONS
Step2: From Decision Tree to Random Forest
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import load_iris
from sklearn.cross_validation import train_test_split
# Let's load the iris dataset
iris = load_iris()
X, y = iris.data, iris.target
# split data into training and test sets using the handy train_test_split func
# in this split, we "hold out" 30% of the samples and their labels (placed into X_test and y_test)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
# Let's try a decision tree classification method
from sklearn import tree
t = tree.DecisionTreeClassifier(max_depth = 4,
criterion = 'entropy',
class_weight = 'balanced',
random_state = 2)
t.fit(X_train, y_train)
t.score(X_test, y_test) # what performance metric is this?
# What was the label associated with this test sample? ("held out" sample's original label)
# Let's predict on our "held out" sample
y_pred = t.predict(X_test)
print(y_pred)
# fill in the blank below
# how did our prediction do for first sample in test dataset?
print("Prediction: %d, Original label: %d" % (y_pred[0], y_test[0])) # <-- fill in blank
# Here's a nifty way to cross-validate (useful for quick model evaluation!)
from sklearn import cross_validation
t = tree.DecisionTreeClassifier(max_depth = 4,
criterion = 'entropy',
class_weight = 'balanced',
random_state = 2)
# splits, fits and predicts all in one with a score (does this multiple times)
score = cross_validation.cross_val_score(t, X, y)
score
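# Added summary of the fold scores returned by cross_val_score.
print("mean CV accuracy: {:.3f} (+/- {:.3f})".format(score.mean(), score.std()))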
from sklearn.tree import export_graphviz
import graphviz
# Let's rerun the decision tree classifier
from sklearn import tree
t = tree.DecisionTreeClassifier(max_depth = 4,
criterion = 'entropy',
class_weight = 'balanced',
random_state = 2)
t.fit(X_train, y_train)
t.score(X_test, y_test) # what performance metric is this?
export_graphviz(t, out_file="mytree.dot",
feature_names=iris.feature_names,
class_names=iris.target_names,
filled=True, rounded=True,
special_characters=True)
with open("mytree.dot") as f:
dot_graph = f.read()
graphviz.Source(dot_graph, format = 'png')
from sklearn.datasets import load_iris
import pandas as pd
import numpy as np
iris = load_iris()
X, y = iris.data, iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(max_depth=4,
criterion = 'entropy',
n_estimators = 100,
class_weight = 'balanced',
n_jobs = -1,
random_state = 2)
#forest = RandomForestClassifier()
forest.fit(X_train, y_train)
y_preds = iris.target_names[forest.predict(X_test)]
forest.score(X_test, y_test)
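# Added sketch: a fitted random forest exposes per-feature importances; pairing them
# with the iris feature names shows which measurements drive the splits.
for name, importance in zip(iris.feature_names, forest.feature_importances_):
    print("{}: {:.3f}".format(name, importance))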
# Here's a nifty way to cross-validate (useful for model evaluation!)
from sklearn import cross_validation
# reinitialize classifier
forest = RandomForestClassifier(max_depth=4,
criterion = 'entropy',
n_estimators = 100,
class_weight = 'balanced',
n_jobs = -1,
random_state = 2)
score = cross_validation.cross_val_score(forest, X, y)
score
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Mmr
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 14. Optical Radiative Properties --> Radiative Scheme
Step59: 14.2. Shortwave Bands
Step60: 14.3. Longwave Bands
Step61: 15. Optical Radiative Properties --> Cloud Interactions
Step62: 15.2. Twomey
Step63: 15.3. Twomey Minimum Ccn
Step64: 15.4. Drizzle
Step65: 15.5. Cloud Lifetime
Step66: 15.6. Longwave Bands
Step67: 16. Model
Step68: 16.2. Processes
Step69: 16.3. Coupling
Step70: 16.4. Gas Phase Precursors
Step71: 16.5. Scheme Type
Step72: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mohc', 'hadgem3-gc31-lm', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: load data
Step3: Word Vectors Pre Trained
Step4: don't need a word-to-id dict since this is indexed with words
Step5: for tensorboard
Step6: building word vectors of 200d for model
Step7: fill in biolab vectors available
Step8: total words not updated with training from biolab
Step9: gcloud tensorboard serving
Step10: for http
Step11: write to checkpoint file
Step12: FastText Vectors
Step13: saving all trained fast text vectors
Step14: Viewing word vectors
|
<ASSISTANT_TASK:>
Python Code:
import sys
import os
import re
import collections
import itertools
import bcolz
import pickle
sys.path.append('../lib')
import gc
import random
import smart_open
import h5py
import csv
import tensorflow as tf
import gensim
import datetime as dt
from tqdm import tqdm_notebook as tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
random_state_number = 967898
from tensorflow.python.client import device_lib
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
get_available_gpus()
%pylab
%matplotlib inline
%load_ext autoreload
%autoreload
pd.options.mode.chained_assignment = None
pd.options.display.max_columns = 999
color = sns.color_palette()
corpus_vocab_list, corpus_wordidx = None, None
with open('processed/stage1/vocab_words_wordidx.pkl', 'rb') as f:
    (corpus_vocab_list, corpus_wordidx) = pickle.load(f)
# alias the loaded vocabulary under the shorter names used in the rest of this notebook
vocab_words, vocab_wordidx = corpus_vocab_list, corpus_wordidx
print(len(corpus_vocab_list), len(corpus_wordidx))
store = pd.HDFStore('processed/stage1/data_frames.h5')
train_df = store['train_df']
test_df = store['test_df']
from gensim.models.keyedvectors import KeyedVectors
biolab_keyed_vectors_pubmed_pmc_wiki = KeyedVectors.load_word2vec_format('external/biolab_wvs/wikipedia-pubmed-and-PMC-w2v.bin', binary=True)
biolab_words_pubmed_pmc_wiki = biolab_keyed_vectors_pubmed_pmc_wiki.vocab.keys()
biolab_words = set(biolab_words_pubmed_pmc_wiki)
len(biolab_words)
vocab_biolab = set(biolab_words) & set(vocab_words)
print (len(vocab_biolab))
vocab_biolab
vocab_not_in_biolab =set(vocab_words) - set(biolab_words)
print(len(vocab_not_in_biolab))
vocab_not_in_biolab
undesirable_ascii_characters = list(range(32))
undesirable_ascii_characters.remove(10) #keep new line since this might be used for sentence tokenizer
undesirable_charmap = dict.fromkeys(undesirable_ascii_characters)
from nltk import word_tokenize
from utils import custom_word_tokenizer, apply_custom_regx
custom_tokenized_biolab_pubmed_pmc_wiki_wv = {}
for word in vocab_biolab:
vector = biolab_keyed_vectors_pubmed_pmc_wiki.word_vec(word)
custom_tokenized_biolab_pubmed_pmc_wiki_wv[word.lower()] = vector
word = word.lower().encode('ascii', 'ignore').decode('utf-8', 'ignore')
word = str(word).translate(undesirable_charmap)
word = apply_custom_regx(word)
word = word.replace('\\t', '')
for part in word_tokenize(word):
if part in custom_tokenized_biolab_pubmed_pmc_wiki_wv:
custom_tokenized_biolab_pubmed_pmc_wiki_wv[part] += vector
custom_tokenized_biolab_pubmed_pmc_wiki_wv[part] /= 2
len(custom_tokenized_biolab_pubmed_pmc_wiki_wv)
tb_vocab_size=5000
tb_vocab_biolab = list(vocab_biolab)[:tb_vocab_size]
with open("view_wvs_tb/tb_vocab.tsv", "w") as fp:
wr = csv.writer(fp, delimiter='\n')
wr.writerow(tb_vocab_biolab)
tb_word_vectors = np.random.randn(tb_vocab_size, 200)
for i,word in enumerate(tb_vocab_biolab):
tb_word_vectors[i] = custom_tokenized_biolab_pubmed_pmc_wiki_wv[word]
%autoreload
from utils import visualize_embeddings_in_tensorboard
visualize_this_embedding = tb_word_vectors
print(visualize_this_embedding.shape)
metadata_path = "/home/bicepjai/Projects/dsotc/data_prep/view_wvs_tb/tb_vocab.tsv"
visualize_embeddings_in_tensorboard(visualize_this_embedding, metadata_path, "/home/bicepjai/Projects/dsotc/data_prep/view_wvs_tb")
del tb_word_vectors
corpus_word_vectors = np.random.randn(len(vocab_words), 200)
corpus_word_vectors.shape
for word in vocab_biolab:
dataset_corpus_word_index = vocab_wordidx[word]
corpus_word_vectors[dataset_corpus_word_index] = custom_tokenized_biolab_pubmed_pmc_wiki_wv[word]
words_not_updated = set(vocab_words) - vocab_biolab
len(words_not_updated)
words_not_updated
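# Added sketch: fraction of the corpus vocabulary that received a pre-trained biolab
# vector versus words left at their random initialisation.
coverage = len(vocab_biolab) / len(vocab_words)
print("biolab coverage: {:.1%} ({} of {} words)".format(coverage, len(vocab_biolab), len(vocab_words)))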
np.save("processed/stage1/biolab_updated_wvs.npy", corpus_word_vectors)
dataset_corpus_words_list = np.load("dataset_corpus_words_list.npy")
corpus_word_vectors = np.load("corpus_word_vectors.npy")
tb_vocab_size = 10000
local_tb_dir = "/home/bicepjai/Projects/ml-compete/kaggle/mskrct/data_prep_2_ft/model_wv_visualize/gcloud/"
with open(local_tb_dir+"/vocab.tsv", "w") as fp:
wr = csv.writer(fp, delimiter='\n')
wr.writerow(dataset_corpus_words_list[:tb_vocab_size])
# np.savetxt("model_wv_visualize/word_vectors.tsv",corpus_word_vectors[:tb_vocab_size], delimiter='\t')
!rm $local_tb_dir/checkpoint
!ls $local_tb_dir
from word2vec import visualize_embeddings_in_tensorboard
visualize_this_embedding = corpus_word_vectors[:tb_vocab_size]
print(visualize_this_embedding.shape)
# path for gcloud tensorboard
metadata_path = "/home/bicepjai/projects/tb_visual/vocab.tsv"
# metadata_path = "/home/bicepjai/Projects/ml-compete/kaggle/mskrct/data_prep_2_ft/model_wv_visualize/vocab.tsv"
visualize_embeddings_in_tensorboard(visualize_this_embedding, metadata_path, local_tb_dir)
checkpoint_txt = "model_checkpoint_path: \"/home/bicepjai/projects/tb_visual/visual_embed.ckpt-1\"\n\
all_model_checkpoint_paths: \"/home/bicepjai/projects/tb_visual/visual_embed.ckpt-1\""
with open(local_tb_dir+"/checkpoint","w") as f:
f.seek(0)
f.truncate()
f.write(checkpoint_txt)
fasttext_vec_file = "processed/stage2/pretrained_word_vectors/ft_sg_200d_10e.vec"
ft_lines = None
with open(fasttext_vec_file,"r") as f:
ft_lines = f.readlines()
print(ft_lines[0])
print(type(ft_lines), len(ft_lines))
ft_shape = tuple([int(i.strip()) for i in ft_lines[0].split()])
ft_shape
print(len(ft_lines[1].split()))
ft_lines[1]
ft_vocab_size=ft_shape[0]
ft_vocab_size
ft_word_vectors = np.random.randn(ft_vocab_size, ft_shape[1])
ft_words = []
for i, line in enumerate(ft_lines[1:]):
str_list =line.split()
ft_words.append(str_list[0].strip())
vec = np.array([np.float(f) for f in str_list[1:]])
ft_word_vectors[i] = vec
ft_word_vectors.shape
a = list(ft_words)
a.sort(key=len, reverse=True)
print(a[:10])
del a
ft_wordidx = {w:i for i,w in enumerate(ft_words)}
ft_vocab_size, len(ft_wordidx)
len(set(vocab_words) - set(ft_words))
set(vocab_words) - set(ft_words)
%autoreload
import global_utils
fasttext_vec_file="/home/bicepjai/Projects/dsotc/data_prep/processed/stage1/pretrained_word_vectors/ft_cbow_200d_20e.vec"
wvs = global_utils.get_corpus_wvs_from_ft(fasttext_vec_file, 200, vocab_words)
wvs.shape
%ll /home/bicepjai/Projects/dsotc/data_prep/processed/stage1/pretrained_word_vectors
len(vocab_words)
%autoreload
import global_utils
ft_vector_files = [
    (100,"ft_cbow_100d_20e"),(200,"ft_cbow_200d_20e"),(300,"ft_cbow_300d_20e"),
    (100,"ft_sg_100d_20e"),(200,"ft_sg_200d_20e"),(300,"ft_sg_300d_20e"),
    (100,"ft_cbow_100d_50e"),(200,"ft_cbow_200d_50e"),(300,"ft_cbow_300d_50e"),
    (100,"ft_sg_100d_50e"),(200,"ft_sg_200d_50e"),(300,"ft_sg_300d_50e"),
    (100,"ft_cbow_100d_100e"),(200,"ft_cbow_200d_100e"),(300,"ft_cbow_300d_100e"),
    (100,"ft_sg_100d_100e"),(200,"ft_sg_200d_100e"),(300,"ft_sg_300d_100e")
]
for dim_file_name in ft_vector_files:
file_path = "/home/bicepjai/Projects/dsotc/data_prep/processed/stage1/pretrained_word_vectors/"+dim_file_name[1]+".vec"
dim = dim_file_name[0]
if not os.path.exists(file_path):
print("file doesnt exist",file_path)
continue
ft_vec = global_utils.get_corpus_wvs_from_ft(file_path, dim, vocab_words)
    print(file_path, ft_vec.shape)
np.save("processed/stage1/pretrained_word_vectors/"+dim_file_name[1]+".npy", ft_vec)
%autoreload
import global_utils
WORD_EMB_SIZE=200
ft_file_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/processed/stage1/pretrained_word_vectors/ft_sg_200d_50e.vec"
trained_embeddings = global_utils.get_embeddings_from_ft(ft_file_path, WORD_EMB_SIZE, corpus_vocab_list)
trained_embeddings.shape
tb_vocab_size=5000
tb_vocab_biolab = list(trained_embeddings)[:tb_vocab_size]
with open("view_wvs_tb/tb_vocab.tsv", "w") as fp:
wr = csv.writer(fp, delimiter='\n')
wr.writerow(corpus_vocab_list)
tb_word_vectors = np.random.randn(tb_vocab_size, 200)
for i,word in enumerate(tb_vocab_biolab):
tb_word_vectors[i] = trained_embeddings[i]
%autoreload
from utils import visualize_embeddings_in_tensorboard
visualize_this_embedding = tb_word_vectors
print(visualize_this_embedding.shape)
metadata_path = "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/view_wvs_tb/tb_vocab.tsv"
visualize_embeddings_in_tensorboard(visualize_this_embedding, metadata_path, "/home/bicepjai/Projects/Deep-Survey-Text-Classification/data_prep/view_wvs_tb")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Read epochs for all channels, removing a bad one
Step3: Transform to source space
Step4: Transform to common cortical space
Step5: It's a good idea to spatially smooth the data, and for visualization we morph the data to the fsaverage source space
Step6: Finally, we want to compare the overall activity levels in each condition, so we take absolute values and form a paired contrast
Step7: Compute statistic
Step8: Visualize the clusters
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.epochs import equalize_epoch_counts
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
event_id = 1 # L auditory
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
event_id = 3 # L visual
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
equalize_epoch_counts([epochs1, epochs2])
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
sample_vertices = [s['vertno'] for s in inverse_operator['src']]
# Let's average and compute inverse, resampling to speed things up
evoked1 = epochs1.average()
evoked1.resample(50, npad='auto')
condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)
evoked2 = epochs2.average()
evoked2.resample(50, npad='auto')
condition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition1.crop(0, None)
condition2.crop(0, None)
tmin = condition1.tmin
tstep = condition1.tstep
n_vertices_sample, n_times = condition1.data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 2) * 10
X[:, :, :, 0] += condition1.data[:, :, np.newaxis]
X[:, :, :, 1] += condition2.data[:, :, np.newaxis]
fsave_vertices = [np.arange(10242), np.arange(10242)]
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 2)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)
X = np.abs(X) # only magnitude
X = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))
# Note that X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions
X = np.transpose(X, [2, 1, 0])
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.001
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
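# Printing the cluster-forming threshold is a cheap sanity check before the
# (potentially long) clustering step; with few subjects it is a fairly large t-value.
print('Cluster-forming threshold: t = %0.2f' % t_threshold)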
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=1,
threshold=t_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot(hemi='both', views='lateral',
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.save_image('clusters.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <div id='intro' />
Step2: b) Indexing (view)
Step3: c) Indexing (copy)
Step4: d) Vectorization (THE HEART OF THE HEART OF NUMERICAL COMPUTING!!)
Step5: e) Broadcasting
Step6: f) Reduction
Step7: g) Examples
Step8: <div id='matplotlib' />
Step9: <div id='scipy' />
Step10: <div id='sympy' />
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import sympy as sym
from scipy.integrate import odeint
import scipy.linalg as spla
from scipy import optimize, interpolate
from scipy.linalg import solve_triangular, toeplitz, lu
from scipy.optimize import root
# pip install bitstring
import bitstring as bs
%matplotlib inline
sym.init_printing()
from ipywidgets import interact, widgets, fixed, IntSlider, interact_manual, Checkbox, RadioButtons
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mpl
mpl.rcParams['font.size'] = 14
mpl.rcParams['axes.labelsize'] = 20
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
from matplotlib import cm
# pip install memory_profiler
%load_ext memory_profiler
from sklearn import datasets
from colorama import Fore, Back, Style
# https://pypi.org/project/colorama/
# Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
# Style: DIM, NORMAL, BRIGHT, RESET_ALL
textBold = lambda x: Style.BRIGHT+x+Style.RESET_ALL
textBoldH = lambda x: Style.BRIGHT+Back.YELLOW+x+Style.RESET_ALL
textBoldB = lambda x: Style.BRIGHT+Back.BLUE+Fore.BLACK+x+Style.RESET_ALL
textBoldR = lambda x: Style.BRIGHT+Back.RED+Fore.BLACK+x+Style.RESET_ALL
# NumPy array
a1 = np.arange(12)
# NumPy ndarray
a2 = np.array([[1, 2, 3], [4, 5, 6]], float)
a3 = np.reshape(a1,(4,3))
print(textBoldB(' Printing their values '))
print(textBoldH('a1:\n'),a1)
print(textBoldH('a2:\n'),a2)
print(textBoldH('a3:\n'),a3)
print(textBoldB(' Printing their datatypes '))
print(textBoldH('a1:\n'),a1.dtype)
print(textBoldH('a2:\n'),a2.dtype)
print(textBoldH('a3:\n'),a3.dtype)
print(textBoldB(' Printing their type name '))
print(textBoldH('a1:\n'),type(a1).__name__)
print(textBoldH('a2:\n'),type(a2).__name__)
print(textBoldH('a3:\n'),type(a3).__name__)
print(textBoldB(' Showing original variable a3 '))
print(textBoldH('a3\n'),a3)
print(textBoldB(' Showing a subset of elements (SLICES) of variable a3 \n the second and third columns '))
print(textBoldH('a3[:,1:]\n'),a3[:,1:])
print(textBoldB(' Storing the outcome in a4 '))
a4 = a3[:,1:]
print(textBoldB(' What does it mean that it is a "view"? '))
print(textBoldB(' Let\'s change the original a3 and see what happens to a4. '))
a3[0,1]=100
print(textBoldH('a4\n'), a4)
print(textBoldB(' a4 has changed too! So a4 is a "view" of part of a3. '))
print('\n')
print(textBoldB(' So, how do we copy it?'))
print(textBoldB(' Let\'s use np.copy to copy the "view" obtained before,'+
                '\n then modify the original variable and see what happens'))
a5 = np.copy(a3[:,1:])
print(textBoldH('a3\n'), a3)
print(textBoldH('a4=a3[:,1:]\n'), a4)
print(textBoldH('a5=np.copy(a3[:,1:])\n'), a5)
a3[0,1]=1
print(textBoldB(' Changing a3 as "a3[0,1]=1"'))
print(textBoldH('a3\n'), a3)
print(textBoldH('a4\n'), a4)
print(textBoldH('a5\n'), a5)
print(textBoldB(' Thus, changing a3 has changed a4 since it is a "view", and has not changed a5 since it is a copy. '))
print(textBoldB(' Another way to extract slices is with steps: '))
a6 = a3[:,::2]
print(textBoldH('a3\n'), a3)
print(textBoldH('a6 = a3[:,::2]\n'), a6)
print(textBoldB(' In this case we obtained the first and third columns. \n '+
                ' The pattern considers ini:end:step, where "ini" is the first index considered,\n'+
                ' "end" means the pattern is considered up to index "end-1",\n'+
                ' and "step" indicates the step for selecting the next index (1 by default).'))
print(textBoldB(' Now we will list the alternatives to "copy" the data from ndarrays'))
print(textBoldH('a3\n'), a3)
print('\n')
print(textBoldR('with SCALARS:'))
print(textBoldB('o1 = a3[1,2]'))
o1 = a3[1,2]
print(textBoldH('o1\n'), o1)
print(textBoldB('Changing o1: o1 = 111'))
o1 = 111
print(textBoldH('a3\n'), a3)
print(textBoldH('o1\n'), o1)
print('\n')
print(textBoldR('with MASKS:'))
print(textBoldB('o2 = a3[a3>9]'))
o2 = a3[a3>9]
print(textBoldH('o2\n'), o2)
print('\n')
print(textBoldR('with ARRAYS:'))
print(textBoldB('o3 = a3[[0,1],[1,2]]'))
o3 = a3[[0,1],[1,2]]
print(textBoldH('o3\n'), o3)
print(textBoldH('o3:\n'),o3.dtype)
print(textBoldH('o3:\n'),type(o3).__name__)
print('\n')
print(textBoldR('with ARRAYS and BROADCASTING (this will be reviewed next):'))
print(textBoldB('o4 = a3[[[1],[2]],[1,0]]'))
o4 = a3[[[1],[2]],[1,0]]
print(textBoldH('o4\n'), o4)
print(textBoldH('o4:\n'),o4.dtype)
print(textBoldH('o4:\n'),type(o4).__name__)
print(textBoldH('a3\n'), a3)
print(textBoldB(' We will define a new ndarray n1 = np.ones((4,3)) and add it to the array a3.'))
n1 = np.ones((4,3))
print(textBoldB(' The first implementation one may think of is the following:'))
def sum_two_arrays(a3,n1):
out = np.zeros_like(a3) # What does this function do?
for i in range(4):
for j in range(3):
out[i,j] = a3[i,j]+n1[i,j]
return out
n2 = sum_two_arrays(a3,n1)
# To quantify the computation time it takes let's measure it:
%timeit n2 = sum_two_arrays(a3,n1)
# The output it shows in the computer we run the experiments was:
# 33.9 µs ± 1.68 µs per loop (mean ± std. dev. of 7 runs, 10000 loops each)
# This means it took about 34 microseconds.
# Now, let's try the NumPy way! i.e. the vectorized approach!
%timeit n2 = a3+n1
# The output it shows in the computer we run the experiments was:
# 1.05 µs ± 12.7 ns per loop (mean ± std. dev. of 7 runs, 1000000 loops each)
# This means it was about 34 times faster!!
# and also it only took "one" line of code where in the previous version took "7" lines of code!!
print(textBoldR(' Given the previous argument, it is crucial you get familir with VECTORIZATION computing with NumPy!! '))
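# A minimal extra vectorization sketch (NumPy only; the array `demo` is purely illustrative):
# the squared norm of every row, computed with an explicit Python loop and with one vectorized line.
demo = np.random.random((1000, 3))
row_norms_loop = np.array([sum(v*v for v in row) for row in demo]) # explicit loop
row_norms_vec = np.sum(demo**2, axis=1) # vectorized
print(np.allclose(row_norms_loop, row_norms_vec))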
# Creating a 1D array, this is the stringly suggested way to create "vectors" is NumPy.
# This can be considered as a row vector.
x1 = np.array([1,2,3])
print(textBoldH('x1:'),x1)
print(textBoldH('shape:'),x1.shape)
print(textBoldH('dtype:'),x1.dtype)
print(textBoldH('type name:'),type(x1).__name__)
# Creating a 2D array with one row.
x2 = x1[np.newaxis] # What does np.newaxis do?
print(textBoldH('x2:'),x2)
print(textBoldH('shape:'),x2.shape)
print(textBoldH('dtype:'),x2.dtype)
print(textBoldH('type name:'),type(x2).__name__)
# This emulates a column vector.
# Transpossing the 2D array. This emulates a column vector.
x2 = x1[np.newaxis].T
print(textBoldH('x2:'),x2)
print(textBoldH('shape:'),x2.shape)
print(textBoldH('dtype:'),x2.dtype)
print(textBoldH('type name:'),type(x2).__name__)
# Notice that transposing a 1D array does not do anything (please try it by yourself!).
# Now that we have a 1D array (that emulates a row vector) and
# a 1-column 2D array (that emulates a column vector), we can do a "BROADCAST" and see what happens.
x3=x1+x2
print(textBoldH('x3:'),x3)
print(textBoldH('shape:'),x3.shape)
print(textBoldH('dtype:'),x3.dtype)
print(textBoldH('type name:'),type(x3).__name__)
# In this case we have operated elementwise and broadcast the array.
# The previous operation is equivalent to the following operation.
x11 = np.tile(x1, (3,1)) # Repeat the 1d array 'vertically' 3 times
print(textBoldH('x11:'),x11)
x21 = np.tile(x2, 3) # Repeat the 2d array 'horizontally' 3 times
print(textBoldH('x21:'),x21)
x31 = x11+x21
print(textBoldH('x31:'),x31)
print(textBoldH('shape:'),x31.shape)
print(textBoldH('dtype:'),x31.dtype)
print(textBoldH('type name:'),type(x31).__name__)
print(textBoldR(' As you may suspect, this way of doing the sum is not recommended.\n'+
' It was added just for comparison purposes.'))
print(textBoldH('a3\n'), a3)
print('\n')
print(textBoldB(' First reduction, sum along axis 0:'))
print(textBoldB(' s1 = np.sum(a3,axis=0)'))
s1 = np.sum(a3,axis=0)
print(textBoldH('s1'), s1)
print(textBoldH('shape:'),s1.shape)
print(textBoldH('dtype:'),s1.dtype)
print(textBoldH('type name:'),type(s1).__name__)
print('\n')
print(textBoldB(' Second reduction, sum along axis 1:'))
print(textBoldB(' s2 = np.sum(a3,axis=1)'))
s2 = np.sum(a3,axis=1)
print(textBoldH('s2'), s2)
print(textBoldH('shape:'),s2.shape)
print(textBoldH('dtype:'),s2.dtype)
print(textBoldH('type name:'),type(s2).__name__)
print('\n')
print(textBoldB(' Third reduction, sum along axes 0 and 1 (all elements):'))
print(textBoldB(' s3 = np.sum(a3)'))
s3 = np.sum(a3)
print(textBoldH('s3'), s3)
print(textBoldH('shape:'),s3.shape)
print(textBoldH('dtype:'),s3.dtype)
print(textBoldH('type name:'),type(s3).__name__)
print('\n')
print(textBoldR(' As you may suspect, the reduction reduces the dimensionality with respect to the original input.'))
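# A short additional sketch of reductions (NumPy only): other reductions follow the
# same axis convention, and keepdims=True keeps the reduced axis with length 1.
print(textBoldH('column means\n'), np.mean(a3, axis=0))
print(textBoldH('row maxima\n'), np.max(a3, axis=1))
print(textBoldH('keepdims shape\n'), np.sum(a3, axis=0, keepdims=True).shape)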
# Create a random matrix and comput its determinant
np.random.seed(0) # Fixing seed for randomnumber generator.
n = 100
A = np.random.random((n,n))
print(textBoldH('|A|:'), np.linalg.det(A))
# Compute matrix vector product
x = np.random.random(n)
b = A @ x
print(textBoldH('A*x:'), b)
# Solving a linear system of equations A*x=b, i.e. assume we know A and b and we want x.
x_out = np.linalg.solve(A,b)
print(textBoldH('x_out=A^{-1} b:'), x_out)
# Computing the norm 2, between x and x_out
print(textBoldH('||x-x_out||_2:'), np.linalg.norm(x-x_out))
# It seems it is small, why is this?
# Computing dot product
v1 = np.random.random(n)
v2 = np.random.random(n)
print(textBoldH('dot product between v1 and v2:'), np.dot(v1,v2))
# Computing elementwise product
print(textBoldH('v1*v2:'), v1*v2)
# Squaring each element of v1
print(textBoldH('out:'), np.power(v1,2))
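# A brief extra linear-algebra sketch (NumPy only; the array names here are illustrative):
# least-squares solution of an overdetermined but consistent system, reusing x from above.
A_ls = np.random.random((2*n, n)) # 200 equations, 100 unknowns
b_ls = A_ls @ x
x_ls = np.linalg.lstsq(A_ls, b_ls, rcond=None)[0]
print(textBoldH('||x-x_ls||_2:'), np.linalg.norm(x - x_ls))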
def plot_trig_func(n=5):
x = np.linspace(0,10,n) # Generates a linearly spaced set of n points from 0 to 10
y1 = np.sin(x) # Vectorization in action!
y2 = np.cos(x) # Vectorization in action!
plt.figure(figsize=(5,5)) # Creating the figure
plt.plot(x,y1,label=r'$\sin(x)$') # Plotting sin
plt.plot(x,y2,'r.-',label=r'$\cos(x)$') # Plotting cos
plt.grid(True) # Adding nice grid
plt.legend(loc='best') # Adding legend
plt.title('Plotting trigonometric functions') # Adding title
plt.xlabel(r'$x$') # Adding x-label
plt.ylabel(r'$y$') # Adding y-label
plt.show() # Showing the plot
# The following is a iPython widget called interact,
# it allows us to interact with the function that it is being called.
interact(plot_trig_func,n=(3,1000,1)) # Making this 'tunable'!
def plot_funct_2_var(theta=0):
x = np.linspace(-2,2,20)
y = x # Recall this is just a 'view'
X, Y = np.meshgrid(x,y) # It generates two 2D arrays with with cartesian product of 'x' and 'y'.
# 'lambda' definition help us to organize better the code
f = lambda x,y: np.sin(x*np.sin(theta)+y*np.cos(theta))*np.exp(-(x**2+y**2))
Z = f(X,Y) # Evaluation a function of two variables
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
fig.suptitle('Comparison of two ways of plotting $f(x,y)$')
# This is useful for images.
ax1.imshow(Z, cmap='hot', interpolation='nearest')
ax1.set_ylabel('imshow')
# This is useful for functions in two variables, i.e. z=f(x,y)
ax2.pcolormesh(X,Y,Z,cmap='hot', shading='auto')
ax2.axis('equal')
ax2.set_ylabel('pcolormesh')
plt.show()
interact(plot_funct_2_var,theta=(0,2*np.pi,2*np.pi/50))
def plot_funct_2_var_3D(theta=0):
x = np.linspace(-2,2,40)
y = x # Recall this is just a 'view'
X, Y = np.meshgrid(x,y) # It generates two 2D arrays with with cartesian product of 'x' and 'y'.
# 'lambda' definition help us to organize better the code
f = lambda x,y: np.sin(x*np.sin(theta)+y*np.cos(theta))*np.exp(-(x**2+y**2))
Z = f(X,Y) # Evaluation a function of two variables
fig, ax = plt.subplots(subplot_kw={"projection": "3d"},figsize=(10,10))
# Plot the surface.
surf = ax.plot_surface(X, Y, Z, cmap='hot', linewidth=2, antialiased=True)
# Add a color bar which maps values to colors.
fig.colorbar(surf, shrink=0.5, aspect=10)
plt.show()
interact(plot_funct_2_var_3D,theta=(0,2*np.pi,2*np.pi/50))
# Numerical Integration
import scipy.integrate as integrate
# Integrating $\int_0^1 \exp(-x^2)\,dx$
result = integrate.quad(lambda x: np.exp(-x**2), 0, 1)
print(result)
# Integrating $\int_0^1 1\,dx=1$
result = integrate.quad(lambda x: 1, 0, 1)
print(result)
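# A minimal ODE-integration sketch with SciPy (odeint was already imported at the top
# of this notebook): solve dy/dt = -y with y(0) = 1, whose exact solution is exp(-t).
t_ode = np.linspace(0, 5, 50)
y_ode = odeint(lambda y, t: -y, 1.0, t_ode)
print(np.max(np.abs(y_ode[:, 0] - np.exp(-t_ode))))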
# Minimization
from scipy.optimize import minimize
f = lambda x: np.sin(x[0])*(1+np.cos(x[1]))
x0 = np.array([0, 0])
res = minimize(f, x0, method='nelder-mead', options={'xatol': 1e-8, 'disp': True})
print(res.x)
# Plotting the previous function
x = np.linspace(-2*np.pi,2*np.pi)
y = x
X,Y = np.meshgrid(x,y)
f = lambda x,y: np.sin(x)*(1+np.cos(y))
Z = f(X,Y)
plt.figure(figsize=(8,8))
plt.pcolormesh(X,Y,Z,shading='auto')
plt.plot(res.x[0],res.x[1],'r.')
plt.colorbar()
plt.title('The red dot is the minimum found numerically')
plt.show()
# root finding
from scipy.optimize import root
f = lambda x: x - 0.5 * np.exp(x)
sol = root(f, 0.3)
print(sol)
# Working with algebraic expression versus 'numerical' expressions
sym.sqrt(3)==np.sqrt(3)
sym.sqrt(3)>np.sqrt(3)
sym.sqrt(3)<np.sqrt(3)
# Defining symbolic variables
x = sym.symbols('x')
x
y=x**2+sym.cos(x)+(x**2+1)**2
y
# Expanding the terms
sym.expand(y)
# Computing derivatives
yp=sym.diff(y)
yp
# Factorization of expression
sym.factor(yp)
# Computing limits
sym.limit((1-sym.cos(x))/x, x, 0)
sym.limit((sym.sin(x))/x, x, 0)
# Solving algebraic equations
sym.solve(x**2-x+1,x)
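# A short extra sketch bridging SymPy and NumPy: sym.lambdify turns the symbolic
# derivative yp computed above into a numerical function that accepts NumPy arrays.
f_num = sym.lambdify(x, yp, 'numpy')
print(f_num(np.linspace(0., 1., 5)))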
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading the image-sentence scores from the a json file
Step2: Read a model's scores on each sentence-image pair
Step3: SVO-Probes
Step4: Computing accuracy across different types.
Step5: Plot positive, negative, and average results for subjects, verbs, and objects.
Step6: Plot average, positive, or negative accuracy for different word types.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import sys
import json
import csv
import pandas as pd
import re
import matplotlib.pyplot as plt
import seaborn as sns
csv.field_size_limit(sys.maxsize)
def scores(path):
id_to_scores = {}
d = json.load(
open(
path,
'r'))
# id --> score
for item in d:
id_to_scores[item] = float(d[item])
return id_to_scores
def get_score_dataframe(path, df):
sentences = [item for item in df.sentence.values]
pos_image_id = [item for item in df.pos_image_id.values]
neg_image_id = [item for item in df.neg_image_id.values]
idxs = [item for item in df.index]
pair_to_scores = scores(path)
pos_scores = []
neg_scores = []
count = 0
for idx, sentence, pos_image_id, neg_image_id in zip(idxs, sentences,
pos_image_id,
neg_image_id):
neg_key = re.sub(' +', ' ', '%s|%d' % (sentence.lower(), neg_image_id))
pos_key = re.sub(' +', ' ', '%s|%d' % (sentence.lower(), pos_image_id))
if (pos_key in pair_to_scores) and (neg_key in pair_to_scores):
pos_scores.append(pair_to_scores[pos_key])
neg_scores.append(pair_to_scores[neg_key])
else:
df = df.drop([idx])
count += 1
df['pos_scores'] = pos_scores
df['neg_scores'] = neg_scores
return df
# SVO Probes dataset
!wget https://storage.googleapis.com/dm-mmt-models/svo_probes.csv --no-check-certificate -P '/tmp'
%ls /tmp
df = pd.read_csv(open('/tmp/svo_probes.csv', 'r'))
# Change this path to include the scores from your model
!wget https://storage.googleapis.com/dm-mmt-models/mmt_cc_svo_results.json --no-check-certificate -P '/tmp'
json_path = '/tmp/mmt_cc_svo_results.json'
def accuracy(frame):
neg = frame[['sentence', 'neg_image_id', 'neg_scores']].drop_duplicates()
pos = frame[['sentence', 'pos_image_id', 'pos_scores']].drop_duplicates()
neg_acc = np.mean([item == 0 for item in neg['neg_scores'].values])
pos_acc = np.mean([item == 1 for item in pos['pos_scores'].values])
# macro
acc = (neg_acc + pos_acc)/2
return acc, pos_acc, neg_acc
data_df = get_score_dataframe(json_path, df)
subj_neg_df = data_df[data_df['subj_neg'] & ~data_df['obj_neg']
& ~data_df['verb_neg']]
verb_neg_df = data_df[data_df['verb_neg'] & ~data_df['obj_neg']
& ~data_df['subj_neg']]
obj_neg_df = data_df[data_df['obj_neg'] & ~data_df['verb_neg']
& ~data_df['subj_neg']]
all_df = pd.concat([subj_neg_df, verb_neg_df, obj_neg_df])
acc_all, pos_acc_all, neg_acc_all = accuracy(pd.concat([subj_neg_df,
verb_neg_df,
obj_neg_df]))
acc_subj, pos_acc_subj, neg_acc_subj = accuracy(subj_neg_df)
acc_verb, pos_acc_verb, neg_acc_verb = accuracy(verb_neg_df)
acc_obj, pos_acc_obj, neg_acc_obj = accuracy(obj_neg_df)
results = [['All', acc_all, pos_acc_all, neg_acc_all],
['Subj', acc_subj, pos_acc_subj, neg_acc_subj],
['Verb', acc_verb, pos_acc_verb, neg_acc_verb],
['Obj', acc_obj, pos_acc_obj, neg_acc_obj]]
results_df = pd.DataFrame.from_records(results,
columns=['Type', 'Avg Accuracy',
'Pos Accuracy', 'Neg Accuracy'])
results_df
tmp_df = results_df
tmp_df = tmp_df.rename(columns={"Avg Accuracy": "Avg",
"Pos Accuracy": "Pos",
"Neg Accuracy": "Neg"})
melt_df = pd.melt(tmp_df, id_vars = "Type")
melt_df = melt_df.rename(columns={"variable": "Accuracy Type",
'value': 'Accuracy',
'Type': 'Word Type'})
_ = sns.barplot(data=melt_df, x='Accuracy Type', y='Accuracy', hue='Word Type')
fig = plt.gcf()
_ = fig.set_size_inches(10, 5, forward=True)
input_or_select = "Avg Accuracy" # @param ["Avg Accuracy", "Pos Accuracy", "Neg Accuracy"]
_ = sns.barplot(data=results_df, x='Type', y=input_or_select)
fig = plt.gcf()
_ = fig.set_size_inches(10, 5, forward=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. It's important to split the data randomly so all cases are represented in both sets.
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: Thinking about your results
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
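# A quick illustration (using only what was just defined): the stored mean/std let us
# undo the scaling later, e.g. to report predictions in the original units.
cnt_mean, cnt_std = scaled_features['cnt']
print((data['cnt'].head()*cnt_std + cnt_mean).values)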
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
n_records = features.shape[0]
split = np.random.choice(features.index,
size=int(n_records*0.8),
replace=False)
train_features, train_targets = features.ix[split], targets.ix[split]
val_features, val_targets = features.drop(split), targets.drop(split)
class NeuralNetwork:
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.learning_rate = learning_rate
#### Set this to your implemented sigmoid function ####
        # TODO: Activation function is the sigmoid function (implemented as a lambda)
self.activation_function = lambda x : 1 / (1 + np.exp(-x))
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin = 2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Backpropagated error
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors) # errors propagated to the hidden layer
hidden_grad = hidden_outputs * (1 - hidden_outputs) # hidden layer gradients
# TODO: Update the weights
self.weights_hidden_to_output += self.learning_rate * output_errors * hidden_outputs.T # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.learning_rate * hidden_errors * hidden_grad * inputs.T # update input-to-hidden weights with gradient descent step
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)# signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs)# signals from hidden layer
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs)# signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
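# Tiny sanity check of the MSE helper on made-up numbers (purely illustrative):
# the errors are (0, 0, 1), so the mean squared error should be 1/3.
print(MSE(np.array([1., 2., 3.]), np.array([1., 2., 4.])))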
### Set the hyperparameters here ###
epochs = 15000
learning_rate = 0.005
hidden_nodes = 30
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
if e%(epochs/10) == 0:
# Calculate losses for the training and test sets
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
# Print out the losses as the network is training
print('Training loss: {:.4f}'.format(train_loss))
print('Validation loss: {:.4f}'.format(val_loss))
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=predictions.shape[1])  # predictions has shape (1, n_test)
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
np.random.seed(42)
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path == 'Bike-Sharing-Dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
[ 0.22931895, -1.28754157]))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
[[-0.7128223, 0.22086344, -0.64139849],
[-1.06444693, 1.06268915, -0.17280743]]))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
self.assertTrue(np.allclose(network.run(inputs), -0.97900982))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create Feature
Step2: Standardize Feature
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
from sklearn import preprocessing
import numpy as np
# Create feature
x = np.array([[-500.5],
[-100.1],
[0],
[100.1],
[900.9]])
# Create scaler
scaler = preprocessing.StandardScaler()
# Transform the feature
standardized = scaler.fit_transform(x)
# Show feature
standardized
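# Quick check that the standardization behaved as expected (mean ~ 0, unit variance):
print('Mean:', round(standardized.mean()))
print('Standard deviation:', standardized.std())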
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The density looks like a $\mathrm{sech}^2$ disk (cored near the center, exponential at large distances)
Step2: Now we 'observe' this density distribution from $z_{\mathrm{sun}} = 0.05$
Step3: The observed density is
Step4: We also 'observe' the mean of the squared velocity
Step5: Now we load all of this data into the data_dicts format for specifying the data for M2M
Step6: M2M fit
Step7: Run without any smoothing
|
<ASSISTANT_TASK:>
Python Code:
n_mock= 100000
sigma_true= 0.1
totmass_true= 0.25
z_mock, vz_mock, m_mock= wendym2m.sample_sech2(sigma_true,totmass_true,n=n_mock)
_= bovy_plot.bovy_hist(numpy.fabs(z_mock),bins=31,normed=True,
xlabel=r'$z$',ylabel=r'$\nu(z)$',lw=2.,histtype='step')
gca().set_yscale('log')
zsun_true= 0.05
# We only observe the density at a few z
z_obs= numpy.array([0.1,0.15,0.2,-0.1,-0.15,-0.2])
h_obs= 0.025
dens_obs= hom2m.compute_dens(z_mock,zsun_true,z_obs,h_obs,w=m_mock)
dens_obs_noise= numpy.sqrt(dens_obs)*0.2*numpy.sqrt(numpy.amax(dens_obs))\
/(numpy.fabs(z_obs**2)/numpy.amin(numpy.fabs(z_obs**2)))
dens_obs+= numpy.random.normal(size=dens_obs.shape)*dens_obs_noise
bovy_plot.bovy_print(axes_labelsize=17.,text_fontsize=12.,xtick_labelsize=15.,ytick_labelsize=15.)
figsize(6,4)
bovy_plot.bovy_plot(z_obs,dens_obs,'ko',semilogy=True,
xlabel=r'$\tilde{z}$',ylabel=r'$\nu_{\mathrm{obs}}(\tilde{z})$',
xrange=[-.25,0.25],yrange=[0.003,6.])
errorbar(z_obs,dens_obs,yerr=dens_obs_noise,marker='None',ls='none',color='k')
# We only observe the v2 at a few z (same as before)
v2_obs= hom2m.compute_v2(z_mock,vz_mock,zsun_true,z_obs,h_obs)
# use constant noise
v2_obs_noise=numpy.zeros_like(v2_obs)+0.002
numpy.random.seed(10) # probably best to set a seed somewhere so the data is always the same
v2_obs+= numpy.random.normal(size=v2_obs.shape)*v2_obs_noise
bovy_plot.bovy_print(axes_labelsize=17.,text_fontsize=12.,xtick_labelsize=15.,ytick_labelsize=15.)
figsize(6,4)
bovy_plot.bovy_plot(z_obs,v2_obs,'ko',
xlabel=r'$\tilde{z}$',ylabel=r'$\langle v^2\rangle$',
xrange=[-.25,0.25],yrange=[0.0,0.06],gcf=True)
errorbar(z_obs,v2_obs,yerr=v2_obs_noise,marker='None',ls='none',color='k')
dens_data= {'type':'dens','pops':0,'zobs':z_obs,'obs':dens_obs,'unc':dens_obs_noise,'zrange':1.}
v2_data= {'type':'v2','pops':0,'zobs':z_obs,'obs':v2_obs,'unc':v2_obs_noise,'zrange':1.}
data_dicts= [dens_data,v2_data]
n_m2m= 1000
sigma_init= 0.2
h_m2m= 0.075
z_m2m, vz_m2m, w_init= wendym2m.sample_sech2(sigma_init,totmass_true,n=n_m2m)
z_out= numpy.linspace(-0.3,0.3,101)
dens_init= hom2m.compute_dens(z_m2m,zsun_true,z_out,h_m2m,w=w_init)
v2_init= hom2m.compute_v2(z_m2m,vz_m2m,zsun_true,z_out,h_m2m,w=w_init)
bovy_plot.bovy_print(axes_labelsize=17.,text_fontsize=12.,xtick_labelsize=15.,ytick_labelsize=15.)
figsize(12,4)
subplot(1,2,1)
bovy_plot.bovy_plot(z_out,dens_init,'-',semilogy=True,gcf=True,
xlabel=r'$\tilde{z}$',ylabel=r'$\nu_{\mathrm{obs}}(\tilde{z})$',
xrange=[-.25,0.25],yrange=[0.003,30.])
bovy_plot.bovy_plot(z_obs,dens_obs,'o',semilogy=True,overplot=True)
errorbar(z_obs,dens_obs,yerr=dens_obs_noise,marker='None',ls='none',color=sns.color_palette()[1])
yscale('log',nonposy='clip')
subplot(1,2,2)
bovy_plot.bovy_plot(z_out,v2_init,'-',gcf=True,
                    xlabel=r'$\tilde{z}$',ylabel=r'$\langle v^2\rangle(\tilde{z})$',
xrange=[-.25,0.25],yrange=[0.0,0.06])
bovy_plot.bovy_plot(z_obs,v2_obs,'o',overplot=True)
errorbar(z_obs,v2_obs,yerr=v2_obs_noise,marker='None',ls='none',color=sns.color_palette()[1])
tight_layout()
step= numpy.pi/3.*10.**-2.
nstep= 10000
eps= 10.**-3.5
smooth= None#1./step/100.
st96smooth= False
mu= 0.
h_m2m= 0.075
omega_m2m= 0.
zsun_m2m= zsun_true
prior= 'entropy'
use_v2=True
w_out,z_m2m,vz_m2m,Q,wevol,windx= \
wendym2m.fit_m2m(w_init,z_m2m,vz_m2m,omega_m2m,zsun_m2m,data_dicts,npop=1,
nstep=nstep,step=step,mu=mu,eps=eps,h_m2m=h_m2m,prior=prior,
smooth=smooth,st96smooth=st96smooth,output_wevolution=10)
w_out= w_out[:,0]
z_out= numpy.linspace(-0.35,0.35,101)
dens_final= hom2m.compute_dens(z_m2m,zsun_true,z_out,h_m2m,w=w_out)
v2_final= hom2m.compute_v2(z_m2m,vz_m2m,zsun_true,z_out,h_m2m,w=w_out)
bovy_plot.bovy_print(axes_labelsize=19.,text_fontsize=14.,xtick_labelsize=15.,ytick_labelsize=15.)
figsize(15,6)
subplot(2,3,1)
bovy_plot.bovy_plot(z_out,dens_init,'-',semilogy=True,color=init_color,
xlabel=r'$\tilde{z}$',ylabel=r'$\nu_{\mathrm{obs}}(\tilde{z})$',
xrange=[-.3,0.3],yrange=[0.003,30.],gcf=True)
bovy_plot.bovy_plot(z_obs,dens_obs,'o',semilogy=True,overplot=True,color=constraint_color)
bovy_plot.bovy_plot(z_out,dens_final,'-',semilogy=True,overplot=True,zorder=0,color=final_color)
errorbar(z_obs,dens_obs,yerr=dens_obs_noise,marker='None',ls='none',color=constraint_color)
yscale('log',nonposy='clip')
gca().yaxis.set_major_formatter(FuncFormatter(
lambda y,pos: (r'${{:.{:1d}f}}$'.format(int(numpy.maximum(-numpy.log10(y),0)))).format(y)))
subplot(2,3,4)
bovy_plot.bovy_plot(z_out,v2_init,'-',color=init_color,
xlabel=r'$\tilde{z}$',ylabel=r'$\langle v_z^2\rangle(\tilde{z})$',
xrange=[-.3,0.3],yrange=[0.0,0.06],gcf=True)
bovy_plot.bovy_plot(z_obs,v2_obs,'o',overplot=True,color=constraint_color)
bovy_plot.bovy_plot(z_out,v2_final,'-',overplot=True,zorder=0,color=final_color)
errorbar(z_obs,v2_obs,yerr=v2_obs_noise,marker='None',ls='none',color=constraint_color)
# yscale('log',nonposy='clip')
# gca().yaxis.set_major_formatter(FuncFormatter(
# lambda y,pos: (r'${{:.{:1d}f}}$'.format(int(numpy.maximum(-numpy.log10(y),0)))).format(y)))
subplot(2,3,2)
#_= hist(w_init,histtype='step',lw=2.,normed=True,bins=31,zorder=0,color=init_color)
_= hist(w_out,histtype='step',lw=2.,normed=True,bins=31,zorder=0,color=final_color)
subplot(2,3,5)
_= hist(vz_m2m,histtype='step',lw=2.,normed=True,bins=31,zorder=0,color=init_color)
xs= numpy.linspace(-0.75,0.75,201)
plot(xs,1./numpy.sqrt(2.*numpy.pi)/sigma_true*numpy.exp(-0.5*xs**2./sigma_true**2.),
lw=2.,zorder=2,color=constraint_color)
_= hist(vz_m2m,weights=w_out,histtype='step',lw=2.,normed=True,bins=31,zorder=1,color=final_color)
xlim(-0.75,0.75)
ylim(0.,5.)
xlabel(r'$v_z$')
ylabel(r'$p(v_z)$')
print("Velocity dispersions: mock, fit",numpy.std(vz_mock),\
numpy.sqrt(numpy.sum(w_out*(vz_m2m-numpy.sum(w_out*vz_m2m)/numpy.sum(w_out))**2.)/numpy.sum(w_out)))
subplot(2,3,3)
for ii in range(len(wevol)):
bovy_plot.bovy_plot(numpy.linspace(0.,1.,nstep)*nstep*step,wevol[ii,0],'-',
color=cm.viridis(z_m2m[windx][ii]/0.3),
yrange=[-0.2/len(z_m2m),numpy.amax(wevol)*1.1],
semilogx=True,xlabel=r'$t$',ylabel=r'$w(t)$',gcf=True,overplot=ii>0)
gca().xaxis.set_major_formatter(FuncFormatter(
lambda y,pos: (r'${{:.{:1d}f}}$'.format(int(numpy.maximum(-numpy.log10(y),0)))).format(y)))
subplot(2,3,6)
bovy_plot.bovy_plot(numpy.linspace(0.,1.,nstep)*nstep*step,numpy.sum(Q,axis=1),lw=3.,
loglog=True,xlabel=r'$t$',ylabel=r'$\chi^2$',gcf=True,
yrange=[1.,10**3.5])
gca().yaxis.set_major_formatter(FuncFormatter(
lambda y,pos: (r'${{:.{:1d}f}}$'.format(int(numpy.maximum(-numpy.log10(y),0)))).format(y)))
gca().xaxis.set_major_formatter(FuncFormatter(
lambda y,pos: (r'${{:.{:1d}f}}$'.format(int(numpy.maximum(-numpy.log10(y),0)))).format(y)))
tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Agent class constructor accepts 5 parameters
Step2: Now we will explore all the magic methods of agent class.
Step3: str returns name of the agent
Step4: getitem returns value of an attribute
Step5: setitem sets/changes the value of an attribute. Raises a KeyError if the agent doesn't have the attribute.
Step6: contains returns true if agent contains the attribute
Step7: iter loops over attributes of an agent
Step8: reversed loops over attributes in reverse order
Step9: eq checks if two agents are equivalent
Step10: repr
Step11: call
Step12: iadd increases value of each attributes
Step13: isub - this is the opposite of iadd; it subtracts values from the attributes.
Step14: imul multiplies each attributes
Step15: add adds two agents, making them a composite/group. The name of the composite/group is the concatenated names of all the agents. As shown below, agents become members of the composite.
Step16: Now we will explore general class methods
Step17: join_group
Step18: same_type - Returns true if agents are of same type.
Step19: attrs_to_dict - returns ordered dictionary representing attributes
Step20: sum - Returns sum of all the attributes
Step21: magnitude -
Step22: die - makes agent inactive
Step23: is_active - returns true if agent is active
|
<ASSISTANT_TASK:>
Python Code:
cd ..
from indra2.agent import Agent
def newt_action(agent):
print("I'm " + agent.name + " and I'm inventing modern mechanics!")
newton = Agent("Newton",
attrs={"place": 0.0, "time": 1658.0, "achieve": 43.9},
action=newt_action,
duration=30)
len(newton)
str(newton)
newton['time']
newton['place'] = 2.5
newton['place']
"time" in newton
for attr in newton:
print(attr)
for attr in reversed(newton):
print(attr)
LEIBBYEAR = 1646
LEIBDYEAR = 1716
def leib_action(agent):
print("I'm " + agent.name + " and I'm inventing calculus!")
leibniz = Agent("Leibniz",
attrs={"place": 0.0, "time": LEIBBYEAR},
action=leib_action,
duration=20)
other_Leibniz = Agent("Leibniz",
attrs={"place": 1.0, "time": LEIBBYEAR},
action=leib_action,
duration=20)
print("Leibniz & othere_Leibniz:", leibniz == other_Leibniz)
print("Leibniz & Leibniz:", leibniz == leibniz)
print("Leibniz & Newton:", leibniz == newton)
repr(leibniz)
newton()
leibniz()
newton
newton += 2
newton
newton += 2
newton
newton -= 2
newton
newton *= 2
newton
import composite
comp = newton + leibniz
comp
newton.to_json()
ModernNewton = Agent("ModerNewton",
attrs={"place": 0.0, "time": 1658.0, "achieve": 43.9},
action=newt_action,
duration=30)
ModernNewton.join_group(comp)
ModernNewton
comp
newton.same_type(leibniz)
newton.same_type(newton)
newton.attrs_to_dict()
newton.sum()
newton.magnitude()
# newton.die()
newton
newton.set_pos(100,100)
newton
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-2', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Dividing data into train and test set
Step2: Let us use building 1 for demo purposes
Step3: Let's split data at April 30th
Step4: The REDD data set has appliance-level data sampled every 3 or 4 seconds and mains data sampled every 1 second. Let us verify this.
Step5: Since both of these are sampled at different frequencies, we will downsample both to 1 minute resolution. We will also select the top-5 appliances in terms of energy consumption and use them for training our FHMM and CO models.
Step6: Training and disaggregation
|
<ASSISTANT_TASK:>
Python Code:
import time
from matplotlib import rcParams
import matplotlib.pyplot as plt
%matplotlib inline
rcParams['figure.figsize'] = (13, 6)
plt.style.use('ggplot')
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.disaggregate import CombinatorialOptimisation
train = DataSet('/data/REDD/redd.h5')
test = DataSet('/data/REDD/redd.h5')
building = 1
train.buildings[building].elec.mains().plot()
train.set_window(end="30-4-2011")
test.set_window(start="30-4-2011")
train_elec = train.buildings[1].elec
test_elec = test.buildings[1].elec
train_elec.mains().plot()
test_elec.mains().plot()
fridge_meter = train_elec['fridge']
fridge_df = fridge_meter.load().next()
fridge_df.head()
mains = train_elec.mains()
mains_df = mains.load().next()
mains_df.head()
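# Quick sanity check of the sample periods mentioned above (a rough sketch: it
# assumes both DataFrames carry a DatetimeIndex, which nilmtk's load() provides).
import numpy as np
fridge_period = np.median(np.diff(fridge_df.index.values) / np.timedelta64(1, 's'))
mains_period = np.median(np.diff(mains_df.index.values) / np.timedelta64(1, 's'))
print 'fridge sample period (s):', fridge_period
print 'mains sample period (s):', mains_period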
top_5_train_elec = train_elec.submeters().select_top_k(k=5)
top_5_train_elec
start=time.time()
from nilmtk.disaggregate import fhmm_exact
fhmm = fhmm_exact.FHMM()
# Note that we have given the sample period to downsample the data to 1 minute
fhmm.train(top_5_train_elec, sample_period=60)
end=time.time()
print end-start
disag_filename = '/data/REDD/redd-disag-fhmm.h5'
output = HDFDataStore(disag_filename, 'w')
# Note that we have mentioned to disaggregate after converting to a sample period of 60 seconds
fhmm.disaggregate(test_elec.mains(), output, sample_period=60)
output.close()
disag_fhmm = DataSet(disag_filename)
disag_fhmm_elec = disag_fhmm.buildings[building].elec
from nilmtk.metrics import f1_score
f1_fhmm = f1_score(disag_fhmm_elec, test_elec)
f1_fhmm.index = disag_fhmm_elec.get_labels(f1_fhmm.index)
f1_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("FHMM");
start=time.time()
from nilmtk.disaggregate import CombinatorialOptimisation
co = CombinatorialOptimisation()
# Note that we have given the sample period to downsample the data to 1 minute
co.train(top_5_train_elec, sample_period=60)
end=time.time()
print end-start
disag_filename = '/data/REDD/redd-disag-co.h5'
output = HDFDataStore(disag_filename, 'w')
# Note that we have mentioned to disaggregate after converting to a sample period of 60 seconds
co.disaggregate(test_elec.mains(), output, sample_period=60)
output.close()
disag_co = DataSet(disag_filename)
disag_co_elec = disag_co.buildings[building].elec
from nilmtk.metrics import f1_score
f1_co= f1_score(disag_co_elec, test_elec)
f1_co.index = disag_co_elec.get_labels(f1_co.index)
f1_co.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("CO");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SAM (System for Award Management) - exclusions
Step2: There are 8,659 firms on the SAM exclusion list
Step3: NPI and CAGE don't seem to be great keys to join the data - ideally we can use SAM
Step4: FAPIIS is not bad, with 3,002 DUNS codes, but its time range only covers 2012 to 2017
Step5: This means we're dealing with 49.5M transactions totalling 6.7 trillion dollars. These purchases came from 622k vendors that won 2.2mn solicitations issued by government agencies.
Step6: Understanding where the budget is spent
Step7: Looking at SMBs by year
Step8: SMB contract by gov. agency & by naics code
Step9: Simple Linear regression (LR)
Step10: LR
Step11: MVP
Step12: MVP 2 - Collaborative filtering
Step13: Workflow 1
Step14: b. Collaborative Filtering - item-item prediction
Step15: OTHERS - FROM TUTORIAL - Anton Tarasenko
Step16: You can find agencies that buy products like yours. For example, if it's "software":
Step17: What Firms in Your Industry Sell to the Government
Step18: Inspecting Specific Transactions
Step19: Alternatively, specify vendors your product relates to and check how the government uses it. Top deals in data analytics
Step20: Searching Through Descriptions
Step21: Some rows of descriptionofcontractrequirement contain codes like "IGF
Step22: Facts about Government Contracting
Step23: The median shows the most likely supplier. Agencies on the top of the table actively employ vendors whose annual revenue is less than $1mn.
Step24: Women-owned businesses make about one tenth of the transactions, but their share in terms of sales is only 3.7%.
Step25: For example, firms owned by women, veterans, and minorities (all represented at the same time) sell $5bn in goods and services. That's 0.07% of all government purchases.
|
<ASSISTANT_TASK:>
Python Code:
import google.datalab.bigquery as bq
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn import cross_validation as cv
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics import mean_squared_error
from math import sqrt
%%sql
select * from [fiery-set-171213:vrec.sam_exclusions] limit 5
%%sql
select Exclusion_Type from [fiery-set-171213:vrec.sam_exclusions] group by 1;
%%sql
select Classification from [fiery-set-171213:vrec.sam_exclusions] group by 1;
%%sql
select
count(*)
from [fiery-set-171213:vrec.sam_exclusions]
where Classification in ('Firm')
;
%%bq query -n df_query
select
EXTRACT(YEAR FROM Active_Date) as year,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
and Active_Date is not NULL
group by 1
order by 1;
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.plot(kind='bar', x='year', title='Excluded Firms per year', figsize=(15,8))
ax.set_xlabel('Year')
ax.set_ylabel('count')
%%bq query
select
#Name,
SAM_Number,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
#and Active_Date is not NULL
group by 1
order by 2 DESC
limit 5;
%%bq query
select
NPI,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
#and CAGE is not NULL
group by 1
order by 2 DESC
limit 5;
%%bq query
select
CAGE,
count(*) as count
from `fiery-set-171213.vrec.sam_exclusions`
where Classification in ('Firm')
#and CAGE is not NULL
group by 1
order by 2 DESC
limit 5;
%%bq query
select *
from `fiery-set-171213.vrec.fapiis`
limit 5
%%bq query -n df_query
select
EXTRACT(YEAR FROM RECORD_DATE) as year,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1
order by 1;
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.plot(kind='bar', x='year', title='Firms by Record date', figsize=(10,5))
ax.set_xlabel('Year')
ax.set_ylabel('count')
%%bq query -n df_query
select
EXTRACT(YEAR FROM TERMINATION_DATE) as year,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1
order by 1;
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.plot(kind='bar', x='year', title='Firms by termination date', figsize=(10,5))
ax.set_xlabel('Year')
ax.set_ylabel('count')
%%bq query
select
AWARDEE_NAME,
DUNS,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1,2
order by 3 DESC
limit 5;
%%bq query
select
*
from `fiery-set-171213.vrec.fapiis`
where AWARDEE_NAME in ('ALPHA RAPID ENGINEERING SOLUTIONS')
limit 5;
%%bq query
select
RECORD_TYPE,
count(*) as count
from `fiery-set-171213.vrec.fapiis`
group by 1
order by 2 DESC
%%bq query -n df_query
select count(*) as transactions
from `fiery-set-171213.vrec.usa_spending_all`
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
%%bq query
select *
from `fiery-set-171213.vrec.usa_spending_all`
where mod_agency in ('1700: DEPT OF THE NAVY')
limit 5
%%bq query -n df_query
select
#substr(signeddate, 1, 2) month,
fiscal_year as year,
count(*) transactions,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by year
order by year asc
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.set_index('year')['dollarsobligated'].plot(kind='bar', title='Government purchases by years')
ax.set_ylabel('dollars obligated')
%%bq query -n df_query
select
fiscal_year as year,
sum(dollarsobligated)/count(*) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by year
order by year asc
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.set_index('year')['dollarsobligated'].plot(kind='bar', title='avg. transaction size by years')
ax.set_ylabel('dollars obligated')
%%bq query
select
maj_agency_cat,
mod_agency,
count(*)
from `fiery-set-171213.vrec.usa_spending_all`
group by 1,2
order by 3 DESC
limit 20
%%bq query
select
mod_parent,
vendorname,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by 1,2
order by 3 DESC
limit 20
%%bq query
select
productorservicecode,
systemequipmentcode,
claimantprogramcode,
principalnaicscode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where vendorname in ('LOCKHEED MARTIN CORPORATION')
group by 1,2,3,4
order by 5 DESC
limit 20
%%bq query
select
#mod_parent,
vendorname,
systemequipmentcode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where productorservicecode in ('1510: AIRCRAFT, FIXED WING')
group by 1,2
order by 3 DESC
limit 20
%%bq query
select
vendorname,
systemequipmentcode,
claimantprogramcode,
principalnaicscode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where productorservicecode in ('1510: AIRCRAFT, FIXED WING')
and contractingofficerbusinesssizedetermination in ('S: SMALL BUSINESS')
group by 1,2,3,4
order by dollarsobligated DESC
limit 20
%%bq query
select
*
from `gpqueries.contracts.raw`
where productorservicecode in ('1510: AIRCRAFT, FIXED WING')
and contractingofficerbusinesssizedetermination in ('S: SMALL BUSINESS')
limit 1
%%bq query
select
claimantprogramcode,
principalnaicscode,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1,2
order by dollarsobligated DESC
limit 10
%%bq query -n df_query
select
fiscal_year,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1
order by 1
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.set_index('fiscal_year')['dollarsobligated'].plot(kind='bar', title='dollars obligated to SMBs')
%%bq query -n df_query
select
smb.fiscal_year,
sum(smb.transaction) as smb,
sum(total.transaction) as total,
sum(smb.transaction)/sum(total.transaction) as percentage
from
(select
fiscal_year,
sum(dollarsobligated) as transaction
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1) as smb
join
(select
fiscal_year,
sum(dollarsobligated) as transaction
from `fiery-set-171213.vrec.usa_spending_all`
group by 1) as total
on smb.fiscal_year = total.fiscal_year
group by 1
order by 1
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
ax = df.set_index('fiscal_year')['percentage'].plot(kind='bar', title='dollars % for SMBs')
%%bq query
select
smb.principalnaicscode as principalnaicscode,
sum(total.count) as count,
sum(smb.dollarsobligated) as dollarsobligated_smb,
sum(total.dollarsobligated) as dollarsobligated_total,
sum(smb.dollarsobligated)/sum(total.dollarsobligated) as smb_percentage
from
(select
principalnaicscode,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
group by 1) as smb
join
(select
principalnaicscode,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
group by 1
having dollarsobligated > 0
) as total
on smb.principalnaicscode = total.principalnaicscode
group by 1
order by 5 DESC
limit 10
%%bq query -n df_query
select
maj_agency_cat,
#mod_agency,
#contractactiontype,
#typeofcontractpricing,
#performancebasedservicecontract,
state,
#vendorcountrycode,
#principalnaicscode,
contractingofficerbusinesssizedetermination,
#sum(dollarsobligated) as dollarsobligated
dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where vendorcountrycode in ('UNITED STATES', 'USA: UNITED STATES OF AMERICA')
and contractingofficerbusinesssizedetermination in ('O: OTHER THAN SMALL BUSINESS', 'S: SMALL BUSINESS')
and dollarsobligated > 0
#group by 1,2,3
limit 20000
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
# Create dummy variable using pandas function get_dummies
df1 = df.join(pd.get_dummies(df['maj_agency_cat']))
df1 = df1.join(pd.get_dummies(df['state']))
df1 = df1.join(pd.get_dummies(df['contractingofficerbusinesssizedetermination']))
df1 = df1.drop('maj_agency_cat', axis = 1)
df1 = df1.drop('state', axis = 1)
df1 = df1.drop('contractingofficerbusinesssizedetermination', axis = 1)
df1.head()
train_data = df1.iloc[:,1:]
train_labels = df[['dollarsobligated']]
lm = LinearRegression()
lm.fit(train_data, train_labels)
# The coefficients
print('Coefficients: \n', lm.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((lm.predict(train_data) - train_labels) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % lm.score(train_data, train_labels))
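# The coefficient array printed above is hard to read on its own; pairing each
# coefficient with its dummy-column name makes the largest effects visible
# (a quick, illustrative inspection, not part of the model fit itself).
coef = pd.Series(lm.coef_[0], index=train_data.columns)
coef.sort_values(ascending=False).head(10)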
%%bq query -n df_query
select
vendorname,
maj_agency_cat,
state,
contractingofficerbusinesssizedetermination,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
where vendorcountrycode in ('UNITED STATES', 'USA: UNITED STATES OF AMERICA')
and contractingofficerbusinesssizedetermination in ('O: OTHER THAN SMALL BUSINESS', 'S: SMALL BUSINESS')
and dollarsobligated > 0
group by 1,2,3,4
limit 20000
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
#Create dummy variable using pandas function get_dummies
df1 = df.join(pd.get_dummies(df['maj_agency_cat']))
df1 = df1.join(pd.get_dummies(df['state']))
df1 = df1.join(pd.get_dummies(df['contractingofficerbusinesssizedetermination']))
df1 = df1.drop('maj_agency_cat', axis = 1)
df1 = df1.drop('state', axis = 1)
df1 = df1.drop('contractingofficerbusinesssizedetermination', axis = 1)
df1 = df1.drop('vendorname', axis = 1)
df1 = df1.drop('dollarsobligated', axis = 1)
train_data = df1.iloc[:,1:]
train_labels = df[['count']]
lm = LinearRegression()
lm.fit(train_data, train_labels)
# The coefficients
print('Coefficients: \n', lm.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((lm.predict(train_data) - train_labels) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % lm.score(train_data, train_labels))
%%bq query
select
#principalnaicscode,
fiscal_year,
maj_agency_cat,
#contractingofficerbusinesssizedetermination,
#vendorname,
productorservicecode,
count(*) as count,
sum(dollarsobligated) as dollarsobligated
from `fiery-set-171213.vrec.usa_spending_all`
#where contractingofficerbusinesssizedetermination in ("S: SMALL BUSINESS")
#where regexp_contains(principalnaicscode, "CONSTRUCTION")
#and regexp_contains(maj_agency_cat, "AGRICULTURE")
where regexp_contains(productorservicecode, "MEAT")
#and fiscal_year = 2016
group by 1,2,3
order by dollarsobligated DESC
limit 10
%%bq query -n df_query
select
contractingofficerbusinesssizedetermination,
mod_agency,
vendorname,
count(*) as count
from `fiery-set-171213.vrec.usa_spending_all`
where vendorcountrycode in ('UNITED STATES', 'USA: UNITED STATES OF AMERICA')
and contractingofficerbusinesssizedetermination in ('O: OTHER THAN SMALL BUSINESS', 'S: SMALL BUSINESS')
and mod_agency not in ("")
group by 1,2,3
order by count DESC
limit 20000
df = df_query.execute(output_options=bq.QueryOutput.dataframe()).result()
df.head()
df1 = df.drop('contractingofficerbusinesssizedetermination', axis = 1)
n_agency = df1.mod_agency.unique().shape[0]
n_vendors = df1.vendorname.unique().shape[0]
print 'Number of gov agencies = ' + str(n_agency) + ' | Number of vendors = ' + str(n_vendors)
# Convert categorical values with label encoding
le_agency = LabelEncoder()
label_agency = le_agency.fit_transform(df1['mod_agency'])
le_vendor = LabelEncoder()
label_vendor = le_vendor.fit_transform(df1['vendorname'])
df_agency = pd.DataFrame(label_agency)
df_vendor = pd.DataFrame(label_vendor)
df2 = pd.concat([df_agency, df_vendor], axis = 1)
df2 = pd.concat([df2, df1['count']], axis = 1)
df2.columns = ['mod_agency', 'vendorname', 'count']
df2.head(5)
# To get the original labels back:
# le_agency.inverse_transform([173, 100])
# Split into training and test data set
train_data, test_data = cv.train_test_split(df2, test_size=0.25)
#Build the matrix
train_data_matrix = np.zeros((n_agency, n_vendors))
for line in train_data.itertuples():
train_data_matrix[line[1]-1, line[2]-1] = line[3]
test_data_matrix = np.zeros((n_agency, n_vendors))
for line in test_data.itertuples():
test_data_matrix[line[1]-1, line[2]-1] = line[3]
#Compute cosine distance
user_similarity = pairwise_distances(train_data_matrix, metric='cosine')
item_similarity = pairwise_distances(train_data_matrix.T, metric='cosine')
def predict(ratings, similarity, type='user'):
if type == 'user':
mean_user_rating = ratings.mean(axis=1)
#You use np.newaxis so that mean_user_rating has same format as ratings
ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
elif type == 'item':
pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
return pred
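# In formula form, the user-based branch above computes, for agency u and vendor i:
#   pred(u, i) = mean_u + sum_v similarity(u, v) * (r(v, i) - mean_v) / sum_v |similarity(u, v)|
# and the item-based branch is a similarity-weighted average over vendors:
#   pred(u, i) = sum_j r(u, j) * similarity(j, i) / sum_j |similarity(j, i)|
# where `similarity` is the pairwise matrix computed with pairwise_distances above.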
item_prediction = predict(train_data_matrix, item_similarity, type='item')
user_prediction = predict(train_data_matrix, user_similarity, type='user')
# Evaluation
def rmse(prediction, ground_truth):
prediction = prediction[ground_truth.nonzero()].flatten()
    ground_truth = ground_truth[ground_truth.nonzero()].flatten()  # keep only the nonzero (observed) entries, since we only score interactions present in the test set
return sqrt(mean_squared_error(prediction, ground_truth))
print 'User-based CF RMSE: ' + str(rmse(user_prediction, test_data_matrix))
print 'Item-based CF RMSE: ' + str(rmse(item_prediction, test_data_matrix))
print 'Workflow 1'
print '=' * 100
print 'Select your agency:'
agency = df1['mod_agency'][10]
print agency
print '=' * 100
print '1. Have you considered working with these SMB companies (user prediction)?'
agency = le_agency.transform(agency)
vendor_reco = pd.DataFrame(user_prediction[agency, :])
labels = pd.DataFrame(le_vendor.inverse_transform(range(0, len(vendor_reco))))
df_reco = pd.concat([vendor_reco, labels], axis = 1)
df_reco.columns = ['reco_score', 'vendorname']
#Join to get the SMB list
df_smb = df.drop(['mod_agency', 'count'], axis = 1)
df_reco = df_reco.set_index('vendorname').join(df_smb.set_index('vendorname'))
df_reco = df_reco.sort_values(['reco_score'], ascending = [0])
df_reco[df_reco['contractingofficerbusinesssizedetermination'] == 'S: SMALL BUSINESS'].head(10)
print '=' * 100
print '2. Have you considered working with these SMB companies (item-item prediction?)'
vendor_reco = pd.DataFrame(item_prediction[agency, :])
df_reco = pd.concat([vendor_reco, labels], axis = 1)
df_reco.columns = ['reco_score', 'vendorname']
df_reco = df_reco.set_index('vendorname').join(df_smb.set_index('vendorname'))
df_reco = df_reco.sort_values(['reco_score'], ascending = [0])
df_reco[df_reco['contractingofficerbusinesssizedetermination'] == 'S: SMALL BUSINESS'].head(10)
print 'Workflow 2'
print '=' * 100
print 'Select a vendor:'
# Workflow 2 - WIP
# Select a vendor
# Other similar vendor
%%sql
select
substr(productorservicecode, 1, 4) product_id,
first(substr(productorservicecode, 7)) product_name,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
product_id
order by
sum_dollarsobligated desc
limit 10
%%sql
select
substr(agencyid, 1, 4) agency_id,
first(substr(agencyid, 7)) agency_name,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
where
productorservicecode contains 'software'
group by
agency_id
order by
sum_dollarsobligated desc
ignore case
%%sql
select
substr(agencyid, 1, 4) agency_id,
first(substr(agencyid, 7)) agency_name,
substr(principalnaicscode, 1, 6) naics_id,
first(substr(principalnaicscode, 9)) naics_name,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
where
principalnaicscode contains 'software' and
fiscal_year = 2015
group by
agency_id, naics_id
order by
sum_dollarsobligated desc
ignore case
%%sql
select
fiscal_year,
dollarsobligated,
vendorname, city, state, annualrevenue, numberofemployees,
descriptionofcontractrequirement
from
gpqueries:contracts.raw
where
agencyid contains 'transportation security administration' and
principalnaicscode contains 'computer and software stores'
ignore case
%%sql
select
agencyid,
dollarsobligated,
vendorname,
descriptionofcontractrequirement
from
gpqueries:contracts.raw
where
vendorname contains 'tableau' or
vendorname contains 'socrata' or
vendorname contains 'palantir' or
vendorname contains 'revolution analytics' or
vendorname contains 'mathworks' or
vendorname contains 'statacorp' or
vendorname contains 'mathworks'
order by
dollarsobligated desc
limit
100
ignore case
%%sql
select
agencyid,
dollarsobligated,
descriptionofcontractrequirement
from
gpqueries:contracts.raw
where
descriptionofcontractrequirement contains 'body camera'
limit
100
ignore case
%%sql
select
substr(pop_state_code, 1, 2) state_code,
first(substr(pop_state_code, 4)) state_name,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
state_code
order by
sum_dollarsobligated desc
%%sql --module gpq
define query vendor_size_by_agency
select
substr(agencyid, 1, 4) agency_id,
first(substr(agencyid, 7)) agency_name,
nth(11, quantiles(annualrevenue, 21)) vendor_median_annualrevenue,
nth(11, quantiles(numberofemployees, 21)) vendor_median_numberofemployees,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
agency_id
having
transactions > 1000 and
sum_dollarsobligated > 10e6
order by
vendor_median_annualrevenue asc
bq.Query(gpq.vendor_size_by_agency).to_dataframe()
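# nth(11, quantiles(x, 21)) in legacy BigQuery SQL picks the middle of 21 evenly spaced
# quantile boundaries, i.e. an approximate median. A hypothetical local equivalent, if the
# transaction-level rows were pulled into a dataframe `txn`, would be:
#   txn.groupby('agency_id')['annualrevenue'].median()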
%%sql
select
womenownedflag,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
womenownedflag
%%sql
select
womenownedflag, veteranownedflag, minorityownedbusinessflag,
count(*) transactions,
sum(dollarsobligated) sum_dollarsobligated
from
gpqueries:contracts.raw
group by
womenownedflag, veteranownedflag, minorityownedbusinessflag
order by
womenownedflag, veteranownedflag, minorityownedbusinessflag desc
%%sql
select
sum(if(before2015.dunsnumber is null, 1, 0)) new_vendors,
sum(if(before2015.dunsnumber is null, 0, 1)) old_vendors
from
flatten((select unique(dunsnumber) dunsnumber from gpqueries:contracts.raw where fiscal_year = 2015), dunsnumber) in2015
left join
flatten((select unique(dunsnumber) dunsnumber from gpqueries:contracts.raw where fiscal_year < 2015), dunsnumber) before2015
on before2015.dunsnumber = in2015.dunsnumber
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load training data
Step2: Build features
Step3: Because solving the sum-of-squares equation involves the quadratic formula, imaginary numbers can result in some cases due to porosities coming out negative, which is what the warning below is about.
Step4: Regress missing PE values
Step5: Apply regression model to missing PE values and merge back into dataframe
Step6: Compute UMAA for lithology model
Step7: Umaa Rhomaa plot
Step8: Here I use matrix inversion to "solve" the ternary plot for each lithologic component. Essentially each datapoint is a mix of the three components defined by the ternary diagram, with abundances of each defined by the relative distances from each endpoint. I use a GR cutoff of 40 API to determine when to use either the QTZ-CAL-DOL or QTZ-CAL-CLAY ternary solutions. In other words, it is assumed that below 40 API, there is 0% clay, and above 40 API there is 0% dolomite, and also that these four lithologic components are the only components in these rocks. Admittedly it's not a great assumption, especially since the ternary plot indicates other stuff is going on. For example the high Umaa datapoints near the Calcite endpoint may indicate some heavy minerals (e.g., pyrite) or even barite-weighted mud. The "pull" of datapoints to the northwest quadrant probably reflects some gas effect, so my lithologies in those gassy zones will be skewed.
Step9: Plot facies by formation to see if the Formation feature will be useful
Step10: Group formations by similar facies distributions
Step11: Make dummy variables from the categorical Formation feature
Step12: Compute Archie water saturation
Step14: Get distances between wells
Step15: Add latitude and longitude as features, add distances to every other well as features
Step16: First guess at facies using KNN
Step17: Fit RandomForect model and apply LeavePGroupsOut test
Step18: Apply model to validation dataset
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
from math import radians, cos, sin, asin, sqrt
import itertools
from sklearn import neighbors
from sklearn import preprocessing
from sklearn import ensemble
from sklearn.model_selection import LeaveOneGroupOut, LeavePGroupsOut
import inversion
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
df = pd.read_csv('../facies_vectors.csv')
def estimate_dphi(df):
return ((4*(df['PHIND']**2) - (df['DeltaPHI']**2))**0.5 - df['DeltaPHI']) / 2
def estimate_rhob(df):
return (2.71 - (df['DPHI_EST']/100) * 1.71)
def estimate_nphi(df):
return df['DPHI_EST'] + df['DeltaPHI']
def compute_rhomaa(df):
return (df['RHOB_EST'] - (df['PHIND'] / 100)) / (1 - df['PHIND'] / 100)
def compute_umaa(df):
return ((df['PE'] * df['RHOB_EST']) - (df['PHIND']/100 * 0.398)) / (1 - df['PHIND'] / 100)
df['DPHI_EST'] = df.apply(lambda x: estimate_dphi(x), axis=1).astype(float)
df['RHOB_EST'] = df.apply(lambda x: estimate_rhob(x), axis=1)
df['NPHI_EST'] = df.apply(lambda x: estimate_nphi(x), axis=1)
df['RHOMAA_EST'] = df.apply(lambda x: compute_rhomaa(x), axis=1)
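# The DPHI estimate above corresponds to assuming PHIND was computed as a root-mean-square
# of neutron and density porosity: PHIND = sqrt((NPHI**2 + DPHI**2) / 2), with
# DeltaPHI = NPHI - DPHI. Substituting gives a quadratic in DPHI whose positive root is
# DPHI = (sqrt(4*PHIND**2 - DeltaPHI**2) - DeltaPHI) / 2, which is what estimate_dphi()
# implements (and why a negative value under the square root triggers the warning).
# A quick round-trip check on made-up values:
nphi_chk, dphi_chk = 20.0, 12.0
chk = pd.Series({'PHIND': np.sqrt((nphi_chk ** 2 + dphi_chk ** 2) / 2.0),
                 'DeltaPHI': nphi_chk - dphi_chk})
print(estimate_dphi(chk))  # recovers ~12.0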
pe = df.dropna()
PE = pe['PE'].values
wells = pe['Well Name'].values
drop_list_pe = ['Formation', 'Well Name', 'Facies', 'Depth', 'PE', 'RELPOS']
fv_pe = pe.drop(drop_list_pe, axis=1).values
X_pe = preprocessing.StandardScaler().fit(fv_pe).transform(fv_pe)
y_pe = PE
reg = neighbors.KNeighborsRegressor(n_neighbors=40, weights='distance')
logo = LeaveOneGroupOut()
f1knn_pe = []
for train, test in logo.split(X_pe, y_pe, groups=wells):
well_name = wells[test[0]]
reg.fit(X_pe[train], y_pe[train])
score = reg.fit(X_pe[train], y_pe[train]).score(X_pe[test], y_pe[test])
print("{:>20s} {:.3f}".format(well_name, score))
f1knn_pe.append(score)
print("-Average leave-one-well-out F1 Score: %6f" % (np.mean(f1knn_pe)))
reg.fit(X_pe, y_pe)
fv_apply = df.drop(drop_list_pe, axis=1).values
X_apply = preprocessing.StandardScaler().fit(fv_apply).transform(fv_apply)
df['PE_EST'] = reg.predict(X_apply)
df.PE = df.PE.combine_first(df.PE_EST)
df['UMAA_EST'] = df.apply(lambda x: compute_umaa(x), axis=1)
df[df.GR < 125].plot(kind='scatter', x='UMAA_EST', y='RHOMAA_EST', c='GR', figsize=(8,6))
plt.ylim(3.1, 2.2)
plt.xlim(0.0, 17.0)
plt.plot([4.8, 9.0, 13.8, 4.8], [2.65, 2.87, 2.71, 2.65], c='r')
plt.plot([4.8, 11.9, 13.8, 4.8], [2.65, 3.06, 2.71, 2.65], c='g')
plt.scatter([4.8], [2.65], s=50, c='r')
plt.scatter([9.0], [2.87], s=50, c='r')
plt.scatter([13.8], [2.71], s=50, c='r')
plt.scatter([11.9], [3.06], s=50, c='g')
plt.text(2.8, 2.65, 'Quartz', backgroundcolor='w')
plt.text(14.4, 2.71, 'Calcite', backgroundcolor='w')
plt.text(9.6, 2.87, 'Dolomite', backgroundcolor='w')
plt.text(12.5, 3.06, 'Illite', backgroundcolor='w')
plt.text(7.0, 2.55, "gas effect", ha="center", va="center", rotation=-55,
size=8, bbox=dict(boxstyle="larrow,pad=0.3", fc="pink", ec="red", lw=2))
plt.text(15.0, 2.78, "barite?", ha="center", va="center", rotation=0,
size=8, bbox=dict(boxstyle="rarrow,pad=0.3", fc="yellow", ec="orange", lw=2))
# QTZ-CAL-CLAY
ur1 = inversion.UmaaRhomaa()
ur1.set_dol_uma(11.9)
ur1.set_dol_rhoma(3.06)
# QTZ-CAL-DOL
ur2 = inversion.UmaaRhomaa()
df['UR_QTZ'] = np.nan
df['UR_CLY'] = np.nan
df['UR_CAL'] = np.nan
df['UR_DOL'] = np.nan
df.ix[df.GR >= 40, 'UR_QTZ'] = df.ix[df.GR >= 40].apply(lambda x: ur1.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)
df.ix[df.GR >= 40, 'UR_CLY'] = df.ix[df.GR >= 40].apply(lambda x: ur1.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1)
df.ix[df.GR >= 40, 'UR_CAL'] = df.ix[df.GR >= 40].apply(lambda x: ur1.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)
df.ix[df.GR >= 40, 'UR_DOL'] = 0
df.ix[df.GR < 40, 'UR_QTZ'] = df.ix[df.GR < 40].apply(lambda x: ur2.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)
df.ix[df.GR < 40, 'UR_DOL'] = df.ix[df.GR < 40].apply(lambda x: ur2.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1)
df.ix[df.GR < 40, 'UR_CAL'] = df.ix[df.GR < 40].apply(lambda x: ur2.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)
df.ix[df.GR < 40, 'UR_CLY'] = 0
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
fms = df.Formation.unique()
fig, ax = plt.subplots(int(len(fms) / 2), 2, sharey=True, sharex=True, figsize=(5,10))
for i, fm in enumerate(fms):
facies_counts = df[df.Formation == fm]['Facies'].value_counts().sort_index()
colors = [facies_colors[i-1] for i in facies_counts.index]
ax[int(i/2), i%2].bar(facies_counts.index, height=facies_counts, color=colors)
ax[int(i/2), i%2].set_title(fm, size=8)
fm_groups = [['A1 SH', 'B1 SH', 'B2 SH', 'B3 SH', 'B4 SH'],
['B5 SH', 'C SH'],
['A1 LM', 'C LM'],
['B1 LM', 'B3 LM', 'B4 LM'],
['B2 LM', 'B5 LM']]
fm_group_dict = {fm:i for i, l in enumerate(fm_groups) for fm in l}
df['FM_GRP'] = df.Formation.map(fm_group_dict)
df = pd.get_dummies(df, prefix='FM_GRP', columns=['FM_GRP'])
def archie(df):
return np.sqrt(0.08 / ((df.PHIND ** 2) * (10 ** df.ILD_log10)))
df['SW'] = df.apply(lambda x: archie(x), axis=1)
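# This is Archie's equation, Sw = ((a * Rw) / (phi**m * Rt)) ** (1 / n), with the lumped
# constant a * Rw taken as 0.08, m = n = 2, Rt = 10**ILD_log10 and phi = PHIND as logged.
# The 0.08 and the exponents are assumed values here, not calibrated ones.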
# modified from jesper
latlong = pd.DataFrame({"SHRIMPLIN": [37.978076, -100.987305], #
"ALEXANDER D": [37.6747257, -101.1675259], #
"SHANKLE": [38.0633799, -101.3920543], #
"LUKE G U": [37.4499614, -101.6121913], #
"KIMZEY A": [37.12289, -101.39697], #
"CROSS H CATTLE": [37.9105826, -101.6464517], #
"NOLAN": [37.7866294, -101.0451641], #?
"NEWBY": [37.3172442, -101.3546995], #
"CHURCHMAN BIBLE": [37.3497658, -101.1060761], #?
"STUART": [37.4857262, -101.1391063], #
"CRAWFORD": [37.1893654, -101.1494994], #?
"Recruit F9": [0,0]})
def haversine(lon1, lat1, lon2, lat2):
    """Calculate the great circle distance between two points
    on the earth (specified in decimal degrees)."""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = 6367 * c
return km
def get_lat(df):
return latlong[df['Well Name']][0]
def get_long(df):
return latlong[df['Well Name']][1]
df['LAT'] = df.apply(lambda x: get_lat(x), axis=1)
df['LON'] = df.apply(lambda x: get_long(x), axis=1)
dist_dict = {}
for k in latlong:
dict_name = k + '_DISTANCES'
k_dict = {}
lat1 = latlong[k][0]
lon1 = latlong[k][1]
for l in latlong:
lat2 = latlong[l][0]
lon2 = latlong[l][1]
if l == 'Recruit F9':
dist = haversine(0, 0, 0, 0)
elif k == "Recruit F9":
dist = haversine(0, 0, 0, 0)
else:
dist = haversine(lon1, lat1, lon2, lat2)
k_dict[l] = dist
dist_dict[dict_name] = k_dict
for i in dist_dict:
df[i] = np.nan
for j in dist_dict[i]:
df.loc[df['Well Name'] == j, i] = dist_dict[i][j]
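# Spot-check of the distance helper on two wells with known coordinates (result in km).
print(haversine(latlong['SHRIMPLIN'][1], latlong['SHRIMPLIN'][0],
                latlong['SHANKLE'][1], latlong['SHANKLE'][0]))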
df0 = df[(df.PHIND <= 40) & (df['Well Name'] != 'CROSS H CATTLE')]
facies = df0['Facies'].values
wells = df0['Well Name'].values
keep_list0 = ['GR', 'ILD_log10', 'PHIND', 'PE', 'NM_M', 'RELPOS', 'RHOB_EST',
'UR_CLY', 'UR_CAL']
fv0 = df0[keep_list0].values
clf0 = neighbors.KNeighborsClassifier(n_neighbors=56, weights='distance')
X0 = preprocessing.StandardScaler().fit(fv0).transform(fv0)
y0 = facies
logo = LeaveOneGroupOut()
f1knn0 = []
clf0.fit(X0, y0)
X1 = preprocessing.StandardScaler().fit(df[keep_list0].values).transform(df[keep_list0].values)
knn_pred = clf0.predict(X1)
df['KNN_FACIES'] = knn_pred
df1 = df.dropna()
df1 = df1[(df1['Well Name'] != 'CROSS H CATTLE') & (df1.PHIND < 40.0)]
facies = df1['Facies'].values
wells = df1['Well Name'].values
drop_list = ['Formation', 'Well Name', 'Facies', 'Depth', 'DPHI_EST', 'NPHI_EST', 'DeltaPHI',
'UMAA_EST', 'UR_QTZ', 'PE_EST', 'Recruit F9_DISTANCES', 'KIMZEY A_DISTANCES',
'NEWBY_DISTANCES', 'ALEXANDER D_DISTANCES', 'NOLAN_DISTANCES', 'FM_GRP_3']
fv = df1.drop(drop_list, axis=1).values
X = preprocessing.StandardScaler().fit(fv).transform(fv)
y = facies
ne_grid = [150]
mf_grid = [10]
md_grid = [None]
msl_grid = [5]
mss_grid = [20]
keys = ['n_estimators', 'max_features', 'max_depth', 'min_samples_leaf', 'min_samples_split']
param_sets = itertools.product(ne_grid, mf_grid, md_grid, msl_grid, mss_grid)
param_grid = [dict(zip(keys, i)) for i in param_sets]
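# Note (added): each grid above holds a single value, so this expands to just one
# parameter dictionary; widening any of the lists enumerates every combination.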
clf_list = []
for i, d in enumerate(param_grid):
clf = ensemble.RandomForestClassifier(n_estimators=d['n_estimators'],
class_weight='balanced',
min_samples_leaf=d['min_samples_leaf'],
min_samples_split=d['min_samples_split'],
max_features=d['max_features'],
max_depth=d['max_depth'],
n_jobs=-1)
lpgo = LeavePGroupsOut(n_groups=2)
f1rfc = []
for train, test in lpgo.split(X, y, groups=wells):
clf.fit(X[train], y[train])
score = clf.fit(X[train], y[train]).score(X[test], y[test])
f1rfc.append(score)
print("Average leave-two-wells-out F1 Score: %6f" % (np.mean(f1rfc)))
clf_list.append((clf, np.mean(f1rfc)))
np.max([i[1] for i in clf_list])
list(zip(df1.drop(drop_list, axis=1).columns, clf.feature_importances_))
# refit model to entire training set
clf.fit(X, y)
# load validation data
vd = pd.read_csv('../validation_data_nofacies.csv')
# compute extra log data features
vd['DPHI_EST'] = vd.apply(lambda x: estimate_dphi(x), axis=1).astype(float)
vd['RHOB_EST'] = vd.apply(lambda x: estimate_rhob(x), axis=1)
vd['NPHI_EST'] = vd.apply(lambda x: estimate_nphi(x), axis=1)
vd['RHOMAA_EST'] = vd.apply(lambda x: compute_rhomaa(x), axis=1)
# predict missing PE values
drop_list_vd = ['Formation', 'Well Name', 'Depth', 'PE', 'RELPOS']
fv_vd = vd.drop(drop_list_vd, axis=1).values
X_vd = preprocessing.StandardScaler().fit(fv_vd).transform(fv_vd)
vd['PE_EST'] = reg.predict(X_vd)
vd.PE = vd.PE.combine_first(vd.PE_EST)
vd['UMAA_EST'] = vd.apply(lambda x: compute_umaa(x), axis=1)
# Estimate lithology using Umaa Rhomaa solution
vd['UR_QTZ'] = np.nan
vd['UR_CLY'] = np.nan
vd['UR_CAL'] = np.nan
vd['UR_DOL'] = np.nan
vd.ix[vd.GR >= 40, 'UR_QTZ'] = vd.ix[vd.GR >= 40].apply(lambda x: ur1.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)
vd.ix[vd.GR >= 40, 'UR_CLY'] = vd.ix[vd.GR >= 40].apply(lambda x: ur1.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1)
vd.ix[vd.GR >= 40, 'UR_CAL'] = vd.ix[vd.GR >= 40].apply(lambda x: ur1.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)
vd.ix[vd.GR >= 40, 'UR_DOL'] = 0
vd.ix[vd.GR < 40, 'UR_QTZ'] = vd.ix[vd.GR < 40].apply(lambda x: ur2.get_qtz(x.UMAA_EST, x.RHOMAA_EST), axis=1)
vd.ix[vd.GR < 40, 'UR_DOL'] = vd.ix[vd.GR < 40].apply(lambda x: ur2.get_dol(x.UMAA_EST, x.RHOMAA_EST), axis=1)
vd.ix[vd.GR < 40, 'UR_CAL'] = vd.ix[vd.GR < 40].apply(lambda x: ur2.get_cal(x.UMAA_EST, x.RHOMAA_EST), axis=1)
vd.ix[vd.GR < 40, 'UR_CLY'] = 0
# Formation grouping
vd['FM_GRP'] = vd.Formation.map(fm_group_dict)
vd = pd.get_dummies(vd, prefix='FM_GRP', columns=['FM_GRP'])
# Water saturation
vd['SW'] = vd.apply(lambda x: archie(x), axis=1)
# Lat-long features
vd['LAT'] = vd.apply(lambda x: get_lat(x), axis=1)
vd['LON'] = vd.apply(lambda x: get_long(x), axis=1)
for i in dist_dict:
vd[i] = np.nan
for j in dist_dict[i]:
vd.loc[vd['Well Name'] == j, i] = dist_dict[i][j]
# Compute first guess at facies with KNN
X2 = preprocessing.StandardScaler().fit(vd[keep_list0].values).transform(vd[keep_list0].values)
vd['KNN_FACIES'] = clf0.predict(X2)
# Apply final model
drop_list1 = ['Formation', 'Well Name', 'Depth', 'DPHI_EST', 'NPHI_EST', 'DeltaPHI',
'UMAA_EST', 'UR_QTZ', 'PE', 'Recruit F9_DISTANCES', 'KIMZEY A_DISTANCES',
'NEWBY_DISTANCES', 'ALEXANDER D_DISTANCES', 'NOLAN_DISTANCES', 'FM_GRP_3']
fv_vd1 = vd.drop(drop_list1, axis=1).values
X_vd1 = preprocessing.StandardScaler().fit(fv_vd1).transform(fv_vd1)
vd_predicted_facies = clf.predict(X_vd1)
vd['Facies'] = vd_predicted_facies
vd.to_csv('RFC_submission_3_predictions.csv')
vd_predicted_facies
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The Basics
Step2: Try It Yourself
Step3: Variables can be reassigned
Step4: The ability to reassign variable values becomes important when iterating through groups of objects for batch processing or other purposes. In the example below, the value of b is dynamically updated every time the while loop is executed
Step5: Variable data types can be inferred, so Python does not require us to declare the data type of a variable on assignment.
Step6: is equivalent to
Step7: There are cases when we may want to declare the data type, for example to assign a different data type from the default that will be inferred. Concatenating strings provides a good example.
Step8: Above, Python has inferred the type of the variable pizzas to be an integer. Since strings can only be concatenated with other strings, our print statement generates an error. There are two ways we can resolve the error
Step9: Given the following variable assignments
Step11: Lists
Step12: There are multiple ways to create a list
Step13: We can inspect our lists
Step14: The above output for typed_list may seem odd. Referring to the documentation, we see that the argument to the type constructor is an iterable, which according to the documentation is "An object capable of returning its members one at a time." In our constructor statement above
Step15: Lists in Python are
Step16: Info on additional list methods is available at https
Step17: Operators
|
<ASSISTANT_TASK:>
Python Code:
# type your help commands in the box and
# execute the code in the box by typing shift-enter
# (hold down the shift key while hitting the enter/return key)
# The interpreter can be used as a calculator, and can also echo or concatenate strings.
3 + 3
3 * 3
3 ** 3
3 / 2 # classic division - output is a floating point number
# Use quotes around strings
'dogs'
# + operator can be used to concatenate strings
'dogs' + "cats"
print('Hello World!')
a = 5
b = 10
a + b
b = 38764289.1097
a + b
a = 5
b = 10
while b > a:
print("b="+str(b))
b = b-1
a = 5
type(a)
a = int(5)
type(a)
c = 'dogs'
print(type(c))
c = str('dogs')
print(type(c))
customer = 'Carol'
pizzas = 2
print(customer + ' ordered ' + pizzas + ' pizzas.')
customer = 'Carol'
pizzas = str(2)
print(customer + ' ordered ' + pizzas + ' pizzas.')
customer = 'Carol'
pizzas = 2
print(customer + ' ordered ' + str(pizzas) + ' pizzas.')
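# A third option (added aside, assuming Python 3.6+): f-strings do the conversion for
# us, so no explicit str() call is needed.
print(f'{customer} ordered {pizzas} pizzas.')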
# Fun with types
this = 12
that = 15
the_other = "27"
my_stuff = [this,that,the_other,["a","b","c",4]]
more_stuff = {
"item1": this,
"item2": that,
"item3": the_other,
"item4": my_stuff
}
this + that
# this won't work ...
# this + that + the_other
# ... but this will ...
this + that + int(the_other)
# ...and this too
str(this) + str(that) + the_other
# Separate list items with commas!
number_list = [1, 2, 3, 4, 5]
string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']
combined_list = [1, 2, 'oranges', 3.14, 'peaches', 'grapes', 99.19876]
# Nested lists - lists of lists - are allowed.
list_of_lists = [[1, 2, 3], ['oranges', 'grapes', 8], [['small list'], ['bigger', 'list', 55], ['url_1', 'url_2']]]
# Create an empty list
empty_list = []
# As we did above, by using square brackets around a comma-separated sequence of items
new_list = [1, 2, 3]
# Using the type constructor
constructed_list = list('purple')
# Using a list comprehension
result_list = [i for i in range(1, 20)]
empty_list
new_list
result_list
constructed_list
constructed_list_int = list(123)
constructed_list_str = list('123')
constructed_list_str
ordered = [3, 2, 7, 1, 19, 0]
ordered
# There is a 'sort' method for sorting list items as needed:
ordered.sort()
ordered
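# Aside (added): the built-in sorted() returns a new sorted list and leaves the
# original list untouched, which is useful when the original order still matters.
sorted([3, 2, 7, 1, 19, 0])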
string_list = ['apples', 'oranges', 'pears', 'grapes', 'pineapples']
string_list[0]
# We can use positions to 'slice' or selection sections of a list:
string_list[3:]
string_list[:3]
string_list[1:4]
# If we don't know the position of a list item, we can use the 'index()' method to find out.
# Note that in the case of duplicate list items, this only returns the position of the first one:
string_list.index('pears')
string_list.append('oranges')
string_list
string_list.index('oranges')
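# Aside (added): count() reports how many times an item occurs, which makes the
# "index() only returns the first match" behaviour above easier to see.
string_list.count('oranges')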
# one more time with lists and dictionaries
list_ex1 = my_stuff[0] + my_stuff[1] + int(my_stuff[2])
print(list_ex1)
list_ex2 = (
str(my_stuff[0])
+ str(my_stuff[1])
+ my_stuff[2]
+ my_stuff[3][0]
)
print(list_ex2)
dict_ex1 = (
more_stuff['item1']
+ more_stuff['item2']
+ int(more_stuff['item3'])
)
print(dict_ex1)
dict_ex2 = (
str(more_stuff['item1'])
+ str(more_stuff['item2'])
+ more_stuff['item3']
)
print(dict_ex2)
# Now try it yourself ...
# print out the phrase "The answer: 42" using the following
# variables and one or more of your own and the 'print()' function
# (remember spaces are characters as well)
start = "The"
answer = 42
# read data into a list of dictionaries
import csv
# create an empty list that will be filled with the rows of data from the CSV as dictionaries
csv_content = []
# open and loop through each line of the csv file to populate our data file
with open('aaj1945_DataS1_Egg_shape_by_species_v2.csv') as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader: # process each row of the csv file
csv_content.append(row)
print(csv_content[0].keys())
#print()
#print(csv_content[0])
# extract content of each "column" individually
order = []
for item in csv_content:
try:
order.append(item['Order'])
except:
order.append(None)
family = []
for item in csv_content:
try:
family.append(item['Family'])
except:
family.append(None)
species = []
for item in csv_content:
try:
species.append(item['Species'])
except:
species.append(None)
asymmetry = []
for item in csv_content:
try:
asymmetry.append(item['Asymmetry'])
except:
asymmetry.append(None)
ellipticity = []
for item in csv_content:
try:
ellipticity.append(item['Ellipticity'])
except:
ellipticity.append(None)
avgLength = []
for item in csv_content:
try:
avgLength.append(item['AvgLength (cm)'])
except:
avgLength.append(None)
noImages = []
for item in csv_content:
try:
noImages.append(item['Number of images'])
except:
noImages.append(None)
noEggs = []
for item in csv_content:
try:
noEggs.append(item['Number of eggs'])
except:
noEggs.append(None)
print(order[0:3])
print(family[0:3])
print(species[0:3])
print(asymmetry[0:3])
print(ellipticity[0:3])
print(avgLength[0:3])
print(noImages[0:3])
print(noEggs[0:3])
# define a function that can extract a named column from a named list of dictionaries
def extract_column(source_list, source_column):
new_list = []
for item in source_list:
try:
new_list.append(item[source_column])
except:
new_list.append(None)
print(source_column + ": " + ", ".join(new_list[0:3]))
return(new_list)
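# Caveat (added): the join above assumes the first three values are strings; if a
# column were missing and None had been appended, they would need str() conversion first.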
order = extract_column(csv_content, 'Order')
family = extract_column(csv_content, 'Family')
species = extract_column(csv_content, 'Species')
asymmetry = extract_column(csv_content, 'Asymmetry')
ellipticity = extract_column(csv_content, 'Ellipticity')
avgLength = extract_column(csv_content, 'AvgLength (cm)')
noImages = extract_column(csv_content, 'Number of images')
noEggs = extract_column(csv_content, 'Number of eggs')
print()
print(order[0:3])
print(family[0:3])
print(species[0:3])
print(asymmetry[0:3])
print(ellipticity[0:3])
print(avgLength[0:3])
print(noImages[0:3])
print(noEggs[0:3])
# use the extract_column function in a loop to automatically extract all of the columns from a from the list
# of dictionaries to create a dictionary representing each column of values
columns = {}
for column in csv_content[0].keys():
columns[column] = extract_column(csv_content, column)
columns
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Contour plots of 2d wavefunctions
Step3: The contour, contourf, pcolor and pcolormesh functions of Matplotlib can be used for effective visualizations of 2d scalar fields. Use the Matplotlib documentation to learn how to use these functions along with the numpy.meshgrid function to visualize the above wavefunction
Step4: Next make a visualization using one of the pcolor functions
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
def well2d(x, y, nx, ny, L=1.0):
"""Compute the 2d quantum well wave function."""
# YOUR CODE HERE
#raise NotImplementedError()
Psi = (2/L)*np.sin((nx*np.pi*x)/L)*np.sin((ny*np.pi*y)/L)
return Psi
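# Quick check (added, illustrative): |Psi|**2 integrated over the box should be ~1.
# xg, yg = np.meshgrid(np.linspace(0, 1, 200), np.linspace(0, 1, 200))
# print(np.trapz(np.trapz(well2d(xg, yg, 3, 2)**2, xg[0]), yg[:, 0])) # expect ~1.0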
psi = well2d(np.linspace(0,1,10), np.linspace(0,1,10), 1, 1)
assert len(psi)==10
assert psi.shape==(10,)
# YOUR CODE HERE
#raise NotImplementedError()
L=1.0
x=np.linspace(0,1,100)
y=np.linspace(0,1,100)
X,Y = np.meshgrid(x,y)
z= well2d(X,Y,3,2,L)
C = plt.contourf(X,Y,z, cmap='PuBu')
plt.xlim(0,1)
plt.ylim(0,1)
plt.box(False)
plt.title('Wave Function for a 2D Well for nx=3, ny=2')
plt.tick_params(axis='y', right='off', direction='out')
plt.tick_params(axis='x', top='off', direction='out')
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar(C,shrink=0.7);
assert True # use this cell for grading the contour plot
# YOUR CODE HERE
#raise NotImplementedError()
a=plt.pcolormesh(X,Y,well2d(X,Y,3,2,L), cmap='ocean')
plt.box(False)
plt.title('Wave Function in 2D Well for nx=3, ny=2')
plt.tick_params(axis='y', right='off', direction='out')
plt.tick_params(axis='x', top='off', direction='out')
plt.xlim(0,1)
plt.ylim(0,1)
plt.xlabel('x')
plt.ylabel('y')
plt.colorbar(a,shrink=0.7);
assert True # use this cell for grading the pcolor plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: once we have a smiles representation of a molecule, we can convert it into a molecular object with RDKit
Step2: Once we have a molecular object, we can generate many kinds of bitvector representations or fingerprints.
Step3: check_name is based on a curated data file of known cations and anions
Step4: A Few Useful Datafiles
Step5: aggregate_data returns a devmodel object that contains a pandas dataframe of the raw data and a data summary
Step6: and has scaled and centered 2D features from rdkit
Step7: The purpose of the data summary is to provide historical information when ML models are ported over into GAINS. Once we have a devmodel the underlying data can be interrogated
Step8: Build Models with Scikit-Learn
Step9: We can then see how the model is performing with matplotlib
Step10: Build Models with Keras
Step11: Save Models for GAINS
|
<ASSISTANT_TASK:>
Python Code:
import salty
smiles = salty.check_name("1-butyl-3-methylimidazolium")
print(smiles)
%matplotlib inline
from rdkit import Chem
from rdkit.Chem import Draw
fig = Draw.MolToMPL(Chem.MolFromSmiles(smiles),figsize=(5,5))
ms = [Chem.MolFromSmiles("OC(=O)C(N)Cc1ccc(O)cc1"), Chem.MolFromSmiles(smiles)]
fig=Draw.MolsToGridImage(ms[:],molsPerRow=2,subImgSize=(400,200))
fig.save('compare.png')
from rdkit.Chem.AtomPairs import Pairs
from rdkit.Chem import AllChem
from rdkit.Chem.Fingerprints import FingerprintMols
from rdkit import DataStructs
radius = 2
fpatom = [Pairs.GetAtomPairFingerprintAsBitVect(x) for x in ms]
print("atom pair score: {:8.4f}".format(DataStructs.TanimotoSimilarity(fpatom[0], fpatom[1])))
fpmorg = [AllChem.GetMorganFingerprint(ms[0],radius,useFeatures=True),
AllChem.GetMorganFingerprint(ms[1],radius,useFeatures=True)]
fptopo = [FingerprintMols.FingerprintMol(x) for x in ms]
print("morgan score: {:11.4f}".format(DataStructs.TanimotoSimilarity(fpmorg[0], fpmorg[1])))
print("topological score: {:3.4f}".format(DataStructs.TanimotoSimilarity(fptopo[0], fptopo[1])))
print("Cations in database: {}".format(len(salty.load_data("cationInfo.csv"))))
print("Anions in database: {}".format(len(salty.load_data("anionInfo.csv"))))
rawdata = salty.load_data("cpt.csv")
rawdata.columns
devmodel = salty.aggregate_data(['cpt', 'density']) # other option is viscosity
devmodel.Data_summary
devmodel.Data.columns
import matplotlib.pyplot as plt
import numpy as np
df = devmodel.Data
with plt.style.context('seaborn-whitegrid'):
fig=plt.figure(figsize=(5,5), dpi=300)
ax=fig.add_subplot(111)
scat = ax.scatter(np.exp(df["Heat capacity at constant pressure, J/K/mol"]), np.exp(
df["Specific density, kg/m<SUP>3</SUP>"]),
marker="*", c=df["Temperature, K"]/max(df["Temperature, K"]), cmap="Purples")
plt.colorbar(scat)
ax.grid()
ax.set_ylabel("Density $(kg/m^3)$")
ax.set_xlabel("Heat Capacity $(J/K/mol)$")
from sklearn.model_selection import cross_val_score
from sklearn.neural_network import MLPRegressor
from sklearn.multioutput import MultiOutputRegressor
model = MLPRegressor(activation='relu', alpha=0.92078, batch_size='auto',
beta_1=0.9, beta_2=0.999, early_stopping=False, epsilon=1e-08,
hidden_layer_sizes=75, learning_rate='constant',
learning_rate_init=0.001, max_iter=1e8, momentum=0.9,
nesterovs_momentum=True, power_t=0.5, random_state=None,
shuffle=True, solver='lbfgs', tol=1e-08, validation_fraction=0.1,
verbose=False, warm_start=False)
multi_model = MultiOutputRegressor(model)
X_train, Y_train, X_test, Y_test = salty.devmodel_to_array\
(devmodel, train_fraction=0.8)
multi_model.fit(X_train, Y_train)
X=X_train
Y=Y_train
with plt.style.context('seaborn-whitegrid'):
fig=plt.figure(figsize=(5,2.5), dpi=300)
ax=fig.add_subplot(122)
ax.plot([0,2000], [0,2000], linestyle="-", label=None, c="black", linewidth=1)
ax.plot(np.exp(Y)[:,0],np.exp(multi_model.predict(X))[:,0],\
marker="*",linestyle="",alpha=0.4)
ax.set_ylabel("Predicted $C_{pt}$ $(K/J/mol)$")
ax.set_xlabel("Actual $C_{pt}$ $(K/J/mol)$")
ax.text(0.1,.9,"R: {0:5.3f}".format(multi_model.score(X,Y)), transform = ax.transAxes)
plt.xlim(200,1700)
plt.ylim(200,1700)
ax.grid()
ax=fig.add_subplot(121)
ax.plot([0,2000], [0,2000], linestyle="-", label=None, c="black", linewidth=1)
ax.plot(np.exp(Y)[:,1],np.exp(multi_model.predict(X))[:,1],\
marker="*",linestyle="",alpha=0.4)
ax.set_ylabel("Predicted Density $(kg/m^3)$")
ax.set_xlabel("Actual Density $(kg/m^3)$")
plt.xlim(850,1600)
plt.ylim(850,1600)
ax.grid()
plt.tight_layout()
from keras.layers import Dense, Dropout, Input
from keras.models import Model, Sequential
from keras.optimizers import Nadam
X_train, Y_train, X_test, Y_test = salty.devmodel_to_array\
(devmodel, train_fraction=0.8)
model = Sequential()
model.add(Dense(75, activation='relu', input_dim=X_train.shape[1]))
model.add(Dropout(0.5))
model.add(Dense(2, activation='linear'))
model.compile(optimizer="adam",
loss="mean_squared_error",
metrics=['mse'])
model.fit(X_train,Y_train,epochs=1000,verbose=False)
X=X_train
Y=Y_train
with plt.style.context('seaborn-whitegrid'):
fig=plt.figure(figsize=(5,2.5), dpi=300)
ax=fig.add_subplot(122)
ax.plot([0,2000], [0,2000], linestyle="-", label=None, c="black", linewidth=1)
ax.plot(np.exp(Y)[:,0],np.exp(model.predict(X))[:,0],\
marker="*",linestyle="",alpha=0.4)
ax.set_ylabel("Predicted $C_{pt}$ $(K/J/mol)$")
ax.set_xlabel("Actual $C_{pt}$ $(K/J/mol)$")
plt.xlim(200,1700)
plt.ylim(200,1700)
ax.grid()
ax=fig.add_subplot(121)
ax.plot([0,2000], [0,2000], linestyle="-", label=None, c="black", linewidth=1)
ax.plot(np.exp(Y)[:,1],np.exp(model.predict(X))[:,1],\
marker="*",linestyle="",alpha=0.4)
ax.set_ylabel("Predicted Density $(kg/m^3)$")
ax.set_xlabel("Actual Density $(kg/m^3)$")
plt.xlim(850,1600)
plt.ylim(850,1600)
ax.grid()
plt.tight_layout()
model.save("_static/cpt_density_qspr.h5")
devmodel.Data_summary.to_csv("_static/cpt_density_summ.csv")
devmodel.Coef_data.to_csv("_static/cpt_density_desc.csv", index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function make_string is a helper function that is used to simplify the implementation of the method __str__.
Step2: Graphical Representation
Step3: The method $t.\texttt{toDot}()$ takes a binary trie $t$ and returns a graph that depicts the tree $t$.
Step4: The method $t.\texttt{collectIDs}(d)$ takes a binary trie $t$ and a dictionary $d$ and updates the dictionary so that the following holds
Step5: Defining $\texttt{Nil}$ and $\texttt{Node}(p, v, l, r)$ as Classes
Step6: The class Node represents a heap of the form $\texttt{Node}(p,v,l,r)$ where
Step7: Implementing the Method top
Step8: For the class Node, the function top is specified via the following equation
Step9: Implementing the method insert
Step10: $p_{\mathrm{top}} \leq p \;\wedge\; l.\texttt{count}() \leq r.\texttt{count}() \;\rightarrow\;
Step11: Implementing the Method remove
Step12: $\texttt{Node}(p,v,\texttt{Nil},r).\texttt{remove}() = r$
Step13: Testing
Step14: Heapsort
|
<ASSISTANT_TASK:>
Python Code:
class Heap:
sNodeCount = 0
def __init__(self):
Heap.sNodeCount += 1
self.mID = str(Heap.sNodeCount)
def getID(self):
return self.mID # used only by graphviz
def _make_string(self, attributes):
# get the name of the class of the object self
name = self.__class__.__name__
# map the function __str__ to all attributes and join them with a comma
return f"{name}({', '.join(map(str, [getattr(self, at) for at in attributes]))})"
Heap._make_string = _make_string
import graphviz as gv
def toDot(self):
dot = gv.Digraph(node_attr={'shape': 'record', 'style': 'rounded'})
nodeDict = {}
self._collectIDs(nodeDict)
for n, t in nodeDict.items():
if isinstance(t, Nil):
dot.node(n, label='', shape='point')
elif isinstance(t, Node):
if t.mValue != None:
dot.node(n, label='{' + str(t.mPriority) + '|' + str(t.mValue) + '}')
else:
dot.node(n, label= str(t.mPriority))
else:
assert False, f'Unknown node {t}'
for n, t in nodeDict.items():
if isinstance(t, Node):
dot.edge(n, t.mLeft .getID())
dot.edge(n, t.mRight.getID())
return dot
Heap.toDot = toDot
def _collectIDs(self, nodeDict):
nodeDict[self.getID()] = self
if isinstance(self, Node):
self.mLeft ._collectIDs(nodeDict)
self.mRight._collectIDs(nodeDict)
Heap._collectIDs = _collectIDs
class Nil(Heap):
def _count(self):
return 0
def __str__(self):
return 'Nil()'
class Node(Heap):
def __init__(self, priority, value, left, right):
Heap.__init__(self)
self.mPriority = priority
self.mValue = value
self.mLeft = left
self.mRight = right
self.mCount = left._count() + right._count() + 1
def _extract(self):
return self.mPriority, self.mValue, self.mLeft, self.mRight
def _count(self):
return self.mCount
def __str__(self):
return _make_string(self, ['mPriority', 'mValue', 'mLeft', 'mRight'])
def top(self):
return None
Nil.top = top
def top(self):
return self.mPriority, self.mValue
Node.top = top
del top
def insert(self, p, v):
return Node(p, v, Nil(), Nil())
Nil.insert = insert
def insert(self, p, v):
p_top, v_top, l, r = self._extract()
if p_top <= p:
if l._count() <= r._count():
return Node(p_top, v_top, l.insert(p, v), r)
else:
return Node(p_top, v_top, l, r.insert(p, v))
else:
if l._count() <= r._count():
return Node(p, v, l.insert(p_top, v_top), r)
else:
return Node(p, v, l, r.insert(p_top, v_top))
Node.insert = insert
del insert
def remove(self):
return self
Nil.remove = remove
def remove(self):
p, v, l, r = self._extract()
if isinstance(l, Nil):
return r
if isinstance(r, Nil):
return l
p1, v1, l1, r1 = l._extract()
p2, v2, l2, r2 = r._extract()
if p1 <= p2:
return Node(p1, v1, l.remove(), r)
else:
return Node(p2, v2, l, r.remove())
Node.remove = remove
del remove
h = Nil()
h.toDot()
h = h.insert(2, 'a')
h.toDot()
h = h.insert(1, 'b')
h.toDot()
h = h.insert(7, 'c')
h.toDot()
h = h.insert(0, 'd')
h.toDot()
h = h.insert(8, 'e')
h.toDot()
h = h.insert(3, 'f')
h.toDot()
h = h.insert(4, 'g')
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
h = h.remove()
h.toDot()
for i in range(1, 31+1):
h = h.insert(i, None)
h.toDot()
def heap_sort(L):
H = Nil()
for p in L:
H = H.insert(p, None)
S = []
display(H.toDot())
while isinstance(H, Node):
p, _ = H.top()
S.append(p)
H = H.remove()
return S
heap_sort([77, 54, 68, 7, 13, 1, 4, 5, 6, 3, 12, 67, 12, 14, 23, 54, 67])
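# Sanity check (added, illustrative): the heap-based sort should agree with the
# built-in sort for any input list.
# assert heap_sort([3, 1, 2, 2, 0]) == sorted([3, 1, 2, 2, 0])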
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: A Look at Each Host Galaxy's Cepheids
Step2: OK, now we are all set up! Let's plot one of the datasets.
Step3: Q
Step4: Q
Step5: Now, let's set up a suitable parameter grid and compute the posterior PDF!
Step6: Now, plot, with confidence contours
Step7: Are these inferred parameters sensible?
Step8: OK, this looks good! Later in the course we will do some more extensive model checking.
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (15.0, 8.0)
# First, we need to know what's in the data file.
!head -15 R11ceph.dat
class Cepheids(object):
def __init__(self,filename):
# Read in the data and store it in this master array:
self.data = np.loadtxt(filename)
self.hosts = self.data[:,1].astype('int').astype('str')
# We'll need the plotting setup to be the same each time we make a plot:
colornames = ['red','orange','yellow','green','cyan','blue','violet','magenta','gray']
self.colors = dict(zip(self.list_hosts(), colornames))
self.xlimits = np.array([0.3,2.3])
self.ylimits = np.array([30.0,17.0])
return
def list_hosts(self):
# The list of (9) unique galaxy host names:
return np.unique(self.hosts)
def select(self,ID):
# Pull out one galaxy's data from the master array:
index = (self.hosts == str(ID))
self.mobs = self.data[index,2]
self.merr = self.data[index,3]
self.logP = np.log10(self.data[index,4])
return
def plot(self,X):
# Plot all the points in the dataset for host galaxy X.
ID = str(X)
self.select(ID)
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.errorbar(self.logP, self.mobs, yerr=self.merr, fmt='.', ms=7, lw=1, color=self.colors[ID], label='NGC'+ID)
plt.xlabel('$\\log_{10} P / {\\rm days}$',fontsize=20)
plt.ylabel('${\\rm magnitude (AB)}$',fontsize=20)
plt.xlim(self.xlimits)
plt.ylim(self.ylimits)
plt.title('Cepheid Period-Luminosity (Riess et al 2011)',fontsize=20)
return
def overlay_straight_line_with(self,a=0.0,b=24.0):
# Overlay a straight line with gradient a and intercept b.
x = self.xlimits
y = a*x + b
plt.plot(x, y, 'k-', alpha=0.5, lw=2)
plt.xlim(self.xlimits)
plt.ylim(self.ylimits)
return
def add_legend(self):
plt.legend(loc='upper left')
return
data = Cepheids('R11ceph.dat')
print(data.colors)
data.plot(4258)
# for ID in data.list_hosts():
# data.plot(ID)
data.overlay_straight_line_with(a=-2.0,b=24.0)
data.add_legend()
# import cepheids_pgm
# cepheids_pgm.simple()
from IPython.display import Image
Image(filename="cepheids_pgm.png")
def log_likelihood(logP,mobs,merr,a,b):
return -0.5*np.sum((mobs - a*logP -b)**2/(merr**2))
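# Note (added): the Gaussian normalisation terms are dropped here; with fixed
# measurement errors merr they only add a constant, which cancels once the posterior
# is normalised over the (a, b) grid.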
def log_prior(a,b):
amin,amax = -10.0,10.0
bmin,bmax = 10.0,30.0
if (a > amin)*(a < amax)*(b > bmin)*(b < bmax):
logp = np.log(1.0/(amax-amin)) + np.log(1.0/(bmax-bmin))
else:
logp = -np.inf
return logp
def log_posterior(logP,mobs,merr,a,b):
return log_likelihood(logP,mobs,merr,a,b) + log_prior(a,b)
# Select a Cepheid dataset:
data.select(4258)
# Set up parameter grids:
npix = 100
amin,amax = -4.0,-2.0
bmin,bmax = 25.0,27.0
agrid = np.linspace(amin,amax,npix)
bgrid = np.linspace(bmin,bmax,npix)
logprob = np.zeros([npix,npix])
# Loop over parameters, computing unnormlized log posterior PDF:
for i,a in enumerate(agrid):
for j,b in enumerate(bgrid):
logprob[j,i] = log_posterior(data.logP,data.mobs,data.merr,a,b)
# Normalize and exponentiate to get posterior density:
Z = np.max(logprob)
prob = np.exp(logprob - Z)
norm = np.sum(prob)
prob /= norm
sorted_prob = np.sort(prob.flatten()) # avoid shadowing the built-in sorted()
C = sorted_prob.cumsum()
# Find the pixel values that lie at the levels that contain
# 68% and 95% of the probability:
lvl68 = np.min(sorted_prob[C > (1.0 - 0.68)])
lvl95 = np.min(sorted_prob[C > (1.0 - 0.95)])
plt.imshow(prob, origin='lower', cmap='Blues', interpolation='none', extent=[amin,amax,bmin,bmax])
plt.contour(prob,[lvl68,lvl95],colors='black',extent=[amin,amax,bmin,bmax])
plt.grid()
plt.xlabel('slope a')
plt.ylabel('intercept b / AB magnitudes')
data.plot(4258)
data.overlay_straight_line_with(a=-3.0,b=26.3)
data.add_legend()
prob_a_given_data = np.sum(prob,axis=0) # Approximate the integral as a sum
prob_b_given_data = np.sum(prob,axis=1) # Approximate the integral as a sum
print(prob_a_given_data.shape, np.sum(prob_a_given_data))
# Plot 1D distributions:
fig,ax = plt.subplots(nrows=1, ncols=2)
fig.set_size_inches(15, 6)
plt.subplots_adjust(wspace=0.2)
left = ax[0].plot(agrid, prob_a_given_data)
ax[0].set_title('${\\rm Pr}(a|d)$')
ax[0].set_xlabel('slope $a$')
ax[0].set_ylabel('Posterior probability density')
right = ax[1].plot(bgrid, prob_b_given_data)
ax[1].set_title('${\\rm Pr}(b|d)$')
ax[1].set_xlabel('intercept $b$ / AB magnitudes')
ax[1].set_ylabel('Posterior probability density')
# Compress each PDF into a median and 68% credible interval, and report:
def compress_1D_pdf(x,pr,ci=68,dp=1):
# Interpret credible interval request:
low = (1.0 - ci/100.0)/2.0 # 0.16 for ci=68
high = 1.0 - low # 0.84 for ci=68
# Find cumulative distribution and compute percentiles:
cumulant = pr.cumsum()
pctlow = x[cumulant>low].min()
median = x[cumulant>0.50].min()
pcthigh = x[cumulant>high].min()
# Convert to error bars, and format a string:
errplus = np.abs(pcthigh - median)
errminus = np.abs(median - pctlow)
report = "$ "+str(round(median,dp))+"^{+"+str(round(errplus,dp))+"}_{-"+str(round(errminus,dp))+"} $"
return report
print("a = ",compress_1D_pdf(agrid,prob_a_given_data,ci=68,dp=2))
print("b = ",compress_1D_pdf(bgrid,prob_b_given_data,ci=68,dp=2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The points now all seem to be within SF borders
Step3: I will now look at the total squared error in relation to the number of clusters, to find the ideal knee bend,
Step4: As seen, the error drops dramatically as we move from 1 to 2 clusters. It also drops rather significantly from 2-3, though
|
<ASSISTANT_TASK:>
Python Code:
X = X[X['lon'] < -122]
X.plot(kind='scatter', x='lon', y='lat')
from sklearn.cluster import KMeans
#To work with out cluster we have to turn our panda dataframe into a numpy array,
np_X = np.array(X)
kmeans = KMeans(n_clusters=2)
kmeans.fit(np_X)
centroid = kmeans.cluster_centers_
labels = kmeans.labels_
print "The %s cluster centers are located at %s " %(len(centroid),centroid)
colors = ["g.","r.","c."]
for i in range(len(np_X)):
plt.plot(np_X[i][0],np_X[i][1],colors[labels[i]],markersize=10)
plt.scatter(centroid[:,0],centroid[:,1], marker = "x", s=150, linewidths = 5, zorder =10)
plt.show()
from sklearn.cluster import KMeans
#To work with out cluster we have to turn our panda dataframe into a numpy array,
np_X = X
kmeans = KMeans(n_clusters=2)
kmeans.fit(np_X)
centroid = kmeans.cluster_centers_
classified_data = kmeans.labels_
labels = kmeans.labels_
print "The %s cluster centers are located at %s " %(len(centroid),centroid)
classified_data
#copy dataframe (may be memory intensive but just for illustration)
df_processed = X.copy()
df_processed['Cluster Class'] = pd.Series(classified_data, index=df_processed.index)
df_processed.head()
centroid_df = DataFrame(centroid)
centroid_df.head()
df_processed.plot(kind='scatter', x='lon', y='lat',
c = 'Cluster Class', label='datapoints');
import numpy
import pandas
from matplotlib import pyplot
import seaborn
seaborn.set(style='ticks')
numpy.random.seed(0)
N = 37
_genders= ['Female', 'Male', 'Non-binary', 'No Response']
df = pandas.DataFrame({
'Height (cm)': numpy.random.uniform(low=130, high=200, size=N),
'Weight (kg)': numpy.random.uniform(low=30, high=100, size=N),
'Gender': numpy.random.choice(_genders, size=N)
})
fg = seaborn.FacetGrid(data=df, hue='Gender', hue_order=_genders, aspect=1.61)
fg.map(pyplot.scatter, 'Weight (kg)', 'Height (cm)').add_legend()
########################################
import seaborn
seaborn.set(style='ticks')
_classes = sorted(df_processed['Cluster Class'].unique()) # cluster labels to colour by
fg = seaborn.FacetGrid(data=df_processed, hue='Cluster Class', hue_order=_classes, aspect=1.61)
fg.map(pyplot.scatter, 'lon', 'lat').add_legend()
from scipy.spatial import distance
def dist_euc(lon,lat,centroid):
data_cord = [lon,lat]
return distance.euclidean(data_cord,centroid)
df_processed['distance'] = df_processed.apply(lambda row: dist_euc(row['lon'], row['lat'],centroid[row['Cluster Class']]), axis=1)
df_processed.head()
ksum = []
def get_ksum(k):
lonList = X['lon'].tolist()
latList = X['lat'].tolist()
for i in range(1,k):
kmeans = KMeans(n_clusters=i)
kmeans.fit(X)
centroid = kmeans.cluster_centers_
labels = kmeans.labels_
tmp_sum = 0
for index, row in enumerate(lonList):
tmp_sum += dist_euc(lonList[index], latList[index], centroid[labels[index]])
ksum.append(tmp_sum)
get_ksum(10)
print ksum
#I Transform my data into a Dataframe to do easy and pretty plotting :-)
ksum_df = DataFrame(ksum, index = range(1,10))
ksum_df.plot()
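# Aside (added): scikit-learn already exposes the within-cluster sum of squared
# distances as KMeans.inertia_, so an equivalent elbow curve could be built with
# e.g. [KMeans(n_clusters=k).fit(X).inertia_ for k in range(1, 10)] (assumes the
# same X used above).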
import csv
csv_file = df_processed[['lon','lat','Cluster Class']].values
csv_file
with open('datapoints.csv','wb') as f:
w = csv.writer(f)
w.writerows(csv_file)
df_csv = X.copy(deep = True)
df_csv.head()
centroid_list = []
for i in range(1,7):
kmeans = KMeans(n_clusters=i)
kmeans.fit(X)
centroid = kmeans.cluster_centers_
labels = kmeans.labels_
column = "k%s" %i
df_csv[column] = labels
centroid_not_np = centroid.tolist()
centroid_list.append(centroid_not_np)
df_csv.head()
centroid_list
df_csv.to_csv('csv_clusters.csv', index=False)
with open('centroids.csv','wb') as csvfile:
w = csv.writer(csvfile,quoting=csv.QUOTE_MINIMAL)
w.writerows(centroid_list)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The object cdata now manages all the calibration images. The images are converted to arrays and stored in a file called calibration.h5. An HDF5 file can be opened using the utility HDFView. Data is arranged in the following hierarchy
Step2: To retrieve a specific calibration image, from the HDF5 file, as a numpy array
Step3: To retrieve a group of datasets by specifying just the camera
Step4: To iterate through the group use
Step5: Particle Data
Step6: The object pdata now manages all the particle images. The images are converted to arrays and stored in a file called particle.h5.
|
<ASSISTANT_TASK:>
Python Code:
cd assignment/calibration_images/
cdata = CalibrationData()
cdata.config()
cdata.create_h5()
import json
print json.dumps(cdata.h5dict, indent=4) #Json used to print cdata.h5dict neatly
arr = cdata.get_dset(camera=0, z_loc=-6) #Note: It returns a numpy array, not a h5py dataset!
print arr
grp = cdata.get_camera_grp(camera=0)
print grp
print 'Group attributes = ', grp.attrs.keys(), grp.attrs['cam']
for dset in grp.values():
print dset
print 'Dataset attributes = ', dset.attrs.keys(), dset.attrs['cam'], dset.attrs['z']
arr = dset[...]
print arr
cd ../particle_images/
pdata = ParticleData()
pdata.config()
pdata.create_h5()
arr = pdata.get_dset(camera=0, ab='a', time=47)
grp = pdata.get_camera_grp(camera=0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id = "ANNOVAR"></a>
Step2: Specify the name and location of the csv file that ANNOVAR produces as output
Step3: <a id = "myvariant"></a>
Step4: <a id = "filter"></a>
Step5: <a id = "rarevars"></a>
Step6: Zero variants found. Writing a csv output won't make much sense. You can still customize the filters the way you'd like, as you can see below.
Step7: <a id = "export"></a>
|
<ASSISTANT_TASK:>
Python Code:
import os
os.getcwd()
import re
import sys
import vcf
import time
import pysam
import myvariant
import collections
import numpy as np
import pandas as pd
sys.path.append(os.getcwd().replace("notebooks/dnaSeq/VAPr_Variant_Annotation_Prioritization", "src/dnaSeq/VAPr"))
#variantannotation functions
from variantannotation import annotate_batch
from variantannotation import create_output_files
from variantannotation import myvariant_parsing_utils
from variantannotation import mongo_DB_export
from variantannotation import utilities
from variantannotation import MongoDB_querying
ANNOVAR_PATH = '/data/annovar/'
FILE_NAMES = ['Tumor_RNAseq_variants.vcf', 'Tumor_targeted_seq.vcf', 'normal_targeted_seq.vcf', 'normal_blood_WGS.vqsr.vcf', 'somatic_mutect_old.vcf']
IN_PATH = '/data/ccbb_internal/interns/Carlo/test_vcf/'
OUT_PATH = '/data/ccbb_internal/interns/Carlo/test_vcf_out/'
vcf_file = IN_PATH
#Check if file paths are correctly pointing to the specified files.
for i in range(0, len(FILE_NAMES)):
print IN_PATH+FILE_NAMES[i]
utilities.run_annovar(ANNOVAR_PATH, IN_PATH+FILE_NAMES[0], OUT_PATH)
#Annovar runs as a subprocess on every file. They will run in parallel for speed up.
for i in range(0, len(FILE_NAMES)):
utilities.run_annovar(ANNOVAR_PATH, IN_PATH+FILE_NAMES[i], OUT_PATH)
#This serves to give a real-time feedback of the ANNOVAR progress and status.
filepath_out = '/data/ccbb_internal/interns/Carlo/test_vcf_out/'
filepath_in = '/data/ccbb_internal/interns/Carlo/test_vcf/'
#For safety, check the files in directory. Either run '!ls' here on iPython, or go to the directory and check
#manually for existing files. There should be once csv file for every vcf file.
VCF_FILE_NAME = 'Tumor_RNAseq_variants.vcf'
CSV_FILE_NAME = 'Tumor_RNAseq_variants.hg19_multianno.csv'
chunksize = 10000
step = 0
#Get variant list. Should always be the first step after running ANNOVAR
open_file = myvariant_parsing_utils.VariantParsing()
#Name Collections & DB. Change them to something appropriate. Each file should live in a collection
db_name = 'Variant_Prioritization_Workflow'
collection_name = 'Test_Tumor_RNAseq'
list_file = open_file.get_variants_from_vcf(filepath_in+VCF_FILE_NAME)
as_batch = annotate_batch.AnnotationMethods()
as_batch.by_chunks(list_file, chunksize, step, filepath_out+CSV_FILE_NAME, collection_name, db_name)
filepath = '/data/ccbb_internal/interns/Carlo'
#Create output files (if needed): specify name of files and path
rare_cancer_variants_csv = filepath + "/tumor_rna_rare_cancer_vars_csv.csv"
rare_cancer_variants_vcf = filepath + "/tumor_rna_rare_cancer_vars_vcf.vcf"
input_vcf_compressed = filepath + '/test_vcf/Tumor_RNAseq_variants.vcf.gz'
#Apply filter.
filter_collection = MongoDB_querying.Filters(db_name, collection_name)
rare_cancer_variants = filter_collection.rare_cancer_variant()
#Crete writer object for filtered lists:
my_writer = create_output_files.FileWriter(db_name, collection_name)
#cancer variants filtered files
my_writer.generate_annotated_csv(rare_cancer_variants, rare_cancer_variants_csv)
my_writer.generate_annotated_vcf(rare_cancer_variants,input_vcf_compressed, rare_cancer_variants_vcf)
#Apply filter.
filter_collection = MongoDB_querying.Filters(db_name, collection_name)
rare_disease_variants = filter_collection.rare_disease_variant()
from pymongo import MongoClient
client = MongoClient()
db = client.My_Variant_Database
collection = db.ANNOVAR_MyVariant_chunks
filter2 = collection.find({ "$and": [
{"$or": [{"ThousandGenomeAll": {"$lt": 0.05}}, {"ThousandGenomeAll": {"$exists": False}}]},
{"$or": [{"ESP6500siv2_all": { "$lt": 0.05}}, {"ESP6500siv2_all": { "$exists": False}}]},
{"$or": [{"Func_knownGene": "exonic"}, {"Func_knownGene": "splicing"}]},
{"ExonicFunc_knownGene": {"$ne": "synonymous SNV"}},
{"Genotype_Call.DP": {"$gte": 10}},
{"cosmic70": { "$exists": True}}
]})
#Create output files (if needed): specify name of files and path
out_unfiltered_vcf_file = filepath + "/out_unfiltered_rnaseq_vcf.vcf"
out_unfiltered_csv_file = filepath + "/out_unfiltered_rnaseq_csv.csv"
input_vcf_compressed = filepath + '/test_vcf/Tumor_RNAseq_variants.vcf.gz'
#Create writer object
#db and collection name must be specified since no list is given. The entire collection will be queried.
my_writer = create_output_files.FileWriter(db_name, collection_name)
#Write collection to csv and vcf
#The in_vcf_file must be the .vcf.gz file and it needs to have an associated .tbi file.
my_writer.generate_unfiltered_annotated_csv(out_unfiltered_csv_file)
my_writer.generate_unfiltered_annotated_vcf(input_vcf_compressed, out_unfiltered_vcf_file)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Read in the hanford.csv file
Step2: 3. Calculate the basic descriptive statistics on the data
Step3: 4. Calculate the coefficient of correlation (r) and generate the scatter plot. Does there seem to be a correlation worthy of investigation?
Step4: Yes, there seems to be a correlation worthy of investigation.
Step5: 6. Plot the linear regression line on the scatter plot of values. Calculate the r^2 (coefficient of determination)
Step6: 7. Predict the mortality rate (Cancer per 100,000 man years) given an index of exposure = 10
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
%matplotlib inline
import matplotlib.pyplot as plt # package for doing plotting (necessary for adding the line)
import statsmodels.formula.api as smf # package we'll be using for linear regression
import numpy as np
import scipy as sp
df = pd.read_csv("data/hanford.csv")
df
df.describe()
df.plot(kind='scatter', x='Exposure', y='Mortality')
r = df.corr()['Exposure']['Mortality']
r
lm = smf.ols(formula="Mortality~Exposure",data=df).fit()
intercept, slope = lm.params
lm.params
# Method 01 (What we've learned from the class)
df.plot(kind='scatter', x='Exposure', y='Mortality')
plt.plot(df["Exposure"],slope*df["Exposure"]+intercept,"-",color="red")
# Method 02 (Another version) _ so much harder ...than what we have learned
def plot_correlation( ds, x, y, ylim=(100,240) ):
plt.xlim(0,14)
plt.ylim(ylim[0],ylim[1])
plt.scatter(ds[x], ds[y], alpha=0.6, s=50)
for abc, row in ds.iterrows():
plt.text(row[x], row[y],abc )
plt.xlabel(x)
plt.ylabel(y)
# Correlation
trend_variable = np.poly1d(np.polyfit(ds[x], ds[y], 1))
trendx = np.linspace(0, 14, 4)
plt.plot(trendx, trend_variable(trendx), color='r')
r = sp.stats.pearsonr(ds[x],ds[y])
plt.text(trendx[3], trend_variable(trendx[3]),'r={:.3f}'.format(r[0]), color = 'r' )
plt.tight_layout()
plot_correlation(df,'Exposure','Mortality')
r_squared = r **2
r_squared
def predicting_mortality_rate(exposure):
return intercept + float(exposure) * slope
predicting_mortality_rate(10)
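# Equivalent check (added, illustrative): statsmodels can evaluate the fitted formula
# directly from a small dataframe of new exposures.
lm.predict(pd.DataFrame({'Exposure': [10]}))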
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first thing to do when you get a new dataset is take a look at some of it. This lets you see that it all read in correctly and gives an idea of what's going on with the data. In this case, let's see if there are any missing values, which will be reprsented with NaN or None.
Step2: Yep, it looks like there's some missing values.
Step3: That seems like a lot! It might be helpful to see what percentage of the values in our dataset were missing to give us a better sense of the scale of this problem
Step4: Wow, almost a quarter of the cells in this dataset are empty! In the next step, we're going to take a closer look at some of the columns with missing values and try to figure out what might be going on with them.
Step5: By looking at the documentation, I can see that this column has information on the number of seconds left in the game when the play was made. This means that these values are probably missing because they were not recorded, rather than because they don't exist. So, it would make sense for us to try and guess what they should be rather than just leaving them as NA's.
Step6: Oh dear, it looks like that's removed all our data! 😱 This is because every row in our dataset had at least one missing value. We might have better luck removing all the columns that have at least one missing value instead.
Step7: We've lost quite a bit of data, but at this point we have successfully removed all the NaN's from our data.
Step8: We can use the Panda's fillna() function to fill in missing values in a dataframe for us. One option we have is to specify what we want the NaN values to be replaced with. Here, I'm saying that I would like to replace all the NaN values with 0.
Step9: I could also be a bit more savvy and replace missing values with whatever value comes directly after it in the same column. (This makes a lot of sense for datasets where the observations have some sort of logical order to them.)
|
<ASSISTANT_TASK:>
Python Code:
# modules we'll use
import pandas as pd
import numpy as np
# read in all our data
nfl_data = pd.read_csv("../input/nflplaybyplay2009to2016/NFL Play by Play 2009-2017 (v4).csv")
# set seed for reproducibility
np.random.seed(0)
# look at the first five rows of the nfl_data file.
# I can see a handful of missing data already!
nfl_data.head()
# get the number of missing data points per column
missing_values_count = nfl_data.isnull().sum()
# look at the # of missing points in the first ten columns
missing_values_count[0:10]
# how many total missing values do we have?
total_cells = np.product(nfl_data.shape)
total_missing = missing_values_count.sum()
# percent of data that is missing
percent_missing = (total_missing/total_cells) * 100
print(percent_missing)
# look at the # of missing points in the first ten columns
missing_values_count[0:10]
# remove all the rows that contain a missing value
nfl_data.dropna()
# remove all columns with at least one missing value
columns_with_na_dropped = nfl_data.dropna(axis=1)
columns_with_na_dropped.head()
# just how much data did we lose?
print("Columns in original dataset: %d \n" % nfl_data.shape[1])
print("Columns with na's dropped: %d" % columns_with_na_dropped.shape[1])
# get a small subset of the NFL dataset
subset_nfl_data = nfl_data.loc[:, 'EPA':'Season'].head()
subset_nfl_data
# replace all NA's with 0
subset_nfl_data.fillna(0)
# replace all NA's with the value that comes directly after it in the same column,
# then replace all the remaining na's with 0
subset_nfl_data.fillna(method='bfill', axis=0).fillna(0)
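# Note (added): in recent pandas versions fillna(method='bfill') is deprecated in
# favour of the dedicated method, e.g. subset_nfl_data.bfill(axis=0).fillna(0).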
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Trajectory simulation
Step2: If we plot this simulated trajectory, we see how the distance between two subsequent positions of the particle is significantly smaller on the left as compared to the right of our diffusion gradient (visible as gray shading)
Step3: Modeling
Step4: Analysis
Step5: As we can see, the diffusion coefficient changes quite irregularly over time, but this irregularity is just the result of the irregular path of our particle across the sample. However, this plots confirms that the chosen parameter boundaries of $\sigma \in [0, 20]$ suffice. We may re-plot the particle path from above, but this time color-code the particle positions with the corresponding inferred diffusivity values
Step6: Finally, we can directly compare the inferred mean values to the true values of the diffusion coefficient used in the simulation
Step7: To assess our choice for the hyper-parameter boundaries, we may further plot the inferred distribution of the magnitude of parameter changes between time steps. In this case, the distribution falls off to (approximately) zero on both sides, therefore the boundaries suffice.
Step8: Regime-switch diffusion process
Step9: Trajectory simulation
Step10: Modeling
Step11: Analysis
Step12: The irregular spacing of the parameter jumps in time gets more regular once the parameter evolution is plotted on the chessboard pattern
Step13: The figure above shows that we can nicely reconstruct the spatial pattern in diffusivity for the region covered by the diffusing particle. Finally, we may plot a histogram to confirm that the inferred values for the diffusion coefficient (green histogram) align with the true values used in the simulation (red lines)
Step14: Model selection
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
D = np.linspace(0.0, 15., 500)
x = np.arange(500)
plt.figure(figsize=(8,2))
plt.fill_between(x, D, 0)
plt.xlabel('x position [a.u.]')
plt.ylabel('D [a.u.]');
np.random.seed(12337) # reproducable results
trajectory = [[150., 150.]] # starting point
for t in range(750):
try:
dc = D[int(trajectory[-1][0])] # look up diffusion coefficient
except:
if int(trajectory[-1][0]) >= 500:
dc = 15. # constant diffusion coefficient on the far right
else:
dc = 0. # constant diffusion coefficient on the far left
trajectory.append([p + np.random.normal(0, dc) for p in trajectory[-1]])
trajectory = np.array(trajectory)
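# Note (added): each time step draws independent Gaussian displacements for x and y
# with standard deviation equal to the local value of D, so "D" here acts as the
# per-step displacement scale rather than a diffusion constant in physical units.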
plt.figure(figsize=(8, 10))
D_img = np.array([list(D)]*len(D))[:, :250]
plt.imshow(D_img, cmap='Greys', alpha=0.9, zorder=0, interpolation='nearest')
plt.scatter(*trajectory.T, lw=0.2, c='b', alpha=0.7, zorder=2, s=50)
plt.plot(*trajectory.T, lw=1.5, c='k', zorder=1)
plt.xlim([0, 250])
plt.ylim([80, 250])
plt.xlabel('x position [a.u.]')
plt.ylabel('y position [a.u.]');
import bayesloop as bl
import sympy.stats
from sympy import Symbol
S = bl.HyperStudy()
# load data
velocity = trajectory[1:] - trajectory[:-1] # compute velocity vectors from positions
S.load(velocity)
# create low-level model
std = Symbol('D', positive=True)
normal = sympy.stats.Normal('normal', 0, std)
# we assume the diffusivity to lie within the interval ]0, 15[
# within this interval, we create 5000 equally spaced parameter values
L = bl.om.SymPy(normal, 'D', bl.oint(0, 15, 5000))
# create high-level model
# we assume a Gaussian random walk of the parameter 'D' with a
# standard deviation within the interval [0, 0.3] (20 equally
# spaced hyper-parameter values)
T = bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.3, 20), target='D')
S.set(L, T)
S.fit()
plt.figure(figsize=(8,2))
S.plot('D')
plt.xlim([0, 750])
plt.ylim([0, 8]);
D_mean = S.getParameterMeanValues('D') # extract posterior mean values of first (and only) parameter
# prepare color coding
m = plt.cm.ScalarMappable(cmap='RdYlBu')
m.set_array(D_mean)
m.autoscale()
plt.figure(figsize=(8, 10))
plt.imshow(D_img, cmap='Greys', alpha=0.9, zorder=0, interpolation='nearest')
plt.scatter(*trajectory[1:].T, lw=0.2, c=[m.to_rgba(x) for x in D_mean], alpha=0.9, zorder=2, s=50)
plt.plot(*trajectory[1:].T, lw=1.5, c='k', zorder=1)
plt.xlim([0, 250])
plt.ylim([80, 250])
plt.xlabel('x position [a.u.]')
plt.ylabel('y position [a.u.]');
D = np.linspace(0.0, 15., 500)
x = np.arange(500)
plt.figure(figsize=(8,2))
plt.fill_between(x, D, 0)
plt.scatter(trajectory[1:, 0], D_mean, c='r', alpha=0.6)
plt.xlim([0, 250])
plt.ylim([0, 8])
plt.xlabel('x position [a.u.]')
plt.ylabel('D [a.u.]');
plt.figure(figsize=(8,3))
S.plot('sigma', alpha=0.7, facecolor='g')
plt.xlim([0, 0.3]);
import matplotlib.patches as mpatches
# helper function for chessboard pattern
def check_region(x, y):
if (int(x) % 2 == 0 and int(y) % 2 == 0) or (int(x) % 2 == 1 and int(y) % 2 == 1):
return True
else:
return False
# create chessboard pattern
n = 5
D_img = np.array([0, 1]*int(np.ceil(0.5*(n**2))))[:-1].reshape([n, n])
# plot chessboard pattern
plt.figure(figsize=(8,8))
plt.imshow(D_img, cmap='Greys', alpha=0.5, extent=[0, n, 0, n], zorder=0, interpolation='nearest')
plt.grid(b=False, which='major')
plt.xlabel('x position [a.u.]')
plt.ylabel('y position [a.u.]')
# legend
white_patch = mpatches.Patch(color='white', label='D = 0.03 a.u.')
gray_patch = mpatches.Patch(color='0.5', label='D = 0.09 a.u.')
legend = plt.legend(handles=[white_patch, gray_patch],
bbox_to_anchor=(0., 1.02, 1., .102),
loc=3,
borderaxespad=0.,
frameon = 1)
frame = legend.get_frame()
frame.set_facecolor('0.85')
np.random.seed(1274)
D1 = 0.03
D2 = 0.09
trajectory = [[2.5, 2.5]]
for t in range(2000):
dc = D1 if check_region(*trajectory[-1]) else D2
trajectory.append([x + np.random.normal(0, dc) for x in trajectory[-1]])
trajectory = np.array(trajectory)
# plotting
plt.figure(figsize=(8,8))
plt.imshow(D_img, cmap='Greys', alpha=0.5, extent=[0, n, 0, n], zorder=0, interpolation='nearest')
plt.grid(b=False, which='major')
plt.xlabel('x position [a.u.]')
plt.ylabel('y position [a.u.]')
plt.scatter(*trajectory.T, lw=0.2, c='b', alpha=0.7, zorder=2, s=50)
plt.plot(*trajectory.T, lw=1.5, c='k', zorder=1)
plt.xlim([1, 5])
plt.ylim([0, 4]);
# load new data
velocity = trajectory[1:] - trajectory[:-1]
S.load(velocity)
# change parameter boundaries of the low-level model
L = bl.om.SymPy(normal, 'D', bl.oint(0, 0.2, 2000))
# create high-level model
T = bl.tm.RegimeSwitch('log10pMin', bl.cint(-10, 3, 20))
S.set(L, T)
S.fit()
plt.figure(figsize=(8,2))
S.plot('D')
plt.xlim([0, 2000])
plt.ylim([0, .2]);
D_mean = S.getParameterMeanValues('D')
# prepare color coding
m = plt.cm.ScalarMappable(cmap='RdYlBu')
m.set_array([0.03, 0.09])
m.autoscale()
# plotting
plt.figure(figsize=(8,8))
plt.imshow(D_img, cmap='Greys', alpha=0.5, extent=[0, n, 0, n], zorder=0, interpolation='nearest')
plt.grid(b=False, which='major')
plt.xlabel('x position [a.u.]')
plt.ylabel('y position [a.u.]')
plt.scatter(*trajectory[1:].T, lw=0.2, c=[m.to_rgba(x) for x in D_mean], alpha=0.9, zorder=2, s=50)
plt.plot(*trajectory[1:].T, lw=1.5, c='k', zorder=1)
cb = plt.colorbar(m, shrink=0.75)
cb.set_label('diffusivity [a.u.]')
cb.set_clim(0.03, 0.09)
plt.xlim([1, 5])
plt.ylim([0, 4]);
plt.figure(figsize=(8,3))
plt.hist(D_mean, 20, alpha=0.7, facecolor='g', normed=True)
plt.axvline(0.03, 0, 1600, lw=1, c='r')
plt.axvline(0.09, 0, 1600, lw=1, c='r')
plt.xlabel('diffusivity [a.u.]')
plt.ylabel('probability density [a.u.]');
T = bl.tm.GaussianRandomWalk('sigma', bl.cint(0, 0.01, 20), target='D')
S.set(T)
S.fit()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading data from a file
Step2: Inspecting a dataframe // What's in the flights dataset?
Step3: Series
Step4: DataFrame Indexing
Step5: Dataframe index
Step6: But now how do I get the 3rd row?
Step7: Exploring our dataset - let's look at the flights table
Step8: Identifying and removing NAs in a dataset
Step9: Aside
Step10: The great thing about Pandas is that if you pass in a Series, the order of the elements in it doesn't matter anymore. It uses the index to align the Series to the row/column index of the dataframe.
Step11: Performing a function along an axis
Step12: If you want to apply an arbitrary function along an axis, look into the apply function
Step13: Other descriptive functions work here, like 'std', 'count', 'min', 'max'
Step14: Merging tables 'vertically' // Subsetting and re-combining flights from different airlines
Step15: Nothing special, just be sure the dataframes have the columns with the same names and types.
Step16: Merge two tables by a single column // What are the most common destination airports?
Step17: The airports table gives us a key! Let's merge the flights data with the airports data, using dest in flights and faa in airports.
Step18: Why did we use how='left'?
Step19: There are 1357 airports in the airports table that aren't in the flights table at all.
Step20: Since each operation gives us back a dataframe, they are easily chained
Step21: Goal
Step22: Pivot Table // Average flight time from origin to destination
Step23: However, often you want to pivot just a regular dataframe. I'll create one from airtime for an example
Step24: Multi-column merge // What's the weather like for departing flights?
Step25: flights_weather has less rows. Default behavior of merge is 'inner' and so this means there are some flight year/month/day/hour/origin combos where we don't have a weather entry
Step26: Arranging a dataframe // What's the weather like for the most and least delayed flights?
Step27: Some other tidying
Step28: Wide to long formatted data
Step29: Well this is a bit hard to read. What about the first entry for each type of delay in each hour?
Step30: An incomplete investigation of NAs
Step31: Do flights with NA departure time also have an NA departure delay?
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function # For the python2 people
import pandas as pd # This is typically how pandas is loaded
airlines = pd.read_table("airlines.txt")
airports = pd.read_table("airports.txt")
flights = pd.read_table("flights.txt")
planes = pd.read_table("planes.txt")
weather = pd.read_table("weather.txt")
print(type(planes)) # Yup, it's a DataFrame
# What does it look like?
planes # Jupyter Notebooks do some nifty formatting here
# How big is it?
print(planes.shape) # Works like numpy
print(planes.columns) # What are the column labels?
print(planes.index) # What are the row labels?
# Let's grab a column
planes['manufacturer']
# Inspecting this column further
manufacturer = planes['manufacturer']
print(type(manufacturer)) # It's a Series
# Indexing into Series
print("Indexing into Series: ", manufacturer[3])
# Indexing into DataFrame
print("Indexing into DataFrame: ", planes.loc[3, 'manufacturer'])
third_row = planes.loc[3] # get the third row
third_row
print(type(third_row))
planes = planes.set_index('tailnum')
# OR
planes = pd.read_table('planes.txt', index_col=0) #Set the first column as the index
planes.loc['N10156']
print(planes.iloc[3]) # Get the third row
print(planes.iloc[:, 3]) # Get the third column
print('What are the first 5 rows?')
flights.head()
print('What are the last 5 rows?')
flights.tail()
print('Sample random rows')
flights.sample(3, axis=0) # Axis 0 represents the rows, axis 1, the columns
print('What are the dimensions of the flights dataframe?\n')
print(flights.shape)
print('Are there any NAs in the flights dataframe?\n')
print(flights.isnull().any())
print('Selecting for flights where there is complete data, what are the dimensions?\n')
print("Original Matrix Shape:", flights.shape)
null_rows = flights.isnull().any(axis=1) # Rows where any value is null
flights_complete = flights.loc[~null_rows]
print("Complete-rows shape:", flights_complete.shape)
print(type(null_rows))
null_rows
print('How might I obtain a summary of the original dataset?')
flights.describe() # Similar to R's 'summary'
# use include='all' to include the non-numberic columns too
# An example
flights['air_time'].mean() # Returns a single value
subset = flights[['air_time', 'dep_delay', 'arr_delay']]
subset.mean(axis=0) # Axis 0: collapse all rows, result has Index = to original Columns
result = flights_complete.groupby('origin')['dep_delay'].mean()
result
# What is this object?
print(type(result))
flights_complete.groupby('origin')['dep_delay'].describe()
print('Subsetting the dataset to have 2 dataframes')
flightsUA = flights.loc[flights.carrier == 'UA',]
flightsAA = flights.loc[flights.carrier == 'AA',]
print('Checking the number of rows in two dataframes')
print(flightsUA.shape[0] + flightsAA.shape[0])
print('Combining two dataframes than checking the number of rows in the resulting data frame')
flightsUAandAA = pd.concat([flightsUA,flightsAA], axis=0) # axis=1 would stitch them together horizontally
print(flightsUAandAA.shape[0])
print('Binding 3 data frames and checking the number of rows')
allthree = pd.concat([flightsUA,flightsAA,flightsUAandAA])
allthree.shape[0]
airports.head()
print('Merging in pandas')
flights_readdest = flights_complete.merge(airports, left_on='dest', right_on = 'faa', how='left')
flights_readdest.head()
len(set(airports.faa) - set(flights.dest))
flights_readdest.columns
flights_sm = flights_readdest[['origin', 'name', 'year', 'month', 'day', 'air_time']]
flights_sm.head()
# Renaming is not so simple in pandas
flights_sm = flights_sm.rename(columns = {'name': 'dest'})
flights_sm.head()
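# the merge, column selection, groupby and aggregation can be chained into a single expression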
airtime = flights_complete.merge(airports, left_on='dest', right_on='faa', how='left') \
.loc[:, ['origin', 'name', 'air_time']] \
.groupby(['origin', 'name'])['air_time'] \
.mean()
print(airtime.shape)
airtime.head()
airtime.groupby(level='origin').max()
# What if we want to know where the flight goes?
rows = airtime.groupby(level='origin').idxmax() # This returns the indices in airtime where the max was found
airtime[rows] # Index by it to get the max rows
pvt_airtime = airtime.unstack() # Since airtime has a hierarchical index, we can use unstack
pvt_airtime
airtime_df = pd.DataFrame(airtime).reset_index()
airtime_df.head()
airtime_pv = airtime_df.pivot(index='origin',
columns='name',
values='air_time')
airtime_pv
weather.head()
print(flights_complete.columns & weather.columns) # What columns do they share?
flights_weather = flights_complete.merge(weather,
on=["year", "month","day","hour", "origin"])
print(flights_complete.shape)
print(flights_weather.shape)
# Let's grab flights+weather where the delay was greater than 200 minutes
flights_weather_posdelays = flights_weather.loc[flights_weather.dep_delay > 200]
flights_weather_posdelays.shape
# Anything unusual about these flights?
%matplotlib notebook
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure()
plt.hist(flights_weather.dropna().wind_gust, 30, range=(0, 50), normed=True, label='normal', alpha=.7)
plt.hist(flights_weather_posdelays.dropna().wind_gust, 30, range=(0,50), normed=True, label='delayed', alpha=.7)
plt.legend(loc='best')
plt.title('Wind Gust')
plt.figure()
plt.hist(flights_weather.dropna().pressure, 30, normed=True, label='normal', alpha=.7)
plt.hist(flights_weather_posdelays.dropna().pressure, 30, normed=True, label='delayed', alpha=.7)
plt.legend(loc='best')
plt.title('Pressure')
plt.figure()
plt.hist(flights_weather.dropna().hour, 30, normed=True, label='normal', alpha=.7)
plt.hist(flights_weather_posdelays.dropna().hour, 30, normed=True, label='delayed', alpha=.7)
plt.legend(loc='best')
plt.title('Hour')
flights_weather.sort_values('dep_delay').head(10)
flights_weather.sort_values('dep_delay', ascending=False).head(10)
flights_complete.dest.str.lower().head() # For string columns, use .str to access string methods
flights_complete.dest.str.upper().head()
flights_complete.head()
day_delay = pd.melt(flights_complete, id_vars=['hour', 'time_hour'], value_vars=['dep_delay', 'arr_delay'], var_name='type_of_delay')
day_delay
plt.figure()
sns.stripplot(x='hour', y='value', hue='type_of_delay', data=day_delay)
day_delay_first = day_delay.drop_duplicates('time_hour', keep='first')
day_delay_first.head()
flights.isnull().sum(axis=0)
flights_incomplete = flights.loc[flights.isnull().any(axis=1)]
flights_incomplete.shape
pd.crosstab(
index=flights_incomplete.dep_time.isnull(), # Series of bool values
columns=flights_incomplete.dep_delay.isnull() # series of bool values
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading initial data
Step2: Remove rows with NaN from data
Step3: Add diff_pt and cos(diff_phi)
Step4: Add max, sum among PIDs
Step5: define label = signB * signTrack
Step6: Apply ghost prob cut
Step7: Leave not muons, kaons, electrons, protons, pions
Step8: Tracks sum of charges asymmetry checks
Step9: Calculating tagging efficiency ($\epsilon_{tag}$)
Step10: Choose most probable B-events
Step11: Define B-like events for training
Step12: Main idea
Step13: PID pairs scatters
Step14: pt
Step15: count of tracks
Step16: PIDs histograms
Step17: Train to distinguish same sign vs opposite sign
Step18: DT
Step19: Calibration
Step20: Implementing best tracking
Step21: MC trained algorithm testing
|
<ASSISTANT_TASK:>
Python Code:
import pandas
import numpy
from folding_group import FoldingGroupClassifier
from rep.data import LabeledDataStorage
from rep.report import ClassificationReport
from rep.report.metrics import RocAuc
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, roc_auc_score
from utils import get_N_B_events, get_events_number, get_events_statistics
import root_numpy
data_nan = pandas.DataFrame(root_numpy.root2array('datasets/data/csv/JPsiK/Tracks.root'))
data_nan.head()
event_id_column = 'event_id'
event_id = data_nan.run.apply(str) + '_' + data_nan.event.apply(str)
data_nan['group_column'] = numpy.unique(event_id, return_inverse=True)[1]
data_nan[event_id_column] = event_id
get_events_statistics(data_nan)
get_N_B_events()
data = data_nan.dropna()
len(data_nan), len(data), get_events_statistics(data)
from utils import add_diff_pt
# add diff pt
add_diff_pt(data)
# add cos(diff_phi)
data['cos_diff_phi'] = numpy.cos(data.diff_phi.values)
from itertools import combinations
PIDs = {'k': data.PIDNNk.values,
'e': data.PIDNNe.values,
'mu': data.PIDNNm.values,
}
for (pid_name1, pid_values1), (pid_name2, pid_values2) in combinations(PIDs.items(), 2):
data.loc[:, 'max_PID_{}_{}'.format(pid_name1, pid_name2)] = numpy.maximum(pid_values1, pid_values2)
data.loc[:, 'sum_PID_{}_{}'.format(pid_name1, pid_name2)] = pid_values1 + pid_values2
data.loc[:, 'label'] = (data.signB.values * data.signTrack.values > 0) * 1
', '.join(data.columns)
initial_cut = '(ghostProb < 0.4)'
data = data.query(initial_cut)
get_events_statistics(data)
threshold_kaon = 0.
threshold_muon = 0.
threshold_electron = 0.
threshold_pion = 0.
threshold_proton = 0.
cut_pid = " ( (PIDNNk > {trk}) | (PIDNNm > {trm}) | (PIDNNe > {tre}) | (PIDNNpi > {trpi}) | (PIDNNp > {trp})) "
cut_pid = cut_pid.format(trk=threshold_kaon, trm=threshold_muon, tre=threshold_electron, trpi=threshold_pion,
trp=threshold_proton)
data = data.query(cut_pid)
get_events_statistics(data)
from utils import compute_sum_of_charges
means = [compute_sum_of_charges(data[mask], name, bins=bins,
event_id_column=event_id_column) for mask, name, bins in \
zip([data.signB > -100,
(data.IPs > 3) & ((abs(data.diff_eta) > 0.6) | (abs(data.diff_phi) > 0.825)),
(abs(data.diff_eta) < 0.6) & (abs(data.diff_phi) < 0.825) & (data.IPs < 3)],
['full', 'OS', 'SS'], [21, 21, 21])]
N_B_passed = float(get_events_number(data))
tagging_efficiency = N_B_passed / get_N_B_events()
tagging_efficiency_delta = sqrt(N_B_passed) / get_N_B_events()
tagging_efficiency, tagging_efficiency_delta
hist(data.diff_pt.values, bins=100)
pass
_, take_indices = numpy.unique(data[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data.Bmass.values[take_indices], bins=100)
title('B mass hist')
xlabel('mass')
subplot(1, 2, 2)
hist(data.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist')
xlabel('signal sWeights')
plt.savefig('img/Bmass_less_PID.png' , format='png')
sweight_threshold = 1.
data_sw_passed = data[data.N_sig_sw > sweight_threshold]
data_sw_not_passed = data[data.N_sig_sw <= sweight_threshold]
get_events_statistics(data_sw_passed)
_, take_indices = numpy.unique(data_sw_passed[event_id_column], return_index=True)
figure(figsize=[15, 5])
subplot(1, 2, 1)
hist(data_sw_passed.Bmass.values[take_indices], bins=100)
title('B mass hist for sWeight > 1 selection')
xlabel('mass')
subplot(1, 2, 2)
hist(data_sw_passed.N_sig_sw.values[take_indices], bins=100, normed=True)
title('sWeights hist for sWeight > 1 selection')
xlabel('signal sWeights')
plt.savefig('img/Bmass_selected_less_PID.png' , format='png')
hist(data_sw_passed.diff_pt.values, bins=100)
pass
features = list(set(data.columns) - {'index', 'run', 'event', 'i', 'signB', 'signTrack', 'N_sig_sw', 'Bmass', 'mult',
'PIDNNp', 'PIDNNpi', 'label', 'thetaMin', 'Dist_phi', event_id_column,
'mu_cut', 'e_cut', 'K_cut', 'ID', 'diff_phi', 'group_column'})
features
figure(figsize=[15, 16])
bins = 60
step = 3
for i, (feature1, feature2) in enumerate(combinations(['PIDNNk', 'PIDNNm', 'PIDNNe', 'PIDNNp', 'PIDNNpi'], 2)):
subplot(4, 3, i + 1)
Z, (x, y) = numpy.histogramdd(data_sw_passed[[feature1, feature2]].values, bins=bins, range=([0, 1], [0, 1]))
pcolor(numpy.log(Z).T, vmin=0)
xlabel(feature1)
ylabel(feature2)
xticks(numpy.arange(bins, step), x[::step]), yticks(numpy.arange(bins, step), y[::step])
plt.savefig('img/PID_selected_less_PID.png' , format='png')
hist(data_sw_passed.diff_pt.values, bins=60, normed=True)
pass
figure(figsize=(20, 6))
subplot(1, 2, 1)
_, n_tracks = numpy.unique(data_sw_passed[event_id_column], return_counts=True)
hist(n_tracks, bins=100)
title('Number of tracks for events with sWeight > 1')
subplot(1, 2, 2)
_, n_tracks_all = numpy.unique(data[event_id_column], return_counts=True)
hist(n_tracks_all, bins=106)
title('Number of tracks')
plt.savefig('img/tracks_number_less_PID.png' , format='png')
figure(figsize=[15, 4])
for i, column in enumerate(['PIDNNm', 'PIDNNe', 'PIDNNk']):
subplot(1, 3, i + 1)
hist(data_sw_passed[column].values, bins=60, range=(0, 1), label=column)
legend()
from decisiontrain import DecisionTrainClassifier
from rep.estimators import SklearnClassifier
from hep_ml.losses import LogLossFunction
data_sw_passed_lds = LabeledDataStorage(data_sw_passed, data_sw_passed.label.values, data_sw_passed.N_sig_sw.values)
tt_base = DecisionTrainClassifier(learning_rate=0.1, n_estimators=3000, depth=6,
max_features=15, n_threads=14, loss=LogLossFunction(regularization=100))
tt_folding = FoldingGroupClassifier(SklearnClassifier(tt_base), n_folds=2, random_state=11,
train_features=features, group_feature='group_column')
%time tt_folding.fit_lds(data_sw_passed_lds)
pass
import cPickle
with open('models/dt_full_group.pkl', 'w') as f:
cPickle.dump(tt_folding, f)
# import cPickle
# with open('models/dt_full_group.pkl', 'r') as f:
# tt_folding = cPickle.load(f)
comparison_report = tt_folding.test_on_lds(data_sw_passed_lds)
comparison_report.compute_metric(RocAuc())
comparison_report.roc()
lc = comparison_report.learning_curve(RocAuc(), steps=1)
lc
comparison_report.feature_importance()
from utils import get_result_with_bootstrap_for_given_part
result = get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
[data_sw_passed, data_sw_not_passed], 'tt-log', get_N_B_events(),
logistic=True, n_calibrations=30)
result
import utils
reload(utils)
from utils import get_result_with_bootstrap_for_given_part
result = get_result_with_bootstrap_for_given_part(tagging_efficiency, tagging_efficiency_delta, tt_folding,
[data_sw_passed, data_sw_not_passed], 'tt-log', get_N_B_events(),
logistic=True, n_calibrations=1)
result
result.to_csv('img/tracks.csv', index=False, header=True)
from utils import prepare_B_data_for_given_part
Bdata_prepared = prepare_B_data_for_given_part(tt_folding, [data_sw_passed, data_sw_not_passed],
get_N_B_events(), logistic=True)
Bdata_prepared.to_csv('models/Bdata_tracks.csv', header=True, index=False)
from utils import estimate_algorithm
import cPickle
with open('models/dt_MC.pkl', 'r') as f:
tt_folding_MC = cPickle.load(f)
with open('models/calibrator_tracks_MC.pkl', 'r') as f:
calibrator_tracks_MC = cPickle.load(f)
with open('models/calibrator_B_MC.pkl', 'r') as f:
calibrator_B_MC = cPickle.load(f)
p_MC = tt_folding_MC.predict_proba(data)[:, 1]
roc_auc_score(data.label, p_MC, sample_weight=data.N_sig_sw.values.astype(float64))
estimate_algorithm(tt_folding_MC, calibrator_tracks_MC, calibrator_B_MC, data, get_N_B_events())
estimate_algorithm(tt_folding_MC, calibrator_tracks_MC, calibrator_B_MC, data, get_N_B_events(), calib_part_itself=True,
calib_itself=True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Twiss parameters with and without coupler kick
Step2: Trajectories with Coupler Kick
|
<ASSISTANT_TASK:>
Python Code:
# the output of plotting commands is displayed inline within frontends,
# directly below the code cell that produced it
%matplotlib inline
from time import time
# this python library provides generic shallow (copy) and deep copy (deepcopy) operations
from copy import deepcopy
# import from Ocelot main modules and functions
from ocelot import *
# import from Ocelot graphical modules
from ocelot.gui.accelerator import *
# import lattice
from xfel_l1 import *
tws0 = Twiss()
tws0.E = 0.005
tws0.beta_x = 7.03383607232
tws0.beta_y = 4.83025657816
tws0.alpha_x = 0.981680481977
tws0.alpha_y = -0.524776086698
tws0.E = 0.1300000928
lat = MagneticLattice(cell_l1, start=bpmf_103_i1, stop=qd_210_b1)
# twiss parameters without coupler kick
tws1 = twiss(lat, tws0)
# adding coupler coefficients
for elem in lat.sequence:
if elem.__class__ == Cavity and not(".AH1." in elem.id) and not(".A1." in elem.id):
elem.coupler_kick = True
elem.vx_up = -56.813 + 10.751j
elem.vy_up = -41.091 + 0.5739j
elem.vxx_up = 0.99943 - 0.81401j
elem.vxy_up = 3.4065 - 0.4146j
elem.vx_down = -24.014 + 12.492j
elem.vy_down = 36.481 + 7.9888j
elem.vxx_down = -4.057 - 0.1369j
elem.vxy_down = 2.9243 - 0.012891j
# update transfer maps
lat.update_transfer_maps()
tws = twiss(lat, tws0)
bx0 = [tw.beta_x for tw in tws1]
by0 = [tw.beta_y for tw in tws1]
s0 = [tw.s for tw in tws1]
bx = [tw.beta_x for tw in tws]
by = [tw.beta_y for tw in tws]
s = [tw.s for tw in tws]
fig, ax = plot_API(lat, legend=False)
ax.plot(s0, bx0, "b", lw=1, label=r"$\beta_x$")
ax.plot(s, bx, "b--", lw=1, label=r"$\beta_x$, CK")
ax.plot(s0, by0, "r", lw=1, label=r"$\beta_y$")
ax.plot(s, by, "r--", lw=1, label=r"$\beta_y$, CK")
ax.set_ylabel(r"$\beta_{x,y}$, m")
ax.legend()
plt.show()
from ocelot.cpbd.track import lattice_track
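# scan the kick angle of corrector cix_118_i1 (values in mrad, converted to rad below)
# and track a particle through the lattice for each setting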
for a in np.arange(-0.6, 0.6, 0.1):
cix_118_i1.angle = a*0.001
lat.update_transfer_maps()
p = Particle(px=0, E=0.150-0.02)
plist = lattice_track(lat, p)
s = [p.s for p in plist]
x = [p.x for p in plist]
y = [p.y for p in plist]
px = [p.px for p in plist]
py = [p.py for p in plist]
plt.subplot(211)
plt.plot(s, x)
plt.subplot(212)
plt.plot(s, y)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As we saw earlier the Dirichlet process describes the distribution of a random probability distribution. The Dirichlet process takes two parameters
Step2: Let's illustrate again with a standard normal base measure. We can construct a function base_measure that generates samples from it.
Step3: Because the normal distribution has continuous support, we can generate samples from it forever and we will never see the same sample twice (in theory). We can illustrate this by drawing from the distribution ten thousand times and seeing that we get ten thousand unique values.
Step4: However, when we feed the base measure through the stochastic memoization procedure and then sample, we get many duplicate samples. The number of unique samples goes down as $\alpha$ increases.
Step5: At this point, we have a function dp_draws that returns samples from a probability distribution (specifically, a probability distribution sampled from $\text{DP}(\alpha H_0)$). We can use dp_draws as a base distribution for another Dirichlet process!
Step6: How do we interpret this? norm_dp is a sampler from a probability distribution that looks like the standard normal distribution. norm_hdp is a sampler from a probability distribution that "looks like" the distribution norm_dp samples from.
Step7: And here is a histogram for samples drawn from norm_hdp, our second sampler.
Step8: The second plot doesn't look very much like the first! The level to which a sample from a Dirichlet process approximates the base distribution is a function of the dispersion parameter $\alpha$. Because I set $\alpha=10$ (which is relatively small), the approximation is fairly course. In terms of memoization, a small $\alpha$ value means the stochastic memoizer will more frequently reuse values already seen instead of drawing new ones.
Step9: Since the Hierarchical DP is a Dirichlet Process inside of Dirichlet process, we must provide it with both a first and second level $\alpha$ value.
Step10: We can sample directly from the probability distribution drawn from the Hierarchical Dirichlet Process.
Step11: norm_hdp is not equivalent to the Hierarchical Dirichlet Process; it samples from a single distribution sampled from this HDP. Each time we instantiate the norm_hdp variable, we are getting a sampler for a unique distribution. Below we sample five times and get five different distributions.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from numpy.random import choice
from scipy.stats import beta
class DirichletProcessSample():
def __init__(self, base_measure, alpha):
self.base_measure = base_measure
self.alpha = alpha
self.cache = []
self.weights = []
self.total_stick_used = 0.
def __call__(self):
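        # Stick-breaking construction: with probability proportional to an existing
        # weight we reuse a cached draw; otherwise a Beta(1, alpha) piece is broken
        # off the remaining stick and a fresh value is drawn from the base measure.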
remaining = 1.0 - self.total_stick_used
i = DirichletProcessSample.roll_die(self.weights + [remaining])
if i is not None and i < len(self.weights) :
return self.cache[i]
else:
stick_piece = beta(1, self.alpha).rvs() * remaining
self.total_stick_used += stick_piece
self.weights.append(stick_piece)
new_value = self.base_measure()
self.cache.append(new_value)
return new_value
@staticmethod
def roll_die(weights):
if weights:
return choice(range(len(weights)), p=weights)
else:
return None
from scipy.stats import norm
base_measure = lambda: norm().rvs()
import pandas as pd
from pandas import Series
ndraws = 10000
print "Number of unique samples after {} draws:".format(ndraws),
draws = Series([base_measure() for _ in range(ndraws)])
print draws.unique().size
norm_dp = DirichletProcessSample(base_measure, alpha=100)
print "Number of unique samples after {} draws:".format(ndraws),
dp_draws = Series([norm_dp() for _ in range(ndraws)])
print dp_draws.unique().size
norm_hdp = DirichletProcessSample(norm_dp, alpha=10)
import matplotlib.pyplot as plt
pd.Series(norm_dp() for _ in range(10000)).hist()
_=plt.title("Histogram of Samples from norm_dp")
pd.Series(norm_hdp() for _ in range(10000)).hist()
_=plt.title("Histogram of Samples from norm_hdp")
class HierarchicalDirichletProcessSample(DirichletProcessSample):
def __init__(self, base_measure, alpha1, alpha2):
first_level_dp = DirichletProcessSample(base_measure, alpha1)
self.second_level_dp = DirichletProcessSample(first_level_dp, alpha2)
def __call__(self):
return self.second_level_dp()
norm_hdp = HierarchicalDirichletProcessSample(base_measure, alpha1=10, alpha2=20)
pd.Series(norm_hdp() for _ in range(10000)).hist()
_=plt.title("Histogram of samples from distribution drawn from Hierarchical DP")
for i in range(5):
norm_hdp = HierarchicalDirichletProcessSample(base_measure, alpha1=10, alpha2=10)
_=pd.Series(norm_hdp() for _ in range(100)).hist()
_=plt.title("Histogram of samples from distribution drawn from Hierarchical DP")
_=plt.figure()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
df = pd.DataFrame({'A': [0, 1, 1, 1, 0, 1],
'B': [1, 0, 1, 1, 1, 0],
'C': [1, 1, 0, 1, 1, 1],
'D': [1, 1, 1, 0, 1, 1]})
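# idxmin(axis=1) returns, for each row, the label of the column holding the smallest
# value - here the column that contains the 0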
df["category"] = df.idxmin(axis=1)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The above command is only needed if you are plotting in a Jupyter notebook.
Step2: And then produce a line plot
Step3: We can add labels and titles
Step4: We can change the plotting style, and use LaTeX style notation where needed
Step5: We can plot two lines at once, and add a legend, which we can position
Step6: We would probably prefer to use subplots. At this point we have to leave the simple interface, and start building the plot using its individual components, figures and axes, which are objects to manipulate
Step7: The axes variable contains all of the separate axes that you may want. This makes it easy to construct many subplots using a loop
Step8: Matplotlib will allow you to generate and place axes pretty much wherever you like, to use logarithmic scales, to do different types of plot, and so on. Check the examples and gallery for details.
Step9: Let's get some information about the file we just read in. First, let's see what data fields our dataset has
Step10: Now let's see what datatype (i.e. integer, boolean, string, float,...) the data in each field is
Step11: Finally, let's try printing the first few records in our dataframe
Step12: Note that pandas can read Excel files (using pandas.read_excel), and takes as arguments either the URL (as here) or the filename on the local machine.
Step13: We can see the underlying library is Matplotlib, but it's far easier to plot large data sets.
Step14: We can also extract specific metrics
Step15: However, we often wish to calculate statistics for a subset of our data. For this, we can use pandas' groups. Let's group our data by Name and try running describe again. We see that pandas has now calculated statistics for each type of iris separately.
Step16: We can select subsets of our data using criteria. For example, we can select all records with PetalLength greater than 5
Step17: We can also combine criteria like so
Step18: Data across multiple files
Step19: To compare the data from these different datasets, it will help if we first combine them into a single dataframe. Before we do this, we'll add an extra field to each of our dataframes describing the musical genre so that we do not lose this information when the dataframes are combined.
Step20: This has given us a fairly sizeable dataframe with 513 rows and 32 columns. However, if you look closely at the index column you'll notice something dodgy has happened - combining our dataframes has meant that the index field is no longer unique (multiple records share the same index).
Step21: This is not good. Looking at the printout of the dataframe above, we see that the last record is LOYALTY. by Kendrick Lamar and has index 46. However, if we try to access the record with index 46, we instead get Rebellion (Lies) by Arcade Fire.
Step22: We can remedy this by reindexing. Looking at the fields available, it looks like the tracks' id would be a good choice for a unique index.
Step23: Unfortunately, there are still duplicates where the same track appears in multiple playlists. Let's remove these duplicates, keeping only the first instance.
Step24: Success! Before we do anything else, let's write our single combined dataset to file.
Step25: Now onto some analysis. Let's first look at some statistics for each of our genres.
Step26: From this alone we can get a lot of information
Step27: We can visualise our data by plotting the various characteristics against each other. In the plot below, we compare the energy and danceability of country, metal and house music. The data from the three different genres separates into three pretty distinct clusters.
Step28: More information about pandas can be found in the documentation, or in tutorials or in standard books.
Step29: In the weight column, instead of a number as we may expect, we see the values are 'NaN' or 'Not a Number'. If you open the original spreadsheet, you'll see that the original weight data is missing for these records. The count function returns the number of non-NaN entries per column, so if we subtract that from the length of the survey, we can see how many NaN entries there are per column
Step30: We need to work out a sensible way to deal with this missing data, as if we try to do any analysis on the dataset in its current state, python may throw value errors. For example, let's try converting the data in the weight column to an integer
Step31: There are several different ways we can deal with NaNs - which we choose depends on the individual dataset.
Step32: We may just wish to discard records with NaNs in a particular column (e.g. if we wish to deal with NaNs in other columns in a different way). We can discard all the records with NaNs in the weight column like so
Step33: It may be that it's more appropriate for us to set all missing data with a certain value. For example, let's set all missing weights to 0
Step34: For our dataset, this is not the best choice as it will change the mean of our data
Step35: A better solution here is to fill all NaN values with the mean weight value
Step36: Exercises
Step37: In the solution below for the music genre exercise, we've included a few extra steps in order to format the plot and make it more readable (e.g. changing the axis limits, increasing the figure size and fontsize).
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy
x = numpy.linspace(0, 1)
y1 = numpy.sin(numpy.pi * x) + 0.1 * numpy.random.rand(50)
y2 = numpy.cos(3.0 * numpy.pi * x) + 0.2 * numpy.random.rand(50)
from matplotlib import pyplot
pyplot.plot(x, y1)
pyplot.show()
pyplot.plot(x, y1)
pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.title('A single line plot')
pyplot.show()
pyplot.plot(x, y1, linestyle='--', color='black', linewidth=3)
pyplot.xlabel(r'$x$')
pyplot.ylabel(r'$y$')
pyplot.title(r'A single line plot, roughly $\sin(\pi x)$')
pyplot.show()
pyplot.plot(x, y1, label=r'$y_1$')
pyplot.plot(x, y2, label=r'$y_2$')
pyplot.xlabel(r'$x$')
pyplot.ylabel(r'$y$')
pyplot.title('Two line plots')
pyplot.legend(loc='lower left')
pyplot.show()
fig, axes = pyplot.subplots(nrows=1, ncols=2, figsize=(10,6))
axis1 = axes[0]
axis1.plot(x, y1)
axis1.set_xlabel(r'$x$')
axis1.set_ylabel(r'$y_1$')
axis2 = axes[1]
axis2.plot(x, y2)
axis2.set_xlabel(r'$x$')
axis2.set_ylabel(r'$y_2$')
fig.tight_layout()
pyplot.show()
data = []
for nx in range(2,5):
for ny in range(2,5):
data.append(numpy.sin(nx * numpy.pi * x) + numpy.cos(ny * numpy.pi * x))
fig, axes = pyplot.subplots(nrows=3, ncols=3, figsize=(10,10))
for nrow in range(3):
for ncol in range(3):
ndata = ncol + 3 * nrow
axes[nrow, ncol].plot(x, data[ndata])
axes[nrow, ncol].set_xlabel(r'$x$')
axes[nrow, ncol].set_ylabel(r'$\sin({} \pi x) + \cos({} \pi x)$'.format(nrow+2, ncol+2))
fig.tight_layout()
pyplot.show()
import pandas
iris = pandas.read_csv('https://raw.githubusercontent.com/pandas-dev/pandas/master/pandas/tests/data/iris.csv')
iris.columns
iris.dtypes
# print first 5 records
iris.head()
iris['PetalLength'].plot.hist()
pyplot.show()
iris.describe()
print(iris['SepalLength'].min())
print(iris['PetalLength'].std())
print(iris['PetalWidth'].count())
grouped_iris = iris.groupby('Name')
grouped_iris.describe()
grouped_iris['PetalLength'].mean()
iris[iris.PetalLength > 5].head()
iris[(iris.Name == 'Iris-setosa') & (iris.PetalWidth < 0.3)].head()
dfs = {'indie': pandas.read_csv('spotify_data/indie.csv'), 'pop': pandas.read_csv('spotify_data/pop.csv'),
'country': pandas.read_csv('spotify_data/country.csv'), 'metal': pandas.read_csv('spotify_data/metal.csv'),
'house': pandas.read_csv('spotify_data/house.csv'), 'rap': pandas.read_csv('spotify_data/rap.csv')}
# add genre field to each dataframe
for name, df in dfs.items():
df['genre'] = name
# combine into single dataframe
data = pandas.concat(dfs.values())
data
data.index.is_unique
data.iloc[46]
data.set_index('id', inplace=True)
data.index.is_unique
data = data[~data.index.duplicated(keep='first')]
data.index.is_unique
data.to_csv('spotify_data/combined_data.csv')
data[['duration_ms', 'explicit', 'popularity', 'acousticness', 'danceability', 'energy', 'instrumentalness',
'liveness', 'loudness', 'speechiness', 'tempo', 'valence', 'genre']].groupby('genre').mean()
data.sort_values('valence')[['album', 'artists', 'name', 'genre', 'valence']].groupby('genre').head(1)
colours = ['red', 'blue', 'green', 'orange', 'pink', 'purple']
ax = data[data.genre == 'country'].plot.scatter('danceability', 'energy', c=colours[0], label='country', figsize=(10,10))
data[data.genre == 'metal'].plot.scatter('danceability', 'energy', c=colours[1], marker='x', label='metal', ax=ax)
data[data.genre == 'house'].plot.scatter('danceability', 'energy', c=colours[2], marker='+', label='house', ax=ax)
survey = pandas.read_excel('surveys.xlsx')
survey.head()
len(survey) - survey.count()
survey.weight.astype('int')
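# dropna() with no arguments discards every row that contains at least one NaN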
survey.dropna()
survey.dropna(subset=['weight'])
nan_zeros = survey.copy() # make a copy so we don't overwrite original dataframe
nan_zeros.weight.fillna(0, inplace=True)
nan_zeros.head()
print(survey.weight.mean(), nan_zeros.weight.mean())
nan_mean = survey.copy()
nan_mean.weight.fillna(survey.weight.mean(), inplace=True)
print(survey.weight.mean(), nan_mean.weight.mean())
nan_mean.head()
for name, df in iris.groupby('Name'):
# create a new figure
pyplot.figure()
# plot histogram of sepalwidth
df['SepalWidth'].plot.hist()
# add title
pyplot.title(name)
# create a new axis
fig, axis = pyplot.subplots()
# create a dictionary of colours
colours = {'indie': 'red', 'pop': 'blue',
'country': 'green', 'metal': 'black',
'house': 'orange', 'rap': 'pink'}
# create a dictionary of markers
markers = {'indie': '+', 'pop': 'x',
'country': 'o', 'metal': 'd',
'house': 's', 'rap': '*'}
for name, df in data.groupby('genre'):
df.plot.scatter('acousticness', 'liveness', label=name, s=30, color=colours[name], marker=markers[name],
ax=axis, figsize=(10,8), fontsize=16)
# set limits of x and y axes so that they are between 0 and 1
axis.set_xlim([0,1.0])
axis.set_ylim([0,1.0])
# set the font size of the axis labels
axis.xaxis.label.set_fontsize(16)
axis.yaxis.label.set_fontsize(16)
pyplot.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Replace the variable values in the cell below
Step2: Create a Dataset from BigQuery
Step3: Let's do some regular expression parsing in BigQuery to get the source of the newspaper article from the URL. For example, if the url is http
Step6: Now that we have good parsing of the URL to get the source, let's put together a dataset of source and titles. This will be our labeled dataset for machine learning.
Step7: For ML training, we usually need to split our dataset into training and evaluation datasets (and perhaps an independent test dataset if we are going to do model or feature selection based on the evaluation dataset). AutoML however figures out on its own how to create these splits, so we won't need to do that here.
Step8: AutoML for text classification requires that
Step9: Let's make sure we have roughly the same number of labels for each of our three labels
Step10: Finally we will save our data, which is currently in-memory, to disk.
Step11: Now let's sample 500 articles from the full dataset and make sure we have enough examples for each label in our sample dataset (see here for further details on how to prepare data for AutoML).
Step12: Let's write the sample dataset to disk.
|
<ASSISTANT_TASK:>
Python Code:
import os
from google.cloud import bigquery
import pandas as pd
%load_ext google.cloud.bigquery
PROJECT = "qwiklabs-gcp-03-3247cf88ddb1" #"cloud-training-demos" # Replace with your PROJECT
BUCKET = PROJECT # defaults to PROJECT
REGION = "us-central1" # Replace with your REGION
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
!gsutil mb gs://$BUCKET
%%bigquery --project $PROJECT
SELECT
url, title, score
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
LENGTH(title) > 10
AND score > 10
AND LENGTH(url) > 0
LIMIT 10
%%bigquery --project $PROJECT
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
COUNT(title) AS num_articles
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
GROUP BY
source
ORDER BY num_articles DESC
LIMIT 100
regex = '.*://(.[^/]+)/'
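# the capture group grabs everything between '://' and the next '/', i.e. the host name of the article URL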
sub_query = """
SELECT
    title,
    ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '{0}'), '.'))[OFFSET(1)] AS source
FROM
    `bigquery-public-data.hacker_news.stories`
WHERE
    REGEXP_CONTAINS(REGEXP_EXTRACT(url, '{0}'), '.com$')
    AND LENGTH(title) > 10
""".format(regex)
query = """
SELECT
    LOWER(REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ')) AS title,
    source
FROM
  ({sub_query})
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
""".format(sub_query=sub_query)
print(query)
bq = bigquery.Client(project=PROJECT)
title_dataset = bq.query(query).to_dataframe()
title_dataset.head()
print("The full dataset contains {n} titles".format(n=len(title_dataset)))
title_dataset.source.value_counts()
DATADIR = './data/'
if not os.path.exists(DATADIR):
os.makedirs(DATADIR)
FULL_DATASET_NAME = 'titles_full.csv'
FULL_DATASET_PATH = os.path.join(DATADIR, FULL_DATASET_NAME)
# Let's shuffle the data before writing it to disk.
title_dataset = title_dataset.sample(n=len(title_dataset))
title_dataset.to_csv(
FULL_DATASET_PATH, header=False, index=False, encoding='utf-8')
sample_title_dataset = title_dataset.sample(n=500)
sample_title_dataset.source.value_counts()
SAMPLE_DATASET_NAME = 'titles_sample.csv'
SAMPLE_DATASET_PATH = os.path.join(DATADIR, SAMPLE_DATASET_NAME)
sample_title_dataset.to_csv(
SAMPLE_DATASET_PATH, header=False, index=False, encoding='utf-8')
sample_title_dataset.head()
%%bash
gsutil cp data/titles_sample.csv gs://$BUCKET
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: And a few things from OpenMM
Step2: First, let's find a PDB for alanine dipeptide, the system we'll
Step3: Lets use the amber99sb-ildn forcefield with implicit solvent
Step4: Set the initial positions to the "first frame" of the PDB
Step5: Let's use one of the OpenMM reporters that mdtraj provides. This is
|
<ASSISTANT_TASK:>
Python Code:
import os
import mdtraj
import mdtraj.reporters
from simtk import unit
import simtk.openmm as mm
from simtk.openmm import app
pdb = mdtraj.load('data/native.pdb')
topology = pdb.topology.to_openmm()
forcefield = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')
system = forcefield.createSystem(topology, nonbondedMethod=app.CutoffNonPeriodic)
integrator = mm.LangevinIntegrator(330*unit.kelvin, 1.0/unit.picoseconds, 2.0*unit.femtoseconds)
simulation = app.Simulation(topology, system, integrator)
simulation.context.setPositions(pdb.xyz[0])
simulation.context.setVelocitiesToTemperature(330*unit.kelvin)
if not os.path.exists('ala2.h5'):
simulation.reporters.append(mdtraj.reporters.HDF5Reporter('ala2.h5', 1000))
simulation.step(100000)
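# The saved trajectory can later be read back with mdtraj, e.g. traj = mdtraj.load('ala2.h5')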
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step 1
Step1: The function name allows users to call data = GPy.util.datasets.boston_housing() to acquire the data. You should use a name that makes it clear to readers of the code what is going on. The data set name is passed to the function as a default argument. This name corresponds to the entry in the json file.
Step2: Optional Step 3
Step3: Now we can access the same data set, but this time, because we have the data in cache no download is performed.
Step4: For this version of the data set we can check that the response variables have been normalized.
|
<ASSISTANT_TASK:>
Python Code:
def boston_housing(data_set='boston_housing'):
if not data_available(data_set):
download_data(data_set)
all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
return data_details_return({'X' : X, 'Y': Y}, data_set)
import pods
data = pods.datasets.boston_housing()
from pods.datasets import *
import numpy as np
def boston_housing_preprocess(data_set='boston_housing'):
if not data_available(data_set):
download_data(data_set)
all_data = np.genfromtxt(os.path.join(data_path, data_set, 'housing.data'))
X = all_data[:, 0:13]
Y = all_data[:, 13:14]
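    # standardise the response (median house price) to zero mean and unit standard deviation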
Y = (Y - np.mean(Y))/np.std(Y)
return data_details_return({'X' : X, 'Y': Y,
'info' : 'The response variables have been preprocessed to have zero mean and unit standard deviation'
}, data_set)
data = boston_housing_preprocess()
print('Mean: ', data['Y'].mean())
print('Standard deviation ', data['Y'].std())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We make a full-sky arcminute resolution geometry. I've only been able to reproduce this bug for res=1.0.
Step2: We do a pix2sky that is needed by map2alm and make sure it gives a sensible result.
Step3: It makes sense. We now save a map that has this geometry and load it back.
Step4: The shapes and wcs of the geometry we originally made and of the saved map seem to agree. So we proceed to do the same pix2sky operation on the loaded geometry.
Step5: The results are all nans.
|
<ASSISTANT_TASK:>
Python Code:
%load_ext autoreload
%autoreload 2
from enlib import enmap,wcs as mwcs
import numpy as np
import sys,os
res = 1.0
shape, wcs = enmap.fullsky_geometry(res=res*np.pi/180./60., proj="car")
shape = (3,)+shape
ny, nx = shape[-2:]
vy,vx = enmap.pix2sky(shape, wcs, [np.arange(ny),np.zeros(ny)])
hy,hx = enmap.pix2sky(shape, wcs, [np.zeros(nx),np.arange(nx)])
print(vy,vx,hy,hx)
root = os.environ['WORK']+"/"
enmap.write_map(root+"temp.fits",enmap.zeros(shape,wcs,dtype=np.uint8))
lshape,lwcs = enmap.read_map_geometry(root+"temp.fits")
print(shape,wcs)
print(lshape,lwcs)
print(mwcs.equal(wcs,lwcs))
ny, nx = lshape[-2:]
vy2,vx2 = enmap.pix2sky(lshape, lwcs, [np.arange(ny),np.zeros(ny)])
hy2,hx2 = enmap.pix2sky(lshape, lwcs, [np.zeros(nx),np.arange(nx)])
print(vy2,vx2,hy2,hx2)
print(np.all(np.isclose(vy,vy2)))
print(np.all(np.isclose(vx,vx2)))
print(np.all(np.isclose(hy,hy2)))
print(np.all(np.isclose(hx,hx2)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: When should I use Seaborn versus Matplotlib?
Step2: Pandas/Matplotlib plot...
Step3: Using Seaborn
Step4: An important difference is the imperative approach from matplotlib versus the declarative approach from seaborn
Step5: <div class="alert alert-warning" style="font-size
Step6: And further customization (layers) can be added to the specification, e.g.
Step7: As Plotnine is built on top of Matplotlib, one can still retrieve the Matplotlib Figure object from Plotnine for eventual customization.
Step8: <div class="alert alert-info">
Step9: Altair implements the Grammar of Graphics with the same main ingredients, but a different syntax
Step10: When encoding the variables for the chosen geometry, Altair provides a specific syntax on the data type of each variable. For information on this ...
Step11: <div class="alert alert-info">
Step12: By default, Bokeh will open a new webpage to plot the figure. Still, an integration with Jupyter notebooks is provided
Step13: <div class="alert alert-danger">
Step14: Useful to know when you want to use the index as well
Step15: Bokeh has lots of functionalities to adjust and customize charts, e.g. by adding new annotations to the figure object
Step16: hvplot/holoviews
Step17: The link in between hvplot/holoviews and Bokeh (for further adjustments) can be made using the render function
Step18: <div class="alert alert-info">
Step19: Similar to Bokeh, Plotly provides a lot of building blocks for interactive plotting, see https
Step20: Similar to other high-level interfaces, this can be done by Plotly Express as well, supporting direct interaction with a Pandas DataFrame
Step21: <div class="alert alert-danger">
Step22: <div class="alert alert-info">
Step23: However, Holoviews/hvplot, Bokeh and Plotly can be used as Pandas back-end for plotting, by defining the pd.options.plotting.backend variable
Step24: <div class="alert alert-info">
Step25: <div class="alert alert-info">
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotnine as p9
import seaborn as sns
titanic = pd.read_csv("data/titanic.csv")
with plt.style.context('seaborn-whitegrid'): # context manager for styling the figure
fig, ax = plt.subplots()
survival_rate = titanic.groupby("Pclass")['Survived'].mean()
survival_rate.plot(kind='bar', color='grey',
rot=0, figsize=(6, 4), ax=ax)
ylab = ax.set_ylabel("Survival rate")
xlab = ax.set_xlabel("Cabin class")
with sns.axes_style("whitegrid"): # context manager for styling the figure
g = sns.catplot(data=titanic,
x="Pclass", y="Survived",
kind="bar", estimator=np.mean,
ci=None, color="grey")
g.set_axis_labels("Cabin class", "Survival rate")
fig.savefig("my_plot_with_one_issue.pdf")
import plotnine as p9
myplot = (p9.ggplot(titanic) # 1. DATA
+ p9.geom_bar( # 2. GEOMETRY, geom_*
stat='stat_summary',
mapping=p9.aes(x='Pclass',
y='Survived') # 3. AESTHETICS - relate variables to geometry
)
)
myplot
import plotnine as p9
myplot = (p9.ggplot(titanic) # 1. DATA
+ p9.geom_bar( # 2. GEOMETRY, geom_*
stat='stat_summary',
mapping=p9.aes(x='Pclass',
y='Survived') # 3. AESTHETICS - relate variables to geometry
)
+ p9.xlab("Cabin class") # labels
+ p9.theme_minimal() # theme
# ...
)
myplot
my_plt_version = myplot.draw(); # extract as Matplotlib Figure
# Do some Matplotlib magick...
my_plt_version.axes[0].set_title("Titanic fare price per cabin class")
ax2 = my_plt_version.add_axes([0.7, 0.5, 0.3, 0.3], label="ax2")
import altair as alt
(alt.Chart(titanic) # 1. DATA
.mark_bar() # 2. GEOMETRY, geom_*
.encode( # 3. AESTHETICS - relate variables to geometry
x=alt.X('Pclass:O',
axis=alt.Axis(title='Cabin class')),
y=alt.Y('mean(Survived):Q',
axis=alt.Axis(format='%',
title='Survival rate'))
)
)
brush = alt.selection(type='interval')
(alt.Chart(titanic)
.mark_circle().encode(
x="Fare:Q",
y="Age:Q",
column="Sex:O",
color=alt.condition(brush, "Pclass:N", alt.value('grey')),
).add_selection(brush))
from bokeh.plotting import figure, output_file, show
from bokeh.io import output_notebook
output_notebook()
p = figure()
p.line(x=[1, 2, 3],
y=[4,6,2])
show(p)
from bokeh.models import ColumnDataSource
flow_data = pd.read_csv("data/vmm_flowdata.csv", parse_dates=True, index_col=0)
source_data = ColumnDataSource(data=flow_data)
p = figure(x_axis_type="datetime", plot_height=200, plot_width=600)
p.line(x='Time', y='LS06_347', source=source_data)
show(p)
from bokeh.models import ColumnDataSource, BoxAnnotation, Label
p = figure(x_axis_type="datetime", plot_height=200, plot_width=600)
p.line(x='Time', y='L06_347', source=source_data)
p.circle(x='Time', y='L06_347', source=source_data,
fill_alpha= 0.3, line_alpha=0.3)
alarm_box = BoxAnnotation(bottom=10, fill_alpha=0.3,
fill_color='#ff6666') # arbitrary value; this is NOT the real-case value
p.add_layout(alarm_box)
alarm_label = Label(text="Flood risk", x_units='screen',
x= 10, y=10, text_color="#330000")
p.add_layout(alarm_label)
show(p)
import hvplot.pandas
flow_data.hvplot()
import holoviews as hv
fig = hv.render(flow_data.hvplot())
type(fig)
import plotly
import plotly.graph_objects as go
survival_rate = titanic.groupby("Pclass")['Survived'].mean().reset_index()
fig = go.Figure()
fig.add_trace(go.Bar(
x=survival_rate["Pclass"],
y=survival_rate["Survived"])
)
fig.update_xaxes(type='category')
fig.update_layout(
xaxis_title="Cabin class",
yaxis_title="Survival rate")
fig.show()
import plotly.express as px
# plotly express does not provide the count statistics out of the box, so calculating these
survival_rate = titanic.groupby("Pclass")['Survived'].mean().reset_index()
fig = px.bar(survival_rate, x="Pclass", y="Survived")
fig.show()
df = flow_data.reset_index()
fig = px.line(flow_data.reset_index(), x="Time", y=df.columns,
hover_data={"Time": "|%B %d, %Y"}
)
fig.show()
pd.options.plotting.backend = 'matplotlib'
flow_data.plot()
pd.options.plotting.backend = 'holoviews'
flow_data.plot()
pd.options.plotting.backend = 'plotly'
flow_data.plot()
import pandas as pd
import holoviews as hv
import hvplot.pandas
from colorcet import fire
hv.extension('bokeh')
df = pd.read_csv('data/HG_OOSTENDE-gps-2018.csv', nrows=1_000_000, # for the live demo on my laptop, I just use 1_000_000 points
usecols=['location-long', 'location-lat',
'individual-local-identifier'])
df.head()
import warnings
warnings.filterwarnings('ignore')
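# datashade=True rasterises the GPS fixes into a count-per-pixel image before display,
# so only the aggregated image (not every individual point) is sent to the browser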
df.hvplot.points('location-long', 'location-lat', geo=True, tiles='ESRI',
datashade=True, aggregator='count', cmap=fire, project=True,
xlim=(-5, 5), ylim=(48, 53), frame_width=600)
from IPython.display import Image
Image('https://raw.githubusercontent.com/rougier/python-visualization-landscape/master/landscape.png')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The function collect_variables(expr) takes a string expr that can be interpreted as a Python expression as input and collects all variables occurring in expr. It takes care to eliminate the function symbols from the names returned by extract_variables.
Step2: The function arb(S) takes a set S as input and returns an arbitrary element from
Step3: We need the function choice from the module random. Given a list L, random.choice(L) returns a random element from L. In order to have reproducible results, we have to set the seed for the random number generator.
Step4: Given a dictionary A, the function extend(A) returns a dictionary B such that B[key] = value and B[x] = A[x] for all x that are different from key.
Step5: The module Set implements <em style="color
Step6: The function cast_to_set(L) returns a Set object containing all elements from the iterable L.
Step7: Given a list of sets L, the function union(L) returns the set of all elements occurring in some set $S$ that is itself a member of the list L, i.e. we have
Step8: We define the class Failure of exceptions so that we can distinguish Failure exceptions from other exceptions. This is done by creating a new, empty class that is derived from the class Exception.
Step9: A Constraint Problem Solver Using Local Search
Step10: The function local_search takes three parameters.
Step11: The function numConflicts takes three arguments
Step12: Consistency Checking
Step13: The function enforce_consistency takes 4 arguments
Step14: The procedure exists_values takes five arguments
Step15: The function all_assignments returns the list of all possible assignments for the variables in the set Vars.
Step16: Solving the Eight-Queens-Puzzle
Step17: As the N queens problem is not very difficult, we will not use consistency checking.
Step18: The 100 queens problem can be solved in 32 seconds if we do not use consistency checking.
Step19: Solving the Zebra Puzzle
Step20: Solving the Zebra Puzzle takes about 4 seconds, provided we use consistency checking.
Step21: Solving a Sudoku Puzzle
Step22: Solving the given Sudoku puzzle takes about 2 minutes, provided we use consistency checking. For hard problems, local search is not a good idea.
Step23: Solving a Crypto-Arithmetic Puzzle
Step24: Solving the crypto-arithmetic puzzle took 160 milliseconds with consistency checking.
|
<ASSISTANT_TASK:>
Python Code:
import extractVariables as ev
def collect_variables(expr):
return frozenset(var for var in ev.extractVars(expr)
if var not in dir(__builtins__)
if var not in ['and', 'or', 'not']
)
def arb(S):
for x in S:
return x
import random
random.seed(42)
def extend(A, key, value):
B = A.copy()
B[key] = value
return B
import sys
sys.path.append('..')
import Set
def cast_to_Set(L):
Result = Set.Set()
for x in L:
Result.insert(x)
return Result
def union(L):
return { x for S in L for x in S }
class Failure(Exception):
pass
def solve(P, consistency=True):
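    # P is a triple (Variables, Values, Constraints); each constraint is a string that
    # can be evaluated as a Python expression over the variables. If consistency is True,
    # the variable domains are pruned with enforce_consistency before local search starts.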
Variables, Values, Constraints = P
VarsInConstrs = union([ collect_variables(f) for f in Constraints ])
MisspelledVars = (VarsInConstrs - Variables) | (Variables - VarsInConstrs)
if MisspelledVars:
print("Did you misspell any of the following Variables?")
for v in MisspelledVars:
print(v)
ValuesPerVar = { x: Values for x in Variables }
Annotated = { f: collect_variables(f) for f in Constraints }
if consistency:
Connected = {}
Var2Formulas = variables_2_formulas(Annotated)
for x in Variables:
Connected[x] = union([ V for f, V in Annotated.items() if x in V ]) - { x }
try:
enforce_consistency(ValuesPerVar, Var2Formulas, Annotated, Connected)
for x, Values in ValuesPerVar.items():
print(f'{x}: {Values}')
except Failure:
return None
return local_search(Variables, ValuesPerVar, Annotated)
def local_search(Variables, ValuesPerVar, Annotated):
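    # Min-conflicts local search: start from a random assignment, repeatedly move a
    # variable involved in many violated constraints to a value that violates as few
    # constraints as possible; every 11th/13th iteration a random choice is made instead,
    # to escape local minima.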
Variables = list(Variables) # convert to list for random.choice(Variables) to work
Assignment = { x: random.choice(list(ValuesPerVar[x])) for x in Variables }
iteration = 0
lastVar = arb(Variables)
while True:
Conflicts = [ (numConflicts(x, Assignment, Annotated), x) for x in Variables
if x != lastVar
]
maxNum, _ = Set.last(cast_to_Set(Conflicts))
if maxNum == 0 and numConflicts(lastVar, Assignment, Annotated) == 0:
print(f'Number of iterations: {iteration}')
return Assignment
if iteration % 11 == 0: # avoid infinite loop
x = random.choice(Variables)
else: # choose var with max number of conflicts
FaultyVars = [ var for (num, var) in Conflicts if num == maxNum ]
x = random.choice(FaultyVars)
if iteration % 13 == 0: # avoid infinite loop
newVal = random.choice(list(ValuesPerVar[x]))
else:
Conflicts = [ (numConflicts(x, extend(Assignment, x, val), Annotated), val)
for val in ValuesPerVar[x]
]
minNum, _ = Set.first(cast_to_Set(Conflicts))
ValuesForX = [ val for (n, val) in Conflicts if n == minNum ]
newVal = random.choice(ValuesForX)
Assignment[x] = newVal
lastVar = x
iteration += 1
def numConflicts(x, Assign, Annotated):
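    # count the constraints that mention the variable x and are violated under Assign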
NewAssign = Assign.copy()
return len([ (f, V) for (f, V) in Annotated.items()
if x in V and not eval(f, NewAssign)
])
def variables_2_formulas(Annotated):
Dictionary = {};
for f, Vars in Annotated.items():
for x in Vars:
if x in Dictionary:
Dictionary[x] |= { f }
else:
Dictionary[x] = { f }
return Dictionary
def enforce_consistency(ValuesPerVar, Var2Formulas, Annotated, Connected):
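    # prune ValuesPerVar: a value is dropped from a variable's domain if some constraint
    # containing that variable cannot be satisfied by any combination of values of the
    # other variables; affected variables are re-queued until a fixed point is reached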
UncheckedVars = set(Var2Formulas.keys())
while UncheckedVars:
variable = UncheckedVars.pop()
Constraints = Var2Formulas[variable]
Values = ValuesPerVar[variable]
RemovedVals = set()
for f in Constraints:
OtherVars = Annotated[f] - { variable }
for value in Values:
if not exists_values(variable, value, f, OtherVars, ValuesPerVar):
RemovedVals |= { value }
UncheckedVars |= Connected[variable]
Remaining = Values - RemovedVals
if not Remaining:
raise Failure()
ValuesPerVar[variable] = Remaining
def exists_values(var, val, f, Vars, ValuesPerVar):
Assignments = all_assignments(Vars, ValuesPerVar)
return any(eval(f, extend(A, var, val)) for A in Assignments)
def all_assignments(Variables, ValuesPerVar):
Variables = set(Variables) # turn frozenset into a set
if not Variables:
return [ {} ] # list containing empty assignment
var = Variables.pop()
Values = ValuesPerVar[var]
Assignments = all_assignments(Variables, ValuesPerVar)
return [ extend(A, var, val) for A in Assignments
for val in ValuesPerVar[var]
]
%%capture
%run N-Queens-Problem-CSP.ipynb
P = create_csp(8)
%%time
Solution = solve(P, False)
print(f'Solution = {Solution}')
show_solution(Solution)
P = create_csp(100)
%%time
Solution = solve(P, False)
%run Zebra.ipynb
zebra = zebra_csp()
%%time
Solution = solve(zebra, True)
show_solution(Solution)
%run Sudoku.ipynb
csp = sudoku_csp(Sudoku)
csp
%%time
Solution = solve(csp)
show_solution(Solution)
%run Crypto-Arithmetic.ipynb
csp = crypto_csp()
%%time
Solution = solve(csp, True)
show_solution(Solution)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Linear model
Step3: Decision tree
Step4: Test another classifier
Step5: ROC area
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function
from sklearn import __version__ as sklearn_version
print('Sklearn version:', sklearn_version)
from sklearn import datasets
iris = datasets.load_iris()
print(iris.DESCR)
# Print some data lines
print(iris.data[:10])
print(iris.target)
#Randomize and separate train & test
from sklearn.utils import shuffle
X, y = shuffle(iris.data, iris.target, random_state=0)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# Linear model
from sklearn.linear_model import LogisticRegression
# Define classifier
clf_logistic = LogisticRegression()
# Fit classifier
clf_logistic.fit(X_train, y_train)
# Evaluate accuracy in test
from sklearn.metrics import accuracy_score
# Predict test data
y_test_pred = clf_logistic.predict(X_test)
# Evaluate accuracy
print('Accuracy test: ', accuracy_score(y_test, y_test_pred))
from sklearn import tree
# Define classifier
clf_tree = tree.DecisionTreeClassifier(max_depth=3)
# Fit
clf_tree.fit(X_train, y_train)
# Evaluate test accuracy
print('Tree accuracy test: ', accuracy_score(y_test, clf_tree.predict(X_test)))
# Configure model
from sklearn import svm
clf_svc = svm.LinearSVC()
# Fit over train
clf_svc.fit(X_train, y_train)
# Accuracy score over test
print(accuracy_score(y_test, clf_svc.predict(X_test)))
# ROC area
# Print probabilities
y_test_proba = clf_logistic.predict_proba(X_test)
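# predict_proba returns one column per class; the probabilities in each row sum to 1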
print(y_test_proba[:5])
#Recode y from multiclass labels to binary labels
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
lb.fit(y_train)
print('Test classes: ',lb.classes_)
y_test_bin = lb.transform(y_test)
print(y_test_bin[:5])
# Roc curve
from sklearn.metrics import roc_auc_score
print('Average ROC area: ', roc_auc_score(y_test_bin, y_test_proba))
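# A small illustrative addition (not in the original notebook): with average=None,
# roc_auc_score returns one ROC area per class instead of the macro average above.
print('Per-class ROC areas: ', roc_auc_score(y_test_bin, y_test_proba, average=None))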
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.metrics import classification_report, log_loss, roc_auc_score
print('Classification Report:',classification_report(y_test, y_pred))
print('Log Loss:',log_loss(y_test, y_pred))
print('ROC AUC:',roc_auc_score(y_test, y_pred))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's print the output...
Step2: Let's type in our S3 bucket address, which you'll need to change below. Note the use of s3
Step3: We can write our tick data DataFrame in Parquet format. We can give it the MarketDataRequest we used for fetching the data, which basically creates the filename in the format of environment.category.data_source.freq.tickers for high frequency data or in the format of environment.category.data_source.freq for daily data. This will enable us to more easily fetch the data using the same MarketDataRequest interface.
Step4: We could fetch the data directly using the S3 filename ie.
Step5: But it is more convenient to simply use the MarketDataRequest object we populated earlier. But in order to make it fetch from S3 instead of Dukascopy, we just need to set the data_engine property to give it the path of the S3 bucket and the postfix /*.parquet.
Step6: It should be noted there are many other ways to dump and read Parquet files from S3. We can use pandas.read_parquet to directly read Parquet files from S3. Libraries like Dask also support reading Parquet directly from S3 too. I'd also checkout AWS Data Wrangler, which makes it easier to use Pandas with many AWS services including S3, at https
Step7: We can print out the MarketDataRequest we just constructed. We should be able to see there quandl for the data_source and fx for the category, as well as the start_date (realise it's difficult!)
Step8: We can now fetch the market data from quandl.
Step9: Let's write this to S3.
Step10: And we can read it back using a similar call to before, except this time we set the data_engine property of the MarketDataRequest.
Step11: If we set the cache_algo property to cache_algo_return and remove the data_engine parameter (and if we have Redis running), and make exactly the same data request call (same assets and dates), findatapy will look in the local Redis cache to fetch the data. This is significantly quicker, at around 20ms versus over 500ms for fetching from S3.
|
<ASSISTANT_TASK:>
Python Code:
# First disable the log so the output is neater
import logging, sys
logging.disable(sys.maxsize)
from findatapy.market import Market, MarketDataRequest
# In this case we are saving predefined tick tickers to disk, and then reading back
from findatapy.market.ioengine import IOEngine
md_request = MarketDataRequest(
start_date='04 Jan 2021',
finish_date='05 Jan 2021',
category='fx',
data_source='dukascopy',
freq='tick',
tickers=['EURUSD'],
fields=['bid', 'ask', 'bidv', 'askv'],
data_engine=None
)
market = Market()
df = market.fetch_market(md_request=md_request)
print(df)
folder = 's3://type-here'
IOEngine().write_time_series_cache_to_disk(folder, df, engine='parquet', md_request=md_request)
s3_filename = folder + '/backtest.fx.dukascopy.tick.NYC.EURUSD.parquet'
df = IOEngine().read_time_series_cache_from_disk(s3_filename, engine='parquet')
print(df)
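# Illustrative aside for the alternative mentioned in the description: pandas can
# read the Parquet file straight from S3 as well. This sketch assumes the optional
# s3fs and pyarrow dependencies are installed in the environment.
import pandas as pd
df_direct = pd.read_parquet(s3_filename)
print(df_direct.head())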
md_request.data_engine = folder + '/*.parquet'
df = market.fetch_market(md_request)
print(df)
import os
from findatapy.market import Market, MarketDataRequest
# In this case we are saving predefined tick tickers to disk, and then reading back
from findatapy.market.ioengine import IOEngine
# Change this to your own Quandl API key
quandl_api_key = os.environ['QUANDL_API_KEY']
md_request = market.create_md_request_from_str(md_request_str='fx.quandl.daily.NYC',
md_request=MarketDataRequest(start_date='01 Jan 2021', finish_date='27 May 2021', quandl_api_key=quandl_api_key))
print(md_request)
df = market.fetch_market(md_request)
print(df)
IOEngine().write_time_series_cache_to_disk(folder, df, engine='parquet', md_request=md_request)
df = market.fetch_market(md_request_str='fx.quandl.daily.NYC',
md_request=MarketDataRequest(start_date='01 Jan 2021', finish_date='27 May 2021',
quandl_api_key=quandl_api_key,
data_engine=folder + '/*.parquet'))
print(df)
df = market.fetch_market(md_request_str='fx.quandl.daily.NYC',
md_request=MarketDataRequest(start_date='01 Jan 2021', finish_date='27 May 2021',
quandl_api_key=quandl_api_key,
cache_algo='cache_algo_return'))
print(df)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creation of numpy arrays
Step2: Create an array filled with zeros and one filled with random numbers in the interval [0,1), each with a shape (5, 2).
Step3: Numpy arrays can also be created from lists using the np.array() function. Create numpy arrays from the following two lists!
Step4: Basic Operations
Step5: Using numpy arrays this is a lot easier and faster. For two arrays of the same shape "+" "-" "/" "*" operations will be applied elementwise to the whole arrays. Calculate the vector pointing from atom1 to atom2 using numpy!
Step6: We can not only use these operations for two arrays of the same shape. Whenever each dimension of the two arrays has either the same size or one of them has size 1, numpy broadcasts the smaller array to the shape of the larger array. Let's see how this works in an example
Step7: The array a has the shape (4, 2). In the first dimension there are 4 entries and in the second dimension there are 2 entries.
Step8: The array b has the shape (1, 2). In the first dimension, one of the two arrays has size 1. In the second dimension they both have the same size (2). Because every dimension either has equal sizes or one of them is 1, numpy is able to broadcast b to the shape of a. That is why we can, for example, do
Step9: When we combine two arrays with different numbers of dimensions, all missing dimensions (counting from the last dimension) are treated as dimensions of size one. For example
Step10: We can also do the same with a numpy array and something that is convertible into a numpy array. So, a shortcut for the example above would be
Step11: Use broadcasting to calculate all the vectors from atom1 to all other atoms! other_atoms is a 2d array where the first dimension represents different atoms and the second dimension represents the x, y and z coordinates.
Step12: Indexing
Step13: For higher-dimensional arrays, the indexes for the different dimensions are separated by commas. For example
Step14: Try to slice out the following block from the array a!
Step15: But this is not all. We can also do more fancy indexing. If we compare two arrays, or an array with something that can be broadcast to the same shape, we get back an array of booleans. This array is the result of an elementwise comparison. Such boolean arrays can also be used for indexing. Let's see how this works
Step16: Let's see how we use such boolean arrays for indexing
Step17: Set all numbers in a which equal 2, to 0.
Step18: Your first own numpy MD code
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
example_array = np.ones((2, 3))
print(example_array)
my_zeros_array = # your code goes here
my_random_array = # your code goes here
print(my_zeros_array)
print(my_random_array)
atom1_xyz = [5, 2, 8]
atom2_xyz = [8, 4, 6]
atom1_xyz_np = # your code goes here
atom2_xyz_np = # your code goes here
print(atom1_xyz_np)
print(atom2_xyz_np)
vec = []
for i in range(len(atom1_xyz)):
vec.append(atom2_xyz[i] - atom1_xyz[i])
print(vec)
vec = # your code goes here
print(vec)
a = np.array([[2, 1],
[3, 4],
[9, 2],
[7, 1]])
b = np.array([[2, 3]])
c = a * b
print(c)
a = np.array([[2, 1],
[3, 4],
[9, 2],
[7, 1]]) # has shape (4, 2)
d = np.array([5]) # has shape (1) and is treated like shape (1, 1)
print(a - d)
print(a - 5)
atom1 = np.array([5, 2, 8])
other_atoms = np.array([[5, 1, 4],
[8, 4, 6],
[3, 2, 4],
[9, 7, 3]])
vectors_from_atom1_to_all_others = # your code goes here
print(vectors_from_atom1_to_all_others)
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
print(a[3]) # a at the position 3 (keep in mind python counts from 0!)
print(a[2:]) # all numbers from position 2 to the end
print(a[2:8]) # all numbers from position 2 to (not including) position 8
print(a[2:8:2]) # every second number in the same range as above
print(a[:-2]) # all numbers except for the last two
a = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
print(a[0, 1]) # the element in the row 0 and column 1
print(a[0, :]) # all elements in row 0
print(a[:, :-1]) # all elements in all columns except for the last culumn
block = np.array([[5, 6],
[8, 9]])
slice_of_a = a[# your code goes in here]
if np.array_equal(block, slice_of_a):
print('That is correct!')
else:
print('Sorry you have sliced out\n', slice_of_a, 'instead')
a = np.array([[2, 3, 5],
[5, 2, 3],
[3, 5, 2]])
print(a > 2) # we could also use "==" "<" ">" "<=" ">=" "!="
a = np.array([[2, 3, 5],
[5, 2, 3],
[3, 5, 2]])
a[a > 2] += 1 # here we add one to each value larger then 2
print(a)
a[?] = # your code goes in here
print(a)
import matplotlib.pyplot as plt
from IPython.display import clear_output, display
%matplotlib inline
import numpy as np
class Simulation:
def __init__(self, n_atoms, box_size):
self.n_atoms = n_atoms
self.box_size = box_size
self.positions = # Task 1
self.velocities = # Task 1
self.fig, self.ax = plt.subplots()
def run(self, n_steps):
for step in range(n_steps):
self.positions = self.positions # Task 2
self.keep_in_box()
self.show(step)
def keep_in_box(self):
self.positions[...] += 0 # Task 3
self.positions[...] -= 0 # Task 3
def show(self, step):
self.ax.scatter(self.positions[:, 0], self.positions[:, 1])
self.ax.set_ylim((0, self.box_size))
self.ax.set_xlim((0, self.box_size))
self.ax.text(0.4, 0.9, 'step ' + str(step),
bbox={'facecolor':'white', 'alpha':0.5, 'pad':7})
self.ax.set_aspect('equal', adjustable='box')
clear_output(True)
display(self.fig)
self.ax.cla()
sim = Simulation(n_atoms=100, box_size=1)
sim.run(n_steps=500)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Prepare the data
Step2: 2. Normalize the training set
Step3: 3. Prepare data for LSTM
Step4: 4. Define the model
Step5: 5. Define loss and optimization functions
Step6: 6. Train the model
Step7: 9. Evaluate the model using the test set
Step8: 10. Inverse transform the predicted values
Step9: BONUS EXERCISE
|
<ASSISTANT_TASK:>
Python Code:
# RUN THIS CELL
import torch
import torch.nn as nn
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
df = pd.read_csv('../Data/TimeSeriesData/Energy_Production.csv',index_col=0,parse_dates=True)
df.dropna(inplace=True)
print(len(df))
df.head()
# RUN THIS CELL
plt.figure(figsize=(12,4))
plt.title('Industrial Production Index for Electricity and Gas Utilities')
plt.ylabel('Index 2012=100, Not Seasonally Adjusted')
plt.grid(True)
plt.autoscale(axis='x',tight=True)
plt.plot(df['IPG2211A2N'])
plt.show()
# CODE HERE
y = df['IPG2211A2N'].values.astype(float)
test_size =
window_size =
train_set =
test_set =
# Run the code below to check your results:
print(f'Train: {len(train_set)}')
print(f'Test: {len(test_set)}')
# DON'T WRITE HERE
y = df['IPG2211A2N'].values.astype(float)
test_size = 12
window_size = 12
train_set = y[:-test_size]
test_set = y[-test_size:]
print(f'Train: {len(train_set)}')
print(f'Test: {len(test_set)}')
# CODE HERE
scaler = MinMaxScaler(feature_range=(-1, 1))
train_norm =
# Run the code below to check your results:
print(f'First item, original: {train_set[0]}')
print(f'First item, scaled: {train_norm[0]}')
# DON'T WRITE HERE
scaler = MinMaxScaler(feature_range=(-1, 1))
train_norm = scaler.fit_transform(train_set.reshape(-1, 1))
print(f'First item, original: {train_set[0]}')
print(f'First item, scaled: {train_norm[0]}')
# RUN THIS CELL
train_norm = torch.FloatTensor(train_norm).view(-1)
def input_data(seq,ws):
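    # Build (window, label) pairs from the sequence: each window of length ws is
    # paired with the single value that immediately follows it (one-step-ahead target).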
out = []
L = len(seq)
for i in range(L-ws):
window = seq[i:i+ws]
label = seq[i+ws:i+ws+1]
out.append((window,label))
return out
# CODE HERE
train_data =
# Run the code below to check your results:
print(f'Train_data: {len(train_data)}') # should equal 301
# DON'T WRITE HERE
train_data = input_data(train_norm,window_size)
print(f'Train_data: {len(train_data)}')
# CODE HERE
class LSTMnetwork(nn.Module):
# Run the code below to check your results:
torch.manual_seed(101)
model = LSTMnetwork()
model
# DON'T WRITE HERE
class LSTMnetwork(nn.Module):
def __init__(self,input_size=1,hidden_size=64,output_size=1):
super().__init__()
self.hidden_size = hidden_size
# Add an LSTM layer:
self.lstm = nn.LSTM(input_size,hidden_size)
# Add a fully-connected layer:
self.linear = nn.Linear(hidden_size,output_size)
# Initialize h0 and c0:
self.hidden = (torch.zeros(1,1,self.hidden_size),
torch.zeros(1,1,self.hidden_size))
def forward(self,seq):
lstm_out, self.hidden = self.lstm(
seq.view(len(seq),1,-1), self.hidden)
pred = self.linear(lstm_out.view(len(seq),-1))
return pred[-1]
torch.manual_seed(101)
model = LSTMnetwork()
model
# CODE HERE
# DON'T WRITE HERE
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# CODE HERE
# DON'T WRITE HERE
epochs = 50
for i in range(epochs):
for seq, y_train in train_data:
# reset the parameters and hidden states
optimizer.zero_grad()
model.hidden = (torch.zeros(1,1,model.hidden_size),
torch.zeros(1,1,model.hidden_size))
# apply the model
y_pred = model(seq)
# update parameters
loss = criterion(y_pred, y_train)
loss.backward()
optimizer.step()
# OPTIONAL print statement
print(f'{i+1} of {epochs} epochs completed')
# CODE HERE
future =
preds =
model.eval()
for i in range(future):
# Run the code below to check your results:
preds[window_size:]
# DON'T WRITE HERE
future = 12
preds = train_norm[-window_size:].tolist()
model.eval()
for i in range(future):
seq = torch.FloatTensor(preds[-window_size:])
with torch.no_grad():
model.hidden = (torch.zeros(1,1,model.hidden_size),
torch.zeros(1,1,model.hidden_size))
preds.append(model(seq).item())
preds[window_size:]
# CODE HERE
true_predictions =
# Run the code below to check your results:
true_predictions
# DON'T WRITE HERE
true_predictions = scaler.inverse_transform(np.array(preds[window_size:]).reshape(-1, 1))
true_predictions
# CODE HERE
# CODE HERE TO DISPLAY THE END OF THE GRAPH
# DON'T WRITE HERE
x = np.arange('2018-02-01', '2019-02-01', dtype='datetime64[M]').astype('datetime64[D]')
plt.figure(figsize=(12,4))
plt.title('Industrial Production Index for Electricity and Gas Utilities')
plt.ylabel('Index 2012=100, Not Seasonally Adjusted')
plt.grid(True)
plt.autoscale(axis='x',tight=True)
plt.plot(df['IPG2211A2N'])
plt.plot(x,true_predictions)
plt.show()
# DON'T WRITE HERE
fig = plt.figure(figsize=(12,4))
plt.title('Industrial Production Index for Electricity and Gas Utilities')
plt.ylabel('Index 2012=100, Not Seasonally Adjusted')
plt.grid(True)
plt.autoscale(axis='x',tight=True)
fig.autofmt_xdate()
plt.plot(df['IPG2211A2N']['2017-01-01':])
plt.plot(x,true_predictions)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Big caveat here
Step2: grouping on the main layer
Step3: Look at a few of the common main layer groups
Step4: Charges
Step5: Stereo grouping
Step6: an aside
Step7: Sucks to be you if it's important to you that those molecules be different and you're using InChI.
Step8: No need here, this set has no labelled compounds. That's likely a property of how the ZINC clean set was constructed.
Step9: Not much interesting there. There's no simple query to find questionable tautomer motion.
|
<ASSISTANT_TASK:>
Python Code:
%sql postgresql://localhost/inchi_split \
select count(*) from zinc_clean_nonstandard;
d = %sql \
select formula,count(zinc_id) freq from zinc_clean_nonstandard group by formula \
order by freq desc limit 10;
d
d = %sql \
select formula,skeleton,hydrogens,count(zinc_id) freq from zinc_clean_nonstandard group by \
(formula,skeleton,hydrogens) \
order by freq desc limit 10;
d[:5]
tpl=d[0][:-1]
print(tpl)
rows = %sql \
select zinc_id,smiles from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
(formula,skeleton,hydrogens) = :tpl
cids = [x for x,y in rows][:9]
ms = [Chem.MolFromSmiles(y) for x,y in rows][:9]
Draw.MolsToGridImage(ms,legends=cids)
tpl=d[1][:-1]
print(tpl)
rows = %sql \
select zinc_id,smiles from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
(formula,skeleton,hydrogens) = :tpl
cids = [x for x,y in rows][:9]
ms = [Chem.MolFromSmiles(y) for x,y in rows][:9]
Draw.MolsToGridImage(ms,legends=cids)
tpl=d[4][:-1]
print(tpl)
rows = %sql \
select zinc_id,smiles from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
(formula,skeleton,hydrogens) = :tpl
cids = [x for x,y in rows][:9]
ms = [Chem.MolFromSmiles(y) for x,y in rows][:9]
Draw.MolsToGridImage(ms,legends=cids)
d = %sql \
select formula,skeleton,hydrogens,charge,protonation,count(zinc_id) freq from zinc_clean_nonstandard group by \
(formula,skeleton,hydrogens,charge,protonation) \
order by freq desc limit 10;
d[:5]
tpl=d[0][:-1]
tpl = tuple(x if x is not None else '' for x in tpl)
print(tpl)
rows = %sql \
select zinc_id,smiles from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
(formula,skeleton,hydrogens,coalesce(charge,''),coalesce(protonation,'')) = :tpl
cids = [x for x,y in rows][:9]
ms = [Chem.MolFromSmiles(y) for x,y in rows][:9]
Draw.MolsToGridImage(ms,legends=cids)
d = %sql \
select formula,skeleton,hydrogens,charge,protonation,stereo_bond,stereo_tet,stereo_m,stereo_s,count(zinc_id) freq \
from zinc_clean_nonstandard where stereo_bond is not null or stereo_tet is not null \
group by \
(formula,skeleton,hydrogens,charge,protonation,stereo_bond,stereo_tet,stereo_m,stereo_s) \
order by freq desc limit 10;
d[:5]
tpl=d[0][:-1]
tpl = tuple(x if x is not None else '' for x in tpl)
print(tpl)
rows = %sql \
select zinc_id,smiles from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
(formula,skeleton,hydrogens,\
coalesce(charge,''),coalesce(protonation,''),coalesce(stereo_bond,''),\
coalesce(stereo_tet,''),coalesce(stereo_m,''),coalesce(stereo_s,'')) = :tpl
cids = [x for x,y in rows]
ms = [Chem.MolFromSmiles(y) for x,y in rows]
Draw.MolsToGridImage(ms,legends=cids)
tpl=d[1][:-1]
tpl = tuple(x if x is not None else '' for x in tpl)
print(tpl)
rows = %sql \
select zinc_id,smiles from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
(formula,skeleton,hydrogens,\
coalesce(charge,''),coalesce(protonation,''),coalesce(stereo_bond,''),\
coalesce(stereo_tet,''),coalesce(stereo_m,''),coalesce(stereo_s,'')) = :tpl
cids = [x for x,y in rows]
ms = [Chem.MolFromSmiles(y) for x,y in rows]
Draw.MolsToGridImage(ms,legends=cids)
td = %sql \
select t2.zinc_id,t2.nonstandard_inchi,t2.smiles from zinc_clean_nonstandard t1 join zinc_clean t2 using (zinc_id) \
where (formula,skeleton,hydrogens,charge)=\
('/C29H33N2','/c1-28(2)22-16-12-14-18-24(22)30(5)26(28)20-10-8-7-9-11-21-27-29(3,4)23-17-13-15-19-25(23)31(27)6',\
'/h7-21H,1-6H3','/q+1')
print(td)
cids = [x for x,y,z in td]
ms = [Chem.MolFromSmiles(z) for x,y,z in td]
Draw.MolsToGridImage(ms,legends=cids)
%sql \
select count(*) \
from zinc_clean_nonstandard where isotope is not null
rows = %sql \
select zinc_id,smiles,nonstandard_inchi from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
fixedh_stereo_tet is not null and position('?' in fixedh_stereo_tet)<=0 and stereo_tet!=fixedh_stereo_tet
len(rows)
cids = [x for x,y,z in rows][:10]
ms = [Chem.MolFromSmiles(y) for x,y,z in rows][:10]
Draw.MolsToGridImage(ms,legends=cids)
rows = %sql \
select zinc_id,smiles,nonstandard_inchi from zinc_clean join zinc_clean_nonstandard using (zinc_id) where \
fixedh_stereo_bond is not null and fixedh_stereo_bond!='/b' and position('?' in fixedh_stereo_bond)<=0 and stereo_bond!=fixedh_stereo_bond
len(rows)
cids = [x for x,y,z in rows][:10]
ms = [Chem.MolFromSmiles(y) for x,y,z in rows][:10]
Draw.MolsToGridImage(ms,legends=cids)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First reload the data we generated in notmnist.ipynb.
Step2: Reformat into a shape that's more adapted to the models we're going to train
Step3: Problem 1
Step4: Run it
Step5: Introducing a suitable amount of regularization improves validation set and test set accuracy.
Step6: Now run it
Step7: 93.1%, the best results we've had so far.
Step8: Now re-run the 2-layer NN, set regularization to 0, and see what happens.
Step9: The model quickly overfits the training set (actually 'remembers' the entire training set; shows how powerful nn can be). The model stopped improving after that and performs poorly on validation and test sets.
Step10: Problem 3
Step11: Improved somewhat, but not an awful lot. Why?
Step12: Train with full dataset
Step13: 91.8%
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
import math
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
batch_size = 128
beta = 0.001
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + beta * tf.nn.l2_loss(weights)
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
relu_units = 1024
beta = 0.001
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables. Redefine the variables reflecting NN structure.
# First layer
w1 = tf.Variable(
tf.truncated_normal([image_size * image_size, relu_units]))
b1 = tf.Variable(tf.zeros([relu_units]))
# Second layer
w2 = tf.Variable(
tf.truncated_normal([relu_units, num_labels]))
b2 = tf.Variable(
tf.zeros([num_labels]))
# Training computation.
logits1 = tf.matmul(tf_train_dataset, w1) + b1
relu1 = tf.nn.relu(logits1)
logits2 = tf.matmul(relu1, w2) + b2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + (
beta * tf.nn.l2_loss(w1)) + (
beta * tf.nn.l2_loss(w2))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits2)
valid_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2)
test_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
#Restrict the training data to a few batches
batch_size = 128
num_batch = 5
train_dataset_short = train_dataset[0:batch_size * num_batch + 1,]
train_labels_short = train_labels[0:batch_size * num_batch + 1,]
batch_size = 128
relu_units = 1024
beta = 0
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables. Redefine the variables reflecting NN structure.
# First layer
w1 = tf.Variable(
tf.truncated_normal([image_size * image_size, relu_units]))
b1 = tf.Variable(tf.zeros([relu_units]))
# Second layer
w2 = tf.Variable(
tf.truncated_normal([relu_units, num_labels]))
b2 = tf.Variable(
tf.zeros([num_labels]))
# Training computation.
logits1 = tf.matmul(tf_train_dataset, w1) + b1
relu1 = tf.nn.relu(logits1)
logits2 = tf.matmul(relu1, w2) + b2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + (
beta * tf.nn.l2_loss(w1)) + (
beta * tf.nn.l2_loss(w2))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits2)
valid_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2)
test_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels_short.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset_short[offset:(offset + batch_size), :]
batch_labels = train_labels_short[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
relu_units = 1024
beta = 0.001
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables. Redefine the variables reflecting NN structure.
# First layer
w1 = tf.Variable(
tf.truncated_normal([image_size * image_size, relu_units]))
b1 = tf.Variable(tf.zeros([relu_units]))
# Second layer
w2 = tf.Variable(
tf.truncated_normal([relu_units, num_labels]))
b2 = tf.Variable(
tf.zeros([num_labels]))
# Training computation.
logits1 = tf.matmul(tf_train_dataset, w1) + b1
relu1 = tf.nn.relu(logits1)
logits2 = tf.matmul(relu1, w2) + b2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + (
beta * tf.nn.l2_loss(w1)) + (
beta * tf.nn.l2_loss(w2))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits2)
valid_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2)
test_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels_short.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset_short[offset:(offset + batch_size), :]
batch_labels = train_labels_short[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
#Introduce dropout to the nn
batch_size = 128
relu_units = 1024
beta = 0.001
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables. Redefine the variables reflecting NN structure.
# First layer
w1 = tf.Variable(
tf.truncated_normal([image_size * image_size, relu_units]))
b1 = tf.Variable(tf.zeros([relu_units]))
# Second layer
w2 = tf.Variable(
tf.truncated_normal([relu_units, num_labels]))
b2 = tf.Variable(
tf.zeros([num_labels]))
# Training computation.
logits1 = tf.matmul(tf_train_dataset, w1) + b1
relu1 = tf.nn.dropout(tf.nn.relu(logits1), keep_prob = 0.5)
logits2 = tf.matmul(relu1, w2) + b2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + (
beta * tf.nn.l2_loss(w1)) + (
beta * tf.nn.l2_loss(w2))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits2)
valid_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2)
test_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2)
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels_short.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset_short[offset:(offset + batch_size), :]
batch_labels = train_labels_short[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
#Introduce dropout to the nn
batch_size = 128
relu_units = 1024
beta = 0.001
graph1 = tf.Graph()
with graph1.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables. Redefine the variables reflecting NN structure.
# First layer
w1 = tf.Variable(
tf.truncated_normal([image_size * image_size, relu_units]))
b1 = tf.Variable(tf.zeros([relu_units]))
# Second layer
w2 = tf.Variable(
tf.truncated_normal([relu_units, num_labels]))
b2 = tf.Variable(
tf.zeros([num_labels]))
# Training computation.
logits1 = tf.matmul(tf_train_dataset, w1) + b1
relu1 = tf.nn.dropout(tf.nn.relu(logits1), keep_prob = 0.5)
logits2 = tf.matmul(relu1, w2) + b2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + (
beta * tf.nn.l2_loss(w1)) + (
beta * tf.nn.l2_loss(w2))
# Optimizer. Use learning rate decay.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits2)
valid_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2)
test_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2)
num_steps = 3001
with tf.Session(graph=graph1) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
batch_size = 128
relu_units1 = 1024
relu_units2 = 512
#beta = 0
graph2 = tf.Graph()
with graph2.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables. Redefine the variables reflecting NN structure.
# First layer
w1 = tf.Variable(
tf.truncated_normal([image_size * image_size, relu_units1], stddev=0.01))
b1 = tf.Variable(tf.zeros([relu_units1]))
# Second layer
w2 = tf.Variable(
tf.truncated_normal([relu_units1, relu_units2], stddev=0.01))
b2 = tf.Variable(
tf.zeros([relu_units2]))
# Third layer
w3 = tf.Variable(
tf.truncated_normal([relu_units2, num_labels], stddev=0.01))
b3 = tf.Variable(
tf.zeros([num_labels]))
# Training computation.
logits1 = tf.matmul(tf_train_dataset, w1) + b1
relu1 = tf.nn.dropout(tf.nn.relu(logits1), keep_prob = 0.5)
logits2 = tf.matmul(relu1, w2) + b2
relu2 = tf.nn.dropout(tf.nn.relu(logits2), keep_prob = 0.5)
logits3 = tf.matmul(relu2, w3) + b3
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits3, tf_train_labels))
# Optimizer. Use learning rate decay.
global_step = tf.Variable(0) # count the number of steps taken.
learning_rate = tf.train.exponential_decay(learning_rate=0.3, global_step=global_step,
decay_steps=1000, decay_rate=0.98)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
# optimizer = tf.train.GradientDescentOptimizer(0.3).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_train_dataset, w1) + b1), w2) + b2), w3)
+ b3)
valid_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2), w3)
+ b3)
test_prediction = tf.nn.softmax(
tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2), w3)
+ b3)
num_steps = 50001
with tf.Session(graph=graph2) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction],
feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Use matrix_plot to plot the system matrices for $\sigma = 0$ and $n=10$.
Step2: Exercise
Step3: Question
Step4: Question
Step5: Question
Step6: Only the high frequencies
Step7: Question
Step8: Exercise
Step9: Two-grid iteration matrix
Step10: In the following we will use the pymg framework to set up the two-grid iteration matrix for a simple multigrid
Step11: Colour-plot exercise
Step12: Exercise
Step13: Colour-plot exercise
Step14: Exercise
Step15: Exercise
Step16: Asymptotic equivalence between periodic and non-periodic
Step17: Exercise
Step18: Smoothing
Step19: Gauss-Seidel
Step20: Coarse-grid correction
Step21: Two-grid
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.path.append("/home/moser/MG_2016/pyMG/")
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import pymg
from project.helmholtz1d import Helmholtz1D
from project.helmholtz1d_periodic import Helmholtz1D_Periodic
from project.gauss_seidel import GaussSeidel
from project.weighted_jacobi import WeightedJacobi
from project.pfasst.plot_tools import eigvalue_plot_list, matrix_plot, matrix_row_plot
from project.pfasst.transfer_tools import to_dense
from project.pfasst.matrix_method_tools import matrix_power
def system_matrix_hh1d(n,sig):
hh1d = Helmholtz1D(n, sig)
return hh1d.A
def system_matrix_hh1d_periodic(n,sig):
hh1d = Helmholtz1D_Periodic(n, sig)
return hh1d.A
def spec_rad(A):
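    # Spectral radius: the largest absolute eigenvalue of the (densified) matrix.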
return np.max(np.abs(sp.linalg.eigvals(to_dense(A))))
matrix_plot(to_dense(system_matrix_hh1d(10,0)))
matrix_plot(to_dense(system_matrix_hh1d_periodic(10,0)))
def plot_3_eigvalueplots(A_p,A_z,A_m):
eig_p.append(sp.linalg.eigvals(to_dense(A_p)))
eig_z.append(sp.linalg.eigvals(to_dense(A_z)))
eig_m.append(sp.linalg.eigvals(to_dense(A_m)))
real_part_p = np.real(eig_p[-1])
img_part_p = np.imag(eig_p[-1])
real_part_z = np.real(eig_z[-1])
img_part_z = np.imag(eig_z[-1])
real_part_m = np.real(eig_m[-1])
img_part_m = np.imag(eig_m[-1])
fig1, (ax1, ax2, ax3) = plt.subplots(ncols=3,figsize=(15,3))
ax1.plot(real_part_p,img_part_p,'ro')
ax1.set_xlabel("real part")
ax1.set_ylabel("img part")
ax1.set_title('eigenvalues')
ax2.plot(real_part_z,img_part_z,'bo')
ax2.set_xlabel("real part")
ax2.set_ylabel("img part")
ax2.set_title('eigenvalues')
ax3.plot(real_part_m,img_part_m,'go')
ax3.set_xlabel("real part")
ax3.set_ylabel("img part")
ax3.set_title('eigenvalues')
fig1.tight_layout()
plt.show()
def plot_2_eigvalueplots(A_p,A_z):
eig_p.append(sp.linalg.eigvals(to_dense(A_p)))
eig_z.append(sp.linalg.eigvals(to_dense(A_z)))
real_part_p = np.real(eig_p[-1])
img_part_p = np.imag(eig_p[-1])
real_part_z = np.real(eig_z[-1])
img_part_z = np.imag(eig_z[-1])
fig1, (ax1, ax2) = plt.subplots(ncols=2,figsize=(15,3))
ax1.plot(real_part_p,img_part_p,'ro')
ax1.set_xlabel("real part")
ax1.set_ylabel("img part")
ax1.set_title('eigenvalues')
ax2.plot(real_part_z,img_part_z,'bo')
ax2.set_xlabel("real part")
ax2.set_ylabel("img part")
ax2.set_title('eigenvalues')
fig1.tight_layout()
plt.show()
eig_p=[]
eig_m=[]
eig_z=[]
for n in [5,10,20]:
A_p = system_matrix_hh1d(n,100.0)
A_z = system_matrix_hh1d(n,0.0)
A_m = system_matrix_hh1d(n,-100.0)
plot_3_eigvalueplots(A_p, A_z, A_m)
def iteration_matrix_wjac(n, sigma, periodic=True):
if periodic:
A = system_matrix_hh1d_periodic(n,sigma)
else:
A = system_matrix_hh1d(n,sigma)
wjac = WeightedJacobi(A, 2.0/3.0)
P_inv = wjac.Pinv
return np.eye(n) - P_inv.dot(A)
matrix_plot(iteration_matrix_wjac(10,0))
n=10
for sigma in [100,0,-100]:
plot_2_eigvalueplots(iteration_matrix_wjac(n, sigma,periodic=True),iteration_matrix_wjac(n, sigma,periodic=False))
sigma_range = np.linspace(-100,100,100)
sr_wjac_periodic = map(lambda sig : spec_rad(iteration_matrix_wjac(n, sig,periodic=True)), sigma_range)
sr_wjac = map(lambda sig : spec_rad(iteration_matrix_wjac(n, sig,periodic=False)), sigma_range)
# Keep the axes fixed
fig1, (ax1, ax2) = plt.subplots(ncols=2,figsize=(15,4))
ax1.plot(sigma_range, sr_wjac_periodic,'k-')
ax1.set_xlabel('$\sigma$')
ax1.set_ylabel("spectral radius")
ax1.set_title('periodic')
ax2.plot(sigma_range, sr_wjac,'k-')
ax2.set_xlabel('$\sigma$')
ax2.set_ylabel("spectral radius")
ax2.set_title('non-periodic')
fig1.tight_layout()
plt.show()
def iteration_matrix_gs(n, sigma, periodic=True):
if periodic:
A = system_matrix_hh1d_periodic(n,sigma)
else:
A = system_matrix_hh1d(n,sigma)
gs = GaussSeidel(A)
P_inv = gs.Pinv
return np.eye(n) - P_inv.dot(A)
matrix_plot(iteration_matrix_gs(10,0,True))
n=10
for sigma in [100,0,-100]:
plot_2_eigvalueplots(iteration_matrix_gs(n, sigma,periodic=True),iteration_matrix_gs(n, sigma,periodic=False))
sr_gs_periodic = map(lambda sig : spec_rad(iteration_matrix_gs(n, sig,periodic=True)), sigma_range)
sr_gs = map(lambda sig : spec_rad(iteration_matrix_gs(n, sig,periodic=False)), sigma_range)
# Keep the axes fixed
fig1, (ax1, ax2) = plt.subplots(ncols=2,figsize=(15,4))
ax1.plot(sigma_range, sr_gs_periodic,'k-')
ax1.set_xlabel('$\sigma$')
ax1.set_ylabel("spectral radius")
ax1.set_title('periodic')
ax2.plot(sigma_range, sr_gs,'k-')
ax2.set_xlabel('$\sigma$')
ax2.set_ylabel("spectral radius")
ax2.set_title('non-periodic')
fig1.tight_layout()
plt.show()
plt.semilogy(sigma_range, np.asarray(sr_gs_periodic)-np.asarray(sr_gs),'k-')
def transformation_matrix_fourier_basis(N):
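    # Column j of psi is the discrete Fourier mode exp(2*pi*1j*i*j/N), scaled by
    # 1/sqrt(N) so that the basis is unitary.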
psi = np.zeros((N,N),dtype=np.complex128)
for i in range(N):
for j in range(N):
psi[i,j] = np.exp(2*np.pi*1.0j*j*i/N)
return psi/np.sqrt(N)
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
wjac_trafo = np.dot(PSI_trafo_inv, np.dot(iteration_matrix_wjac(n,0),PSI_trafo))
gs_trafo = np.dot(PSI_trafo_inv, np.dot(iteration_matrix_gs(n,0),PSI_trafo))
matrix_plot(np.real(wjac_trafo))
matrix_plot(np.real(gs_trafo))
def plot_fourier_transformed(A):
A = to_dense(A)
n = A.shape[0]
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
A_traf = np.dot(PSI_trafo_inv, np.dot(A,PSI_trafo))
matrix_row_plot([A,np.abs(A_traf)])
def get_high_theta_eigvals(A, plot=False):
A = to_dense(A)
n = A.shape[0]
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
A_traf = np.dot(PSI_trafo_inv, np.dot(A,PSI_trafo))
if plot:
matrix_plot(np.abs(A_traf))
eigvals = np.asarray(map(lambda k : A_traf[k,k],range(n)))
return eigvals[np.floor(n/4):np.ceil(3.0*n/4)]
def get_low_theta_eigvals(A, plot=False):
A = to_dense(A)
n = A.shape[0]
PSI_trafo = transformation_matrix_fourier_basis(n)
PSI_trafo_inv = sp.linalg.inv(PSI_trafo)
A_traf = np.dot(PSI_trafo_inv, np.dot(A,PSI_trafo))
if plot:
matrix_plot(np.abs(A_traf))
eigvals = np.asarray(map(lambda k : A_traf[k,k],range(n)))
return np.hstack([eigvals[:np.floor(n/4)],eigvals[np.ceil(3.0*n/4):]])
high_eigvals_periodic = np.abs(get_high_theta_eigvals(iteration_matrix_gs(20,0,True),True))
high_eigvals_dirichlet = np.abs(get_high_theta_eigvals(iteration_matrix_gs(20,0,False),True))
print high_eigvals_periodic
high_eigvals_dirichlet - high_eigvals_periodic
do_plot = False
for n in [5,10,50]:
for sig in [-100,0,100]:
h_eigs_periodic = np.abs(get_high_theta_eigvals(iteration_matrix_gs(n,sig,True),do_plot))
h_eigs_dirichlet = np.abs(get_high_theta_eigvals(iteration_matrix_gs(n,sig,False),do_plot))
print "Sigma :\t\t",sig
print "n:\t\t",n
# print "\n",h_eigs_periodic-h_eigs_dirichlet,"\n"
print "sum:\t\t", np.sum(np.abs(h_eigs_periodic-h_eigs_dirichlet))
print "max:\t\t", np.max(np.abs(h_eigs_periodic-h_eigs_dirichlet)),"\n"
from project.linear_transfer import LinearTransfer
from project.linear_transfer_periodic import LinearTransferPeriodic
def coarse_grid_correction(n,nc, sigma):
A_fine = to_dense(system_matrix_hh1d(n,sigma))
A_coarse = to_dense(system_matrix_hh1d(nc,sigma))
A_coarse_inv = sp.linalg.inv(A_coarse)
lin_trans = LinearTransfer(n, nc)
prolong = to_dense(lin_trans.I_2htoh)
restrict = to_dense(lin_trans.I_hto2h)
return np.eye(n)- np.dot(prolong.dot(A_coarse_inv.dot(restrict)), A_fine)
plot_fourier_transformed(coarse_grid_correction(31,15,-1000))
plot_fourier_transformed(coarse_grid_correction(31,15,0))
plot_fourier_transformed(coarse_grid_correction(31,15,1000))
plot_3_eigvalueplots(coarse_grid_correction(31,15,-1000),coarse_grid_correction(31,15,0),coarse_grid_correction(31,15,100))
def coarse_grid_correction_periodic(n,nc, sigma):
A_fine = to_dense(system_matrix_hh1d_periodic(n,sigma))
A_coarse = to_dense(system_matrix_hh1d_periodic(nc,sigma))
A_coarse_inv = sp.linalg.inv(A_coarse)
lin_trans = LinearTransferPeriodic(n, nc)
prolong = to_dense(lin_trans.I_2htoh)
restrict = to_dense(lin_trans.I_hto2h)
return np.eye(n)- np.dot(prolong.dot(A_coarse_inv.dot(restrict)), A_fine)
matrix_plot(coarse_grid_correction_periodic(31,15,-1000))
matrix_plot(coarse_grid_correction_periodic(31,15,-0.00))
matrix_plot(coarse_grid_correction_periodic(31,15,1000))
plot_fourier_transformed(coarse_grid_correction_periodic(32,16,-1000))
plot_fourier_transformed(coarse_grid_correction_periodic(32,16,-0.00))
plot_fourier_transformed(coarse_grid_correction_periodic(32,16,1000))
prolong = to_dense(LinearTransferPeriodic(16, 8).I_2htoh)
restrict = to_dense(LinearTransferPeriodic(16, 8).I_hto2h)
matrix_plot(to_dense(LinearTransferPeriodic(16, 8).I_2htoh))
matrix_plot(to_dense(LinearTransferPeriodic(16, 8).I_hto2h))
matrix_plot(prolong.dot(restrict))
def mat_of_interest(n):
prolong = to_dense(LinearTransferPeriodic(n, n/2).I_2htoh)
restrict = to_dense(LinearTransferPeriodic(n, n/2).I_hto2h)
return prolong.dot(restrict)
prolong = to_dense(LinearTransferPeriodic(15, 7).I_2htoh)
restrict = to_dense(LinearTransferPeriodic(15, 7).I_hto2h)
matrix_plot(to_dense(LinearTransferPeriodic(15, 7).I_2htoh))
matrix_plot(to_dense(LinearTransferPeriodic(15, 7).I_hto2h))
matrix_plot(prolong.dot(restrict))
def two_grid_it_matrix(n,nc, sigma, nu1=3,nu2=3,typ='wjac'):
cg = coarse_grid_correction(n,nc,sigma)
if typ is 'wjac':
smoother = iteration_matrix_wjac(n,sigma, periodic=False)
if typ is 'gs':
smoother = iteration_matrix_gs(n,sigma, periodic=False)
pre_sm = matrix_power(smoother, nu1)
post_sm = matrix_power(smoother, nu2)
return pre_sm.dot(cg.dot(post_sm))
plot_fourier_transformed(two_grid_it_matrix(15,7,-100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix(15,7,0,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix(15,7,100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix(15,7,-100,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix(15,7,0,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix(15,7,100,typ='gs'))
eig_p=[]
eig_m=[]
eig_z=[]
for n,nc in zip([7,15,31],[3,7,15]):
A_p = two_grid_it_matrix(n,nc,100.0)
A_z = two_grid_it_matrix(n,nc,0.0)
A_m = two_grid_it_matrix(n,nc,-100.0)
plot_3_eigvalueplots(A_p, A_z, A_m)
sr_2grid_var_sigma = map(lambda sig : spec_rad(two_grid_it_matrix(15,7,sig)), sigma_range)
plt.semilogy(sigma_range, sr_2grid_var_sigma,'k-')
plt.title('$n_f = 15, n_c = 7$')
plt.xlabel('$\sigma$')
plt.ylabel("spectral radius")
nf_range = map(lambda k: 2**k-1,range(3,10))
nc_range = map(lambda k: 2**k-1,range(2,9))
sr_2grid_m1000 = map(lambda nf,nc : spec_rad(two_grid_it_matrix(nf,nc,-1000)), nf_range, nc_range)
sr_2grid_0 = map(lambda nf,nc : spec_rad(two_grid_it_matrix(nf,nc,0)), nf_range, nc_range)
sr_2grid_p1000 = map(lambda nf,nc : spec_rad(two_grid_it_matrix(nf,nc,1000)), nf_range, nc_range)
plt.semilogy(nf_range, sr_2grid_m1000,'k-',nf_range, sr_2grid_0,'k--',nf_range, sr_2grid_p1000,'k:')
plt.xlabel('$n_f$')
plt.ylabel("spectral radius")
plt.legend(("$\sigma = -1000$","$\sigma = 0$","$\sigma = 1000$"),'upper right',shadow = True)
def two_grid_it_matrix_periodic(n,nc, sigma, nu1=3,nu2=3,typ='wjac'):
cg = coarse_grid_correction_periodic(n,nc,sigma)
if typ is 'wjac':
smoother = iteration_matrix_wjac(n,sigma, periodic=True)
if typ is 'gs':
smoother = iteration_matrix_gs(n,sigma, periodic=True)
pre_sm = matrix_power(smoother, nu1)
post_sm = matrix_power(smoother, nu2)
return pre_sm.dot(cg.dot(post_sm))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,-100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,0.01,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,100,typ='wjac'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,-100,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,-0.01,typ='gs'))
plot_fourier_transformed(two_grid_it_matrix_periodic(16,8,100,typ='gs'))
sr_2grid_var_sigma_periodic = map(lambda sig : spec_rad(two_grid_it_matrix_periodic(16,8,sig)), sigma_range)
plt.plot(sigma_range, np.asarray(sr_2grid_var_sigma)-np.asarray(sr_2grid_var_sigma_periodic),'k-')
plt.title('Difference between periodic and non-periodic')
plt.xlabel('$\sigma$')
plt.ylabel("spectral radius")
nf_range = map(lambda k: 2**k,range(3,10))
nc_range = map(lambda k: 2**k,range(2,9))
sr_2grid_m1000_p = map(lambda nf,nc : spec_rad(two_grid_it_matrix_periodic(nf,nc,-1000)), nf_range, nc_range)
sr_2grid_0_p = map(lambda nf,nc : spec_rad(two_grid_it_matrix_periodic(nf,nc,0.01)), nf_range, nc_range)
sr_2grid_p1000_p = map(lambda nf,nc : spec_rad(two_grid_it_matrix_periodic(nf,nc,1000)), nf_range, nc_range)
plt.semilogy(nf_range, sr_2grid_m1000_p,'k-',nf_range, sr_2grid_0_p,'k--',nf_range, sr_2grid_p1000_p,'k:')
plt.xlabel('$n_f$')
plt.ylabel("spectral radius")
plt.legend(("$\sigma = -1000$","$\sigma = 0$","$\sigma = 1000$"),'upper right',shadow = True)
plt.semilogy(nf_range, np.abs(np.asarray(sr_2grid_m1000_p) - np.asarray(sr_2grid_m1000)),'k-',
nf_range, np.abs(np.asarray(sr_2grid_0_p) - np.asarray(sr_2grid_0)),'k--',
nf_range, np.abs(np.asarray(sr_2grid_p1000_p) - np.asarray(sr_2grid_p1000)),'k:')
plt.xlabel('$n_f$')
plt.ylabel("spectral radius")
plt.legend(("$\sigma = -1000$","$\sigma = 0$","$\sigma = 1000$"),'upper right',shadow = True)
def hs_norm(A):
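    # Discrete Hilbert-Schmidt norm: the Frobenius norm scaled by 1/sqrt(n).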
n = A.shape[0]
return sp.linalg.norm(A,'fro')/np.sqrt(n)
n_range = np.arange(10,100)
hs_sysmat_m1000 = map(lambda n: hs_norm(to_dense(system_matrix_hh1d(n,-1000))-to_dense(system_matrix_hh1d_periodic(n,-1000))),n_range)
hs_sysmat_0 = map(lambda n: hs_norm(to_dense(system_matrix_hh1d(n,0.001))-to_dense(system_matrix_hh1d_periodic(n,0.001))),n_range)
hs_sysmat_p1000 = map(lambda n: hs_norm(to_dense(system_matrix_hh1d(n,1000))-to_dense(system_matrix_hh1d_periodic(n,1000))),n_range)
plt.plot(hs_sysmat_m1000)
plt.plot(hs_sysmat_0)
plt.plot(hs_sysmat_p1000)
n_range = 2**np.arange(1,11)
hs_wjac_m1000 = map(lambda n: hs_norm(to_dense(iteration_matrix_wjac(n,-1000))-to_dense(iteration_matrix_wjac(n,-1000,False))),n_range)
hs_wjac_0 = map(lambda n: hs_norm(to_dense(iteration_matrix_wjac(n,0))-to_dense(iteration_matrix_wjac(n,0,False))),n_range)
hs_wjac_p1000 = map(lambda n: hs_norm(to_dense(iteration_matrix_wjac(n,1000))-to_dense(iteration_matrix_wjac(n,1000,False))),n_range)
plt.plot(n_range, hs_wjac_m1000)
plt.plot(n_range, hs_wjac_0)
plt.plot(n_range, hs_wjac_p1000)
n_range = 2**np.arange(1,11)
hs_gs_m1000 = map(lambda n: hs_norm(to_dense(iteration_matrix_gs(n,-1000))-to_dense(iteration_matrix_gs(n,-1000,False))),n_range)
hs_gs_0 = map(lambda n: hs_norm(to_dense(iteration_matrix_gs(n,0))-to_dense(iteration_matrix_gs(n,0,False))),n_range)
hs_gs_p1000 = map(lambda n: hs_norm(to_dense(iteration_matrix_gs(n,1000))-to_dense(iteration_matrix_gs(n,1000,False))),n_range)
plt.plot(n_range, hs_gs_m1000)
plt.plot(n_range, hs_gs_0)
plt.plot(n_range, hs_gs_p1000)
def einmal_einpacken(A):
return np.r_[[np.zeros(A.shape[0]+1)],np.c_[np.zeros(A.shape[0]),A]]
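# Added note: einmal_einpacken ("wrap up once") pads A with one leading zero row and
# one leading zero column, so the (n-1)x(n-1) Dirichlet operators can be subtracted
# entrywise from the n x n periodic ones.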
n_f_range = 2**np.arange(3,10)
n_c_range = 2**np.arange(2,9)
hs_cgc_m1000 = map(lambda nf,nc: hs_norm(einmal_einpacken(coarse_grid_correction(nf-1,nc-1,-1000))-coarse_grid_correction_periodic(nf,nc,-1000)),n_f_range ,n_c_range)
hs_cgc_0 = map(lambda nf,nc: hs_norm(einmal_einpacken(coarse_grid_correction(nf-1,nc-1,0))-coarse_grid_correction_periodic(nf,nc,0.001)),n_f_range ,n_c_range)
hs_cgc_p1000 = map(lambda nf,nc: hs_norm(einmal_einpacken(coarse_grid_correction(nf-1,nc-1,1000))-coarse_grid_correction_periodic(nf,nc,1000)),n_f_range ,n_c_range)
plt.semilogy(n_f_range, hs_cgc_m1000)
plt.semilogy(n_f_range, hs_cgc_0)
plt.semilogy(n_f_range, hs_cgc_p1000)
# plt.semilogy(n_f_range, 1/np.sqrt(n_f_range))
n_f_range = 2**np.arange(3,12)
n_c_range = 2**np.arange(2,11)
hs_2grid_m1000 = map(lambda nf,nc: hs_norm(
einmal_einpacken(two_grid_it_matrix(nf-1,nc-1,-1000))-two_grid_it_matrix_periodic(nf,nc,-1000))
,n_f_range ,n_c_range)
hs_2grid_0 = map(lambda nf,nc: hs_norm(
einmal_einpacken(two_grid_it_matrix(nf-1,nc-1,0.001))-two_grid_it_matrix_periodic(nf,nc,0.001))
,n_f_range ,n_c_range)
hs_2grid_p1000 = map(lambda nf,nc: hs_norm(
einmal_einpacken(two_grid_it_matrix(nf-1,nc-1,1000))-two_grid_it_matrix_periodic(nf,nc,1000))
,n_f_range ,n_c_range)
plt.semilogy(n_f_range, hs_2grid_m1000)
plt.semilogy(n_f_range, hs_2grid_0)
plt.semilogy(n_f_range, hs_2grid_p1000)
plt.semilogy(n_f_range, 1/np.sqrt(n_f_range)*30)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Single Deletions
Step2: These can also be done for only a subset of genes
Step3: This can also be done for reactions
Step4: Double Deletions
Step5: By default, the double deletion function will automatically use multiprocessing, splitting the task over up to 4 cores if they are available. The number of cores can be manually specified as well. Setting it to a single core disables the multiprocessing library, which often aids debugging.
Step6: Double deletions can also be run for reactions
|
<ASSISTANT_TASK:>
Python Code:
import pandas
from time import time
import cobra.test
from cobra.flux_analysis import \
single_gene_deletion, single_reaction_deletion, \
double_gene_deletion, double_reaction_deletion
cobra_model = cobra.test.create_test_model("textbook")
ecoli_model = cobra.test.create_test_model("ecoli")
growth_rates, statuses = single_gene_deletion(cobra_model)
gr, st = single_gene_deletion(cobra_model,
cobra_model.genes[:20])
pandas.DataFrame.from_dict({"growth_rates": gr,
"status": st})
gr, st = single_reaction_deletion(cobra_model,
cobra_model.reactions[:20])
pandas.DataFrame.from_dict({"growth_rates": gr,
"status": st})
double_gene_deletion(cobra_model, cobra_model.genes[-5:],
return_frame=True)
start = time() # start timer()
double_gene_deletion(ecoli_model, ecoli_model.genes[:200],
number_of_processes=2)
t1 = time() - start
print("Double gene deletions for 200 genes completed in "
"%.2f sec with 2 cores" % t1)
start = time() # start timer()
double_gene_deletion(ecoli_model, ecoli_model.genes[:200],
number_of_processes=1)
t2 = time() - start
print("Double gene deletions for 200 genes completed in "
"%.2f sec with 1 core" % t2)
print("Speedup of %.2fx" % (t2 / t1))
double_reaction_deletion(cobra_model,
cobra_model.reactions[2:7],
return_frame=True)
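# Hedged aside (added, commented out): as with double_gene_deletion above, the
# number_of_processes argument can be passed here as well; a sketch, assuming the
# same cobra_model loaded earlier in this notebook.
# double_reaction_deletion(cobra_model, cobra_model.reactions[2:7],
#                          number_of_processes=2, return_frame=True)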
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to convert words to integers and back, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
from collections import Counter
import random
## Your code here
# f(w_i): frequency of word w_i in the total dataset.
# t: threshold parameter
threshold = 1e-5
words_counts = Counter(int_words)
total_count = len(int_words)
freqs = {word: count/total_count for word, count in words_counts.items()}
p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in words_counts}
train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
R = np.random.randint(1, window_size+1)
start = idx - R if (idx - R) > 0 else 0
stop = idx + R
target_words = set(words[start:idx] + words[idx+1:stop+1])
return list(target_words)
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
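# Hedged usage sketch (added): pull a single small batch to eyeball the generator's
# output; assumes train_words was built in the subsampling step above.
example_x, example_y = next(get_batches(train_words, batch_size=4, window_size=5))
print(example_x[:10], example_y[:10])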
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, shape=[None])
labels = tf.placeholder(tf.int32, shape=[None, None])
n_vocab = len(int_to_vocab)
n_embedding = 200 # Number of embedding features
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1)) # create embedding weight matrix here
embed = tf.nn.embedding_lookup(embedding, inputs)# use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1)) # create softmax weight matrix here
softmax_b = tf.Variable(tf.zeros(n_vocab)) # create softmax biases here
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(softmax_w,
softmax_b,
labels,
embed,
n_sampled,
n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
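    # Added note: sampled softmax is a training-time approximation; for exact
    # evaluation or inference you would score against the full softmax over n_vocab.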
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Look at where exactly in the file the data we are interested in is located
Step2: Read the data we need as dataframes
Step3: Create separate columns in the dataframes holding the data in formats that are convenient for us.
Step4: Create columns in the "Goal1Completions" dataframe to store the number of sessions and the conversion rate
Step5: Carry the session counts over from the sessions table and compute the conversion rate for every page present in "Goal1Completions"
Step6: Zero out the conversion rate for pages that had no sessions. In this case that is the "(entrance)" page
Step7: Plot the chart
Step8: Print the result
|
<ASSISTANT_TASK:>
Python Code:
# Imports needed by this cell (JSON parsing, dataframes, plotting)
import json
import pandas as pd
import matplotlib.pyplot as plt

path = 'Sessions_Page.json'
path2 = 'Goal1CompletionLocation_Goal1Completions.json'
with open(path, 'r') as f:
sessions_page = json.loads(f.read())
with open(path2, 'r') as f:
goals_page = json.loads(f.read())
type (sessions_page)
sessions_page.keys()
sessions_page['reports'][0].keys()
sessions_page['reports'][0]['data']['rows']
sessions_df = pd.DataFrame(sessions_page['reports'][0]['data']['rows'])
goals_df = pd.DataFrame(goals_page['reports'][0]['data']['rows'])
x=[]
for i in sessions_df.dimensions:
x.append(str(i[0]))
sessions_df.insert(2, 'name', x)
x=[]
for i in goals_df.dimensions:
x.append(str(i[0]))
goals_df.insert(2, 'name', x)
x=[]
for i in sessions_df.metrics:
x.append(float(i[0]['values'][0]))
sessions_df.insert(3, 'sessions', x)
x=[]
for i in goals_df.metrics:
x.append(float(i[0]['values'][0]))
goals_df.insert(3, 'goals', x)
goals_df.insert(4, 'sessions', 0)
goals_df.insert(5, 'convers_rate', 0)
for i in range(len(goals_df)):
goals_df.sessions[i] = sum(sessions_df.sessions[sessions_df.name==goals_df.name[i]])
goals_df.convers_rate = goals_df.goals/goals_df.sessions*100
goals_df.convers_rate[goals_df.sessions==0] = 0
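# Added note: this chained-indexing assignment works on older pandas but can raise
# SettingWithCopyWarning; goals_df.loc[goals_df.sessions == 0, 'convers_rate'] = 0
# is the safer equivalent.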
goals_df.ix[range(1,7),[2,5]]
goals_df.ix[range(1,7),[2,5]].plot(kind="bar", legend=False)
plt.xticks([0, 1, 2, 3, 4, 5], goals_df.name, rotation="vertical")
plt.show()
name = goals_df.ix[goals_df.convers_rate==max(goals_df.convers_rate),2]
print 'The best converting page on your site is "',str(name)[5:len(name)-28], '" with conversion rate', max(goals_df.convers_rate),'%'
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Batch Normalization using tf.layers.batch_normalization
Step6: We'll use the following function to create convolutional layers in our network. They are very basic
Step8: Run the following cell, along with the earlier cells (to load the dataset and define the necessary functions).
Step10: With this many layers, it's going to take a lot of iterations for this network to learn. By the time you're done training these 800 batches, your final test and validation accuracies probably won't be much better than 10%. (It will be different each time, but will most likely be less than 15%.)
Step12: TODO
Step13: TODO
Step15: With batch normalization, you should now get an accuracy over 90%. Notice also the last line of the output
Step17: TODO
Step18: TODO
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False)
# DO NOT MODIFY THIS CELL
def fully_connected(prev_layer, num_units):
    """
    Create a fully connected layer with the given layer as input and the given number of neurons.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :returns Tensor
        A new fully connected layer
    """
layer = tf.layers.dense(prev_layer, num_units, activation=tf.nn.relu)
return layer
# DO NOT MODIFY THIS CELL
def conv_layer(prev_layer, layer_depth):
    """
    Create a convolutional layer with the given layer as input.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :returns Tensor
        A new convolutional layer
    """
strides = 2 if layer_depth % 3 == 0 else 1
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=tf.nn.relu)
return conv_layer
# DO NOT MODIFY THIS CELL
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]]})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, isTraining):
    """
    Create a fully connected layer with the given layer as input and the given number of neurons.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :returns Tensor
        A new fully connected layer
    """
layer = tf.layers.dense(prev_layer, num_units, activation=None)
# We can also set use_bias=False, because batch normalization have bias beta inside
layer = tf.layers.batch_normalization(inputs=layer, training=isTraining)
layer = tf.nn.relu(layer)
return layer
def conv_layer(prev_layer, layer_depth, isTraining):
    """
    Create a convolutional layer with the given layer as input.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :returns Tensor
        A new convolutional layer
    """
strides = 2 if layer_depth % 3 == 0 else 1
# We can also set use_bias=False, because batch normalization have bias beta inside
conv_layer = tf.layers.conv2d(prev_layer, layer_depth*4, 3, strides, 'same', activation=None)
conv_layer = tf.layers.batch_normalization(inputs=conv_layer, training=isTraining)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
isTrainingP = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, isTrainingP)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, isTrainingP)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
# Force control dependencies to update batch normalization population statistics
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, isTrainingP: True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
isTrainingP: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, isTrainingP: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
isTrainingP: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
isTrainingP: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
isTrainingP: False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
def fully_connected(prev_layer, num_units, is_training):
    """
    Create a fully connected layer with the given layer as input and the given number of neurons.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param num_units: int
        The size of the layer. That is, the number of units, nodes, or neurons.
    :returns Tensor
        A new fully connected layer
    """
# Define variables for batch_normalization
gamma = tf.Variable(tf.ones(num_units))
beta = tf.Variable(tf.zeros(num_units))
epsilon = 0.001
pop_mean = tf.Variable(tf.zeros(num_units), trainable=False)
pop_variance = tf.Variable(tf.ones(num_units), trainable=False)
layer = tf.layers.dense(prev_layer, num_units, activation=None, use_bias=False)
# We need to define 2 functions to use tf.conf on a bool placeholder
def training_batch_normalization():
# Update population statistics
batch_mean, batch_variance = tf.nn.moments(layer, [0])
decay = 0.99
update_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
update_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
# Force the population statistics update
with tf.control_dependencies([update_mean, update_variance]):
return tf.nn.batch_normalization(x=layer, mean=batch_mean, variance=batch_variance,
offset=beta, scale=gamma, variance_epsilon=epsilon)
def test_batch_normalization():
return tf.nn.batch_normalization(x=layer, mean=pop_mean, variance=pop_variance,
offset=beta, scale=gamma, variance_epsilon=epsilon)
layer = tf.cond(is_training, training_batch_normalization, test_batch_normalization)
return tf.nn.relu(layer)
def conv_layer(prev_layer, layer_depth, is_training):
    """
    Create a convolutional layer with the given layer as input.
    :param prev_layer: Tensor
        The Tensor that acts as input into this layer
    :param layer_depth: int
        We'll set the strides and number of feature maps based on the layer's depth in the network.
        This is *not* a good way to make a CNN, but it helps us create this example with very little code.
    :returns Tensor
        A new convolutional layer
    """
strides = 2 if layer_depth % 3 == 0 else 1
in_channels = prev_layer.get_shape().as_list()[3]
out_channels = layer_depth*4
weights = tf.Variable(
tf.truncated_normal([3, 3, in_channels, out_channels], stddev=0.05))
#bias = tf.Variable(tf.zeros(out_channels))
# Define variables for batch_normalization (normalize each filter map)
gamma = tf.Variable(tf.ones(out_channels))
beta = tf.Variable(tf.zeros(out_channels))
epsilon = 0.001
pop_mean = tf.Variable(tf.zeros(out_channels), trainable=False)
pop_variance = tf.Variable(tf.ones(out_channels), trainable=False)
conv_layer = tf.nn.conv2d(prev_layer, weights, strides=[1,strides, strides, 1], padding='SAME')
# We need to define 2 functions to use tf.conf on a bool placeholder
def training_batch_normalization():
# Update population statistics
# BHWC, we calculate moments for each channel
batch_mean, batch_variance = tf.nn.moments(conv_layer, [0,1,2], keep_dims=False)
decay = 0.99
update_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))
update_variance = tf.assign(pop_variance, pop_variance * decay + batch_variance * (1 - decay))
# Force the population statistics update
with tf.control_dependencies([update_mean, update_variance]):
return tf.nn.batch_normalization(x=conv_layer, mean=batch_mean, variance=batch_variance,
offset=beta, scale=gamma, variance_epsilon=epsilon)
def test_batch_normalization():
# Use population statistics
return tf.nn.batch_normalization(x=conv_layer, mean=pop_mean, variance=pop_variance,
offset=beta, scale=gamma, variance_epsilon=epsilon)
conv_layer = tf.cond(is_training, training_batch_normalization, test_batch_normalization)
conv_layer = tf.nn.relu(conv_layer)
return conv_layer
def train(num_batches, batch_size, learning_rate):
# Build placeholders for the input samples and labels
inputs = tf.placeholder(tf.float32, [None, 28, 28, 1])
labels = tf.placeholder(tf.float32, [None, 10])
is_training_p = tf.placeholder(tf.bool)
# Feed the inputs into a series of 20 convolutional layers
layer = inputs
for layer_i in range(1, 20):
layer = conv_layer(layer, layer_i, is_training_p)
# Flatten the output from the convolutional layers
orig_shape = layer.get_shape().as_list()
layer = tf.reshape(layer, shape=[-1, orig_shape[1] * orig_shape[2] * orig_shape[3]])
# Add one fully connected layer
layer = fully_connected(layer, 100, is_training_p)
    # Create the output layer with 1 node for each class
logits = tf.layers.dense(layer, 10)
# Define loss and training operations
model_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
train_opt = tf.train.AdamOptimizer(learning_rate).minimize(model_loss)
# Create operations to test accuracy
correct_prediction = tf.equal(tf.argmax(logits,1), tf.argmax(labels,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train and test the network
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for batch_i in range(num_batches):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
# train this batch
sess.run(train_opt, {inputs: batch_xs, labels: batch_ys, is_training_p: True})
# Periodically check the validation or training loss and accuracy
if batch_i % 100 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training_p: False})
print('Batch: {:>2}: Validation loss: {:>3.5f}, Validation accuracy: {:>3.5f}'.format(batch_i, loss, acc))
elif batch_i % 25 == 0:
loss, acc = sess.run([model_loss, accuracy], {inputs: batch_xs, labels: batch_ys, is_training_p: False})
print('Batch: {:>2}: Training loss: {:>3.5f}, Training accuracy: {:>3.5f}'.format(batch_i, loss, acc))
# At the end, score the final accuracy for both the validation and test sets
acc = sess.run(accuracy, {inputs: mnist.validation.images,
labels: mnist.validation.labels,
is_training_p: False})
print('Final validation accuracy: {:>3.5f}'.format(acc))
acc = sess.run(accuracy, {inputs: mnist.test.images,
labels: mnist.test.labels,
is_training_p: False})
print('Final test accuracy: {:>3.5f}'.format(acc))
# Score the first 100 test images individually. This won't work if batch normalization isn't implemented correctly.
correct = 0
for i in range(100):
correct += sess.run(accuracy,feed_dict={inputs: [mnist.test.images[i]],
labels: [mnist.test.labels[i]],
is_training_p: False})
print("Accuracy on 100 samples:", correct/100)
num_batches = 800
batch_size = 64
learning_rate = 0.002
tf.reset_default_graph()
with tf.Graph().as_default():
train(num_batches, batch_size, learning_rate)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: & for multiple parameters
Step2: 2) What genres are most represented in the search results?
Step3: ANSWER
Step4: 3) Use a for loop to determine who BESIDES Lil Wayne has the highest popularity rating.
Step5: ANSWER
Step6: 5) Pick two of your favorite Lils to fight it out, and use their IDs to print out their top tracks.
Step7: Will the world explode if a musician swears?
Step8: QUESTION
Step9: 7) Since we're talking about Lils, what about Biggies?
Step10: 8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average?
|
<ASSISTANT_TASK:>
Python Code:
import requests
response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist&market=US&limit=50')
data = response.json()
data.keys()
artist_data = data['artists']
artist_data.keys()
lil_names = artist_data['items']
#lil_names = list of dictionaries = list of artist name, popularity, type, genres etc
for names in lil_names:
if not names['genres']:
print(names['name'], names['popularity'], "there are no genres listed")
else:
print(names['name'], names['popularity'], names["genres"])
#Join all the lists of genres in the dictionary and then count the number of elements in it
#ANSWER:
all_genres = []
for artist in lil_names:
print("All genres we've heard of:", all_genres)
#The conditional: None
print("Current artist has:", artist['genres'])
all_genres = all_genres + artist['genres']
#genre_list = ", ".join(artist['genres'])
#print(artist['name'], ":", genre_list)
all_genres.count('dirty south rap')
all_genres.count('crunk')
#This is bad because dirty south rap shows up four times. We need a unique list of genres
for genre in all_genres:
genre_count = all_genres.count(genre)
print(genre, "shows up", genre_count, "times")
#To remove duplicates. You need to turn a list into a set.
unique_genres = set(all_genres)
for genre in unique_genres:
genre_count = all_genres.count(genre)
print(genre, "shows up", genre_count, "times")
#There is a library that comes with Python called Collections
#Inside of it is a magic thing called Counter.
import collections # will import the whole collections
#you can also type
from collections import Counter
#all_genres is a list of strings of genrs with duplicates
#counter will count all te genres for us
counts = collections.Counter(all_genres)
counts.most_common(4) #will give you the four most common genres
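# Added note: Counter(all_genres).most_common(4) returns (genre, count) pairs sorted
# by count, which replaces the manual .count() loop used above.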
#HOW TO AUTOMATE GETTING ALL THE RESULTS
response = requests.get("https://api.spotify.com/v1/search?query=lil&type=artist&market=US&limit=10")
small_data = response.json()
small_data['artists']
print(len(small_data['artists']['items'])) #We only get 10 artists
print(data['artists']['total'])
import math
page_count = math.ceil(4502/50)
#math.ceil rounds up
#math.ceil(page_count)
page_count
#First page artists 1-50:
#https://api.spotify.com/v1/search?query=lil&type=artist&market=US&limit=50
#Second page artists 51-100:
#https://api.spotify.com/v1/search?query=lil&type=artist&market=US&limit=50&offset=50
#Third page artists 101-150:
#https://api.spotify.com/v1/search?query=lil&type=artist&market=US&limit=50&offset=100
#Fourth page artists 151-200:
#https://api.spotify.com/v1/search?query=lil&type=artist&market=US&limit=50&offset=150
for page in [0, 1, 2, 3, 4]:
offset = (page) * 50 #because page 2 is 50 and 2-1 = 1 x 50 = 50
print("We are on page", page, "with an offset of", offset)
for page in range(91):
#Get a page
offset = page * 50
print("We are on page", page, "with an offset of", offset)
#Make the request with a changed offset ?offset [offset]
#data = response.json()
#add all our new artists to our list of existing artists
#all_artists = all_artists + data['artists]['items]
print("Successfully retrieved", len(all_artists), "artists")
#TA-Stephan:can't just print the names yourself. The code must do it.
for popularity in lil_names:
print(popularity['name'], popularity['popularity'], popularity['followers'])
print("Lil Yachty, Lil Uzi Vert, Lil Jon have the highest popularity ratings besides Lil Wayne, and they do not have the largest number of followers.")
for kim in lil_names:
print(kim['name'], kim['id'])
response = requests.get("https://api.spotify.com/v1/artists/5tth2a3v0sWwV1C7bApBdX/")
kim_data = response.json()
#print(kim_data)
kim_followers = kim_data['followers']
total_kim_followers = kim_followers['total']
#print(total_kim_followers)
for artists in lil_names:
if artists["followers"]["total"] > total_kim_followers:
print(artists['name'], artists['popularity'])
#ANSWER:
more_popular_than_lil_kim = []
for artist in lil_names:
    #print("Looking at", artist['name'])
    if artist['name'] == "Lil' Kim":
        print("Found Lil Kim")
        print(artist['popularity'])
    else:
        pass
        #print("Not Lil Kim")
lil_kim_popularity = 62
for artist in lil_names:
    if artist['popularity'] > lil_kim_popularity:
        print(artist['name'], "is more popular with a score of", artist['popularity'])
        more_popular_than_lil_kim.append(artist['name'])
    else:
        print(artist['name'], "is less popular with a score of", artist['popularity'])
print("#### More popular than Lil Kim ####")
for artist_name in more_popular_than_lil_kim:
    print(artist_name)
more_popular_string = ", ".join(more_popular_than_lil_kim)
print("Artists more popular than Lil Kim are:", more_popular_string)
#Let's pick Lil Wayne and Lil Mama because I don't know who most of these people are
wayne_id = "55Aa2cqylxrFIXC767Z865"
response = requests.get("https://api.spotify.com/v1/artists/" + wayne_id + "/top-tracks?country=US")
wayne_data = response.json()
top_wayne_tracks = wayne_data['tracks']
for track in top_wayne_tracks:
print(track["name"])
mama_id = "5qK5bOC6wLtuLhG5KvU17c"
response = requests.get("https://api.spotify.com/v1/artists/" + mama_id + "/top-tracks?country=US")
mama_data = response.json()
top_mama_tracks = mama_data['tracks']
for track in top_mama_tracks:
print(track["name"])
wayne_explicit_count = 0
wayne_exp_popularity_count = 0
wayne_ok_count = 0
wayne_ok_popularity_count = 0
wayne_explicit_len = 0
wayne_ok_len = 0
for track in top_wayne_tracks:
print(track['name'], track['explicit'], track['popularity'], track["duration_ms"])
    if track['explicit']:
wayne_explicit_count = wayne_explicit_count + 1
wayne_exp_popularity_count = wayne_exp_popularity_count + int(track['popularity'])
wayne_avg_pop = wayne_exp_popularity_count / wayne_explicit_count
wayne_explicit_len = wayne_explicit_len + int(track["duration_ms"])
if not track['explicit']:
wayne_ok_count = wayne_ok_count + 1
wayne_ok_popularity_count = wayne_ok_popularity_count + track['popularity']
wayne_ok_avg_pop = wayne_ok_popularity_count / wayne_ok_count
wayne_ok_len = wayne_ok_len + track["duration_ms"]
if wayne_explicit_count > 0:
print("The average popularity for Lil Wayne's explicit songs is", wayne_avg_pop)
#1 minute is 60000 milliseconds, who knew?
wayne_explicit_mins = int(wayne_explicit_len) / 60000
print("Lil Wayne has", wayne_explicit_mins, "minutes of explicit songs")
if wayne_ok_count > 0:
print("The average popularity for Lil Wayne's non-explicit songs is", wayne_ok_avg_pop)
wayne_ok_mins = int(wayne_ok_len) / 60000
print("Lil Wayne has", wayne_ok_mins, "minutes of explicit songs")
mama_exp_count = 0
mama_exp_pop_count = 0
mama_ok_count = 0
mama_ok_pop_count = 0
mama_exp_len = 0
mama_ok_len = 0
for track in top_mama_tracks:
print(track['name'], track['explicit'], track['popularity'], track["duration_ms"])
    if track['explicit']:
mama_exp_count = mama_exp_count + 1
mama_exp_pop_count = mama_exp_pop_count + int(track['popularity'])
mama_avg_pop = int(mama_exp_pop_count) / int(mama_exp_count)
mama_exp_len = mama_exp_len + int(track["duration_ms"])
if not track['explicit']:
mama_ok_count = mama_ok_count + 1
mama_ok_pop_count = mama_ok_pop_count + int(track['popularity'])
mama_ok_avg_pop = int(mama_ok_pop_count) / int(mama_ok_count)
mama_ok_len = mama_ok_len + int(track["duration_ms"])
if mama_exp_count > 0:
#1 minute is 60000 milliseconds, who knew?
print("The average popularity for Lil Mama's xplicit songs is", mama_avg_pop)
mama_exp_mins = int(mama_exp_len) / 60000
print("Lil Mama has", mama_exp_mins, "minutes of explicit songs")
if mama_ok_count > 0:
print("The average popularity for Lil Mama's non-explicit songs is", mama_ok_avg_pop)
mama_ok_mins = int(mama_ok_len) / 60000
print("Lil Mama has", mama_ok_mins, "minutes of non-explicit songs")
#We need to bypass the limit. And find out
response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist')
biggie_data = response.json()
biggie_artists = biggie_data['artists']
biggie_names = biggie_artists['items']
biggie_count= 0
for name in biggie_names:
print(name['name'])
biggie_count = biggie_count + 1
print("There are a total number of", biggie_count, "biggie artists")
response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist')
lil_data = response.json()
lil_x_artists = lil_data['artists']
lil_x_names = lil_x_artists['items']
lil_x_count= 0
for name in lil_x_names:
print(name['name'])
    lil_x_count = lil_x_count + 1
print("There are a total number of", lil_x_count, "lil artists")
response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist&limit=50')
b_data = response.json()
b_artists = b_data['artists']
b_names = b_artists['items']
b_pop_count = 0
b_number = 0
for names in b_names:
print(names['name'], names['popularity'])
b_number = b_number + 1
b_pop_count = b_pop_count + int(names['popularity'])
avg_b_pop = b_pop_count / int(b_number)
print("The Biggies' average popularity is", avg_b_pop)
lil_pop_count = 0
lil_number = 0
for names in lil_names:
print(names['name'], names['popularity'])
lil_number = lil_number + 1
lil_pop_count = lil_pop_count + int(names['popularity'])
avg_lil_pop = lil_pop_count / int(lil_number)
print("The Lils average popularity is", avg_lil_pop)
print("The Lils are far more popular")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: Split data into training and testing.
Step3: Learning a multiple regression model
Step4: Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows
Step5: Making Predictions
Step6: Compute RSS
Step7: Test your function by computing the RSS on TEST data for the example model
Step8: Create some new features
Step9: Next create the following 4 new features as column in both TEST and TRAIN data
Step10: Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms.
Step11: Learning Multiple Models
Step12: Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients
Step13: Quiz Question
Step14: Quiz Question
|
<ASSISTANT_TASK:>
Python Code:
import graphlab
sales = graphlab.SFrame('kc_house_data.gl/kc_house_data.gl')
train_data,test_data = sales.random_split(.8,seed=0)
example_features = ['sqft_living', 'bedrooms', 'bathrooms']
example_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features,
validation_set = None)
example_weight_summary = example_model.get("coefficients")
print example_weight_summary
example_predictions = example_model.predict(train_data)
print example_predictions[0] # should be 271789.505878
import math
def get_residual_sum_of_squares(model, data, outcome):
# First get the predictions
predict = model.predict(data)
# Then compute the residuals/errors
residuals = []
for i in range(0, len(predict)):
error = outcome[i] - predict[i]
residuals.append(math.pow(error,2))
# Then square and add them up
RSS = reduce(lambda x,y : x + y, residuals)
return(RSS)
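# Added note: reduce() is a builtin only on Python 2; on Python 3 it needs
# "from functools import reduce", and RSS = sum(residuals) is an equivalent one-liner.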
rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price'])
print rss_example_train # should be 2.7376153833e+14
from math import log
train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2)
test_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2)
# create the remaining 3 features in both TEST and TRAIN data
train_data['bed_bath_rooms'] = train_data['bedrooms'] * train_data['bathrooms']
test_data['bed_bath_rooms'] = test_data['bedrooms'] * test_data['bathrooms']
train_data['log_sqft_living'] = train_data['sqft_living'].apply(lambda x: log(x))
test_data['log_sqft_living'] = test_data['sqft_living'].apply(lambda x: log(x))
train_data['lat_plus_long'] = train_data['lat'] + train_data['long']
test_data['lat_plus_long'] = test_data['lat'] + test_data['long']
print sum(test_data['bedrooms_squared'])/len(test_data['bedrooms_squared'])
print sum(test_data['bed_bath_rooms'])/len(test_data['bed_bath_rooms'])
print sum(test_data['log_sqft_living'])/len(test_data['log_sqft_living'])
print sum(test_data['lat_plus_long'])/len(test_data['lat_plus_long'])
model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']
model_2_features = model_1_features + ['bed_bath_rooms']
model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']
# Learn the three models: (don't forget to set validation_set = None)
model_1 = graphlab.linear_regression.create(train_data, target = 'price', features = model_1_features,
validation_set = None)
model_2 = graphlab.linear_regression.create(train_data, target = 'price', features = model_2_features,
validation_set = None)
model_3 = graphlab.linear_regression.create(train_data, target = 'price', features = model_3_features,
validation_set = None)
# Examine/extract each model's coefficients:
model_1_weight_summary = model_1.get("coefficients")
print model_1_weight_summary
model_2_weight_summary = model_2.get("coefficients")
print model_2_weight_summary
model_3_weight_summary = model_3.get("coefficients")
print model_3_weight_summary
# Compute the RSS on TRAINING data for each of the three models and record the values:
rss_model_1_train = get_residual_sum_of_squares(model_1, train_data, train_data['price'])
print rss_model_1_train
rss_model_2_train = get_residual_sum_of_squares(model_2, train_data, train_data['price'])
print rss_model_2_train
rss_model_3_train = get_residual_sum_of_squares(model_3, train_data, train_data['price'])
print rss_model_3_train
# Compute the RSS on TESTING data for each of the three models and record the values:
rss_model_1_test = get_residual_sum_of_squares(model_1, test_data, test_data['price'])
print rss_model_1_test
rss_model_2_test = get_residual_sum_of_squares(model_2, test_data, test_data['price'])
print rss_model_2_test
rss_model_3_test = get_residual_sum_of_squares(model_3, test_data, test_data['price'])
print rss_model_3_test
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create ML data files using Dataflow
Step2: 2. Beam Pipeline
Step3: 5. Run Pipeline
Step4: TF Text Classification Model with TF Hub for Text Encoding
Step5: 2. Create feature columns
Step6: 3. Create a model using the premade DNNClassifer
Step7: 4. Define experiment
Step8: b) Serving function
Step9: c) TrainSpec & EvalSpec
Step10: 5. Run experiment
Step11: 6. Evaluate the model
Step12: 7. Use SavedModel for predictions
|
<ASSISTANT_TASK:>
Python Code:
import os
class Params:
pass
# Set to run on GCP
Params.GCP_PROJECT_ID = 'ksalama-gcp-playground'
Params.REGION = 'europe-west1'
Params.BUCKET = 'ksalama-gcs-cloudml'
Params.PLATFORM = 'local' # local | GCP
Params.DATA_DIR = 'data/news' if Params.PLATFORM == 'local' else 'gs://{}/data/news'.format(Params.BUCKET)
Params.TRANSFORMED_DATA_DIR = os.path.join(Params.DATA_DIR, 'transformed')
Params.RAW_TRAIN_DATA_FILE_PREFEX = os.path.join(Params.DATA_DIR, 'train')
Params.RAW_EVAL_DATA_FILE_PREFEX = os.path.join(Params.DATA_DIR, 'eval')
Params.MODELS_DIR = 'models/news' if Params.PLATFORM == 'local' else 'gs://{}/models/news'.format(Params.BUCKET)
Params.TEMP_DIR = os.path.join(Params.DATA_DIR, 'tmp')
Params.TRANSFORM = True
Params.TRAIN = True
Params.RESUME_TRAINING = False
Params.EAGER = False
if Params.EAGER:
tf.enable_eager_execution()
bq_query = '''
SELECT
key,
REGEXP_REPLACE(title, '[^a-zA-Z0-9 $.-]', ' ') AS title,
source
FROM
(
SELECT
ARRAY_REVERSE(SPLIT(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.'))[OFFSET(1)] AS source,
title,
ABS(FARM_FINGERPRINT(title)) AS Key
FROM
`bigquery-public-data.hacker_news.stories`
WHERE
REGEXP_CONTAINS(REGEXP_EXTRACT(url, '.*://(.[^/]+)/'), '.com$')
AND LENGTH(title) > 10
)
WHERE (source = 'github' OR source = 'nytimes' OR source = 'techcrunch')
'''
import apache_beam as beam
def to_tsv(bq_row):
CSV_HEADER = 'key,title,source'.split(',')
### process bq_row['title']
csv_row = '\t'.join([str(bq_row[column]) for column in CSV_HEADER])
return csv_row
def run_pipeline(runner, opts):
pipeline = beam.Pipeline(runner, options=opts)
print("Sink train data files: {}".format(Params.RAW_TRAIN_DATA_FILE_PREFEX))
print("Sink data files: {}".format(Params.RAW_EVAL_DATA_FILE_PREFEX))
print("Temporary directory: {}".format(Params.TEMP_DIR))
print("")
for step in ['train', 'eval']:
if step == 'train':
source_query = 'SELECT * FROM ({}) WHERE MOD(key,100) <= 75'.format(bq_query)
sink_location = Params.RAW_TRAIN_DATA_FILE_PREFEX
else:
source_query = 'SELECT * FROM ({}) WHERE MOD(key,100) > 75'.format(bq_query)
sink_location = Params.RAW_EVAL_DATA_FILE_PREFEX
(
pipeline
| '{} - Read from BigQuery'.format(step) >> beam.io.Read(beam.io.BigQuerySource(query=source_query, use_standard_sql=True))
| '{} - Process to TSV'.format(step) >> beam.Map(to_tsv)
| '{} - Write to TSV '.format(step) >> beam.io.Write(beam.io.WriteToText(sink_location,
file_name_suffix='.tsv', num_shards=5))
)
job = pipeline.run()
if runner == 'DirectRunner':
job.wait_until_finish()
from datetime import datetime
import shutil
job_name = 'preprocess-hackernews-data' + '-' + datetime.utcnow().strftime('%y%m%d-%H%M%S')
options = {
'region': Params.REGION,
'staging_location': os.path.join(Params.TEMP_DIR, 'staging'),
'temp_location': Params.TEMP_DIR,
'job_name': job_name,
'project': Params.GCP_PROJECT_ID
}
opts = beam.pipeline.PipelineOptions(flags=[], **options)
runner = 'DirectRunner' if Params.PLATFORM == 'local' else 'DataflowRunner'
if Params.TRANSFORM:
if Params.PLATFORM == 'local':
shutil.rmtree(Params.DATA_DIR, ignore_errors=True)
print 'Launching {} job {} ... hang on'.format(runner, job_name)
run_pipeline(runner, opts)
print "Pipline completed."
else:
print "Transformation skipped!"
%%bash
ls data/news
echo ""
head data/news/train-00000-of-00005.tsv
import tensorflow as tf
from tensorflow import data
print tf.__version__
RAW_HEADER = 'key,title,source'.split(',')
RAW_DEFAULTS = [['NA'],['NA'],['NA']]
TARGET_FEATRUE_NAME = 'source'
TARGET_LABELS = ['github', 'nytimes', 'techcrunch']
TEXT_FEATURE_NAME = 'title'
KEY_COLUMN = 'key'
def parse_tsv(tsv_row):
columns = tf.decode_csv(tsv_row, record_defaults=RAW_DEFAULTS, field_delim='\t')
features = dict(zip(RAW_HEADER, columns))
features.pop(KEY_COLUMN)
target = features.pop(TARGET_FEATRUE_NAME)
return features, target
def generate_tsv_input_fn(files_pattern,
mode=tf.estimator.ModeKeys.EVAL,
num_epochs=1,
batch_size=200):
def _input_fn():
#file_names = data.Dataset.list_files(files_pattern)
file_names = tf.matching_files(files_pattern)
if Params.EAGER:
print file_names
dataset = data.TextLineDataset(file_names)
dataset = dataset.apply(
tf.contrib.data.shuffle_and_repeat(count=num_epochs,
buffer_size=batch_size*2)
)
dataset = dataset.apply(
tf.contrib.data.map_and_batch(parse_tsv,
batch_size=batch_size,
num_parallel_batches=2)
)
datset = dataset.prefetch(batch_size)
if Params.EAGER:
return dataset
iterator = dataset.make_one_shot_iterator()
features, target = iterator.get_next()
return features, target
return _input_fn
import tensorflow_hub as hub
print hub.__version__
def create_feature_columns(hparams):
title_embeding_column = hub.text_embedding_column(
"title", "https://tfhub.dev/google/universal-sentence-encoder/1",
trainable=hparams.trainable_embedding)
feature_columns = [title_embeding_column]
print "feature columns: \n {}".format(feature_columns)
print ""
return feature_columns
def create_estimator_hub(hparams, run_config):
feature_columns = create_feature_columns(hparams)
optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
estimator = tf.estimator.DNNClassifier(
feature_columns=feature_columns,
n_classes =len(TARGET_LABELS),
label_vocabulary=TARGET_LABELS,
hidden_units=hparams.hidden_units,
optimizer=optimizer,
config=run_config
)
return estimator
TRAIN_SIZE = 73124
NUM_EPOCHS = 10
BATCH_SIZE = 1000
TOTAL_STEPS = (TRAIN_SIZE/BATCH_SIZE)*NUM_EPOCHS
EVAL_EVERY_SEC = 60
hparams = tf.contrib.training.HParams(
num_epochs=NUM_EPOCHS,
batch_size=BATCH_SIZE,
trainable_embedding=False,
learning_rate=0.01,
hidden_units=[256, 128],
max_steps=TOTAL_STEPS
)
MODEL_NAME = 'dnn_estimator_hub'
model_dir = os.path.join(Params.MODELS_DIR, MODEL_NAME)
run_config = tf.estimator.RunConfig(
tf_random_seed=19830610,
log_step_count_steps=1000,
save_checkpoints_secs=EVAL_EVERY_SEC,
keep_checkpoint_max=1,
model_dir=model_dir
)
print(hparams)
print("")
print("Model Directory:", run_config.model_dir)
print("Dataset Size:", TRAIN_SIZE)
print("Batch Size:", BATCH_SIZE)
print("Steps per Epoch:",TRAIN_SIZE/BATCH_SIZE)
print("Total Steps:", TOTAL_STEPS)
def generate_serving_input_fn():
def _serving_fn():
receiver_tensor = {
'title': tf.placeholder(dtype=tf.string, shape=[None])
}
return tf.estimator.export.ServingInputReceiver(
receiver_tensor, receiver_tensor)
return _serving_fn
train_spec = tf.estimator.TrainSpec(
input_fn = generate_tsv_input_fn(
Params.RAW_TRAIN_DATA_FILE_PREFEX+"*",
mode = tf.estimator.ModeKeys.TRAIN,
num_epochs=hparams.num_epochs,
batch_size=hparams.batch_size
),
max_steps=hparams.max_steps,
hooks=None
)
eval_spec = tf.estimator.EvalSpec(
input_fn = generate_tsv_input_fn(
Params.RAW_EVAL_DATA_FILE_PREFEX+"*",
mode=tf.estimator.ModeKeys.EVAL,
num_epochs=1,
batch_size=hparams.batch_size
),
exporters=[tf.estimator.LatestExporter(
name="estimate", # the name of the folder in which the model will be exported to under export
serving_input_receiver_fn=generate_serving_input_fn(),
exports_to_keep=1,
as_text=False)],
steps=None,
throttle_secs=EVAL_EVERY_SEC
)
from datetime import datetime
import shutil
if Params.TRAIN:
if not Params.RESUME_TRAINING:
print("Removing previous training artefacts...")
shutil.rmtree(model_dir, ignore_errors=True)
else:
print("Resuming training...")
tf.logging.set_verbosity(tf.logging.INFO)
time_start = datetime.utcnow()
print("Experiment started at {}".format(time_start.strftime("%H:%M:%S")))
print(".......................................")
estimator = create_estimator_hub(hparams, run_config)
tf.estimator.train_and_evaluate(
estimator=estimator,
train_spec=train_spec,
eval_spec=eval_spec
)
time_end = datetime.utcnow()
print(".......................................")
print("Experiment finished at {}".format(time_end.strftime("%H:%M:%S")))
print("")
time_elapsed = time_end - time_start
print("Experiment elapsed time: {} seconds".format(time_elapsed.total_seconds()))
else:
print "Training was skipped!"
TRAIN_SIZE = 73124
VALID_SIZE = 23079
tf.logging.set_verbosity(tf.logging.ERROR)
estimator = create_estimator_hub(hparams, run_config)
train_metrics = estimator.evaluate(
input_fn = generate_tsv_input_fn(
files_pattern= Params.RAW_TRAIN_DATA_FILE_PREFEX+"*",
mode= tf.estimator.ModeKeys.EVAL,
batch_size= TRAIN_SIZE),
steps=1
)
print("############################################################################################")
print("# Train Measures: {}".format(train_metrics))
print("############################################################################################")
eval_metrics = estimator.evaluate(
input_fn=generate_tsv_input_fn(
files_pattern=Params.RAW_EVAL_DATA_FILE_PREFEX+"*",
mode= tf.estimator.ModeKeys.EVAL,
batch_size= TRAIN_SIZE),
steps=1
)
print("")
print("############################################################################################")
print("# Valid Measures: {}".format(eval_metrics))
print("############################################################################################")
import os
export_dir = model_dir +"/export/estimate/"
saved_model_dir = os.path.join(export_dir, os.listdir(export_dir)[0])
print(saved_model_dir)
print("")
predictor_fn = tf.contrib.predictor.from_saved_model(
export_dir = saved_model_dir,
signature_def_key="predict"
)
output = predictor_fn(
{
'title':[
'Microsoft and Google are joining forces for a new AI framework',
'A new version of Python is mind blowing',
'EU is investigating new data privacy policies'
]
}
)
print(output)
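# Hedged note (added): with a canned DNNClassifier the "predict" signature typically
# exposes keys such as 'classes' and 'probabilities'; inspect output.keys() if your
# TensorFlow version reports different names.
# print(output['classes'])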
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 1 (a). Maximum irradiance
Step2: Task 3. Choosing the interference filter
Step3: Task 3 (b). Verifying the filter
|
<ASSISTANT_TASK:>
Python Code:
from IPython.core.display import Image
Image("http://upload.wikimedia.org/wikipedia/commons/thumb/2/28/IEC60825_MPE_W_s.png/640px-IEC60825_MPE_W_s.png")
####
# Parámetros a modificar. INICIO
####
web_laser = 'http://www.punterolaser.com' # Incluir la dirección de la página web
web_anchura = '1100' # Valor en pixeles de la anchura de la página web incrustada en el notebook. Solo modificar si no se ve bien la página web
web_altura = '800' # Valor en pixeles de la altura de la página web incrustada en el notebook. Solo modificar si no se ve bien la página web
####
# Parámetros a modificar. FIN
####
##############################################################################################################################
texto_web_laser='<iframe src= '+web_laser+' width='+web_anchura+'px, height='+web_altura+'px>'
from IPython.display import HTML
HTML(texto_web_laser)
####
# Parameters to modify. START
####
web_filtro = 'http://www.semrock.com/FilterDetails.aspx?id=LP02-224R-25' # Enter the URL of the web page
web_anchura = '1100' # Width in pixels of the web page embedded in the notebook. Only change it if the page does not display properly
web_altura = '800' # Height in pixels of the web page embedded in the notebook. Only change it if the page does not display properly
####
# Parameters to modify. END
####
##############################################################################################################################
texto_web_filtro='<iframe src= '+web_filtro+' width='+web_anchura+'px, height='+web_altura+'px>'
from IPython.display import HTML
HTML(texto_web_filtro)
%pylab inline
####
# Parameters to modify. START
####
longitud_de_onda_laser = 530 # Enter the wavelength of the selected laser pointer (in nm)
# We plot the original curve and a zoomed view using the following value ranges (to see the region of interest better)
longitud_de_onda_minina = 500 # Minimum wavelength (in nm) for the zoom
longitud_de_onda_maxima = 670 # Maximum wavelength (in nm) for the zoom
transmitancia_minina = 1e-8 # Minimum transmittance for the zoom
transmitancia_maxima = 1 # Maximum transmittance for the zoom
####
# Parameters to modify. END
####
##############################################################################################################################
from numpy.core.defchararray import find
indice_igual=find(web_filtro,'=')
codigoID = web_filtro[indice_igual+1:-3]
Codigo_Filtro = codigoID
filename = 'http://www.semrock.com/_ProductData/Spectra/'+Codigo_Filtro+'_Spectrum.txt' # Location of the data file
data=genfromtxt(filename,dtype=float,skip_header=4) # Load the data
longitud_de_onda=data[:,0];transmitancia=data[:,1];
figure(figsize(13,6))
subplot(1,2,1)
semilogy(longitud_de_onda,transmitancia)
xlabel('$\lambda$ (nm)');ylabel('T');title('Original curve')
subplot(1,2,2)
semilogy(longitud_de_onda,transmitancia)
xlabel('$\lambda$ (nm)');ylabel('T');title('Zoom')
axis([longitud_de_onda_minina, longitud_de_onda_maxima, transmitancia_minina, transmitancia_maxima]);
from scipy.interpolate import interp1d
f_transm = interp1d(data[:,0],data[:,1])
transm_para_laser = f_transm(longitud_de_onda_laser)
print "Transmitancia para la longitud de onda del puntero láser"
print transm_para_laser
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: "header=0" indicates that the first line of the file contains column names, "delimiter=\t" indicates that the fields are separated by tabs, and quoting=3 ignore doubled quotes
Step2: We have 25000 rows, lets check the first one
Step3: Data Cleaning and Text Preprocessing
Step4: Dealing with Punctuation, Numbers and Stopwords
Step5: We'll also convert our reviews to lower case and split them into individual words (a process called "tokenization")
Step6: Finally, we need to decide how to deal with frequently occurring words that don't carry much meaning. Such words are called "stop words"; in English they include words such as "a", "and", "is", and "the". Conveniently, there are Python packages that come with stop word lists built in. Let's import a stop word list from the Python Natural Language Toolkit (NLTK).
Step7: Now we can use nltk to get a list of stop words
Step8: To remove stop words from our movie review
Step9: Now we have code to clean one review - but we need to clean the remaining 25,000! To make our code reusable, let's create a function
Step10: At the end of the function we joined the words back into one paragraph. This is to make the output easier to use in our Bag of Words
Step11: Creating Features from a Bag of Words
Step12: Now that the Bag of Words model is trained, let's look at the vocabulary
Step13: Classification
Step14: Now we'll evaluate the performance of our classifier
Step15: Sentiment Analysis
Step16: Next, we create a function to sum the sentiment associated with each word in a paragraph
Step17: We'll use our cleaned review dataset in clean_train_reviews. Let's check the results on the first two.
Step18: Why can this approach to sentiment analysis of movie reviews be problematic?
Step19: Then we'll convert these sentiment values to positive (1) and negative (0) labels, as we have in our dataset
Step20: And we'll compare our results with the entire target vector (because we are not doing training at this point)
Step21: Not bad for such a simple method.
Step22: Now we can do classification again with our new feature set
Step23: Was the new feature set useful or not?
|
<ASSISTANT_TASK:>
Python Code:
%cd C:/temp/
import pandas as pd
train = pd.read_csv("labeledTrainData.tsv", header=0, delimiter="\t", quoting=3)
print(train.columns.values)
print(train.shape)
print train["review"][0]
from bs4 import BeautifulSoup
example1 = BeautifulSoup(train["review"][0])
print(example1.get_text())
import re
# Use regular expressions to do a find-and-replace
letters_only = re.sub("[^a-zA-Z]", # The pattern to search for (everything except letters)
" ", # The pattern to replace it with
example1.get_text() ) # The text to search
print letters_only
lower_case = letters_only.lower() # Convert to lower case
words = lower_case.split() # Split into words
print(words)
import nltk
#nltk.download() # Download text data sets, including stop words (A new window should open)
from nltk.corpus import stopwords # Import the stop word list
print stopwords.words("english")
words = [w for w in words if not w in stopwords.words("english")]
print(words)
def review_to_words( raw_review ):
# Function to convert a raw review to a string of words
# The input is a single string (a raw movie review), and
# the output is a single string (a preprocessed movie review)
#
# 1. Remove HTML
review_text = BeautifulSoup(raw_review).get_text()
#
# 2. Remove non-letters
letters_only = re.sub("[^a-zA-Z]", " ", review_text)
#
# 3. Convert to lower case, split into individual words
words = letters_only.lower().split()
#
# 4. In Python, searching a set is much faster than searching
# a list, so convert the stop words to a set
stops = set(stopwords.words("english"))
#
# 5. Remove stop words
meaningful_words = [w for w in words if not w in stops]
#
# 6. Join the words back into one string separated by space,
# and return the result.
return( " ".join( meaningful_words ))
num_reviews = train["review"].size
clean_train_reviews = []
for i in xrange( 0, num_reviews ):
if( (i+1)%2500 == 0 ):
print "Review %d of %d\n" % ( i+1, num_reviews )
# Call our function for each one, and add the result to the list
clean_train_reviews.append( review_to_words( train["review"][i] ) )
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 5000)
# fit_transform() does two functions: First, it fits the model
# and learns the vocabulary; second, it transforms our training data
# into feature vectors. The input to fit_transform should be a list of
# strings.
train_data_features = vectorizer.fit_transform(clean_train_reviews)
train_data_features = train_data_features.toarray()
print(train_data_features.shape)
# Take a look at the words in the vocabulary
vocab = vectorizer.get_feature_names()
print(vocab)
import numpy as np
# Sum up the counts of each vocabulary word
dist = np.sum(train_data_features, axis=0)
# For each, print the vocabulary word and the number of times it
# appears in the training set
for tag, count in zip(vocab, dist):
print count, tag
from sklearn.cross_validation import train_test_split
random_state = np.random.RandomState(0)
X, y = train_data_features, train["sentiment"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=random_state)
print("Training the random forest...")
from sklearn.ensemble import RandomForestClassifier
# Initialize a Random Forest classifier with 100 trees
forest = RandomForestClassifier(n_estimators = 100, n_jobs=2)
from time import time
t0 = time()
# Fit the forest to the training set, using the bag of words as
# features and the sentiment labels as the response variable
forest = forest.fit(X_train, y_train)
print("... took %0.3fs" % (time() - t0))
y_pred = forest.predict(X_test)
from sklearn import metrics
print(metrics.classification_report(y_test, y_pred, target_names=['negative review', 'positive review']))
import pandas as pd
afinn = pd.read_csv("AFINN-111.txt", header=None, delimiter="\t")
sent_dict = dict(zip(afinn[0], afinn[1]))
print(sent_dict)
#Calculate the sentiment in the provided text
def sentiment_in_text(text, sent_dict):
sentiment = 0.0
words = text.split()
for w in words:
if not w in sent_dict: continue
sentiment += float(sent_dict[w])
return sentiment
print(clean_train_reviews[0])
print(sentiment_in_text(clean_train_reviews[0], sent_dict))
print(clean_train_reviews[1])
print(sentiment_in_text(clean_train_reviews[1], sent_dict))
sentiment_values = [sentiment_in_text(x, sent_dict) for x in clean_train_reviews] #This is a list comprehension expression
sentiment_values = np.array(sentiment_values) #We convert the list to a numpy array for easier manipulation
print(sentiment_values)
y_pred_sent = [1 if x>0 else 0 for x in sentiment_values]
print(metrics.classification_report(y, y_pred_sent, target_names=['negative review', 'positive review']))
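# (Added illustration, not in the original notebook) A single-number summary of the same
# comparison; accuracy is computed over the full training labels because no model was fit here.
print(metrics.accuracy_score(y, y_pred_sent))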
#The bag-of-words is in the variable train_data_features
print(train_data_features.shape)
sentiment_values_matrix = np.matrix(sentiment_values).T
print(sentiment_values_matrix.shape)
#numpy.hstack() Stack arrays in sequence horizontally (column wise). The number of rows must match
X2 = np.hstack((sentiment_values_matrix, train_data_features))
print(X2.shape)
random_state = np.random.RandomState(0)
y = train["sentiment"]
X_train2, X_test2, y_train2, y_test2 = train_test_split(X2, y, test_size=.25, random_state=random_state)
print("Training again the random forest...")
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators = 100, n_jobs=2)
from time import time
t0 = time()
forest = forest.fit(X_train2, y_train2)
print("... took %0.3fs" % (time() - t0))
y_pred2 = forest.predict(X_test2)
from sklearn import metrics
print(metrics.classification_report(y_test2, y_pred2, target_names=['negative review', 'positive review']))
# Read the test data
test = pd.read_csv("testData.tsv", header=0, delimiter="\t", \
quoting=3 )
# Verify that there are 25,000 rows and 2 columns
print test.shape
# Create an empty list and append the clean reviews one by one
num_reviews = len(test["review"])
clean_test_reviews = []
print "Cleaning and parsing the test set movie reviews...\n"
for i in xrange(0,num_reviews):
if( (i+1) % 2500 == 0 ):
print "Review %d of %d\n" % (i+1, num_reviews)
clean_review = review_to_words( test["review"][i] )
clean_test_reviews.append( clean_review )
# Get a bag of words for the test set, and convert to a numpy array
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
# The forest was last fit with the extra sentiment-score column, so build the same
# feature layout for the test set before predicting
test_sentiment_values = np.matrix([sentiment_in_text(x, sent_dict) for x in clean_test_reviews]).T
test_data_features = np.hstack((test_sentiment_values, test_data_features))
# Use the random forest to make sentiment label predictions
result = forest.predict(test_data_features)
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
# Use pandas to write the comma-separated output file
output.to_csv( "Bag_of_Words_model.csv", index=False, quoting=3 )
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The features?
Step2: There are 200 observations, corresponding to 200 markets.
Step3: Questions
Step4: Interpreting Model Coefficients
Step5: The predicted Sales in that market are 9.409444 * 1000 =~ 9409 widgets
Step6: Plotting the Least Squares Line
Step7: Confidence in Linear Regression Models
Step8: Since we only have a single sample of data, and not the entire population, the "true" value of the regression coefficient is either within this interval or it isn't, but there is no way to actually know.
Step9: If the 95% confidence interval includes zero, the p-value for that coefficient will be greater than 0.05. If the 95% confidence interval does not include zero, the p-value will be less than 0.05. Thus, a p-value less than 0.05 is one way to decide whether there is likely a relationship between the feature and the response. (Again, using 0.05 as the cutoff is just a convention.)
Step10: Is that a "good" R-squared value? One cannot generally assess that. What a "good" R-squared value is depends on the domain and therefore R-squared is most useful as a tool for comparing different models.
Step11: How do we interpret the coefficients? For a given amount of Radio and Newspaper ad spending, an increase of one unit ($1,000) in TV ad spending is associated with an increase in Sales of 45.765 widgets.
Step12: TV and Radio have significant p-values, whereas Newspaper does not. Thus we reject the null hypothesis for TV and Radio (that there is no association between those features and Sales), and fail to reject the null hypothesis for Newspaper.
Step13: R-squared will always increase as you add more features to the model, even if they are unrelated to the response. Thus, selecting the model with the highest R-squared is not a reliable approach for choosing the best linear model.
Step14: Handling Categorical Predictors with Two Categories
Step15: When using scikit-learn, we need to represent all data numerically. For example, if the feature we want to represent has only two categories, we create a dummy variable that represents the categories as a binary value
Step16: The multiple linear regression including the IsLarge predictor
Step17: How do we interpret the coefficient of IsLarge? For a given amount of TV/Radio/Newspaper ad spending, a large market is associated with an average increase in Sales of 51.55 widgets (as compared to sales in a Small market).
Step18: We have to represent Area numerically, but an encoding such as 0=rural, 1=suburban, 2=urban would not work because that would imply that there is an ordered relationship between suburban and urban. Instead, we can create another dummy variable.
Step19: rural is coded as Area_suburban=0 and Area_urban=0
|
<ASSISTANT_TASK:>
Python Code:
# imports
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# read data into a DataFrame
data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
data.head()
# print the size of the DataFrame object, i.e., the size of the dataset
data.shape
fig, axs = plt.subplots(1, 3, sharey=True)
data.plot(kind='scatter', x='TV', y='Sales', ax=axs[0], figsize=(16, 8))
data.plot(kind='scatter', x='Radio', y='Sales', ax=axs[1])
data.plot(kind='scatter', x='Newspaper', y='Sales', ax=axs[2])
import statsmodels.formula.api as sf
#create a model with Sales as dependent variable and TV as explanatory variable
model = sf.ols('Sales ~ TV', data)
#fit the model to the data
fitted_model = model.fit()
# print the coefficients
print(fitted_model.params)
7.032594 + 0.047537*50
# create a DataFrame to use with the Statsmodels formula interface
New_TV_spending = pd.DataFrame({'TV': [50]})
#check the newly created DataFrame
New_TV_spending.head()
# use the model created above to predict the sales to be generated by the new TV ad money
sales = fitted_model.predict(New_TV_spending)
print(sales)
# create a DataFrame with the minimum and maximum values of TV ad money
New_TV_money = pd.DataFrame({'TV': [data.TV.min(), data.TV.max()]})
print(New_TV_money.head())
# make predictions for those x values and store them
sales_predictions = fitted_model.predict(New_TV_money)
print(sales_predictions)
# plot the observed data
data.plot(kind='scatter', x='TV', y='Sales')
# plot the least squares line
plt.plot(New_TV_money, sales_predictions, c='red', linewidth=2)
# print the confidence intervals for the model coefficients
print(fitted_model.conf_int())
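# (Optional sketch, not part of the original notebook) A bootstrap approximation of the same
# interval: resample the rows with replacement, refit, and take percentiles of the TV
# coefficient. The number of resamples (500) is an arbitrary illustrative choice.
import numpy as np
boot_coefs = []
for _ in range(500):
    idx = np.random.randint(0, len(data), len(data))
    boot_coefs.append(sf.ols(formula='Sales ~ TV', data=data.iloc[idx]).fit().params['TV'])
print(np.percentile(boot_coefs, [2.5, 97.5]))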
# print the p-values for the model coefficients
fitted_model.pvalues
# print the R-squared value for the model
fitted_model.rsquared
# create a model with all three features
multi_model = sf.ols(formula='Sales ~ TV + Radio + Newspaper', data=data)
fitted_multi_model = multi_model.fit()
# print the coefficients
print(fitted_multi_model.params)
# print a summary of the fitted model
fitted_multi_model.summary()
# only include TV and Radio in the model
model1 = sf.ols(formula='Sales ~ TV + Radio', data=data).fit()
print(model1.rsquared)
# add Newspaper to the model (which we believe has no association with Sales)
model2 = sf.ols(formula='Sales ~ TV + Radio + Newspaper', data=data).fit()
print(model2.rsquared)
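# (Added illustration; the column name 'Noise' is made up for this example)
# Adding a feature that is pure noise still nudges R-squared upward, which is why a raw
# R-squared comparison favours larger models.
import numpy as np
data_noise = data.copy()
data_noise['Noise'] = np.random.randn(len(data_noise))
model3 = sf.ols(formula='Sales ~ TV + Radio + Newspaper + Noise', data=data_noise).fit()
print(model3.rsquared)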
# create a DataFrame
feature_cols = ['TV', 'Radio', 'Newspaper']
X = data[feature_cols]
y = data.Sales
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X, y)
# print intercept and coefficients
print(lm.intercept_)
print(lm.coef_)
# pair the feature names with the coefficients
print(list(zip(feature_cols, lm.coef_)))
# predict for a new observation
lm.predict([[100, 25, 25]])
# calculate the R-squared
lm.score(X, y)
import numpy as np
# create a Series of booleans in which roughly half are True
#generate len(data) numbers between 0 and 1
numbers = np.random.rand(len(data))
#create and index of 0s and 1s by based on whether the corresponding random number
#is greater than 0.5.
index_for_large = (numbers > 0.5)
#create a new data column called Size and set its values to 'small'
data['Size'] = 'small'
# change the values of Size to 'large' whenever the corresponding value of the index is 1
data.loc[index_for_large, 'Size'] = 'large'
data.head()
# create a new Series called IsLarge
data['IsLarge'] = data.Size.map({'small':0, 'large':1})
data.head()
# create X and y
feature_cols = ['TV', 'Radio', 'Newspaper', 'IsLarge']
X = data[feature_cols]
y = data.Sales
# instantiate, fit
lm = LinearRegression()
lm.fit(X, y)
# print coefficients
list(zip(feature_cols, lm.coef_))
# set a seed for reproducibility
np.random.seed(123456)
# assign roughly one third of observations to each group
nums = np.random.rand(len(data))
mask_suburban = (nums > 0.33) & (nums < 0.66)
mask_urban = nums > 0.66
data['Area'] = 'rural'
data.loc[mask_suburban, 'Area'] = 'suburban'
data.loc[mask_urban, 'Area'] = 'urban'
data.head()
# create three dummy variables using get_dummies, then exclude the first dummy column
area_dummies = pd.get_dummies(data.Area, prefix='Area').iloc[:, 1:]
# concatenate the dummy variable columns onto the original DataFrame (axis=0 means rows, axis=1 means columns)
data = pd.concat([data, area_dummies], axis=1)
data.head()
# read data into a DataFrame
#data = pd.read_csv('http://www-bcf.usc.edu/~gareth/ISL/Advertising.csv', index_col=0)
# create X and y
feature_cols = ['TV', 'Radio', 'Newspaper', 'IsLarge', 'Area_suburban', 'Area_urban']
X = data[feature_cols]
y = data.Sales
# instantiate, fit
lm = LinearRegression()
lm.fit(X, y)
# print coefficients
list(zip(feature_cols, lm.coef_))
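# (Added illustration) Predicted Sales for a hypothetical large urban market spending
# 100/25/25 (in $1000s) on TV/Radio/Newspaper; a rural market would instead use
# Area_suburban=0 and Area_urban=0.
lm.predict([[100, 25, 25, 1, 0, 1]])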
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load and prepare the data
Step2: Checking out the data
Step3: Dummy variables
Step4: Scaling target variables
Step5: Splitting the data into training, testing, and validation sets
Step6: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
Step7: Time to build the network
Step8: Training the network
Step9: Check out your predictions
Step10: OPTIONAL
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
rides.head()
rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
# Save data for approximately the last 21 days
test_data = data[-21*24:]
# Now remove the test data from the data set
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# Hold out the last 60 days or so of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### TODO: Set self.activation_function to your implemented sigmoid function ####
#
# Note: in Python, you can define a function with a lambda expression,
# as shown below.
self.activation_function = lambda x : 1 / (1 + np.exp(-x)) # Replace 0 with your sigmoid calculation.
### If the lambda code above is not something you're familiar with,
# You can uncomment out the following three lines and put your
# implementation there instead.
#
#def sigmoid(x):
# return 0 # Replace 0 with your sigmoid calculation here
#self.activation_function = sigmoid
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer - Replace these values with your calculations.
hidden_inputs = np.dot(self.weights_input_to_hidden,inputs) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with your calculations.
final_inputs = np.dot(self.weights_hidden_to_output,hidden_outputs) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error - Replace this value with your calculations.
output_errors = targets - final_outputs # Output layer error is the difference between desired target and actual output.
# TODO: Backpropagated error - Replace these values with your calculations.
hidden_errors = np.dot(output_errors,self.weights_hidden_to_output)
hidden_grad = hidden_outputs * (1.0 - hidden_outputs) # hidden layer gradients
hidden_error_term = hidden_grad * hidden_errors.T
# TODO: Update the weights - Replace these values with your calculations.
self.weights_hidden_to_output += self.lr * output_errors * hidden_outputs.T # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += self.lr * hidden_error_term * inputs.T # update input-to-hidden weights with gradient descent step
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer - replace these values with the appropriate calculations.
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs) # signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer - Replace these values with the appropriate calculations.
final_inputs = np.dot(self.weights_hidden_to_output,hidden_outputs) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
import sys
### Set the hyperparameters here ###
epochs = 1500
learning_rate = 0.01
hidden_nodes = 8
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=1)
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You can listen to them here
Step2: One GrFNN to rule them all
Step3: The previous code generated the figures added to the paper on April 28; the code below is experimental
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
fs = 8000.0
f0 = 220.0 # Hz
duration = 0.05 # 1.0
t = np.linspace(0, duration, num=fs*duration)
N_overtones = 7
harmonics = np.arange(1, N_overtones)
f = f0 * harmonics;
# exponentially decaying gain
# G = 0.5 * np.exp(-f/1e3) + 0.5
G = np.ones_like(f)
x_full = np.sum(np.exp(2j*np.pi*np.outer(f,t)), 0)/N_overtones
x_missing1 = np.sum(G[1:]*np.exp(2j*np.pi*np.outer(t, f[1:])), 1)/N_overtones
x_missing2 = np.sum(G[2:]*np.exp(2j*np.pi*np.outer(t, f[2:])), 1)/N_overtones
x_missing3 = np.sum(G[3:]*np.exp(2j*np.pi*np.outer(t, f[3:])), 1)/N_overtones
def normalize(x, g=1.0):
# return g*x/np.max(np.abs(x))
return g*x/np.sqrt(np.mean(x*np.conj(x)))
G = 0.25
x_full = normalize(x_full, G)
x_missing1 = normalize(x_missing1, G)
x_missing2 = normalize(x_missing2, G)
x_missing3 = normalize(x_missing3, G)
from scipy.io import wavfile
plt.figure(figsize=(16,4))
ax1 = plt.subplot(1,4,1)
plt.plot(t, np.real(x_full))
ax2 = plt.subplot(1,4,2, sharex=ax1, sharey=ax1)
plt.plot(t, np.real(x_missing1))
ax3 = plt.subplot(1,4,3, sharex=ax1, sharey=ax1)
plt.plot(t, np.real(x_missing2))
ax4 = plt.subplot(1,4,4, sharex=ax1, sharey=ax1)
plt.plot(t, np.real(x_missing3))
plt.xlim(0, 0.02)
plt.ylim(-1, 1)
from IPython.display import Audio
Audio(np.real(x_full), rate=fs)
Audio(np.real(x_missing1), rate=fs)
Audio(np.real(x_missing2), rate=fs)
Audio(np.real(x_missing3), rate=fs)
from pygrfnn import GrFNN
from pygrfnn.oscillator import Zparam
from pygrfnn.network import Model, make_connections
from pygrfnn.vis import tf_detail, tf_simple
from matplotlib2tikz import save as tikz_save
# supercritical
params1 = Zparam(alpha=0.0001, beta1=-1, beta2=0, epsilon=1.0)
freqs = (128, 2048)
num_oscs = 256
stimulus_conn_type = 'allfreq'
gain = 1.0
# create model
g1 = GrFNN(params1, frequency_range=freqs, num_oscs=num_oscs, stimulus_conn_type=stimulus_conn_type, w=100.0)
m = Model()
m.add_layer(g1, input_channel=0)
# C = make_connections(g1, g1,
# strength=1.0, range=1.05,
# modes=[1/3., 1/2., 1., 2., 3.],
# mode_amplitudes =[1, 1, 1, 1, 1],
# self_connect=True)
# save initial states to use the same for every run
z0 = [g.z.copy() for g in m.layers()]
output_dir = '/Users/jorgeh/Documents/CCRMA/research/ismir2015/frameworkPaper/figs/missingFundamental'
for s, name, filename in [(x_full, 'Full', 'full'),
(x_missing1, 'Missing f0', 'missing1'),
(x_missing2, 'Missing f0 & f1', 'missing2'),
(x_missing3, 'Missing f0, f1 & f2', 'missing3')]:
# reset states
for z, l in zip(z0, m.layers()):
l.z = z.copy()
# run it
m.run(gain*s, t, 1/fs)
# show TFR
tf_detail(np.abs(g1.Z), t, g1.f, t_detail=np.max(t), x=s, title="Layer 1 - {}".format(name),
# vmin=0,
# vmax=0.7
)
# NOTE: tikz_save can't handle the color bar, so if you uncomment this, you'll need to go to the source code
# in pygrfnn.vis and comment out the line lines `cb = plt.colorbar(im, ax=axTF, cax=axCB)` and
# `cb.ax.yaxis.set_ticks_position('left')`
# tikz_save('{}/{}.tikz'.format(output_dir, filename), figureheight='3cm', figurewidth='4cm')
# FFT of the stimuli
from numpy.fft import fft
N = min(len(x_full), 2048)
x = np.real(x_full[:N])*np.hanning(N)
XF = np.abs(fft(x))
f = np.arange(0, fs, fs/N)
plt.semilogx(f[:N/2], XF[:N/2])
plt.xlim(100, 2000)
import librosa
fmin = 128
n_bins = 60 * 2
bins_per_octave=12 * 2
cqtF = librosa.cqt_frequencies(n_bins=n_bins, fmin=fmin, bins_per_octave=bins_per_octave)
for s in [x_full, x_missing1, x_missing2, x_missing3]:
C = librosa.cqt(np.real(s), sr=fs, fmin=fmin,
n_bins=n_bins, bins_per_octave=bins_per_octave)[:,-1]
plt.semilogx(cqtF, C)
print 'x,y'
# for x,y in zip(cqtF, C):
# print '{},{}'.format(x, y)
# print '\n\n'
from pygrfnn import GrFNN
from pygrfnn.oscillator import Zparam
from pygrfnn.network import Model, make_connections
from pygrfnn.vis import tf_detail, tf_simple, vector_field
from matplotlib2tikz import save as tikz_save
# supercritical
# params1 = Zparam(alpha=0.0001, beta1=-1, beta2=-1, epsilon=1.0) # this kind of works without 'w' and gain=0.4
params1 = Zparam(alpha=-0.00001, beta1=.25, beta2=-10, epsilon=0.95)
vector_field(alpha=params1.alpha,
beta1=params1.beta1,
beta2=params1.beta2,
delta1=params1.delta1, delta2=params1.delta2,
epsilon=params1.epsilon,
F=np.arange(0,0.8, 0.1))
plt.ylim(-.2,.9)
N_octaves = 4
freqs = (110, 110*2**N_octaves)
num_oscs = 64*N_octaves
stimulus_conn_type = 'allfreq'
gain = 0.5
# create model
g1 = GrFNN(params1,
z0 = 0,
frequency_range=freqs,
num_oscs=num_oscs,
stimulus_conn_type=stimulus_conn_type,
# w=100.0
)
m = Model()
m.add_layer(g1, input_channel=0)
# save initial states to use the same for every run
z0 = [g.z.copy() for g in m.layers()]
output_dir = '/Users/jorgeh/Documents/CCRMA/research/ismir2015/frameworkPaper/figs/missingFundamental'
mean_10ms = {}
for s, name, filename in [(x_full, 'Full', 'full'),
(x_missing1, 'Missing f0', 'missing1'),
(x_missing2, 'Missing f0 & f1', 'missing2'),
(x_missing3, 'Missing f0, f1 & f2', 'missing3')]:
# reset states
for z, l in zip(z0, m.layers()):
l.z = z.copy()
# run it
m.run(gain*s, t, 1/fs)
# show TFR
tf_detail(np.abs(g1.Z), t, g1.f, t_detail=np.max(t), x=s, title="Layer 1 - {}".format(name),
# vmin=0,
# vmax=0.7
)
M = int(20e-3*fs)
mean_10ms[name] = np.mean(np.abs(g1.Z[:,-M:]),1)
# NOTE: tikz_save can't handle the color bar, so if you uncomment this, you'll need to go to the source code
# in pygrfnn.vis and comment out the line lines `cb = plt.colorbar(im, ax=axTF, cax=axCB)` and
# `cb.ax.yaxis.set_ticks_position('left')`
# tikz_save('{}/{}.tikz'.format(output_dir, filename), figureheight='3cm', figurewidth='4cm')
for m in mean_10ms:
plt.figure()
plt.semilogx(g1.f, mean_10ms[m])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TADA ... wait, nothing happened.
Step2: How about some text on the image.
Step3: Once images are captured, let's try to get a 'color' from each square
Step4: Our task is to assign each square a 'color.' To do this, we try to group them (since the computer doesn't really know what 'color' is).
Step5: Now, how about red vs green vs blue (a 3d cross plot)?
|
<ASSISTANT_TASK:>
Python Code:
import os
from picamera import PiCamera
from picamera.color import Color
from time import sleep
camera = PiCamera()
# import a bunch of stuff that we'll use to manipulate our images...
import pandas as pd
from skimage.io import imread
from skimage import filters
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage import io
from skimage.measure import block_reduce
import numpy as np
from sklearn.cluster import KMeans
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
output_notebook()
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
camera.start_preview()
sleep(3)
camera.stop_preview()
camera.hflip = True
camera.vflip = True
camera.brightness = 50 # the default is 50, but you can set it to whatever.
camera.annotate_foreground = Color(1.0,1.0,0.5)
camera.resolution = (120, 120)
camera.annotate_text = ""
camera.annotate_text_size = 36
camera.start_preview()
sleep(2)
camera.capture('./img/cubeU.jpg')
camera.stop_preview()
camera.start_preview()
sleep(2)
camera.capture('./img/cubeR.jpg')
camera.stop_preview()
camera.start_preview()
sleep(2)
camera.capture('./img/cubeF.jpg')
camera.stop_preview()
camera.start_preview()
sleep(2)
camera.capture('./img/cubeD.jpg')
camera.stop_preview()
camera.start_preview()
sleep(2)
camera.capture('./img/cubeL.jpg')
camera.stop_preview()
camera.start_preview()
sleep(2)
camera.capture('./img/cubeB.jpg')
camera.stop_preview()
face_order = 'URFDLB'
face_images = {}
face_images_out = []
squares = {}
masks = {} # for QA
for face in face_order:
# read and cache images in dict
face_images[face] = (imread('./img/cube%s.jpg' % face))
img = face_images[face]
mask = np.empty(img.shape[:2], dtype=np.bool)
mask[::]=False
row_coords = [(25, 30), (50, 55), (75, 80)]
col_coords = [(38, 43), (65, 70), (95, 100)]
squares[face] = np.zeros((3, 3, 3))
row = 0
# extract average RGB values from approximate square centers
for i in row_coords:
col = 0
for j in col_coords:
mask[i[0]:i[1], j[0]:j[1]] = True
squares[face][row, col] = (np.average(img[i[0]:i[1], j[0]:j[1], 0]),
np.average(img[i[0]:i[1], j[0]:j[1], 1]),
np.average(img[i[0]:i[1], j[0]:j[1], 2]))
col+=1
row+=1
# to show last image alignment
masks[face] = mark_boundaries(img, mask)
fig_mask = plt.figure()
ax = fig_mask.add_subplot(1,1,1)
ax.imshow(masks['D'])
plt.axis("off")
# this basically puts all the average color values for each square into a big table, so we can do some math
square_list = []
for face in face_order:
for x in range(3):
for y in range(3):
r, g, b = squares[face][x,y]
square_list.append([face, x, y, r, g, b])
columns = ['face', 'x', 'y', 'r', 'g', 'b']
square_frame = pd.DataFrame(square_list, columns=columns)
square_frame.head(10)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(square_frame.r, square_frame.b, c='gray', marker='o')
ax.set_xlabel('red')
ax.set_ylabel('blue')
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(square_frame.r, square_frame.g, square_frame.b, c='gray', marker='o')
ax.set_xlabel('red')
ax.set_ylabel('green')
ax.set_zlabel('blue')
X = square_frame[['r', 'g', 'b']]
km = KMeans(n_clusters=6, random_state=123)
km.fit(X)
km.cluster_centers_
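# (Added sanity check) With a cleanly captured cube, each of the 6 clusters should contain
# exactly 9 squares; np.bincount counts how many squares fell into each cluster.
print(np.bincount(km.predict(X)))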
centers_list = []
center_rgb = square_frame[(square_frame['x']==1) & (square_frame['y']==1)]
print center_rgb
square_mapper = {}
center_map = km.predict(center_rgb[['r', 'g', 'b']])
for i in range(6):
square_mapper[center_map[i]] = face_order[i]
center_map
square_mapper
square_predict = km.predict(square_frame[['r', 'g', 'b']])
squares = []
for square in square_predict:
squares.append(square_mapper[square])
square_frame['predict'] = squares
square_color_map = {'U': 'black', 'R': 'red', 'F': 'green', 'D': 'yellow', 'L': 'orange', 'B': 'blue'}
square_colors = [square_color_map[sq] for sq in squares]
square_frame['colors'] = square_colors
square_frame[:15]
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(square_frame.r, square_frame.g, square_frame.b, c=square_frame.colors, marker='o')
ax.set_xlabel('red')
ax.set_ylabel('green')
ax.set_zlabel('blue')
import kociemba
kociemba.solve('URRDUUDRDRLBFRBBDUFBBBFLBRLLDUDDBDUFFLRRLUFFDUFLLBURFL')
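# (Hedged sketch, not from the original notebook) The solve() call above uses a hard-coded
# example string. Assuming square_frame rows are ordered U,R,F,D,L,B and row-major within
# each face, the 54-character cube string could be assembled from our predictions instead:
cube_string = ''.join(square_frame['predict'])
print(cube_string)
# kociemba.solve(cube_string)  # uncomment once the captured cube state is valid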
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: From a sample of the RMS Titanic data, we can see the various features present for each passenger on the ship
Step3: The very same sample of the RMS Titanic data now shows the Survived feature removed from the DataFrame. Note that data (the passenger data) and outcomes (the outcomes of survival) are now paired. That means for any passenger data.loc[i], they have the survival outcome outcomes[i].
Step5: Tip
Step6: Question 1
Step7: Answer
Step9: Examining the survival statistics, a large majority of males did not survive the ship sinking. However, a majority of females did survive the ship sinking. Let's build on our previous prediction
Step10: Question 2
Step11: Answer
Step13: Examining the survival statistics, the majority of males younger than 10 survived the ship sinking, whereas most males age 10 or older did not survive the ship sinking. Let's continue to build on our previous prediction
Step14: Question 3
Step15: Answer
Step17: After exploring the survival statistics visualization, fill in the missing code below so that the function will make your prediction.
Step18: Question 4
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the dataset
in_file = 'titanic_data.csv'
full_data = pd.read_csv(in_file)
# Print the first few entries of the RMS Titanic data
display(full_data.head())
# Store the 'Survived' feature in a new variable and remove it from the dataset
outcomes = full_data['Survived']
data = full_data.drop('Survived', axis = 1)
# Show the new dataset with 'Survived' removed
display(data.head())
def accuracy_score(truth, pred):
"""Returns accuracy score for input truth and predictions."""
# Ensure that the number of predictions matches number of outcomes
if len(truth) == len(pred):
# Calculate and return the accuracy as a percent
return "Predictions have an accuracy of {:.2f}%.".format((truth == pred).mean()*100)
else:
return "Number of predictions does not match number of outcomes!"
# Test the 'accuracy_score' function
predictions = pd.Series(np.ones(5, dtype = int))
print accuracy_score(outcomes[:5], predictions)
def predictions_0(data):
"""Model with no features. Always predicts a passenger did not survive."""
predictions = []
for _, passenger in data.iterrows():
# Predict the survival of 'passenger'
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_0(data)
print accuracy_score(outcomes, predictions)
vs.survival_stats(data, outcomes, 'Sex')
def predictions_1(data):
"""Model with one feature:
- Predict a passenger survived if they are female."""
predictions = []
for _, passenger in data.iterrows():
if passenger['Sex']== 'female':
predictions.append(1)
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_1(data)
print accuracy_score(outcomes, predictions)
vs.survival_stats(data, outcomes, 'Age', ["Sex == 'male'"])
def predictions_2(data):
"""Model with two features:
- Predict a passenger survived if they are female.
- Predict a passenger survived if they are male and younger than 10."""
predictions = []
for _, passenger in data.iterrows():
#If passenger was female predict survived
if passenger['Sex'] == 'female':
predictions.append(1)
#If the passenger was not female and younger than 10 predict survived
elif passenger['Age'] < 10:
predictions.append(1)
#otherwise predict passenger died
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_2(data)
print accuracy_score(outcomes, predictions)
vs.survival_stats(data, outcomes, 'Age', ["Sex == 'female'", "Pclass == 3"])
def predictions_3(data):
"""Model with multiple features. Makes a prediction with an accuracy of at least 80%."""
predictions = []
for _, passenger in data.iterrows():
#If passenger was female predict survived
if passenger['Sex'] == 'female':
#If female was in 3rd class & above 20
if passenger['Pclass'] == 3 and passenger['Age'] > 20:
predictions.append(0)
#Else predict survived
else:
predictions.append(1)
elif passenger['Age'] < 10:
#If the passenger was not female, below 10 but in 3rd class predict died
if passenger['Pclass'] == 3:
predictions.append(0)
#Else predict survived
else:
predictions.append(1)
#otherwise predict passenger died
else:
predictions.append(0)
# Return our predictions
return pd.Series(predictions)
# Make the predictions
predictions = predictions_3(data)
print accuracy_score(outcomes, predictions)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Simulating 2D data for anomaly detection
Step3: Outlier detection algorithm
Step 1
Step4: Now let's plot the 'real' and 'synthetic' data to examine the properties of the 'synthetic' data
Step5: Now let's plot the marginal distributions of the 'real' and 'synthetic' data and make sure that they match for a given feature
Step7: Step 2
Step8: Let's plot the probability that an object, described by its coordinates (Feature 1, Feature 2), is classified as 'real' by the trained RF. This will give us a sense of the fitting that is done.
Step9: One can see that in the parameter range of Feature 1
Step11: In our case, we find that counting all the leaves, regardless of their prediction, is not optimal.
Step12: We defined the distance matrix to be 1 - the similarity matrix.
Step 4
Step13: We can see that for this dataset most of the objects have an average distance of 0.998 from each other, and outliers are objects with average distance that approaches 1.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
import numpy
import sklearn
from sklearn.preprocessing import Imputer
import matplotlib.pyplot as plt
mean = [50, 60]
cov = [[5,5],[100,200]]
x1,y1 = numpy.random.multivariate_normal(mean,cov,1000).T
mean = [65, 70]
cov = [[20,10],[2,10]]
x2,y2 = numpy.random.multivariate_normal(mean,cov,1000).T
# and additional noises
mean = [60, 60]
cov = [[100,0],[0,100]]
x3,y3 = numpy.random.multivariate_normal(mean,cov,200).T
# concatenate it all to a single vector
x_total = numpy.concatenate((x1, x2, x3))
y_total = numpy.concatenate((y1, y2, y3))
X = numpy.array([x_total, y_total]).T
# create object IDs that will be just integers
obj_ids = numpy.arange(len(x_total))
plt.rcParams['figure.figsize'] = 4, 4
plt.title("real data")
plt.plot(X[:, 0], X[:, 1], "ok", markersize=3)
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
def return_synthetic_data(X):
"""The function returns a matrix with the same dimensions as X but with synthetic data
based on the marginal distributions of its features"""
features = len(X[0])
X_syn = numpy.zeros(X.shape)
for i in xrange(features):
obs_vec = X[:,i]
syn_vec = numpy.random.choice(obs_vec, len(obs_vec)) # here we chose the synthetic data to match the marginal distribution of the real data
X_syn[:,i] += syn_vec
return X_syn
X_syn = return_synthetic_data(X)
plt.rcParams['figure.figsize'] = 8, 4
plt.subplot(1, 2, 1)
plt.title("real data")
plt.plot(X[:, 0], X[:, 1], "ok", markersize=3)
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.subplot(1, 2, 2)
plt.title("synthetic data")
plt.plot(X_syn[:, 0], X_syn[:, 1], "og", markersize=3)
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.tight_layout()
plt.rcParams['figure.figsize'] = 8, 6
plt.subplot(2, 2, 1)
plt.title("real data")
tmp = plt.hist(X[:, 0], bins=numpy.linspace(0, 140, 20), color="k", label="Feature 1")
plt.legend(loc="best")
plt.xlabel("Feature 1")
plt.ylabel("N")
plt.subplot(2, 2, 2)
plt.title("real data")
tmp = plt.hist(X[:, 1], bins=numpy.linspace(0, 140, 20), color="k", label="Feature 2")
plt.legend(loc="best")
plt.xlabel("Feature 2")
plt.ylabel("N")
plt.subplot(2, 2, 3)
plt.title("synthetic data")
tmp = plt.hist(X_syn[:, 0], bins=numpy.linspace(0, 140, 20), color="g", label="Feature 1")
plt.legend(loc="best")
plt.xlabel("Feature 1")
plt.ylabel("N")
plt.subplot(2, 2, 4)
plt.title("synthetic data")
tmp = plt.hist(X_syn[:, 1], bins=numpy.linspace(0, 140, 20), color="g", label="Feature 2")
plt.legend(loc="best")
plt.xlabel("Feature 2")
plt.ylabel("N")
plt.tight_layout()
def merge_work_and_synthetic_samples(X, X_syn):
"""The function merges the data into one sample, giving the label "1" to the real data and label "2" to the synthetic data"""
# build the labels vector
Y = numpy.ones(len(X))
Y_syn = numpy.ones(len(X_syn)) * 2
Y_total = numpy.concatenate((Y, Y_syn))
X_total = numpy.concatenate((X, X_syn))
return X_total, Y_total
X_total, Y_total = merge_work_and_synthetic_samples(X, X_syn)
# declare an RF
N_TRAIN = 500 # number of trees in the forest
rand_f = sklearn.ensemble.RandomForestClassifier(n_estimators=N_TRAIN)
rand_f.fit(X_total, Y_total)
xx, yy = numpy.meshgrid(numpy.linspace(0, 140, 100), numpy.linspace(0, 140, 100))
Z = rand_f.predict_proba(numpy.c_[xx.ravel(), yy.ravel()])[:, 0]
Z = Z.reshape(xx.shape)
plt.rcParams['figure.figsize'] = 6, 6
plt.pcolormesh(xx, yy, Z, cmap='viridis')
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.colorbar()
plt.title("Probability map for Class I (real)")
plt.xticks((0, 20, 40, 60, 80, 100, 120))
plt.yticks((0, 20, 40, 60, 80, 100, 120))
plt.xlim(0, 140)
plt.ylim(0, 140)
plt.tight_layout()
import io
import base64
from IPython.display import HTML
video = io.open('rf_unsup_example.m4v', 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
def build_similarity_matrix(rand_f, X):
"""The function builds the similarity matrix based on the feature matrix X for the results Y
based on the random forest we've trained
the matrix is normalised so that the biggest similarity is 1 and the lowest is 0
This function counts only leaves in which the object is classified as a "real" object
it is also implemented to optimize running time, assuming one has enough running memory"""
# apply to get the leaf indices
apply_mat = rand_f.apply(X)
# find the predictions of the sample
is_good_matrix = numpy.zeros(apply_mat.shape)
for i, est in enumerate(rand_f.estimators_):
d = est.predict_proba(X)[:, 0] == 1
is_good_matrix[:, i] = d
# mark leaves that make the wrong prediction as -1, in order to remove them from the distance measurement
apply_mat[is_good_matrix == False] = -1
# now calculate the similarity matrix
sim_mat = numpy.sum((apply_mat[:, None] == apply_mat[None, :]) & (apply_mat[:, None] != -1) & (apply_mat[None, :] != -1), axis=2) / numpy.asfarray(numpy.sum([apply_mat != -1], axis=2), dtype='float')
return sim_mat
sim_mat = build_similarity_matrix(rand_f, X)
dis_mat = 1 - sim_mat
sum_vec = numpy.sum(dis_mat, axis=1)
sum_vec /= float(len(sum_vec))
plt.rcParams['figure.figsize'] = 6, 4
plt.title("Weirdness score histogram")
tmp = plt.hist(sum_vec, bins=60, color="g")
plt.ylabel("N")
plt.xlabel("weirdness score")
N_outliers = 200
sum_vec_outliers = numpy.sort(sum_vec)[::-1][:N_outliers]
obj_ids_outliers = obj_ids[numpy.argsort(sum_vec)][::-1][:N_outliers]
plt.rcParams['figure.figsize'] = 5, 5
plt.title("Data and outliers")
plt.plot(X[:,0], X[:,1], "ok", label="input daya", markersize=4)
plt.plot(X[obj_ids_outliers, 0], X[obj_ids_outliers, 1], "om", label="outliers", markersize=4)
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.legend(loc="best")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: PyStan
Step2: Fit model
Step3: MAP
Step4: MCMC
Step5: Loading from a file
Step6: We can convert to a DataFrame if necessary
Step8: Estimating mean and standard deviation of normal distribution
Step9: Optimization (finding MAP)
Step10: Reusing fitted objects
Step13: Saving compiled models
Step15: Estimating parameters of a linear regression model
Step17: Simple Logistic model
|
<ASSISTANT_TASK:>
Python Code:
coin_code = """
data {
int<lower=0> n; // number of tosses
int<lower=0> y; // number of heads
}
transformed data {}
parameters {
real<lower=0, upper=1> p;
}
transformed parameters {}
model {
p ~ beta(2, 2);
y ~ binomial(n, p);
}
generated quantities {}
"""
coin_dat = {
'n': 100,
'y': 61,
}
sm = pystan.StanModel(model_code=coin_code)
op = sm.optimizing(data=coin_dat)
op
fit = sm.sampling(data=coin_dat)
print(fit)
coin_dict = fit.extract()
coin_dict.keys()
# lp_ is the log posterior
df = pd.DataFrame(coin_dict)
df.head(3)
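# (Added illustration) Posterior mean of p from the MCMC draws, to compare with the
# MAP estimate returned by sm.optimizing() above.
print(coin_dict['p'].mean())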
fit.plot('p');
plt.tight_layout()
norm_code = """
data {
int<lower=0> n;
real y[n];
}
transformed data {}
parameters {
real<lower=0, upper=100> mu;
real<lower=0, upper=10> sigma;
}
transformed parameters {}
model {
y ~ normal(mu, sigma);
}
generated quantities {}
"""
norm_dat = {
'n': 100,
'y': np.random.normal(10, 2, 100),
}
fit = pystan.stan(model_code=norm_code, data=norm_dat, iter=1000, chains=1)
fit
trace = fit.extract()
plt.figure(figsize=(10,4))
plt.subplot(1,2,1);
plt.hist(trace['mu'][:], 25, histtype='step');
plt.subplot(1,2,2);
plt.hist(trace['sigma'][:], 25, histtype='step');
sm = pystan.StanModel(model_code=norm_code)
op = sm.optimizing(data=norm_dat)
op
new_dat = {
'n': 100,
'y': np.random.normal(10, 2, 100),
}
fit2 = pystan.stan(fit=fit, data=new_dat, chains=1)
fit2
def save(obj, filename):
"""Save compiled models for reuse."""
import pickle
with open(filename, 'wb') as f:
pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)
def load(filename):
"""Reload compiled models for reuse."""
import pickle
return pickle.load(open(filename, 'rb'))
model = pystan.StanModel(model_code=norm_code)
save(model, 'norm_model.pic')
new_model = load('norm_model.pic')
fit4 = new_model.sampling(new_dat, chains=1)
fit4
lin_reg_code = """
data {
int<lower=0> n;
real x[n];
real y[n];
}
transformed data {}
parameters {
real a;
real b;
real sigma;
}
transformed parameters {
real mu[n];
for (i in 1:n) {
mu[i] <- a*x[i] + b;
}
}
model {
sigma ~ uniform(0, 20);
y ~ normal(mu, sigma);
}
generated quantities {}
"""
n = 11
_a = 6
_b = 2
x = np.linspace(0, 1, n)
y = _a*x + _b + np.random.randn(n)
lin_reg_dat = {
'n': n,
'x': x,
'y': y
}
fit = pystan.stan(model_code=lin_reg_code, data=lin_reg_dat, iter=1000, chains=1)
fit
fit.plot(['a', 'b']);
plt.tight_layout()
# observed data
df = pd.read_csv('HtWt.csv')
df.head()
log_reg_code = """
data {
int<lower=0> n;
int male[n];
real weight[n];
real height[n];
}
transformed data {}
parameters {
real a;
real b;
real c;
}
transformed parameters {}
model {
a ~ normal(0, 10);
b ~ normal(0, 10);
c ~ normal(0, 10);
for(i in 1:n) {
male[i] ~ bernoulli(inv_logit(a*weight[i] + b*height[i] + c));
}
}
generated quantities {}
"""
log_reg_dat = {
'n': len(df),
'male': df.male,
'height': df.height,
'weight': df.weight
}
fit = pystan.stan(model_code=log_reg_code, data=log_reg_dat, iter=1000, chains=4)
fit
df_trace = pd.DataFrame(fit.extract(['c', 'b', 'a']))
pd.scatter_matrix(df_trace[:], diagonal='kde');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: All the different models in scikit-learn follow a consistent structure.
Step2: Although this single number might seem unimpressive, metrics are a key component for model evaluation. As a simple example, we can perform a permutation test to determine whether we might see this performance by chance.
Step3: Training, validation, and test datasets
Step4: Both these models appear to give perfect solutions but all they do is map our test samples back to the training samples and return the associated value.
Step5: Model types
Step6: There is an expanded example in the documentation.
Step7: Exercises
Step8: Choose a metric
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
from sklearn import datasets
diabetes = datasets.load_diabetes()
# Description at http://www4.stat.ncsu.edu/~boos/var.select/diabetes.html
X = diabetes.data
y = diabetes.target
print(X.shape, y.shape)
from sklearn import linear_model
clf = linear_model.LinearRegression()
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = linear_model.LinearRegression()
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
from sklearn import metrics
metrics.mean_squared_error(y, clf.predict(X))
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = linear_model.LinearRegression()
clf.fit(X, y)
error = metrics.mean_squared_error(y, clf.predict(X))
rounds = 1000
np.random.seed(0)
errors = []
for i in range(rounds):
y_shuffle = y.copy()
np.random.shuffle(y_shuffle)
clf_shuffle = linear_model.LinearRegression()
clf_shuffle.fit(X, y_shuffle)
errors.append(metrics.mean_squared_error(y_shuffle, clf_shuffle.predict(X)))
better_models_by_chance = len([i for i in errors if i <= error])
if better_models_by_chance > 0:
print('Probability of observing a mean_squared_error of {0} by chance is {1}'.format(error,
better_models_by_chance / rounds))
else:
print('Probability of observing a mean_squared_error of {0} by chance is <{1}'.format(error,
1 / rounds))
from sklearn import tree
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = tree.DecisionTreeRegressor()
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
metrics.mean_squared_error(y, clf.predict(X))
from sklearn import neighbors
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
clf = neighbors.KNeighborsRegressor(n_neighbors=1)
clf.fit(X, y)
plt.plot(y, clf.predict(X), 'k.')
plt.show()
metrics.mean_squared_error(y, clf.predict(X))
from sklearn import neighbors
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
np.random.seed(0)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
y_train = y[split]
X_test = X[np.logical_not(split)]
y_test = y[np.logical_not(split)]
print(X_train.shape, X_test.shape)
clf = neighbors.KNeighborsRegressor(1)
clf.fit(X_train, y_train)
plt.plot(y_test, clf.predict(X_test), 'k.')
plt.show()
metrics.mean_squared_error(y_test, clf.predict(X_test))
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
np.random.seed(0)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
y_train = y[split]
X_test = X[np.logical_not(split)]
y_test = y[np.logical_not(split)]
print(X_train.shape, X_test.shape)
clf = linear_model.LinearRegression()
clf.fit(X_train, y_train)
plt.plot(y_test, clf.predict(X_test), 'k.')
plt.show()
metrics.mean_squared_error(y_test, clf.predict(X_test))
from sklearn import datasets
diabetes = datasets.load_diabetes()
# Description at http://www4.stat.ncsu.edu/~boos/var.select/diabetes.html
X = diabetes.data
y = diabetes.target
print(X.shape, y.shape)
from sklearn import linear_model
clf = linear_model.LassoCV(cv=20)
clf.fit(X, y)
print('Alpha chosen was ', clf.alpha_)
plt.plot(y, clf.predict(X), 'k.')
from sklearn import grid_search
from sklearn import neighbors
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
np.random.seed(0)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
y_train = y[split]
X_test = X[np.logical_not(split)]
y_test = y[np.logical_not(split)]
print(X_train.shape, X_test.shape)
knn = neighbors.KNeighborsRegressor()
parameters = {'n_neighbors':[1,2,3,4,5,6,7,8,9,10]}
clf = grid_search.GridSearchCV(knn, parameters)
clf.fit(X_train, y_train)
plt.plot(y_test, clf.predict(X_test), 'k.')
plt.show()
print(metrics.mean_squared_error(y_test, clf.predict(X_test)))
clf.get_params()
from sklearn import datasets, metrics, ensemble, cross_validation
import numpy as np
np.random.seed(0)
digits = datasets.load_digits()
X = digits.data
y = digits.target
print(X.shape, y.shape)
split = np.random.random(y.shape) > 0.3
X_train = X[split]
X_test = X[np.logical_not(split)]
y_train = y[split]
y_test = y[np.logical_not(split)]
scores = []
cv = 10
for _ in range(cv):
split = np.random.random(y_train.shape) > 1/cv
X_train_train = X_train[split]
y_train_train = y_train[split]
X_val = X_train[np.logical_not(split)]
y_val = y_train[np.logical_not(split)]
rfc = ensemble.RandomForestClassifier(n_estimators=100)
rfc.fit(X_train_train, y_train_train)
scores.append(metrics.accuracy_score(y_val, rfc.predict(X_val)))
print(scores, np.array(scores).mean())
# use cv method from sklearn
rfc = ensemble.RandomForestClassifier(n_estimators=100)
scores = cross_validation.cross_val_score(rfc,
digits.data,
digits.target,
cv=10)
print(scores)
# support vector machines
from sklearn import svm
from sklearn import grid_search
clf = svm.SVC()
parameters = {'C': [1, 0.1, 0.001, 0.0001, 0.00001],
'kernel':['linear', 'poly', 'rbf', 'sigmoid']}
clf = grid_search.GridSearchCV(clf, parameters)
clf.fit(X_train, y_train)
metrics.accuracy_score(y_test, clf.predict(X_test))
metrics.confusion_matrix(y_test, clf.predict(X_test))
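# Optional extra (illustrative addition): a per-class precision/recall summary
# complements the confusion matrix above.
print(metrics.classification_report(y_test, clf.predict(X_test)))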
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import scipy.stats
a = np.random.randn(40)
b = 4*np.random.randn(50)
_, p_value = scipy.stats.ttest_ind(a, b, equal_var = False)
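# Illustrative interpretation (added): compare the p-value against a conventional
# 0.05 significance level; the threshold choice is an assumption, not part of the task.
alpha = 0.05
print("Significant difference at alpha=%.2f: %s" % (alpha, p_value < alpha))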
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Recent MATLAB files are just HDF5, which we can read with PyTables
Step2: Grab the filtered water levels on the grid in FRF coordinates
Step3: The data file has NaNs where it is unreliable; find the index for roughly 105 m offshore
|
<ASSISTANT_TASK:>
Python Code:
%ls lidar
import tables
lineData = tables.openFile(r"lidar/20150927-0000-01.FRFNProp.line.data.mat","r")
#science = tables.openFile(r"lidar/20150927-0000-01.FRFNProp.line.science.mat","r")
for f in lineData.root:
for g in f:
print g
z = lineData.root.lineGriddedFilteredData.waterGridFiltered[:]
x = lineData.root.lineCoredat.downLineX[:]
%matplotlib notebook
from matplotlib import pyplot as plt
fig = plt.figure()
plt.plot(x,z[:,5000])
import numpy as np
xfinite = x[:]
xfinite[np.isnan(x)] = -1000.0
i105 = np.where(xfinite > 105.0)[0][0]
print i105
fig = plt.figure()
plt.plot(lineData.root.lineCoredat.tGPSVector[:500],z[i105,:500])
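# Illustrative summary (added): basic statistics of the filtered water level at the
# ~105 m cross-shore location, ignoring the NaNs that mark unreliable returns.
print np.nanmean(z[i105, :]), np.nanstd(z[i105, :])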
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set a valid date frame for building the network.
Step2: Filter data according to date frame and export to .gexf file
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from bigbang.archive import Archive
from bigbang.archive import load as load_archive
import bigbang.parse as parse
import bigbang.analysis.graph as graph
import bigbang.ingress.mailman as mailman
import bigbang.analysis.process as process
import networkx as nx
import matplotlib.pyplot as plt
import pandas as pd
from pprint import pprint as pp
import pytz
import os
#Insert a list of archive names
archives_names = ["6lo"]
cwd = os.getcwd()
archives_paths = list()
for archive_name in archives_names:
archives_paths.append('../../archives/'+archive_name+'.csv')
archives_list = [load_archive(archive_path).data for archive_path in archives_paths]
archives = Archive(pd.concat(archives_list))
archives_data = archives.data
#The oldest and most recent dates for the whole mailing list are displayed, so you won't set an invalid time frame
print(archives_data['Date'].min())
print(archives_data['Date'].max())
#set the date frame
date_from = pd.datetime(2000,11,1,tzinfo=pytz.utc)
date_to = pd.datetime(2111,12,1,tzinfo=pytz.utc)
def filter_by_date(df,d_from,d_to):
return df[(df['Date'] > d_from) & (df['Date'] < d_to)]
#create filtered network
archives_data_filtered = filter_by_date(archives_data, date_from, date_to)
network = graph.messages_to_interaction_graph(archives_data_filtered)
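# Quick sanity check (illustrative addition): report the size of the interaction
# graph before exporting it for Gephi.
print(network.number_of_nodes(), 'nodes /', network.number_of_edges(), 'edges')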
#export the network in a format that you can open in Gephi.
#insert a file name
file_name = 'architecture_discuss_for_gephi.gexf'
nx.write_gexf(network, os.path.join(cwd, file_name))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Make a list of files to mosaic using glob.glob, and print the result. In this example, we are selecting all files ending with _aspect.tif in the folder TEAK_Aspect_Tiles. Note that you will need to change this filepath according to your local machine.
Step2: In order to run the gdal_merge function, we need these files as a series of strings. We can get them in the correct format using join
Step3: Now that we have the list of files we want to mosaic, we can run a system command to combine them into one raster.
Step4: Great! It looks like GDAL merged the files together into the TEAK_Aspect_Mosaic.tif file. It is worth pointing out that the gdal_merge function has a LOT of options and is extremely powerful and flexible; we suggest that you read through its documentation to explore all of its capabilities.
Step5: We can call this function as follows
Step6: Look at the size of the mosaicked tile using .shape. Since we created a mosaic of four 1000m x 1000m tiles, we expect the new tile to be 2000m x 2000m
Step7: Let's take a look at the contents of the metadata dictionary
Step8: Load the function plot_array to plot the array
Step9: Finally, let's take a look at a plot of the tile mosaic
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import os, glob
from osgeo import gdal
files_to_mosaic = glob.glob('/Users/olearyd/Git/data/TEAK_Aspect_Tiles/*_aspect.tif')
files_to_mosaic
files_string = " ".join(files_to_mosaic)
print(files_string)
command = "gdal_merge.py -o /Users/olearyd/Git/data/TEAK_Aspect_Tiles/TEAK_Aspect_Mosaic.tif -of gtiff " + files_string
print(os.popen(command).read())
print(os.popen('ls /Users/olearyd/Git/data/TEAK_Aspect_Tiles/').read())
def raster2array(geotif_file):
metadata = {}
dataset = gdal.Open(geotif_file)
metadata['array_rows'] = dataset.RasterYSize
metadata['array_cols'] = dataset.RasterXSize
metadata['bands'] = dataset.RasterCount
metadata['driver'] = dataset.GetDriver().LongName
metadata['projection'] = dataset.GetProjection()
metadata['geotransform'] = dataset.GetGeoTransform()
mapinfo = dataset.GetGeoTransform()
metadata['pixelWidth'] = mapinfo[1]
metadata['pixelHeight'] = mapinfo[5]
xMin = mapinfo[0]
xMax = mapinfo[0] + dataset.RasterXSize/mapinfo[1]
yMin = mapinfo[3] + dataset.RasterYSize/mapinfo[5]
yMax = mapinfo[3]
metadata['extent'] = (xMin,xMax,yMin,yMax)
raster = dataset.GetRasterBand(1)
array_shape = raster.ReadAsArray(0,0,metadata['array_cols'],metadata['array_rows']).astype(np.float).shape
metadata['noDataValue'] = raster.GetNoDataValue()
metadata['scaleFactor'] = raster.GetScale()
array = np.zeros((array_shape[0],array_shape[1],dataset.RasterCount),'uint8') #pre-allocate stackedArray matrix
if metadata['bands'] == 1:
raster = dataset.GetRasterBand(1)
metadata['noDataValue'] = raster.GetNoDataValue()
metadata['scaleFactor'] = raster.GetScale()
array = dataset.GetRasterBand(1).ReadAsArray(0,0,metadata['array_cols'],metadata['array_rows']).astype(np.float)
#array[np.where(array==metadata['noDataValue'])]=np.nan
array = array/metadata['scaleFactor']
elif metadata['bands'] > 1:
for i in range(1, dataset.RasterCount+1):
band = dataset.GetRasterBand(i).ReadAsArray(0,0,metadata['array_cols'],metadata['array_rows']).astype(np.float)
#band[np.where(band==metadata['noDataValue'])]=np.nan
band = band/metadata['scaleFactor']
array[...,i-1] = band
return array, metadata
TEAK_aspect_array, TEAK_aspect_metadata = raster2array('/Users/olearyd/Git/data/TEAK_Aspect_Tiles/TEAK_Aspect_Mosaic.tif')
TEAK_aspect_array.shape
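# Illustrative check (added): aspect values should fall within 0-360 degrees.
print('min aspect: %.1f, max aspect: %.1f' % (np.nanmin(TEAK_aspect_array), np.nanmax(TEAK_aspect_array)))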
#print metadata in alphabetical order
for item in sorted(TEAK_aspect_metadata):
print(item + ':', TEAK_aspect_metadata[item])
def plot_array(array,spatial_extent,colorlimit,ax=plt.gca(),title='',cmap_title='',colormap=''):
plot = plt.imshow(array,extent=spatial_extent,clim=colorlimit);
cbar = plt.colorbar(plot,aspect=40); plt.set_cmap(colormap);
cbar.set_label(cmap_title,rotation=90,labelpad=20);
plt.title(title); ax = plt.gca();
ax.ticklabel_format(useOffset=False, style='plain');
rotatexlabels = plt.setp(ax.get_xticklabels(),rotation=90);
plot_array(TEAK_aspect_array,
TEAK_aspect_metadata['extent'],
(0,360),
title='TEAK Aspect',
cmap_title='Aspect, degrees',
colormap='jet')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Prediction on toy data
Step3: Generating Kernel weights
Step4: Binary classification using MKL
Step5: To justify the weights, let's train and compare the two subkernels against the MKL classification output. Training an MKL classifier with a single kernel appended to a combined kernel makes no sense and is just like normal single-kernel classification, but let's do it for comparison.
Step6: As we can see, the multiple-kernel output seems just about right. Kernel 1 gives a somewhat overfitted output, while kernel 2 seems not so accurate. The kernel weights are hence adjusted so as to get a refined output. We can have a look at the errors of these subkernels for more food for thought. Most of the time the MKL error is lower, as MKL incorporates aspects of both kernels: one of them is strict while the other is lenient, and MKL finds a balance between the two.
Step7: MKL for knowledge discovery
Step8: These are the types of circles we want to distinguish between. We can try classification with a constant separation between the circles first.
Step9: As we can see, the MKL classifier classifies them as expected. Now let's vary the separation and see how it affects the weights. The choice of the kernel width of the Gaussian kernel used for classification is expected to depend on the separation distance of the learning problem. An increased distance between the circles will correspond to a larger optimal kernel width. This effect should be visible in the results of the MKL, where we used MKL-SVMs with four kernels with different widths (1,5,7,10).
Step10: In the above plot we see the kernel weightings obtained for the four kernels. Every line shows one weighting. The course of the kernel weightings reflects how the learning problem develops as the separation changes
Step11: Let's plot five of the examples to get a feel of the dataset.
Step12: We combine a Gaussian kernel and a PolyKernel. To test, examples not included in training data are used.
Step13: The misclassified examples are surely pretty tough to predict. As seen from the accuracy, MKL seems to work a shade better in this case. One could also try this out with more and different types of kernels.
Step14: Now that everything is initialized, let's see MKLOneclass in action by applying it on the test data and on the X-Y grid.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
%matplotlib inline
# import all shogun classes
from modshogun import *
kernel = CombinedKernel()
num=30;
num_components=4
means=zeros((num_components, 2))
means[0]=[-1,1]
means[1]=[2,-1.5]
means[2]=[-1,-3]
means[3]=[2,1]
covs=array([[1.0,0.0],[0.0,1.0]])
gmm=GMM(num_components)
[gmm.set_nth_mean(means[i], i) for i in range(num_components)]
[gmm.set_nth_cov(covs,i) for i in range(num_components)]
gmm.set_coef(array([1.0,0.0,0.0,0.0]))
xntr=array([gmm.sample() for i in xrange(num)]).T
xnte=array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(array([0.0,1.0,0.0,0.0]))
xntr1=array([gmm.sample() for i in xrange(num)]).T
xnte1=array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(array([0.0,0.0,1.0,0.0]))
xptr=array([gmm.sample() for i in xrange(num)]).T
xpte=array([gmm.sample() for i in xrange(5000)]).T
gmm.set_coef(array([0.0,0.0,0.0,1.0]))
xptr1=array([gmm.sample() for i in xrange(num)]).T
xpte1=array([gmm.sample() for i in xrange(5000)]).T
traindata=concatenate((xntr,xntr1,xptr,xptr1), axis=1)
trainlab=concatenate((-ones(2*num), ones(2*num)))
testdata=concatenate((xnte,xnte1,xpte,xpte1), axis=1)
testlab=concatenate((-ones(10000), ones(10000)))
#convert to shogun features and generate labels for data
feats_train=RealFeatures(traindata)
labels=BinaryLabels(trainlab)
_=jet()
figure(figsize(18,5))
subplot(121)
# plot train data
_=scatter(traindata[0,:], traindata[1,:], c=trainlab, s=100)
title('Toy data for classification')
axis('equal')
colors=["blue","blue","red","red"]
# a tool for visualisation
from matplotlib.patches import Ellipse
def get_gaussian_ellipse_artist(mean, cov, nstd=1.96, color="red", linewidth=3):
vals, vecs = eigh(cov)
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]
theta = numpy.degrees(arctan2(*vecs[:, 0][::-1]))
width, height = 2 * nstd * sqrt(vals)
e = Ellipse(xy=mean, width=width, height=height, angle=theta, \
edgecolor=color, fill=False, linewidth=linewidth)
return e
for i in range(num_components):
gca().add_artist(get_gaussian_ellipse_artist(means[i], covs, color=colors[i]))
width0=0.5
kernel0=GaussianKernel(feats_train, feats_train, width0)
width1=25
kernel1=GaussianKernel(feats_train, feats_train, width1)
#combine kernels
kernel.append_kernel(kernel0)
kernel.append_kernel(kernel1)
kernel.init(feats_train, feats_train)
mkl = MKLClassification()
#set the norm, weights sum to 1.
mkl.set_mkl_norm(1)
mkl.set_C(1, 1)
mkl.set_kernel(kernel)
mkl.set_labels(labels)
#train to get weights
mkl.train()
w=kernel.get_subkernel_weights()
print w
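# Illustrative check (added): with an L1 MKL norm the learned subkernel weights
# should sum to approximately one.
print "sum of kernel weights:", sum(w)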
size=100
x1=linspace(-5, 5, size)
x2=linspace(-5, 5, size)
x, y=meshgrid(x1, x2)
#Generate X-Y grid test data
grid=RealFeatures(array((ravel(x), ravel(y))))
kernel0t=GaussianKernel(feats_train, grid, width0)
kernel1t=GaussianKernel(feats_train, grid, width1)
kernelt=CombinedKernel()
kernelt.append_kernel(kernel0t)
kernelt.append_kernel(kernel1t)
#initailize with test grid
kernelt.init(feats_train, grid)
mkl.set_kernel(kernelt)
#prediction
grid_out=mkl.apply()
z=grid_out.get_values().reshape((size, size))
figure(figsize=(10,5))
title("Classification using MKL")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
z=grid_out.get_labels().reshape((size, size))
# MKL
figure(figsize=(20,5))
subplot(131, title="Multiple Kernels combined")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
comb_ker0=CombinedKernel()
comb_ker0.append_kernel(kernel0)
comb_ker0.init(feats_train, feats_train)
mkl.set_kernel(comb_ker0)
mkl.train()
comb_ker0t=CombinedKernel()
comb_ker0t.append_kernel(kernel0)
comb_ker0t.init(feats_train, grid)
mkl.set_kernel(comb_ker0t)
out0=mkl.apply()
# subkernel 1
z=out0.get_labels().reshape((size, size))
subplot(132, title="Kernel 1")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
comb_ker1=CombinedKernel()
comb_ker1.append_kernel(kernel1)
comb_ker1.init(feats_train, feats_train)
mkl.set_kernel(comb_ker1)
mkl.train()
comb_ker1t=CombinedKernel()
comb_ker1t.append_kernel(kernel1)
comb_ker1t.init(feats_train, grid)
mkl.set_kernel(comb_ker1t)
out1=mkl.apply()
# subkernel 2
z=out1.get_labels().reshape((size, size))
subplot(133, title="kernel 2")
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
kernelt.init(feats_train, RealFeatures(testdata))
mkl.set_kernel(kernelt)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print "Test error is %2.2f%% :MKL" % (100*evaluator.evaluate(out,BinaryLabels(testlab)))
comb_ker0t.init(feats_train,RealFeatures(testdata))
mkl.set_kernel(comb_ker0t)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print "Test error is %2.2f%% :Subkernel1"% (100*evaluator.evaluate(out,BinaryLabels(testlab)))
comb_ker1t.init(feats_train, RealFeatures(testdata))
mkl.set_kernel(comb_ker1t)
out=mkl.apply()
evaluator=ErrorRateMeasure()
print "Test error is %2.2f%% :subkernel2" % (100*evaluator.evaluate(out,BinaryLabels(testlab)))
def circle(x, radius, neg):
y=sqrt(square(radius)-square(x))
if neg:
return[x, -y]
else:
return [x,y]
def get_circle(radius):
neg=False
range0=linspace(-radius,radius,100)
pos_a=array([circle(i, radius, neg) for i in range0]).T
neg=True
neg_a=array([circle(i, radius, neg) for i in range0]).T
c=concatenate((neg_a,pos_a), axis=1)
return c
def get_data(r1, r2):
c1=get_circle(r1)
c2=get_circle(r2)
c=concatenate((c1, c2), axis=1)
feats_tr=RealFeatures(c)
return c, feats_tr
l=concatenate((-ones(200),ones(200)))
lab=BinaryLabels(l)
#get two circles with radius 2 and 4
c, feats_tr=get_data(2,4)
c1, feats_tr1=get_data(2,3)
_=gray()
figure(figsize=(10,5))
subplot(121)
title("Circles with different separation")
p=scatter(c[0,:], c[1,:], c=lab)
subplot(122)
q=scatter(c1[0,:], c1[1,:], c=lab)
def train_mkl(circles, feats_tr):
#Four kernels with different widths
kernel0=GaussianKernel(feats_tr, feats_tr, 1)
kernel1=GaussianKernel(feats_tr, feats_tr, 5)
kernel2=GaussianKernel(feats_tr, feats_tr, 7)
kernel3=GaussianKernel(feats_tr, feats_tr, 10)
kernel = CombinedKernel()
kernel.append_kernel(kernel0)
kernel.append_kernel(kernel1)
kernel.append_kernel(kernel2)
kernel.append_kernel(kernel3)
kernel.init(feats_tr, feats_tr)
mkl = MKLClassification()
mkl.set_mkl_norm(1)
mkl.set_C(1, 1)
mkl.set_kernel(kernel)
mkl.set_labels(lab)
mkl.train()
w=kernel.get_subkernel_weights()
return w, mkl
def test_mkl(mkl, grid):
kernel0t=GaussianKernel(feats_tr, grid, 1)
kernel1t=GaussianKernel(feats_tr, grid, 5)
kernel2t=GaussianKernel(feats_tr, grid, 7)
kernel3t=GaussianKernel(feats_tr, grid, 10)
kernelt = CombinedKernel()
kernelt.append_kernel(kernel0t)
kernelt.append_kernel(kernel1t)
kernelt.append_kernel(kernel2t)
kernelt.append_kernel(kernel3t)
kernelt.init(feats_tr, grid)
mkl.set_kernel(kernelt)
out=mkl.apply()
return out
size=50
x1=linspace(-10, 10, size)
x2=linspace(-10, 10, size)
x, y=meshgrid(x1, x2)
grid=RealFeatures(array((ravel(x), ravel(y))))
w, mkl=train_mkl(c, feats_tr)
print w
out=test_mkl(mkl,grid)
z=out.get_values().reshape((size, size))
figure(figsize=(5,5))
c=pcolor(x, y, z)
_=contour(x, y, z, linewidths=1, colors='black', hold=True)
title('classification with constant separation')
_=colorbar(c)
range1=linspace(5.5,7.5,50)
x=linspace(1.5,3.5,50)
temp=[]
for i in range1:
#vary separation between circles
c, feats=get_data(4,i)
w, mkl=train_mkl(c, feats)
temp.append(w)
y=array([temp[i] for i in range(0,50)]).T
figure(figsize=(20,5))
_=plot(x, y[0,:], color='k', linewidth=2)
_=plot(x, y[1,:], color='r', linewidth=2)
_=plot(x, y[2,:], color='g', linewidth=2)
_=plot(x, y[3,:], color='y', linewidth=2)
title("Comparison between kernel widths and weights")
ylabel("Weight")
xlabel("Distance between circles")
_=legend(["1","5","7","10"])
from scipy.io import loadmat, savemat
from os import path, sep
mat = loadmat(sep.join(['..','..','..','data','multiclass', 'usps.mat']))
Xall = mat['data']
Yall = array(mat['label'].squeeze(), dtype=double)
# map from 1..10 to 0..9, since shogun
# requires multiclass labels to be
# 0, 1, ..., K-1
Yall = Yall - 1
random.seed(0)
subset = random.permutation(len(Yall))
#get first 1000 examples
Xtrain = Xall[:, subset[:1000]]
Ytrain = Yall[subset[:1000]]
Nsplit = 2
all_ks = range(1, 21)
print Xall.shape
print Xtrain.shape
def plot_example(dat, lab):
for i in xrange(5):
ax=subplot(1,5,i+1)
title(int(lab[i]))
ax.imshow(dat[:,i].reshape((16,16)), interpolation='nearest')
ax.set_xticks([])
ax.set_yticks([])
_=figure(figsize=(17,6))
gray()
plot_example(Xtrain, Ytrain)
# MKL training and output
labels = MulticlassLabels(Ytrain)
feats = RealFeatures(Xtrain)
#get test data from 5500 onwards
Xrem=Xall[:,subset[5500:]]
Yrem=Yall[subset[5500:]]
#test features not used in training
feats_rem=RealFeatures(Xrem)
labels_rem=MulticlassLabels(Yrem)
kernel = CombinedKernel()
feats_train = CombinedFeatures()
feats_test = CombinedFeatures()
#append gaussian kernel
subkernel = GaussianKernel(10,15)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = RealFeatures(Xtrain)
subkernel = PolyKernel(10,2)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_rem)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
mkl = MKLMulticlass(1.2, kernel, labels)
mkl.set_epsilon(1e-2)
mkl.set_mkl_epsilon(0.001)
mkl.set_mkl_norm(1)
mkl.train()
#initialize with test features
kernel.init(feats_train, feats_test)
out = mkl.apply()
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print "Accuracy = %2.2f%%" % (100*accuracy)
idx=where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
w=kernel.get_subkernel_weights()
print w
# Single kernel:PolyKernel
C=1
pk=PolyKernel(10,2)
svm=GMNPSVM(C, pk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print "Accuracy = %2.2f%%" % (100*accuracy)
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
#Single Kernel:Gaussian kernel
width=15
C=1
gk=GaussianKernel()
gk.set_width(width)
svm=GMNPSVM(C, gk, labels)
_=svm.train(feats)
out=svm.apply(feats_rem)
evaluator = MulticlassAccuracy()
accuracy = evaluator.evaluate(out, labels_rem)
print "Accuracy = %2.2f%%" % (100*accuracy)
idx=np.where(out.get_labels() != Yrem)[0]
Xbad=Xrem[:,idx]
Ybad=Yrem[idx]
_=figure(figsize=(17,6))
gray()
plot_example(Xbad, Ybad)
X = -0.3 * random.randn(100,2)
traindata=r_[X + 2, X - 2].T
X = -0.3 * random.randn(20, 2)
testdata = r_[X + 2, X - 2].T
trainlab=concatenate((ones(99),-ones(1)))
#convert to shogun features and generate labels for data
feats=RealFeatures(traindata)
labels=BinaryLabels(trainlab)
xx, yy = meshgrid(linspace(-5, 5, 500), linspace(-5, 5, 500))
grid=RealFeatures(array((ravel(xx), ravel(yy))))
#test features
feats_t=RealFeatures(testdata)
x_out=(random.uniform(low=-4, high=4, size=(20, 2))).T
feats_out=RealFeatures(x_out)
kernel=CombinedKernel()
feats_train=CombinedFeatures()
feats_test=CombinedFeatures()
feats_test_out=CombinedFeatures()
feats_grid=CombinedFeatures()
#append gaussian kernel
subkernel=GaussianKernel(10,8)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
#append PolyKernel
feats = RealFeatures(traindata)
subkernel = PolyKernel(10,3)
feats_train.append_feature_obj(feats)
feats_test.append_feature_obj(feats_t)
feats_test_out.append_feature_obj(feats_out)
feats_grid.append_feature_obj(grid)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
mkl = MKLOneClass()
mkl.set_kernel(kernel)
mkl.set_labels(labels)
mkl.set_interleaved_optimization_enabled(False)
mkl.set_epsilon(1e-2)
mkl.set_mkl_epsilon(0.1)
mkl.set_mkl_norm(1)
mkl.train()
print "Weights:"
w=kernel.get_subkernel_weights()
print w
#initialize with test features
kernel.init(feats_train, feats_test)
normal_out = mkl.apply()
#test on abnormally generated data
kernel.init(feats_train, feats_test_out)
abnormal_out = mkl.apply()
#test on X-Y grid
kernel.init(feats_train, feats_grid)
grid_out=mkl.apply()
z=grid_out.get_values().reshape((500,500))
z_lab=grid_out.get_labels().reshape((500,500))
a=abnormal_out.get_labels()
n=normal_out.get_labels()
#check for normal and abnormal classified data
idx=where(normal_out.get_labels() != 1)[0]
abnormal=testdata[:,idx]
idx=where(normal_out.get_labels() == 1)[0]
normal=testdata[:,idx]
figure(figsize(15,6))
pl =subplot(121)
title("One-class classification using MKL")
_=pink()
c=pcolor(xx, yy, z)
_=contour(xx, yy, z_lab, linewidths=1, colors='black', hold=True)
_=colorbar(c)
p1=pl.scatter(traindata[0, :], traindata[1,:], cmap=gray(), s=100)
p2=pl.scatter(normal[0,:], normal[1,:], c="red", s=100)
p3=pl.scatter(abnormal[0,:], abnormal[1,:], c="blue", s=100)
p4=pl.scatter(x_out[0,:], x_out[1,:], c=a, cmap=jet(), s=100)
_=pl.legend((p1, p2, p3), ["Training samples", "normal samples", "abnormal samples"], loc=2)
subplot(122)
c=pcolor(xx, yy, z)
title("One-class classification output")
_=gray()
_=contour(xx, yy, z, linewidths=1, colors='black', hold=True)
_=colorbar(c)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lamé parameters
Step2: Metric tensor
Step3: ${\displaystyle \hat{G}=\sum_{i,j} g_{ij}\vec{R}^i\vec{R}^j}$
Step4: Christoffel symbols
Step5: Gradient of vector
Step6: Physical coordinates
Step7: Strain tensor
Step8: Linear Stiffness matrix
Step9: Mass matrix
|
<ASSISTANT_TASK:>
Python Code:
from sympy import *
from geom_util import *
from sympy.vector import CoordSys3D
N = CoordSys3D('N')
alpha1, alpha2, alpha3 = symbols("alpha_1 alpha_2 alpha_3", real = True, positive=True)
init_printing()
%matplotlib inline
%reload_ext autoreload
%autoreload 2
%aimport geom_util
A,K = symbols('A K')
dA1 = Symbol('A_{{,1}}')
dK1 = Symbol('K_{{,1}}')
H1=A*(1+alpha3*K)
H2=S(1)
H3=S(1)
H=[H1, H2, H3]
DIM=3
dH = zeros(DIM,DIM)
dH[0,0]=dA1+alpha3*(dA1*K+A*dK1)
dH[0,2]=A*K
dH
G_up = getMetricTensorUpLame(H1, H2, H3)
G_down = getMetricTensorDownLame(H1, H2, H3)
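# Illustrative sanity check (added): the covariant and contravariant metric tensors
# should be mutual inverses, so their product should simplify to the identity matrix.
simplify(G_up*G_down)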
DIM=3
G_down_diff = MutableDenseNDimArray.zeros(DIM, DIM, DIM)
for i in range(DIM):
for j in range(DIM):
for k in range(DIM):
G_down_diff[i,i,k]=2*H[i]*dH[i,k]
GK = getChristoffelSymbols2(G_up, G_down_diff, (alpha1, alpha2, alpha3))
GK
def row_index_to_i_j_grad(i_row):
return i_row // 3, i_row % 3
B = zeros(9, 12)
B[0,1] = S(1)
B[1,2] = S(1)
B[2,3] = S(1)
B[3,5] = S(1)
B[4,6] = S(1)
B[5,7] = S(1)
B[6,9] = S(1)
B[7,10] = S(1)
B[8,11] = S(1)
for row_index in range(9):
i,j=row_index_to_i_j_grad(row_index)
B[row_index, 0] = -GK[i,j,0]
B[row_index, 4] = -GK[i,j,1]
B[row_index, 8] = -GK[i,j,2]
B
P=zeros(12,12)
P[0,0]=H[0]
P[1,0]=dH[0,0]
P[1,1]=H[0]
P[2,0]=dH[0,1]
P[2,2]=H[0]
P[3,0]=dH[0,2]
P[3,3]=H[0]
P[4,4]=H[1]
P[5,4]=dH[1,0]
P[5,5]=H[1]
P[6,4]=dH[1,1]
P[6,6]=H[1]
P[7,4]=dH[1,2]
P[7,7]=H[1]
P[8,8]=H[2]
P[9,8]=dH[2,0]
P[9,9]=H[2]
P[10,8]=dH[2,1]
P[10,10]=H[2]
P[11,8]=dH[2,2]
P[11,11]=H[2]
P=simplify(P)
P
B_P = zeros(9,9)
for i in range(3):
for j in range(3):
row_index = i*3+j
B_P[row_index, row_index] = 1/(H[i]*H[j])
Grad_U_P = simplify(B_P*B*P)
Grad_U_P
E=zeros(6,9)
E[0,0]=1
E[1,4]=1
E[2,8]=1
E[3,1]=1
E[3,3]=1
E[4,2]=1
E[4,6]=1
E[5,5]=1
E[5,7]=1
E
StrainL=simplify(E*Grad_U_P)
StrainL
def E_NonLinear(grad_u):
N = 3
du = zeros(N, N)
# print("===Deformations===")
for i in range(N):
for j in range(N):
index = i*N+j
du[j,i] = grad_u[index]
# print("========")
I = eye(3)
a_values = S(1)/S(2) * du * G_up
E_NL = zeros(6,9)
E_NL[0,0] = a_values[0,0]
E_NL[0,3] = a_values[0,1]
E_NL[0,6] = a_values[0,2]
E_NL[1,1] = a_values[1,0]
E_NL[1,4] = a_values[1,1]
E_NL[1,7] = a_values[1,2]
E_NL[2,2] = a_values[2,0]
E_NL[2,5] = a_values[2,1]
E_NL[2,8] = a_values[2,2]
E_NL[3,1] = 2*a_values[0,0]
E_NL[3,4] = 2*a_values[0,1]
E_NL[3,7] = 2*a_values[0,2]
E_NL[4,0] = 2*a_values[2,0]
E_NL[4,3] = 2*a_values[2,1]
E_NL[4,6] = 2*a_values[2,2]
E_NL[5,2] = 2*a_values[1,0]
E_NL[5,5] = 2*a_values[1,1]
E_NL[5,8] = 2*a_values[1,2]
return E_NL
%aimport geom_util
u=getUHat3DPlane(alpha1, alpha2, alpha3)
# u=getUHatU3Main(alpha1, alpha2, alpha3)
gradu=B*u
E_NL = E_NonLinear(gradu)*B
E_NL
%aimport geom_util
C_tensor = getIsotropicStiffnessTensor()
C = convertStiffnessTensorToMatrix(C_tensor)
C
StrainL.T*C*StrainL*H1*H2*H3
rho=Symbol('rho')
B_h=zeros(3,12)
B_h[0,0]=1
B_h[1,4]=1
B_h[2,8]=1
M=simplify(rho*P.T*B_h.T*G_up*B_h*P)
M
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Ways to build a dict
Step2: Besides the literal syntax and the dict constructor, we can also build dictionaries with dict comprehensions: a dictcomp produces key/value pairs from any iterable
Step3: Dictionaries have a built-in method d.update(m, [**kwargs]). It first inspects m: if m has a keys method, update treats it as a mapping; otherwise it falls back to treating m as an iterator of (key, value) pairs. Most mapping constructors in Python use similar logic, so we can create a new mapping either from another mapping object or from (key, value) pairs
Step4: The three lines that handle occurrences can be collapsed into a single line with dict.setdefault
Step5: In other words, index.setdefault(word, []).append(location) is equivalent to the following
Step6: Mappings with flexible key lookup
Step7: How it works
Step8: The machinery behind all of this is the __missing__() method: it is what makes defaultdict call default_factory when a key cannot be found, and in fact this hook is one that any mapping type may choose to support.
Step9: An operation like key in my_dict.keys() is fast even when the mapping is very large, because dict.keys() returns a view. A view behaves like a set and, just as with dictionaries, membership tests on a view are very fast.
Step10: Because UserDict is a subclass of MutableMapping, the remaining mapping methods of StrKeyDict are inherited from the UserDict, MutableMapping, and Mapping superclasses. Mapping in particular, even though it is an abstract base class (ABC), provides several useful concrete methods; the ones most worth noting are listed below
Step11: Set theory
Step12: Set elements must be hashable. The set type itself is not hashable, but frozenset is, so frozenset elements can be put inside a set.
Step13: However, the syntax above only works with sets; let's make it work with any iterable type
Step14: Set literals
Step15: Note that {1} builds the set directly from the literal, while set([1]) takes more steps
Step16: Set comprehensions
Step17: Set operations
|
<ASSISTANT_TASK:>
Python Code:
tt = (1, 2, (30, 40))
hash(tt)
t1 = (1, 2, [30, 40]) # the list inside is mutable, so this tuple has no hash value
hash(t1)
tf = (1, 2, frozenset([30, 40])) # frozenset is an immutable (frozen) set, so it is hashable
hash(tf)
a = dict(one = 1, two = 2, three = 3)
b = {'one': 1, 'two': 2, 'three': 3}
c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))
d = dict([('two', 2), ('one', 1), ('three', 3)])
e = dict({'three': 3, 'one': 1, 'two': 2})
a == b == c == d == e
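# Small added demo (not in the original): dict.update accepts either a mapping or an
# iterable of (key, value) pairs, mirroring the constructor logic described above.
f = dict(a)
f.update({'four': 4}) # has a keys() method, so it is treated as a mapping
f.update([('five', 5)]) # no keys() method, so it is treated as an iterable of pairs
f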
DIAL_CODES = [
(86, 'China'),
(91, 'India'),
(1, 'United States')
]
country_code = {country: code for code, country in DIAL_CODES}
country_code
#!/usr/bin/env python
# encoding: utf-8
import sys
import re
WORD_RE = re.compile('\w+') # \w matches any letter or digit, + means one or more times
index = {}
#with open(sys.argv[1], encoding="utf-8") as fp: # normally the file name is passed as an argument
with open("/home/kaka/test.txt", encoding="utf-8") as fp:
for line_no, line in enumerate(fp, 1): # line_no is the index (starting from 1), line is the line content
for match in WORD_RE.finditer(line): # returns all matching substrings as an iterator
word = match.group() # group retrieves the word (match is an object)
column_no = match.start() + 1 # get the column number, the index starts from 0
location = (line_no, column_no) # build a tuple holding (row, col)
# writing it this way is bad; it is only done here for demonstration
occurrences = index.get(word, []) # check whether the word was added before; if not, return []; note this is a copy of the original list
occurrences.append(location) # append to the value associated with this key
index[word] = occurrences # this searches for the key word a second time
for word in sorted(index, key = str.upper): # sort alphabetically, ignoring case
print(word, index[word])
#!/usr/bin/env python
# encoding: utf-8
import sys
import re
WORD_RE = re.compile('\w+') # \w matches any letter or digit, + means one or more times
index = {}
#with open(sys.argv[1], encoding="utf-8") as fp: # normally the file name is passed as an argument
with open("/home/kaka/test.txt", encoding="utf-8") as fp:
for line_no, line in enumerate(fp, 1): # line_no is the index (starting from 1), line is the line content
for match in WORD_RE.finditer(line): # returns all matching substrings as an iterator
word = match.group() # group retrieves the word (match is an object)
column_no = match.start() + 1 # get the column number, the index starts from 0
location = (line_no, column_no) # build a tuple holding (row, col)
# if the word key is missing, put the word with an empty list into the mapping and return that list, so it can be updated without a second search
index.setdefault(word, []).append(location)
for word in sorted(index, key = str.upper): # sort alphabetically, ignoring case
print(word, index[word])
#if key not in my_dict:
# my_dict[key] = []
#my_dict[key].append(new_value)
#!/usr/bin/env python
# encoding: utf-8
import sys
import re
import collections
WORD_RE = re.compile('\w+')
index = collections.defaultdict(list) # build the defaultdict with list as its default_factory
#with open(sys.argv[1], encoding="utf-8") as fp:
with open("/home/kaka/test.txt", encoding="utf-8") as fp:
for line_no, line in enumerate(fp, 1):
for match in WORD_RE.finditer(line):
word = match.group()
column_no = match.start() + 1
location = (line_no, column_no)
# if the word key does not exist, the default_factory passed at construction time is called to produce a default value; without a default_factory a KeyError would be raised
index[word].append(location)
for word in sorted(index, key = str.upper):
print(word, index[word])
import collections
index = collections.defaultdict(list)
print(index.get('hello'))
class StrKeyDict0(dict):
# this method is called when a key cannot be found
def __missing__(self, key):
# if the missing key is already a str, raise the exception directly
# this check is necessary: without it, a non-string key would cause infinite recursion
if isinstance(key, str):
raise KeyError(key)
# if the missing key is not a string, convert it to a string and look it up
return self[str(key)]
def get(self, key, default=None):
try:
# get looks the key up via self[key], i.e. through __getitem__(), so a missing key still gets a chance via __missing__()
return self[key]
except KeyError:
# if KeyError is raised, __missing__() has also failed, so return default
return default
def __contains__(self, key):
# look the key up as given first, then fall back to its string form; note that or returns not True/False but the first truthy operand
# for example 0 or 2 returns 2
# key in self is deliberately not used here because it would recursively call this method, so the lookup is done explicitly against self.keys()
return key in self.keys() or str(key) in self.keys()
d = StrKeyDict0([('2', 'two'), ('4', 'four')])
d['2']
d[4]
d[1]
d.get('2')
d.get(4)
d.get(1, 'N/A')
2 in d
1 in d
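# Added illustration: dict.keys() returns a view, so membership tests on the view are
# about as fast as on the dict itself, even for a large mapping.
big = {str(i): i for i in range(100000)}
'99999' in big.keys()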
import collections
class StrKeyDict(collections.UserDict):
def __missing__(self, key):
if isinstance(key, str): # this method is exactly the same as before
raise KeyError(key)
return self[str(key)]
def __contains__(self, key): # this method is simpler, because we can safely assume all stored keys are strings
return str(key) in self.data
# this method converts every key to a string
def __setitem__(self, key, item):
self.data[str(key)] = item
from types import MappingProxyType
d = {1: 'A'}
d_proxy = MappingProxyType(d)
d_proxy
d_proxy[1]
d_proxy[2] = 'x'
d[2] = 'b'
d_proxy
l = ['spam', 'spam', 'eggs', 'spam']
set(l)
list(set(l))
#found = len(needles & haystack)
#found = len(set(needles) & set(haystack))
# can also be written as
#found = len(set(needles).intersection(haystack))
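# Added concrete example (the needles/haystack names are illustrative): the & operator
# needs sets on both sides, while .intersection() accepts any iterable.
needles = {1, 2, 3}
haystack = [2, 3, 4, 5, 3]
print(len(needles & set(haystack)))
print(len(needles.intersection(haystack)))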
# the form below is faster than set([1]): the latter must look up the constructor by the name set, build a list, and then pass that list to the constructor
# with a literal like {1, 2, 3}, however, Python uses a dedicated BUILD_SET bytecode to create the set
s = {1}
type(s)
s.pop()
s
s = {}
type(s) # note that this creates a dict, not a set
from dis import dis
dis('{1}')
dis('set([1])')
frozenset(range(10))
from unicodedata import name # use the name function to get the name of a character
{chr(i) for i in range(32, 256) if 'SIGN' in name(chr(i), '')} # pick out the characters whose names contain SIGN and put them in a set
DIAL_CODES = [
(86, 'China'),
(91, 'India'),
(1, 'United States'),
(62, 'Indonesia'),
(55, 'Brazil'),
(92, 'Pakistan'),
(880, 'Bangladesh'),
(234, 'Nigeria'),
(7, 'Russia'),
(81, 'Japan')
]
d1 = dict(DIAL_CODES) # the tuples are ordered by country population rank
print('d1:', d1.keys())
d2 = dict(sorted(DIAL_CODES)) # the tuples are ordered by dialing code
print('d2:', d2.keys())
d3 = dict(sorted(DIAL_CODES, key = lambda x: x[1])) # the tuples are ordered alphabetically by country name
print('d3:', d3.keys())
assert d1 == d2 and d2 == d3
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Inspect the Distribution for a Feature
Step2: Extract the values of the KDE curve for fitting purposes
Step3: Let's plot it to make sure it looks like what we've seen above
Step4: Good! It looks like it matches exactly.
Step5: Here I make my ballpark guesses for the amplitudes, means, and deviations
Step6: Take a look at what that gives us
Step7: ... This doesn't look good, but it's a nice little nucleation point for an optimization routine.
Step8: Put in these optimized parameters and see what we get
Step9: Compare to the actual KDE distribution
Step10: I'd say that's pretty good for a first go
Step11: For 12 degrees of freedom (12 fit parameters), we can look at a $\chi^2$ table to find that we have a $p>0.995$ that this is a good fit to the distribution.
Step12: Overlay onto original data
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
data_df = pd.read_csv('raw-data.csv', index_col='eventID')
data_df.head()
import matplotlib.pylab as plt
import seaborn as sns
# Show plots in notebook
%matplotlib inline
# Set some styling options
sns.set_style("darkgrid")
sns.set_context("paper", font_scale=1.4)
feature = "AfterInhMATRIX5"
sns.distplot(data_df.query("label == 1")[feature])
from sklearn.neighbors import KernelDensity
import numpy as np
import matplotlib.pylab as plt
# Put the data you want to apply the KDE to into an array
data = data_df.query("label == 1")[feature].values[:, np.newaxis]
# Create a KDE object and then fit it to the data
kde = KernelDensity(kernel='gaussian', bandwidth=1400).fit(data)
X_plot = np.linspace(10000, 100000, 1000)[:, np.newaxis]
# Get log density values for each point on the x-axis
log_dens = kde.score_samples(X_plot)
Y_plot = np.exp(log_dens)
# Plot the two against each other
sns.distplot(data_df.query("label == 1")[feature],
hist=False, color='black',
label='Seaborn KDE')
plt.plot(X_plot, Y_plot, '--', color='red', lw=2,
label='scikit-learn KDE')
plt.legend(loc='best')
def gauss_dist(xdata, amp, mean, stddev):
return (amp * np.exp( np.divide(-1 * np.square(xdata-mean),
(2 * stddev**2))))
# Take four amplitudes, means, and standard deviations
# Compute sum of four Gaussians
def my_fit(xdata,
a1, a2, a3, a4,
m1, m2, m3, m4,
s1, s2, s3, s4):
exp1 = gauss_dist(xdata, a1, m1, s1)
exp2 = gauss_dist(xdata, a2, m2, s2)
exp3 = gauss_dist(xdata, a3, m3, s3)
exp4 = gauss_dist(xdata, a4, m4, s4)
return exp1 + exp2 + exp3 + exp4
p0 = [0.00001, 0.00002, 0.000061, 0.000005,
31000, 51000, 66000, 83000,
1000, 1500, 2000, 3000]
my_guess = my_fit(X_plot[:, 0], *p0)
plt.plot(X_plot, my_guess, '-')
from scipy.optimize import curve_fit
popt, pcov = curve_fit(my_fit, X_plot[:, 0], Y_plot, p0)
print popt
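# Added illustration: one-sigma uncertainties on the fitted parameters, taken from
# the diagonal of the covariance matrix returned by curve_fit.
perr = np.sqrt(np.diag(pcov))
print perr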
optim_fit = my_fit(X_plot[:, 0], *popt)
# Plot the whole fit
plt.plot(X_plot, optim_fit, '-.', lw=3, color='black')
# Along with the consituent gaussians
for i in range(0,4):
plt.plot(X_plot,
gauss_dist(X_plot[:, 0], popt[i], popt[i+4], popt[i+8]),
'-', lw=2)
plt.plot(X_plot, Y_plot, '-', lw=3, color='black',
label='scikit-learn KDE')
plt.plot(X_plot, optim_fit, '-', color='red',
label=r'$\sum_{i=1}^4\ f_G(a_i, \mu_i, \sigma_i)$')
plt.legend(loc='best')
def calc_chisq(obs, exp):
chisq = 0.0
for i in range(0,len(obs)):
chisq += (obs[i] - exp[i])**2 / exp[i]
return chisq
print calc_chisq(Y_plot, optim_fit)
from scipy.stats import chisquare
chisq, pvalue = chisquare(Y_plot, optim_fit, ddof=12)
print ("Chi-squared: %.02f\np-value: %.02f" % (chisq, pvalue))
sns.distplot(data_df.query("label == 1")[feature],
kde=False, hist=True, norm_hist=True)
for i in range(0,4):
plt.plot(X_plot,
gauss_dist(X_plot[:, 0], popt[i], popt[i+4], popt[i+8]),
'-', lw=2)
<END_TASK>
|