<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Artistic Style Transfer with TensorFlow Lite
Step2: Download the content and style images, and the pre-trained TensorFlow Lite models.
Step3: Pre-process the inputs
Step4: Visualize the inputs
Step5: Run style transfer with TensorFlow Lite
Step6: Style transform
Step7: Style blending
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
print(tf.__version__)
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
import numpy as np
import time
import functools
content_path = tf.keras.utils.get_file('belfry.jpg','https://storage.googleapis.com/khanhlvg-public.appspot.com/arbitrary-style-transfer/belfry-2611573_1280.jpg')
style_path = tf.keras.utils.get_file('style23.jpg','https://storage.googleapis.com/khanhlvg-public.appspot.com/arbitrary-style-transfer/style23.jpg')
style_predict_path = tf.keras.utils.get_file('style_predict.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/prediction/1?lite-format=tflite')
style_transform_path = tf.keras.utils.get_file('style_transform.tflite', 'https://tfhub.dev/google/lite-model/magenta/arbitrary-image-stylization-v1-256/int8/transfer/1?lite-format=tflite')
# Function to load an image from a file, and add a batch dimension.
def load_img(path_to_img):
img = tf.io.read_file(path_to_img)
img = tf.io.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
img = img[tf.newaxis, :]
return img
# Function to pre-process the image by resizing and central-cropping it.
def preprocess_image(image, target_dim):
# Resize the image so that the shorter dimension becomes target_dim pixels.
shape = tf.cast(tf.shape(image)[1:-1], tf.float32)
short_dim = min(shape)
scale = target_dim / short_dim
new_shape = tf.cast(shape * scale, tf.int32)
image = tf.image.resize(image, new_shape)
# Central crop the image.
image = tf.image.resize_with_crop_or_pad(image, target_dim, target_dim)
return image
# Load the input images.
content_image = load_img(content_path)
style_image = load_img(style_path)
# Preprocess the input images.
preprocessed_content_image = preprocess_image(content_image, 384)
preprocessed_style_image = preprocess_image(style_image, 256)
print('Style Image Shape:', preprocessed_style_image.shape)
print('Content Image Shape:', preprocessed_content_image.shape)
def imshow(image, title=None):
if len(image.shape) > 3:
image = tf.squeeze(image, axis=0)
plt.imshow(image)
if title:
plt.title(title)
plt.subplot(1, 2, 1)
imshow(preprocessed_content_image, 'Content Image')
plt.subplot(1, 2, 2)
imshow(preprocessed_style_image, 'Style Image')
# Function to run style prediction on preprocessed style image.
def run_style_predict(preprocessed_style_image):
# Load the model.
interpreter = tf.lite.Interpreter(model_path=style_predict_path)
# Set model input.
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
interpreter.set_tensor(input_details[0]["index"], preprocessed_style_image)
# Calculate style bottleneck.
interpreter.invoke()
style_bottleneck = interpreter.tensor(
interpreter.get_output_details()[0]["index"]
)()
return style_bottleneck
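# Note: this arbitrary style transfer pipeline uses two separate TFLite models -- the
# "style prediction" network above condenses the style image into a small style
# bottleneck vector, and the "style transform" network below applies that bottleneck
# to the content image.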
# Calculate style bottleneck for the preprocessed style image.
style_bottleneck = run_style_predict(preprocessed_style_image)
print('Style Bottleneck Shape:', style_bottleneck.shape)
# Function to run style transform on the preprocessed content image.
def run_style_transform(style_bottleneck, preprocessed_content_image):
# Load the model.
interpreter = tf.lite.Interpreter(model_path=style_transform_path)
# Set model input.
input_details = interpreter.get_input_details()
interpreter.allocate_tensors()
# Set model inputs.
interpreter.set_tensor(input_details[0]["index"], preprocessed_content_image)
interpreter.set_tensor(input_details[1]["index"], style_bottleneck)
interpreter.invoke()
# Transform content image.
stylized_image = interpreter.tensor(
interpreter.get_output_details()[0]["index"]
)()
return stylized_image
# Stylize the content image using the style bottleneck.
stylized_image = run_style_transform(style_bottleneck, preprocessed_content_image)
# Visualize the output.
imshow(stylized_image, 'Stylized Image')
# Calculate style bottleneck of the content image.
style_bottleneck_content = run_style_predict(
preprocess_image(content_image, 256)
)
# Define content blending ratio between [0..1].
# 0.0: 0% style extracted from content image.
# 1.0: 100% style extracted from content image.
content_blending_ratio = 0.5 #@param {type:"slider", min:0, max:1, step:0.01}
# Blend the style bottleneck of style image and content image
style_bottleneck_blended = content_blending_ratio * style_bottleneck_content \
+ (1 - content_blending_ratio) * style_bottleneck
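# At content_blending_ratio = 0.0 the blend is just the style image's bottleneck
# (strongest stylization); at 1.0 it is the content image's own style bottleneck,
# so the output stays close to the original content image.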
# Stylize the content image using the style bottleneck.
stylized_image_blended = run_style_transform(style_bottleneck_blended,
preprocessed_content_image)
# Visualize the output.
imshow(stylized_image_blended, 'Blended Stylized Image')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Date string for filenames
Step2: Load data
Step3: Adjusted facility emissions and generation
Step4: Extra gen/fuels from non-reporting
Step5: Total EIA generation/fuel consumption and emission factors
Step6: Calculate CO₂, generation, and fuel consumption that is not captured by facilities
Step7: Total CO₂ (national)
Step8: National Index and gen by fuels
Step9: Regroup generation from state codes to my custom fuel codes
Step10: National Index
Step11: Percent generation by custom fuel type
Step12: Facility state and lat/lon file generation
Step13: Combine the lists of annual plants in 2015 and 2017. This lets us catch facilities that have gone from monthly to annual since 2015, but it also includes plants that were annual in 2015 and may have retired.
Step14: Add NERC region labels
Step15: Added the filter that year must be 2015 - was getting all 2015 annual plants, but for all years!
Step16: This is 2015 data on annual reporting facilities (from both 2015 and 2017)
Step17: With the values below I can allocate extra state-level generation and fuel use to each of the NERC regions!
Step18: Allocate extra gen from the state-level to regions
Step19: Load state-level total gen/fuel consumption
Step20: Simplify the dataframe
Step21: Group the facility data to state fuel categories and add state labels
Step22: Calculate the extra gen/fuel consumption at the state levels
Step23: Sort the index of each dataframe to make sure they can be easily combined.
Step24: Create a copy of the nerc_fraction dataframe with repeated values for every month of the year, so that the MultiIndex matches the state_extra MultiIndex
Step25: Cycle through each year (2016 and 2017 in this case) and each NERC, multiplying the state-level extra generation, total fuel consumption, and fuel consumption for electricity by the share that should be allocated to each NERC.
Step26: Although the dataframe is called final, it's really just the final allocated extra state-level generation/fuel consumption
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import os
from os.path import join
import sys
import json
idx = pd.IndexSlice
cwd = os.getcwd()
data_path = join(cwd, '..', 'Data storage')
file_date = '2018-03-06'
%load_ext watermark
%watermark -iv -v
# Load the "autoreload" extension
%load_ext autoreload
# always reload modules marked with "%aimport"
%autoreload 1
# add the 'src' directory as one where we can import modules
src_dir = join(os.getcwd(), os.pardir, 'src')
sys.path.append(src_dir)
%aimport Data.make_data
from Data.make_data import states_in_nerc
%aimport Analysis.index
from Analysis.index import facility_emission_gen, group_facility_data, add_quarter
%aimport Analysis.index
from Analysis.index import g2lb, change_since_2005, generation_index
%aimport Analysis.index
from Analysis.index import facility_co2, adjust_epa_emissions, group_fuel_cats
%aimport util.utils
from util.utils import rename_cols, add_facility_location
cwd = os.getcwd()
path = join(data_path, 'Derived data',
'Facility gen fuels and CO2 {}.csv'.format(file_date))
eia_fac = pd.read_csv(path)
rename_cols(eia_fac)
path = join(data_path, 'Derived data',
'Monthly EPA emissions {}.csv'.format(file_date))
epa = pd.read_csv(path)
fuel_cat_folder = join(data_path, 'Fuel categories')
state_cats_path = join(fuel_cat_folder, 'State_facility.json')
with open(state_cats_path, 'r') as f:
state_fuel_cat = json.load(f)
custom_cats_path = join(fuel_cat_folder, 'Custom_results.json')
with open(custom_cats_path, 'r') as f:
custom_fuel_cat = json.load(f)
co2, gen_fuels_state = facility_emission_gen(eia_facility=eia_fac, epa=epa,
state_fuel_cat=state_fuel_cat,
custom_fuel_cat=custom_fuel_cat,
export_state_cats=True)
%aimport Analysis.index
from Analysis.index import extra_emissions_gen
cwd = os.getcwd()
path = join(data_path, 'Derived data',
'EIA country-wide gen fuel CO2 {}.csv'.format(file_date))
eia_total = pd.read_csv(path)
path = join(data_path,
'Final emission factors.csv')
ef = pd.read_csv(path, index_col=0)
extra_co2, extra_gen_fuel = extra_emissions_gen(gen_fuels_state, eia_total, ef)
facility_co2 = co2.groupby(['year', 'month']).sum()
national_co2 = (facility_co2.loc[:, 'final co2 (kg)']
+ extra_co2.loc[:, 'elec fuel co2 (kg)']
.groupby(['year', 'month']).sum())
national_co2.name = 'final co2 (kg)'
gen_fuels_state.groupby(['type', 'year', 'month']).sum().head()
national_gen = (gen_fuels_state
.groupby(['type', 'year', 'month'])['generation (mwh)'].sum()
.add(extra_gen_fuel['generation (mwh)'], fill_value=0))
national_gen = group_fuel_cats(national_gen.reset_index(), custom_fuel_cat,
'type', 'fuel category').set_index(['fuel category', 'year', 'month'])
total_gen = national_gen.groupby(['year', 'month']).sum()
national_index = total_gen.copy()
national_index['final co2 (kg)'] = national_co2
national_index['index (g/kwh)'] = (national_index['final co2 (kg)']
/ national_index['generation (mwh)'])
national_index.reset_index(inplace=True)
add_quarter(national_index)
g2lb(national_index)
change_since_2005(national_index)
path = join(data_path, 'National data',
'National index {}.csv'.format(file_date))
national_index.to_csv(path, index=False)
df_list = []
for fuel in national_gen.index.get_level_values('fuel category').unique():
percent_gen = national_gen.loc[fuel].divide(total_gen, fill_value=0)
percent_gen['fuel category'] = fuel
percent_gen.set_index('fuel category', inplace=True, append=True)
df_list.append(percent_gen)
percent_gen = pd.concat(df_list)
path = join(data_path, 'National data',
'National generation {}.csv'.format(file_date))
national_gen.to_csv(path)
path = join(data_path, 'National data',
'National percent gen {}.csv'.format(file_date))
percent_gen.to_csv(path)
%aimport Data.make_data
from Data.make_data import get_annual_plants
annual_ids_2015 = get_annual_plants(2015)
annual_ids_2017 = get_annual_plants(2017)
annual_ids = set(annual_ids_2015.tolist() + annual_ids_2017.tolist())
len(annual_ids)
%aimport Analysis.state2nerc
from Analysis.state2nerc import fraction_state2nerc, add_region
cwd = os.getcwd()
path = join(data_path, 'Facility labels',
'Facility locations_RF.csv')
location_labels = pd.read_csv(path)
nerc_state_path = join(data_path, 'Derived data',
'NERC_states.json')
with open(nerc_state_path, 'r') as f:
nerc_states = json.load(f)
eia_2015_annual = eia_fac.loc[(eia_fac['plant id'].isin(annual_ids)) &
(eia_fac['year'] == 2015)].copy()
# Group to state-level fuel categories
eia_2015_annual = group_fuel_cats(eia_2015_annual, state_fuel_cat)
eia_2015_annual_nerc = add_facility_location(eia_2015_annual, location_labels,
labels=['state', 'nerc', 'year'])
eia_2015_annual_nerc.head()
# Get a list of all state abbreviations
all_states = []
for value in nerc_states.values():
all_states.extend(value)
all_states = set(all_states)
df_list = []
for state in all_states:
try:
df_list.append(fraction_state2nerc(eia_2015_annual_nerc,
state, region_col='nerc', fuel_col='type'))
except:
print(state)
pass
nerc_fraction = pd.concat(df_list)
nerc_fraction.set_index(['state', 'nerc', 'type'], inplace=True)
nerc_fraction.sort_index(inplace=True)
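# Sanity check: the NERC shares allocated to each state/fuel combination should not
# exceed 100%, so the expression below should be False for every column.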
(nerc_fraction.groupby(['state', 'type']).sum() > 1.0001).any()
idx = pd.IndexSlice
# a dictionary to match column names
nerc_frac_match = {'% generation': 'generation (mwh)',
'% total fuel': 'total fuel (mmbtu)',
'% elec fuel': 'elec fuel (mmbtu)'}
path = join(data_path, 'Derived data',
'EIA state-level gen fuel CO2 {}.csv'.format(file_date))
state_total = pd.read_csv(path, parse_dates=['datetime'])
rename_cols(state_total)
state_total['state'] = state_total['geography'].str[-2:]
cols = list(nerc_frac_match.values())
state_total = state_total.groupby(['state', 'year', 'month', 'type'])[cols].sum()
# list of NERC regions
nercs = nerc_fraction.index.get_level_values('nerc').unique()
cols = list(nerc_frac_match.values())
eia_fac_type = group_fuel_cats(eia_fac, state_fuel_cat)
eia_fac_type = add_facility_location(eia_fac_type, location_labels, ['state', 'year'])
eia_fac_type = eia_fac_type.groupby(['state', 'year', 'month', 'type'])[cols].sum()
state_extra = (state_total.loc[idx[:, 2016:, :, :], :]
- eia_fac_type.loc[idx[:, 2016:, :, :], :])
state_extra.dropna(how='all', inplace=True)
state_extra = state_extra.reorder_levels(['year', 'state', 'month', 'type'])
state_extra.sort_index(inplace=True)
nerc_fraction.sort_index(inplace=True)
state_extra.sort_index(inplace=True)
df_list = []
for month in range(1, 13):
df = nerc_fraction.copy()
df['month'] = month
df.set_index('month', append=True, inplace=True)
df_list.append(df)
nerc_frac_monthly = pd.concat(df_list, axis=0)
nerc_frac_monthly.sort_index(inplace=True)
nerc_frac_monthly = (nerc_frac_monthly
.reorder_levels(['nerc', 'state', 'month', 'type']))
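# Allocate the state-level "extra" generation and fuel consumption to NERC regions:
# for each year and NERC, multiply every state/month/fuel value in state_extra by the
# share of that state's activity assigned to that NERC (nerc_frac_monthly).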
df_list_outer = []
for year in [2016, 2017]:
df_list_inner = []
for nerc in nercs:
df = pd.concat([(nerc_frac_monthly
.loc[nerc]['% generation']
* state_extra
.loc[year]['generation (mwh)']).dropna(),
(nerc_frac_monthly.
loc[nerc]['% total fuel']
* state_extra
.loc[year]['total fuel (mmbtu)']).dropna(),
(nerc_frac_monthly
.loc[nerc]['% elec fuel']
* state_extra
.loc[year]['elec fuel (mmbtu)']).dropna()],
axis=1)
df.columns = nerc_frac_match.values()
df['nerc'] = nerc
df['year'] = year
df = df.groupby(['year', 'nerc', 'month', 'type']).sum()
df_list_inner.append(df)
df_list_outer.append(pd.concat(df_list_inner))
final = pd.concat(df_list_outer)
final.sort_index(inplace=True)
nerc_frac_monthly.sort_index(inplace=True)
path = join(data_path, 'Derived data',
'NERC extra gen fuels {}.csv'.format(file_date))
final.to_csv(path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now let's try teaching the model to just output the identity function (i.e. the output should be the same as the input). We train it over a sine wave.
|
<ASSISTANT_TASK:>
Python Code:
n_neurons = 100
ac = AdaptiveControl(n_inputs=1, n_outputs=1, n_neurons=n_neurons, seed=1)
inputs = np.linspace(-1, 1, 100)
rates = np.zeros((len(inputs), n_neurons))
for i, input in enumerate(inputs):
current = ac.compute_neuron_input([input])
activity = ac.neuron(current)
rates[i, :] = activity
pylab.plot(inputs, rates);
n_neurons = 100
ac = AdaptiveControl(n_inputs=1, n_outputs=1, n_neurons=n_neurons, seed=1, learning_rate=1e-3)
inputs = []
outputs = []
errors = []
error = np.zeros(1)
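# Training loop: the target behaviour is the identity mapping, so after each step the
# error (output - input) is stored and fed back on the next call to ac.step, which the
# AdaptiveControl object (constructed with a learning_rate above) presumably uses to
# adapt its output.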
for i in range(2000):
input = np.sin(i*2*np.pi/1000)
output = ac.step([input], error)
error[:] = output - input
inputs.append(input)
outputs.append(output)
errors.append(output-input)
pylab.plot(inputs, label='input')
pylab.plot(outputs, label='output')
pylab.plot(errors, label='error')
pylab.legend(loc='best')
pylab.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: SAP Enqueue packet
Step2: SAP Enqueue Server Admin opcodes
Step3: SAP Enqueue Connection Admin opcodes
Step4: SAP Enqueue Connection Admin parameters
|
<ASSISTANT_TASK:>
Python Code:
from pysap.SAPEnqueue import *
from IPython.display import display
for dest in enqueue_dest_values:
p = SAPEnqueue(dest=dest)
print(enqueue_dest_values[dest])
display(p.canvas_dump())
for opcode in enqueue_server_admin_opcode_values:
p = SAPEnqueue(dest=3, opcode=opcode)
print(enqueue_server_admin_opcode_values[opcode])
display(p.canvas_dump())
for opcode in enqueue_conn_admin_opcode_values:
p = SAPEnqueue(dest=6, opcode=opcode)
print(enqueue_conn_admin_opcode_values[opcode])
display(p.canvas_dump())
for param in enqueue_param_values:
p = SAPEnqueueParam(param=param)
print(enqueue_param_values[param])
display(p.canvas_dump())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'sandbox-3', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
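# For illustration only (not a value for this particular model): a completed cell
# replaces the TODO with a call such as
#     DOC.set_value("NPZD")
# using one of the valid strings listed above.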
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: You now have some data $x$ and $y$.
Step2: Solution 2a
Step3: There is a very good chance, though I am not specifically assuming anything, that upon making the previous plot you had a thought along the lines of "these points fall on a line" or "these data represent a linear relationship."
Step4: There is a very good chance, though, again, I am not specifically assuming anything, that for the previous plots that you plotted x along the abscissa and y along the ordinate.
Step5: We have now uncovered lie #1! The relationship between $x$ and $y$ is not the same as the relationship between $y$ and $x$ (as far as least-squares fitting is concerned).
Step6: This "lie" can only be true if the data are singularly valued at every value of the dependent variable. Polynomials of the form $y = \sum_n a_n x^n$ cannot take on multiple values of $y$ at a single value for $x$.
Step7: In this case we see that increasing the order of the polynomial fit to the data lowers the "reduced" $\chi^2$, getting a value that is much much closer to 1.
Step8: If you also suppose that $y$ varies linearly with $x$ (perhaps you have some theory that guides you towards this intuition), but that these data are produced via some noisy process, then you could write down the probability for any individual observation $y_i$ as
Step9: This is somewhat amazing.
Step10: Problem 4a
Step11: We can immediately see that weighted least-squares fitting does not work well in this case.
Step12: If we apply the Huber loss to this data set, we find a fit that looks much better
Step13: However, there should still be something that is nagging at you a little bit – how do you choose the value for $c$?
Step14: Problem 6a
|
<ASSISTANT_TASK:>
Python Code:
x, y, y_unc = pollute_namespace()
# complete
# complete
p = np.polyfit( # complete
# complete
# complete
# complete
p_yx = np.polyfit(y, x, 1)
p_yx_eval = np.poly1d(p_yx)
fig = plt.figure(figsize=(6,5))
ax = plt.subplot2grid((3,1), (0, 0), rowspan=2)
ax_res = plt.subplot2grid((3,1), (2, 0), sharex=ax)
ax.plot(y, x, 'o')
ax.plot([-150,2000], p_yx_eval([-150,2000]))
ax.set_ylabel('x', fontsize=14)
ax_res.plot(y, x - p_yx_eval(y), 'o')
ax_res.axhline(color='C1')
ax_res.set_ylabel('residuals', fontsize=14)
ax_res.set_xlabel('y', fontsize=14)
plt.setp(ax.get_xticklabels(), visible=False)
fig.tight_layout()
print("For y vs. x, we find y = {:.4f}x + {:.4f}".format(p[0], p[1]))
print("\t for x=50 we would forecast y={:.2f}".format(p_eval(50)))
print("For x vs. y, we find x = {:.4f}y + {:.4f}".format(p_yx[0], p_yx[1]))
print("\t for x=50 we would forecast y={:.2f}".format((50 - p_yx[1])/p_yx[0]))
n_obs_n_poly()
chi2_example()
noisy_plot()
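# For reference (a standard result, not specific to this notebook): with Gaussian noise,
# p(y_i | x_i, m, b) is proportional to exp(-(y_i - m*x_i - b)**2 / (2*sigma_i**2)), so
# maximizing the likelihood of independent observations is equivalent to minimizing
# chi^2 = sum((y_i - m*x_i - b)**2 / sigma_i**2), i.e. weighted least squares.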
nuissance_model()
plot_mix_model()
weighted_least_squares()
huber_plot()
minimize_huber()
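# Illustrative sketch only -- the notebook's helpers (huber_plot, minimize_huber) are
# defined elsewhere. The Huber loss they rely on is quadratic for small residuals and
# linear beyond a threshold c, which is what reduces the influence of outliers; c is a
# free choice (1.345 is a common default).
def huber_loss(residuals, c=1.345):
    abs_res = np.abs(residuals)
    return np.where(abs_res <= c, 0.5 * residuals**2, c * abs_res - 0.5 * c**2)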
x, y, y_unc = gen_mix_data()
# complete
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now we'll set up our aesthetics so that date is the x-axis value, variable is the color of each line, and value is the y-axis value.
|
<ASSISTANT_TASK:>
Python Code:
meat_subset = meat[['date', 'beef', 'pork']]
df = pd.melt(meat_subset, id_vars=['date'])
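# pd.melt reshapes the wide table into long form: one row per (date, variable) pair,
# where 'variable' is either 'beef' or 'pork' and 'value' holds the corresponding amount.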
df.head()
ggplot(df, aes(x='date', y='value', color='variable')) + geom_line()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Test function img_to_example()
Step2: Function Test
Step3: Test batch_read_write_tfrecords()
Step4: Read back the generated tfrecords and check if the data stored inside the file meets the expectation.
|
<ASSISTANT_TASK:>
Python Code:
from parse_data_to_tfrecord_lib import img_to_example, read_tfrecord, generate_tfexamples_from_detections, batch_read_write_tfrecords
from PIL import Image # used to read images from directory
import tensorflow as tf
import os
import io
import IPython.display as display
import numpy as np
tf.enable_eager_execution()
IMG_PATH = './TC11/svt1/img/19_00.jpg'
features={'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64)}
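# Parsing schema for the serialized tf.train.Example produced by img_to_example:
# the encoded image bytes are stored under 'image' and the integer class label under 'label'.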
try:
img = Image.open(IMG_PATH, "r")
except Exception as e:
print(e)
print(IMG_PATH + " is not valid")
example = img_to_example(img, label=0)
features = tf.io.parse_single_example(example.SerializeToString(), features)
# Testing
# The label feature should be value of 0
assert features['label'].numpy() == 0
# The pixel values of the original image and the stored image should be the same
decode_image = tf.image.decode_image(features['image']).numpy()
original_image = np.array(img.getdata())
assert np.array_equal(decode_image.reshape(-1, 3), original_image)
# Global constants
# Information from input tfrecord files
SOURCE_ID = 'image/source_id'
BBOX_CONFIDENCE = 'image/object/bbox/confidence'
BBOX_XMIN = 'image/object/bbox/xmin'
BBOX_YMIN = 'image/object/bbox/ymin'
BBOX_XMAX = 'image/object/bbox/xmax'
BBOX_YMAX = 'image/object/bbox/ymax'
INPUT_RECORD_DIR = './streetlearn-detections/'
file_name = "./streetlearn_detections_tfexample-00000-of-01000.tfrecord"
ID = "b'/cns/is-d/home/cityblock-streetsmart/yuxizhang/data/public/streetlearn/003419_2.jpg'"
CONFIDENCE = np.array([0.6700151, 0.45046127, 0.22411232, 0.09745394, 0.07810514, 0.06079888, 0.0587763, 0.05148118])
XMIN = np.array([9., 714., 18., 703., 821., 420., 421., 370.])
YMIN = np.array([298., 441., 538., 613., 655., 649., 656., 637.])
XMAX = np.array([450., 823., 424., 844., 873., 445., 493., 435.])
YMAX = np.array([737., 735., 750., 740., 719., 737., 738., 741.])
parsed_image_dataset = read_tfrecord(os.path.join(INPUT_RECORD_DIR, file_name))
# Testing
# Check the data in the parsed_image_dataset
for example in parsed_image_dataset.take(1):
confidence = example[BBOX_CONFIDENCE].values.numpy()
xmin = example[BBOX_XMIN].values.numpy()
ymin = example[BBOX_YMIN].values.numpy()
xmax = example[BBOX_XMAX].values.numpy()
ymax = example[BBOX_YMAX].values.numpy()
assert str(example[SOURCE_ID].numpy()) == ID
assert np.allclose(confidence, CONFIDENCE)
assert np.allclose(xmin, XMIN)
assert np.allclose(ymin, YMIN)
assert np.allclose(xmax, XMAX)
assert np.allclose(ymax, YMAX)
INPUT_RECORD_DIR = './streetlearn-detections/'
INPUT_UCF_IMG_DIR = './UCF_Streetview_Dataset/raw/'
TF_FILE_DIR = './test_file.tfrecord'
writer = tf.io.TFRecordWriter(TF_FILE_DIR)
detection_property = {'include_top_camera':True, 'only_keep_top_confidence':True, 'balance':False}
file_range = [0, 1]
batch_read_write_tfrecords(file_range, INPUT_RECORD_DIR, INPUT_UCF_IMG_DIR, writer, detection_property)
writer.close()
# Read the files back from the generated tfrecords
def parse_tf_records(file_dir):
raw_image_dataset = tf.data.TFRecordDataset(file_dir)
# Create a dictionary describing the features.
image_feature_description = {
'label': tf.io.FixedLenFeature([], tf.int64),
'image': tf.io.FixedLenFeature([], tf.string),
}
def _parse_image_function(example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_single_example(example_proto, image_feature_description)
parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
return parsed_image_dataset
parsed_image_dataset = parse_tf_records(TF_FILE_DIR)
for image_features in parsed_image_dataset:
print(int(image_features['label']))
image_raw = image_features['image'].numpy()
display.display(display.Image(data=image_raw))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Create a variable of the true number of deaths of an event
Step2: Create a variable that denotes whether the while loop should keep running
Step3: while running is True
|
<ASSISTANT_TASK:>
Python Code:
import random
deaths = 6
running = True
while running:
# Create a variable that randomly create a integer between 0 and 10.
guess = random.randint(0,10)
# if guess equals deaths,
if guess == deaths:
# then print this
print('Correct!')
# and then also change running to False to stop the script
running = False
# else if guess is lower than deaths
elif guess < deaths:
# then print this
print('No, it is higher.')
# if guess is none of the above
else:
# print this
print('No, it is lower')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Below we define a function to generate random intervals with various properties, returning a dataframe of intervals.
Step2: Overlap
Step3: vs total number of intersections
Step4: vs number of chromosomes
Step5: Note this test preserves the number of intersections, which is likely why performance remains similar over the considered range.
Step6: vs other parameters
Step7: Note below that detecting the overlaps takes a relatively small fraction of the execution time; the user-facing function spends most of its time formatting the output table.
Step8: Note that sorting the inputs provides a moderate speedup, as does storing chromosomes as categoricals
Step9: Vs Pyranges
Step10: With roundtrips to dataframes
Step11: Memory usage
Step12: The 2x memory consumption of bioframe is due to the fact that bioframe stores genomic coordinates as int64 by default, while pyranges uses int32
Step13: Slicing
|
<ASSISTANT_TASK:>
Python Code:
import platform
import psutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['figure.facecolor']='white'
plt.rcParams['font.size']=16
import bioframe
import pyranges
print(f"Bioframe v.{bioframe.__version__}")
print(f"PyRanges v.{pyranges.__version__}")
print(f"System Platform: {platform.platform()}")
print(f"{psutil.cpu_count()} CPUs at {psutil.cpu_freq().current:.0f} GHz")
def make_random_intervals(
n=1e5,
n_chroms=1,
max_coord=None,
max_length=10,
sort=False,
categorical_chroms=False,
):
n = int(n)
n_chroms = int(n_chroms)
max_coord = (n // n_chroms) if max_coord is None else int(max_coord)
max_length = int(max_length)
chroms = np.array(['chr'+str(i+1) for i in range(n_chroms)])[
np.random.randint(0, n_chroms, n)]
starts = np.random.randint(0, max_coord, n)
ends = starts + np.random.randint(0, max_length, n)
df = pd.DataFrame({
'chrom':chroms,
'start':starts,
'end':ends
})
if categorical_chroms:
df['chrom'] = df['chrom'].astype('category')
if sort:
df = df.sort_values(['chrom','start','end']).reset_index(drop=True)
return df
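# Note: with the default max_coord = n // n_chroms the intervals keep a roughly constant
# density of ~1 start per unit of coordinate, so the expected number of overlaps per
# interval is controlled by max_length rather than by n.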
timings = {}
for n in [1e2, 1e3, 1e4, 1e5, 1e6]:
df = make_random_intervals(n=n, n_chroms=1)
df2 = make_random_intervals(n=n, n_chroms=1)
timings[n] = %timeit -o -r 1 bioframe.overlap(df, df2)
plt.loglog(
list(timings.keys()),
list([r.average for r in timings.values()]),
'o-',
)
plt.xlabel('N intervals')
plt.ylabel('time, seconds')
plt.gca().set_aspect(1.0)
plt.grid()
timings = {}
n_intersections = {}
n = 1e4
for avg_interval_len in [3, 1e1, 3e1, 1e2, 3e2]:
df = make_random_intervals(n=n, n_chroms=1, max_length=avg_interval_len*2)
df2 = make_random_intervals(n=n, n_chroms=1, max_length=avg_interval_len*2)
timings[avg_interval_len] = %timeit -o -r 1 bioframe.overlap(df, df2)
n_intersections[avg_interval_len] = bioframe.overlap(df, df2).shape[0]
plt.loglog(
list(n_intersections.values()),
list([r.average for r in timings.values()]),
'o-',
)
plt.xlabel('N intersections')
plt.ylabel('time, seconds')
plt.gca().set_aspect(1.0)
plt.grid()
timings = {}
n_intersections = {}
n = 1e5
for n_chroms in [1, 3, 10, 30, 100, 300, 1000]:
df = make_random_intervals(n, n_chroms)
df2 = make_random_intervals(n, n_chroms)
timings[n_chroms] = %timeit -o -r 1 bioframe.overlap(df, df2)
n_intersections[n_chroms] = bioframe.overlap(df, df2).shape[0]
n_intersections
plt.loglog(
list(timings.keys()),
list([r.average for r in timings.values()]),
'o-',
)
plt.ylim([1e-1, 10])
plt.xlabel('# chromosomes')
plt.ylabel('time, seconds')
# plt.gca().set_aspect(1.0)
plt.grid()
df = make_random_intervals()
df2 = make_random_intervals()
%timeit -r 1 bioframe.overlap(df, df2)
%timeit -r 1 bioframe.overlap(df, df2, how='left', keep_order=False)
df = make_random_intervals()
df2 = make_random_intervals()
%timeit -r 1 bioframe.overlap(df, df2, how='outer')
%timeit -r 1 bioframe.overlap(df, df2, how='inner')
%timeit -r 1 bioframe.overlap(df, df2, how='left', keep_order=False)
df = make_random_intervals()
df2 = make_random_intervals()
%timeit -r 1 bioframe.overlap(df, df2)
%timeit -r 1 bioframe.overlap(df, df2, how='inner')
%timeit -r 1 bioframe.ops._overlap_intidxs(df, df2)
%timeit -r 1 bioframe.ops._overlap_intidxs(df, df2, how='inner')
print('Default inputs (outer/inner joins):')
df = make_random_intervals()
df2 = make_random_intervals()
%timeit -r 1 bioframe.overlap(df, df2)
%timeit -r 1 bioframe.overlap(df, df2, how='inner')
print('Sorted inputs (outer/inner joins):')
df_sorted = make_random_intervals(sort=True)
df2_sorted = make_random_intervals(sort=True)
%timeit -r 1 bioframe.overlap(df_sorted, df2_sorted)
%timeit -r 1 bioframe.overlap(df_sorted, df2_sorted, how='inner')
print('Categorical chromosomes (outer/inner joins):')
df_cat = make_random_intervals(categorical_chroms=True)
df2_cat = make_random_intervals(categorical_chroms=True)
%timeit -r 1 bioframe.overlap(df_cat, df2_cat)
%timeit -r 1 bioframe.overlap(df_cat, df2_cat, how='inner')
def df2pr(df):
return pyranges.PyRanges(
chromosomes=df.chrom,
starts=df.start,
ends=df.end,
)
timings_bf = {}
timings_pr = {}
for n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:
df = make_random_intervals(n=n, n_chroms=1)
df2 = make_random_intervals(n=n, n_chroms=1)
pr = df2pr(df)
pr2 = df2pr(df2)
timings_bf[n] = %timeit -o -r 1 bioframe.overlap(df, df2,how='inner')
timings_pr[n] = %timeit -o -r 1 pr.join(pr2)
plt.loglog(
list(timings_bf.keys()),
list([r.average for r in timings_bf.values()]),
'o-',
label='bioframe'
)
plt.loglog(
list(timings_pr.keys()),
list([r.average for r in timings_pr.values()]),
'o-',
label='pyranges'
)
plt.gca().set(
xlabel='N intervals',
ylabel='time, seconds',
aspect=1.0,
xticks=10**np.arange(2,6.1)
)
plt.grid()
plt.legend()
def pyranges_intersect_dfs(df, df2):
return df2pr(df).intersect(df2pr(df2)).as_df()
timings_bf = {}
timings_pr = {}
for n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:
df = make_random_intervals(n=n, n_chroms=1)
df2 = make_random_intervals(n=n, n_chroms=1)
timings_bf[n] = %timeit -o -r 1 bioframe.overlap(df, df2, how='inner')
timings_pr[n] = %timeit -o -r 1 pyranges_intersect_dfs(df, df2)
plt.loglog(
list(timings_bf.keys()),
list([r.average for r in timings_bf.values()]),
'o-',
label='bioframe'
)
plt.loglog(
list(timings_pr.keys()),
list([r.average for r in timings_pr.values()]),
'o-',
label='pyranges'
)
plt.gca().set(
xlabel='N intervals',
ylabel='time, seconds',
aspect=1.0
)
plt.grid()
plt.legend()
from memory_profiler import memory_usage
import time
def sleep_before_after(func, sleep_sec=0.5):
def _f(*args, **kwargs):
time.sleep(sleep_sec)
func(*args, **kwargs)
time.sleep(sleep_sec)
return _f
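# The 0.5 s sleeps bracket the call so that memory_usage (sampling every 0.1 s below)
# records a stable baseline before the function runs and again after it returns; the
# plots below subtract one of these pre-call samples (r[4]) from the peak usage.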
mem_usage_bf = {}
mem_usage_pr = {}
for n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:
df = make_random_intervals(n=n, n_chroms=1)
df2 = make_random_intervals(n=n, n_chroms=1)
mem_usage_bf[n] = memory_usage(
(sleep_before_after(bioframe.overlap), (df, df2), dict( how='inner')),
backend='psutil_pss',
include_children=True,
interval=0.1)
mem_usage_pr[n] = memory_usage(
(sleep_before_after(pyranges_intersect_dfs), (df, df2), dict()),
backend='psutil_pss',
include_children=True,
interval=0.1)
plt.figure(figsize=(8,6))
plt.loglog(
list(mem_usage_bf.keys()),
list([max(r) - r[4] for r in mem_usage_bf.values()]),
'o-',
label='bioframe'
)
plt.loglog(
list(mem_usage_pr.keys()),
list([max(r) - r[4] for r in mem_usage_pr.values()]),
'o-',
label='pyranges'
)
plt.gca().set(
xlabel='N intervals',
ylabel='Memory usage, Mb',
aspect=1.0
)
plt.grid()
plt.legend()
print('Bioframe dtypes:')
display(df.dtypes)
print()
print('Pyranges dtypes:')
display(df2pr(df).dtypes)
### Combined performance figure.
fig, axs = plt.subplot_mosaic(
'AAA.BBB',
figsize=(9.0,4))
plt.sca(axs['A'])
plt.text(-0.25, 1.0, 'A', horizontalalignment='center',
verticalalignment='center', transform=plt.gca().transAxes,
fontsize=19)
plt.loglog(
list(timings_bf.keys()),
list([r.average for r in timings_bf.values()]),
'o-',
color='k',
label='bioframe'
)
plt.loglog(
list(timings_pr.keys()),
list([r.average for r in timings_pr.values()]),
'o-',
color='gray',
label='pyranges'
)
plt.gca().set(
xlabel='N intervals',
ylabel='time, s',
aspect=1.0,
xticks=10**np.arange(2,6.1),
yticks=10**np.arange(-3,0.1),
)
plt.grid()
plt.legend()
plt.sca(axs['B'])
plt.text(-0.33, 1.0, 'B', horizontalalignment='center',
verticalalignment='center', transform=plt.gca().transAxes,
fontsize=19)
plt.loglog(
list(mem_usage_bf.keys()),
list([max(r) - r[4] for r in mem_usage_bf.values()]),
'o-',
color='k',
label='bioframe'
)
plt.loglog(
list(mem_usage_pr.keys()),
list([max(r) - r[4] for r in mem_usage_pr.values()]),
'o-',
color='gray',
label='pyranges'
)
plt.gca().set(
xlabel='N intervals',
ylabel='Memory usage, Mb',
aspect=1.0,
xticks=10**np.arange(2,6.1),
)
plt.grid()
plt.legend()
timings_slicing_bf = {}
timings_slicing_pr = {}
for n in [1e2, 1e3, 1e4, 1e5, 1e6, 3e6]:
df = make_random_intervals(n=n, n_chroms=1)
timings_slicing_bf[n] = %timeit -o -r 1 bioframe.select(df, ('chr1', n//2, n//4*3))
pr = df2pr(df)
timings_slicing_pr[n] = %timeit -o -r 1 pr['chr1', n//2:n//4*3]
plt.loglog(
list(timings_slicing_bf.keys()),
list([r.average for r in timings_bf.values()]),
'o-',
label='bioframe'
)
plt.loglog(
list(timings_slicing_pr.keys()),
list([r.average for r in timings_pr.values()]),
'o-',
label='pyranges'
)
plt.gca().set(
xlabel='N intervals',
ylabel='time, s',
aspect=1.0
)
plt.grid()
plt.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data, resample. We will store the raw objects in dicts with entries
Step2: Do some minimal artifact rejection just for VectorView data
Step3: Explore data
Step4: Alignment and forward
Step5: And for OPM
Step6: Compute and apply inverse to PSD estimated using multitaper + Welch.
Step8: Now we can make some plots of each frequency band. Note that the OPM head
Step9: Alpha
Step10: Beta
Step11: Then OPM
Step12: Gamma
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
# Luke Bloy <luke.bloy@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from mne.filter import next_fast_len
import mne
print(__doc__)
data_path = mne.datasets.opm.data_path()
subject = 'OPM_sample'
subjects_dir = op.join(data_path, 'subjects')
bem_dir = op.join(subjects_dir, subject, 'bem')
bem_fname = op.join(subjects_dir, subject, 'bem',
subject + '-5120-5120-5120-bem-sol.fif')
src_fname = op.join(bem_dir, '%s-oct6-src.fif' % subject)
vv_fname = data_path + '/MEG/SQUID/SQUID_resting_state.fif'
vv_erm_fname = data_path + '/MEG/SQUID/SQUID_empty_room.fif'
vv_trans_fname = data_path + '/MEG/SQUID/SQUID-trans.fif'
opm_fname = data_path + '/MEG/OPM/OPM_resting_state_raw.fif'
opm_erm_fname = data_path + '/MEG/OPM/OPM_empty_room_raw.fif'
opm_trans_fname = None
opm_coil_def_fname = op.join(data_path, 'MEG', 'OPM', 'coil_def.dat')
raws = dict()
raw_erms = dict()
new_sfreq = 90. # Nyquist frequency (45 Hz) < line noise freq (50 Hz)
raws['vv'] = mne.io.read_raw_fif(vv_fname, verbose='error') # ignore naming
raws['vv'].load_data().resample(new_sfreq)
raws['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raw_erms['vv'] = mne.io.read_raw_fif(vv_erm_fname, verbose='error')
raw_erms['vv'].load_data().resample(new_sfreq)
raw_erms['vv'].info['bads'] = ['MEG2233', 'MEG1842']
raws['opm'] = mne.io.read_raw_fif(opm_fname)
raws['opm'].load_data().resample(new_sfreq)
raw_erms['opm'] = mne.io.read_raw_fif(opm_erm_fname)
raw_erms['opm'].load_data().resample(new_sfreq)
# Make sure our assumptions later hold
assert raws['opm'].info['sfreq'] == raws['vv'].info['sfreq']
titles = dict(vv='VectorView', opm='OPM')
ssp_ecg, _ = mne.preprocessing.compute_proj_ecg(
raws['vv'], tmin=-0.1, tmax=0.1, n_grad=1, n_mag=1)
raws['vv'].add_proj(ssp_ecg, remove_existing=True)
# due to how compute_proj_eog works, it keeps the old projectors, so
# the output contains both projector types (and also the original empty-room
# projectors)
ssp_ecg_eog, _ = mne.preprocessing.compute_proj_eog(
raws['vv'], n_grad=1, n_mag=1, ch_name='MEG0112')
raws['vv'].add_proj(ssp_ecg_eog, remove_existing=True)
raw_erms['vv'].add_proj(ssp_ecg_eog)
fig = mne.viz.plot_projs_topomap(raws['vv'].info['projs'][-4:],
info=raws['vv'].info)
fig.suptitle(titles['vv'])
fig.subplots_adjust(0.05, 0.05, 0.95, 0.85)
kinds = ('vv', 'opm')
n_fft = next_fast_len(int(round(4 * new_sfreq)))
print('Using n_fft=%d (%0.1f sec)' % (n_fft, n_fft / raws['vv'].info['sfreq']))
for kind in kinds:
fig = raws[kind].plot_psd(n_fft=n_fft, proj=True)
fig.suptitle(titles[kind])
fig.subplots_adjust(0.1, 0.1, 0.95, 0.85)
# Here we use a reduced size source space (oct5) just for speed
src = mne.setup_source_space(
subject, 'oct5', add_dist=False, subjects_dir=subjects_dir)
# This line removes source-to-source distances that we will not need.
# We only do it here to save a bit of memory, in general this is not required.
del src[0]['dist'], src[1]['dist']
bem = mne.read_bem_solution(bem_fname)
fwd = dict()
# check alignment and generate forward for VectorView
kwargs = dict(azimuth=0, elevation=90, distance=0.6, focalpoint=(0., 0., 0.))
fig = mne.viz.plot_alignment(
raws['vv'].info, trans=vv_trans_fname, subject=subject,
subjects_dir=subjects_dir, dig=True, coord_frame='mri',
surfaces=('head', 'white'))
mne.viz.set_3d_view(figure=fig, **kwargs)
fwd['vv'] = mne.make_forward_solution(
raws['vv'].info, vv_trans_fname, src, bem, eeg=False, verbose=True)
with mne.use_coil_def(opm_coil_def_fname):
fig = mne.viz.plot_alignment(
raws['opm'].info, trans=opm_trans_fname, subject=subject,
subjects_dir=subjects_dir, dig=False, coord_frame='mri',
surfaces=('head', 'white'))
mne.viz.set_3d_view(figure=fig, **kwargs)
fwd['opm'] = mne.make_forward_solution(
raws['opm'].info, opm_trans_fname, src, bem, eeg=False, verbose=True)
del src, bem
freq_bands = dict(
delta=(2, 4), theta=(5, 7), alpha=(8, 12), beta=(15, 29), gamma=(30, 45))
topos = dict(vv=dict(), opm=dict())
stcs = dict(vv=dict(), opm=dict())
snr = 3.
lambda2 = 1. / snr ** 2
for kind in kinds:
noise_cov = mne.compute_raw_covariance(raw_erms[kind])
inverse_operator = mne.minimum_norm.make_inverse_operator(
raws[kind].info, forward=fwd[kind], noise_cov=noise_cov, verbose=True)
stc_psd, sensor_psd = mne.minimum_norm.compute_source_psd(
raws[kind], inverse_operator, lambda2=lambda2,
n_fft=n_fft, dB=False, return_sensor=True, verbose=True)
topo_norm = sensor_psd.data.sum(axis=1, keepdims=True)
stc_norm = stc_psd.sum() # same operation on MNE object, sum across freqs
# Normalize each source point by the total power across freqs
for band, limits in freq_bands.items():
data = sensor_psd.copy().crop(*limits).data.sum(axis=1, keepdims=True)
topos[kind][band] = mne.EvokedArray(
100 * data / topo_norm, sensor_psd.info)
stcs[kind][band] = \
100 * stc_psd.copy().crop(*limits).sum() / stc_norm.data
del inverse_operator
del fwd, raws, raw_erms
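# Sanity check (our addition, a sketch): each band topography stores the percent of
# total sensor power falling in that band, so the sum across bands should stay at or
# below roughly 100 % (the chosen bands do not tile the whole spectrum).
import numpy as np
for kind in kinds:
    total = np.sum([topos[kind][band].data for band in freq_bands], axis=0)
    print('%s: summed band power %0.1f-%0.1f %%' % (kind, total.min(), total.max()))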
def plot_band(kind, band):
    """Plot activity within a frequency band on the subject's brain."""
title = "%s %s\n(%d-%d Hz)" % ((titles[kind], band,) + freq_bands[band])
    fig = topos[kind][band].plot_topomap(
        times=0., scalings=1., cbar_fmt='%0.1f', vmin=0, cmap='inferno',
        time_format=title)
brain = stcs[kind][band].plot(
subject=subject, subjects_dir=subjects_dir, views='cau', hemi='both',
time_label=title, title=title, colormap='inferno',
time_viewer=False, show_traces=False,
clim=dict(kind='percent', lims=(70, 85, 99)), smoothing_steps=10)
brain.show_view(dict(azimuth=0, elevation=0), roll=0)
return fig, brain
fig_theta, brain_theta = plot_band('vv', 'theta')
fig_alpha, brain_alpha = plot_band('vv', 'alpha')
fig_beta, brain_beta = plot_band('vv', 'beta')
fig_beta_opm, brain_beta_opm = plot_band('opm', 'beta')
fig_gamma, brain_gamma = plot_band('vv', 'gamma')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The .csv file contains metadata describing the geographies listed in separate sheets in the .xlsx file.
Step2: Extracting Geography Codes
Step3: The register appears to describe only a subset of the codesets listed in the metadata file - codes relating specifically to Scotland are not provided.
Step4: Metadata for the spreadsheet is contained in the RGC sheet.
|
<ASSISTANT_TASK:>
Python Code:
import sqlite3
con = sqlite3.connect("onsgeocodes.sqlite")
import pandas as pd
#Create a function to grab a zip file from an online location and then grab a specified file from inside it
import requests, zipfile
#The following fudge copes with Python 2 and Python 3
try:
from StringIO import StringIO as zreader
except ImportError:
from io import BytesIO as zreader
def ziparchivereader(f):
return zipfile.ZipFile(f, 'r')
def zipgrabber(url):
''' Grab a zip file from a URL '''
r = requests.get(url)
z = ziparchivereader(zreader(r.content))
#z = zipfile.ZipFile(zreader(r.content))
return z
def zipgrabberfile(url, f):
''' Grab a file by name from a zip file grabbed from a URL '''
return zipgrabber(url).open(f)
def zipfilebyname(z,f):
''' Grab a file by name from an already grabbed zip file '''
return z.open(f)
def zipfilelist(z):
''' Return the names of files contained in a grabbed zip file '''
return z.namelist()
url="https://ons.maps.arcgis.com/sharing/rest/content/items/ef6d5454a6f44ea3a7387b3401e2de5e/data"
!mkdir -p downloads
f='downloads/tmp_register_ons_geo_codes.zip'
!rm {f}
!wget -O {f} {url}
zf=ziparchivereader(f)
zipfilelist(zf)
metadata=pd.read_csv(zf.open('RGC_JUN_2017_UK.csv'))
metadata.head()
xl=pd.read_excel(zf.open('RGC_JUN_2017_UK.xlsx'), sheetname=None)
xl.keys()
metadata["codeAbbrv"] = metadata["Entity code"].map(str) + '_' + metadata["Entity abbreviation"].map(str)
codes = metadata["codeAbbrv"].tolist()
print(set(codes) - set(xl.keys()))
xl['RGC']["codeAbbrv"] = xl['RGC']["Entity code"].map(str) + '_' + xl['RGC']["Entity abbreviation"].map(str)
xl['RGC'].to_sql(con=con, name='metadata', index=False, if_exists='replace')
cols=['GEOGCD','GEOGNM','GEOGNMW','OPER_DATE','TERM_DATE','STATUS']
bigcodes=pd.DataFrame(columns=['sheet']+cols)
bigcodes.to_sql(con=con, name='codelist', index=False, if_exists='replace')
sheets= list(xl.keys())
sheets.remove('For_Scotland')
for sheet in sheets[2:]:
xl[sheet].to_sql(con=con, name=sheet, index=False, if_exists='replace')
xl[sheet]['sheet']=sheet
#Reorder the columns
xl[sheet][['sheet']+cols].to_sql(con=con, name='codelist', index=False, if_exists='append')
q='SELECT * FROM E02_MSOA LIMIT 5'
pd.read_sql_query(q, con)
q='SELECT * FROM codelist WHERE "GEOGCD"="{code}"'.format(code='W40000004')
pd.read_sql_query(q, con)
q='''
SELECT * FROM codelist JOIN metadata
WHERE "GEOGNM"="{name}" AND codeAbbrv=sheet AND codelist.STATUS="live"
'''.format(name='Isle of Wight')
pd.read_sql_query(q, con)
q='''
SELECT DISTINCT "Entity name", sheet FROM codelist JOIN metadata
WHERE "GEOGNM" LIKE "%{name}%" AND codeAbbrv=sheet AND codelist.STATUS="live"
'''.format(name='Isle of Wight')
pd.read_sql_query(q, con)
#Lookup a code
q='''
SELECT * FROM codelist JOIN metadata
WHERE "GEOGCD" = "{name}" AND codeAbbrv=sheet
'''.format(name='E05008479')
pd.read_sql_query(q, con)
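# A small convenience wrapper (our addition) around the code-lookup pattern used above;
# the function name is ours and is not part of the ONS register itself.
def lookup_geocode(code, con=con):
    q = '''
    SELECT * FROM codelist JOIN metadata
    WHERE "GEOGCD" = "{code}" AND codeAbbrv = sheet
    '''.format(code=code)
    return pd.read_sql_query(q, con)
lookup_geocode('E05008479')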
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Authentication and Authorisation
Step3: Post this step, we will have full access to twitter api's
Step9: Streaming with tweepy
Step10: Drawbacks of twitter streaming API
|
<ASSISTANT_TASK:>
Python Code:
import logging # python logging module
# basic format for logging
logFormat = "%(asctime)s - [%(levelname)s] (%(funcName)s:%(lineno)d) %(message)s"
# logs will be stored in tweepy.log
logging.basicConfig(filename='tweepytopuser.log', level=logging.INFO,
format=logFormat, datefmt="%Y-%m-%d %H:%M:%S")
import tweepy # importing all the modules required
import socket # will be used to create sockets
import json # manipulate json
from httplib import IncompleteRead
# Keep these tokens secret, as anyone can have full access to your
# twitter account, using these tokens
consumerKey = "#"
consumerSecret = "#"
accessToken = "#-#"
accessTokenSecret = "#"
# Performing the authentication and authorization, post this step
# we will have full access to twitter api's
def connectToTwitter():
    """Connect to Twitter."""
try:
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth)
logging.info("Successfully logged in to twitter.")
return api, auth
except Exception as e:
logging.info("Something went wrong in oauth, please check your tokens.")
logging.error(e)
# Tweet listner class which subclasses from tweepy.StreamListener
class TweetListner(tweepy.StreamListener):
    """Twitter stream listener."""
def __init__(self, csocket):
self.clientSocket = csocket
def dataProcessing(self, data):
        """Process the data before sending it to Spark streaming."""
sendData = {} # data that is sent to spark streamer
user = data.get("user", {})
name = user.get("name", "undefined").encode('utf-8')
sendData["name"] = name
#data_string = "{}:{}".format(name, followersCount)
self.clientSocket.send(json.dumps(sendData) + u"\n") # append new line character, so that spark recognizes it
logging.debug(json.dumps(sendData))
def on_data(self, raw_data):
        """Called when raw data is received from connection.

        Return False to stop the stream and close the connection.
        """
try:
data = json.loads(raw_data)
self.dataProcessing(data)
#self.clientSocket.send(json.dumps(sendData) + u"\n") # Because the connection was breaking
return True
except Exception as e:
logging.error("An unhandled exception has occured, check your data processing")
logging.error(e)
raise e
def on_error(self, status_code):
        """Called when a non-200 status code is returned."""
logging.error("A non-200 status code is returned")
return True
# Creating a proxy socket
def createProxySocket(host, port):
    """Return a socket which can be used to connect to Spark."""
try:
s = socket.socket() # initialize socket instance
s.bind((host, port)) # bind to the given host and port
s.listen(5) # Enable a server to accept connections.
logging.info("Listening on the port {}".format(port))
cSocket, address = s.accept() # waiting for a connection
logging.info("Received Request from: {}".format(address))
return cSocket
except socket.error as e:
if e.errno == socket.errno.EADDRINUSE: # Address in use
logging.error("The given host:port {}:{} is already in use"\
.format(host, port))
logging.info("Trying on port: {}".format(port + 1))
return createProxySocket(host, port + 1)
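# Illustrative sketch (our addition, kept commented out): a minimal PySpark Streaming
# consumer that could read the JSON lines this script sends over the socket above.
# Requires pyspark; the app name and batch interval below are assumptions.
# from pyspark import SparkContext
# from pyspark.streaming import StreamingContext
# sc = SparkContext(appName="TwitterNameStream")
# ssc = StreamingContext(sc, 10)  # 10-second micro-batches
# lines = ssc.socketTextStream("localhost", 8600)
# lines.map(lambda line: json.loads(line).get("name")).pprint()
# ssc.start()
# ssc.awaitTermination()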
if __name__ == "__main__":
try:
api, auth = connectToTwitter() # connecting to twitter
# Global information is available by using 1 as the WOEID
# woeid = getWOEIDForTrendsAvailable(api, "Worldwide") # get the woeid of the worldwide
host = "localhost"
port = 8600
cSocket = createProxySocket(host, port) # Creating a socket
while True:
try:
# Connect/reconnect the stream
tweetStream = tweepy.Stream(auth, TweetListner(cSocket)) # Stream the twitter data
# DON'T run this approach async or you'll just create a ton of streams!
tweetStream.filter(track="iphone") # Filter on trending topics
except IncompleteRead:
# Oh well, reconnect and keep trucking
continue
except KeyboardInterrupt:
# Or however you want to exit this loop
tweetStream.disconnect()
break
except Exception as e:
logging.error("Unhandled exception has occured")
logging.error(e)
continue
except KeyboardInterrupt: # Keyboard interrupt called
logging.error("KeyboardInterrupt was hit")
except Exception as e:
logging.error("Unhandled exception has occured")
logging.error(e)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 2. Key Properties --> Software Properties
Step12: 2.2. Code Version
Step13: 2.3. Code Languages
Step14: 3. Key Properties --> Timestep Framework
Step15: 3.2. Split Operator Advection Timestep
Step16: 3.3. Split Operator Physical Timestep
Step17: 3.4. Integrated Timestep
Step18: 3.5. Integrated Scheme Type
Step19: 4. Key Properties --> Meteorological Forcings
Step20: 4.2. Variables 2D
Step21: 4.3. Frequency
Step22: 5. Key Properties --> Resolution
Step23: 5.2. Canonical Horizontal Resolution
Step24: 5.3. Number Of Horizontal Gridpoints
Step25: 5.4. Number Of Vertical Levels
Step26: 5.5. Is Adaptive Grid
Step27: 6. Key Properties --> Tuning Applied
Step28: 6.2. Global Mean Metrics Used
Step29: 6.3. Regional Metrics Used
Step30: 6.4. Trend Metrics Used
Step31: 7. Transport
Step32: 7.2. Scheme
Step33: 7.3. Mass Conservation Scheme
Step34: 7.4. Convention
Step35: 8. Emissions
Step36: 8.2. Method
Step37: 8.3. Sources
Step38: 8.4. Prescribed Climatology
Step39: 8.5. Prescribed Climatology Emitted Species
Step40: 8.6. Prescribed Spatially Uniform Emitted Species
Step41: 8.7. Interactive Emitted Species
Step42: 8.8. Other Emitted Species
Step43: 8.9. Other Method Characteristics
Step44: 9. Concentrations
Step45: 9.2. Prescribed Lower Boundary
Step46: 9.3. Prescribed Upper Boundary
Step47: 9.4. Prescribed Fields Mmr
Step48: 9.5. Prescribed Fields Aod Plus Ccn
Step49: 10. Optical Radiative Properties
Step50: 11. Optical Radiative Properties --> Absorption
Step51: 11.2. Dust
Step52: 11.3. Organics
Step53: 12. Optical Radiative Properties --> Mixtures
Step54: 12.2. Internal
Step55: 12.3. Mixing Rule
Step56: 13. Optical Radiative Properties --> Impact Of H2o
Step57: 13.2. Internal Mixture
Step58: 13.3. External Mixture
Step59: 14. Optical Radiative Properties --> Radiative Scheme
Step60: 14.2. Shortwave Bands
Step61: 14.3. Longwave Bands
Step62: 15. Optical Radiative Properties --> Cloud Interactions
Step63: 15.2. Twomey
Step64: 15.3. Twomey Minimum Ccn
Step65: 15.4. Drizzle
Step66: 15.5. Cloud Lifetime
Step67: 15.6. Longwave Bands
Step68: 16. Model
Step69: 16.2. Processes
Step70: 16.3. Coupling
Step71: 16.4. Gas Phase Precursors
Step72: 16.5. Scheme Type
Step73: 16.6. Bulk Scheme Species
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'miroc', 'sandbox-2', 'aerosol')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/volume ratio for aerosols"
# "3D number concenttration for aerosols"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses atmospheric chemistry time stepping"
# "Specific timestepping (operator splitting)"
# "Specific timestepping (integrated)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_3D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.variables_2D')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.meteorological_forcings.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Specific transport scheme (eulerian)"
# "Specific transport scheme (semi-lagrangian)"
# "Specific transport scheme (eulerian and semi-lagrangian)"
# "Specific transport scheme (lagrangian)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.mass_conservation_scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Mass adjustment"
# "Concentrations positivity"
# "Gradients monotonicity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.transport.convention')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Uses Atmospheric chemistry transport scheme"
# "Convective fluxes connected to tracers"
# "Vertical velocities connected to tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Prescribed (climatology)"
# "Prescribed CMIP6"
# "Prescribed above surface"
# "Interactive"
# "Interactive above surface"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Volcanos"
# "Bare ground"
# "Sea surface"
# "Lightning"
# "Fires"
# "Aircraft"
# "Anthropogenic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Interannual"
# "Annual"
# "Monthly"
# "Daily"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.emissions.other_method_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_mmr')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.concentrations.prescribed_fields_aod_plus_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.black_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.dust')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.absorption.organics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.external')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.internal')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.mixtures.mixing_rule')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.size')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.internal_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.impact_of_h2o.external_mixture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.shortwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.radiative_scheme.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.twomey_minimum_ccn')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.drizzle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.cloud_lifetime')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.optical_radiative_properties.cloud_interactions.longwave_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dry deposition"
# "Sedimentation"
# "Wet deposition (impaction scavenging)"
# "Wet deposition (nucleation scavenging)"
# "Coagulation"
# "Oxidation (gas phase)"
# "Oxidation (in cloud)"
# "Condensation"
# "Ageing"
# "Advection (horizontal)"
# "Advection (vertical)"
# "Heterogeneous chemistry"
# "Nucleation"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Radiation"
# "Land surface"
# "Heterogeneous chemistry"
# "Clouds"
# "Ocean"
# "Cryosphere"
# "Gas phase chemistry"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.gas_phase_precursors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "DMS"
# "SO2"
# "Ammonia"
# "Iodine"
# "Terpene"
# "Isoprene"
# "VOC"
# "NOx"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bulk"
# "Modal"
# "Bin"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.aerosol.model.bulk_scheme_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon / soot"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule)"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Theory
Step2: By diagonalizing the true covariance matrix $C$, we can find a transformation of the original dataset whose components describe the variance best while remaining orthogonal to each other
Step3: Now let us compare these directions with the directions chosen by principal component analysis
Step4: We can see that even with a small amount of data they differ only slightly. Let us increase the sample size
Step5: In this case the principal components approximate much more accurately the true directions of the data along which the greatest variance is observed.
Step6: A variational view of the model
Step7: Interpreting the principal components
Step8: Interpreting the principal components using the data
Step9: Analysis of the main shortcomings of principal component analysis
Step10: What is the problem here, why does PCA work poorly? The answer is simple
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
matplotlib.style.use('ggplot')
%matplotlib inline
from sklearn.decomposition import PCA
mu = np.zeros(2)
C = np.array([[3,1],[1,2]])
data = np.random.multivariate_normal(mu, C, size=50)
plt.scatter(data[:,0], data[:,1])
plt.show()
v, W_true = np.linalg.eig(C)
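# Quick check (our addition): since C is symmetric, the eigendecomposition reconstructs
# it exactly as W diag(v) W^T (up to floating-point error).
assert np.allclose(W_true @ np.diag(v) @ W_true.T, C)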
plt.scatter(data[:,0], data[:,1])
# plot the true components along which the data variance is maximal
plt.plot(data[:,0], (W_true[0,0]/W_true[0,1])*data[:,0], color="g")
plt.plot(data[:,0], (W_true[1,0]/W_true[1,1])*data[:,0], color="g")
g_patch = mpatches.Patch(color='g', label='True components')
plt.legend(handles=[g_patch])
plt.axis('equal')
limits = [np.minimum(np.amin(data[:,0]), np.amin(data[:,1])),
np.maximum(np.amax(data[:,0]), np.amax(data[:,1]))]
plt.xlim(limits[0],limits[1])
plt.ylim(limits[0],limits[1])
plt.draw()
def plot_principal_components(data, model, scatter=True, legend=True):
W_pca = model.components_
if scatter:
plt.scatter(data[:,0], data[:,1])
plt.plot(data[:,0], -(W_pca[0,0]/W_pca[0,1])*data[:,0], color="c")
plt.plot(data[:,0], -(W_pca[1,0]/W_pca[1,1])*data[:,0], color="c")
if legend:
c_patch = mpatches.Patch(color='c', label='Principal components')
plt.legend(handles=[c_patch], loc='lower right')
    # make the plots look nice:
plt.axis('equal')
limits = [np.minimum(np.amin(data[:,0]), np.amin(data[:,1]))-0.5,
np.maximum(np.amax(data[:,0]), np.amax(data[:,1]))+0.5]
plt.xlim(limits[0],limits[1])
plt.ylim(limits[0],limits[1])
plt.draw()
model = PCA(n_components=2)
model.fit(data)
plt.scatter(data[:,0], data[:,1])
# plot the true components along which the data variance is maximal
plt.plot(data[:,0], (W_true[0,0]/W_true[0,1])*data[:,0], color="g")
plt.plot(data[:,0], (W_true[1,0]/W_true[1,1])*data[:,0], color="g")
# plot the components obtained using the PCA method:
plot_principal_components(data, model, scatter=False, legend=False)
c_patch = mpatches.Patch(color='c', label='Principal components')
plt.legend(handles=[g_patch, c_patch])
plt.draw()
data_large = np.random.multivariate_normal(mu, C, size=5000)
model = PCA(n_components=2)
model.fit(data_large)
plt.scatter(data_large[:,0], data_large[:,1], alpha=0.1)
# plot the true components along which the data variance is maximal
plt.plot(data_large[:,0], (W_true[0,0]/W_true[0,1])*data_large[:,0], color="g")
plt.plot(data_large[:,0], (W_true[1,0]/W_true[1,1])*data_large[:,0], color="g")
# plot the components obtained using the PCA method:
plot_principal_components(data_large, model, scatter=False, legend=False)
c_patch = mpatches.Patch(color='c', label='Principal components')
plt.legend(handles=[g_patch, c_patch])
plt.draw()
from sklearn.decomposition import PCA
from sklearn.cross_validation import cross_val_score as cv_score
def plot_scores(d_scores):
n_components = np.arange(1,d_scores.size+1)
plt.plot(n_components, d_scores, 'b', label='PCA scores')
plt.xlim(n_components[0], n_components[-1])
plt.xlabel('n components')
plt.ylabel('cv scores')
plt.legend(loc='lower right')
plt.show()
def write_answer_1(optimal_d):
with open("pca_answer1.txt", "w") as fout:
fout.write(str(optimal_d))
data = pd.read_csv('data_task1.csv')
from sklearn.model_selection import cross_val_score
from tqdm import tqdm
best = -np.inf
best_d = None
for d in tqdm(range(len(data.columns), 0, -1)):
model = PCA(n_components=d, svd_solver='full')
scores = cross_val_score(model, X=data)
if np.mean(scores) > best:
best = np.mean(scores)
best_d = d
write_answer_1(best_d)
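# Note (our addition): for a PCA estimator, cross_val_score uses PCA.score, i.e. the
# average log-likelihood of held-out samples under the probabilistic PCA model, so the
# selected best_d maximizes generalization rather than in-sample explained variance.
print('Best d = %d (mean CV log-likelihood %.3f)' % (best_d, best))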
from sklearn.decomposition import PCA
from sklearn.cross_validation import cross_val_score as cv_score
def plot_variances(d_variances):
n_components = np.arange(1,d_variances.size+1)
plt.plot(n_components, d_variances, 'b', label='Component variances')
plt.xlim(n_components[0], n_components[-1])
plt.xlabel('n components')
plt.ylabel('variance')
plt.legend(loc='upper right')
plt.show()
def write_answer_2(optimal_d):
with open("pca_answer2.txt", "w") as fout:
fout.write(str(optimal_d))
data = pd.read_csv('data_task2.csv')
model = PCA(n_components=len(data.columns), svd_solver='full')
model.fit(data)
t_data = model.transform(data)
variances = np.std(t_data, axis = 0)
diff = variances[:-1] - variances[1:]
plot_variances(variances)
best = np.argmax(diff) + 1
write_answer_2(best)
from sklearn import datasets
def plot_iris(transformed_data, target, target_names):
plt.figure()
for c, i, target_name in zip("rgb", [0, 1, 2], target_names):
plt.scatter(transformed_data[target == i, 0],
transformed_data[target == i, 1], c=c, label=target_name)
plt.legend()
plt.show()
def write_answer_3(list_pc1, list_pc2):
with open("pca_answer3.txt", "w") as fout:
fout.write(" ".join([str(num) for num in list_pc1]))
fout.write(" ")
fout.write(" ".join([str(num) for num in list_pc2]))
# load the iris dataset
iris = datasets.load_iris()
data = iris.data
target = iris.target
target_names = iris.target_names
model = PCA(n_components=2, svd_solver='full')
model.fit(data)
t_data = model.transform(data)
plot_iris(t_data, target, target_names)
best_l = []
best_r = []
left = t_data[:, 0].reshape(len(t_data),)
right = t_data[:, 1].reshape(len(t_data),)
for idx in range(data.shape[1]):
cov_l = np.corrcoef(np.vstack((left, data[:, idx].reshape(len(t_data),))))
cov_r = np.corrcoef(np.vstack((right, data[:, idx].reshape(len(t_data),))))
if cov_l[0, 1] > cov_r[0, 1]:
best_l.append(idx + 1)
else:
best_r.append(idx + 1)
best_l, best_r
write_answer_3(best_l, best_r)
from sklearn.datasets import fetch_olivetti_faces
from sklearn.decomposition import RandomizedPCA
def write_answer_4(list_pc):
with open("pca_answer4.txt", "w") as fout:
fout.write(" ".join([str(num) for num in list_pc]))
data = fetch_olivetti_faces(shuffle=True, random_state=0).data
image_shape = (64, 64)
model = RandomizedPCA(n_components=10)
model.fit(data)
t_data = model.transform(data)
t_data
t_data = t_data - np.mean(t_data, axis=0)
t_data = np.power(t_data, 2)
t_data = t_data / np.sum(t_data, axis = 1).reshape(len(t_data), 1)
indexes = np.argmax(t_data, axis = 0)
write_answer_4(indexes)
indexes
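# Visual check (our addition, a sketch): show the face whose normalized squared
# projection (the cosine measure computed above) is largest for the first component.
plt.imshow(data[indexes[0]].reshape(image_shape), cmap='gray')
plt.title('Face dominating principal component 1')
plt.show()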
C1 = np.array([[10,0],[0,0.5]])
phi = np.pi/3
C2 = np.dot(C1, np.array([[np.cos(phi), np.sin(phi)],
[-np.sin(phi),np.cos(phi)]]))
data = np.vstack([np.random.multivariate_normal(mu, C1, size=50),
np.random.multivariate_normal(mu, C2, size=50)])
plt.scatter(data[:,0], data[:,1])
# plot the true components we are interested in
plt.plot(data[:,0], np.zeros(data[:,0].size), color="g")
plt.plot(data[:,0], 3**0.5*data[:,0], color="g")
# fit the PCA model and plot its principal components
model = PCA(n_components=2)
model.fit(data)
plot_principal_components(data, model, scatter=False, legend=False)
c_patch = mpatches.Patch(color='c', label='Principal components')
plt.legend(handles=[g_patch, c_patch])
plt.draw()
C = np.array([[0.5,0],[0,10]])
mu1 = np.array([-2,0])
mu2 = np.array([2,0])
data = np.vstack([np.random.multivariate_normal(mu1, C, size=50),
np.random.multivariate_normal(mu2, C, size=50)])
plt.scatter(data[:,0], data[:,1])
# fit the PCA model and plot its principal components
model = PCA(n_components=2)
model.fit(data)
plot_principal_components(data, model)
plt.draw()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Write a query that returns the contact names of the suppliers who
Step2: Write a query that returns the customer names, the corresponding order numbers, and the name of the employee who processed the order. The result should be sorted by contact person!
Step3: Write a query that returns all customer names that placed an order which was shipped by Speedy Express!
|
<ASSISTANT_TASK:>
Python Code:
%load_ext sql
%sql mysql://steinam:steinam@localhost/nordwind
%%sql
select l.`Kontaktperson` , a.`Artikelname` from artikel a, lieferanten l
where a.`LieferantenNr` = l.`LieferantenNr` and a.`Kategorie-Nr` in ('1','2','3')
%%sql
select k.`Firma`,b.`BestellNr` ,p.`Nachname`
from Kunden k, bestellungen b, personal p
where p.`PersonalNr` = b.`PersonalNr` and b.`KundenCode` = k.`KundenCode`
order by k.`Kontaktperson`
%%sql
select Artikelname, Einzelpreis from Artikel a, `Lieferanten` l
where l.`LieferantenNr` = a.`LieferantenNr` and Einzelpreis = (
select max(Einzelpreis) from artikel a2
where l.`LieferantenNr` = a2.`LieferantenNr` and `LieferantenNr` in
(
select `LieferantenNr` from Lieferanten where upper(Ort) = 'BERLIN'
)
)
;
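%%sql
-- Sketch (our addition) for the Speedy Express task described above. The shipper
-- table/column names (`versandfirmen`, `FirmaNr`, `Versandüber`) are assumptions
-- based on the standard German Nordwind schema and may need adjusting.
select distinct k.`Firma`
from kunden k, bestellungen b, versandfirmen v
where k.`KundenCode` = b.`KundenCode`
  and b.`Versandüber` = v.`FirmaNr`
  and v.`Firma` = 'Speedy Express'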
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import some data to play with
Step2: Split the data and prepare data for ROC Curve
Step3: Plot ROC Curve using Matplotlib
Step4: Create ROCAUC using YellowBrick
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
bc = datasets.load_breast_cancer()
X = bc.data
y = bc.target
random_state = np.random.RandomState(0)
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Learn to predict each class against the other
classifier = svm.SVC(kernel='linear', probability=True, random_state=random_state)
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
import yellowbrick as yb
from yellowbrick.classifier import ROCAUC
visualizer = ROCAUC(classifier)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
g = visualizer.show()             # Draw/show the data
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: P4.2
Step2: So $z=s$ is a free variable, and the rest of the equation
Step3: The free variable is $z=t$.
|
<ASSISTANT_TASK:>
Python Code:
# helper code needed for running in colab
if 'google.colab' in str(get_ipython()):
    print('Downloading plot_helpers.py to util/ (only needed for colab)')
!mkdir util; wget https://raw.githubusercontent.com/minireference/noBSLAnotebooks/master/util/plot_helpers.py -P util
# setup SymPy
from sympy import *
init_printing()
# setup plotting
%matplotlib inline
import matplotlib.pyplot as mpl
from util.plot_helpers import plot_plane, plot_line, plot_vec, plot_vecs
# aliases
Vector = Matrix # define alias Vector so I don't have to explain this during video
Point = Vector # define alias Point for Vector since they're the same thing
# a) x y z | c
A = Matrix([[3, -2, -1, 2],
[1, 2, 1, 0]])
A.rref()
# b) x y z | c
B = Matrix([[2, 1, -1, 0],
[1, 2, 1, 3]])
B.rref()
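# Sketch (our addition): read off the parametric solution of system b) directly with
# SymPy's linsolve; the solution comes out expressed in terms of the free variable z.
x, y, z = symbols('x y z')
linsolve(B, (x, y, z))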
v = Vector([3, 4, 1])
normal = Vector([2, -1, 4])
vPperp = (normal.dot(v)/normal.norm()**2)*normal
print('vPperp =', vPperp)
vP = v - vPperp
print('vP =', vP)
plot_plane(normal, 0) # plane P
plot_vec(0.2*normal, color='r') # its normal vec
plot_vecs(v, vPperp, vP)
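# Sanity check (our addition): the projection vP lies in the plane (orthogonal to the
# normal) and the two pieces recombine to the original vector v.
assert simplify(normal.dot(vP)) == 0
assert simplify((vP + vPperp - v).norm()) == 0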
v = Vector([3, 4, 1])
normal = Vector([2, -1, 4])
D=4
# point on P closest to the origin
alpha = D/normal.norm()**2
p_closest = alpha*normal
# print('len normal', normal.norm())
# print('p_closest', p_closest)
assert p_closest.dot(normal) == 4
vPperp = (normal.dot(v)/normal.norm()**2)*normal
print('vPperp', vPperp)
v_wrong = v - vPperp
print('v_wrong', v_wrong)
plot_plane(normal, D) # plane P
plot_vec(0.2*normal, at=p_closest, color='r') # its normal vec
plot_vecs(v, vPperp, v_wrong)
ax = mpl.gca()
ax.grid(True,which='both')
v = Vector([3, 4, 1])
normal = Vector([2, -1, 4])
D = 4
# some point on P
p0 = Point([2,0,0])
u = v - p0 # vector from p0 to tip of v
uPperp = (normal.dot(u)/normal.norm()**2)*normal
print('uPperp', uPperp)
uInP = u - uPperp
proj_v_on_P = p0 + uInP
print('proj_v_on_P', proj_v_on_P)
plot_plane(normal, D) # plane P
plot_vec(0.2*normal, at=p_closest, color='r') # its normal vec
plot_vec(v)
plot_vec(u, at=p0, color='r')
plot_vec(uPperp, at=p0, color='b')
plot_vec(uInP, at=p0, color='g')
plot_vec(proj_v_on_P, color='y')
ax = mpl.gca()
ax.grid(True,which='both')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Here we import the libraries we need. We'll learn about what each does during the course.
Step2: making folder structure and downloading some images
Step3: somehow the script managed to download things that aren't images, remove them
Step4: move some images from the classes into validation and test
Step5: First look at cat pictures
Step6: Here is what the raw data looks like
Step12: Our first model
Step13: How good is this model? Well, as we mentioned, prior to this competition, the state of the art was 80% accuracy. But the competition resulted in a huge jump to 98.9% accuracy, with the author of a popular deep learning library winning the competition. Extraordinarily, less than 4 years later, we can now beat that result in seconds! Even last year in this same course, our initial model had 98.3% accuracy, which is nearly double the error we're getting just a year later, and that took around 10 minutes to compute.
Step14: Choosing a learning rate
Step15: Our learn object contains an attribute sched that contains our learning rate scheduler, and has some convenient plotting functionality including this one
Step16: Note that in the previous plot iteration is one iteration (or minibatch) of SGD. In one epoch there are
Step17: The loss is still clearly improving at lr=1e-2 (0.01), so that's what we use. Note that the optimal learning rate can change as we train the model, so you may want to re-run this function from time to time.
Step18: Let's create a new data object that includes this augmentation in the transforms.
Step19: By default when we create a learner, it sets all but the last layer to frozen. That means that it's still only updating the weights in the last layer when we call fit.
Step20: What is that cycle_len parameter? What we've done here is used a technique called stochastic gradient descent with restarts (SGDR), a variant of learning rate annealing, which gradually decreases the learning rate as training progresses. This is helpful because as we get closer to the optimal weights, we want to take smaller steps.
Step21: Our validation loss isn't improving much, so there's probably no point further training the last layer on its own.
Step22: Fine-tuning and differential learning rate annealing
Step23: Note that the other layers have already been trained to recognize imagenet photos (whereas our final layers were randomly initialized), so we want to be careful of not destroying the carefully tuned weights that are already there.
Step24: Another trick we've used here is adding the cycle_mult parameter. Take a look at the following chart, and see if you can figure out what the parameter is doing
Step25: Note that what's being plotted above is the learning rate of the final layers. The learning rates of the earlier layers are fixed at the same multiples of the final layer rates as we initially requested (i.e. the first layers have 100x smaller, and middle layers 10x smaller learning rates, since we set lr=np.array([1e-4,1e-3,1e-2]).
Step26: There is something else we can do with data augmentation
Step27: I generally see about a 10-20% reduction in error on this dataset when using TTA at this point, which is an amazing result for such a quick and easy technique!
Step28: A common way to analyze the result of a classification model is to use a confusion matrix. Scikit-learn has a convenient function we can use for this purpose
Step29: We can just print out the confusion matrix, or we can show a graphical view (which is mainly useful for dependent variables with a larger number of categories).
Step30: Looking at pictures again
Step31: Review
Step32: We need a path that points to the dataset. In this path we will also store temporary data and final results. ImageClassifierData.from_paths reads data from a provided path and creates a dataset ready for training.
Step33: ConvLearner.pretrained builds a learner that contains a pre-trained model. The last layer of the model needs to be replaced with a layer of the right dimensions. The pretrained model was trained for 1000 classes, therefore the final layer predicts a vector of 1000 probabilities. The model for cats and dogs needs to output a two dimensional vector. The diagram below shows in an example how this was done in one of the earliest successful CNNs. The layer "FC8" here would get replaced with a new layer with 2 outputs.
Step34: Parameters are learned by fitting a model to the data. Hyperparameters are another kind of parameter, that cannot be directly learned from the regular training process. These parameters express “higher-level” properties of the model such as its complexity or how fast it should learn. Two examples of hyperparameters are the learning rate and the number of epochs.
Step35: Analyzing results
|
<ASSISTANT_TASK:>
Python Code:
# Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
from data_science.j_utils import copyanything
PATH = "data/desserts/"
sz = 224
import os
def walklevel(some_dir, level=1):
'''For copying dirs n levels deep from some_dir. n = 1 by default.'''
some_dir = some_dir.rstrip(os.path.sep)
assert os.path.isdir(some_dir)
num_sep = some_dir.count(os.path.sep)
for root, dirs, files in os.walk(some_dir):
yield root, dirs, files
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
if not os.path.isdir(PATH):
os.mkdir(PATH)
inputpath = 'data/dogscats/'
outputpath = 'data/desserts'
for dirpath, dirnames, filenames in walklevel(inputpath, level=1):
structure = os.path.join(outputpath, dirpath[len(inputpath):])
if not os.path.isdir(structure):
os.mkdir(structure)
else:
print("Folder already exists!")
# used python ~/google_image_finder/image_download.py <class> 500 to download images
# move the dataset in homedir to data dir, and rename
making_dataset = False
if making_dataset:
!cp -R /home/justin/dataset/. data/desserts/train/
!rm -rf /home/justin/dataset
import os
import imghdr
# print(imghdr.what(f'{PATH}train/cheesecake/cheesecake_0.jpg'))
def get_dirs_dataset(PATH):
'''Function to get paths to lowest directories in PATH. Used for purging
images that mpl cannot load.'''
need_checking_dirs = []
for root, dirs, files in os.walk(PATH):
if not dirs:
need_checking_dirs.append(root)
return need_checking_dirs
def remove_bad_imgs(need_checking_dirs):
for dirs in need_checking_dirs:
for root, _, files in os.walk(dirs):
for file in files:
fpath = os.path.join(root, file)
if not imghdr.what(fpath):
try:
img = plt.imread(fpath)
except OSError:
os.remove(fpath)
def checked_imgs(need_checking_dirs):
for dirs in need_checking_dirs:
for root, _, files in os.walk(dirs):
for file in files:
fpath = os.path.join(root, file)
if not imghdr.what(fpath):
try:
img = plt.imread(fpath)
except OSError:
print('There are still bad images somehow!')
return None
print('All iamges seem good to go!')
return True
removed_bad_imgs = True
if not removed_bad_imgs:
remove_bad_imgs(get_dirs_dataset(PATH+'train'))
imgs_checked = checked_imgs(get_dirs_dataset(PATH+'train'))
from sklearn.model_selection import train_test_split
def get_all_filepaths(PATH):
all_filepaths = []
for root, dirs, files in os.walk(PATH+'train'):
for file in files:
all_filepaths.append(os.path.join(root,file))
return all_filepaths
import os
import errno
def remove_empty_folders(path, removeRoot=True):
'Function to remove empty folders'
if not os.path.isdir(path):
return
# remove empty subfolders
files = os.listdir(path)
if len(files):
for f in files:
fullpath = os.path.join(path, f)
if os.path.isdir(fullpath):
remove_empty_folders(fullpath)
# if folder empty, delete it
files = os.listdir(path)
if len(files) == 0 and removeRoot:
print("Removing empty folder:", path)
os.rmdir(path)
def move_to_target(validset, target):
for file in validset:
dst = file.replace('train', target)
os.makedirs(os.path.dirname(dst), exist_ok=True)
os.rename(file, dst)
print('moved files from train to {0}.'.format(target))
return True
def convert_to_test(PATH):
k = 0
for root, subdirs, files in os.walk(PATH):
if len(files) > 0:
for file in files:
os.rename(os.path.join(root, file), os.path.join(PATH, '{0}.jpg'.format(k)))
k += 1
remove_empty_folders(PATH)
def convert_to_jpg(file):
try:
im = Image.open(file)
except:
os.remove(file)
if '.jpg' not in file:
try:
im = Image.open(file)
name, ext = file.split('.')
im.save(name+'.jpg', 'JPG')
os.remove(file)
im = Image.open(name+'.jpg')
plt.imshow(im)
except:
os.remove(file)
already_moved_valid = True
if not already_moved_valid:
trainset, validset= train_test_split(get_all_filepaths(PATH), test_size=.2)
move_to_target(validset, 'valid')
# make testset
already_moved_test = True
if not already_moved_test:
trainset, testset = train_test_split(get_all_filepaths(PATH), test_size=.2)
move_to_target(testset, 'test1')
convert_to_test(PATH+'test1')
import warnings
warnings.filterwarnings("error")
# convert all images to .jpg
dirs_to_convert = ['train', 'valid', 'test1']
for dirs in dirs_to_convert:
for root, subdirs, files in os.walk(PATH+dirs):
print(root, len(files))
if len(files) > 0:
for file in files:
convert_to_jpg(os.path.join(root,file))
# check images again...
# convert all images to .jpg
dirs_to_check = ['train', 'valid', 'test1']
for dirs in dirs_to_check:
for root, subdirs, files in os.walk(PATH+dirs):
print(root, len(files))
if len(files) > 0:
for file in files:
im = Image.open(os.path.join(root, file))
try:
plt.imshow(im)
except:
os.remove(os.path.join(root, file))
# make sample
inputpath = PATH
outputpath = os.path.join(PATH, 'sample')
for dirpath, dirnames, filenames in os.walk(inputpath):
structure = os.path.join(outputpath, dirpath[len(inputpath):])
if os.path.join(outputpath, 'sample') in structure:
pass
elif not os.path.isdir(structure):
os.mkdir(structure)
if len(filenames) > 0:
try:
files_to_copy = np.random.choice(filenames, 50, replace=False)
except:
files_to_copy = np.random.choice(filenames, 50, replace=True)
for file in files_to_copy:
copyanything(os.path.join(dirpath, file), os.path.join(structure,file))
else:
print("Folder {0} already exists! {1} files contained.".format(structure, len(os.listdir(structure))))
!ls {PATH}
!ls {PATH}valid
files = !ls {PATH}valid/strawberry_cake | head
files
img = plt.imread(f'{PATH}valid/strawberry_cake/{files[0]}')
plt.imshow(img);
img.shape
img[:4,:4]
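# Quick look (our addition): the raw image is a plain uint8 array of pixel values.
print(img.dtype, img.min(), img.max())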
# Uncomment the below if you need to reset your precomputed activations
# !rm -rf {PATH}tmp
%pdb
%%writefile ../../fastai/dataset.py
# %load ../../fastai/dataset.py
from .imports import *
from .torch_imports import *
from .core import *
from .transforms import *
from .layer_optimizer import *
from .dataloader import DataLoader
def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):
np.random.seed(seed)
n_val = int(val_pct*n)
idx_start = cv_idx*n_val
idxs = np.random.permutation(n)
return idxs[idx_start:idx_start+n_val]
def resize_img(fname, targ, path, new_path):
dest = os.path.join(path,new_path,str(targ),fname)
if os.path.exists(dest): return
im = Image.open(os.path.join(path, fname)).convert('RGB')
r,c = im.size
ratio = targ/min(r,c)
sz = (scale_to(r, ratio, targ), scale_to(c, ratio, targ))
os.makedirs(os.path.split(dest)[0], exist_ok=True)
im.resize(sz, Image.LINEAR).save(dest)
def resize_imgs(fnames, targ, path, new_path):
if not os.path.exists(os.path.join(path,new_path,str(targ),fnames[0])):
with ThreadPoolExecutor(8) as e:
ims = e.map(lambda x: resize_img(x, targ, path, 'tmp'), fnames)
for x in tqdm(ims, total=len(fnames), leave=False): pass
return os.path.join(path,new_path,str(targ))
def read_dir(path, folder):
# TODO: warn or error if no files found?
full_path = os.path.join(path, folder)
    fnames = list(iglob(f"{full_path}/*.*"))
if any(fnames):
return [os.path.relpath(f,path) for f in fnames]
else:
raise FileNotFoundError("{} folder doesn't exist or is empty".format(folder))
def read_dirs(path, folder):
labels, filenames, all_labels = [], [], []
full_path = os.path.join(path, folder)
for label in sorted(os.listdir(full_path)):
all_labels.append(label)
for fname in os.listdir(os.path.join(full_path, label)):
filenames.append(os.path.join(folder, label, fname))
labels.append(label)
# import pdb; pdb.set_trace()
return filenames, labels, all_labels
def n_hot(ids, c):
res = np.zeros((c,), dtype=np.float32)
res[ids] = 1
return res
def folder_source(path, folder):
fnames, lbls, all_labels = read_dirs(path, folder)
label2idx = {v:k for k,v in enumerate(all_labels)}
idxs = [label2idx[lbl] for lbl in lbls]
c = len(all_labels)
label_arr = np.array(idxs, dtype=int)
return fnames, label_arr, all_labels
def parse_csv_labels(fn, skip_header=True):
skip = 1 if skip_header else 0
csv_lines = [o.strip().split(',') for o in open(fn)][skip:]
fnames = [fname for fname, _ in csv_lines]
csv_labels = {a:b.split(' ') for a,b in csv_lines}
all_labels = sorted(list(set(p for o in csv_labels.values() for p in o)))
label2idx = {v:k for k,v in enumerate(all_labels)}
return sorted(fnames), csv_labels, all_labels, label2idx
def nhot_labels(label2idx, csv_labels, fnames, c):
all_idx = {k: n_hot([label2idx[o] for o in v], c)
for k,v in csv_labels.items()}
return np.stack([all_idx[o] for o in fnames])
def csv_source(folder, csv_file, skip_header=True, suffix='', continuous=False):
fnames,csv_labels,all_labels,label2idx = parse_csv_labels(csv_file, skip_header)
full_names = [os.path.join(folder,fn+suffix) for fn in fnames]
if continuous:
label_arr = np.array([csv_labels[i] for i in fnames]).astype(np.float32)
else:
label_arr = nhot_labels(label2idx, csv_labels, fnames, len(all_labels))
is_single = np.all(label_arr.sum(axis=1)==1)
if is_single: label_arr = np.argmax(label_arr, axis=1)
return full_names, label_arr, all_labels
class BaseDataset(Dataset):
def __init__(self, transform=None):
self.transform = transform
self.n = self.get_n()
self.c = self.get_c()
self.sz = self.get_sz()
def __getitem__(self, idx):
x,y = self.get_x(idx),self.get_y(idx)
return self.get(self.transform, x, y)
def __len__(self): return self.n
def get(self, tfm, x, y):
return (x,y) if tfm is None else tfm(x,y)
@abstractmethod
def get_n(self): raise NotImplementedError
@abstractmethod
def get_c(self): raise NotImplementedError
@abstractmethod
def get_sz(self): raise NotImplementedError
@abstractmethod
def get_x(self, i): raise NotImplementedError
@abstractmethod
def get_y(self, i): raise NotImplementedError
@property
def is_multi(self): return False
@property
def is_reg(self): return False
def open_image(fn):
    """Opens an image using OpenCV given the file path.

    Arguments:
        fn: the file path of the image
    Returns:
        The numpy array representation of the image in the RGB format
    """
flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
if not os.path.exists(fn):
print('No such file or directory: {}'.format(fn))
elif os.path.isdir(fn):
print('Is a directory: {}'.format(fn))
else:
try:
return cv2.cvtColor(cv2.imread(fn, flags), cv2.COLOR_BGR2RGB).astype(np.float32)/255
except Exception as e:
print(fn, e)
class FilesDataset(BaseDataset):
'''Justin modified get_x'''
def __init__(self, fnames, transform, path):
self.path,self.fnames = path,fnames
super().__init__(transform)
def get_n(self): return len(self.y)
def get_sz(self): return self.transform.sz
def get_x(self, i): return open_image(os.path.join(self.path, self.fnames[i]))
def resize_imgs(self, targ, new_path):
dest = resize_imgs(self.fnames, targ, self.path, new_path)
return self.__class__(self.fnames, self.y, self.transform, dest)
def denorm(self,arr):
        """Reverse the normalization done to a batch of images.

        Arguments:
            arr: of shape/size (N,3,sz,sz)
        """
if type(arr) is not np.ndarray: arr = to_np(arr)
if len(arr.shape)==3: arr = arr[None]
return self.transform.denorm(np.rollaxis(arr,1,4))
class FilesArrayDataset(FilesDataset):
def __init__(self, fnames, y, transform, path):
self.y=y
assert(len(fnames)==len(y))
super().__init__(fnames, transform, path)
def get_y(self, i): return self.y[i]
def get_c(self): return self.y.shape[1]
class FilesIndexArrayDataset(FilesArrayDataset):
def get_c(self): return int(self.y.max())+1
class FilesNhotArrayDataset(FilesArrayDataset):
@property
def is_multi(self): return True
class FilesIndexArrayRegressionDataset(FilesArrayDataset):
def is_reg(self): return True
class ArraysDataset(BaseDataset):
def __init__(self, x, y, transform):
self.x,self.y=x,y
assert(len(x)==len(y))
super().__init__(transform)
def get_x(self, i): return self.x[i]
def get_y(self, i): return self.y[i]
def get_n(self): return len(self.y)
def get_sz(self): return self.x.shape[1]
class ArraysIndexDataset(ArraysDataset):
def get_c(self): return int(self.y.max())+1
def get_y(self, i): return self.y[i]
class ArraysNhotDataset(ArraysDataset):
def get_c(self): return self.y.shape[1]
@property
def is_multi(self): return True
class ModelData():
def __init__(self, path, trn_dl, val_dl, test_dl=None):
self.path,self.trn_dl,self.val_dl,self.test_dl = path,trn_dl,val_dl,test_dl
@classmethod
def from_dls(cls, path,trn_dl,val_dl,test_dl=None):
trn_dl,val_dl = ModelDataLoader(trn_dl),ModelDataLoader(val_dl)
if test_dl: test_dl = ModelDataLoader(test_dl)
return cls(path, trn_dl, val_dl, test_dl)
@property
def is_reg(self): return self.trn_ds.is_reg
@property
def trn_ds(self): return self.trn_dl.dataset
@property
def val_ds(self): return self.val_dl.dataset
@property
def test_ds(self): return self.test_dl.dataset
@property
def trn_y(self): return self.trn_ds.y
@property
def val_y(self): return self.val_ds.y
class ModelDataLoader():
def __init__(self, dl): self.dl=dl
@classmethod
def create_dl(cls, *args, **kwargs): return cls(DataLoader(*args, **kwargs))
def __iter__(self):
self.it,self.i = iter(self.dl),0
return self
def __len__(self): return len(self.dl)
def __next__(self):
if self.i>=len(self.dl): raise StopIteration
self.i+=1
return next(self.it)
@property
def dataset(self): return self.dl.dataset
class ImageData(ModelData):
def __init__(self, path, datasets, bs, num_workers, classes):
trn_ds,val_ds,fix_ds,aug_ds,test_ds,test_aug_ds = datasets
self.path,self.bs,self.num_workers,self.classes = path,bs,num_workers,classes
self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl,self.test_dl,self.test_aug_dl = [
self.get_dl(ds,shuf) for ds,shuf in [
(trn_ds,True),(val_ds,False),(fix_ds,False),(aug_ds,False),
(test_ds,False),(test_aug_ds,False)
]
]
def get_dl(self, ds, shuffle):
if ds is None: return None
return ModelDataLoader.create_dl(ds, batch_size=self.bs, shuffle=shuffle,
num_workers=self.num_workers, pin_memory=False)
@property
def sz(self): return self.trn_ds.sz
@property
def c(self): return self.trn_ds.c
def resized(self, dl, targ, new_path):
return dl.dataset.resize_imgs(targ,new_path) if dl else None
def resize(self, targ, new_path):
new_ds = []
dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl]
if self.test_dl: dls += [self.test_dl, self.test_aug_dl]
else: dls += [None,None]
t = tqdm_notebook(dls)
for dl in t: new_ds.append(self.resized(dl, targ, new_path))
t.close()
return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)
class ImageClassifierData(ImageData):
@property
def is_multi(self): return self.trn_dl.dataset.is_multi
@staticmethod
def get_ds(fn, trn, val, tfms, test=None, **kwargs):
res = [
fn(trn[0], trn[1], tfms[0], **kwargs), # train
fn(val[0], val[1], tfms[1], **kwargs), # val
fn(trn[0], trn[1], tfms[1], **kwargs), # fix
fn(val[0], val[1], tfms[0], **kwargs) # aug
]
if test is not None:
test_lbls = np.zeros((len(test),1))
res += [
fn(test, test_lbls, tfms[1], **kwargs), # test
fn(test, test_lbls, tfms[0], **kwargs) # test_aug
]
else: res += [None,None]
return res
@classmethod
def from_arrays(cls, path, trn, val, bs=64, tfms=(None,None), classes=None, num_workers=4, test=None):
        """Read in images and their labels given as numpy arrays.

        Arguments:
            path: a root path of the data (used for storing trained models, precomputed values, etc)
            trn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the
                shape of `(5000, 784)` and `y` has the shape of `(5000,)`)
            val: a tuple of validation data matrix and target label/classification array.
            bs: batch size
            tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
            classes: a list of all labels/classifications
            num_workers: a number of workers
            test: a matrix of test data (the shape should match `trn[0]`)
        Returns:
            ImageClassifierData
        """
datasets = cls.get_ds(ArraysIndexDataset, trn, val, tfms, test=test)
return cls(path, datasets, bs, num_workers, classes=classes)
@classmethod
def from_paths(cls, path, bs=64, tfms=(None,None), trn_name='train', val_name='valid', test_name=None, num_workers=8):
        """Read in images and their labels given as sub-folder names.

        Arguments:
            path: a root path of the data (used for storing trained models, precomputed values, etc)
            bs: batch size
            tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
            trn_name: a name of the folder that contains training images.
            val_name: a name of the folder that contains validation images.
            test_name: a name of the folder that contains test images.
            num_workers: number of workers
        Returns:
            ImageClassifierData
        """
trn,val = [folder_source(path, o) for o in (trn_name, val_name)]
test_fnames = read_dir(path, test_name) if test_name else None
datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test_fnames)
return cls(path, datasets, bs, num_workers, classes=trn[2])
@classmethod
def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None),
val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8):
        """Read in images and their labels given as a CSV file.

        This method should be used when training image labels are given in a CSV file as opposed to
        sub-directories with label names.

        Arguments:
            path: a root path of the data (used for storing trained models, precomputed values, etc)
            folder: a name of the folder in which training images are contained.
            csv_fname: a name of the CSV file which contains target labels.
            bs: batch size
            tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
            val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`
            suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file
                extension e.g. '.jpg' - in which case, you can set suffix as '.jpg')
            test_name: a name of the folder which contains test images.
            continuous: TODO
            skip_header: skip the first row of the CSV file.
            num_workers: number of workers
        Returns:
            ImageClassifierData
        """
fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous)
((val_fnames,trn_fnames),(val_y,trn_y)) = split_by_idx(val_idxs, np.array(fnames), y)
test_fnames = read_dir(path, test_name) if test_name else None
if continuous:
f = FilesIndexArrayRegressionDataset
else:
f = FilesIndexArrayDataset if len(trn_y.shape)==1 else FilesNhotArrayDataset
datasets = cls.get_ds(f, (trn_fnames,trn_y), (val_fnames,val_y), tfms,
path=path, test=test_fnames)
return cls(path, datasets, bs, num_workers, classes=classes)
def split_by_idx(idxs, *a):
mask = np.zeros(len(a[0]),dtype=bool)
mask[np.array(idxs)] = True
return [(o[mask],o[~mask]) for o in a]
arch=resnet34
data = ImageClassifierData.from_paths(PATH, tfms=tfms_from_model(arch, sz))
learn = ConvLearner.pretrained(arch, data, precompute=True)
learn.fit(0.01, 3)
learn.fit(0.01,3)
learn.fit(0.01,2)
# This is the label for a val data
data.val_y
# from here we know that 'cats' is label 0 and 'dogs' is label 1.
data.classes
# this gives prediction for validation set. Predictions are in log scale
log_preds = learn.predict()
log_preds.shape
log_preds[:10]
preds = np.argmax(log_preds, axis=1) # from log probabilities to 0 or 1
probs = np.exp(log_preds[:,1]) # pr(dog)
def rand_by_mask(mask): return np.random.choice(np.where(mask)[0], 4, replace=False)
def rand_by_correct(is_correct): return rand_by_mask((preds == data.val_y)==is_correct)
# def plot_val_with_title(idxs, title):
# imgs = np.stack([data.val_ds[x][0] for x in idxs])
# title_probs = [probs[x] for x in idxs]
# print(title)
# return plots(data.val_ds.denorm(imgs), rows=1, titles=title_probs)
def plots(ims, figsize=(12,6), rows=1, titles=None):
f = plt.figure(figsize=figsize)
for i in range(len(ims)):
sp = f.add_subplot(rows, len(ims)//rows, i+1)
sp.axis('Off')
if titles is not None: sp.set_title(titles[i], fontsize=16)
plt.imshow(ims[i])
def load_img_id(ds, idx): return np.array(PIL.Image.open(PATH+ds.fnames[idx]))
def plot_val_with_title(idxs, title):
imgs = [load_img_id(data.val_ds,x) for x in idxs]
title_probs = [probs[x] for x in idxs]
print(title)
return plots(imgs, rows=1, titles=title_probs, figsize=(16,8))
# 1. A few correct labels at random
plot_val_with_title(rand_by_correct(True), "Correctly classified")
# 2. A few incorrect labels at random
plot_val_with_title(rand_by_correct(False), "Incorrectly classified")
def most_by_mask(mask, mult):
idxs = np.where(mask)[0]
return idxs[np.argsort(mult * probs[idxs])[:4]]
def most_by_correct(y, is_correct):
mult = -1 if (y==1)==is_correct else 1
    return most_by_mask(((preds == data.val_y) == is_correct) & (data.val_y == y), mult)
plot_val_with_title(most_by_correct(0, True), "Most correct cats")
plot_val_with_title(most_by_correct(1, True), "Most correct dogs")
plot_val_with_title(most_by_correct(0, False), "Most incorrect cats")
plot_val_with_title(most_by_correct(1, False), "Most incorrect dogs")
most_uncertain = np.argsort(np.abs(probs -0.5))[:4]
plot_val_with_title(most_uncertain, "Most uncertain predictions")
learn = ConvLearner.pretrained(arch, data, precompute=True)
lrf=learn.lr_find()
learn.sched.plot_lr()
learn.sched.plot()
tfms = tfms_from_model(resnet34, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
def get_augs():
data = ImageClassifierData.from_paths(PATH, bs=2, tfms=tfms, num_workers=1)
x,_ = next(iter(data.aug_dl))
return data.trn_ds.denorm(x)[1]
ims = np.stack([get_augs() for i in range(6)])
plots(ims, rows=2)
data = ImageClassifierData.from_paths(PATH, tfms=tfms)
learn = ConvLearner.pretrained(arch, data, precompute=True)
learn.fit(1e-2, 1)
learn.precompute=False
learn.fit(1e-2, 30, cycle_len=1)
learn.sched.plot_lr()
learn.save('224_lastlayer')
learn.load('224_lastlayer')
learn.unfreeze()
lr=np.array([1e-4,1e-3,1e-2])
learn.fit(lr, 5, cycle_len=1, cycle_mult=2)
learn.sched.plot_lr()
learn.save('224_all')
learn.load('224_all')
log_preds,y = learn.TTA()
probs = np.mean(np.exp(log_preds),0)
np.exp(log_preds).shape
probs.shape
accuracy(probs, y)
preds = np.argmax(probs, axis=1)
probs = probs[:,1]
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y, preds)
plot_confusion_matrix(cm, data.classes)
plot_val_with_title(most_by_correct(0, False), "Most incorrect cats")
plot_val_with_title(most_by_correct(1, False), "Most incorrect dogs")
tfms = tfms_from_model(resnet34, sz)
data = ImageClassifierData.from_paths(PATH, tfms=tfms)
ImageClassifierData.from_paths??
learn = ConvLearner.pretrained(resnet34, data, precompute=True)
learn.fit(1e-2, 1)
def binary_loss(y, p):
return np.mean(-(y * np.log(p) + (1-y)*np.log(1-p)))
acts = np.array([1, 0, 0, 1])
preds = np.array([0.95, 0.1, 0.2, 0.8])
binary_loss(acts, preds)
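# (Added check, not in the original lesson) binary_loss is the standard binary
# cross-entropy, so it should agree with sklearn's log_loss on the same inputs.
from sklearn.metrics import log_loss
log_loss(acts, preds)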
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The figure shows the choice probabilities for two of the options - focal and nonfocal. (Since the choice probabilities sum to one, the probability of the third option is determined by the other two.) Then comes the ANOVA.
Step2: The context main effect (ME) dominates the predictions of this model, but the time-pressure ME also makes a contribution.
Step3: Usually this situation is handled by post-hoc tests. As the error bars in the figure indicate, the differences between the time-pressure conditions within each group are not significant, so (unless we repeat the experiment with a bigger sample size) we can't reliably tell where the interaction comes from.
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
x=[1,2,5]
y=np.array([[0.41,0.44,0.47],[0.25,0.22,0.21]]).T
plt.errorbar(x,y[:,0],yerr=0.7/9.3,fmt='d-b')
plt.errorbar(x,y[:,1],yerr=0.7/9.3,fmt='o-g')
plt.legend(['focal','non-focal'],loc=7)
plt.grid(False,axis='x')
plt.xlabel('Time pressure');plt.ylabel('choice probability')
plt.title('Figure 1')
plt.xlim([0,6]);
py=np.atleast_2d(y.mean(0))+np.atleast_2d(y.mean(1)).T
py/=2.
plt.plot(x,py[:,0],'d-b')
plt.plot(x,py[:,1],'o-g')
plt.legend(['focal','non-focal'],loc=6)
plt.xlim([0,6]);plt.ylim([0.1,0.5])
plt.title('Figure 2');
plt.plot(x,[0.4,0.2,0.4],'d-b')
plt.plot(x,[0.2,0.4,0.2],'o-g')
plt.legend(['focal','non-focal'],loc=6)
plt.xlim([0,6]);plt.ylim([0.1,0.5])
plt.title('Figure 3')
plt.figure()
plt.plot(x,[0.4,0.35,0.2],'d-b')
plt.plot(x,[0.2,0.25,0.4],'o-g')
plt.legend(['focal','non-focal'],loc=6)
plt.xlim([0,6]);plt.ylim([0.1,0.5])
plt.title('Figure 4');
plt.figure()
x=[1,2,4,8]
plt.plot(x,[0.4,0.43,0.46,0.49],'d-b')
plt.plot(x,[0.3,0.25,0.24,0.24],'o-g')
plt.legend(['focal','nonfocal'],loc=7)
plt.xlim([0,9]);plt.grid(False,axis='x')
plt.title('Figure 5');
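# (Added sketch, not part of the original analysis) A post-hoc test needs per-subject
# data, which is not shown here; with hypothetical arrays of per-subject choice
# proportions for two time-pressure conditions, one pairwise comparison could look like:
from scipy import stats
rng = np.random.RandomState(0)
focal_tp1 = rng.normal(0.41, 0.07, 20)  # hypothetical per-subject proportions, condition 1
focal_tp2 = rng.normal(0.44, 0.07, 20)  # hypothetical per-subject proportions, condition 2
print(stats.ttest_ind(focal_tp1, focal_tp2))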
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I can assume $q(x)$ has two forms
|
<ASSISTANT_TASK:>
Python Code:
p = GMM([1.0], np.array([[0.5,0.05]]))
num_samples = 1000
beg = 0.0
end = 1.0
t = np.linspace(beg,end,num_samples)
num_neurons = len(p.pis)
colors = [np.random.rand(num_neurons,) for i in range(num_neurons)]
p_y = p(t)
p_max = p_y.max()
np.random.seed(110)
num_neurons = 1
neuron = Neuron((1,1),[[0.6]], bias=0.0006, decay=0.00005)
samples, labels = p.sample(10000)
ys = []
lbls = []
colors = [np.random.rand(3,) for i in range(num_neurons)]
def f(i=0):
x = np.array(samples[i])
l = labels[i]
y = np.array(neuron(x.reshape(1,1,1)))
ys.append(y)
c = 'b' if l else 'g'
lbls.append(c)
fig, ax = plt.subplots(figsize=(15,5))
ax.plot(t, p_y/p_max, c='r', lw=3, label='$p(x)$')
ax.plot([x,x],[0,p_max],label="$x\sim p(x)$", lw=4)
y = [neuron(t.reshape(num_samples,1,1),update=0)]
for j,yi in enumerate(y):
yj_max = y[j].max()
ax.plot(t, y[j]/yj_max, c=colors[j], lw=3, label="$q(x)$")
ax.set_ylim(0.,1.5)
ax.set_xlim(beg,end)
plt.savefig('for_colloquium/fig%03i.png'%(i))
plt.show()
interactive_plot = interactive(f, i=(0, 9999))
output = interactive_plot.children[-1]
output.layout.height = '450px'
interactive_plot
[n.weights for n in list(network.neurons.items())[0][1]]
[np.sqrt(n.bias) for n in list(network.neurons.items())[0][1]]
[n.pi for n in list(network.neurons.items())[0][1]]
def s(x):
return (1/(1+np.exp(-10*(x-0.25))))
x = np.linspace(0,1,100)
plt.plot(x,s(x))
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Next, we use gluon.data.DataLoader, Gluon's data iterator, to hold the training and test data.
Step2: Now, we are ready to define the actual neural network, and we can do so in five simple lines of code.
Step3: Prior to kicking off the model training process, we need to initialize the model’s parameters and set up the loss with gluon.loss.SoftmaxCrossEntropyLoss() and model optimizer functions with gluon.Trainer.
Step4: Running the training is fairly typical, all the while using Gluon's functionality to keep the process simple and seamless.
|
<ASSISTANT_TASK:>
Python Code:
import mxnet as mx
from mxnet import gluon, autograd, ndarray
import numpy as np
train_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=True, transform=lambda data, label:
(data.astype(np.float32)/255, label)), batch_size=32, shuffle=True)
test_data = mx.gluon.data.DataLoader(mx.gluon.data.vision.MNIST(train=False, transform=lambda data, label:
(data.astype(np.float32)/255, label)),batch_size=32, shuffle=False)
# Initialize the model:
net = gluon.nn.Sequential()
# Define the model architecture:
with net.name_scope():
# The first layer has 128 nodes:
net.add(gluon.nn.Dense(128, activation="relu"))
# The second layer has 64 nodes:
net.add(gluon.nn.Dense(64, activation="relu"))
# The output layer has 10 possible outputs:
net.add(gluon.nn.Dense(10))
# Begin with pseudorandom values for all of the model's parameters from a normal distribution
# with a standard deviation of 0.05:
net.collect_params().initialize(mx.init.Normal(sigma=0.05))
# Use the softmax cross entropy loss function to measure how well the model is able to predict
# the correct answer:
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# Use stochastic gradient descent to train the model and set the learning rate hyperparameter to .1:
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': .1})
epochs = 10
for e in range(epochs):
for i, (data, label) in enumerate(train_data):
data = data.as_in_context(mx.cpu()).reshape((-1, 784))
label = label.as_in_context(mx.cpu())
# Start calculating and recording the derivatives:
with autograd.record():
# Optimize parameters -- Forward iteration:
output = net(data)
loss = softmax_cross_entropy(output, label)
loss.backward()
trainer.step(data.shape[0])
# Record statistics on the model's performance over each epoch:
curr_loss = ndarray.mean(loss).asscalar()
print("Epoch {}. Current Loss: {}.".format(e, curr_loss))
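# (Added sketch, not in the original tutorial) Evaluate the trained network on the
# held-out test set using MXNet's built-in accuracy metric.
acc = mx.metric.Accuracy()
for data, label in test_data:
    data = data.as_in_context(mx.cpu()).reshape((-1, 784))
    label = label.as_in_context(mx.cpu())
    acc.update(preds=[net(data)], labels=[label])
print("Test accuracy: {}".format(acc.get()[1]))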
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's go over the columns
Step2: Finally, suppose we want a DataFrame of GlaxoSmithKline Phase-III announcements, sorted in descending order by date
|
<ASSISTANT_TASK:>
Python Code:
# import the dataset
from quantopian.interactive.data.eventvestor import clinical_trials
# or if you want to import the free dataset, use:
# from quantopian.data.eventvestor import clinical_trials_free
# import data operations
from odo import odo
# import other libraries we will use
import pandas as pd
# Let's use blaze to understand the data a bit using Blaze dshape()
clinical_trials.dshape
# And how many rows are there?
# N.B. we're using a Blaze function to do this, not len()
clinical_trials.count()
# Let's see what the data looks like. We'll grab the first three rows.
clinical_trials[:3]
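# (Added illustration, not in the original notebook; assumes the Blaze interactive
# expression supports .distinct()) List the distinct clinical phases in the dataset:
clinical_trials.clinical_phase.distinct()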
phase_three = clinical_trials[clinical_trials.clinical_phase == "Phase III"][['timestamp', 'sid','product_name']].sort('timestamp')
# When displaying a Blaze Data Object, the printout is automatically truncated to ten rows.
phase_three
gsk_sid = symbols('GSK').sid
gsk = clinical_trials[clinical_trials.sid==gsk_sid].sort('timestamp',ascending=False)
gsk_df = odo(gsk, pd.DataFrame)
# now filter down to the Phase III trials
gsk_df = gsk_df[gsk_df.clinical_phase=="Phase III"]
gsk_df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fetch an SQLite engine and create an in memory database
Step2: Now let's make a couple of tables and do some queries.
Step3: ... and let's add a couple of users. First we make a statement for the insert, and then we would execute it.
Step4: ... and then actually put them into the database
Step5: ... and pull the data back out.
Step6: So the results are an iterator that yields tuples.
Step7: Let's add some item data.
Step8: Now let's join the two tables together.
Step9: That's everything in both tables. But we have id_user in the second table.
Step10: Functions?
Step11: That's great, but we have a convenience function as well
Step12: Group by
Step13: A final 'fun' query
|
<ASSISTANT_TASK:>
Python Code:
import sqlalchemy
sqlalchemy.__version__
from sqlalchemy import create_engine
engine = create_engine('sqlite:///:memory:')
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey
metadata = MetaData()
user = Table('user', metadata,
Column('id_user', Integer, primary_key=True),
Column('name', String),
Column('age', Integer))
item = Table('item', metadata,
Column('id_item', Integer, primary_key=True),
Column('id_user', Integer, ForeignKey('user.id_user')),
Column('thing', String))
metadata.create_all(engine)
people = [
(1, 'Bob', '20'),
(2, 'Sally', '25'),
(3, 'John', '30')]
insert = user.insert()
print(insert)
for p in people:
stmt = insert.values(p)
print(stmt.compile().params)
connection = engine.connect()
for p in people:
connection.execute(insert.values(p))
from sqlalchemy import select
users = connection.execute(select([user]))
print(users)
print(list(users))
users = connection.execute(select([user]))
for u in users:
i = u.items()
# print(i)
print(dict(i))
items = (
(1, 1, 'Peanuts'),
(2, 1, 'VW'),
(3, 1, 'iPad'),
(4, 2, 'Raisins'),
(5, 2, 'Fiat'),
(6, 2, 'Nexus 10'),
(7, 2, 'Timex'),
(8, 3, 'Caviar'),
(9, 3, 'Porche'),
(10, 3, 'Surface Pro'),
(11, 3, 'Rolex'),
(12, 3, 'Boat'),
(13, 3, 'Plane'))
insert = item.insert()
for i in items:
connection.execute(insert.values(i))
for x in connection.execute(select([item])):
print(x)
stmt = select([user, item], use_labels=True)
for s in connection.execute(stmt):
print(s)
# print(s.items())
stmt = select([user, item]).where(user.c.id_user == item.c.id_user)
print(stmt)
for s in connection.execute(stmt):
print(s)
from sqlalchemy import func
stmt = select([func.count(user.c.id_user)])
result = connection.execute(stmt)
print(tuple(result))
result = connection.execute(stmt).scalar()
print(result)
stmt = (select([user, func.count(user.c.id_user).label('item_count')])
.select_from(user.join(item))
.group_by(user.c.id_user))
print(stmt)
for s in connection.execute(stmt):
print(s.items())
stmt1 = select([item.c.id_user]).where(item.c.thing.ilike('boat'))
print(connection.execute(stmt1).fetchone())
stmt2 = select([user]).where(user.c.id_user.in_(stmt1.alias()))
print(stmt2)
print(connection.execute(stmt2).fetchone())
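# (Added for comparison, not in the original walkthrough) The same "who owns a boat?"
# question expressed as a single join instead of a subquery:
stmt3 = (select([user])
         .select_from(user.join(item))
         .where(item.c.thing.ilike('boat')))
print(connection.execute(stmt3).fetchone())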
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load data
Step2: Load and adjust lookup table
Step3: Create paths per station
Step4: Create hashes based on non-zero values of num and cat data per station
|
<ASSISTANT_TASK:>
Python Code:
const.TRAIN_FILES
const.TEST_FILES
num_data = func.load_data_file(const.TRAIN_FILES[0], ftype='bin')
cat_data = func.load_data_file(const.TRAIN_FILES[1], ftype='bin')
num_data_te = func.load_data_file(const.TEST_FILES[0], ftype='bin')
cat_data_te = func.load_data_file(const.TEST_FILES[1], ftype='bin')
y = num_data['data']['y']
ids = np.concatenate([num_data['data']['ids'].Id.values, num_data_te['data']['ids'].Id.values])
num_f_n = num_data['data']['feature_names'][1:]
cat_f_n = cat_data['data']['feature_names'][1:]
num_data = vstack([num_data['data']['features'], num_data_te['data']['features']], format='csr')
cat_data = vstack([cat_data['data']['features'], cat_data_te['data']['features']], format='csr')
num_data.shape
del num_data_te, cat_data_te
lut = pd.read_csv(os.path.join(const.DATA_PATH, 'date_feat_lut_V2.csv'))
lut.head()
def insert_col_num(x):
''' Adds columns number to lookup table'''
try:
num_col_n = num_f_n.index(x['name_num'])
x['col_num'] = num_col_n
except:
x['col_num'] = np.nan
try:
cat_col_n = cat_f_n.index(x['name_cat'])
x['col_cat'] = cat_col_n
except:
x['col_cat'] = np.nan
return x
lut = lut.apply(lambda x: insert_col_num(x), axis=1)
# Create lookup table for the numeric matrix so we can easily go from feature nr to column no
lut_num_indices = lut.groupby(['line','station_V2']).col_num.agg(['min','max'])
lut_num_indices.head()
# Create lookup table for the numeric matrix so we can easily go from feature nr to column no
lut_cat_indices = lut.groupby(['line','station_V2']).col_cat.agg(['min','max'])
lut_cat_indices.head()
print lut_num_indices.shape
print lut_cat_indices.shape
n_stations = lut.station_V2.nunique()
n_features = lut.feature_nr.nunique()
n_samples = num_data.shape[0]
print('Number of stations: {}'.format(n_stations))
print('Number of features: {}'.format(n_features))
print('Number of samples: {}'.format(n_samples))
# Create matrix B for numeric features
station_filter_num = np.zeros((lut_num_indices.shape[0], num_data.shape[1]))
for i in range(lut_num_indices.shape[0]):
if lut_num_indices.iloc[i]['min']>=0:
i_s = int(lut_num_indices.iloc[i]['min'])
i_e = int(lut_num_indices.iloc[i]['max']+1)
#print i_s, i_e
station_filter_num[i, i_s:i_e] = 1
station_filter_num = csr_matrix(station_filter_num)
# Create matrix C for numeric features
num_per_station = (num_data * station_filter_num.T)>0
# Create matrix B for categorical features
station_filter_cat = np.zeros((lut_cat_indices.shape[0], cat_data.shape[1]))
for i in range(lut_cat_indices.shape[0]):
if lut_cat_indices.iloc[i]['min']>=0:
i_s = int(lut_cat_indices.iloc[i]['min'])
i_e = int(lut_cat_indices.iloc[i]['max']+1)
#print i_s, i_e
station_filter_cat[i, i_s:i_e] = 1
station_filter_cat = csr_matrix(station_filter_cat)
# Create matrix C for categorical features
cat_per_station = (cat_data * station_filter_cat.T)>0
# Double check dimensions
print num_per_station.shape
print cat_per_station.shape
# Now get a array of station nr visited for each
a=time.time()
stations_passed = []
for i in range(num_data.shape[0]):
if not i % 100000:
print('Calculated stations for {} of {}'.format(i+1, num_data.shape[0]))
stations_passed.append(np.unique(np.concatenate([num_per_station[i,:].nonzero()[1], cat_per_station[i,:].nonzero()[1]], axis=0)))
# Below is ~10% slower
#stations_passed.append(np.asarray(set(num_data[i,:].nonzero()[1]) & set(cat_data[i,:].nonzero()[1])))
print time.time()-a
# Method used to convert list of stations visited to either a string '01111000' or array [0,1,0,1,0,0] or both
def to_path_info_per_item(items_visited, n_items, o_type='string'):
tmp = np.zeros((n_items)).astype(int)
for n in items_visited: tmp[n]=1
if o_type=='string':
return ''.join(map(str,tmp))
elif o_type=='array':
return tmp
elif o_type=='both':
return ''.join(map(str,tmp)), tmp
# Convert list of stations to binary vectors
ps_strings = []
ps_arr = []
for i in range(n_samples):
if not i % 100000:
print('Converted {} of {}'.format(i+1, n_samples))
s_s, s_arr = to_path_info_per_item(stations_passed[i], n_stations, o_type='both')
ps_strings.append(s_s)
ps_arr.append(s_arr)
# Now finally get the unique values based on the strings
u_str, u_ix, i_ix = np.unique(ps_strings, return_index=True, return_inverse=True)
u_arr = np.array([ps_arr[n] for n in u_ix])
print('Number of unique paths: {}'.format(u_arr.shape[0]))
# Store the sample_id with unique path id
pd.DataFrame({'id': ids, 'u_arr_ix': i_ix}).to_csv(os.path.join(const.DATA_PATH, 'eda_product_flow_sample_paths_station.csv'),
index=False)
# Store the unique strings with id
pd.DataFrame({'id': range(u_arr.shape[0]), 'u_str': u_str}).to_csv(os.path.join(const.DATA_PATH, 'eda_product_flow_unique_paths_station.csv'),
index=False)
def distance_matrix(bin_arr_list, verbose=False):
tmp = np.empty((bin_arr_list.shape[0], bin_arr_list.shape[0]))
tmp[:] = np.nan
for i in range(bin_arr_list.shape[0]):
if (not i % 1000) & verbose:
print('{}/{}'.format(i+1,bin_arr_list.shape[0]))
for j in range(bin_arr_list.shape[0]):
#if j>i:
##a = np.array([int(n) for n in list(unique_string[i])])
#b = np.array([int(n) for n in list(unique_string[j])])
tmp[i,j] = np.count_nonzero( bin_arr_list[i]!=bin_arr_list[j] )
return tmp
tmp = distance_matrix(u_arr, True)
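# (Added alternative, not in the original notebook) The same pairwise Hamming counts
# can be computed without the Python double loop: cdist returns the fraction of
# differing positions, so multiplying by the vector length gives the count.
from scipy.spatial.distance import cdist
tmp_fast = cdist(u_arr, u_arr, metric='hamming') * u_arr.shape[1]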
from sklearn.cluster import KMeans
n_cluster=7
km = KMeans(n_clusters=n_cluster)
tmp
%time km.fit(u_arr)
clusters = km.labels_.tolist()
len(clusters)
from sklearn import decomposition
ns=7600
ne=ns+20
print clusters[ns:ne]
[np.count_nonzero( u_arr[i]!=u_arr[i+1]) for i in range(ns, ne)]
u_arr
u_arr[1]
for n_clus in range(n_cluster):
u_clus = np.array([u_arr[i] for i,n in enumerate(clusters) if n==n_clus])
tmp2 = distance_matrix(u_clus)
print('Mean of matrix: {}'.format(np.nanmean(tmp2)))
print('Max of matrix: {}'.format(np.nanmax(tmp2)))
n_clus=3
nn_clus = np.array([n for i,n in enumerate(clusters) if n==n_clus])
titles = [str(i) for i in range(tmp.shape[0])]
#set up colors per clusters using a dict
cluster_colors = {0: '#1b9e77', 1: '#d95f02', 2: '#7570b3', 3: '#e7298a',4: '#8d03d1', 5:'#2b09db', 6:'#466eb8'}
#set up cluster names using a dict
cluster_names = {0: '1',
1: '2',
2: '3',
3: '4',
4: '5',
5: '6',
6: '7'}
df = pd.DataFrame(dict(x=xs, y=ys, label=clusters, title=titles))
#group by cluster
groups = df.groupby('label')
# set up plot
fig, ax = plt.subplots(figsize=(17, 9)) # set size
ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling
#iterate through groups to layer the plot
#note that I use the cluster_name and cluster_color dicts with the 'name' lookup to return the appropriate color/label
for name, group in groups:
ax.plot(group.x, group.y, marker='o', linestyle='', ms=12,
label=cluster_names[name], color=cluster_colors[name],
mec='none')
ax.set_aspect('auto')
ax.tick_params(\
axis= 'x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
ax.tick_params(\
axis= 'y', # changes apply to the y-axis
which='both', # both major and minor ticks are affected
left='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelleft='off')
ax.legend(numpoints=1) #show legend with only 1 point
#add label in x,y position with the label as the film title
for i in range(len(df)):
ax.text(df.ix[i]['x'], df.ix[i]['y'], df.ix[i]['title'], size=8)
plt.show() #show the plot
pd.DataFrame({'string':station_strings}).to_csv(os.path.join(const.DATA_PATH,'path_string_per_station.csv'), index_label='Id')
stations = []
for i in range(num_per_station.shape[0]):
if not i % 100000:
print('Hashed {} of {}'.format(i+1, num_per_station.shape[0]))
stations.append((np.unique(np.concatenate([num_per_station[i,:].nonzero()[1], cat_per_station[i,:].nonzero()[1]], axis=0))))
pd.DataFrame({'string':ustring}).to_csv(os.path.join(const.DATA_PATH,'path_unique_string_per_station.csv'), index_label='Id')
hashes = []
for i in range(num_per_station.shape[0]):
if not i % 10000:
print('Hashed {} of {}'.format(i+1, num_per_station.shape[0]))
hashes.append(hash(str(np.concatenate([num_per_station[i,:].nonzero()[1], cat_per_station[i,:].nonzero()[1]], axis=0))))
lut.feature_nr.max()
pd.Series(hashes).nunique()
pd.Series(hashes).value_counts().head()
hash_vals = pd.Series(hashes).value_counts()
hash_vals.hist(range=[0,100])
(hash_vals<10).sum()
def to_path_info_per_feature(features_visited, n_features, o_type='string'):
tmp = np.zeros((n_features)).astype(int)
for n in features_visited: tmp[n]=1
if o_type=='string':
return ''.join(map(str,tmp))
elif o_type=='array':
return tmp
elif o_type=='both':
return ''.join(map(str,tmp)), tmp
# Generate a list of all the features products have values for
features_visited = []
for i in range(n_samples):
if not i % 100000:
print('Calculated {} of {}'.format(i+1, n_samples))
#stations_visited.append(hash(str(np.concatenate([num_data[i,:].nonzero()[1], cat_data[i,:].nonzero()[1]], axis=0).data)))
features_visited.append(np.unique(np.concatenate([num_data[i,:].nonzero()[1], cat_data[i,:].nonzero()[1]], axis=0)))
# Covert to list of stations to binary vectors
station_strings = []
station_arr = []
for i in range(n_samples):
if not i % 100000:
print('Hashed {} of {}'.format(i+1, n_samples))
s_s, s_arr = to_path_info_per_feature(features_visited[i], n_features, o_type='both')
station_strings.append(s_s)
station_arr.append(s_arr)
# Now finally get the unique values based on the strings
u_str, u_ix, i_ix = np.unique(station_strings, return_index=True, return_inverse=True)
u_arr = np.array([station_arr[n] for n in u_ix])
print('Number of unique paths: {}'.format(u_arr.shape[0]))
# Store the sample_id with unique path id
pd.DataFrame({'id': ids, 'u_arr_ix': i_ix}).to_csv(os.path.join(const.DATA_PATH, 'eda_product_flow_sample_paths_per_feature.csv'),
index=False)
# Store the unique strings with id
pd.DataFrame({'id': range(u_arr.shape[0]), 'u_str': u_str}).to_csv(os.path.join(const.DATA_PATH, 'eda_product_flow_unique_paths_per_feature.csv'),
index=False)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: When a monochromatic plane wave is incident on a sphere, it scatters and absorbs light depending on the properties of the light and sphere. The sphere has radius $r$ and index of refraction $m=m_\mathrm{re}- j\,m_\mathrm{im}$. The sphere size parameter is $x=2\pi r/\lambda$, where $\lambda$ is the wavelength of the plane wave in a vacuum.
Step2: Efficiencies
Step3: Scattering and absorption coefficients
Step4: Efficiencies
Step5: Radiation Pressure
Step6: Graph of backscattering efficiency
|
<ASSISTANT_TASK:>
Python Code:
#!pip install --user miepython
import importlib.resources
import numpy as np
import matplotlib.pyplot as plt
try:
import miepython
except ModuleNotFoundError:
print('miepython not installed. To install, uncomment and run the cell above.')
print('Once installation is successful, rerun this cell again.')
# import the Johnson and Christy data for silver
# ag = np.genfromtxt('https://refractiveindex.info/tmp/data/main/Ag/Johnson.txt', delimiter='\t')
nname = "data/ag-Johnson.txt"
ref = importlib.resources.files('miepython').joinpath(nname)
ag = np.genfromtxt(ref, delimiter='\t')
# data is stacked so need to rearrange
N = len(ag)//2
ag_lam = ag[1:N,0]
ag_mre = ag[1:N,1]
ag_mim = ag[N+1:,1]
plt.scatter(ag_lam*1000,ag_mre,s=2,color='blue')
plt.scatter(ag_lam*1000,ag_mim,s=2,color='red')
plt.xlim(300,800)
plt.ylim(0,5)
plt.xlabel('Wavelength (nm)')
plt.ylabel('Refractive Index')
plt.text(350, 1.2, '$m_{re}$', color='blue', fontsize=14)
plt.text(350, 2.2, '$m_{im}$', color='red', fontsize=14)
plt.title('Complex Refractive Index of Silver')
plt.show()
r = 0.3 #radius in microns
geometric_cross_section = np.pi * r**2
x = 2*np.pi*r/ag_lam;
m = ag_mre - 1.0j * ag_mim
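# (Added for reference, not in the original example) size parameter x = 2*pi*r/lambda
# at the two ends of the tabulated wavelength range:
print('x at {:.3f} um: {:.2f}'.format(ag_lam[0], x[0]))
print('x at {:.3f} um: {:.2f}'.format(ag_lam[-1], x[-1]))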
qext, qsca, qback, g = miepython.mie(m,x)
absorb = (qext - qsca) * geometric_cross_section
scatt = qsca * geometric_cross_section
extinct = qext * geometric_cross_section
plt.plot(ag_lam*1000,absorb,color='blue')
plt.plot(ag_lam*1000,scatt,color='red')
plt.plot(ag_lam*1000,extinct,color='green')
plt.text(350, 0.35,'$\sigma_{abs}$', color='blue', fontsize=14)
plt.text(350, 0.54,'$\sigma_{sca}$', color='red', fontsize=14)
plt.text(350, 0.84,'$\sigma_{ext}$', color='green', fontsize=14)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Cross Section (1/microns$^2$)")
plt.title("Cross Sections for %.1f$\mu$m Silver Spheres" % (r*2))
plt.xlim(300,800)
plt.show()
lambda0 = 1 # microns
a = lambda0/10 # also microns
k = 2*np.pi/lambda0 # per micron
m = 1.5
x = a * k
geometric_cross_section = np.pi * a**2
theta = np.linspace(-180,180,180)
mu = np.cos(theta/180*np.pi)
s1,s2 = miepython.mie_S1_S2(m,x,mu)
phase = (abs(s1[0])**2+abs(s2[0])**2)/2
print(' unpolarized =',phase)
print(' |s1[-180]|**2 =',abs(s1[0]**2))
print(' |s2[-180]|**2 =',abs(s2[0]**2))
print(' |s1[ 180]|**2 =',abs(s1[179]**2))
print(' |s2[ 180]|**2 =',abs(s2[179]**2))
print()
qext, qsca, qback, g = miepython.mie(m,x)
Cback = qback * geometric_cross_section
Csca = qsca * geometric_cross_section
print(' Csca =',Csca)
print(' Cback =',Cback)
print('4*pi*Csca*p(180) =',4*np.pi*Csca*phase)
r = 0.3 #radius in microns
x = 2*np.pi*r/ag_lam;
m = ag_mre - 1.0j * ag_mim
qext, qsca, qback, g = miepython.mie(m,x)
plt.plot(ag_lam*1000,qext - qsca,color='blue')
plt.plot(ag_lam*1000,qsca,color='red')
plt.plot(ag_lam*1000,qext,color='green')
plt.text(350, 1.2,'$Q_{abs}$', color='blue', fontsize=14)
plt.text(350, 1.9,'$Q_{sca}$', color='red', fontsize=14)
plt.text(350, 3.0,'$Q_{ext}$', color='green', fontsize=14)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Efficiency (-)")
plt.title("Mie Efficiencies for %.1f$\mu$m Silver Spheres" % (r*2))
plt.xlim(300,800)
plt.show()
r = 0.3 #radius in microns
x = 2*np.pi*r/ag_lam;
m = ag_mre - 1.0j * ag_mim
qext, qsca, qback, g = miepython.mie(m,x)
qpr = qext - g*qsca
plt.plot(ag_lam*1000,qpr,color='blue')
plt.xlabel("Wavelength (nm)")
plt.ylabel("Efficiency $Q_{pr}$ (-)")
plt.title("Radiation Pressure Efficiency for %.1f$\mu$m Silver Spheres" % (r*2))
plt.xlim(300,800)
plt.ylim(1,2.5)
plt.show()
x = np.linspace(0.1,4,50)
m = 3.41-1.94j
qext, qsca, qback, g = miepython.mie(m,x)
plt.plot(x,qback)
plt.text(0.6,0,"m=3.41-1.94j")
m = 10000
qext, qsca, qback, g = miepython.mie(m,x)
plt.plot(x,qback)
plt.text(1.2,3.0,"m=10,000")
plt.xlabel("Size Parameter")
plt.ylabel(r"$Q_{back}$")
plt.title("van de Hulst Figure 61")
plt.grid(True)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step5: Create a utility class for camera calibration
Step13: Create a class to keep track of lane detections
Step16: Use the lane pixels identified to fit a polygon and draw it back on the original image
Step18: Here we validate the detected lines and add them to the lane class
Step20: Find the lane using sliding window technique
Step22: Find Lanes Wrapper
Step24: Warp the image to get a bird's-eye view
Step27: Threshold
Step29: Apply all the steps
Step30: Generate obj points and img points
Step31: Calibrate camera and undistort the chessboard images
Step32: Test on images
Step36: Test on videos
|
<ASSISTANT_TASK:>
Python Code:
import os
import math
import glob
import cv2
from collections import deque
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
%matplotlib inline
class cam_util():
    """util class for camera operations"""
ret = None
mtx = None
dist = None
rvecs = None
tvecs = None
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
def gen_camera_points(self):
        """Generate objpoints and imgpoints from calibration images."""
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Make a list of calibration images
images = glob.glob('camera_cal/calibration*.jpg')
# Step through the list and search for chessboard corners
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
self.objpoints.append(objp)
self.imgpoints.append(corners)
def undistort(self, img):
        """Undistort an image with the camera matrix."""
if self.mtx is None:
self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.objpoints, self.imgpoints,
img.shape[:2],None,None)
h, w = img.shape[:2]
newcameramtx, roi=cv2.getOptimalNewCameraMatrix(self.mtx, self.dist, (w,h), 1, (w,h))
dst = cv2.undistort(img, self.mtx, self.dist, None, newcameramtx)
x,y,w,h = roi
return dst[y:y+h, x:x+w]
def clean_mat(self):
        """Reset camera calibration."""
self.ret = None
self.mtx = None
self.dist = None
self.rvecs = None
self.tvecs = None
class Line():
    """Class to store detected lane stats."""
def __init__(self, maxSamples=15):
self.maxSamples = maxSamples
# x values of the last n fits of the line
self.recent_xfitted = deque(maxlen=self.maxSamples)
#polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
#polynomial coefficients averaged over the last n iterations
self.best_fit = None
#difference in fit coefficients between last and new fits
self.diffs = np.array([0,0,0], dtype='float')
#average x values of the fitted line over the last n iterations
self.bestx = None
# was the line detected in the last iteration?
self.detected = False
#radius of curvature of the line in some units
self.radius_of_curvature = None
#distance in meters of vehicle center from the line
self.line_base_pos = None
def update_lane(self, ally, allx):
        """Function to update the stats."""
# get the mean as the best x
self.bestx = np.mean(allx, axis=0)
# fit a 2 order polynomial
new_fit = np.polyfit(ally, allx, 2)
# calculate the difference between last fit and new fit
self.diffs = np.subtract(self.current_fit, new_fit)
# update current fit
self.current_fit = new_fit
# add the new fit to the queue
self.recent_xfitted.append(self.current_fit)
# Use the queue mean as the best fit
self.best_fit = np.mean(self.recent_xfitted, axis=0)
# meters per pixel in y dimension
ym_per_pix = 30/720
# meters per pixel in x dimension
xm_per_pix = 3.7/700
# Calculate radius of curvature
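        # (Added note) For a fit x = A*y**2 + B*y + C, the radius of curvature at y is
        #   R = (1 + (2*A*y + B)**2)**1.5 / |2*A|
        # The polynomial is refit in metres below so the radius comes out in metres.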
fit_cr = np.polyfit(ally*ym_per_pix, allx*xm_per_pix, 2)
y_eval = np.max(ally)
self.radius_of_curvature = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
# Utility Functions
def get_roi(img, vertices):
    """Apply mask and get region of interest within the mask."""
mask = np.zeros_like(img)
if len(img.shape) > 2:
channel_count = img.shape[2]
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def hide_roi(img, vertices):
    """Apply mask and get region of interest outside the mask."""
mask = np.zeros_like(img)
mask=mask+255
if len(img.shape) > 2:
channel_count = img.shape[2]
ignore_mask_color = (0,) * channel_count
else:
ignore_mask_color = 0
cv2.fillPoly(mask, vertices, ignore_mask_color)
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def drow_on_images(img, vertices):
    """Draw polygon on image."""
cv2.polylines(img, [vertices], True, (255,255,255), 2)
plot_img(img, 'img drawing', True)
def plot_img(img, step, show_stages=False):
    """Plot image."""
if show_stages:
print('######################## '+step+' ########################')
plt.imshow(img, cmap='gray')
plt.show()
def plot_hist(histogram, show_stages=False):
    """Plot histogram."""
if show_stages:
print('######################## histogram ########################')
plt.plot(histogram)
plt.show()
def write_stats(img):
    """Write lane stats on image."""
font = cv2.FONT_HERSHEY_SIMPLEX
size = 1
weight = 2
color = (255,70,0)
cv2.putText(img,'Left Curve : '+ '{0:.2f}'.format(left_line.radius_of_curvature)+' m',(10,30), font, size, color, weight)
cv2.putText(img,'Right Curve : '+ '{0:.2f}'.format(right_line.radius_of_curvature)+' m',(10,60), font, size, color, weight)
cv2.putText(img,'Left Lane Pos: '+ '{0:.2f}'.format(left_line.bestx),(10,100), font, size, color, weight)
cv2.putText(img,'Right Lane Pos: '+ '{0:.2f}'.format(right_line.bestx),(10,130), font, size, color, weight)
cv2.putText(img,'Distance from center: '+ "{0:.2f}".format(left_line.line_base_pos)+' m',(10,180), font, size, color, weight)
def draw_lane(undist, img, Minv):
    """Draw the detected lane back on the image."""
# Generate x and y values for plotting
ploty = np.linspace(300, 700)
# Create an image to draw the lines on
warp_zero = np.zeros_like(img).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
left_fit = left_line.best_fit
right_fit = right_line.best_fit
if left_fit is not None and right_fit is not None:
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (20,120, 80))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
write_stats(result)
return result
return undist
def validate_Update_lane(img, nonzero, nonzerox, nonzeroy, left_lane_inds, right_lane_inds, show_stages=False):
    """Validate the detected lane ids and update the lane stats if valid."""
# Extract left and right line pixel positions
left_line_allx = nonzerox[left_lane_inds]
left_line_ally = nonzeroy[left_lane_inds]
right_line_allx = nonzerox[right_lane_inds]
right_line_ally = nonzeroy[right_lane_inds]
    # Discard the detections if either detected lane has fewer than 2000 pixels.
    # This is done because for very small sizes the polyfit function gives unpredictable results.
    # A better approach would be to use the larger lane curvature to extend the other one.
if len(left_line_allx) <= 2000 or len(right_line_allx) <= 2000:
left_line.detected = False
right_line.detected = False
return
left_x_mean = np.mean(left_line_allx, axis=0)
right_x_mean = np.mean(right_line_allx, axis=0)
lane_width = np.subtract(right_x_mean, left_x_mean)
    # Discard the detections if the lane width is too large or too small
if left_x_mean > 450 or right_x_mean < 850:
left_line.detected = False
right_line.detected = False
return
if lane_width < 300 or lane_width > 800:
left_line.detected = False
right_line.detected = False
return
    # Update the lane stats if the current detection is the first one or
    # the detection is within 100 pixels of the mean of the last n detections
if left_line.bestx is None or np.abs(np.subtract(left_line.bestx, np.mean(left_line_allx, axis=0))) < 100:
left_line.update_lane(left_line_ally, left_line_allx)
left_line.detected = True
else:
left_line.detected = False
if right_line.bestx is None or np.abs(np.subtract(right_line.bestx, np.mean(right_line_allx, axis=0))) < 100:
right_line.update_lane(right_line_ally, right_line_allx)
right_line.detected = True
else:
right_line.detected = False
# Calculate the distance of car from center of lane
lane_center = right_line.bestx - left_line.bestx
left_line.line_base_pos = ((img.shape[1]*0.5 - lane_center)*3.7)/700
right_line.line_base_pos = left_line.line_base_pos
def window_search(img, nonzero, nonzerox, nonzeroy, show_stages=False):
    """Perform a sliding window search to detect lane pixels."""
# Temp image to draw detections on
out_img = np.dstack((img, img, img))*255
# Calculate histogram
histogram = np.sum(img[img.shape[0]*.75:,:], axis=0)
plot_hist(histogram, show_stages)
# Take the midpoint and use the max on each side as starting point
midpoint = np.int(histogram.shape[0]/2)
leftx_base = np.argmax(histogram[0:midpoint])
rightx_base = np.argmax(histogram[midpoint:histogram.shape[0]]) + midpoint
# Choose the number of sliding windows
nwindows = 9
# Set height of windows
window_height = np.int(img.shape[0]/nwindows)
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 80
# Set minimum number of pixels found to recenter window
minpix = 30
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low)
& (nonzeroy < win_y_high)
& (nonzerox >= win_xleft_low)
& (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low)
& (nonzeroy < win_y_high)
& (nonzerox >= win_xright_low)
& (nonzerox < win_xright_high)).nonzero()[0]
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
plot_img(out_img, 'sliding window marked', show_stages)
return left_lane_inds, right_lane_inds
def find_lanes(img, show_stages=False):
    """Lane finding wrapper function."""
    # Get the foreground pixels
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
    # If the last detection was successful, take the nonzero pixels within a 30-pixel margin as the new detections
if left_line.detected and right_line.detected:
margin = 30
left_lane_inds = ((nonzerox > (left_line.current_fit[0]*(nonzeroy**2) + left_line.current_fit[1]*nonzeroy + left_line.current_fit[2] - margin))
& (nonzerox < (left_line.current_fit[0]*(nonzeroy**2) + left_line.current_fit[1]*nonzeroy + left_line.current_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_line.current_fit[0]*(nonzeroy**2) + right_line.current_fit[1]*nonzeroy + right_line.current_fit[2] - margin))
& (nonzerox < (right_line.current_fit[0]*(nonzeroy**2) + right_line.current_fit[1]*nonzeroy + right_line.current_fit[2] + margin)))
# Update the lane detections
validate_Update_lane(img, nonzero, nonzerox, nonzeroy, left_lane_inds, right_lane_inds)
# If first detection or the last detection was unsuccessful perform a sliding window search
else:
#print('doing window search')
left_lane_inds, right_lane_inds = window_search(img, nonzero, nonzerox, nonzeroy, show_stages)
# Update the lane detections
validate_Update_lane(img, nonzero, nonzerox, nonzeroy, left_lane_inds, right_lane_inds)
def warp(img):
    """Warp the image to get a bird's-eye view."""
img_shape = img.shape
bounding_top_right = [img_shape[1]*0.5 + 90,img_shape[0]*0.70]
bounding_btm_right = [img_shape[1]*0.5 + 450,img_shape[0]]
bounding_btm_left = [img_shape[1]*0.5 - 400,img_shape[0]]
bounding_top_left = [img_shape[1]*0.5 - 60,img_shape[0]*0.70]
# Select source points
pts1 = np.float32([bounding_top_right,bounding_btm_right,bounding_btm_left,bounding_top_left])
# Select destination points
pts2 = np.float32([[img_shape[1]*0.5 + 250,img_shape[0]*0.60],
[img_shape[1]*0.5 + 390,img_shape[0]],
[img_shape[1]*0.5 - 345,img_shape[0]],
[img_shape[1]*0.5 - 205,img_shape[0]*0.60]])
# Get Perspective Transform
M = cv2.getPerspectiveTransform(pts1, pts2)
# Get inverse Perspective Transform
Minv = cv2.getPerspectiveTransform(pts2, pts1)
# Apply warp transform on source image
dst = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]), flags=cv2.INTER_LINEAR)
return dst, Minv
def rec_threshold(img, roi, t_min=140, t_max=255):
    """Function to apply a recursive threshold with increasing/decreasing boundaries
    based on the area of lane within a region of interest."""
binary = np.zeros_like(img)
binary[(img >= t_min) & (img <= t_max)] = 1
    # return last value if the threshold levels reach minimum or maximum.
if t_min <= 40 or t_min >= 220:
return binary
binary_1 = get_roi(binary, roi)
#print(np.sum(binary_1.nonzero()))
if np.sum(binary_1.nonzero()) > 9800000:
binary = rec_threshold(img, roi, t_min+10)
elif np.sum(binary_1.nonzero()) < 100000:
binary = rec_threshold(img, roi, t_min-10)
return binary
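# (Added note) rec_threshold raises t_min by 10 when the masked binary image is too
# "full" and lowers it by 10 when it is too sparse, recursing until the detected lane
# area falls in range or t_min leaves [40, 220], so the threshold adapts to overall
# scene brightness.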
def threshold(img, roi, show_stages=False):
    """Apply threshold."""
    # Convert image to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# Take v channel
v_channel = hsv[:,:,2]
plot_img(v_channel, 'v channel', show_stages)
# Apply threshold to find lane
v_binary = rec_threshold(v_channel, roi)
plot_img(v_binary, 'color threshold', show_stages)
# Convert image to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Take the derivative in x
sobelx = cv2.Sobel(gray, cv2.CV_32F, 1, 0)
#sobelx = cv2.Sobel(sobelx, cv2.CV_32F, 0, 1) # Take the derivative
#plot_img(sobelx, show_stages)
# Absolute x derivative to accentuate lines away from horizontal
abs_sobelx = np.absolute(sobelx)
# Scale to 8-bit (0-255)
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
#plot_img(sobelx, show_stages)
sxbinary = np.zeros_like(scaled_sobel)
# perform threshold
sxbinary[(scaled_sobel >= 100) & (scaled_sobel <= 255)] = 1
plot_img(sobelx, 'sobel', show_stages)
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, v_binary))
combined_binary = np.zeros_like(sxbinary)
# Combine the color and Sobel thresholds
combined_binary[(v_binary == 1) | (sxbinary == 1)] = 1
plot_img(combined_binary, 'combined threshold', show_stages)
return combined_binary
def process_image(image, show_stages=False):
"""Wrapper function for all image processing."""
# Undistort the image
undistorted = cam.undistort(image)
plot_img(undistorted, 'undistorted', show_stages)
# Apply perspective transform
img, Minv = warp(undistorted)
plot_img(img, 'warped', show_stages)
# Get points for region of interest
vertices = np.array([[(image.shape[1]*0.1,image.shape[0]-50),
(image.shape[1]*0.5-100,image.shape[0]*0.60),
(image.shape[1]*0.5+100,image.shape[0]*0.60),
(image.shape[1]*0.95,image.shape[0]-50)]],
dtype=np.int32)
# Apply threshold
img = threshold(img, vertices, show_stages)
vertices = np.array([[(200,img.shape[0]),
(200,0),
(1050,0),
(1050,img.shape[0])]], dtype=np.int32)
# Get roi
img = get_roi(img, vertices)
# Find Lanes
find_lanes(img, show_stages)
# Draw lanes on image
res = draw_lane(undistorted, img, Minv);
#plot_img(res, show_stages)
return res
# init camera
cam = cam_util()
cam.gen_camera_points()
# Undistort a sample calibration image
cal_dir = "camera_cal/"
cal_images = glob.glob(cal_dir+'*.jpg')
for cal_image in cal_images:
cimg = mpimg.imread(cal_image)
cimg_undistort = cam.undistort(cimg)
cv2.imwrite('output_images/undistort_'+cal_image.split('/')[1],cimg_undistort)
print('calibration done')
# Clean camera matrix
cam.clean_mat()
# Test on images
test_dir = "test_images/"
test_images = glob.glob(test_dir+'test*.jpg')
#test_images = glob.glob(test_dir+'straight_lines*.jpg')
#test_images = glob.glob(test_dir+'*.jpg')
for test_image in test_images:
left_line = Line()
right_line = Line()
image = mpimg.imread(test_image)
res = process_image(image, False)
#plot_img(res, True)
print('######################## Sample Stages ########################')
print()
# display stages for a sample image
left_line = Line()
right_line = Line()
image = mpimg.imread('test_images/test3.jpg')
plot_img(image, 'Initial', True)
res = process_image(image, True)
plot_img(res, 'Final', True)
# Test on Videos
# Clean data for video
#
left_line = Line()
right_line = Line()
cam.clean_mat()
project_video_res = 'project_video_res.mp4'
clip1 = VideoFileClip("project_video.mp4")
project_video_clip = clip1.fl_image(process_image)
project_video_clip.write_videofile(project_video_res, audio=False)
#
# Clean data for video
#
left_line = Line()
right_line = Line()
cam.clean_mat()
challenge_video_res = 'challenge_video_res.mp4'
clip2 = VideoFileClip('challenge_video.mp4')
challenge_video_clip = clip2.fl_image(process_image)
challenge_video_clip.write_videofile(challenge_video_res, audio=False)
#
# Clean data for video
#
left_line = Line()
right_line = Line()
cam.clean_mat()
harder_challenge_video_res = 'harder_challenge_video_res.mp4'
clip2 = VideoFileClip('harder_challenge_video.mp4')
harder_challenge_video_clip = clip2.fl_image(process_image)
harder_challenge_video_clip.write_videofile(harder_challenge_video_res, audio=False)
#
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reaction Time Distributions
Step2: Model Fitness
|
<ASSISTANT_TASK:>
Python Code:
# Environment setup
%matplotlib inline
%cd /lang_dec
# Imports
import warnings; warnings.filterwarnings('ignore')
import hddm
import numpy as np
import matplotlib.pyplot as plt
from utils import model_tools, signal_detection
# Import control models
controls_data = hddm.load_csv('/lang_dec/data/controls_clean.csv')
controls_model = hddm.HDDM(controls_data, depends_on={'v': 'stim'}, bias=True)
controls_model.load_db(dbname='language_decision/models/controls', db='txt')
controls_model_threshold = hddm.HDDM(controls_data, depends_on={'v': 'stim', 'a': 'stim'})
controls_model_threshold.load_db(dbname='language_decision/models/controls_threshold', db='txt')
fig = plt.figure()
ax = fig.add_subplot(111, xlabel='RT', ylabel='count', title='RT distributions')
for i, subj_data in controls_data.groupby('subj_idx'):
subj_data.rt.hist(bins=20, histtype='step', ax=ax)
controls_model.plot_posterior_predictive(figsize=(15, 10))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Import pandas
Step2: Import TSV
Step4: Escape special characters
Step7: Creating the markdown files
Step8: These files are in the publications directory, one directory below where we're working from.
|
<ASSISTANT_TASK:>
Python Code:
!cat publications.tsv
import pandas as pd
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
publications
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
Produce entities within text.
return "".join(html_escape_table.get(c,c) for c in text)
import os
for row, item in publications.iterrows():
md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
html_filename = str(item.pub_date) + "-" + item.url_slug
year = item.pub_date[:4]
## YAML variables
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: publications"
md += "\npermalink: /publication/" + html_filename
if len(str(item.excerpt)) > 5:
md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
md += "\ndate: " + str(item.pub_date)
md += "\nvenue: '" + html_escape(item.venue) + "'"
if len(str(item.paper_url)) > 5:
md += "\npaperurl: '" + item.paper_url + "'"
md += "\ncitation: '" + html_escape(item.citation) + "'"
md += "\n---"
## Markdown description for individual page
if len(str(item.excerpt)) > 5:
md += "\n" + html_escape(item.excerpt) + "\n"
if len(str(item.paper_url)) > 5:
md += "\n[Download paper here](" + item.paper_url + ")\n"
md += "\nRecommended citation: " + item.citation
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
!ls ../_publications/
!cat ../_publications/2009-10-01-paper-title-number-1.md
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The mt_obj contains all the data from the edi file, e.g. impedance, tipper, frequency as well as station information (lat/long). To look at any of these parameters you can type, for example
Step2: There are many other parameters you can look at in the mt_obj. Just type mt_obj.[TAB] to see what is available.
Step3: As with the MT object, you can explore the object by typing mt_obj.Z.[TAB] to see the available attributes.
Step4: Make some change to the data and save to a new file
|
<ASSISTANT_TASK:>
Python Code:
# import required modules
from mtpy.core.mt import MT
# Define the path to your edi file
edi_file = "C:/mtpywin/mtpy/examples/data/edi_files_2/Synth00.edi"
# Create an MT object
mt_obj = MT(edi_file)
# To see the latitude and longitude
print(mt_obj.lat, mt_obj.lon)
# To see the easting, northing, and elevation
print(mt_obj.east, mt_obj.north, mt_obj.elev)
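# The notebook text suggests exploring mt_obj with tab completion; a rough equivalent
# in plain Python (a sketch, not part of the original notebook) is to list the object's
# public attributes:
print([attr for attr in dir(mt_obj) if not attr.startswith('_')])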
# for example, to see the frequency values represented in the impedance tensor:
print(mt_obj.Z.freq)
# or to see the impedance tensor (first 4 elements)
print(mt_obj.Z.z[:4])
# or the resistivity or phase (first 4 values)
print(mt_obj.Z.resistivity[:4])
print(mt_obj.Z.phase[:4])
# import required modules
from mtpy.core.mt import MT
import os
# Define the path to your edi file and save path
edi_file = "C:/mtpywin/mtpy/examples/data/edi_files_2/Synth00.edi"
savepath = r"C:/tmp"
# Create an MT object
mt_obj = MT(edi_file)
# To plot the edi file we read in in Part 1 & save to file:
pt_obj = mt_obj.plot_mt_response(plot_num=1, # 1 = yx and xy; 2 = all 4 components
# 3 = off diagonal + determinant
plot_tipper = 'yri',
plot_pt = 'y' # plot phase tensor 'y' or 'n'
)
#pt_obj.save_plot(os.path.join(savepath,"Synth00.png"), fig_dpi=400)
# import required modules
from mtpy.core.mt import MT
import os
# Define the path to your edi file and save path
edi_file = r"C:/mtpywin/mtpy/examples/data/edi_files_2/Synth00.edi"
savepath = r"C:/tmp"
# Create an MT object
mt_obj = MT(edi_file)
# First, define a frequency array:
# Every second frequency:
new_freq_list = mt_obj.Z.freq[::2]
# OR 5 periods per decade from 10^-4 to 10^3 seconds
from mtpy.utils.calculator import get_period_list
new_freq_list = 1./get_period_list(1e-4,1e3,5)
# Create new Z and Tipper objects containing interpolated data
new_Z_obj, new_Tipper_obj = mt_obj.interpolate(new_freq_list)
# Write a new edi file using the new data
mt_obj.write_mt_file(
save_dir=savepath,
fn_basename='Synth00_5ppd',
file_type='edi',
new_Z_obj=new_Z_obj, # provide a z object to update the data
new_Tipper_obj=new_Tipper_obj, # provide a tipper object
longitude_format='LONG', # write longitudes as 'LONG' not ‘LON’
latlon_format='dd'# write as decimal degrees (any other input
# will write as degrees:minutes:seconds
)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-3', 'sandbox-1', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Source estimation methods such as MNE require a noise estimate from the recordings.
Step2: The definition of noise depends on the paradigm. In MEG it is quite common to use empty-room recordings for this purpose.
Step3: Now that you have the covariance matrix in an MNE-Python object, you can save it to disk or use it directly for source estimation.
Step4: Note that this method also attenuates any activity in your signal that resembles the baseline period.
Step5: Plot the covariance matrices
Step6: How should I regularize the covariance matrix?
Step7: This procedure evaluates the noise covariance quantitatively by how well it whitens the evoked data.
Step8: This plot displays both the whitened evoked signals for each channel and the whitened global field power (GFP).
Step9: This will plot the whitened evoked for the optimal estimator and display the GFPs side by side.
|
<ASSISTANT_TASK:>
Python Code:
import os.path as op
import mne
from mne.datasets import sample
data_path = sample.data_path()
raw_empty_room_fname = op.join(
data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference('average', projection=True)
raw.info['bads'] += ['EEG 053'] # bads + 1 more
raw_empty_room.info['bads'] = [
bb for bb in raw.info['bads'] if 'EEG' not in bb]
raw_empty_room.add_proj(
[pp.copy() for pp in raw.info['projs'] if 'EEG' not in pp['desc']])
noise_cov = mne.compute_raw_covariance(
raw_empty_room, tmin=0, tmax=None)
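# A sketch (not in the original tutorial): the covariance can be written to disk for
# later reuse with mne.write_cov and read back with mne.read_cov, e.g.
# mne.write_cov('ernoise-cov.fif', noise_cov)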
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
baseline=(-0.2, 0.0), decim=3, # we'll decimate for speed
verbose='error') # and ignore the warning about aliasing
noise_cov_baseline = mne.compute_covariance(epochs, tmax=0)
noise_cov.plot(raw_empty_room.info, proj=True)
noise_cov_baseline.plot(epochs.info, proj=True)
noise_cov_reg = mne.compute_covariance(epochs, tmax=0., method='auto',
rank=None)
evoked = epochs.average()
evoked.plot_white(noise_cov_reg, time_unit='s')
noise_covs = mne.compute_covariance(
epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True,
rank=None)
evoked.plot_white(noise_covs, time_unit='s')
evoked_meg = evoked.copy().pick('meg')
noise_cov['method'] = 'empty_room'
noise_cov_baseline['method'] = 'baseline'
evoked_meg.plot_white([noise_cov_baseline, noise_cov], time_unit='s')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Importing data and previewing
Step2: Oh no, these are some messy column names! Gotta clean them up, truncating the first 308 characters.
Step3: Creating two dataframes
Step4: Topic interest
Step5: Results
Step6: Topic expertise
Step7: Meta questions about THW
Step8: Personal experience with scientific computing
Step9: What skill level should we aim for?
Step10: What should our sessions look like?
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
df = pd.read_csv("thwfall2017-survey.csv")
df = df[1:]
df.columns[0:3]
count = 0
for column in df.columns:
if count < 43:
df = df.rename(columns = {column:column[308:]})
print(len(column), column[308:])
count = count + 1
topic_list = df.columns[0:37]
topic_list
df_topics = df[topic_list]
df_meta = df
df_meta = df[['Skill level', 'Personal experience', 'Presentation style']]
topic_interest = {}
topic_teaching = {}
for topic in df_topics:
topic_interest[topic] = 0
topic_teaching[topic] = 0
for row in df_topics[topic]:
# if row contains only value 1, increment interest dict by 1
if str(row).find('1')>=0 and str(row).find('2')==-1:
topic_interest[topic] += 1
# if row contains value 2, increment interest dict by 3
if str(row).find('2')>=0:
topic_interest[topic] += 3
if str(row).find('3')>=0:
topic_teaching[topic] += 1
topic_interest_df = pd.DataFrame.from_dict(topic_interest, orient="index")
topic_interest_df.sort_values([0], ascending=False)
topic_interest_df = topic_interest_df.sort_values([0], ascending=True)
topic_interest_df.plot(figsize=[8,14], kind='barh', fontsize=20)
topic_teaching_df = pd.DataFrame.from_dict(topic_teaching, orient="index")
topic_teaching_df = topic_teaching_df[topic_teaching_df[0] != 0]
topic_teaching_df.sort_values([0], ascending=False)
topic_teaching_df = topic_teaching_df.sort_values([0], ascending=True)
topic_teaching_df.plot(figsize=[8,10], kind='barh', fontsize=20)
df_meta = df_meta.dropna()
df_meta[0:4]
fig, ax = plt.subplots()
pe_df = df_meta['Personal experience'].value_counts(sort=False).sort_index(ascending=True)
pe_plot = pe_df.plot(kind='barh', fontsize=20, figsize=[8,4], ax=ax)
plt.title("What is your personal experience with scientific computing?", size=20)
ax.set_yticklabels(["Beginner", "Intermediate", "Advanced"])
fig, ax = plt.subplots()
skill_df = df_meta['Skill level'].value_counts(sort=False).sort_index(ascending=True)
skill_plot = skill_df.plot(kind='barh', fontsize=20, figsize=[8,4], ax=ax)
plt.title("What skill level should we aim for?", size=20)
ax.set_yticklabels(["Beginner", "Intermediate", "Advanced"])
fig, ax = plt.subplots()
style_df = df_meta['Presentation style'].value_counts(sort=False).sort_index(ascending=True)
style_plot = style_df.plot(kind='barh', fontsize=20, figsize=[8,4], ax=ax)
plt.title("Session format", size=20)
ax.set_yticklabels(["100% presentation / 0% hackathon",
"75% presentation / 25% hackathon",
"50% presentation / 50% hackathon",
"25% presentation / 75% hackathon",
"0% presentation / 100% hackathon"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Classification with TensorFlow
Step2: And then download the dataset.
Step3: And load the data into a DataFrame and take a peek.
Step4: We can see that all of the data is numeric, but varies a bit in scale.
Step5: No missing data. Only 303 rows of data, though, so we aren't working with a huge dataset.
Step6: There are no obviously strong correlations.
Step7: In this data female maps to 0 and male maps to 1, so there are over twice as many men in the dataset.
Step8: In this case the dataset looks more balanced.
Step9: The dataset seems to be pretty heavily skewed toward individuals in their 50s and 60s.
Step10: We'll also want to normalize our feature data before feeding it into the model.
Step11: We can also now split off a validation set from our data. Since we have so many more men than women in this dataset, we will stratify on sex.
Step12: We'll use the TensorFlow Keras Sequential model. The input size needs to be equal to the number of input features that we have. The output size needs to be 1 since we are predicting a yes/no value. The number and width of layers in between are an area for experimentation, as are the activation functions.
Step13: We can now compile the model. We use 'binary_crossentropy' loss since this is a binary classification model.
Step14: And finally, we can actually fit the model. We'll start with a run of 500 training epochs. Once we are done, we'll print out the final accuracy the model achieved.
Step15: We got perfect accuracy in our model. Let's see how the accuracy improves and the loss is reduced over epochs.
Step16: We seem to have kept training this model far too long. The accuracy reaches perfection, and the loss moves to 0.0 after a few hundred epochs.
Step17: As you can see, our predictions are continuous numbers, not the 1 or 0 values that we expected. These values are confidences that the value is 1. Let's look at them in a histogram.
Step18: Here we can see that the model is highly confident yes or no in many cases, but there are some cases where the model was unsure.
Step19: This puts the cut-off threshold for a yes/no decision at 0.5. Let's think about the implications of this.
Step20: When we ran this model, our score was in the low 80s, which is not great. Yours is likely similar.
Step21: Exercise 2
|
<ASSISTANT_TASK:>
Python Code:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
! chmod 600 kaggle.json && (ls ~/.kaggle 2>/dev/null || mkdir ~/.kaggle) && mv kaggle.json ~/.kaggle/ && echo 'Done'
!kaggle datasets download ronitf/heart-disease-uci
!ls
import pandas as pd
df = pd.read_csv('heart-disease-uci.zip')
df.sample(5)
df.describe()
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(10,10))
_ = sns.heatmap(df.corr(), cmap='coolwarm', annot=True)
df['sex'].hist()
df['target'].hist()
df['age'].hist()
FEATURES = df.columns.values[:-1]
TARGET = df.columns.values[-1]
FEATURES, TARGET
df.loc[:, FEATURES] = ((df[FEATURES] - df[FEATURES].min()) / (df[FEATURES].max() - df[FEATURES].min()))
df.describe()
from sklearn.model_selection import train_test_split
X_train, X_validate, y_train, y_validate = train_test_split(
df[FEATURES], df[TARGET], test_size=0.2, stratify=df['sex'])
X_train.shape, X_validate.shape
import tensorflow as tf
model = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation=tf.nn.relu,
input_shape=(FEATURES.size,)),
tf.keras.layers.Dense(32, activation=tf.nn.relu),
tf.keras.layers.Dense(16, activation=tf.nn.relu),
tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
])
model.summary()
model.compile(
loss='binary_crossentropy',
optimizer='Adam',
metrics=['accuracy']
)
history = model.fit(X_train, y_train, epochs=500, verbose=0)
history.history['accuracy'][-1]
import matplotlib.pyplot as plt
plt.figure(figsize=(16,5))
plt.subplot(1,2,1)
plt.plot(history.history['accuracy'])
plt.title('Training Accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train_accuracy'], loc='best')
plt.subplot(1,2,2)
plt.plot(history.history['loss'])
plt.title('Training Loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train_loss'], loc='best')
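# The flat curves above suggest training ran much longer than needed. A sketch of one
# remedy (not part of the original notebook): pass an EarlyStopping callback so that
# fitting halts once the loss stops improving, e.g.
# early_stop = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=20, restore_best_weights=True)
# history = model.fit(X_train, y_train, epochs=500, verbose=0, callbacks=[early_stop])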
predictions = model.predict(X_validate)
predictions[:10]
import matplotlib.pyplot as plt
_ = plt.hist(predictions)
predictions = [round(x[0]) for x in predictions]
_ = plt.hist(predictions)
from sklearn.metrics import accuracy_score
accuracy_score(y_validate, predictions)
# Your Code Goes Here
# Your code goes here
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Compute inverse solution
Step2: View source activations
Step3: Using vector solutions
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
print(__doc__)
data_path = sample.data_path()
label = 'Aud-lh'
label_fname = data_path + '/MEG/sample/labels/%s.label' % label
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']
pick_ori = "normal" # Get signed values to see the effect of sign flip
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=pick_ori)
label = mne.read_label(label_fname)
stc_label = stc.in_label(label)
modes = ('mean', 'mean_flip', 'pca_flip')
tcs = dict()
for mode in modes:
tcs[mode] = stc.extract_label_time_course(label, src, mode=mode)
print("Number of vertices : %d" % len(stc_label.data))
fig, ax = plt.subplots(1)
t = 1e3 * stc_label.times
ax.plot(t, stc_label.data.T, 'k', linewidth=0.5, alpha=0.5)
pe = [path_effects.Stroke(linewidth=5, foreground='w', alpha=0.5),
path_effects.Normal()]
for mode, tc in tcs.items():
ax.plot(t, tc[0], linewidth=3, label=str(mode), path_effects=pe)
xlim = t[[0, -1]]
ylim = [-27, 22]
ax.legend(loc='upper right')
ax.set(xlabel='Time (ms)', ylabel='Source amplitude',
title='Activations in Label %r' % (label.name),
xlim=xlim, ylim=ylim)
mne.viz.tight_layout()
pick_ori = 'vector'
stc_vec = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori=pick_ori)
data = stc_vec.extract_label_time_course(label, src)
fig, ax = plt.subplots(1)
stc_vec_label = stc_vec.in_label(label)
colors = ['#EE6677', '#228833', '#4477AA']
for ii, name in enumerate('XYZ'):
color = colors[ii]
ax.plot(t, stc_vec_label.data[:, ii].T, color=color, lw=0.5, alpha=0.5,
zorder=5 - ii)
ax.plot(t, data[0, ii], lw=3, color=color, label='+' + name, zorder=8 - ii,
path_effects=pe)
ax.legend(loc='upper right')
ax.set(xlabel='Time (ms)', ylabel='Source amplitude',
title='Mean vector activations in Label %r' % (label.name,),
xlim=xlim, ylim=ylim)
mne.viz.tight_layout()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data preprocessing
Step2: Next, we'll create partitions of the test data depending on whether the spurious feature equals the label or not.
Step5: As such, there is a small level of spurious correlation here (i.e., x_sp is spuriously correlated with y), but as we'll see, this is not enough to make the max-margin classifier rely on the spurious feature.
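As a rough sketch of how that level can be measured (the arrays below are invented), the spurious-correlation level is simply the fraction of points where the spurious feature agrees with the label:
import numpy as np
x_sp = np.array([1, 0, 1, 1, 0, 1])   # hypothetical binary spurious feature
y    = np.array([1, 0, 0, 1, 0, 1])   # hypothetical binary labels
print(np.mean(x_sp == y))             # Pr[x_sp = y]; 0.5 would mean no spurious correlation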
Step6: Max-margin classifier on the original dataset
Step7: Let us see how well this classifier performs on the "majority" data where x_sp = y and the "minority" data where x_sp != y.
Step8: Since there is not much difference in the proportion of majority and minority data in this dataset, the max-margin classifier fares equally well on both groups!
Step9: Max-margin classifier on dataset with spurious correlation
Step10: Let's train a max-margin classifier on this dataset and observe its performance on the test majority and test minority groups.
Step11: The classifier has worse performance on the minority group, implying that it has positively weighted the spurious feature. As before, let us also look at the exact weight assigned to the spurious feature.
Step12: Max-margin norm vs training set size
Step14: Statistical skews
Step15: Now, we'll create datasets with statistical skews by merely duplicating points in the group x_sp = y. By doing so, we'll create datasets with levels of spurious correlation varying from 0.5 to 0.75 to 0.99. We'll then train with SGD with learning rate 0.01.
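To illustrate the duplication arithmetic (these numbers are illustrative, not pulled from the code below): starting from the same number of unique points in each group, duplicating only the x_sp = y group pushes Pr[x_sp = y] up to the target level.
n_unique, p = 125, 0.75                        # unique points per group, target correlation level
n_minority = n_unique                          # x_sp != y points, used once each
n_majority = int(n_unique * p / (1 - p))       # x_sp = y points after duplication (375 here)
print(n_majority / (n_majority + n_minority))  # 0.75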
Step16: Now let's plot how the weight assigned to the spurious feature evolves over time. First we need to do a little bit of work to gather these weights from the callbacks
Step17: What happens to the max-margin classifier in these settings? The above datasets have roughly the same number of unique datapoints in both the majority and minority group. So we'd expect the max-margin classifier to not assign much weight to the spurious feature in all of these datasets. Let's verify that by looking at its error on the two groups and weight assigned to the spurious component.
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import pprint
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
#import cvxpy as cp
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from sklearn import preprocessing
import keras
from keras.models import Sequential
from keras import backend as K
from keras.models import Model
import numpy as np
import cvxpy as cp
colors = ['black','#377eb8', '#ff7f00', '#4daf4a',
'#984ea3', '#a65628', '#f781bf',
'#999999', '#e41a1c', '#dede00']
markers = [ 'h', '*', '<', 'o', 's', 'v', 'D' ]
local_dir = '/home/jupyter'
plot_dir = f'{local_dir}/plot_pdfs'
np.random.seed(seed=42)
# Load and shuffle data
data = np.loadtxt('third_party/obesity_dataset/ObesityDataSet.csv',skiprows=1,delimiter=',')
np.random.shuffle(data)
spur_ind = 18 # The 18th column inthe data corresponds to the "spurious feature"
label_ind = 20 # The 20th column corresponds to the label
#==================
# Conversion to binary classification task:
#==================
data = data[np.logical_or.reduce([data[:,label_ind] == x for x in [0,1,2,4,5,6]]),:]
print(f"{data.shape[0]} datapoints remain.")
# Convert these labels to 0 and 1.
data[data[:,label_ind]<=3, label_ind] = 0
data[data[:,label_ind]>=4, label_ind] = 1
print(f"Labels have been simplified to {np.unique(data[:,label_ind])}")
labels = data[:,-1]
data = data[:,:-1]
# Rescale the data
#data = preprocessing.scale(data)
min_max_scaler = preprocessing.MinMaxScaler()
data = min_max_scaler.fit_transform(data)
# num_data = data.shape[0]
# Create test-train split
num_data = data.shape[0]
x_train, y_train = data[:int(0.6*num_data)], labels[:int(0.6*num_data)]
x_test, y_test = data[int(0.6*num_data):], labels[int(0.6*num_data):]
# Determine which indices in the training set correspond to x_sp = y and x_sp != y
pos_train_ind = (x_train[:,spur_ind] == y_train)
neg_train_ind = (x_train[:,spur_ind] != y_train)
# Determine which indices in the test set correspond to x_sp = y and x_sp != y
pos_test_ind = (x_test[:,spur_ind] == y_test)
neg_test_ind = (x_test[:,spur_ind] != y_test)
# Select datapoints in the training set where x_sp = y and x_sp != y
pos_x_train, pos_y_train = x_train[pos_train_ind], y_train[pos_train_ind]
neg_x_train, neg_y_train = x_train[neg_train_ind], y_train[neg_train_ind]
# Select datapoints in the test set where x_sp = y and x_sp != y
pos_x_test, pos_y_test = x_test[pos_test_ind], y_test[pos_test_ind]
neg_x_test, neg_y_test = x_test[neg_test_ind], y_test[neg_test_ind]
print(f'Num training data where x_sp = y: {pos_x_train.shape[0]}')
print(f'Num training data where x_sp = y: {neg_x_train.shape[0]}')
print(f'Num test data where x_sp = y: {pos_x_test.shape[0]}')
print(f'Num test data where x_sp = y: {neg_x_test.shape[0]}')
def fit_max_margin(x_train, y_train):
    """Returns max-margin solution on the training datapoint

    # Arguments:
        x_train (np.array): training inputs
        y_train (np.array): 0/1 training labels

    # Returns
        weights (np.array): array of weights
        bias (float): bias value
    """
# One could also use scipy to do this but
# with the following code there's greater flexibility to play around with the
# constraints and see how things change
x_train = x_train.reshape((x_train.shape[0],-1))
A = np.hstack([x_train,np.ones((x_train.shape[0],1))]) # Append a "1" for the bias feature
Ide = np.identity(x_train.shape[1])
b_ones = np.ones(A.shape[0])
cp_weights = cp.Variable(A.shape[1])
# Quadratic program corresponding to minimizing ||w||^2
# subject to y (x^T w) >= 1
prob = cp.Problem(cp.Minimize(cp.quad_form(cp_weights[:-1], Ide)),
[np.diag(2*y_train-1) @ ((A @ cp_weights)) >= b_ones])
prob.solve(verbose=False, solver=cp.ECOS)
weights = cp_weights.value
return weights[:-1], weights[-1]
def evaluate_max_margin(x_test, y_test, weights, bias):
    """Returns accuracy of a linear classifier on test data

    # Arguments:
        x_test (np.array): test inputs
        y_test (np.array): 0/1 test labels
        weights, bias (np.array, float): weights and bias

    # Returns
        accuracy: accuracy of the weights on the given test data
    """
x_test = x_test.reshape((x_test.shape[0],-1))
margins = np.matmul(x_test, weights) + bias
accuracy = np.mean(np.multiply(margins, 2*y_test - 1) > 0.0)
return accuracy
mm_weights, mm_bias = fit_max_margin(x_train,y_train)
# If you want to look at how much weight the classifier assigns to different weights, plot this.
#plt.bar(np.arange(mm_weights.shape[0]),np.abs(mm_weights))
#plt.xticks(np.arange(mm_weights.shape[0]));
accuracy = evaluate_max_margin(x_test, y_test, mm_weights, mm_bias)
print(f"Test accuracy = {accuracy}")
pos_accuracy = evaluate_max_margin(pos_x_test, pos_y_test, mm_weights, mm_bias)
print(f"Majority test accuracy = {pos_accuracy}")
neg_accuracy = evaluate_max_margin(neg_x_test, neg_y_test, mm_weights, mm_bias)
print(f"Minority test accuracy = {neg_accuracy}")
(mm_weights[spur_ind])/np.linalg.norm(mm_weights)
factor = 50 # The factor by which we want to reduce the size of the minority datapoints
print(f"Size of new majority group = {pos_x_train.shape[0]}")
print(f"Size of new minority group = {int(neg_x_train.shape[0]/factor)}")
geom_skewed_x_train = np.vstack([pos_x_train, neg_x_train[:int(neg_x_train.shape[0]/factor)]])
geom_skewed_y_train = np.concatenate([pos_y_train, neg_y_train[:int(neg_y_train.shape[0]/factor)]])
skewed_mm_weights, skewed_mm_bias = fit_max_margin(geom_skewed_x_train,geom_skewed_y_train)
pos_accuracy = evaluate_max_margin(pos_x_test, pos_y_test, skewed_mm_weights, skewed_mm_bias)
print(f"Majority test accuracy = {pos_accuracy}")
neg_accuracy = evaluate_max_margin(neg_x_test, neg_y_test, skewed_mm_weights, skewed_mm_bias)
print(f"Minority test accuracy = {neg_accuracy}")
skewed_mm_weights[spur_ind]/np.linalg.norm(skewed_mm_weights)
n_train_list = [4,8,16,32,64,128,256,512,1024] # Varying number of training dataset sizes.
norms = []
for n_train in n_train_list:
norms += [[]]
for k in range(5):
inds = np.random.choice(np.arange(x_train.shape[0]), n_train,replace=False) # Randomly select
# subset of data
# Fit max-margin on the subset of the data, but only after removing the spurious feature
temp_mm_weights, temp_mm_bias = fit_max_margin(np.delete(x_train[inds], spur_ind, axis=1),y_train[inds])
norms[-1] += [np.linalg.norm(temp_mm_weights)]
ys = [np.mean(y) for y in norms]
yerrs = [np.std(y) for y in norms] # Error bars
mpl.rc('xtick', labelsize=18)
mpl.rc('ytick', labelsize=18)
mpl.rcParams['font.family'] ='monospace'
plt.figure(figsize=(5,5))
fig, ax = plt.subplots()
ax.set_xlabel('Training set size',fontsize=24)
ax.set_ylabel(r'$\ell_2$ Norm',fontsize=24)
ax.errorbar(x=np.log(n_train_list),y=ys,yerr= yerrs,marker=markers[1], color=colors[1],
capsize=5,markersize=15)
ax.grid()
plt.tight_layout()
plt.xticks(np.log([16,64,256,1024]), ['16','64','256','1024']);
plt.savefig(f'obesity_geom_skew.pdf')
from keras.layers import Input, Flatten, Dense, Activation
def linear_classifier(input_shape, use_bias = True):
    """Linear classifier and its logit output

    # Arguments
        input_shape (tensor): shape of input image tensor
        use_bias (boolean): use bias variable or not

    # Returns
        model (Model): Keras model instance
        logits (Model): Keras model instance for the logit layer
    """
inputs = Input(shape=input_shape)
x = Dense(1,kernel_initializer='zeros', use_bias = use_bias)(inputs)
logits = Model(inputs=inputs, outputs=x)
outputs = Activation('sigmoid')(x)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model, logits
from keras.optimizers import SGD
from keras.losses import BinaryCrossentropy
# The following callback allows us to store and access weights
# throughout training
from keras.callbacks import Callback
class SaveWeights(Callback):
def __init__(self, verbose=0):
super(SaveWeights, self).__init__()
self.epoch = []
self.history = {}
def on_train_begin(self, logs=None):
self.epoch = []
self.history = {}
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
self.history.setdefault("Weights", []).append(self.model.layers[1].get_weights())
n_epochs = 10000
n_train = 500 # Number of training points we want in the new dataset
spur_p_list= [0.75,0.85] # List of spurious correlation values
# for each value we'll consider a different dataset with that level of spurious correlation
# i.e., Pr[x_sp = y]
# In all the following lists, there'll be one entry corresponding to each of the datasets
# that we create.
histories = []
callback_list = []
# The following are for weights and biases of the max-margin trained on these datasets
stat_skewed_mm_weights_list = []
stat_skewed_mm_bias_list = []
for p in spur_p_list:
# Create a statistically skewed training dataset with spurious correlation level = p
n_pos = int(n_train*p) # Number of points in the x_sp = y group (majority)
n_neg = int(n_train*(1-p))# Number of points in the x_sp != y group (minority)
# We'll create the new "duplicated "majority group (ofsize n_pos) by picking n_neg
# unique points from it and then duplicating it (n_pos/n_neg) times
stat_skewed_x_train = np.vstack([pos_x_train[:n_neg] for i in range(int(n_pos/n_neg))]+
[neg_x_train[:n_neg]])
stat_skewed_y_train = np.concatenate([pos_y_train[:n_neg] for i in range(int(n_pos/n_neg))]+
[neg_y_train[:n_neg]])
print(f"Num points = {stat_skewed_x_train.shape[0]}")
model, _ = linear_classifier(input_shape=x_train.shape[1:])
loss = BinaryCrossentropy(from_logits=False)
model.compile(loss=loss,
optimizer=SGD(learning_rate=0.01),
metrics=['accuracy'])
callbacks = [SaveWeights()]
history = model.fit(stat_skewed_x_train, stat_skewed_y_train,
batch_size=32,
epochs=n_epochs,
validation_data=(x_test, y_test),
shuffle=True, workers=4,
callbacks=callbacks,verbose=0)
histories += [history]
callback_list += [callbacks[0]]
# Learn the max-margin classifier on this dataset and store it in a list
stat_skewed_mm_weights, stat_skewed_mm_bias = fit_max_margin(stat_skewed_x_train,stat_skewed_y_train)
stat_skewed_mm_weights_list += [stat_skewed_mm_weights]
stat_skewed_mm_bias_list += [stat_skewed_mm_bias]
spur_weights_list = [] # Each entry in this list will correspond
# to one of the spurious correlation levels
for callback in callback_list:
# Each loop corresponds to training on a particular level of spurious correlation
spur_weights = [] # This will be a list of scalar values equal to w_sp(t)/||w(t)||
for weights in callback.history['Weights']:
spur_weights += [np.abs(weights[0][spur_ind])/np.linalg.norm(weights[0])]
spur_weights= np.array(spur_weights)
spur_weights_list += [spur_weights]
# Plotting function
mpl.rc('xtick', labelsize=18)
mpl.rc('ytick', labelsize=18)
mpl.rcParams['font.family'] ='monospace'
plt.figure(figsize=(5,5))
fig, ax = plt.subplots()
ax.set_xlabel('Epoch',fontsize=24)
ax.set_ylabel('Spurious component',fontsize=17)
for k in [0,1]:
inds = np.arange(0,n_epochs,100)
ax.plot(inds,spur_weights_list[k][inds],
linestyle = '--',color=colors[k])
inds = np.arange(0,n_epochs,1000)
ax.scatter(inds,spur_weights_list[k][inds],marker=markers[k],
color=colors[k],s=200,label=f'{spur_p_list[k]}')
legend = plt.legend(fontsize=17, ncol=1, loc='upper left',title=r'$Pr[x_{sp}y > 0]$')
plt.setp(legend.get_title(),fontsize='16')
ax.grid()
#plt.ylim([0,1])
plt.tight_layout()
for k in range(len(spur_p_list)):
print(f"Spurious correlation = {spur_p_list[k]}")
pos_accuracy = evaluate_max_margin(pos_x_test, pos_y_test,
stat_skewed_mm_weights_list[k], stat_skewed_mm_bias_list[k])
print(f"Majority test accuracy = {pos_accuracy}")
neg_accuracy = evaluate_max_margin(neg_x_test, neg_y_test,
stat_skewed_mm_weights_list[k], stat_skewed_mm_bias_list[k])
print(f"Minority test accuracy = {neg_accuracy}")
print(f"Spurious component = {stat_skewed_mm_weights_list[k][spur_ind]/np.linalg.norm(stat_skewed_mm_weights_list[k])}")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Recipe Recommender Capstone Project
Step4: Removing the Unrelated Words from Ingredients
Step5: Ingredient Analysis
Step6: Number of unique ingredients
Step7: Find out which ingredients are most frequently used in the recipes
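As a tiny illustration of the counting idea (the recipes below are invented; the notebook itself builds a DictVectorizer matrix instead):
from collections import Counter
toy_recipes = [["salt", "butter", "flour"], ["salt", "sugar", "flour"], ["salt", "egg"]]
counts = Counter(ing for recipe in toy_recipes for ing in recipe)
print(counts.most_common(2))   # [('salt', 3), ('flour', 2)]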
Step8: Most Popular 30 Ingredients
Step9: Filter out Rare Ingredients
Step10: Percentage of recipes having a non-standard ingredient
Step11: Average number of ingredients dropped from recipes
Step12: Ingredient Association
Step13: Re-calculate the ingredient occurrence matrix
Step18: Ingredient Recommendation
Step19: Randomly select a recipe and ingredients to replace
Step20: Find the Best Fitting Ingredients with a Surprise Factor
Step21: Find the Best Fitting Ingredients Weighted by Ratings
Step22: Network Analysis
|
<ASSISTANT_TASK:>
Python Code:
import json
import numpy as np
from numpy import ma
import io
import re
import itertools
import random
from bokeh.charts import Histogram
import networkx as nx
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
from sklearn.feature_extraction import DictVectorizer
from collections import Counter
from sklearn.feature_extraction.text import CountVectorizer
from bokeh.plotting import figure, output_notebook, show
from bokeh.models import NumeralTickFormatter,ColumnDataSource, LabelSet
output_notebook()
import copy
%matplotlib inline
def get_matrix_form(ingredients_list):
    """Converts list of ingredients to sparse matrix form

    Args:
        ingredients_list: list of ingredients lists
    """
vec = DictVectorizer()
X = vec.fit_transform([Counter(x) for x in ingredients_list])
return X.toarray(),np.array(vec.get_feature_names())
#filter out the words that are not ingredients
unrelated = ["","a","an",'teaspoon','oz','cup','tablespoon','gram','kilogram','kg','pound',
'milliliter','ml','envelope','substitute','chip',
'ounce','tsp','tbl','tb','tbsp','pint','pt','lb','liter','fluid',"halves","inch",
"skinless", "boneless","pounded","thickness","thick","cooking","cubed","instant",
"to","from","unsalted","pinch","chopped",'fresh', 'flat', 'leaf',"packages",
"minced","cloves","pinched","sliced","tablespoons","taste","all","purpose",
'teaspoons', 'beaten', 'cups', 'for', 'frying', 'or', 'as', 'needed' ,"ground",
"large","small","big","cut","half","finely","peeled","cooked","lightly",
"in","and","or","kosher","halal","ounces","ounces","can","freshly","crushed","dried",
"slices","slice","diced",'into', 'cubes','divided',"melted","frozen","deveined",
"optional","cans","canned","grated","pounds","lean","packed","mashed","overripe",
'quarters', 'vertically','package','tops', 'removed', 'seeded','softened','extra',
"bottle","condensed","of","style","heavy","torn","piece","shredded","pieces",
"stewed","chunks","chunk","mix","whole","clove","light","assorted","s","with",
"food","jar","spray","toasted",'favorite', 'chop','bite',
'chuck','chop','cleaned','container','covered','degree','delicious','double',
'medium','prepared',
'preserve','quick','refrigerated','rinsed','roast','rolled','room','stew',
'temperature','plus','packet','pack',
'trimmed','unwrapped','warm','flavored','link','sized','bulk','low',
'high','sifted','','square','thinly','drained','halved',
'cube','concentrate','crumb','crumbled','warmed','partially',
'portion','dissolved','halve','skinned','thin','deboned','boiled',
'butterflied','cooled','more','defrosted','size','quartered'
]
def is_numeric(c):
    """Check if a given string is numeric or alphanumeric

    Args:
        c (str): string to check

    Returns:
        True if the given parameter is numeric
    """
try:
float(c)
except ValueError:
return False
else:
return True
def clean_ingredients(ingredients):
    """Filters the words that are not ingredients out of an ingredient specification line.

    Args:
        ingredients (str): a line from the ingredient list

    Returns:
        list: the cleaned, lemmatized ingredient words that survive the filter
    """
ingredients = list(map(str.lower,re.split("\W+",ingredients)))
return [wnl.lemmatize(ingredient.strip()) for ingredient in ingredients if not (len(ingredient) <1 or ingredient in unrelated or is_numeric(ingredient))]
#read the recipes
all_ingredients = []
recipe_dic = dict()
reviews_dic = dict()
names = []
with open("data/recipes.data","r") as f:
for line in f:
recipe = json.load(io.StringIO(line))
name = recipe['name']
num_rating = recipe['num_ratings']
categories = recipe['categories']
rid = recipe['id']
rating = recipe['rating']
cleaned_ingredients = [clean_ingredients(_ingredients) for _ingredients in recipe['ingredients'] ]
ingredients_filtered = [_ingredient for _ingredient in cleaned_ingredients if len(_ingredient) < 4]
#print (" id: ",rid,"\n Rating: ",rating,"\n #Ratings: ",num_rating,"\n Name: ",name,"\n Categories: ",categories,"\n Ingredients: ",ingredients,"\n\n")
if ingredients_filtered and len(ingredients_filtered) > 0:
recipe_dic[name] = list(set([" ".join(x) for x in ingredients_filtered]))#ingredients_filtered
reviews_dic[name] = rating
len(recipe_dic.values())
#flatten the ingredients of each recipe for vectorization
all_ingredients = [ingredient for ingredient in recipe_dic.values()]
ingredient_matrix,vocabulary = get_matrix_form(all_ingredients)
word_counts = ingredient_matrix.sum(axis=0)
sorted_indicies = np.argsort(word_counts)[::-1]
word_counts = np.array(list(map(lambda x: int(x),word_counts)))
ingredient_matrix.shape[1]
ingredient_frequency = list(zip(vocabulary[sorted_indicies],word_counts[sorted_indicies]))[:1000]
#ingredient_frequency = list(zip(vocabulary[sorted_indicies],word_counts[sorted_indicies]))
#sorted(vec.vocabulary_)
#divide number of occurance of each ingredient to the total number of recipes
popular_ingredients = list(map(lambda x: ( x[0],float( "{0:.2f}".format(x[1] / len(recipe_dic.values())) ) ), ingredient_frequency[:30]))
#separate the ingredient names and frequencies for plotting
ings = list(map(lambda x: x[0], popular_ingredients))[::-1]
freq = list(map(lambda x: x[1] , popular_ingredients))[::-1]
p = figure(title="Most Popular 30 Ingredients", y_range=ings, x_range=[0,1])
p.segment(0, ings, freq, ings, line_width=2, line_color="green", )
p.circle(freq, ings, size=15, fill_color="orange", line_color="green", line_width=3, )
p.xaxis[0].formatter = NumeralTickFormatter(format="0%")
source = ColumnDataSource(data=dict(height=ings,
weight=freq,
names=list(map(lambda x: "{0}%".format(int(x * 100)),freq))))
labels = LabelSet(x='weight', y='height', text='names', level='glyph',text_font_size="7pt",
x_offset=10, y_offset=-1, source=source, render_mode='canvas')
p.add_layout(labels)
show(p)
# keep only ingredients that appear often enough (the filter below requires at least 5 occurrences)
min_10recp_ingredients = [x[0] for x in ingredient_frequency if x[1] >= 5 and len(x[0]) > 2 ]
#calculate stats about the frequent and rare ingredients
nof_eliminated = 0
total_eliminated = 0
updated_recipe_dic = {}
for recipe, ingredients in recipe_dic.items():
selected_ingredients = set(ingredients).intersection(set(min_10recp_ingredients))
if len(ingredients) - len(selected_ingredients) != 0:
nof_eliminated += 1
total_eliminated += len(ingredients) - len(selected_ingredients)
updated_recipe_dic[recipe] = selected_ingredients
round(nof_eliminated / len(recipe_dic.items()),2)
round(total_eliminated / nof_eliminated ,2)
recps_with_essential_ingredients = [ingredient for ingredient in updated_recipe_dic.values()]
ingredient_matrix, vocabulary = get_matrix_form(recps_with_essential_ingredients)
word_counts = ingredient_matrix.sum(axis=0)
sorted_indicies = np.argsort(word_counts)[::-1]
word_counts = np.array(list(map(lambda x: int(x),word_counts)))
len(recps_with_essential_ingredients)
ingredient_matrix.shape
#get the reviews in the same order with recipes
reviews = [round(reviews_dic[recipe],2) for recipe in updated_recipe_dic.keys()]
#np.sum(np.isneginf([x[1] for x in reviews_dic.items()]))
p_reviews = Histogram(reviews, bins=30,
title="Reviews Distribution",xlabel="reviews", ylabel="count",
plot_height=300,plot_width=300)
show(p_reviews)
def get_joint_probabilities(cooccurrence_matrix,indicies,log_probability = True):
    """Calculates the joint probability of ingredient occurrences with the given ones."""
if log_probability:
#sum log probabilities of the co-occurances with the ingredients of the selected recipe
#and sort them in decsending order
with np.errstate(divide='ignore'):
joint_probabilities = np.sum(
ma.log2(cooccurrence_matrix[indicies]).filled(-500),
axis=0)
else:
#multiply the probabilities to get the joint probability of each ingredient's
#occurance with already existing ones in the recipe
joint_probabilities = np.prod(cooccurrence_matrix[indicies], axis=0)
return joint_probabilities
def select_ingredients(indicies,kept_indicies,cooccurrence_matrix,surprise_factor = 1):
    """Selects complementary ingredients that are likely to occur together with the given ones.

    The surprise factor alters the ingredient selection. If it is 1, the ingredient with the
    highest probability is always selected. If it is 3, for example, the ingredient is selected
    randomly from the 3 most likely other ingredients.
    """
new_ingredients = {}
nof_new_ingredients = len(indicies) - len(kept_indicies)
#new_conditional_probabilities = rw_conditional_log_probabilities
#new_sorted_ingredient_indicies = rw_log_sorted_ingredient_indicies.copy()
excluded_ingredients = copy.deepcopy(indicies)
indicies_selected = copy.deepcopy(kept_indicies)
assert surprise_factor > 0, "The surprise factor must be greater than 0"
for i in range(nof_new_ingredients):
con_probabilities = get_joint_probabilities(cooccurrence_matrix,indicies_selected)
sorted_indicies = np.argsort(con_probabilities)[::-1]
#exclude the ingredients of the recipe from the ingredient list
ingredients_to_choose = ma.masked_where(np.in1d(sorted_indicies,
np.array(excluded_ingredients) ),
sorted_indicies)
#choose ingredient that is the most likely or one of the most likely
new_ingredients[i] = random.choice(ingredients_to_choose[~ingredients_to_choose.mask][:surprise_factor].data)
#new_conditional_probabilities = new_conditional_probabilities + cooccurrence_matrix_normalized[new_ingredients[i]]
#new_sorted_ingredient_indicies = np.argsort(new_conditional_probabilities)[::-1]
excluded_ingredients.append(new_ingredients[i])
indicies_selected.append(new_ingredients[i])
return new_ingredients
def calculate_normalized_cooccurrence_matrix(ingredient_matrix, rankings = None):
    """Calculates the normalized co-occurrence matrix of ingredients. If rankings is not None,
    calculate the ranking-weighted co-occurrence matrix instead.
    """
if rankings:
        review_weighted_ingredient_matrix = ingredient_matrix * np.array(rankings)[:, None]  # weight each recipe row by its rating (use the rankings argument rather than the global reviews list)
cooccurrence_matrix = np.dot(review_weighted_ingredient_matrix.transpose(),
review_weighted_ingredient_matrix)
#rw_cooccurrence_matrix_diagonal = np.diagonal(rw_cooccurrence_matrix)
#with np.errstate(divide='ignore', invalid='ignore'):
#rw_cooccurrence_matrix_normalized = np.nan_to_num(np.true_divide(rw_cooccurrence_matrix, rw_cooccurrence_matrix_diagonal[:, None]))
else:
cooccurrence_matrix = np.dot(ingredient_matrix.transpose(),ingredient_matrix)
cooccurrence_matrix_diagonal = np.diagonal(cooccurrence_matrix)
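    # dividing each row by its diagonal entry (the ingredient's own count) turns raw co-occurrence counts into conditional probabilities P(other ingredient | this ingredient)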
with np.errstate(divide='ignore', invalid='ignore'):
cooccurrence_matrix_normalized = np.nan_to_num(np.true_divide(cooccurrence_matrix, cooccurrence_matrix_diagonal[:, None]))
#print('\ncooccurrence_matrix_normalized:\n{0}'.format(cooccurrence_matrix_normalized))
return cooccurrence_matrix_normalized
def bulk_select_ingredients(cooccurrence_matrix,indicies,to_replace_indicies):
    """Select all missing ingredients in one go, based on their joint probability with the given ones."""
#sort the ingredients from maximum to minimum
conditional_probabilities = get_joint_probabilities(cooccurrence_matrix,indicies,False)
#remove the ingredients already in the recipe
sorted_ingredient_indicies = np.argsort(conditional_probabilities)[::-1]
complimentary_ingredients = [ing_index for ing_index in sorted_ingredient_indicies if not ing_index in indicies]
#get as many new ingredients as the ones to be replaced
new_ingredients = vocabulary[complimentary_ingredients[:len(to_replace_indicies)]]
return new_ingredients
def print_recipe(ingredients,to_replace,new_ingredients):
print ("\nOriginal Recipe: \n" + " , ".join(ingredients))
print("\nIngredients dropped: \n" + "\x1b[31m" + " - ".join(to_replace) + "\x1b[0m")
print("\nNew Ingredients added: \n" + "\x1b[32m" + " - ".join(new_ingredients) + "\x1b[0m")
print ("\nNew recipe: \n" + " ** ".join(new_ingredients) + " ** " + " ** ".join(set(ingredients).difference( set(to_replace))))
recipe, ingredients = random.choice(list(updated_recipe_dic.items()))
print ("\nRecipe: " + recipe)
#print ("\nIngredients: " + str(ingredients))
to_replace = np.random.choice(list(ingredients), np.random.randint(low=1, high=2),replace=False)
#print ("\nIngredients to replace: " + str(to_replace))
indicies = [int(np.where(ingredient == vocabulary)[0]) for ingredient in ingredients]
to_replace_indicies = [int(np.where(ingredient == vocabulary)[0]) for ingredient in to_replace]
to_keep_indicies = [ing_index for ing_index in indicies if not ing_index in to_replace_indicies]
nof_new_ingredients = len(to_replace_indicies)
#print ("\nIngredients to keep: " + str(vocabulary[to_keep_indicies]))
cooccurrence_matrix = calculate_normalized_cooccurrence_matrix(ingredient_matrix)
new_ingredients = bulk_select_ingredients(cooccurrence_matrix,indicies,to_replace_indicies)
print_recipe(ingredients,to_replace,new_ingredients)
selected_indicies = select_ingredients(indicies,to_keep_indicies,cooccurrence_matrix,3)
new_selected_ingredients = vocabulary[list(selected_indicies.values())]
print_recipe(ingredients,to_replace,new_selected_ingredients)
rw_cooccurrence_matrix_normalized = calculate_normalized_cooccurrence_matrix(ingredient_matrix,reviews)
selected_indicies = select_ingredients(indicies,to_keep_indicies,rw_cooccurrence_matrix_normalized,3)
new_selected_ingredients = vocabulary[list(selected_indicies.values())]
print_recipe(ingredients,to_replace,new_selected_ingredients)
G=nx.Graph()
for ingredients in updated_recipe_dic.values():
G.add_nodes_from(ingredients)
G.add_edges_from(list(itertools.combinations(list(ingredients), 2)))
import matplotlib.pyplot as plt
plt.style.use('seaborn-paper')
d = nx.degree(G)
import operator
sorted_x = sorted(d.items(), key=operator.itemgetter(1))[::-1]
plt.axis('off')
nlist = [x[0] for x in sorted_x[20:]]
ndegree = [x[1] for x in sorted_x[20:]]
pos = nx.random_layout(G)
nx.draw_networkx_nodes(G,pos,nodelist=nlist, node_color="green",with_labels=False,node_size=ndegree, alpha=0.3)
#nx.draw_networkx_nodes(G,pos, node_color="blue",with_labels=True,node_size=[v * 10 for v in d.values()], alpha=0.7)
nx.draw_networkx_labels(G,pos,dict(zip(nlist,nlist)),font_size=6, font_color="white")
nx.draw_networkx_edges(G, pos,nodelist=nlist,edge_color="white");
pos = nx.shell_layout(G)
#nx.draw(G, nodelist=d.keys(), node_size=[v * 100 for v in d.values()])
nlist = [x[0] for x in sorted_x[:10]]
ndegree = [x[1] * 10 for x in sorted_x[:10]]
nx.draw_networkx_nodes(G,pos,nodelist=nlist, node_color="black",with_labels=False,node_size=ndegree, alpha=0.9)
#nx.draw_networkx_nodes(G,pos, node_color="blue",with_labels=True,node_size=[v * 10 for v in d.values()], alpha=0.7)
nx.draw_networkx_labels(G,pos,dict(zip(nlist,nlist)),font_size=10, font_color="white")
nx.draw_networkx_edges(G, pos,nodelist=nlist,edge_color="white");
nlist = [x[0] for x in sorted_x[10:20]]
ndegree = [x[1] * 3 for x in sorted_x[10:20]]
nx.draw_networkx_nodes(G,pos,nodelist=nlist, node_color="red",with_labels=False,node_size=ndegree, alpha=0.7)
#nx.draw_networkx_nodes(G,pos, node_color="blue",with_labels=True,node_size=[v * 10 for v in d.values()], alpha=0.7)
nx.draw_networkx_labels(G,pos,dict(zip(nlist,nlist)),font_size=10, font_color="white")
nx.draw_networkx_edges(G, pos,nodelist=nlist,edge_color="white");
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
class newNode:
    def __init__(self, x):
        self.data = x
        self.left = self.right = None

def count(root):
    if root is None:
        return 0
    return count(root.left) + count(root.right) + 1

def checkRec(root, n):
    if root is None:
        return False
    if count(root) == n - count(root):
        return True
    return checkRec(root.left, n) or checkRec(root.right, n)

def check(root):
    n = count(root)
    return checkRec(root, n)

if __name__ == '__main__':
    root = newNode(5)
    root.left = newNode(1)
    root.right = newNode(6)
    root.left.left = newNode(3)
    root.right.left = newNode(7)
    root.right.right = newNode(4)
    if check(root):
        print("YES")
    else:
        print("NO")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Decision Tree (Classification)
Step2: Gini Index
Step12: As we can see from the plot, there is not much difference (both curves increase and decrease over a similar range). In practice, Gini Index and Entropy typically yield very similar results, and it is often not worth spending much time evaluating decision tree models with different impurity criteria. As for which one to use, consider the Gini Index: it avoids computing the log, which can make it a bit computationally faster.
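A quick numeric check of that similarity (illustrative only):
import numpy as np
for p in (0.1, 0.3, 0.5):
    ent = -p * np.log2(p) - (1 - p) * np.log2(1 - p)
    gini = 2 * p * (1 - p)
    print(p, round(ent, 3), round(gini, 3))
# both impurities peak at p = 0.5 (entropy 1.0, gini 0.5) and shrink together toward the pure ends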
Step13: We will load the Iris dataset and use it as a sample dataset to test our algorithm. This data set consists of 3 different types of irises (Setosa, Versicolour, and Virginica). It is stored as a 150x4 numpy.ndarray, where the rows are the samples and the columns are Sepal Length, Sepal Width, Petal Length and Petal Width.
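A short snippet to load and inspect it (just a sanity check on the shapes, separate from the code further down):
from sklearn.datasets import load_iris
iris = load_iris()
print(iris.data.shape)      # (150, 4)
print(iris.target_names)    # ['setosa' 'versicolor' 'virginica']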
Step14: Advantages of decision trees
|
<ASSISTANT_TASK:>
Python Code:
# code for loading the format for the notebook
import os
# path : store the current path to convert back to it later
path = os.getcwd()
os.chdir(os.path.join('..', 'notebook_format'))
from formats import load_style
load_style(css_style = 'custom2.css')
os.chdir(path)
# 1. magic for inline plot
# 2. magic to print version
# 3. magic so that the notebook will reload external python modules
# 4. magic to enable retina (high resolution) plots
# https://gist.github.com/minrk/3301035
%matplotlib inline
%load_ext watermark
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from graphviz import Source
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.tree import export_graphviz
from sklearn.tree import DecisionTreeClassifier
%watermark -a 'Ethen' -d -t -v -p numpy,pandas,matplotlib,sklearn
def entropy(p):
return - p * np.log2(p) - (1 - p) * np.log2(1 - p)
# change default figure and font size
plt.rcParams['figure.figsize'] = 8, 6
plt.rcParams['font.size'] = 12
x = np.arange(0.0, 1.0, 0.01)
ent = [entropy(p) if p != 0 else None for p in x]
plt.plot(x, ent)
plt.axhline(y = 1.0, linewidth = 1, color = 'k', linestyle = '--')
plt.ylim([ 0, 1.1 ])
plt.xlabel('p(i=1)')
plt.ylabel('Entropy')
plt.show()
def gini(p):
return p * (1 - p) + (1 - p) * ( 1 - (1 - p) )
gi = gini(x)
# plot
for i, lab in zip([ent, gi], ['Entropy', 'Gini Index']):
plt.plot(x, i, label = lab)
plt.legend(loc = 'upper center', bbox_to_anchor = (0.5, 1.15),
ncol = 3, fancybox = True, shadow = False)
plt.axhline(y = 0.5, linewidth = 1, color = 'k', linestyle = '--')
plt.axhline(y = 1.0, linewidth = 1, color = 'k', linestyle = '--')
plt.ylim([ 0, 1.1 ])
plt.xlabel('p(i=1)')
plt.ylabel('Impurity')
plt.tight_layout()
plt.show()
class Tree:
    """Classification tree using information gain with entropy as impurity

    Parameters
    ----------
    max_features : int or None, default None
        The number of features to consider when looking for the best split,
        None uses all features

    min_samples_split : int, default 10
        The minimum number of samples required to split an internal node

    max_depth : int, default 3
        Maximum depth of the tree

    minimum_gain : float, default 1e-7
        Minimum information gain required for splitting
    """
def __init__(self, max_depth = 3, max_features = None,
minimum_gain = 1e-7, min_samples_split = 10):
self.max_depth = max_depth
self.max_features = max_features
self.minimum_gain = minimum_gain
self.min_samples_split = min_samples_split
def fit(self, X, y):
        """pass in the 2d-array dataset and the response column"""
self.n_class = np.unique(y).shape[0]
# in the case you're wondering why we have this implementation of
# choosing the number of features to consider when looking
# for the best split, it will become much clearer when we
# start discussing Random Forest algorithm
if self.max_features is None or self.max_features > X.shape[1]:
self.max_features = X.shape[1]
self.feature_importance = np.zeros(X.shape[1])
self.tree = _create_decision_tree(X, y, self.max_depth,
self.minimum_gain, self.max_features,
self.min_samples_split, self.n_class,
self.feature_importance, X.shape[0])
self.feature_importance /= np.sum(self.feature_importance)
return self
def predict(self, X):
proba = self.predict_proba(X)
pred = np.argmax(proba, axis = 1)
return pred
def predict_proba(self, X):
proba = np.empty((X.shape[0], self.n_class))
for i in range(X.shape[0]):
proba[i] = self._predict_row(X[i, :], self.tree)
return proba
def _predict_row(self, row, tree):
        """Predict single row"""
if tree['is_leaf']:
return tree['prob']
else:
if row[tree['split_col']] <= tree['threshold']:
return self._predict_row(row, tree['left'])
else:
return self._predict_row(row, tree['right'])
def _create_decision_tree(X, y, max_depth,
minimum_gain, max_features,
min_samples_split, n_class,
feature_importance, n_row):
    """recursively grow the decision tree until it reaches the stopping criteria"""
try:
assert max_depth > 0
assert X.shape[0] > min_samples_split
column, value, gain = _find_best_split(X, y, max_features)
assert gain > minimum_gain
feature_importance[column] += (X.shape[0] / n_row) * gain
# split the dataset and grow left and right child
left_X, right_X, left_y, right_y = _split(X, y, column, value)
left_child = _create_decision_tree(left_X, left_y, max_depth - 1,
minimum_gain, max_features,
min_samples_split, n_class,
feature_importance, n_row)
right_child = _create_decision_tree(right_X, right_y, max_depth - 1,
minimum_gain, max_features,
min_samples_split, n_class,
feature_importance, n_row)
except AssertionError:
# if criteria reached, compute the classification
# probability and return it as a leaf node
# note that some leaf node may only contain partial classes,
# thus specify the minlength so class that don't appear will
# still get assign a probability of 0
counts = np.bincount(y, minlength = n_class)
prob = counts / y.shape[0]
leaf = {'is_leaf': True, 'prob': prob}
return leaf
node = {'is_leaf': False,
'left': left_child,
'right': right_child,
'split_col': column,
'threshold': value}
return node
def _find_best_split(X, y, max_features):
    """Greedy algorithm to find the best feature and value for a split"""
subset = np.random.choice(X.shape[1], max_features, replace = False)
max_col, max_val, max_gain = None, None, None
parent_entropy = _compute_entropy(y)
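    # information gain of a candidate split = parent entropy - size-weighted entropy of the resulting children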
for column in subset:
split_values = _find_splits(X, column)
for value in split_values:
splits = _split(X, y, column, value, return_X = False)
gain = parent_entropy - _compute_splits_entropy(y, splits)
if max_gain is None or gain > max_gain:
max_col, max_val, max_gain = column, value, gain
return max_col, max_val, max_gain
def _compute_entropy(split):
    """Entropy score using a fixed log base of 2"""
_, counts = np.unique(split, return_counts = True)
p = counts / split.shape[0]
entropy = -np.sum(p * np.log2(p))
return entropy
def _find_splits(X, column):
    """find all possible split values (threshold),
    by getting unique values in a sorted order
    and finding cutoff point (average) between every two values
    """
X_unique = np.unique(X[:, column])
split_values = np.empty(X_unique.shape[0] - 1)
for i in range(1, X_unique.shape[0]):
average = (X_unique[i - 1] + X_unique[i]) / 2
split_values[i - 1] = average
return split_values
def _compute_splits_entropy(y, splits):
    """compute the entropy for the splits (the two child nodes)"""
splits_entropy = 0
for split in splits:
splits_entropy += (split.shape[0] / y.shape[0]) * _compute_entropy(split)
return splits_entropy
def _split(X, y, column, value, return_X = True):
    """split the response column using the cutoff threshold"""
left_mask = X[:, column] <= value
right_mask = X[:, column] > value
left_y, right_y = y[left_mask], y[right_mask]
if not return_X:
return left_y, right_y
else:
left_X, right_X = X[left_mask], X[right_mask]
return left_X, right_X, left_y, right_y
# load a sample dataset
iris = load_iris()
X = iris.data
y = iris.target
# train model and print the accuracy score
tree = Tree()
tree.fit(X, y)
y_pred = tree.predict(X)
print('classification distribution: ', np.bincount(y_pred))
print('accuracy score: ', accuracy_score(y, y_pred))
# use library to confirm results are comparable
clf = DecisionTreeClassifier(criterion = 'entropy', min_samples_split = 10, max_depth = 3)
clf.fit(X, y)
y_pred = clf.predict(X)
print('classification distribution: ', np.bincount(y_pred))
print('accuracy score: ', accuracy_score(y, y_pred))
# visualize the decision tree
# export it as .dot file, other common parameters include
# `rounded` (boolean to round the score on each node)
export_graphviz(clf, feature_names = iris.feature_names, filled = True,
class_names = iris.target_names, out_file = 'tree.dot')
# read it in and visualize it, or if we wish to
# convert the .dot file into other formats, we can do:
# import os
# os.system('dot -Tpng tree.dot -o tree.jpeg')
with open('tree.dot') as f:
dot_graph = f.read()
Source(dot_graph)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Configuration and Hyperparameters
Step3: Then we'll set all the relevant paths and configurations
Step4: Helper Functions For Loading Data
Step5: Helper Function For Plotting Images
Step6: Build Model
Step7: Before we start training, we use the bottleneck method to extract features from the images in our dataset. We save them as .npy files.
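The caching idea, as a minimal sketch with placeholder data (the real pipeline below pushes images through the frozen VGG16 base with predict_generator): compute the expensive convolutional features once, save them to .npy, and reuse them on every epoch of the small classifier.
import numpy as np
features = np.random.rand(10, 512).astype('float32')   # stand-in for the conv-base output
np.save('bottleneck_demo.npy', features)                # cache once
cached = np.load('bottleneck_demo.npy')                 # reload cheaply for classifier training
print(cached.shape)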
Step8: Then we train a base model on these features.
Step9: Main Training Function
Step10: Helper Functions For Making Predictions
Step11: Run the training and prediction code
Step12: Performance Metrics
Step13: Model Summary & Feature Visualization
Step14: Save Model
Step15: MISC
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
import numpy as np
import random
import os
import glob
import cv2
import datetime
import pandas as pd
import time
import h5py
import csv
import warnings  # used by the EarlyStoppingByLossVal callback below
from scipy.misc import imresize, imsave
from sklearn.cross_validation import KFold, train_test_split
from sklearn.metrics import log_loss, confusion_matrix
from sklearn.utils import shuffle
from PIL import Image, ImageChops, ImageOps
import matplotlib.pyplot as plt
from keras import backend as K
from keras.callbacks import EarlyStopping, Callback
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras import optimizers
from keras.models import Sequential, model_from_json
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D, Activation, Dropout, Flatten, Dense
%matplotlib inline
class EarlyStoppingByLossVal(Callback):
    """Custom class to set a val loss target for early stopping"""
def __init__(self, monitor='val_loss', value=0.45, verbose=0):
super(Callback, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
if current is None:
warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
if current < self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping THR" % epoch)
self.model.stop_training = True
### paths to training and testing data
train_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/data_no_split/train'
test_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/data_no_split/test'
### path for preloaded vgg16 weights
weights_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/vgg16_weights.h5'
bottleneck_model_weights_path = 'C:/Projects/playground/kaggle/dogs_vs_cats/bottleneck_weights.h5'
### settings for keras early stopping callback
early_stopping = EarlyStopping(monitor='val_loss', patience=1, mode='auto')
# early_stopping = EarlyStoppingByLossVal(verbose=2, value=0.3)
### other hyperparameters
n_folds = 2
batch_size = 16
nb_epoch = 50
bottleneck_epoch = 3 # used when training bottleneck model
val_split = .15 # if not using kfold cv
classes = ["dog", "cat"]
num_classes = len(classes)
### image dimensions
img_width, img_height = 250, 250
num_channels = 3
def load_images(path):
img = cv2.imread(path)
    resized = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_LINEAR)
return resized
def load_train():
X_train = []
X_train_id = []
y_train = []
start_time = time.time()
print('Loading training images...')
folders = ["dogs", "cats"]
for fld in folders:
index = folders.index(fld)
print('Loading {} files (Index: {})'.format(fld, index))
path = os.path.join(train_path, fld, '*g')
files = glob.glob(path)
for fl in files:
flbase = os.path.basename(fl)
img = load_images(fl)
X_train.append(img)
X_train_id.append(flbase)
y_train.append(index)
print('Training data load time: {} seconds'.format(round(time.time() - start_time, 2)))
return X_train, y_train, X_train_id
def load_test():
path = os.path.join(test_path, 'test', '*.jpg')
files = sorted(glob.glob(path))
X_test = []
X_test_id = []
for fl in files:
flbase = os.path.basename(fl)
img = load_images(fl)
X_test.append(img)
X_test_id.append(flbase)
return X_test, X_test_id
def normalize_train_data():
train_data, train_target, train_id = load_train()
train_data = np.array(train_data, dtype=np.uint8)
train_target = np.array(train_target, dtype=np.uint8)
train_data = train_data.transpose((0, 3, 1, 2))
train_data = train_data.astype('float32')
train_data = train_data / 255
train_target = np_utils.to_categorical(train_target, num_classes)
print('Shape of training data:', train_data.shape)
return train_data, train_target, train_id
def normalize_test_data():
start_time = time.time()
test_data, test_id = load_test()
test_data = np.array(test_data, dtype=np.uint8)
test_data = test_data.transpose((0, 3, 1, 2))
test_data = test_data.astype('float32')
test_data = test_data / 255
print('Shape of testing data:', test_data.shape)
return test_data, test_id
train_data, train_target, train_id = normalize_train_data()
def plot_images(images, cls_true, cls_pred=None):
if len(images) == 0:
print("no images to show")
return
else:
random_indices = random.sample(range(len(images)), min(len(images), 9))
images, cls_true = zip(*[(images[i], cls_true[i]) for i in random_indices])
# Create figure with 3x3 sub-plots.
fig, axes = plt.subplots(3, 3)
fig.subplots_adjust(hspace=0.3, wspace=0.3)
for i, ax in enumerate(axes.flat):
# Plot image.
image = images[i].transpose((1, 2, 0))
ax.imshow(image)
# Show true and predicted classes.
if cls_pred is None:
xlabel = "True: {0}".format(cls_true[i])
else:
xlabel = "True: {0}, Pred: {1}".format(cls_true[i], cls_pred[i])
# Show the classes as the label on the x-axis.
ax.set_xlabel(xlabel)
# Remove ticks from the plot.
ax.set_xticks([])
ax.set_yticks([])
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def build_model():
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
# we don't look at the last (fully-connected) layers in the savefile
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
f.close()
# build a classifier model to put on top of the convolutional model
bottleneck_model = Sequential()
bottleneck_model.add(Flatten(input_shape=model.output_shape[1:]))
bottleneck_model.add(Dense(256, activation='relu'))
bottleneck_model.add(Dropout(0.5))
bottleneck_model.add(Dense(num_classes, activation='softmax'))
# load weights from bottleneck model
bottleneck_model.load_weights(bottleneck_model_weights_path)
# add the model on top of the convolutional base
model.add(bottleneck_model)
# set the first 25 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:25]:
layer.trainable = False
# compile the model with a SGD/momentum optimizer
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.SGD(lr=1e-4, momentum=0.9))
return model
def save_bottleneck_features():
datagen = ImageDataGenerator(rescale=1./255)
# build the VGG16 network
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
# we don't look at the last (fully-connected) layers in the savefile
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')
# create validation split
X_train, X_valid, Y_train, Y_valid = train_test_split(train_data, train_target, test_size=val_split)
# create generator for train data
generator = datagen.flow(
X_train,
Y_train,
batch_size=batch_size,
shuffle=False)
# save train features to .npy file
bottleneck_features_train = model.predict_generator(generator, X_train.shape[0])
np.save(open('bottleneck_features_train.npy', 'wb'), bottleneck_features_train)
# create generator for validation data
generator = datagen.flow(
X_valid,
Y_valid,
batch_size=batch_size,
shuffle=False)
# save validation features to .npy file
bottleneck_features_validation = model.predict_generator(generator, X_valid.shape[0])
np.save(open('bottleneck_features_validation.npy', 'wb'), bottleneck_features_validation)
return Y_train, Y_valid
def train_bottleneck_model():
train_labels, validation_labels = save_bottleneck_features()
train_data = np.load(open('bottleneck_features_train.npy', 'rb'))
validation_data = np.load(open('bottleneck_features_validation.npy', 'rb'))
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit(train_data,
train_labels,
nb_epoch=bottleneck_epoch,
batch_size=batch_size,
validation_data=(validation_data, validation_labels),
callbacks=[early_stopping],
verbose=2)
model.save_weights(bottleneck_model_weights_path)
return model
# train_bottleneck_model() # leave this commented out once it's been done once -- takes a while to run
def run_train(n_folds=n_folds):
num_fold = 0
# sum_score = 0
models = []
callbacks = [
early_stopping
]
### if we just want to train a single model without cross-validation, set n_folds to 0 or None
if not n_folds:
model = build_model()
X_train, X_valid, Y_train, Y_valid = train_test_split(train_data, train_target, test_size=val_split)
print('Training...')
print('Size of train split: ', len(X_train), len(Y_train))
print('Size of validation split: ', len(X_valid), len(Y_valid))
model.fit(X_train,
Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
callbacks=callbacks)
predictions_valid = model.predict(X_valid.astype('float32'), batch_size=batch_size, verbose=2)
# score = log_loss(Y_valid, predictions_valid)
# print('Loss: ', score)
# sum_score += score
models.append(model)
else:
kf = KFold(len(train_id), n_folds=n_folds, shuffle=True, random_state=7)
for train_index, test_index in kf:
model = build_model()
X_train = train_data[train_index]
Y_train = train_target[train_index]
X_valid = train_data[test_index]
Y_valid = train_target[test_index]
num_fold += 1
print('Training on fold {} of {}...'.format(num_fold, n_folds))
print('Size of train split: ', len(X_train), len(Y_train))
print('Size of validation split: ', len(X_valid), len(Y_valid))
model.fit(X_train,
Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
shuffle=True,
verbose=1,
validation_data=(X_valid, Y_valid),
callbacks=callbacks)
# predictions_valid = model.predict(X_valid.astype('float32'), batch_size=batch_size, verbose=2)
# score = log_loss(Y_valid, predictions_valid)
# print('Loss for fold {0}: '.format(num_fold), score)
# sum_score += score*len(test_index)
models.append(model)
# score = sum_score/len(train_data)
# print("Average loss across folds: ", score)
info_string = "{0}fold_{1}x{2}_{3}epoch_patience_vgg16".format(n_folds, img_width, img_height, nb_epoch)
return info_string, models
def create_submission(predictions, test_id, info):
result = pd.DataFrame(predictions, columns=classes)
result.loc[:, 'id'] = pd.Series(test_id, index=result.index)
result = result[["id", "dog"]].rename(columns={"dog": "label"})
now = datetime.datetime.now()
sub_file = info + '.csv'
result.to_csv(sub_file, index=False)
def merge_several_folds_mean(data, n_folds):
a = np.array(data[0])
for i in range(1, n_folds):
a += np.array(data[i])
a /= n_folds
return a.tolist()
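# Hedged aside (not part of the original script): an equivalent one-liner for the fold average,
# kept next to the loop version above for comparison; n_folds is kept only to mirror the signature.
def merge_several_folds_mean_np(data, n_folds):
    return np.mean(np.asarray(data, dtype=np.float64), axis=0).tolist()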
def ensemble_predict(info_string, models):
num_fold = 0
yfull_test = []
test_id = []
n_folds = len(models)
for i in range(n_folds):
model = models[i]
num_fold += 1
print('Predicting on fold {} of {}'.format(num_fold, n_folds))
test_data, test_id = normalize_test_data()
test_prediction = model.predict(test_data, batch_size=batch_size, verbose=2)
yfull_test.append(test_prediction)
preds = merge_several_folds_mean(yfull_test, n_folds)
create_submission(preds, test_id, info_string)
info_string, models = run_train()
ensemble_predict(info_string, models)
model = random.choice(models)
### or choose one manually...
# model = models[1]
# perm = np.arange(int(val_split*len(train_target)))
# np.random.shuffle(perm)
# sample_valid = train_data[perm]
# labels_valid = train_target[perm]
ixs = [random.randint(0, len(train_target) - 1) for i in range(1000)]  # randint is inclusive at both ends
sample_valid = np.array([train_data[ix] for ix in ixs])
labels_valid = np.array([train_target[ix] for ix in ixs])
def plot_example_errors(cls_pred, correct):
# This function is called from print_validation_accuracy() below.
# cls_pred is an array of the predicted class-number for
# all images in the validation set.
# correct is a boolean array whether the predicted class
# is equal to the true class for each image in the validation set.
# Negate the boolean array.
incorrect = (correct == False)
# Get the images from the validation set that have been
# incorrectly classified.
images = sample_valid[incorrect]
# Get the predicted classes for those images.
cls_pred = cls_pred[incorrect]
# Get the true classes for those images.
labels = np.array([classes[np.argmax(x)] for x in labels_valid])
cls_true = labels[incorrect]
# Plot the first 9 images.
plot_images(images=images[0:9],
cls_true=cls_true[0:9],
cls_pred=cls_pred[0:9])
def plot_confusion_matrix(cls_pred):
# This is called from print_validation_accuracy() below.
# cls_pred is an array of the predicted class-number for
# all images in the validation set.
# Get the true classifications for the test-set.
cls_true = [classes[np.argmax(x)] for x in labels_valid]
# Get the confusion matrix using sklearn.
cm = confusion_matrix(y_true=cls_true,
y_pred=cls_pred,
labels=classes)
# Print the confusion matrix as text.
print(cm)
# Plot the confusion matrix as an image.
plt.matshow(cm)
# Make various adjustments to the plot.
plt.colorbar()
tick_marks = np.arange(num_classes)
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
plt.xlabel('Predicted')
plt.ylabel('True')
# Ensure the plot is shown correctly with multiple plots
# in a single Notebook cell.
plt.show()
def print_validation_accuracy(show_example_errors=False,
show_confusion_matrix=False):
test_batch_size = 4
# Number of images in the validation set.
num_test = len(labels_valid)
cls_pred = np.zeros(shape=num_test, dtype=np.int)
i = 0
# iterate through batches and create list of predictions
while i < num_test:
# The ending index for the next batch is denoted j.
j = min(i + test_batch_size, num_test)
# Get the images from the test-set between index i and j.
images = sample_valid[i:j, :]
# Calculate the predicted class using TensorFlow.
cls_pred[i:j] = [np.argmax(x) for x in model.predict(images)]
# Set the start-index for the next batch to the
# end-index of the current batch.
i = j
# Convenience variable for the true class-numbers of the validation set.
cls_pred = np.array([classes[x] for x in cls_pred])
cls_true = np.array([classes[np.argmax(x)] for x in labels_valid])
# Create a boolean array whether each image is correctly classified.
correct = (cls_true == cls_pred)
# Calculate the number of correctly classified images.
# When summing a boolean array, False means 0 and True means 1.
correct_sum = correct.sum()
# Classification accuracy is the number of correctly classified
# images divided by the total number of images in the test-set.
acc = float(correct_sum) / num_test
# Print the accuracy.
msg = "Accuracy on validation set: {0:.1%} ({1} / {2})"
print(msg.format(acc, correct_sum, num_test))
# Plot some examples of mis-classifications, if desired.
if show_example_errors:
print("Example errors:")
plot_example_errors(cls_pred=cls_pred, correct=correct)
# Plot the confusion matrix, if desired.
if show_confusion_matrix:
print("Confusion Matrix:")
plot_confusion_matrix(cls_pred=cls_pred)
print_validation_accuracy(show_example_errors=True, show_confusion_matrix=True)
model.summary()
layer_name = 'conv5_3'
# util function to convert a tensor into a valid image
def deprocess_image(x):
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
if K.image_dim_ordering() == 'th':
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
# this is the placeholder for the input images
input_img = model.input
# get the symbolic outputs of each "key" layer (we gave them unique names).
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
def normalize(x):
# utility function to normalize a tensor by its L2 norm
return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
kept_filters = []
for filter_index in range(0, 512):
print('Processing filter %d' % filter_index)
start_time = time.time()
# we build a loss function that maximizes the activation
# of the nth filter of the layer considered
layer_output = layer_dict[layer_name].output
if K.image_dim_ordering() == 'th':
loss = K.mean(layer_output[:, filter_index, :, :])
else:
loss = K.mean(layer_output[:, :, :, filter_index])
# we compute the gradient of the input picture wrt this loss
grads = K.gradients(loss, input_img)[0]
# normalization trick: we normalize the gradient
grads = normalize(grads)
# this function returns the loss and grads given the input picture
iterate = K.function([input_img], [loss, grads])
# step size for gradient ascent
step = 1.
# we start from a gray image with some random noise
if K.image_dim_ordering() == 'th':
input_img_data = np.random.random((1, 3, img_width, img_height))
else:
input_img_data = np.random.random((1, img_width, img_height, 3))
input_img_data = (input_img_data - 0.5) * 20 + 128
# we run gradient ascent for 20 steps
for i in range(20):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
if loss_value <= 0.:
# some filters get stuck to 0, we can skip them
break
# decode the resulting input image
if loss_value > 0:
img = deprocess_image(input_img_data[0])
kept_filters.append((img, loss_value))
end_time = time.time()
print('Filter %d processed in %ds' % (filter_index, end_time - start_time))
# we will stich the best n**2 filters on a n x n grid.
n = 5
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top n**2 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
kept_filters = kept_filters[:n * n]
# build a black picture with enough space for
# our n x n filters of size with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# fill the picture with our saved filters
for i in range(n):
for j in range(n):
img, loss = kept_filters[i * n + j]
stitched_filters[(img_width + margin) * i: (img_width + margin) * i + img_width,
(img_height + margin) * j: (img_height + margin) * j + img_height, :] = img
# save image and display
imsave('feats.jpg', stitched_filters)
plt.imshow(stitched_filters)
### if we like this model, save the weights
model.save_weights("favorite_model.h5")
### augmentation script
# train_path = 'C:/Projects/playground/kaggle/fish/data_aug/train/YFT/'
# ## define data preparation
# datagen = ImageDataGenerator(
# width_shift_range=.1,
# )
# ## fit parameters from data
# generator = datagen.flow_from_directory(
# train_path,
# target_size=(512, 512),
# class_mode=None,
# batch_size=335,
# shuffle=True,
# save_to_dir=train_path,
# save_prefix="aug_"
# )
# for X_batch, y_batch in generator:
# break
### Test on single image
path_to_lucy = "C:/Projects/playground/neural_style_transfer/images/inputs/content/loo_grass.jpg"
img = load_img(path_to_lucy)
plt.imshow(img)
img = imresize(img, (img_width, img_height))
img = img_to_array(img)
img.shape
img = img.reshape(1, 3, 250, 250)
print("This is a {0}.".format(classes[model.predict_classes(img)[0]]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples of text analysis
Step2: First tokenize the text and then we tag the parts of speech
Step3: Let's get a frequency distribution of the parts of speech tags
Step4: Let's look at the part of speech tags for reference
Step5: Now, let's display some graphs just to visualize this frequency distribution
Step6: Now, we're going to start looking into the most common words of the top three part of speech tags
Step7: Finally, we'll just take the product of the five most common words of the VB, VBZ and NNS tags
|
<ASSISTANT_TASK:>
Python Code:
import nltk
abstract = '''
It's morning, you settle in, check your dashboards and it looks like there is an increase of load coming through on some of your web server logs. What happened? You're about to deploy code that will hopefully fix some issues; how will you know that things worked well? The design team is thinking about changing some of the site icons; do your users like seeing big icons or small icons on your site? These are all scenarios that are all too common and the one thing that helps you answer these is your data.
Pushing data is typically easy. If you're tracking tracking events on a website, you'll probably want to know a lot about click tracking, URL referrals, and user sessions. If you're curious about the number of downloads your users go through per day, you'll probably have some data that you can aggregate a sum. Your data can be small or large or anything in between, but making it available is the most important piece that you'll need to have.
Pulling data can be a bit more complex. Do you have a small amount of data that you're just pulling from a relational database? Or are you processing data through Hadoop or Spark? Data is what you want; how you pull it is dependent on your architecture needs.
Presenting data is a simple task, but are you presenting the correct story? Whether you are presenting your web traffic or your user behavior data, you'll want to present your data that tells the story you want to tell in the best way.
Push data, pull data, present data; these are your main tasks in your typical cycle of product development and analysis. We built out a fairly quick data pipeline using Airflow, a workflow framework made by Airbnb. We push a lot of data so we can make good data-driven business decisions. Pulling data and presenting them have gone hand-in-hand for us. We have utilized Google's BigQuery in order for us to have a fast, columnar data store in order for us to build out dashboards to visualize our data. This will shed light into what a typical push-pull-present cycle looks like and will be exemplified with real-world examples.
'''
tokens = nltk.word_tokenize(abstract)
tokens[:10]
tagged = nltk.pos_tag(tokens)
tagged[:10]
tag_fd = nltk.FreqDist(tag for (word, tag) in tagged)
tag_fd.most_common()
nltk.help.upenn_tagset()
%matplotlib inline
import matplotlib.pyplot as plt
most_common_pos = tag_fd.most_common()
plt.figure(figsize=(15, 10))
plt.bar([x for x in range(len(most_common_pos))],
[count for (pos, count) in most_common_pos],
tick_label=['' for (pos, count) in most_common_pos]
)
plt.show()
most_common_pos = tag_fd.most_common()
plt.figure(figsize=(15, 10))
plt.bar([x for x in range(len(most_common_pos))],
[count for (pos, count) in most_common_pos],
tick_label=[pos for (pos, count) in most_common_pos]
)
plt.xticks(rotation=45)
plt.show()
most_common_pos = tag_fd.most_common()
plt.figure(figsize=(10, 10))
plt.pie([count for (pos, count) in most_common_pos], shadow=True)
plt.show()
most_common_pos = tag_fd.most_common()
plt.figure(figsize=(10, 10))
plt.pie([count for (pos, count) in most_common_pos],
labels=[pos for (pos, count) in most_common_pos],
autopct='%.2f%%',
shadow=True
)
plt.show()
from collections import Counter
NN_tags = Counter([word.lower() for (word, pos) in tagged if pos=='NN'])
NN_tags.most_common()
IN_tags = Counter([word.lower() for (word, pos) in tagged if pos=='IN'])
IN_tags.most_common()
NNS_tags = Counter([word.lower() for (word, pos) in tagged if pos=='NNS'])
NNS_tags.most_common()
verb_tags = Counter([word.lower() for (word, pos) in tagged if pos in {'VB', 'VBG',}])
verb_tags.most_common()
from itertools import product
['{} {}'.format(verb[0], noun[0]) for (verb, noun) in product(verb_tags.most_common()[:5], NNS_tags.most_common()[:5])]
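# Hedged aside (not in the original notebook): nltk can also surface frequent word pairs directly;
# this sketch assumes nltk's bigrams helper and the Counter imported above.
from nltk import bigrams
word_pairs = Counter(bigrams(w.lower() for w in tokens if w.isalpha()))
word_pairs.most_common(5)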
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As the data is two-dimensional, we can plot each sample as a point in a two-dimensional coordinate system, with the first feature being the x-axis and the second feature being the y-axis.
Step2: Classification is a supervised task, and since we are interested in its performance on unseen data, we split our data into two parts
Step3: The scikit-learn estimator API
Step4: Next, we instantiate the estimator object.
Step5: To build the model from our data, that is, to learn how to classify new points, we call the fit function with the training data and the corresponding training labels (the desired output for each training data point)
Step6: (Some estimator methods such as fit return self by default. Thus, after executing the code snippet above, you will see the default parameters of this particular instance of LogisticRegression. Another way of retrieving the estimator's initialization parameters is to execute classifier.get_params(), which returns a parameter dictionary.)
Step7: We can compare these against the true labels
Step8: We can evaluate our classifier quantitatively by measuring what fraction of predictions is correct. This is called accuracy
Step9: There is also a convenience function, score, that all scikit-learn classifiers have, to compute this directly from the test data
Step10: It is often helpful to compare the generalization performance (on the test set) to the performance on the training set
Step11: LogisticRegression is a so-called linear model,
Step12: Estimated parameters
Step13: Another classifier
Step14: This time we set a parameter of the KNeighborsClassifier to tell it we only want to look at one nearest neighbor
Step15: We fit the model with our training data
Step16: <div class="alert alert-success">
|
<ASSISTANT_TASK:>
Python Code:
from sklearn.datasets import make_blobs
X, y = make_blobs(centers=2, random_state=0)
print('X ~ n_samples x n_features:', X.shape)
print('y ~ n_samples:', y.shape)
print('\nFirst 5 samples:\n', X[:5, :])
print('\nFirst 5 labels:', y[:5])
plt.scatter(X[y == 0, 0], X[y == 0, 1],
c='blue', s=40, label='0')
plt.scatter(X[y == 1, 0], X[y == 1, 1],
c='red', s=40, label='1', marker='s')
plt.xlabel('first feature')
plt.ylabel('second feature')
plt.legend(loc='upper right');
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.25,
random_state=1234,
stratify=y)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
X_train.shape
y_train.shape
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
print(prediction)
print(y_test)
np.mean(prediction == y_test)
classifier.score(X_test, y_test)
classifier.score(X_train, y_train)
from figures import plot_2d_separator
plt.scatter(X[y == 0, 0], X[y == 0, 1],
c='blue', s=40, label='0')
plt.scatter(X[y == 1, 0], X[y == 1, 1],
c='red', s=40, label='1', marker='s')
plt.xlabel("first feature")
plt.ylabel("second feature")
plot_2d_separator(classifier, X)
plt.legend(loc='upper right');
print(classifier.coef_)
print(classifier.intercept_)
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
plt.scatter(X[y == 0, 0], X[y == 0, 1],
c='blue', s=40, label='0')
plt.scatter(X[y == 1, 0], X[y == 1, 1],
c='red', s=40, label='1', marker='s')
plt.xlabel("first feature")
plt.ylabel("second feature")
plot_2d_separator(knn, X)
plt.legend(loc='upper right');
knn.score(X_test, y_test)
# %load solutions/05A_knn_with_diff_k.py
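# Hedged sketch (not the course's solution file): one simple way the exercise could be approached
for k in [1, 3, 5, 10, 30]:
    clf = KNeighborsClassifier(n_neighbors=k).fit(X_train, y_train)
    print('k = {}: test accuracy = {:.3f}'.format(k, clf.score(X_test, y_test)))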
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Discrete time
Step2: Linear Difference Equations
Step3: Money exercises
Step4: 10% per year, compounded monthly
Step5: 10% per year, compounded daily
Step6: 10% per year, compounded hourly
Step7: The Fibonacci sequence is a second order difference equation, so we need to provide two initial conditions. Check the if statements in the code below.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
from __future__ import print_function
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
def plotSequence(y):
n = np.linspace(0, y.size, y.size)
plt.scatter(n, y)
plt.plot([n, n], [np.zeros(n.size), y], color='gray', linestyle="--")
return
w = 1.0 # frequency in hz = 1/seg
n=np.linspace(0,10,num=50)
y = np.sin(w*n)
plt.figure()
plotSequence(y)
def lde(n, lmbda, f0):
return f0*pow(lmbda,n);
# natural frequency and stability
def f(a):
N = 30
f = np.linspace(0,N,N)
f0 = 1
for i in range(N):
f[i] = lde(i, a, f0)
plt.figure()
plotSequence(f)
interact(f, a=(-10.0,10.0,0.1))
lde(30, 1+(10.0/100), 10)
lde(30*12, 1+((10.0/100)/12), 10)
lde(30*365, 1+((10.0/100)/365), 10)
lde(30*365*24, 1+((10.0/100)/(365*24)), 10)
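# Hedged aside (not in the original notebook): in the continuous-compounding limit the closed form
# is P*exp(r*t); the hourly-compounded value above should already be very close to it.
10 * np.exp(0.10 * 30)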
def fibonacci(n):
if n == 0: # first initial condition
return 0
elif n == 1: # second initial condition
return 1
else:
return fibonacci(n-1) + fibonacci(n-2)
N = 10
z = np.linspace(0,N,N)
for i in range(N):
z[i] = fibonacci(i)
plt.figure()
plotSequence(z)
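# Hedged aside (not in the original notebook): the recursive definition above is exponential in n;
# an equivalent iterative version scales linearly and is handy for larger N.
def fibonacci_iterative(n):
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
assert all(fibonacci_iterative(k) == fibonacci(k) for k in range(N))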
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graph Execution
Step2: Run the Graph
Step3: Can you mix eager and graph execution together?
Step 1
Step4: Linear Regression
Step5: Loss Function
Step6: Optimizer
Step7: Training Loop
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
print(tf.__version__)
a = tf.constant(value = [5, 3, 8], dtype = tf.int32)
b = tf.constant(value = [3, -1, 2], dtype = tf.int32)
c = tf.add(x = a, y = b)
print(c)
with tf.Session() as sess:
result = sess.run(fetches = c)
print(result)
a = tf.placeholder(dtype = tf.int32, shape = [None])
b = tf.placeholder(dtype = tf.int32, shape = [None])
c = tf.add(x = a, y = b)
with tf.Session() as sess:
result = sess.run(fetches = c, feed_dict = {
a: [3, 4, 5],
b: [-1, 2, 3]
})
print(result)
X = tf.constant(value = [1,2,3,4,5,6,7,8,9,10], dtype = tf.float32)
Y = 2 * X + 10
print("X:{}".format(X))
print("Y:{}".format(Y))
with tf.variable_scope(name_or_scope = "training", reuse = tf.AUTO_REUSE):
    w0 = tf.get_variable(name="w0", initializer=tf.constant(0.0))  # one possible solution to the TODO
    w1 = tf.get_variable(name="w1", initializer=tf.constant(0.0))  # one possible solution to the TODO
Y_hat = w0 * X + w1
loss_mse = tf.reduce_mean(input_tensor = (Y_hat - Y)**2)
LEARNING_RATE = 0.02  # one possible choice for the TODO
optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE).minimize(loss_mse)  # one possible solution to the TODO
STEPS = 1000
with tf.Session() as sess:
sess.run(tf.global_variables_initializer()) # initialize variables
for step in range(STEPS):
#1. Calculate gradients and update weights
        sess.run(optimizer)  # one possible solution to the TODO: run one gradient-descent update
#2. Periodically print MSE
if step % 100 == 0:
print("STEP: {} MSE: {}".format(step, sess.run(fetches = loss_mse)))
# Print final MSE and weights
print("STEP: {} MSE: {}".format(STEPS, sess.run(loss_mse)))
print("w0:{}".format(round(float(sess.run(w0)), 4)))
print("w1:{}".format(round(float(sess.run(w1)), 4)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Reading, and writing, comprehension(s)
Step2: List comprehensions
Step3: As you can see, the translation from the math to code is natural.
Step4: Example 2
Step5: NOTE
Step6: The two patterns shown in examples 1 and 2 can be generalised to the following pattern.
Step7: The procedural equivalent of this code is shown below.
Step9: This pattern is syntactically different from the previous pattern. It can be generalized as
Step10: The procedural equivalent of this code is shown below.
Step11: Example 2 also has an equivalent in the two variable case.
Step12: Technically, this is the same pattern as the previous example but uses the ternary operator (as shown in example 2). The procedural equivalent is shown below.
Step13: The two examples can be generalized to
Step14: The procedural equivalent is shown below.
Step15: The pattern can be generalized as
Step16: The patterns discussed in the previous section also apply here. There are mainly two kinds of patterns in the single variable case.
Step17: 2. Do not sacrifice readability for "speed." For example, do not write code like the snippet shown below
Step18: The procedural code is more readable compared to the comprehension
Step19: This can be shortened to comprehension for readability with a little bit of refactoring.
|
<ASSISTANT_TASK:>
Python Code:
from numpy.random import randint
import matplotlib.pyplot as plt
%matplotlib inline
S = randint(low=0, high=11, size=15) # 10 random integers b/w 0 and 10
def f(x):
    '''Dummy function - returns identity'''
return x
print("1. S == {}".format(S))
y1 = [f(x) for x in S]
print("2. All (x, f(x)) pairs: {}".format(list(zip(S, y1))))
plt.scatter(S, y1)
y2 = []
for x in S:
y2.append(f(x))
print("3. All (x, f(x)) pairs: {}".format(list(zip(S, y2))))
assert y1 == y2
y1 = [0 if x <= 5 else f(x) for x in S]
print(*zip(S,y1))
plt.scatter(S, y1)
y2 = []
for x in S:
if x <= 5:
y2.append(0)
else:
y2.append(f(x))
print(*zip(S, y2))
assert y1 == y2
print("Passed!")
y1 = [f(x) for x in S if x <= 5]
s = [x for x in S if x <= 5]
print(*zip(s,y1))
# Note how the output range has been modified due to the change in input range
plt.scatter(s, y1)
y2 = []
for x in S:
if x <= 5:
y2.append(f(x))
assert y2 == y1
print(*zip(S, y2))
print("Passed!")
import numpy as np
def g(i, j):
    '''Returns the result of division of indices'''
return (i + 1) / (j + 1)
C1 = [g(i,j) for i in range(0,3) for j in range(0,3)] # replace g with any function that you want
print(C1)
print(np.array(C1).reshape(3,3))
C2 = []
for i in range(3):
for j in range(3):
C2.append(g(i, j))
print(C2)
assert C1 == C2
print("Passed!")
C1 = [g(i,j) if i !=j else 0 for i in range(0,3) for j in range(0,3)]
print(C1)
print(np.array(C1).reshape(3,3))
C2 = []
for i in range(3):
for j in range(3):
if i != j:
C2.append(g(i,j))
else:
C2.append(0)
print(C2)
assert C1 == C2
print("Passed!")
C1 = [ (i, j, g(i,j)) for i in range(0,3) for j in range(0,3) if i !=j]
print(C1) # note that the input restriction on the diagonals removes the diagonals from the output list
C2 = []
for i in range(3):
for j in range(3):
if i != j:
C2.append((i, j, g(i,j)))
print(C2)
assert C1 == C2
print("Passed!")
dict_comp = {x: f(x) for x in S}
print(dict_comp)
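# Hedged aside (not in the original notebook): the same comprehension syntax also gives
# set comprehensions and generator expressions.
set_comp = {f(x) for x in S}          # set comprehension: duplicate outputs collapse
gen_total = sum(f(x) for x in S)      # generator expression: no intermediate list is built
print(set_comp, gen_total)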
# Bad code
[print(i) for i in range(3)]
# you know you can do better than that
for i in range(3):
print(i)
# that's better
x1 = [i if i <= 10 else i**2 if 10 < i <= 20 else i**4 if 20 < i <= 50 else 1.0 / i for i in range(100) if i not in (5, 7, 11, 13, 17, 19, 29, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97)]
# procedural code is more readable in this case here
x2 = []
for i in range(100):
# optimus primes are beyond our reach, https://oeis.org/A217090
if i not in (5, 7, 11, 13, 17, 19, 29, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97):
if i <= 10:
x2.append(i)
elif 10 < i <= 20:
x2.append(i**2)
elif 20 < i <= 50:
x2.append(i**4)
else:
x2.append(1.0 / i)
assert x2 == x1
def function(val):
if val <= 10:
return val
elif 10 < val <= 20:
return val**2
elif 20 < val <= 50:
return val**4
else:
return 1.0 / val
def is_optimus_prime(val):
return val in (5, 7, 11, 13, 17, 19, 29, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97)
x3 = [function(i) for i in range(100) if not is_optimus_prime(i)]
assert x1 == x2 == x3
print("Passed!")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Isotherm display
Step2: Isotherm plotting and comparison
Step3: A black and white (color=False) full scale graph of both adsorption and desorption branches of an individual isotherm, saved to file.
Step4: A graph which plots both the loading and enthalpy as a function of pressure on the left
Step5: A comparison graph of all the nitrogen isotherms, with both branches shown but without adding the desorption branch to the label (branch='all-nol'). We want each isotherm to use a different marker (marker=len(isotherms)) and to not display the desorption branch component of the legend (only lgd_keys=['material']).
Step6: A black and white version of the same graph (color=False), but with absolute pressure in bar.
Step7: Only some ranges selected for display from all the isotherms (x_range=(0.2, 0.6) and y1_range=(3, 10)).
Step8: The isosteric pressure isotherms, in relative pressure mode and loading in cm3(STP). No markers
Step9: Only desorption branch of some isotherms (branch='des'), displaying the user who recorded the isotherms
|
<ASSISTANT_TASK:>
Python Code:
%run import.ipynb
import matplotlib.pyplot as plt
isotherm = next(i for i in isotherms_n2_77k if i.material=='MCM-41')
ax = isotherm.plot()
import pygaps.graphing as pgg
ax = pgg.plot_iso(
isotherms_isosteric,
branch = 'ads',
logx = True,
x_range=(None,1),
lgd_keys=['temperature'],
loading_unit='cm3(STP)',
color=['b', 'r', 'g']
)
import pygaps.graphing as pgg
from pathlib import Path
path = Path.cwd() / 'novel.png'
isotherm = next(i for i in isotherms_n2_77k if i.material=='MCM-41')
ax = pgg.plot_iso(
isotherm,
branch = 'all',
color=False,
save_path=path,
marker=['x'],
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,5))
pgg.plot_iso(
isotherms_calorimetry[1],
ax=ax1,
x_data='pressure',
y1_data='loading',
y2_data='enthalpy',
lgd_pos='lower right',
y2_range=(0,40),
y1_line_style=dict(markersize=0),
y2_line_style=dict(markersize=3),
)
pgg.plot_iso(
isotherms_calorimetry[1],
ax=ax2,
x_data='loading',
y1_data='enthalpy',
y1_range=(0,40),
lgd_pos='best',
marker=['^'],
y1_line_style=dict(linewidth=0)
)
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='all',
lgd_keys=['material'],
marker=len(isotherms_n2_77k)
)
ax.set_title("Regular isotherms colour")
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='all',
color=False,
lgd_keys=['material'],
pressure_mode='absolute',
pressure_unit='bar',
)
ax.set_title("Black and white")
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='all',
x_range=(0.2, 0.6),
y1_range=(3, 10),
lgd_keys=['material']
)
ax = pgg.plot_iso(
isotherms_isosteric,
branch='ads',
pressure_mode='relative',
loading_unit='cm3(STP)',
lgd_keys=['adsorbate', 'temperature'],
marker=False
)
ax.set_title("Different pressure mode or units")
ax = pgg.plot_iso(
isotherms_n2_77k,
branch='des',
lgd_keys=['material', 'user'],
lgd_pos='out bottom',
)
ax.set_title("Only desorption branch")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2 Viel mächtigere Funktion
Step2: 3 Aber wie sind Funktion, Modules und Libraries aufgebaut?
Step3: 4 Bauen wir die eigenen Funktion
Step4: Und zum aufrufen packe ich meine List in Klammen ()
Step5: Bauen wir eine simple Suche
Step6: 5 Struktur und Troubleshooting
|
<ASSISTANT_TASK:>
Python Code:
lst = [11,2,34, 4,5,5111]
len(lst)
len([11,2,'sort',4,5,5111])
sorted(lst)
lst
lst.sort()
lst
min(lst)
max(lst)
str(1212)
sum([1,2,2])
lst
lst.remove(4)
lst.append(4)
string = 'hello, wie geht, es Dir?'
string.split(',')
import urllib
import requests
import glob
import pandas
from bs4 import BeautifulSoup
import re
#etc. etc.
def sort(string):
elem = input('Bitte geben Sie den Suchbegriff ein: ')
if elem in string:
return 'Treffer'
else:
return 'Kein Treffer'
string_test = "«Guten Tag, ich bin der, der Sie vor einer Stunde geweckt hat», sagte der Moderator des Podiums in Stockholm, als er am Montagmittag den US-Wissenschaftler Richard H. Thaler anrief. Für seine Erforschung der Psychologie hinter wirtschaftlichen Entscheidungen bekommt dieser den Nobelpreis für Wirtschaft. Das gab die Königlich-Schwedische Wissenschaftsakademie bekannt. Der 72-Jährige lehrt an der Universität Chicago. Der Verhaltensökonom habe gezeigt, dass begrenzte Rationalität, soziale."
string_test
def suche(elem, string):
#elem = input('Bitte geben Sie den Suchbegriff ein: ')
if elem in string:
return 'Treffer'
else:
return 'Kein Treffer'
suche(strings[1], string_test)
strings = ['Stockholm', 'blödes Wort', 'Rationalität', 'soziale']
for st in strings:
ergebnis = suche(st, string_test)
print(st, ergebnis)
suche(string_test)
suche(string_test)
lst = [1,3,5]
len(lst)
import os
# Unfortunately this does not work with all built-in functions
os.path.split??
#Beispiel Sort
def sort(list):
for index in range(1,len(list)):
value = list[index]
i = index-1
while i>=0:
if value < list[i]:
list[i+1] = list[i]
list[i] = value
i -= 1
else:
break
return list
# A really complex one. If you could not work with the urllib module (urlretrieve),
# you would have to type in all of the following yourself.
def urlretrieve(url, filename=None, reporthook=None, data=None):
url_type, path = splittype(url)
with contextlib.closing(urlopen(url, data)) as fp:
headers = fp.info()
# Just return the local path and the "headers" for file://
# URLs. No sense in performing a copy unless requested.
if url_type == "file" and not filename:
return os.path.normpath(path), headers
# Handle temporary file setup.
if filename:
tfp = open(filename, 'wb')
else:
tfp = tempfile.NamedTemporaryFile(delete=False)
filename = tfp.name
_url_tempfiles.append(filename)
with tfp:
result = filename, headers
bs = 1024*8
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, bs, size)
while True:
block = fp.read(bs)
if not block:
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: got only %i out of %i bytes"
% (read, size), result)
return result
import urllib.request
with urllib.request.urlopen('http://tagesanzeiger.ch/') as response:
html = response.read()
html
lst = ['ich', 'habe', None, 'ganz', 'kalt']
def join(mylist):
long_str = ''
for elem in mylist:
try:
long_str = long_str + elem + " "
except:
None
return long_str.strip()
join(lst)
join(lst)
string = ' ich habe ganz kalt '
string.strip()
satz = "Die Unabhängigkeit der Notenbanken von der Politik gilt bisher als anerkannter Grundpfeiler der modernen Wirtschafts- und Geldpolitik in fortgeschrittenen Volkswirtschaften. Zu gross wäre sonst das Risiko, dass gewählte Politiker die Notenpresse anwerfen, wenn es ihren persönlichen Zielen gerade gelegen kommt, und dass dadurch die Stabilität des Geldes und das Vertrauen in das Zahlungsmittel untergraben wird."
sort(satz)
def find(string):
elem = input('Bitte geben Sie den Suchbegriff ein: ')
if elem in string:
return 'Treffer'
else:
return 'Kein Treffer'
find(satz)
print('Immer im Code verwenden, um zu wissen wo der Fehler nun ganz genau passiert.')
#Beispiel Sort
def sort(list):
for index in range(1,len(list)):
value = list[index]
print(value)
i = index-1
print(i)
while i>=0:
if value < list[i]:
list[i+1] = list[i]
list[i] = value
i -= 1
else:
break
return list
sort(lst)
lst
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initializing the NumCosmo library
Step2: Initializing the objects
Step3: Computing the normalized Hubble function
Step4: Initializing the distance object
Step5: Computing the comoving distance
Step6: We now compute other cosmological distances
|
<ASSISTANT_TASK:>
Python Code:
try:
import gi
gi.require_version('NumCosmo', '1.0')
gi.require_version('NumCosmoMath', '1.0')
except:
pass
from gi.repository import GObject
from gi.repository import NumCosmo as Nc
from gi.repository import NumCosmoMath as Ncm
import sys
import math
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
__name__ = "NcContext"
Ncm.cfg_init ()
Ncm.cfg_set_log_handler (lambda msg: sys.stdout.write (msg) and sys.stdout.flush ())
cosmo = Nc.HICosmo.new_from_name (Nc.HICosmo, "NcHICosmoDEXcdm{'massnu-length':<1>}")
cosmo.omega_x2omega_k ()
cosmo.param_set_by_name ("H0", 67.66)
cosmo.param_set_by_name ("Omegak", 0.0)
cosmo.param_set_by_name ("w", -0.8)
cosmo.param_set_by_name ("Omegab", 0.049)
cosmo.param_set_by_name ("Omegac", 0.2621)
cosmo.param_set_by_name ("massnu_0", 0.0)
cosmo.param_set_by_name ("ENnu", 3.046)
cosmo.param_set_by_name ("Tgamma0", 2.7255)
E2_list = []
z_list = np.linspace (0.0, 10.0, 100)
for z in z_list:
E2 = cosmo.E2 (z)
E2_list.append(E2)
plt.figure (figsize=(14, 7))
plt.plot (z_list, E2_list, 'b', label="default parameters")
plt.xlabel (r'$z$')
plt.ylabel (r'$E^2(z)$')
leg = plt.legend (loc = 'best')
zmax = 10.0
dist = Nc.Distance.new (zmax)
dist.prepare (cosmo)
comoving_list = []
for z in z_list:
comoving = dist.comoving (cosmo, z) * cosmo.RH_Mpc ()
comoving_list.append(comoving)
plt.figure (figsize=(14, 7))
plt.plot (z_list, comoving_list, 'b', label="default parameters")
plt.xlabel (r'$z$')
plt.ylabel (r'$d_c(z)$ [Mpc]')
leg = plt.legend (loc = 'best')
transv_list = []
lumin_list = []
ang_list = []
for z in z_list:
transv = dist.transverse (cosmo, z)
lumin = dist.luminosity (cosmo, z)
ang = dist.angular_diameter (cosmo, z)
transv_list.append (transv)
lumin_list.append (lumin)
ang_list.append (ang)
plt.figure (figsize=(14, 7))
plt.xscale('log')
plt.plot (z_list, transv_list, 'b', label="Tranversal comoving")
plt.plot (z_list, lumin_list, 'r', label="Luminosity")
plt.plot (z_list, ang_list, 'g', label="Angular diameter")
plt.xlabel (r'$z$')
plt.ylabel (r'Cosmological distances [Mpc]')
leg = plt.legend (loc = 'best')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps
Step2: Inline Question #1
Step3: You should expect to see approximately 27% accuracy. Now lets try out a larger k, say k = 5
Step5: You should expect to see a slightly better performance than with k = 1.
Step6: Cross-validation
|
<ASSISTANT_TASK:>
Python Code:
import os
os.chdir(os.getcwd() + '/..')
# Run some setup code for this notebook
import random
import numpy as np
import matplotlib.pyplot as plt
from utils.data_utils import load_CIFAR10
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the raw CIFAR-10 data.
cifar10_dir = 'datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
print ('Training data shape: ', X_train.shape)
print ('Training labels shape: ', y_train.shape)
print ('Test data shape: ', X_test.shape)
print ('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
sample_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, sample_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1;
plt.subplot(sample_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = X_train.reshape(X_train.shape[0], -1)
X_test = X_test.reshape(X_test.shape[0], -1)
print(X_train.shape, X_test.shape)
from classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
# compute_distance_two_loops.
dists = classifier.compute_distances_two_loops(X_test)
print (dists.shape)
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test == y_test_pred)
accuracy = float(num_correct) / num_test
print ('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print ('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop.
dists_one = classifier.compute_distances_one_loop(X_test)
# Compute Frobenius norm of two matrices:
# the square root of the squared sum of differences of all elements
difference = np.linalg.norm(dists - dists_one, ord='fro')
print('Difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')
# fully vectorized version
dists_two = classifier.compute_distances_no_loops(X_test)
difference = np.linalg.norm(dists_two - dists_one, ord='fro')
print('Difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')
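# Hedged aside (not part of the original assignment): the no-loop version is usually derived from the
# expansion ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2; this quick check assumes X_train/X_test as defined above.
Xte = X_test.astype(np.float64)
Xtr = X_train.astype(np.float64)
sq_dists = np.sum(Xte**2, axis=1)[:, None] - 2.0 * Xte.dot(Xtr.T) + np.sum(Xtr**2, axis=1)[None, :]
dists_check = np.sqrt(np.maximum(sq_dists, 0))  # clip tiny negative values caused by round-off
print('Expansion check difference: %f' % np.linalg.norm(dists_check - dists_two, ord='fro'))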
# Let's compare how fast the implementations are
def time_function(f, *args):
    '''Call a function f with args and return the time (in seconds) that it took to execute'''
import time
tic = time.time()
f(*args)
toc = time.time()
return toc - tic
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print('Two loop version took %f seconds' % two_loop_time)
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print('One loop version took %f seconds' % one_loop_time)
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print('No loop version took %f seconds' % no_loop_time)
# you should see significantly faster performance with fully vectorized implementation
num_folds = 5
k_choices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
X_train_folds = []
y_train_folds = []
fold_ids = np.array_split(np.arange(num_training), num_folds)
for i in range(num_folds):
X_train_folds.append(X_train[fold_ids[i], :])
y_train_folds.append(y_train[fold_ids[i]])
# a dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
k_to_accuracies = {}
for k in k_choices:
k_to_accuracies[k] = []
    for i in range(num_folds):
Xs = []
ys = []
X_val = X_train_folds[i]
y_val = y_train_folds[i]
        for j in range(num_folds):
if i == j:
continue
Xs.append(X_train_folds[j])
ys.append(y_train_folds[j])
Xs = np.concatenate(Xs)
ys = np.concatenate(ys)
classifier.train(Xs, ys)
y_val_pred = classifier.predict(X_val, k=k)
accuracy = np.mean(y_val == y_val_pred)
k_to_accuracies[k].append(accuracy)
print('k = %d, accuracy = %f' % (k, accuracy))
print
# Print out the computed accuracies
for k in k_to_accuracies:
for accuracy in k_to_accuracies[k]:
print('k = %d, accuracy = %f' % (k, accuracy))
# plot the raw observaations
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k, v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k, v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 10
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Check out the Data
Step2: EDA
Step3: Training a Linear Regression Model
Step4: Train Test Split
Step5: Creating and Training the Model
Step6: Model Evaluation
Step7: Interpreting the coefficients
Step8: Residual Histogram
Step9: Regression Evaluation Metrics
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
USAhousing = pd.read_csv('USA_Housing.csv')
USAhousing.head()
USAhousing.info()
USAhousing.describe()
USAhousing.columns
sns.pairplot(USAhousing)
sns.distplot(USAhousing['Price'])
sns.heatmap(USAhousing.corr())
X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms',
'Avg. Area Number of Bedrooms', 'Area Population']]
y = USAhousing['Price']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101)
from sklearn.linear_model import LinearRegression
lm = LinearRegression()
lm.fit(X_train,y_train)
# print the intercept
print(lm.intercept_)
coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient'])
coeff_df
predictions = lm.predict(X_test)
plt.scatter(y_test,predictions)
sns.distplot((y_test-predictions),bins=50);
from sklearn import metrics
print('MAE:', metrics.mean_absolute_error(y_test, predictions))
print('MSE:', metrics.mean_squared_error(y_test, predictions))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
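# Hedged aside (not in the original notebook): R^2 (the coefficient of determination) is another common regression metric
print('R^2:', metrics.r2_score(y_test, predictions))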
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Save frame and display JPG here
Step2: 3. Gray Scale filter
Step3: 4. Sobel filter
Step4: Step 5
|
<ASSISTANT_TASK:>
Python Code:
from pynq.drivers.video import Frame, HDMI
from IPython.display import Image
hdmi=HDMI('in')
hdmi.start()
frame = hdmi.frame()
orig_img_path = '/home/xilinx/jupyter_notebooks/examples/data/orig.jpg'
frame.save_as_jpeg(orig_img_path)
Image(filename=orig_img_path)
from pynq.drivers.video import MAX_FRAME_WIDTH
grayframe = frame
frame_i = grayframe.frame
height = hdmi.frame_height()
width = hdmi.frame_width()
for y in range(0, height):
for x in range(0, width):
offset = 3 * (y * MAX_FRAME_WIDTH + x)
gray = round((0.299*frame_i[offset+2]) +
(0.587*frame_i[offset+0]) +
(0.114*frame_i[offset+1]))
frame_i[offset+0] = gray
frame_i[offset+1] = gray
frame_i[offset+2] = gray
gray_img_path = '/home/xilinx/jupyter_notebooks/examples/data/gray.jpg'
grayframe.save_as_jpeg(gray_img_path)
Image(filename=gray_img_path)
height = 1080
width = 1920
sobel = Frame(1920, 1080)
frame_i = frame.frame
for y in range(1,height-1):
for x in range(1,width-1):
offset = 3 * (y * MAX_FRAME_WIDTH + x)
upper_row_offset = offset - MAX_FRAME_WIDTH*3
lower_row_offset = offset + MAX_FRAME_WIDTH*3
gx = abs(-frame_i[lower_row_offset-3] + frame_i[lower_row_offset+3] -
2*frame_i[offset-3] + 2*frame_i[offset+3] -
frame_i[upper_row_offset-3] + frame_i[upper_row_offset+3])
gy = abs(frame_i[lower_row_offset-3] + 2*frame_i[lower_row_offset] +
frame_i[lower_row_offset+3] - frame_i[upper_row_offset-3] -
2*frame_i[upper_row_offset] - frame_i[upper_row_offset+3])
grad = gx + gy
if grad > 255:
grad = 255
sobel.frame[offset+0] = grad
sobel.frame[offset+1] = grad
sobel.frame[offset+2] = grad
sobel_img_path = '/home/xilinx/jupyter_notebooks/examples/data/sobel.jpg'
sobel.save_as_jpeg(sobel_img_path)
Image(filename=sobel_img_path)
hdmi.stop()
del sobel
del grayframe
del hdmi
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading the iris dataset
Step2: Let's combine both variables in one back-to-back stem-and-leaf plot
Step3: And of course, we can save a pdf. Note the option needed
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from stemgraphic.num import stem_graphic
import pandas as pd
import numpy as np
import math
df = pd.read_csv('../datasets/iris.csv')
df.describe()
fig, ax = stem_graphic(df['sepal_length'],
random_state=42,
title='sepal_length')
fig, ax = stem_graphic(df['sepal_width'],
random_state=42,
mirror=True,
title='sepal_width')
fig, ax = stem_graphic(df['sepal_length'],
df['sepal_width'],
random_state=42,
legend_pos=None, outliers=True)
fig, ax = stem_graphic(df['sepal_width'],
df['sepal_length'],
random_state=42,
legend_pos=None)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The approach we will take is to take 2 fibers from our graph and verify that we end up with the appropriate voxels in our streamlines being connected
Step2: First, we should check to see that our graph ends up with the right number of vertices. We begin by looking at the floored values of the above voxel positions, since our image resolution is at 1mm scale
Step3: and we see that there are 8 unique possible vertices, defining a vertex as a unique point in 3-dimensional space at 1mm resolution. We then can check out the number of unique vertices in our corresponding graph
Step4: We check that the voxel ids are the same
Step5: Indicating that our vertex indices appear to be correct. Let's check our streamlines to verify that the vertices each streamline is incident to are fully connected (and consequently have nonzero edge weight) in our resulting graph
Step6: Since we don't get any errors here, it is clear that every element that is in our graph should, in fact, be there. Using set notation, what we have shown is that
|
<ASSISTANT_TASK:>
Python Code:
import ndmg
import ndmg.utils as mgu
# run small demo for experiments
print(mgu.execute_cmd('ndmg_demo-dwi', verb=True)[0])
import numpy as np
fibs = np.load('/tmp/small_demo/outputs/fibers/KKI2009_113_1_DTI_s4_fibers.npz')['arr_0']
small_fibs = fibs[1:3]
from ndmg.graph import biggraph as mgg
from ndmg.graph.zindex import XYZMorton
g1 = mgg()
g1.make_graph(small_fibs)
import networkx as nx
gra = nx.Graph()
gra.add_weighted_edges_from(g1.edge_list)
poss_vertices = set() # use a set since we want unique elements
streamlines = []
for stream in small_fibs:
vertices = set()
for vertex in stream:
mid = str(XYZMorton(tuple(np.round(vertex)))) # morton index for vertex
vertices.add(mid)
poss_vertices.add(mid)
streamlines.append(vertices)
print(len(poss_vertices))
print(len(gra.nodes()))
print(poss_vertices == set(gra.nodes()))
from itertools import combinations
edgect = 0 # count the number of edges we should have
for stream in streamlines:
combns = combinations(stream, 2) # stream is a list of vertices
for comb in combns:
edgect += 1
        if gra.get_edge_data(*comb) is None: # the pair must actually be connected in the graph
raise ValueError('Edge should exist that isnt in the graph!')
print(edgect == .5*nx.to_numpy_matrix(gra).sum()) # halve the adjacency-matrix sum, since it counts
                                                  # each undirected edge twice (once per direction)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The reason for the error above is that the apostrophe in I'm ended the string. You can use combinations of double and single quotes to get the complete statement.
Step2: Now let's learn about printing strings!
Step3: We can use a print statement to print a string.
Step4: <font color='red'>Python 3 Alert!</font>
Step5: String Basics
Step6: String Indexing
Step7: Let's start indexing!
Step8: We can use a colon to perform slicing, which grabs everything up to a designated point.
Step9: Note the above slicing. Here we're telling Python to grab everything from 0 up to 3. It doesn't include the 3rd index. You'll notice this a lot in Python, where statements are usually in the context of "up to, but not including".
Step10: We can also use negative indexing to go backwards.
Step11: We can also use index and slice notation to grab elements of a sequence by a specified step size (the default is 1). For instance, we can use two colons in a row and then a number specifying the frequency with which to grab elements. For example
Step12: String Properties
Step13: Notice how the error tells us directly what we can't do, change the item assignment!
Step14: We can use the multiplication symbol to create repetition!
Step15: Basic Built-in String methods
Step16: There are many more methods than the ones covered here. Visit the advanced String section to find out more!
|
<ASSISTANT_TASK:>
Python Code:
# Single word
'hello'
# Entire phrase
'This is also a string'
# We can also use double quote
"String built with double quotes"
# Be careful with quotes!
' I'm using single quotes, but will create an error'
"Now I'm ready to use the single quotes inside a string!"
# We can simply declare a string
'Hello World'
# note that we can't output multiple strings this way
'Hello World 1'
'Hello World 2'
print 'Hello World 1'
print 'Hello World 2'
print 'Use \n to print a new line'
print '\n'
print 'See what I mean?'
# To use print function from Python 3 in Python 2
from __future__ import print_function
print('Hello World')
len('Hello World')
# Assign s as a string
s = 'Hello World'
#Check
s
# Print the object
print(s)
# Show first element (in this case a letter)
s[0]
s[1]
s[2]
# Grab everything past the first term all the way to the length of s which is len(s)
s[1:]
# Note that there is no change to the original s
s
# Grab everything UP TO the 3rd index
s[:3]
#Everything
s[:]
# Last letter (one index behind 0 so it loops back around)
s[-1]
# Grab everything but the last letter
s[:-1]
# Grab everything, but go in step sizes of 1
s[::1]
# Grab everything, but go in step sizes of 2
s[::2]
# We can use this to print a string backwards
s[::-1]
s
# Let's try to change the first letter to 'x'
s[0] = 'x'
s
# Concatenate strings!
s + ' concatenate me!'
# We can reassign s completely though!
s = s + ' concatenate me!'
print(s)
s
letter = 'z'
letter*10
s
# Upper Case a string
s.upper()
# Lower case
s.lower()
# Split a string by blank space (this is the default)
s.split()
# Split by a specific element (doesn't include the element that was split on)
s.split('W')
'Insert another string with curly brackets: {}'.format('The inserted string')
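# Hedged aside (not in the original notebook): .format() also accepts positional and keyword placeholders
print('The {0} {1} {0}'.format('quick', 'fox'))
print('My favorite number is {num}'.format(num=12))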
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Stack NIR and VIS bands
Step2: Calculate NDVI & Plot
Step3: Extract Spectra Using Masks
Step4: Function to calculate the mean spectra for reflectance values thresholded by NDVI using numpy.ma
Step5: We can test out this function for various NDVI thresholds. We'll test two together, and you can try out different values on your own. Let's look at the average spectra for healthy vegetation (NDVI > 0.6), and for a lower threshold (NDVI < 0.3).
|
<ASSISTANT_TASK:>
Python Code:
#Define inputs
filename = '../data/SERC/hyperspectral/NEON_D02_SERC_DP1_20160807_160559_reflectance.h5'
sercRefl, sercRefl_md, wavelengths = h5refl2array(filename)
clipExtDict = {}
clipExtDict['xMin'] = 367400.
clipExtDict['xMax'] = 368100.
clipExtDict['yMin'] = 4305750.
clipExtDict['yMax'] = 4306350.
clipExtent = (clipExtDict['xMin'],clipExtDict['xMax'],clipExtDict['yMin'],clipExtDict['yMax'])
clipIndex = calc_clip_index(clipExtDict,sercRefl_md['ext_dict'])
sercReflSubset = subset_clean_refl(sercRefl,sercRefl_md,clipIndex)
#Select bands to be used in the NDVI calculation
ndvi_bands = (58,90) #NIR and VIS (Red) bands
#Check the center wavelengths that these bands represent
band_width = wavelengths.value[1]-wavelengths.value[0]
print('band 58 wavelength range: ' + str(round(wavelengths.value[57]-band_width/2,2))
+ '-' + str(round(wavelengths.value[57]+band_width/2,2)) + ' nm')
print('band 90 wavelength range: ' + str(round(wavelengths.value[89]-band_width/2,2))
+ '-' + str(round(wavelengths.value[89]+band_width/2,2)) + ' nm')
#Use the stack_subset_bands function to create a stack of the subsetted red and NIR bands needed to calculate NDVI
ndvi_stack = stack_subset_bands(sercRefl,sercRefl_md,ndvi_bands,clipIndex)
vis = ndvi_stack[:,:,0].astype(float)
nir = ndvi_stack[:,:,1].astype(float)
ndvi = np.divide((nir-vis),(nir+vis))
plot_band_array(ndvi,clipExtent,(0,np.max(ndvi)),\
title='SERC Subset NDVI \n (VIS = Band 58, NIR = Band 90)',cmap_title='NDVI',colormap='seismic')
import copy
ndvi_gtpt6 = copy.copy(ndvi)
ndvi_gtpt6[ndvi<0.6] = np.nan #set all pixels with NDVI < 0.6 to nan, keeping only values > 0.6
print('Mean NDVI > 0.6:',round(np.nanmean(ndvi_gtpt6),2))
plot_band_array(ndvi_gtpt6,clipExtent,(0.6,1),title='SERC Subset NDVI > 0.6 \n (VIS = Band 58, NIR = Band 90)',
cmap_title='NDVI',colormap='RdYlGn')
import numpy.ma as ma
def calculate_mean_masked_spectra(reflArray,ndvi,ndvi_threshold,ineq='>'):
mean_masked_refl = np.zeros(reflArray.shape[2])
for i in np.arange(reflArray.shape[2]):
refl_band = reflArray[:,:,i]
if ineq == '>':
ndvi_mask = ma.masked_where((ndvi<=ndvi_threshold) | (np.isnan(ndvi)),ndvi)
elif ineq == '<':
ndvi_mask = ma.masked_where((ndvi>=ndvi_threshold) | (np.isnan(ndvi)),ndvi)
else:
print('ERROR: Invalid inequality. Enter < or >')
masked_refl = ma.MaskedArray(refl_band,mask=ndvi_mask.mask)
mean_masked_refl[i] = ma.mean(masked_refl)
return mean_masked_refl
sercSpectra_ndvi_gtpt6 = calculate_mean_masked_spectra(sercReflSubset,ndvi,0.6)
sercSpectra_ndvi_ltpt3 = calculate_mean_masked_spectra(sercReflSubset,ndvi,0.3,ineq='<')
import pandas
#Remove water vapor band windows & last 10 bands
w = copy.copy(wavelengths.value)
w[((w >= 1340) & (w <= 1445)) | ((w >= 1790) & (w <= 1955))]=np.nan
w[-10:]=np.nan; # the last 10 bands sometimes have noise - best to eliminate
nan_ind = np.argwhere(np.isnan(w))
sercSpectra_ndvi_gtpt6[nan_ind] = np.nan
sercSpectra_ndvi_ltpt3[nan_ind] = np.nan
#Create dataframe with masked NDVI mean spectra
sercSpectra_ndvi_df = pandas.DataFrame()
sercSpectra_ndvi_df['wavelength'] = w
sercSpectra_ndvi_df['mean_refl_ndvi_gtpt6'] = sercSpectra_ndvi_gtpt6
sercSpectra_ndvi_df['mean_refl_ndvi_ltpt3'] = sercSpectra_ndvi_ltpt3
ax = plt.gca();
sercSpectra_ndvi_df.plot(ax=ax,x='wavelength',y='mean_refl_ndvi_gtpt6',color='green',
edgecolor='none',kind='scatter',label='NDVI > 0.6',legend=True);
sercSpectra_ndvi_df.plot(ax=ax,x='wavelength',y='mean_refl_ndvi_ltpt3',color='red',
edgecolor='none',kind='scatter',label='NDVI < 0.3',legend=True);
ax.set_title('Mean Spectra of Reflectance Masked by NDVI')
ax.set_xlim([np.nanmin(w),np.nanmax(w)]); ax.set_ylim(0,0.45)
ax.set_xlabel("Wavelength, nm"); ax.set_ylabel("Reflectance")
ax.grid('on');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We change the number of MC steps to give a view of the time evolution of the M-H chain
Step2: So the constructed chains do converge to our desired value. But notice the length of the transient (burn-in) steps
Step3: Let $J=1$,$k_B=1$ in the following discussion
Step4: Let's see the result for 1D and 2D Ising Models
Step5: Remarks
Step6: We modify the single-flip MC code a bit to have it output snapshots of the system at the given time stamps
Step7: This is what happens when $T = 2.2$
Step8: So the Markov chain, instead of converging, actually goes into an ordered state with considerable correlation between the spins. The spin clusters formed in the process make it more difficult for future states to be accepted, as a randomly picked molecule is more likely to be inside a cluster than at the boundary between two clusters. Flipping this molecule would thus increase the system's energy, so it's more likely to be rejected. This is the explanation for the above-mentioned critical slowing down behavior.
Step9: Cluster MC vs. single-flip MC in the field
Step10: 3.4 Comparison to theoretical results
Step11: 4. Development of MC methods
|
<ASSISTANT_TASK:>
Python Code:
def dice_samples(trials):
prob = {1: 1/2, 2: 1/4, 3: 1/8, 4: 1/16, 5: 1/32, 6: 1/32}
samples = np.zeros(trials + 1, dtype=int)
samples[0] = 1
for i in range(trials):
a = samples[i]
b = np.random.random_integers(1, 6) # uniform a priori distribution
pa = prob[a]
pb = prob[b]
if pb >= pa or np.random.rand() < pb / pa:
samples[i + 1] = b
else:
samples[i + 1] = a
return samples
def summarize(samples):
'''
Return the percentage of every face in the samples
'''
num_samples = len(samples)
    distribution = {i: (samples == i).sum() * 100 / num_samples for i in [1, 2, 3, 4, 5, 6]}  # percentages
return distribution
samples = dice_samples(1000000)
ns = np.array(np.logspace(1, 6, num=50), dtype=int)
distributions = {i: np.zeros(50) for i in [1, 2, 3, 4 ,5 ,6]}
for index in range(50):
n = ns[index]
distribution = summarize(samples[:n])
for i in [1, 2, 3, 4, 5, 6]:
distributions[i][index] = distribution[i]
for i in [1, 2, 3, 4, 5, 6]:
plt.plot(ns, distributions[i], label='Face {}'.format(i))
plt.xlabel('MC iterations')
plt.ylabel('Percentage')
plt.ylim(0, 100)
plt.semilogx()
plt.legend()
plt.grid()
plt.title("The Metropolis-Hastings dice")
# First, we need helper function to transform between the (i, j) coordinate of a 2D lattice and a serial one
# a: length of the square lattice's side
def flatten_2d(i, j, a):
return i * a + j # serial No. = row No. * lenght + colum No.
def unflatten_2d(n, a):
j = n % a
i = (n - j) // a
return i, j
# Generate the adjacency list
def gen_neighbors_1d(N):
neighbors = np.zeros((N, 2), dtype=int)
for n in range(N):
neighbors[n][0] = (n - 1) % N # left
neighbors[n][1] = (n + 1) % N # right
return neighbors
def gen_neighbors_2d(a):
neighbors = np.zeros((a*a, 4), dtype=int)
for n in range(a*a):
i, j = unflatten_2d(n, a)
neighbors[n][0] = flatten_2d(i, (j - 1) % a, a) # left
neighbors[n][1] = flatten_2d(i, (j + 1) % a, a) # right
neighbors[n][2] = flatten_2d((i - 1) % a, j, a) # up
neighbors[n][3] = flatten_2d((i + 1) % a, j, a) # down
return neighbors
def MH_single_flip(neighbors_list, T, iterations):
'''
This function performs single flip MC iterations for an Ising system with arbitrary topology,
given by the adjaceny list `neighbors_list`.
The inital state is chosen randomly.
Returns
=======
`magnetization`: magnetization (average molecular spin) at each MC step
`energy`: total energy of the system at each MC step
'''
# Initialization
size = neighbors_list.shape[0]
spins = np.random.random_integers(0, 1, size)
spins[spins == 0] = -1
# Allocation
magnetization = np.zeros(iterations + 1)
energy = np.zeros(iterations + 1)
magnetization[0] = spins.sum()
energy[0] = -spins.dot(spins[neighbors_list].sum(axis=1)) / 2
for step in range(iterations):
n = np.random.randint(0, size) # Choose next state according to the a priori distribution
delta_E = 2 * spins[n] * spins[neighbors_list[n]].sum()
if delta_E < 0 or np.random.rand() < np.exp(-delta_E / T):
# Acceptance
spins[n] = -spins[n]
magnetization[step + 1] = magnetization[step] + 2 * spins[n]
energy[step + 1] = energy[step] + delta_E
else:
# Rejection
magnetization[step + 1] = magnetization[step]
energy[step + 1] = energy[step]
return magnetization / size, energy
def plot_magnetization(dimension):
if dimension == 1:
neighbors_list = gen_neighbors_1d(400)
elif dimension == 2:
neighbors_list = gen_neighbors_2d(20)
T_list = [0.5, 1.0, 1.5, 1.8, 2.0, 2.2, 2.4, 3.0, 3.5]
fig = plt.figure(figsize=(12, 40))
for i in range(9):
T = T_list[i]
magnetization, _ = MH_single_flip(neighbors_list, T, 100000)
# Random walk history
fig.add_subplot(9, 2, 2 * i + 1)
plt.plot(magnetization)
plt.ylim(-1, 1)
plt.ylabel('Magnetization')
plt.xlabel('Iterations')
plt.annotate('T = {}'.format(T), (10000,0.8))
plt.grid()
# Sample distribution histogram
fig.add_subplot(9, 2, 2 * i + 2)
plt.hist(magnetization, bins=np.linspace(-1, 1, num=20), orientation='horizontal')
plt.ylim(-1, 1)
plt.xlabel('Counts')
plt.grid()
plt.suptitle("Monte Carlo simulation history & distribution to the {:d}D Ising Model".format(dimension))
plot_magnetization(1)
plot_magnetization(2)
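# Illustrative sketch: a rough estimate of the burn-in (transient) length noted above.
# The 20000-step cutoff is an assumption read off the plots, not a derived value.
burn_in = 20000
magnetization_2d, _ = MH_single_flip(gen_neighbors_2d(20), 1.5, 100000)
print('mean |M| over all steps:  ', np.abs(magnetization_2d).mean())
print('mean |M| after burn-in:   ', np.abs(magnetization_2d[burn_in:]).mean())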
def spin_correlation(spins, a):
M = spins.mean()
spins_2d = spins.reshape((a, a))
rs = np.arange(-a/2, a/2 + 1, dtype=int)
num_rs = len(rs)
correlations = np.zeros((num_rs, num_rs))
for i, y in enumerate(rs):
for j, x in enumerate(rs):
correlations[i, j] = (spins_2d * np.roll(np.roll(spins_2d, x, axis=1), y, axis=0)).mean() + M**2
return correlations
def gen_snapshots(time_stamps, a, T):
iterations = time_stamps[-1]
snapshots = {}
neighbors_list = gen_neighbors_2d(a)
size = neighbors_list.shape[0]
spins = np.random.random_integers(0, 1, size)
spins[spins == 0] = -1
for step in range(iterations):
n = np.random.randint(0, size)
delta_E = 2 * spins[n] * spins[neighbors_list[n]].sum()
if delta_E < 0 or np.random.rand() < np.exp(-delta_E / T):
spins[n] = -spins[n]
if step + 1 in time_stamps:
snapshots[step + 1] = {
'spins': spins.copy().reshape((a, a)),
'magnetization': spins.mean(),
'correlation': spin_correlation(spins, a)
}
return snapshots
a = 40
T = 2.2
time_stamps = np.array([1, 10, 100, 1000, 10000, 20000, 40000, 60000, 80000, 100000])
snapshots = gen_snapshots(time_stamps, a, T)
fig, axes = plt.subplots(10, 2, figsize=(12, 6 * 10))
for i, t in enumerate(time_stamps):
axes[i][0].matshow(snapshots[t]['spins'], interpolation='none')
axes[i][0].set_ylabel('MC step {}, M = {:.3f}'.format(t, snapshots[t]['magnetization']))
axes[i][0].set_xlabel('Spins')
axes[i][1].matshow(snapshots[t]['correlation'])
axes[i][1].set_xlabel('Spin correlation function')
axes[i][1].set_xticks([0, 10, 20, 30, 40])
axes[i][1].set_xticklabels([-20, -10, 0, 10, 20])
axes[i][1].set_yticks([0, 10, 20, 30, 40])
axes[i][1].set_yticklabels([-20, -10, 0, 10, 20])
def cluster_MC(neighbors_list, T, iterations):
    p = 1 - np.exp(-2 / T)  # Wolff cluster add probability 1 - exp(-2*J/(k_B*T)), with J = k_B = 1
# Initialization
size = neighbors_list.shape[0]
spins = np.random.random_integers(0, 1, size)
spins[spins == 0] = -1
# Allocation
magnetization = np.zeros(iterations + 1)
magnetization[0] = spins.sum()
energy = np.zeros(iterations + 1)
energy[0] = -spins.dot(spins[neighbors_list].sum(axis=1))
for step in range(iterations):
# Use a deque to implement breadth-first search
n0 = np.random.randint(0, size)
sign = spins[n0]
cluster = set([n0])
pockets = deque([n0])
finished = False
while not finished:
try:
n = pockets.popleft()
neighbors = neighbors_list[n]
for neighbor in neighbors:
if spins[neighbor] == sign and neighbor not in cluster and np.random.rand() < p:
cluster.add(neighbor)
pockets.append(neighbor)
except IndexError:
finished = True
# Flip the cluster
cluster = np.fromiter(cluster, dtype=int)
spins[cluster] = -sign
magnetization[step + 1] = magnetization[step] - 2 * sign * len(cluster)
energy[step + 1] = -spins.dot(spins[neighbors_list].sum(axis=1))
return magnetization / size, energy / 2 # Every pair is counted two times
def plot_magnetization_cluster(dimension):
if dimension == 1:
neighbors_list = gen_neighbors_1d(400)
elif dimension == 2:
neighbors_list = gen_neighbors_2d(20)
T_list = [0.5, 1.0, 1.5, 1.8, 2.0, 2.2, 2.4, 3.0, 3.5]
fig = plt.figure(figsize=(12, 40))
for i in range(9):
T = T_list[i]
magnetization, _ = cluster_MC(neighbors_list, T, 1000)
        # Random walk history
fig.add_subplot(9, 2, 2 * i + 1)
plt.plot(magnetization)
plt.ylim(-1, 1)
plt.ylabel('Magnetization')
plt.xlabel('Iterations')
plt.annotate('T = {}'.format(T), (50,0.8))
        # Sample distribution histogram
fig.add_subplot(9, 2, 2 * i + 2)
plt.hist(magnetization, bins=np.linspace(-1, 1, num=20), orientation='horizontal')
plt.ylim(-1, 1)
plt.xlabel('Counts')
plt.suptitle("Monte Carlo simulation history & distribution to the {:d}D Ising Model".format(dimension))
plot_magnetization_cluster(2)
def averages(magnetization, energy, T):
    '''Compute the ensemble average of each quantity'''
M = np.abs(magnetization).mean()
E = energy.mean()
CV = energy.var() / T
return M, E, CV
# The code below runs VERY SLOWLY
points = 31
dimension = 64
iterations = 5000
neighbors_list = gen_neighbors_2d(dimension)
Ts = np.linspace(1.0, 4.0, points)
Ms = np.zeros(points)
for i in range(points):
T = Ts[i]
magnetization, _ = cluster_MC(neighbors_list, T, iterations)
Ms[i] = np.abs(magnetization).mean()
# print("Iteration for T = {:.3f} complete".format(T)) # Uncomment to print progress as the simulation goes
Onsager_Tc = 2.269
Ts_plot = np.linspace(1.0, 4.0, num=200)
Onsager_Ms = np.zeros(len(Ts_plot))
for i, T in enumerate(Ts_plot):
if T <= 2.269:
Onsager_Ms[i] = (1 - (np.sinh(2/T))**(-4))**(1/8)
plt.plot(Ts, Ms, '^', label='simulation')
plt.plot(Ts_plot, Onsager_Ms, '--', label='theoretical')
plt.ylim(0, 1)
plt.legend()
plt.xlabel('Temperature')
plt.ylabel('Magnetization')
plt.grid()
plt.title("Theoretical vs. Numerical values for spontaneous magnitization")
import sys
print("Python version = ", sys.version)
print("Numpy version = ", np.version.version)
print("Matplotlib version = ", matplotlib.__version__)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this notebook we will explore experimental design of competition assays.
Step2: We can use this function to help us decide the appropriate concentrations to use to determine the Kd_A of a competitive ligand. For example, if we see no shift in the Kd_L_app upon adding the competitive ligand, we will not be able to calculate the Kd_A.
Step3: Here we can see that even though we've used a large range of competitor concentration (from 100 uM to 1 pM), we only see a shift of the apparent Kd at the highest concentration for this Bosutinib-Imatinib experiment!
Step4: Here we can see that for the Gefitinib-Imatinib experiment we can get a shift of the K_L_app with much lower concentration of the competitive ligand!
Step5: Even Better!
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set_style("whitegrid")
from matplotlib.colors import LogNorm
%matplotlib inline
#Three component competitive binding function
#This function and its assumptions are defined in greater detail in this notebook:
## 1a-modelling-CompetitiveBinding-ThreeComponentBinding.ipynb
def three_component_competitive_binding(Ptot, Ltot, Kd_L, Atot, Kd_A):
Parameters
----------
Ptot : float
Total protein concentration
Ltot : float
Total tracer(fluorescent) ligand concentration
Kd_L : float
Dissociation constant of the fluorescent ligand
Atot : float
Total competitive ligand concentration
Kd_A : float
Dissociation constant of the competitive ligand
Returns
-------
P : float
Free protein concentration
L : float
Free ligand concentration
A : float
Free ligand concentration
PL : float
Complex concentration
Kd_L_app : float
Apparent dissociation constant of L in the presence of A
Usage
-----
[P, L, A, PL, Kd_L_app] = three_component_competitive_binding(Ptot, Ltot, Kd_L, Atot, Kd_A)
Kd_L_app = Kd_L*(1+Atot/Kd_A)
PL = 0.5 * ((Ptot + Ltot + Kd_L_app) - np.sqrt((Ptot + Ltot + Kd_L_app)**2 - 4*Ptot*Ltot)) # complex concentration (uM)
P = Ptot - PL; # free protein concentration in sample cell after n injections (uM)
L = Ltot - PL; # free tracer ligand concentration in sample cell after n injections (uM)
A = Atot - PL; # free competitive ligand concentration in sample cell after n injections (uM)
return [P, L, A, PL, Kd_L_app]
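# Illustrative sketch: the relation above, Kd_L_app = Kd_L*(1 + Atot/Kd_A), can be
# inverted to recover Kd_A from an observed shift: Kd_A = Atot/(Kd_L_app/Kd_L - 1).
# If no shift is observed (Kd_L_app ~ Kd_L) the denominator goes to zero and Kd_A
# cannot be determined. The helper below is a hypothetical convenience, assuming
# ligand depletion by the complex is negligible.
def estimate_Kd_A(Kd_L, Kd_L_app, Atot):
    return Atot / (Kd_L_app / Kd_L - 1.0)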
# Let's take the example of these three ligands affinity for Src Kinase
Kd_Bos = 1.0e-9 # M # Fluorescent
Kd_Gef = 3800e-9 # M # Fluorescent
Kd_Ima = 3000e-9 # M # Non-Fluorescent (Competitive)
Ptot = 0.5e-6 # M
Ltot = 20.0e-6 / np.array([10**(float(i)/2.0) for i in range(12)]) # M
# Create grid of fluorescence as a function of imatinib concentration
#we want to make a grid of 8 Imatinib concentrations
#what if we just go be factors of ten from 10e-5 to 10e-12 M
concentration_range = [10e-5,10e-6,10e-7,10e-8,10e-9,10e-10,10e-11,10e-12] # M
competition_grid_bosutinib = []
for i,conc in enumerate(concentration_range):
[P_bos_ima, L_bos_ima, A_bos_ima, PL_bos_ima, Kd_bos_ima] = three_component_competitive_binding(Ptot, Ltot, Kd_Bos, conc, Kd_Ima)
if i == 0:
competition_grid_bosutinib = PL_bos_ima
else:
competition_grid_bosutinib = np.vstack((competition_grid_bosutinib,PL_bos_ima))
from matplotlib.pyplot import cm
color=cm.PuBu_r(np.linspace(0,0.5,len(competition_grid_bosutinib)))
plt.figure(figsize=(10,4))
for i,PL in enumerate(competition_grid_bosutinib):
plt.semilogx(Ltot,PL,color = color[i])
plt.xlabel('fluorescent ligand concentration (M)')
plt.ylabel('complex concentration (M)');
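# Illustrative sketch: quantify the fold-shift of the apparent Kd,
# Kd_L_app/Kd_L = 1 + Atot/Kd_A, for each competitor concentration used above,
# to put numbers on the shift (or lack of shift) seen in the plot.
for conc in concentration_range:
    print('Atot = {:.1e} M -> Kd_L_app/Kd_L = {:.2f}'.format(conc, 1 + conc / Kd_Ima))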
for i,conc in enumerate(concentration_range):
[P_gef_ima, L_gef_ima, A_gef_ima, PL_gef_ima, Kd_gef_ima] = three_component_competitive_binding(Ptot, Ltot, Kd_Gef, conc, Kd_Ima)
if i == 0:
competition_grid_gefitinib = PL_gef_ima
else:
competition_grid_gefitinib= np.vstack((competition_grid_gefitinib,PL_gef_ima))
from matplotlib.pyplot import cm
color=cm.PuBu_r(np.linspace(0,0.5,len(competition_grid_gefitinib)))
plt.figure(figsize=(10,4))
for i,PL in enumerate(competition_grid_gefitinib):
plt.semilogx(Ltot,PL,color = color[i],label='%s M'%concentration_range[i])
plt.text(Ltot[0],competition_grid_gefitinib[0][0],'%s uM'%(concentration_range[0]*1e6))
plt.text(Ltot[0],competition_grid_gefitinib[1][0],'%s uM'%(concentration_range[1]*1e6))
plt.text(Ltot[0],competition_grid_gefitinib[2][0],'%s uM'%(concentration_range[2]*1e6))
plt.xlabel('fluorescent ligand concentration (M)')
plt.ylabel('complex concentration (M)');
plt.legend(loc=0);
# Let's take look at how this would look like for Gefitinib and Imatinib with Abl
Kd_Gef = 2200e-9 # M # Fluorescent
Kd_Ima = 1.1e-9 # M # Non-Fluorescent (Competitive)
for i,conc in enumerate(concentration_range):
[P_gef_ima, L_gef_ima, A_gef_ima, PL_gef_ima, Kd_gef_ima] = three_component_competitive_binding(Ptot, Ltot, Kd_Gef, conc, Kd_Ima)
if i == 0:
competition_grid_gefitinib = PL_gef_ima
else:
competition_grid_gefitinib= np.vstack((competition_grid_gefitinib,PL_gef_ima))
from matplotlib.pyplot import cm
color=cm.PuBu_r(np.linspace(0,0.7,len(competition_grid_gefitinib)))
plt.figure(figsize=(10,4))
for i,PL in enumerate(competition_grid_gefitinib):
plt.semilogx(Ltot,PL,color = color[i],label='%s M'%concentration_range[i])
plt.text(Ltot[0],competition_grid_gefitinib[3][0],'%s uM'%(concentration_range[3]*1e6))
plt.text(Ltot[0],competition_grid_gefitinib[4][0],'%s uM'%(concentration_range[4]*1e6))
plt.text(Ltot[0],competition_grid_gefitinib[5][0],'%s uM'%(concentration_range[5]*1e6))
plt.xlabel('fluorescent ligand concentration (M)')
plt.ylabel('complex concentration (M)');
plt.legend(loc=0);
#Changing sig figs for Ltot
Ltot_visual = ['%.2g' % a for a in Ltot]
Ltot_visual
plt.figure(figsize=(12, 6))
plt.pcolor(competition_grid_gefitinib,
norm=LogNorm(vmin=competition_grid_gefitinib.min(),
vmax=competition_grid_gefitinib.max()),
edgecolors='w',
linewidths=2,
cmap='PuBu_r')
plt.ticklabel_format(style='plain')
plt.xlabel('fluorescent ligand concentration (M)')
plt.ylabel('non-fluorescent ligand concentration (M)')
plt.xticks(np.arange(0.5, 12.5),Ltot_visual,rotation='vertical');
plt.yticks(np.arange(0.5, 12.5),concentration_range);
plt.ylim((0, len(concentration_range)))
plt.title('Competition Grid for Gefitinib X Imatinib in 0.5 uM Abl')
plt.colorbar(label='complex concentration (M)');
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Original Image (converted to grayscale)
Step2: Output with Python's native Median Filter function
Step3: As shown by the print above, AMF yields almost twice as high a deviation as the native median filter technique.
|
<ASSISTANT_TASK:>
Python Code:
Image.fromarray(output)
Image.fromarray(grayscale_image)
native_output = image_org.filter(ImageFilter.MedianFilter(size = 3))
native_output
deviation_native = np.sqrt(np.sum(np.square(grayscale_image-np.array(rgb2gray(np.array(native_output))))))
deviation_original = np.sum(np.square(grayscale_image-np.array(output)))
print("Deviation from the original salt and pepper images:")
print("Deviation via Median Filter (built-in): ", deviation_native)
print("Deviation via Adaptive Median Filter: ", deviation_original)
print(f"Percent difference b/w deviations: {100*(deviation_original - deviation_native)/deviation_original}%")
### Therefore, the built-in technique is nowhere near as good as the Adaptive Median Filter technique.
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: I'll start with the data from the BRFSS again.
Step2: Here are the mean and standard deviation of female height in cm.
Step3: NormalPdf returns a Pdf object that represents the normal distribution with the given parameters.
Step4: thinkplot provides Pdf, which plots the probability density with a smooth curve.
Step5: Pdf provides MakePmf, which returns a Pmf object that approximates the Pdf.
Step6: If you have a Pmf, you can also plot it using Pdf, if you have reason to think it should be represented as a smooth curve.
Step7: Using a sample from the actual distribution, we can estimate the PDF using Kernel Density Estimation (KDE).
Step8: Moments
Step9: The first raw moment is the mean. The other raw moments don't mean much.
Step10: The central moments are powers of distances from the mean.
Step11: The first central moment is approximately 0. The second central moment is the variance.
Step12: The standardized moments are ratios of central moments, with powers chosen to make the dimensions cancel.
Step13: The third standardized moment is skewness.
Step14: Normally a negative skewness indicates that the distribution has a longer tail on the left. In that case, the mean is usually less than the median.
Step15: But in this case the mean is greater than the median, which indicates skew to the right.
Step16: Because the skewness is based on the third moment, it is not robust; that is, it depends strongly on a few outliers. Pearson's median skewness is more robust.
Step17: Pearson's skewness is positive, indicating that the distribution of female heights is slightly skewed to the right.
Step18: Birth weights
Step19: Based on KDE, it looks like the distribution is skewed to the left.
Step20: The mean is less than the median, which is consistent with left skew.
Step21: And both ways of computing skew are negative, which is consistent with left skew.
Step22: Adult weights
Step23: The mean is greater than the median, which is consistent with skew to the right.
Step24: And both ways of computing skewness are positive.
Step26: Exercises
Step27: Compute the median, mean, skewness and Pearson’s skewness of the resulting sample. What fraction of households report a taxable income below the mean? How do the results depend on the assumed upper bound?
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import print_function, division
%matplotlib inline
import numpy as np
import brfss
import thinkstats2
import thinkplot
df = brfss.ReadBrfss(nrows=None)
female = df[df.sex==2]
female_heights = female.htm3.dropna()
mean, std = female_heights.mean(), female_heights.std()
mean, std
pdf = thinkstats2.NormalPdf(mean, std)
pdf.Density(mean + std)
thinkplot.Pdf(pdf, label='normal')
thinkplot.Config(xlabel='x', ylabel='PDF', xlim=[140, 186])
pmf = pdf.MakePmf()
thinkplot.Pmf(pmf, label='normal')
thinkplot.Config(xlabel='x', ylabel='PDF', xlim=[140, 186])
thinkplot.Pdf(pmf, label='normal')
thinkplot.Config(xlabel='x', ylabel='PDF', xlim=[140, 186])
thinkplot.Pdf(pdf, label='normal')
sample = np.random.normal(mean, std, 500)
sample_pdf = thinkstats2.EstimatedPdf(sample, label='sample')
thinkplot.Pdf(sample_pdf, label='sample KDE')
thinkplot.Config(xlabel='x', ylabel='PDF', xlim=[140, 186])
def RawMoment(xs, k):
return sum(x**k for x in xs) / len(xs)
RawMoment(female_heights, 1), RawMoment(female_heights, 2), RawMoment(female_heights, 3)
def Mean(xs):
return RawMoment(xs, 1)
Mean(female_heights)
def CentralMoment(xs, k):
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
CentralMoment(female_heights, 1), CentralMoment(female_heights, 2), CentralMoment(female_heights, 3)
def Var(xs):
return CentralMoment(xs, 2)
Var(female_heights)
def StandardizedMoment(xs, k):
var = CentralMoment(xs, 2)
std = np.sqrt(var)
return CentralMoment(xs, k) / std**k
StandardizedMoment(female_heights, 1), StandardizedMoment(female_heights, 2), StandardizedMoment(female_heights, 3)
def Skewness(xs):
return StandardizedMoment(xs, 3)
Skewness(female_heights)
def Median(xs):
cdf = thinkstats2.Cdf(xs)
return cdf.Value(0.5)
Mean(female_heights), Median(female_heights)
def PearsonMedianSkewness(xs):
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = np.sqrt(var)
gp = 3 * (mean - median) / std
return gp
PearsonMedianSkewness(female_heights)
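# Illustrative sketch: a quick check of the skewness/mean/median relationship on a
# synthetic right-skewed sample. The exponential distribution is an arbitrary choice
# for illustration; its mean exceeds its median and both skewness measures are positive.
synthetic = np.random.exponential(scale=1.0, size=10000)
print(Mean(synthetic), Median(synthetic), Skewness(synthetic), PearsonMedianSkewness(synthetic))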
import first
live, firsts, others = first.MakeFrames()
birth_weights = live.totalwgt_lb.dropna()
pdf = thinkstats2.EstimatedPdf(birth_weights)
thinkplot.Pdf(pdf, label='birth weight')
thinkplot.Config(xlabel='Birth weight (pounds)', ylabel='PDF')
Mean(birth_weights), Median(birth_weights)
Skewness(birth_weights), PearsonMedianSkewness(birth_weights)
adult_weights = df.wtkg2.dropna()
pdf = thinkstats2.EstimatedPdf(adult_weights)
thinkplot.Pdf(pdf, label='Adult weight')
thinkplot.Config(xlabel='Adult weight (kg)', ylabel='PDF')
Mean(adult_weights), Median(adult_weights)
Skewness(adult_weights), PearsonMedianSkewness(adult_weights)
def InterpolateSample(df, log_upper=6.0):
Makes a sample of log10 household income.
Assumes that log10 income is uniform in each range.
df: DataFrame with columns income and freq
log_upper: log10 of the assumed upper bound for the highest range
returns: NumPy array of log10 household income
# compute the log10 of the upper bound for each range
df['log_upper'] = np.log10(df.income)
# get the lower bounds by shifting the upper bound and filling in
# the first element
df['log_lower'] = df.log_upper.shift(1)
df.loc[0, 'log_lower'] = 3.0
# plug in a value for the unknown upper bound of the highest range
df.loc[41, 'log_upper'] = log_upper
# use the freq column to generate the right number of values in
# each range
arrays = []
for _, row in df.iterrows():
vals = np.linspace(row.log_lower, row.log_upper, row.freq)
arrays.append(vals)
# collect the arrays into a single sample
log_sample = np.concatenate(arrays)
return log_sample
import hinc
income_df = hinc.ReadData()
log_sample = InterpolateSample(income_df, log_upper=6.0)
log_cdf = thinkstats2.Cdf(log_sample)
thinkplot.Cdf(log_cdf)
thinkplot.Config(xlabel='Household income (log $)',
ylabel='CDF')
sample = np.power(10, log_sample)
cdf = thinkstats2.Cdf(sample)
thinkplot.Cdf(cdf)
thinkplot.Config(xlabel='Household income ($)',
ylabel='CDF')
# Solution goes here
# Solution goes here
# Solution goes here
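# Illustrative sketch of one possible solution, using the functions defined above.
# The numbers depend on the assumed upper bound log_upper used in InterpolateSample.
print('median: ', Median(sample))
print('mean: ', Mean(sample))
print('skewness: ', Skewness(sample))
print('Pearson median skewness: ', PearsonMedianSkewness(sample))
print('fraction of households below the mean: ', cdf.Prob(Mean(sample)))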
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fitting a decaying oscillation
Step2: Now, using curve_fit to fit this model and determine the estimates and uncertainties for the parameters
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
data=np.load("decay_osc.npz")
T=data["tdata"]
Y=data["ydata"]
dy=data["dy"]
f=plt.figure(figsize=(15,10))
plt.errorbar(T,Y,yerr=dy,fmt='o');
assert True # leave this to grade the data import and raw data plot
par_est=[0,0,0,0]
popt, popc = opt.curve_fit(lambda t,A,lam,ome,delt:A*np.exp(-lam*t)*np.cos(ome*t+delt), T, Y, par_est, sigma=dy, absolute_sigma=True)
print(list(zip(popt, np.sqrt(np.diag(popc)))))  # parameter estimates and their 1-sigma uncertainties
f=plt.figure(figsize=(15,10))
plt.errorbar(T,Y,yerr=dy,fmt='o')
Tx=np.linspace(0,20, 1000)
plt.plot(Tx,popt[0]*np.exp(-popt[1]*Tx)*np.cos(popt[2]*Tx+popt[3]));  # fitted model, same form as passed to curve_fit
assert True # leave this cell for grading the fit; should include a plot and printout of the parameters+errors
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input Parameter
Step2: Preparation
Step3: Create space and time vector
Step4: Source signal - Ricker-wavelet
Step5: Time stepping
Step6: Save seismograms
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import time as tm
import matplotlib.pyplot as plt
# Discretization
c1=20 # Number of grid points per dominant wavelength
c2=0.5 # CFL-Number
nx=2000 # Number of grid points
T=10 # Total propagation time
# Source Signal
f0= 10 # Center frequency Ricker-wavelet
q0= 1 # Maximum amplitude Ricker-Wavelet
xscr = 100 # Source position (in grid points)
# Receiver
xrec1=400 # Position Reciever 1 (in grid points)
xrec2=800 # Position Reciever 2 (in grid points)
xrec3=1800 # Position Reciever 3 (in grid points)
# Velocity and density
modell_v = np.hstack((1000*np.ones((int(nx/2))),1500*np.ones((int(nx/2)))))
rho=np.hstack((1*np.ones((int(nx/2))),1.5*np.ones((int(nx/2)))))
# Init wavefields
vx=np.zeros(nx)
p=np.zeros(nx)
vx_x=np.zeros(nx)
p_x=np.zeros(nx)
vx_x2=np.zeros(nx)
p_x2=np.zeros(nx)
vx_x3=np.zeros(nx)
p_x3=np.zeros(nx)
vx_x4=np.zeros(nx)
p_x4=np.zeros(nx)
# Calculate first Lame-Paramter
l=rho * modell_v * modell_v
cmin=min(modell_v.flatten()) # Lowest P-wave velocity
cmax=max(modell_v.flatten()) # Highest P-wave velocity
fmax=2*f0 # Maximum frequency
dx=cmin/(fmax*c1) # Spatial discretization (in m)
dt=dx/(cmax)*c2 # Temporal discretization (in s)
lampda_min=cmin/fmax # Smallest wavelength
# Output model parameter:
print("Model size: x:",dx*nx,"in m")
print("Temporal discretization: ",dt," s")
print("Spatial discretization: ",dx," m")
print("Number of gridpoints per minimum wavelength: ",lampda_min/dx)
x=np.arange(0,dx*nx,dx) # Space vector
t=np.arange(0,T,dt) # Time vector
nt=np.size(t) # Number of time steps
# Plotting model
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.subplots_adjust(wspace=0.4,right=1.6)
ax1.plot(x,modell_v)
ax1.set_ylabel('VP in m/s')
ax1.set_xlabel('Depth in m')
ax1.set_title('P-wave velocity')
ax2.plot(x,rho)
ax2.set_ylabel('Density in g/cm^3')
ax2.set_xlabel('Depth in m')
ax2.set_title('Density');
tau=np.pi*f0*(t-1.5/f0)
q=q0*(1.0-2.0*tau**2.0)*np.exp(-tau**2)
# Plotting source signal
plt.figure(3)
plt.plot(t,q)
plt.title('Source signal Ricker-Wavelet')
plt.ylabel('Amplitude')
plt.xlabel('Time in s')
plt.draw()
# Init Seismograms
Seismogramm=np.zeros((3,nt)); # Three seismograms
# Calculation of some coefficients
i_dx=1.0/(dx)
print("Starting time stepping...")
## Time stepping
for n in range(2,nt):
# Inject source wavelet
p[xscr]=p[xscr]+q[n]
# Update velocity
for kx in range(5,nx-4):
# Calculating spatial derivative
p_x[kx]=i_dx*9.0/8.0*(p[kx+1]-p[kx])-i_dx*1.0/24.0*(p[kx+2]-p[kx-1])
# Update velocity
vx[kx]=vx[kx]-dt/rho[kx]*(13.0/12.0*p_x[kx]-5.0/24.0*p_x2[kx]+1.0/6.0*p_x3[kx]-1.0/24.0*p_x4[kx])
    # Save old spatial derivatives for the Adams-Bashforth method
np.copyto(p_x4,p_x3)
np.copyto(p_x3,p_x2)
np.copyto(p_x2,p_x)
# Update pressure
for kx in range(5,nx-4):
# Calculating spatial derivative
vx_x[kx]= i_dx*9.0/8.0*(vx[kx]-vx[kx-1])-i_dx*1.0/24.0*(vx[kx+1]-vx[kx-2])
# Update pressure
p[kx]=p[kx]-l[kx]*dt*(13.0/12.0*vx_x[kx]-5.0/24.0*vx_x2[kx]+1.0/6.0*vx_x3[kx]-1.0/24.0*vx_x4[kx])
    # Save old spatial derivatives for the Adams-Bashforth method
np.copyto(vx_x4,vx_x3)
np.copyto(vx_x3,vx_x2)
np.copyto(vx_x2,vx_x)
# Save seismograms
Seismogramm[0,n]=p[xrec1]
Seismogramm[1,n]=p[xrec2]
Seismogramm[2,n]=p[xrec3]
print("Finished time stepping!")
## Save seismograms
np.save("Seismograms/FD_1D_DX4_DT4_ABS",Seismogramm)
## Plot seismograms
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.4,right=1.6, top = 2 )
ax1.plot(t,Seismogramm[0,:])
ax1.set_title('Seismogram 1')
ax1.set_ylabel('Amplitude')
ax1.set_xlabel('Time in s')
ax1.set_xlim(0, T)
ax2.plot(t,Seismogramm[1,:])
ax2.set_title('Seismogram 2')
ax2.set_ylabel('Amplitude')
ax2.set_xlabel('Time in s')
ax2.set_xlim(0, T)
ax3.plot(t,Seismogramm[2,:])
ax3.set_title('Seismogram 3')
ax3.set_ylabel('Amplitude')
ax3.set_xlabel('Time in s')
ax3.set_xlim(0, T);
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Grid search in Scikit-learn
Step2: To keep things simple, we'll first convert the label column to numeric and then
Step3: Next, we'll build our Scikit-learn model and define the hyperparameters we want to optimize using grid search.
Step4: Let's see which hyperparameters resulted in the best accuracy.
Step5: Finally, we can generate some test predictions on our model and evaluate its accuracy.
Step6: Hyperparameter tuning with keras-tuner
Step7: Hyperparameter tuning on Cloud AI Platform
Step8: In the cells below, replace your-project-id with the ID of your Cloud project, and your-gcs-bucket with the name of your Cloud Storage bucket.
Step9: To run this on AI Platform, we'll need to package up our model code in Python's package format, which includes an empty __init__.py file and a setup.py to install dependencies (in this case PyTorch, Scikit-learn, and Pandas).
Step11: Below, we're copying our model training code to a model.py file in our trainer package directory. This code runs training and after training completes, reports the model's final loss to Cloud HyperTune.
Step12: The command below will submit your training job to AI Platform. To view the logs, and the results of each HyperTune trial visit your Cloud console.
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import time
from tensorflow import keras
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score
# First, download the data
# We've made it publicly available in Google Cloud Storage
!gsutil cp gs://ml-design-patterns/mushrooms.csv .
mushroom_data = pd.read_csv('mushrooms.csv')
mushroom_data.head()
# 1 = edible, 0 = poisonous
mushroom_data.loc[mushroom_data['class'] == 'p', 'class'] = 0
mushroom_data.loc[mushroom_data['class'] == 'e', 'class'] = 1
labels = mushroom_data.pop('class')
dummy_data = pd.get_dummies(mushroom_data)
# Split the data
train_size = int(len(mushroom_data) * .8)
train_data = dummy_data[:train_size]
test_data = dummy_data[train_size:]
train_labels = labels[:train_size].astype(int)
test_labels = labels[train_size:].astype(int)
model = RandomForestClassifier()
grid_vals = {
'max_depth': [5, 10, 100],
'n_estimators': [100, 150, 200]
}
grid_search = GridSearchCV(model, param_grid=grid_vals, scoring='accuracy')
# Train the model while running hyperparameter trials
grid_search.fit(train_data.values, train_labels.values)
grid_search.best_params_
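# Illustrative sketch: inspect the mean cross-validated accuracy of every
# max_depth / n_estimators combination that the grid search tried, not just the best one.
for params, score in zip(grid_search.cv_results_['params'],
                         grid_search.cv_results_['mean_test_score']):
    print(params, round(score, 4))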
grid_predict = grid_search.predict(test_data.values)
grid_acc = accuracy_score(test_labels.values, grid_predict)
grid_f = f1_score(test_labels.values, grid_predict)
print('Accuracy: ', grid_acc)
print('F1-Score: ', grid_f)
!pip install keras-tuner --quiet
import kerastuner as kt
# Get the mnist data
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
def build_model(hp):
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(hp.Int('first_hidden', 128, 256, step=32), activation='relu'),
keras.layers.Dense(hp.Int('second_hidden', 16, 128, step=32), activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
model.compile(
optimizer=tf.keras.optimizers.Adam(
hp.Float('learning_rate', .005, .01, sampling='log')),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
tuner = kt.BayesianOptimization(
build_model,
objective='val_accuracy',
max_trials=30
)
tuner.search(x_train, y_train, validation_split=0.1, epochs=10)
best_hps = tuner.get_best_hyperparameters(num_trials = 1)[0]
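# Illustrative sketch: build and train a model with the best hyperparameters found by
# the tuner. The epoch count here is an arbitrary choice for the sketch.
best_model = tuner.hypermodel.build(best_hps)
best_model.fit(x_train, y_train, validation_split=0.1, epochs=10)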
from google.colab import auth
auth.authenticate_user()
!gcloud config set project your-project-id
BUCKET_URL = 'gs://your-gcs-bucket'
!mkdir trainer
!touch trainer/__init__.py
%%writefile setup.py
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['torch>=1.5', 'scikit-learn>=0.20', 'pandas>=1.0']
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='My training application package.'
)
%%writefile trainer/model.py
import argparse
import hypertune
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.utils import shuffle
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import normalize
def get_args():
Argument parser.
Returns:
Dictionary of arguments.
parser = argparse.ArgumentParser(description='PyTorch MNIST')
parser.add_argument('--job-dir', # handled automatically by AI Platform
help='GCS location to write checkpoints and export ' \
'models')
parser.add_argument('--lr', # Specified in the config file
type=float,
default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', # Specified in the config file
type=float,
default=0.5,
help='SGD momentum (default: 0.5)')
parser.add_argument('--hidden-layer-size', # Specified in the config file
type=int,
default=8,
help='hidden layer size')
args = parser.parse_args()
return args
def train_model(args):
# Get the data
natality = pd.read_csv('https://storage.googleapis.com/ml-design-patterns/natality.csv')
natality = natality.dropna()
natality = shuffle(natality, random_state = 2)
natality.head()
natality_labels = natality['weight_pounds']
natality = natality.drop(columns=['weight_pounds'])
train_size = int(len(natality) * 0.8)
traindata_natality = natality[:train_size]
trainlabels_natality = natality_labels[:train_size]
testdata_natality = natality[train_size:]
testlabels_natality = natality_labels[train_size:]
# Normalize and convert to PT tensors
normalized_train = normalize(np.array(traindata_natality.values), axis=0)
normalized_test = normalize(np.array(testdata_natality.values), axis=0)
train_x = torch.Tensor(normalized_train)
train_y = torch.Tensor(np.array(trainlabels_natality))
test_x = torch.Tensor(normalized_test)
test_y = torch.Tensor(np.array(testlabels_natality))
# Define our data loaders
train_dataset = torch.utils.data.TensorDataset(train_x, train_y)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)
test_dataset = torch.utils.data.TensorDataset(test_x, test_y)
test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False)
# Define the model, while tuning the size of our hidden layer
model = nn.Sequential(nn.Linear(len(train_x[0]), args.hidden_layer_size),
nn.ReLU(),
nn.Linear(args.hidden_layer_size, 1))
criterion = nn.MSELoss()
# Tune hyperparameters in our optimizer
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
epochs = 10
for e in range(epochs):
for batch_id, (data, label) in enumerate(train_dataloader):
optimizer.zero_grad()
y_pred = model(data)
label = label.view(-1,1)
loss = criterion(y_pred, label)
loss.backward()
optimizer.step()
val_mse = 0
num_batches = 0
# Evaluate accuracy on our test set
with torch.no_grad():
for i, (data, label) in enumerate(test_dataloader):
num_batches += 1
y_pred = model(data)
mse = criterion(y_pred, label.view(-1,1))
val_mse += mse.item()
avg_val_mse = (val_mse / num_batches)
# Report the metric we're optimizing for to AI Platform's HyperTune service
# In this example, we're mimizing loss on our test set
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='val_mse',
metric_value=avg_val_mse,
global_step=epochs
)
def main():
args = get_args()
print('in main', args)
train_model(args)
if __name__ == '__main__':
main()
%%writefile config.yaml
trainingInput:
hyperparameters:
goal: MINIMIZE
maxTrials: 10
maxParallelTrials: 5
hyperparameterMetricTag: val_mse
enableTrialEarlyStopping: TRUE
params:
- parameterName: lr
type: DOUBLE
minValue: 0.0001
maxValue: 0.1
scaleType: UNIT_LINEAR_SCALE
- parameterName: momentum
type: DOUBLE
minValue: 0.0
maxValue: 1.0
scaleType: UNIT_LINEAR_SCALE
- parameterName: hidden-layer-size
type: INTEGER
minValue: 8
maxValue: 32
scaleType: UNIT_LINEAR_SCALE
MAIN_TRAINER_MODULE = "trainer.model"
TRAIN_DIR = os.getcwd() + '/trainer'
JOB_DIR = BUCKET_URL + '/output'
REGION = "us-central1"
# Create a unique job name (run this each time you submit a job)
timestamp = str(datetime.datetime.now().time())
JOB_NAME = 'caip_training_' + str(int(time.time()))
# Configure and submit the training job
!gcloud ai-platform jobs submit training $JOB_NAME \
--scale-tier basic \
--package-path $TRAIN_DIR \
--module-name $MAIN_TRAINER_MODULE \
--job-dir $JOB_DIR \
--region $REGION \
--runtime-version 2.1 \
--python-version 3.7 \
--config config.yaml
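# Illustrative sketch: the job status and logs can also be followed from the notebook
# instead of the Cloud console.
!gcloud ai-platform jobs describe $JOB_NAME
!gcloud ai-platform jobs stream-logs $JOB_NAME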
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Specify an epoch parser with an epoch filename and the specification filename.
|
<ASSISTANT_TASK:>
Python Code:
%cat epochs_spec.cfg
%cat epochs.cfg
ep = burin.config.EpochParser('epochs.cfg', 'epochs_spec.cfg')
ep.is_valid()
ep.get('cal_version', date='20180101')
ep.get('cal_version', date='20180101.120000')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Damped, driven nonlinear pendulum
Step4: Write a function derivs for usage with scipy.integrate.odeint that computes the derivatives for the damped, driven harmonic oscillator. The solution vector at each time will be $\vec{y}(t) = (\theta(t),\omega(t))$.
Step5: Simple pendulum
Step7: Damped pendulum
Step8: Here is an example of the output of your plot_pendulum function that should show a decaying spiral.
Step9: Use interact to explore the plot_pendulum function with
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.integrate import odeint
from IPython.html.widgets import interact, fixed
g = 9.81 # m/s^2
l = 0.5 # length of pendulum, in meters
tmax = 50. # seconds
t = np.linspace(0, tmax, int(100*tmax))
def derivs(y, t, a, b, omega0):
Compute the derivatives of the damped, driven pendulum.
Parameters
----------
y : ndarray
The solution vector at the current time t[i]: [theta[i],omega[i]].
t : float
The current time t[i].
a, b, omega0: float
The parameters in the differential equation.
Returns
-------
dy : ndarray
        The vector of derivatives at t[i]: [dtheta[i],domega[i]].
# YOUR CODE HERE
    theta = y[0]
    omega = y[1]
    dtheta = omega
    domega = -(g/l) * np.sin(theta) - a*omega - b*np.sin(omega0*t)
    return [dtheta, domega]
assert np.allclose(derivs(np.array([np.pi,1.0]), 0, 1.0, 1.0, 1.0), [1.,-1.])
def energy(y):
Compute the energy for the state array y.
The state array y can have two forms:
1. It could be an ndim=1 array of np.array([theta,omega]) at a single time.
2. It could be an ndim=2 array where each row is the [theta,omega] at single
time.
Parameters
----------
y : ndarray, list, tuple
A solution vector
Returns
-------
E/m : float (ndim=1) or ndarray (ndim=2)
The energy per mass.
# YOUR CODE HERE
raise NotImplementedError()
assert np.allclose(energy(np.array([np.pi,0])),g)
assert np.allclose(energy(np.ones((10,2))), np.ones(10)*energy(np.array([1,1])))
# YOUR CODE HERE
raise NotImplementedError()
# YOUR CODE HERE
raise NotImplementedError()
# YOUR CODE HERE
raise NotImplementedError()
assert True # leave this to grade the two plots and their tuning of atol, rtol.
def plot_pendulum(a=0.0, b=0.0, omega0=0.0):
Integrate the damped, driven pendulum and make a phase plot of the solution.
# YOUR CODE HERE
raise NotImplementedError()
plot_pendulum(0.5, 0.0, 0.0)
# YOUR CODE HERE
raise NotImplementedError()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 例題5-1
Step2: Durbin-Watson
Step3: DW=1.094となり、上限分布において有意水準5%でも帰無仮説を棄却することができ、自己相関が存在すると結論することができる。
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# -*- coding:utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.stats.stattools import durbin_watson
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# データ読み込み
data = pd.read_csv('example/k0501.csv')
data
# 説明変数設定
X = data[['X']]
X = sm.add_constant(X)
X
# 被説明変数設定
Y = data['Y']
Y
# OLSの実行(Ordinary Least Squares: 最小二乗法)
model = sm.OLS(Y,X)
results = model.fit()
print(results.summary())
# グラフ生成
plt.plot(data["X"], data["Y"], 'o', label="data")
plt.plot(data["X"], results.fittedvalues, label="OLS")
plt.xlim(min(data["X"])-1, max(data["X"])+1)
plt.ylim(min(data["Y"])-1, max(data["Y"])+1)
plt.title('5-1: Auto Correlation')
plt.legend(loc=2)
plt.show()
# データ読み込み
data = pd.read_csv('example/k0502.csv')
data
# 説明変数設定
X = data[['X']]
X = sm.add_constant(X)
# 被説明変数設定
Y = data['Y']
# OLSの実行(Ordinary Least Squares: 最小二乗法)
model = sm.OLS(Y,X)
results = model.fit()
print(results.summary())
# グラフ生成
plt.plot(data["X"], data["Y"], 'o', label="data")
plt.plot(data["X"], results.fittedvalues, label="OLS")
plt.xlim(min(data["X"])-1, max(data["X"])+1)
plt.ylim(min(data["Y"])-1, max(data["Y"])+1)
plt.title('5-2: Auto Correlation')
plt.legend(loc=2)
plt.show()
data['dX'] = np.nan
data['dY'] = np.nan
for i in range(len(data)):
if i == 0:
data['dX'][i] = np.nan
data['dY'][i] = np.nan
else:
data['dX'][i] = data['X'][i] - data['X'][i-1]
data['dY'][i] = data['Y'][i] - data['Y'][i-1]
data
# 説明変数設定
X = data['dX'][1:]
X = sm.add_constant(X)
# 被説明変数設定
Y = data['dY'][1:]
# OLSの実行(Ordinary Least Squares: 最小二乗法)
model = sm.OLS(Y,X)
results = model.fit()
print(results.summary())
# グラフ生成
plt.plot(data["dX"], data["dY"], 'o', label="data")
plt.plot(data["dX"][1:], results.fittedvalues, label="OLS")
plt.xlim(min(data["dX"][1:])-0.5, max(data["dX"][1:])+0.5)
plt.ylim(min(data["dY"][1:])-0.5, max(data["dY"][1:])+0.5)
plt.title('5-3: Auto Correlation')
plt.legend(loc=2)
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercise 1
Step2: Now it's your turn. Instead of a horizontal line detector, you will create a vertical line detector.
Step3: If you'd like a hint or the solution, uncomment the appropriate line below.
Step4: Exercise 2
|
<ASSISTANT_TASK:>
Python Code:
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning.exercise_1 import *
print("Setup Complete")
horizontal_line_conv = [[1, 1],
[-1, -1]]
# load_my_image and visualize_conv are utility functions provided for this exercise
original_image = load_my_image()
visualize_conv(original_image, horizontal_line_conv)
vertical_line_conv = ____
# Check your answer
q_1.check()
visualize_conv(original_image, vertical_line_conv)
#_COMMENT_IF(PROD)_
q_1.hint()
#q_1.solution()
#%%RM_IF(PROD)%%
vertical_line_conv = [[1, -1],
[1, -1]]
q_1.assert_check_passed()
vertical_line_conv = [[1, 1],
[1, 1]]
q_1.assert_check_failed()
# Check your answer (Run this code cell to receive credit!)
q_2.solution()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
a = pd.DataFrame(np.array([[1, 2],[3, 4]]), columns=['one', 'two'])
b = pd.DataFrame(np.array([[5, 6],[7, 8],[9, 10]]), columns=['one', 'two'])
def g(a,b):
if len(a) < len(b):
        a = a.append(pd.DataFrame(np.array([[np.nan, np.nan]] * (len(b)-len(a))), columns=a.columns), ignore_index=True)
elif len(a) > len(b):
        b = b.append(pd.DataFrame(np.array([[np.nan, np.nan]] * (len(a)-len(b))), columns=a.columns), ignore_index=True)
return pd.DataFrame(np.rec.fromarrays((a.values, b.values)).tolist(), columns=a.columns, index=a.index)
result = g(a.copy(),b.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We create our feature and targets the same as before using train_test_split. This part looks like what you've already seen.
Step2: Creating and fitting the model is similar to what you've done before, except you'll use RandomForestClassifier instead of RandomForestRegressor.
Step3: The simplest metric for classification models is the accuracy, the fraction predictions that are correct. Scikit-learn provides metrics.accuracy_score to calculate this.
Step4: Confusion Matrix
Step5: It's a little easier to understand as a nice little figure like so
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import sklearn.metrics as metrics
data = pd.read_csv('../input/mobile-price-classification/train.csv')
data.head()
data.columns
# Set variables for the targets and features
y = data['price_range']
X = data.drop('price_range', axis=1)
# Split the data into training and validation sets
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=7)
# Create the classifier and fit it to our training data
model = RandomForestClassifier(random_state=7, n_estimators=100)
model.fit(train_X, train_y)
# Predict classes given the validation features
pred_y = model.predict(val_X)
# Calculate the accuracy as our performance metric
accuracy = metrics.accuracy_score(val_y, pred_y)
print("Accuracy: ", accuracy)
# Calculate the confusion matrix itself
confusion = metrics.confusion_matrix(val_y, pred_y)
print(f"Confusion matrix:\n{confusion}")
# Normalizing by the true label counts to get rates
print(f"\nNormalized confusion matrix:")
for row in confusion:
print(row / row.sum())
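# Illustrative sketch: the confusion matrix drawn as a figure, as described above.
# Matplotlib is an assumed extra dependency for this snippet.
import matplotlib.pyplot as plt
plt.imshow(confusion, cmap='Blues')
plt.colorbar()
plt.xlabel('Predicted price range')
plt.ylabel('True price range')
plt.title('Confusion matrix')
plt.show()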
probs = model.predict_proba(val_X)
print(probs)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data generation
Step2: DecisionTreeClassifier model
Step3: Decision surface
|
<ASSISTANT_TASK:>
Python Code:
from matplotlib.colors import ListedColormap
from sklearn import cross_validation, datasets, metrics, tree
import numpy as np
%pylab inline
classification_problem = datasets.make_classification(n_features = 2, n_informative = 2,
n_classes = 3, n_redundant=0,
n_clusters_per_class=1, random_state=3)
colors = ListedColormap(['red', 'blue', 'yellow'])
light_colors = ListedColormap(['lightcoral', 'lightblue', 'lightyellow'])
pylab.figure(figsize=(8,6))
pylab.scatter(map(lambda x: x[0], classification_problem[0]), map(lambda x: x[1], classification_problem[0]),
c=classification_problem[1], cmap=colors, s=100)
train_data, test_data, train_labels, test_labels = cross_validation.train_test_split(classification_problem[0],
classification_problem[1],
test_size = 0.3,
random_state = 1)
clf = tree.DecisionTreeClassifier(random_state=1)
clf.fit(train_data, train_labels)
predictions = clf.predict(test_data)
metrics.accuracy_score(test_labels, predictions)
predictions
def get_meshgrid(data, step=.05, border=.5,):
x_min, x_max = data[:, 0].min() - border, data[:, 0].max() + border
y_min, y_max = data[:, 1].min() - border, data[:, 1].max() + border
return np.meshgrid(np.arange(x_min, x_max, step), np.arange(y_min, y_max, step))
def plot_decision_surface(estimator, train_data, train_labels, test_data, test_labels,
colors = colors, light_colors = light_colors):
#fit model
estimator.fit(train_data, train_labels)
#set figure size
pyplot.figure(figsize = (16, 6))
#plot decision surface on the train data
pyplot.subplot(1,2,1)
xx, yy = get_meshgrid(train_data)
mesh_predictions = np.array(estimator.predict(np.c_[xx.ravel(), yy.ravel()])).reshape(xx.shape)
pyplot.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
pyplot.scatter(train_data[:, 0], train_data[:, 1], c = train_labels, s = 100, cmap = colors)
pyplot.title('Train data, accuracy={:.2f}'.format(metrics.accuracy_score(train_labels, estimator.predict(train_data))))
#plot decision surface on the test data
pyplot.subplot(1,2,2)
pyplot.pcolormesh(xx, yy, mesh_predictions, cmap = light_colors)
pyplot.scatter(test_data[:, 0], test_data[:, 1], c = test_labels, s = 100, cmap = colors)
pyplot.title('Test data, accuracy={:.2f}'.format(metrics.accuracy_score(test_labels, estimator.predict(test_data))))
estimator = tree.DecisionTreeClassifier(random_state = 1, max_depth = 1)
plot_decision_surface(estimator, train_data, train_labels, test_data, test_labels)
plot_decision_surface(tree.DecisionTreeClassifier(random_state = 1, max_depth = 2),
train_data, train_labels, test_data, test_labels)
plot_decision_surface(tree.DecisionTreeClassifier(random_state = 1, max_depth = 3),
train_data, train_labels, test_data, test_labels)
plot_decision_surface(tree.DecisionTreeClassifier(random_state = 1),
train_data, train_labels, test_data, test_labels)
plot_decision_surface(tree.DecisionTreeClassifier(random_state = 1, min_samples_leaf = 3),
train_data, train_labels, test_data, test_labels)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The required files are in '../ecoli_raw/'
Step2: Checking that CLdb is installed in PATH
Step3: Setting up the CLdb directory
Step4: Downloading the genome genbank files. Using the 'GIs.txt' file
Step5: Creating/loading CLdb of E. coli CRISPR data
Step6: Making CLdb sqlite file
Step7: Setting up CLdb config
Step8: Loading loci
Step9: Notes on the loci table
Step10: Notes on loading
Step11: The summary doesn't show anything for spacers, DRs, genes or leaders!
Step12: Note
Step13: Note
Step14: Setting array sense strand
Step15: Spacer and DR clustering
Step16: Database summary
|
<ASSISTANT_TASK:>
Python Code:
# path to raw files
## CHANGE THIS!
rawFileDir = "~/perl/projects/CLdb/data/Methanosarcina/"
# directory where the CLdb database will be created
## CHANGE THIS!
workDir = "~/t/CLdb_Methanosarcina/"
# viewing file links
import os
import zipfile
import csv
from IPython.display import FileLinks
# pretty viewing of tables
## get from: http://epmoyer.github.io/ipy_table/
from ipy_table import *
rawFileDir = os.path.expanduser(rawFileDir)
workDir = os.path.expanduser(workDir)
FileLinks(rawFileDir)
!CLdb -h
# this makes the working directory
if not os.path.isdir(workDir):
os.makedirs(workDir)
# unarchiving files in the raw folder over to the newly made working folder
files = ['array.zip','loci.zip', 'accessions.txt.zip']
files = [os.path.join(rawFileDir, x) for x in files]
for f in files:
if not os.path.isfile(f):
raise IOError, 'Cannot find file: {}'.format(f)
else:
zip = zipfile.ZipFile(f)
zip.extractall(path=workDir)
print 'unzipped raw files:'
FileLinks(workDir)
# making genbank directory
genbankDir = os.path.join(workDir, 'genbank')
if not os.path.isdir(genbankDir):
os.makedirs(genbankDir)
# downloading genomes
!cd $genbankDir; \
CLdb -- accession-GI2fastaGenome -format genbank -fork 9 < ../accessions.txt
# checking files
!cd $genbankDir; \
ls -thlc *.gbk
!CLdb -- makeDB -h
!cd $workDir; \
CLdb -- makeDB -r -drop
CLdbFile = os.path.join(workDir, 'CLdb.sqlite')
print 'CLdb file location: {}'.format(CLdbFile)
s = 'DATABASE = ' + CLdbFile
configFile = os.path.join(os.path.expanduser('~'), '.CLdb')
with open(configFile, 'wb') as outFH:
outFH.write(s)
print 'Config file written: {}'.format(configFile)
# checking that the config is set
!CLdb --config-params
lociFile = os.path.join(workDir, 'loci', 'loci.txt')
# reading in file
tbl = []
with open(lociFile, 'rb') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
tbl.append(row)
# making table
make_table(tbl)
apply_theme('basic')
!CLdb -- loadLoci -h
!CLdb -- loadLoci < $lociFile
# This is just a quick summary of the database
## It should show 10 loci for the 'loci' rows
!CLdb -- summary
# an example array file (obtained from CRISPRFinder)
arrayFile = os.path.join(workDir, 'array', 'Methanosarcina_acetivorans_C2A_1.txt')
!head $arrayFile
# loading CRISPR array info
!CLdb -- loadArrays
# This is just a quick summary of the database
!CLdb -- summary
geneDir = os.path.join(workDir, 'genes')
if not os.path.isdir(geneDir):
os.makedirs(geneDir)
!cd $geneDir; \
CLdb -- getGenesInLoci 2> CAS.log > CAS.txt
# checking output
!cd $geneDir; \
head -n 5 CAS.log; \
echo -----------; \
tail -n 5 CAS.log; \
echo -----------; \
head -n 5 CAS.txt
# loading gene table into the database
!cd $geneDir; \
CLdb -- loadGenes < CAS.txt
!CLdb -- setSenseStrand
!CLdb -- clusterArrayElements -s -r
# summary
!cd $workDir; \
CLdb -- summary -name -subtype > summary.txt
# checking output
!cd $workDir; \
cat summary.txt
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: Imagenet 2012 網頁
|
<ASSISTANT_TASK:>
Python Code:
from keras.applications import imagenet_utils
imagenet_utils.CLASS_INDEX_PATH
from urllib.request import urlopen
import json
with urlopen(imagenet_utils.CLASS_INDEX_PATH) as jsonf:
data = jsonf.read()
class_dict = json.loads(data.decode())
[class_dict[str(i)][1] for i in range(1000)]
# Download the images
import os
import urllib
from urllib.request import urlretrieve
dataset = 'ILSVRC2012_val_1000.tar'
def reporthook(a,b,c):
print("\rdownloading: %5.1f%%"%(a*b*100.0/c), end="")
if not os.path.isfile(dataset):
origin = "https://www.dropbox.com/s/vippynksgd8c6qt/ILSVRC2012_val_1000.tar?dl=1"
print('Downloading data from %s' % origin)
urlretrieve(origin, dataset, reporthook=reporthook)
# Extract the images
from tarfile import TarFile
tar = TarFile(dataset)
tar.extractall()
# Read the images
from PIL import Image as pimage
from glob import glob
imgs = []
files = list(glob('ILSVRC2012_img_val/ILSVRC2012_val_*.JPEG'))
for fn in files:
img = pimage.open(fn)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img.resize((224,224)))
imgs.append(img)
imgs = np.array(imgs)
# Prepare the data: convert to the common input format (subtract the per-channel mean colors)
p_imgs = imagenet_utils.preprocess_input(np.float32(imgs))
del imgs
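# Sanity check (assuming all 1000 validation images were read successfully):
# the preprocessed batch should have shape (1000, 224, 224, 3).
print(p_imgs.shape)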
# Run the actual predictions with the pretrained model
predictions = pretrained.predict(p_imgs)
# Decode the predictions into human-readable class labels
results = imagenet_utils.decode_predictions(predictions)
from IPython.display import Image, HTML, display
for fn, res in zip(files[:100], results[:100]):
res_text = "".join("<li>{:05.2f}% : {}</li>".format(x[2]*100, x[1]) for x in res)
    display(HTML('''
    <table><tr>
    <td><img width=200 src="{}" /></td>
    <td><ul>{}</ul></td>
    </tr>
    </table>
    '''.format(fn, res_text)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Okay, using the in operator gives us a great deal of simplicity, but we should understand what happens behind the scenes of the in operator.
Step2: Finding a specific item in an unsorted list results in a worst-case time of O(n), since the search may have to go all the way to the end, taking n iterations.
Step3: Finding the Smallest Value
Step4: We can do the same thing to find the maximum number, similar to Python's max() implementation
Step5: The Binary Search
Step6: Sorting
Step7: Selection Sort
Step8: Insertion Sort
Step9: Working with Sorted Lists
|
<ASSISTANT_TASK:>
Python Code:
theArray = range(0,100)
key = 101
if key in theArray:
print("The key is in the array.")
else:
print("The key is not in the array.")
def linearSearch(theValues, target):
n = len(theValues)
for i in range(n):
# If the target is in the ith element, return True
if theValues[i] == target:
return True
# If not found, return False.
return False
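# Quick illustrative check of linearSearch.
values = [54, 26, 93, 17, 77, 31]
print(linearSearch(values, 93)) # expected: True
print(linearSearch(values, 42)) # expected: False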
def sortedLinearSearch(theValues, item):
n = len(theValues)
for i in range(n):
# If the target is found in the ith element, return True
if theValues[i] == item:
return True
# If target is largers than the ith element, it's not in the sequence.
elif theValues[i] > item:
return False
# The item is not in the sequence.
return False
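# Quick illustrative check of sortedLinearSearch; the input must already be sorted
# so that the early-exit comparison is valid.
sorted_values = [2, 5, 11, 23, 47, 89]
print(sortedLinearSearch(sorted_values, 23)) # expected: True
print(sortedLinearSearch(sorted_values, 24)) # expected: False (stops early at 47)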
def findSmallest(theValues):
n = len(theValues)
# Assume the first item is the smallest value
smallest = theValues[0]
# Determine if any other item in the sequence is smaller.
for i in range(1,n):
if theValues[i] < smallest:
smallest = theValues[i]
# Return the smallest found.
return smallest
def findBiggest(theValues):
n = len(theValues)
# Assuming the first item is the biggest value
biggest = theValues[0]
# Determine if any other item in the sequence is bigger.
for i in range(1, n):
if theValues[i] > biggest:
biggest = theValues[i]
#Return the biggest found.
return biggest
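# Quick illustrative check of findSmallest and findBiggest.
sample = [10, 4, 25, 3, 18]
print(findSmallest(sample)) # expected: 3
print(findBiggest(sample)) # expected: 25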
def binarySearch(theValues, target):
# Start with the entire sequence of elements. 0:length
low = 0
    high = len(theValues) - 1
# Repeatedly subdivide the sequence in half until the target is found.
while low <= high:
# Find the midpoint of the sequence.
mid = (high + low) // 2
# Does the midpoint contain the target?
if theValues[mid] == target:
return True
# Or does the target precede the midpoint?
elif target < theValues[mid]:
high = mid - 1 # Update the upper bound
# Or does it follow the midpoint
else:
low = mid + 1 # Update the lower bound
# If the sequence cannot be subdivided further, we're done.
return False
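# Quick illustrative check of binarySearch; the input must already be sorted.
sorted_list = list(range(0, 100, 3))
print(binarySearch(sorted_list, 33)) # expected: True
print(binarySearch(sorted_list, 34)) # expected: False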
# Sorts a sequence in ascending order using the bubble sort algorith.
def bubbleSort(seq):
not_sorted = True
n = len(seq)
print "At the beginning: "
print seq
while not_sorted:
# If following statements fail next statement will stop the loop
not_sorted = False
for i in range(n-1):
if seq[i] <= seq[i+1]:
continue;
else:
temp = seq[i]
seq[i] = seq[i+1]
seq[i+1] = temp
not_sorted = True
print seq
return seq
import random
_list = random.sample(xrange(1, 101), 10)
_list
bubbleSort(_list)
# Sorts a sequence in ascending order using the selection sort algorithm
def selectionSort(theSeq):
n = len(theSeq)
for i in range(n-1):
# Assume the ith element is the smallest.
smallNdx = i
for j in range(i+1, n):
if theSeq[j] < theSeq[smallNdx]:
smallNdx = j
# Swap the ith value and smallNdx value only if the smallest value is
# not really in its proper position. Some implementations omit testing
# the condition and always swap the two values.
if smallNdx != i:
tmp = theSeq[i]
theSeq[i] = theSeq[smallNdx]
theSeq[smallNdx] = tmp
return theSeq
import random
_list = random.sample(xrange(1, 101), 10)
print _list
selectionSort(_list)
# Sorts a sequence in ascending order using the insertion sort algorithm.
def insertionSort(theSeq):
n = len(theSeq)
# Starts with the first item as the only sorted entry.
for i in range(1, n):
# Save the value to be positioned.
value = theSeq[i]
# Find the position where value fits in the ordered part of the list.
pos = i
while pos > 0 and value < theSeq[pos - 1]:
# Shift the items to the rigth during search
theSeq[pos] = theSeq[pos - 1]
pos -= 1
theSeq[pos] = value
return theSeq
import random
_list = random.sample(xrange(1, 101), 10)
_list
insertionSort(_list)
# Modified version of the binary search that returns the index within
# a sorted sequence indicating where the target should be located.
def findSortedPosition(theList, target):
low = 0
high = len(theList) - 1
while low <= high:
mid = (high + low) // 2
if theList[mid] == target:
# Index of the target
return mid
elif target < theList[mid]:
high = mid - 1
else:
low = mid + 1
# Index where the target value should be.
return low
_list = range(1,24,2)
print(_list)
print("Index is ", findSortedPosition(_list, 12))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set parameters
Step2: Find stimulus event followed by quick button presses
Step3: View evoked response
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD-3-Clause
import mne
from mne import io
from mne.event import define_target_events
from mne.datasets import sample
import matplotlib.pyplot as plt
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + STI 014 - bad channels (modify to your needs)
include = [] # or stim channels ['STI 014']
raw.info['bads'] += ['EEG 053'] # bads
# pick MEG channels
picks = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=True,
include=include, exclude='bads')
reference_id = 5 # presentation of a smiley face
target_id = 32 # button press
sfreq = raw.info['sfreq'] # sampling rate
tmin = 0.1 # trials leading to very early responses will be rejected
tmax = 0.59 # ignore face stimuli followed by button press later than 590 ms
new_id = 42 # the new event id for a hit. If None, reference_id is used.
fill_na = 99 # the fill value for misses
events_, lag = define_target_events(events, reference_id, target_id,
sfreq, tmin, tmax, new_id, fill_na)
print(events_) # The 99 indicates missing or too late button presses
# besides the events also the lag between target and reference is returned
# this could e.g. be used as parametric regressor in subsequent analyses.
print(lag[lag != fill_na]) # lag in milliseconds
# #############################################################################
# Construct epochs
tmin_ = -0.2
tmax_ = 0.4
event_id = dict(early=new_id, late=fill_na)
epochs = mne.Epochs(raw, events_, event_id, tmin_,
tmax_, picks=picks, baseline=(None, 0),
reject=dict(mag=4e-12))
# average epochs and get an Evoked dataset.
early, late = [epochs[k].average() for k in event_id]
times = 1e3 * epochs.times # time in milliseconds
title = 'Evoked response followed by %s button press'
fig, axes = plt.subplots(2, 1)
early.plot(axes=axes[0], time_unit='s')
axes[0].set(title=title % 'late', ylabel='Evoked field (fT)')
late.plot(axes=axes[1], time_unit='s')
axes[1].set(title=title % 'early', ylabel='Evoked field (fT)')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 2. Key Properties --> Resolution
Step9: 2.2. Canonical Horizontal Resolution
Step10: 2.3. Range Horizontal Resolution
Step11: 2.4. Number Of Vertical Levels
Step12: 2.5. High Top
Step13: 3. Key Properties --> Timestepping
Step14: 3.2. Timestep Shortwave Radiative Transfer
Step15: 3.3. Timestep Longwave Radiative Transfer
Step16: 4. Key Properties --> Orography
Step17: 4.2. Changes
Step18: 5. Grid --> Discretisation
Step19: 6. Grid --> Discretisation --> Horizontal
Step20: 6.2. Scheme Method
Step21: 6.3. Scheme Order
Step22: 6.4. Horizontal Pole
Step23: 6.5. Grid Type
Step24: 7. Grid --> Discretisation --> Vertical
Step25: 8. Dynamical Core
Step26: 8.2. Name
Step27: 8.3. Timestepping Type
Step28: 8.4. Prognostic Variables
Step29: 9. Dynamical Core --> Top Boundary
Step30: 9.2. Top Heat
Step31: 9.3. Top Wind
Step32: 10. Dynamical Core --> Lateral Boundary
Step33: 11. Dynamical Core --> Diffusion Horizontal
Step34: 11.2. Scheme Method
Step35: 12. Dynamical Core --> Advection Tracers
Step36: 12.2. Scheme Characteristics
Step37: 12.3. Conserved Quantities
Step38: 12.4. Conservation Method
Step39: 13. Dynamical Core --> Advection Momentum
Step40: 13.2. Scheme Characteristics
Step41: 13.3. Scheme Staggering Type
Step42: 13.4. Conserved Quantities
Step43: 13.5. Conservation Method
Step44: 14. Radiation
Step45: 15. Radiation --> Shortwave Radiation
Step46: 15.2. Name
Step47: 15.3. Spectral Integration
Step48: 15.4. Transport Calculation
Step49: 15.5. Spectral Intervals
Step50: 16. Radiation --> Shortwave GHG
Step51: 16.2. ODS
Step52: 16.3. Other Flourinated Gases
Step53: 17. Radiation --> Shortwave Cloud Ice
Step54: 17.2. Physical Representation
Step55: 17.3. Optical Methods
Step56: 18. Radiation --> Shortwave Cloud Liquid
Step57: 18.2. Physical Representation
Step58: 18.3. Optical Methods
Step59: 19. Radiation --> Shortwave Cloud Inhomogeneity
Step60: 20. Radiation --> Shortwave Aerosols
Step61: 20.2. Physical Representation
Step62: 20.3. Optical Methods
Step63: 21. Radiation --> Shortwave Gases
Step64: 22. Radiation --> Longwave Radiation
Step65: 22.2. Name
Step66: 22.3. Spectral Integration
Step67: 22.4. Transport Calculation
Step68: 22.5. Spectral Intervals
Step69: 23. Radiation --> Longwave GHG
Step70: 23.2. ODS
Step71: 23.3. Other Flourinated Gases
Step72: 24. Radiation --> Longwave Cloud Ice
Step73: 24.2. Physical Representation
Step74: 24.3. Optical Methods
Step75: 25. Radiation --> Longwave Cloud Liquid
Step76: 25.2. Physical Representation
Step77: 25.3. Optical Methods
Step78: 26. Radiation --> Longwave Cloud Inhomogeneity
Step79: 27. Radiation --> Longwave Aerosols
Step80: 27.2. Physical Representation
Step81: 27.3. Optical Methods
Step82: 28. Radiation --> Longwave Gases
Step83: 29. Turbulence Convection
Step84: 30. Turbulence Convection --> Boundary Layer Turbulence
Step85: 30.2. Scheme Type
Step86: 30.3. Closure Order
Step87: 30.4. Counter Gradient
Step88: 31. Turbulence Convection --> Deep Convection
Step89: 31.2. Scheme Type
Step90: 31.3. Scheme Method
Step91: 31.4. Processes
Step92: 31.5. Microphysics
Step93: 32. Turbulence Convection --> Shallow Convection
Step94: 32.2. Scheme Type
Step95: 32.3. Scheme Method
Step96: 32.4. Processes
Step97: 32.5. Microphysics
Step98: 33. Microphysics Precipitation
Step99: 34. Microphysics Precipitation --> Large Scale Precipitation
Step100: 34.2. Hydrometeors
Step101: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Step102: 35.2. Processes
Step103: 36. Cloud Scheme
Step104: 36.2. Name
Step105: 36.3. Atmos Coupling
Step106: 36.4. Uses Separate Treatment
Step107: 36.5. Processes
Step108: 36.6. Prognostic Scheme
Step109: 36.7. Diagnostic Scheme
Step110: 36.8. Prognostic Variables
Step111: 37. Cloud Scheme --> Optical Cloud Properties
Step112: 37.2. Cloud Inhomogeneity
Step113: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Step114: 38.2. Function Name
Step115: 38.3. Function Order
Step116: 38.4. Convection Coupling
Step117: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Step118: 39.2. Function Name
Step119: 39.3. Function Order
Step120: 39.4. Convection Coupling
Step121: 40. Observation Simulation
Step122: 41. Observation Simulation --> Isscp Attributes
Step123: 41.2. Top Height Direction
Step124: 42. Observation Simulation --> Cosp Attributes
Step125: 42.2. Number Of Grid Points
Step126: 42.3. Number Of Sub Columns
Step127: 42.4. Number Of Levels
Step128: 43. Observation Simulation --> Radar Inputs
Step129: 43.2. Type
Step130: 43.3. Gas Absorption
Step131: 43.4. Effective Radius
Step132: 44. Observation Simulation --> Lidar Inputs
Step133: 44.2. Overlap
Step134: 45. Gravity Waves
Step135: 45.2. Sponge Layer
Step136: 45.3. Background
Step137: 45.4. Subgrid Scale Orography
Step138: 46. Gravity Waves --> Orographic Gravity Waves
Step139: 46.2. Source Mechanisms
Step140: 46.3. Calculation Method
Step141: 46.4. Propagation Scheme
Step142: 46.5. Dissipation Scheme
Step143: 47. Gravity Waves --> Non Orographic Gravity Waves
Step144: 47.2. Source Mechanisms
Step145: 47.3. Calculation Method
Step146: 47.4. Propagation Scheme
Step147: 47.5. Dissipation Scheme
Step148: 48. Solar
Step149: 49. Solar --> Solar Pathways
Step150: 50. Solar --> Solar Constant
Step151: 50.2. Fixed Value
Step152: 50.3. Transient Characteristics
Step153: 51. Solar --> Orbital Parameters
Step154: 51.2. Fixed Reference Date
Step155: 51.3. Transient Method
Step156: 51.4. Computation Method
Step157: 52. Solar --> Insolation Ozone
Step158: 53. Volcanos
Step159: 54. Volcanos --> Volcanoes Treatment
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'fio-ronm', 'sandbox-3', 'atmos')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Configure the Docker Registry for Kubeflow Fairing
Step2: Create PV/PVC to Store the Exported Model
Step3: (Optional) Skip the PV/PVC creation step below if you already have an existing PV and PVC.
Step4: Use Kubeflow fairing to build the docker image and launch a TFJob for training
Step5: Use Kubeflow Fairing to build a docker image, push it to the docker registry, and then launch a TFJob in the on-prem cluster for distributed model training.
Step6: Get the Created TFJobs
Step7: Wait For the Training Job to finish
Step8: Check if the TFJob succeeded.
Step9: Get the Training Logs
Step10: Deploy Service using KFServing
Step11: Get the InferenceService
Step12: Get the InferenceService and Service Endpoint
Step13: Run a prediction to the InferenceService
Step14: Clean Up
Step15: Delete the InferenceService.
|
<ASSISTANT_TASK:>
Python Code:
!pip show kubeflow-fairing
# Set docker registry to store image.
# Ensure you have permission for pushing docker image requests.
DOCKER_REGISTRY = 'index.docker.io/jinchi'
# Set namespace. Note that the created PVC should be in the namespace.
my_namespace = 'hejinchi'
# You can also get the default target namespace using the API below.
#namespace = fairing_utils.get_default_target_namespace()
# To satisfy the distributed training, the PVC should be accessible from all nodes in the cluster.
# The example creates an NFS PV to satisfy that.
nfs_server = '172.16.189.69'
nfs_path = '/opt/kubeflow/data/mnist'
pv_name = 'kubeflow-mnist'
pvc_name = 'mnist-pvc'
import yaml
from kubernetes import client as k8s_client
from kubernetes import config as k8s_config
from kubeflow.fairing.utils import is_running_in_k8s
pv_yaml = f'''
apiVersion: v1
kind: PersistentVolume
metadata:
name: {pv_name}
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
nfs:
path: {nfs_path}
server: {nfs_server}
'''
pvc_yaml = f'''
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {pvc_name}
namespace: {my_namespace}
spec:
accessModes:
- ReadWriteMany
storageClassName: ""
resources:
requests:
storage: 10Gi
'''
if is_running_in_k8s():
k8s_config.load_incluster_config()
else:
k8s_config.load_kube_config()
k8s_core_api = k8s_client.CoreV1Api()
k8s_core_api.create_persistent_volume(yaml.safe_load(pv_yaml))
k8s_core_api.create_namespaced_persistent_volume_claim(my_namespace, yaml.safe_load(pvc_yaml))
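# Optional sanity check (not part of the original flow): confirm the PVC is bound
# before launching training, using the same kubernetes client created above.
pvc = k8s_core_api.read_namespaced_persistent_volume_claim(pvc_name, my_namespace)
print("PVC {} phase: {}".format(pvc_name, pvc.status.phase))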
num_chief = 1 #number of Chief in TFJob
num_ps = 1 #number of PS in TFJob
num_workers = 2 #number of Worker in TFJob
model_dir = "/mnt"
export_path = "/mnt/export"
train_steps = "1000"
batch_size = "100"
learning_rate = "0.01"
import uuid
from kubeflow import fairing
from kubeflow.fairing.kubernetes.utils import mounting_pvc
tfjob_name = f'mnist-training-{uuid.uuid4().hex[:4]}'
output_map = {
"Dockerfile": "Dockerfile",
"mnist.py": "mnist.py"
}
command=["python",
"/opt/mnist.py",
"--tf-model-dir=" + model_dir,
"--tf-export-dir=" + export_path,
"--tf-train-steps=" + train_steps,
"--tf-batch-size=" + batch_size,
"--tf-learning-rate=" + learning_rate]
fairing.config.set_preprocessor('python', command=command, path_prefix="/app", output_map=output_map)
fairing.config.set_builder(name='docker', registry=DOCKER_REGISTRY, base_image="",
image_name="mnist", dockerfile_path="Dockerfile")
fairing.config.set_deployer(name='tfjob', namespace=my_namespace, stream_log=False, job_name=tfjob_name,
chief_count=num_chief, worker_count=num_workers, ps_count=num_ps,
pod_spec_mutators=[mounting_pvc(pvc_name=pvc_name, pvc_mount_path=model_dir)])
fairing.config.run()
from kubeflow.tfjob import TFJobClient
tfjob_client = TFJobClient()
tfjob_client.get(tfjob_name, namespace=my_namespace)
tfjob_client.wait_for_job(tfjob_name, namespace=my_namespace, watch=True)
tfjob_client.is_job_succeeded(tfjob_name, namespace=my_namespace)
tfjob_client.get_logs(tfjob_name, namespace=my_namespace)
from kubeflow.fairing.deployers.kfserving.kfserving import KFServing
isvc_name = f'mnist-service-{uuid.uuid4().hex[:4]}'
isvc = KFServing('tensorflow', namespace=my_namespace, isvc_name=isvc_name,
default_storage_uri='pvc://' + pvc_name + '/export')
isvc.deploy(isvc.generate_isvc())
from kfserving import KFServingClient
kfserving_client = KFServingClient()
kfserving_client.get(namespace=my_namespace)
mnist_isvc = kfserving_client.get(isvc_name, namespace=my_namespace)
mnist_isvc_name = mnist_isvc['metadata']['name']
mnist_isvc_endpoint = mnist_isvc['status'].get('url', '')
print("MNIST Service Endpoint: " + mnist_isvc_endpoint)
ISTIO_CLUSTER_IP=!kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.clusterIP}'
CLUSTER_IP=ISTIO_CLUSTER_IP[0]
MODEL_HOST=f"Host: {mnist_isvc_name}.{my_namespace}.example.com"
!curl -v -H "{MODEL_HOST}" http://{CLUSTER_IP}/v1/models/{mnist_isvc_name}:predict -d @./input.json
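# A hypothetical programmatic equivalent of the curl call above, using the `requests`
# library instead of shelling out; it assumes input.json holds the prediction payload.
import json
import requests
with open('input.json') as f:
    payload = json.load(f)
predict_url = f"http://{CLUSTER_IP}/v1/models/{mnist_isvc_name}:predict"
headers = {"Host": f"{mnist_isvc_name}.{my_namespace}.example.com"}
response = requests.post(predict_url, json=payload, headers=headers)
print(response.status_code, response.json())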
tfjob_client.delete(tfjob_name, namespace=my_namespace)
kfserving_client.delete(isvc_name, namespace=my_namespace)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: One To Many
Step2: To establish a bidirectional relationship in one-to-many, where the “reverse” side is a many to one, specify an additional relationship() and connect the two using the relationship.back_populates parameter
Step3: TIP
Step4: Child will get a parent attribute with many-to-one semantics.
Step5: Many To One
Step6: Alternatively, the backref parameter may be applied to a single relationship(), such as Parent.child
Step7: One To One
Step8: Or for many-to-one
Step9: Many To Many
Step10: For a bidirectional relationship, both sides of the relationship contain a collection. Specify using relationship.back_populates, and for each relationship() specify the common association table
Step11: The secondary argument of relationship() also accepts a callable that returns the ultimate argument, which is evaluated only when mappers are first used. Using this, we can define the association_table at a later point, as long as it’s available to the callable after all module initialization is complete
Step12: With the declarative extension in use, the traditional “string name of the table” is accepted as well, matching the name of the table as stored in Base.metadata.tables
Step13: Deleting Rows from the Many to Many Table
Step14: A question which often arises is how the row in the “secondary” table can be deleted when the child object is handed directly to Session.delete()
|
<ASSISTANT_TASK:>
Python Code:
# SQLAlchemy
from sqlalchemy import Table, Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Date, Integer, String
Base = declarative_base()
class One(Base):
__tablename__ = 'one'
id = Column(Integer, primary_key=True)
many = relationship("Many")
class Many(Base):
__tablename__ = 'many'
id = Column(Integer, primary_key=True)
one_id = Column(Integer, ForeignKey('one.id'))
class person(Base):
    __tablename__ = 'person'
id = Column(Integer, primary_key=True)
name = Column(String)
address = relationship("address")
class address(Base):
__tablename__ = 'address'
id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('person.id'))
street_name = Column(String)
p = person()
p.name = "Mayank"
p.address.append(address(street_name="200 Timbaktu"))
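# A minimal persistence sketch (assumption: an in-memory SQLite engine, not part of
# the original snippet) showing how the objects above would be saved and read back.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(p)
session.commit()
print(session.query(person).first().address[0].street_name)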
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", back_populates="parent")
class Child(Base):
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('parent.id'))
parent = relationship("Parent", back_populates="children")
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", backref="parent")
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
child_id = Column(Integer, ForeignKey('child.id'))
child = relationship("Child")
class Child(Base):
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
# Bidirectional behavior
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
child_id = Column(Integer, ForeignKey('child.id'))
child = relationship("Child", back_populates="parents")
class Child(Base):
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
parents = relationship("Parent", back_populates="child")
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
child_id = Column(Integer, ForeignKey('child.id'))
child = relationship("Child", backref="parents")
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
child = relationship("Child", uselist=False, back_populates="parent")
class Child(Base):
__tablename__ = 'child'
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('parent.id'))
parent = relationship("Parent", back_populates="child")
from sqlalchemy.orm import backref
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
child_id = Column(Integer, ForeignKey('child.id'))
child = relationship("Child", backref=backref("parent", uselist=False))
association_table = Table('association', Base.metadata,
Column('left_id', Integer, ForeignKey('left.id')),
Column('right_id', Integer, ForeignKey('right.id'))
)
class Parent(Base):
__tablename__ = 'left'
id = Column(Integer, primary_key=True)
children = relationship("Child",
secondary=association_table)
class Child(Base):
__tablename__ = 'right'
id = Column(Integer, primary_key=True)
association_table = Table('association', Base.metadata,
Column('left_id', Integer, ForeignKey('left.id')),
Column('right_id', Integer, ForeignKey('right.id'))
)
class Parent(Base):
__tablename__ = 'left'
id = Column(Integer, primary_key=True)
children = relationship(
"Child",
secondary=association_table,
back_populates="parents")
class Child(Base):
__tablename__ = 'right'
id = Column(Integer, primary_key=True)
parents = relationship(
"Parent",
secondary=association_table,
back_populates="children")
association_table = Table('association', Base.metadata,
Column('left_id', Integer, ForeignKey('left.id')),
Column('right_id', Integer, ForeignKey('right.id'))
)
class Parent(Base):
__tablename__ = 'left'
id = Column(Integer, primary_key=True)
children = relationship("Child",
secondary=association_table,
backref="parents")
class Child(Base):
__tablename__ = 'right'
id = Column(Integer, primary_key=True)
class Parent(Base):
__tablename__ = 'left'
id = Column(Integer, primary_key=True)
children = relationship("Child",
secondary=lambda: association_table,
backref="parents")
class Parent(Base):
__tablename__ = 'left'
id = Column(Integer, primary_key=True)
children = relationship("Child",
secondary="association",
backref="parents")
# row will be deleted from the "secondary" table
# automatically
myparent.children.remove(somechild)
session.delete(somechild)
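# A self-contained sketch (hypothetical Demo* names) of the deletion pattern above:
# removing the child from the parent's collection deletes the association row, and
# deleting the child removes its own row. A fresh Base/engine is used so the sketch
# runs independently of the pattern snippets above.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
DemoBase = declarative_base()
demo_association = Table('demo_association', DemoBase.metadata,
    Column('left_id', Integer, ForeignKey('demo_left.id')),
    Column('right_id', Integer, ForeignKey('demo_right.id'))
)
class DemoParent(DemoBase):
    __tablename__ = 'demo_left'
    id = Column(Integer, primary_key=True)
    children = relationship("DemoChild", secondary=demo_association, backref="parents")
class DemoChild(DemoBase):
    __tablename__ = 'demo_right'
    id = Column(Integer, primary_key=True)
engine = create_engine('sqlite:///:memory:')
DemoBase.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
myparent, somechild = DemoParent(), DemoChild()
myparent.children.append(somechild)
session.add(myparent)
session.commit()
myparent.children.remove(somechild)   # deletes the demo_association row
session.delete(somechild)             # deletes the child row itself
session.commit()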
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's look at the first file and visualise it
Step2: That is a very nice and clean signal. We don't need to do any preprocessing and can run analysis right away
Step3: That went well.
Step4: Ah!
Step5: We've now reduced the amplitude of the T-wave and are ready for analysis
Step6: Oh dear
Step7: Upsampling the signal has enabled HeartPy to optimize and find the position for all peaks in the signal.
Step8: Again we have a very strong signal present in the recording. That is always nice to see. Analysis again is then straightforward
Step9: And again we can fix the 'mistrusted' peaks with modest upsampling
Step10: Since 1.2.4 HeartPy includes Poincaré nonlinear methods too
|
<ASSISTANT_TASK:>
Python Code:
#import packages
import heartpy as hp
import matplotlib.pyplot as plt
sample_rate = 250
data = hp.get_data('e0103.csv')
plt.figure(figsize=(12,4))
plt.plot(data)
plt.show()
#run analysis
wd, m = hp.process(data, sample_rate)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
data = hp.get_data('e0110.csv')
plt.figure(figsize=(12,4))
plt.plot(data)
plt.show()
#and zoom in a bit
plt.figure(figsize=(12,4))
plt.plot(data[0:2500])
plt.show()
filtered = hp.filter_signal(data, cutoff = 0.05, sample_rate = sample_rate, filtertype='notch')
#visualize again
plt.figure(figsize=(12,4))
plt.plot(filtered)
plt.show()
#and zoom in a bit
plt.figure(figsize=(12,4))
plt.plot(data[0:2500], label = 'original signal')
plt.plot(filtered[0:2500], alpha=0.5, label = 'filtered signal')
plt.legend()
plt.show()
#run analysis
wd, m = hp.process(hp.scale_data(filtered), sample_rate)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
from scipy.signal import resample
#resample the data. Usually 2, 4, or 6 times is enough depending on original sampling rate
resampled_data = resample(filtered, len(filtered) * 2)
#And run the analysis again. Don't forget to up the sample rate as well!
wd, m = hp.process(hp.scale_data(resampled_data), sample_rate * 2)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
data = hp.get_data('e0124.csv')
plt.figure(figsize=(12,4))
plt.plot(data)
plt.show()
#and zoom in a bit
plt.figure(figsize=(12,4))
plt.plot(data[0:2500])
plt.show()
#run analysis
wd, m = hp.process(hp.scale_data(data), sample_rate)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
#resample the data. Usually 2, 4, or 6 times is enough depending on original sampling rate
resampled_data = resample(data, len(data) * 2)
#And run the analysis again. Don't forget to up the sample rate as well!
wd, m = hp.process(hp.scale_data(resampled_data), sample_rate * 2)
#visualise in plot of custom size
plt.figure(figsize=(12,4))
hp.plotter(wd, m)
#display computed measures
for measure in m.keys():
print('%s: %f' %(measure, m[measure]))
hp.plot_poincare(wd, m)
#print poincare measures
poincare_measures = ['sd1', 'sd2', 's', 'sd1/sd2']
print('\nnonlinear poincare measures:')
for measure in poincare_measures:
print('%s: %f' %(measure, m[measure]))
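# A short extension sketch (not in the original walkthrough): for long recordings,
# HeartPy can also compute the same measures per segment via process_segmentwise.
# The segment_width and segment_overlap values here are illustrative assumptions.
wd_seg, m_seg = hp.process_segmentwise(data, sample_rate=sample_rate,
                                       segment_width=120, segment_overlap=0.5)
print('number of segments analysed:', len(m_seg['bpm']))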
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Install the latest GA version of google-cloud-storage library as well.
Step2: Restart the kernel
Step3: Before you begin
Step4: Region
Step5: Timestamp
Step6: Authenticate your Google Cloud account
Step7: Create a Cloud Storage bucket
Step8: Only if your bucket doesn't already exist
Step9: Finally, validate access to your Cloud Storage bucket by examining its contents
Step10: Set up variables
Step11: Initialize Vertex SDK for Python
Step12: Set hardware accelerators
Step13: Set pre-built containers
Step14: Set machine type
Step15: Tutorial
Step16: Task.py contents
Step17: Store training script on your Cloud Storage bucket
Step18: Create and run custom training job
Step19: Prepare your command-line arguments
Step20: Run the custom training job
Step21: Load the saved model
Step22: Evaluate the model
Step23: Perform the model evaluation
Step24: Serving function for image data
Step25: Get the serving function signature
Step26: Upload the model
Step27: Deploy the model
Step28: Get test item
Step29: Prepare the request content
Step30: Make the prediction
Step31: Undeploy the model
Step32: Cleaning up
|
<ASSISTANT_TASK:>
Python Code:
import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
REGION = "us-central1" # @param {type: "string"}
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
! gsutil mb -l $REGION $BUCKET_NAME
! gsutil ls -al $BUCKET_NAME
import google.cloud.aiplatform as aip
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (None, None)
if os.getenv("IS_TESTING_DEPLOY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPLOY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
if os.getenv("IS_TESTING_TF"):
TF = os.getenv("IS_TESTING_TF")
else:
TF = "2-1"
if TF[0] == "2":
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf2-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf2-cpu.{}".format(TF)
else:
if TRAIN_GPU:
TRAIN_VERSION = "tf-gpu.{}".format(TF)
else:
TRAIN_VERSION = "tf-cpu.{}".format(TF)
if DEPLOY_GPU:
DEPLOY_VERSION = "tf-gpu.{}".format(TF)
else:
DEPLOY_VERSION = "tf-cpu.{}".format(TF)
TRAIN_IMAGE = "gcr.io/cloud-aiplatform/training/{}:latest".format(TRAIN_VERSION)
DEPLOY_IMAGE = "gcr.io/cloud-aiplatform/prediction/{}:latest".format(DEPLOY_VERSION)
print("Training:", TRAIN_IMAGE, TRAIN_GPU, TRAIN_NGPU)
print("Deployment:", DEPLOY_IMAGE, DEPLOY_GPU, DEPLOY_NGPU)
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'tensorflow_datasets==1.3.0',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: CIFAR10 image classification\n\nVersion: 0.0.0\n\nSummary: Demostration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: aferlitsch@google.com\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
%%writefile custom/trainer/task.py
# Single, Mirror and Multi-Machine Distributed Training for CIFAR-10
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.python.client import device_lib
import argparse
import os
import sys
tfds.disable_progress_bar()
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv("AIP_MODEL_DIR"), type=str, help='Model dir.')
parser.add_argument('--lr', dest='lr',
default=0.01, type=float,
help='Learning rate.')
parser.add_argument('--epochs', dest='epochs',
default=10, type=int,
help='Number of epochs.')
parser.add_argument('--steps', dest='steps',
default=200, type=int,
help='Number of steps per epoch.')
parser.add_argument('--distribute', dest='distribute', type=str, default='single',
help='distributed training strategy')
args = parser.parse_args()
print('Python Version = {}'.format(sys.version))
print('TensorFlow Version = {}'.format(tf.__version__))
print('TF_CONFIG = {}'.format(os.environ.get('TF_CONFIG', 'Not found')))
print('DEVICES', device_lib.list_local_devices())
# Single Machine, single compute device
if args.distribute == 'single':
if tf.test.is_gpu_available():
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
else:
strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0")
# Single Machine, multiple compute device
elif args.distribute == 'mirror':
strategy = tf.distribute.MirroredStrategy()
# Multiple Machine, multiple compute device
elif args.distribute == 'multi':
strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
# Multi-worker configuration
print('num_replicas_in_sync = {}'.format(strategy.num_replicas_in_sync))
# Preparing dataset
BUFFER_SIZE = 10000
BATCH_SIZE = 64
def make_datasets_unbatched():
# Scaling CIFAR10 data from (0, 255] to (0., 1.]
def scale(image, label):
image = tf.cast(image, tf.float32)
image /= 255.0
return image, label
datasets, info = tfds.load(name='cifar10',
with_info=True,
as_supervised=True)
return datasets['train'].map(scale).cache().shuffle(BUFFER_SIZE).repeat()
# Build the Keras model
def build_and_compile_cnn_model():
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(32, 32, 3)),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.MaxPooling2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(
loss=tf.keras.losses.sparse_categorical_crossentropy,
optimizer=tf.keras.optimizers.SGD(learning_rate=args.lr),
metrics=['accuracy'])
return model
# Train the model
NUM_WORKERS = strategy.num_replicas_in_sync
# Here the batch size scales up by number of workers since
# `tf.data.Dataset.batch` expects the global batch size.
GLOBAL_BATCH_SIZE = BATCH_SIZE * NUM_WORKERS
train_dataset = make_datasets_unbatched().batch(GLOBAL_BATCH_SIZE)
with strategy.scope():
# Creation of dataset, and model building/compiling need to be within
# `strategy.scope()`.
model = build_and_compile_cnn_model()
model.fit(x=train_dataset, epochs=args.epochs, steps_per_epoch=args.steps)
model.save(args.model_dir)
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_cifar10.tar.gz
job = aip.CustomTrainingJob(
display_name="cifar10_" + TIMESTAMP,
script_path="custom/trainer/task.py",
container_uri=TRAIN_IMAGE,
requirements=["gcsfs==0.7.1", "tensorflow-datasets==4.4"],
)
print(job)
MODEL_DIR = "{}/{}".format(BUCKET_NAME, TIMESTAMP)
EPOCHS = 20
STEPS = 100
DIRECT = True
if DIRECT:
CMDARGS = [
"--model-dir=" + MODEL_DIR,
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
else:
CMDARGS = [
"--epochs=" + str(EPOCHS),
"--steps=" + str(STEPS),
]
if TRAIN_GPU:
job.run(
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=True,
)
else:
job.run(
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=True,
)
model_path_to_deploy = MODEL_DIR
import tensorflow as tf
local_model = tf.keras.models.load_model(MODEL_DIR)
import numpy as np
from tensorflow.keras.datasets import cifar10
(_, _), (x_test, y_test) = cifar10.load_data()
x_test = (x_test / 255.0).astype(np.float32)
print(x_test.shape, y_test.shape)
local_model.evaluate(x_test, y_test)
CONCRETE_INPUT = "numpy_inputs"
def _preprocess(bytes_input):
decoded = tf.io.decode_jpeg(bytes_input, channels=3)
decoded = tf.image.convert_image_dtype(decoded, tf.float32)
resized = tf.image.resize(decoded, size=(32, 32))
rescale = tf.cast(resized / 255.0, tf.float32)
return rescale
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def preprocess_fn(bytes_inputs):
decoded_images = tf.map_fn(
_preprocess, bytes_inputs, dtype=tf.float32, back_prop=False
)
return {
CONCRETE_INPUT: decoded_images
} # User needs to make sure the key matches model's input
@tf.function(input_signature=[tf.TensorSpec([None], tf.string)])
def serving_fn(bytes_inputs):
images = preprocess_fn(bytes_inputs)
prob = m_call(**images)
return prob
m_call = tf.function(local_model.call).get_concrete_function(
[tf.TensorSpec(shape=[None, 32, 32, 3], dtype=tf.float32, name=CONCRETE_INPUT)]
)
tf.saved_model.save(
local_model, model_path_to_deploy, signatures={"serving_default": serving_fn}
)
loaded = tf.saved_model.load(model_path_to_deploy)
serving_input = list(
loaded.signatures["serving_default"].structured_input_signature[1].keys()
)[0]
print("Serving function input:", serving_input)
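# Optional local smoke test (an assumption, not part of the original flow): call the
# reloaded serving signature with raw JPEG bytes before uploading the model.
serving_fn_loaded = loaded.signatures["serving_default"]
example_bytes = tf.io.encode_jpeg(tf.cast(x_test[0] * 255, tf.uint8)).numpy()
local_output = serving_fn_loaded(**{serving_input: tf.constant([example_bytes])})
print(local_output)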
model = aip.Model.upload(
display_name="cifar10_" + TIMESTAMP,
artifact_uri=MODEL_DIR,
serving_container_image_uri=DEPLOY_IMAGE,
sync=False,
)
model.wait()
DEPLOYED_NAME = "cifar10-" + TIMESTAMP
TRAFFIC_SPLIT = {"0": 100}
MIN_NODES = 1
MAX_NODES = 1
if DEPLOY_GPU:
endpoint = model.deploy(
deployed_model_display_name=DEPLOYED_NAME,
traffic_split=TRAFFIC_SPLIT,
machine_type=DEPLOY_COMPUTE,
accelerator_type=DEPLOY_GPU,
accelerator_count=DEPLOY_NGPU,
min_replica_count=MIN_NODES,
max_replica_count=MAX_NODES,
)
else:
endpoint = model.deploy(
deployed_model_display_name=DEPLOYED_NAME,
traffic_split=TRAFFIC_SPLIT,
machine_type=DEPLOY_COMPUTE,
accelerator_type=DEPLOY_GPU,
accelerator_count=0,
min_replica_count=MIN_NODES,
max_replica_count=MAX_NODES,
)
test_image = x_test[0]
test_label = y_test[0]
print(test_image.shape)
import base64
import cv2
cv2.imwrite("tmp.jpg", (test_image * 255).astype(np.uint8))
bytes = tf.io.read_file("tmp.jpg")
b64str = base64.b64encode(bytes.numpy()).decode("utf-8")
# The format of each instance should conform to the deployed model's prediction input schema.
instances = [{serving_input: {"b64": b64str}}]
prediction = endpoint.predict(instances=instances)
print(prediction)
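# Small post-processing step (hypothetical addition): the endpoint returns class
# probabilities, so the predicted class index is the arg-max over them.
predicted_class = np.argmax(prediction.predictions[0])
print("Predicted class:", predicted_class, "- true label:", test_label[0])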
endpoint.undeploy_all()
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline trainig job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom trainig job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: First, we'll download the dataset to our local machine. The data consists of characters rendered in a variety of fonts on a 28x28 image. The labels are limited to 'A' through 'J' (10 classes). The training set has about 500k and the test set 19000 labelled examples. Given these sizes, it should be possible to train models quickly on any machine.
Step4: Extract the dataset from the compressed .tar.gz file.
Step5: Problem 1
Step7: Now let's load the data in a more manageable format. Since, depending on your computer setup, you might not be able to fit it all in memory, we'll load each class into a separate dataset, store them on disk and curate them independently. Later we'll merge them into a single dataset of manageable size.
Step8: Problem 2
Step9: Problem 3
Step10: Merge and prune the training data as needed. Depending on your computer setup, you might not be able to fit it all in memory, and you can tune train_size as needed. The labels will be stored into a separate array of integers 0 through 9.
Step11: Next, we'll randomize the data. It's important to have the labels well shuffled for the training and test distributions to match.
Step12: Problem 4
Step13: Finally, let's save the data for later reuse
Step14: Problem 5
Step15: Comparison parallel and sync overlapping calculation
Step16: Synchronously
Step17: Asynchronously
Step18: Estimation overlapping
Step19: Problem 6
|
<ASSISTANT_TASK:>
Python Code:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import time
from datetime import timedelta
import tarfile
from IPython.display import display, Image
from scipy import ndimage
from scipy.spatial import distance
from sklearn.linear_model import LogisticRegression
from six.moves.urllib.request import urlretrieve
from six.moves import cPickle as pickle
from ipyparallel import Client, require
# Config the matplotlib backend as plotting inline in IPython
%matplotlib inline
%run label_util.py
def draw_images(label, a_arr, b_arr, bins_size=20):
x = np.array(range(bins_size))
f, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(8, 1))
h_a = np.histogram(a_arr, bins=bins_size)
h_b = np.histogram(b_arr, bins=bins_size)
ax1.imshow(a_arr)
ax1.set_title('Label: ' + label)
ax2.bar(x, h_a[0])
ax3.imshow(b_arr)
ax4.bar(x, h_b[0])
plt.show()
def overlapping_comparison(counters, dataset_a, dataset_b, number_shown = 3):
letters = list(counters.keys())
i = 0
shown = 0
while shown < number_shown and i < len(letters):
character = letters[i]
similar_keys = list(counters[character].keys())
key = similar_keys[0]
if key == 'counter' and len(similar_keys) > 1:
key = similar_keys[1]
idx_a = int(key)
idx_b = counters[character][key][0]
label = '{0} (Distance: {1}) [{2}] - [{3}]'.format(character, manhattan_distance(dataset_a[idx_a], dataset_b[idx_b]), idx_a, idx_b)
draw_images(label, dataset_a[idx_a], dataset_b[idx_b])
i += 1
shown += 1
def display_overlap(counters):
key_lst = sorted(counters.keys())
total = 0
for key in key_lst:
total += counters[key]['counter']
print('Label {0}: {1}'.format(key, counters[key]['counter']))
print('Total:', total)
def wrap_tuples(labels, dataset):
result = []
for idx, item in enumerate(zip(labels, dataset)):
result.append((idx, item[0], item[1]))
return result
def is_equal_comparison(a_arr, b_arr):
return (a_arr==b_arr).all()
def euclidean_distance(a_arr, b_arr):
'''Euclidean distance without the sqrt'''
return np.sum(np.power(a_arr - b_arr, 2))
@require('numpy as np')
def manhattan_distance(a_arr, b_arr):
return np.sum(np.absolute(a_arr - b_arr))
def count_duplication(counters, lbl, idxA, idxB):
str_lbl = get_char_by_lbl(lbl)
if str_lbl not in counters:
counters[str_lbl] = {}
counters[str_lbl]['counter'] = 0
counters[str_lbl]['counter'] += 1
if str(idxA) not in counters[str_lbl]:
counters[str_lbl][str(idxA)] = []
counters[str_lbl][str(idxA)].append(idxB)
def count_equal_data(label_lst_A, data_lst_A, label_lst_B, data_lst_B, distance_threshold=0, min_distance_threshold = 0):
start_time = time.clock()
counters = {}
for idxA, lblA in enumerate(label_lst_A):
for idxB, lblB in enumerate(label_lst_B):
if lblA == lblB:
itemA = data_lst_A[idxA]
itemB = data_lst_B[idxB]
if distance_threshold == 0 and is_equal_comparison(itemA, itemB):
count_duplication(counters, lblA, idxA, idxB)
if distance_threshold > 0 and distance_threshold >= manhattan_distance(itemA, itemB) > min_distance_threshold:
count_duplication(counters, lblA, idxA, idxB)
end_time = time.clock()
return (counters, timedelta(seconds=end_time - start_time))
def count_equal_tuples(tuple_lst_A, tuple_lst_B, distance_threshold=0, min_distance_threshold = 0):
idx_idx = 0
lbl_idx = 1
data_idx = 2
counters = {}
for item_A in tuple_lst_A:
for item_B in tuple_lst_B:
if item_A[lbl_idx] == item_B[lbl_idx]:
if distance_threshold == 0 and is_equal_comparison(item_A[data_idx], item_B[data_idx]):
count_duplication(counters, item_A[lbl_idx], item_A[idx_idx], item_B[idx_idx])
if distance_threshold > 0 and distance_threshold >= manhattan_distance(item_A[data_idx], item_B[data_idx]) > min_distance_threshold:
count_duplication(counters, item_A[lbl_idx], item_A[idx_idx], item_B[idx_idx])
return counters
@require(get_char_by_lbl)
def count_duplication(counters, lbl, idxA, idxB):
str_lbl = get_char_by_lbl(lbl)
if str_lbl not in counters:
counters[str_lbl] = {}
counters[str_lbl]['counter'] = 0
counters[str_lbl]['counter'] += 1
if str(idxA) not in counters[str_lbl]:
counters[str_lbl][str(idxA)] = []
counters[str_lbl][str(idxA)].append(idxB)
@require(is_equal_comparison, count_duplication, manhattan_distance)
def item_acync_handler():
idx_idx = 0
lbl_idx = 1
data_idx = 2
for item_A in tuple_lst_A:
for item_B in tuple_lst_B:
if item_A[lbl_idx] == item_B[lbl_idx]:
if distance_threshold == 0 and is_equal_comparison(item_A[data_idx], item_B[data_idx]):
count_duplication(counters, item_A[lbl_idx], item_A[idx_idx], item_B[idx_idx])
if distance_threshold > 0 and distance_threshold >= manhattan_distance(item_A[data_idx], item_B[data_idx]) > min_distance_threshold:
count_duplication(counters, item_A[lbl_idx], item_A[idx_idx], item_B[idx_idx])
def reduce_counters(counters_lst):
result = {}
for counters in counters_lst:
for letter_key, item in counters.items():
if letter_key not in result:
result[letter_key] = {'counter': 0}
for key, value in item.items():
if key == 'counter':
result[letter_key][key] += value
elif key not in result[letter_key]:
result[letter_key][key] = value
else:
for idx in value:
result[letter_key][key].append(idx)
return result
def count_equal_tuples_parallel(tuple_lst_A, tuple_lst_B, distance_threshold=0, min_distance_threshold = 0):
rc = Client()
dview = rc[:]
dview.push(dict(tuple_lst_B = tuple_lst_B, map_dict=map_dict,
distance_threshold=distance_threshold,
min_distance_threshold=min_distance_threshold))
dview['counters'] = {}
dview.scatter('tuple_lst_A', tuple_lst_A)
dview.block=True
dview.apply(item_acync_handler)
result = reduce_counters(dview['counters'])
return result
url = 'http://commondatastorage.googleapis.com/books1000/'
last_percent_reported = None
def download_progress_hook(count, blockSize, totalSize):
  """A hook to report the progress of a download. This is mostly intended for users with
  slow internet connections. Reports every 1% change in download progress.
  """
global last_percent_reported
percent = int(count * blockSize * 100 / totalSize)
if last_percent_reported != percent:
if percent % 5 == 0:
sys.stdout.write("%s%%" % percent)
sys.stdout.flush()
else:
sys.stdout.write(".")
sys.stdout.flush()
last_percent_reported = percent
def maybe_download(filename, expected_bytes, force=False):
  """Download a file if not present, and make sure it's the right size."""
if force or not os.path.exists(filename):
print('Attempting to download:', filename)
filename, _ = urlretrieve(url + filename, filename, reporthook=download_progress_hook)
print('\nDownload Complete!')
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)
test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)
num_classes = 10
np.random.seed(133)
def maybe_extract(filename, force=False):
root = os.path.splitext(os.path.splitext(filename)[0])[0] # remove .tar.gz
if os.path.isdir(root) and not force:
# You may override by setting force=True.
print('%s already present - Skipping extraction of %s.' % (root, filename))
else:
print('Extracting data for %s. This may take a while. Please wait.' % root)
tar = tarfile.open(filename)
sys.stdout.flush()
tar.extractall()
tar.close()
data_folders = [
os.path.join(root, d) for d in sorted(os.listdir(root))
if os.path.isdir(os.path.join(root, d))]
if len(data_folders) != num_classes:
raise Exception(
'Expected %d folders, one per class. Found %d instead.' % (
num_classes, len(data_folders)))
print(data_folders)
return data_folders
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
from IPython.display import Image, display
num_first_items = 1
def display_first_items(folder_path):
print('Letter:', folder_path[-1:])
lst = os.listdir(folder_path)[:num_first_items]
for file_name in lst:
full_file_name = os.path.join(folder_path, file_name)
display(Image(filename=full_file_name))
for folder in train_folders:
display_first_items(folder)
for folder in test_folders:
display_first_items(folder)
image_size = 28 # Pixel width and height.
pixel_depth = 255.0 # Number of levels per pixel.
def load_letter(folder, min_num_images):
  """Load the data for a single letter label."""
image_files = os.listdir(folder)
dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
dtype=np.float32)
print(folder)
num_images = 0
for image in image_files:
image_file = os.path.join(folder, image)
try:
image_data = (ndimage.imread(image_file).astype(float) -
pixel_depth / 2) / pixel_depth
if image_data.shape != (image_size, image_size):
raise Exception('Unexpected image shape: %s' % str(image_data.shape))
if image_data.mean() == 0.5:
print('No data in image:', image_file)
continue
dataset[num_images, :, :] = image_data
num_images = num_images + 1
except IOError as e:
print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
dataset = dataset[0:num_images, :, :]
if num_images < min_num_images:
raise Exception('Many fewer images than expected: %d < %d' %
(num_images, min_num_images))
print('Full dataset tensor:', dataset.shape)
print('Mean:', np.mean(dataset))
print('Standard deviation:', np.std(dataset))
return dataset
def maybe_pickle(data_folders, min_num_images_per_class, force=False):
dataset_names = []
for folder in data_folders:
set_filename = folder + '.pickle'
dataset_names.append(set_filename)
if os.path.exists(set_filename) and not force:
# You may override by setting force=True.
print('%s already present - Skipping pickling.' % set_filename)
else:
print('Pickling %s.' % set_filename)
dataset = load_letter(folder, min_num_images_per_class)
try:
with open(set_filename, 'wb') as f:
pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', set_filename, ':', e)
return dataset_names
train_datasets = maybe_pickle(train_folders, 45000)
test_datasets = maybe_pickle(test_folders, 1600)
def show_pickle(file_path):
print(file_path)
with open(file_path, 'rb') as f:
dataset = pickle.load(f)
plt.figure(figsize=(1,1))
plt.imshow(dataset[1])
plt.show()
for pickle_file in train_datasets:
show_pickle(pickle_file)
for pickle_file in test_datasets:
show_pickle(pickle_file)
def show_pickle_stats(file_path):
with open(file_path, 'rb') as f:
dataset = pickle.load(f)
print(file_path, len(dataset))
for pickle_file in train_datasets:
show_pickle_stats(pickle_file)
for pickle_file in test_datasets:
show_pickle_stats(pickle_file)
def make_arrays(nb_rows, img_size):
if nb_rows:
dataset = np.ndarray((nb_rows, img_size, img_size), dtype=np.float32)
labels = np.ndarray(nb_rows, dtype=np.int32)
else:
dataset, labels = None, None
return dataset, labels
def merge_datasets(pickle_files, train_size, valid_size=0):
num_classes = len(pickle_files)
valid_dataset, valid_labels = make_arrays(valid_size, image_size)
train_dataset, train_labels = make_arrays(train_size, image_size)
vsize_per_class = valid_size // num_classes
tsize_per_class = train_size // num_classes
start_v, start_t = 0, 0
end_v, end_t = vsize_per_class, tsize_per_class
end_l = vsize_per_class+tsize_per_class
for label, pickle_file in enumerate(pickle_files):
try:
with open(pickle_file, 'rb') as f:
letter_set = pickle.load(f)
# let's shuffle the letters to have random validation and training set
np.random.shuffle(letter_set)
if valid_dataset is not None:
valid_letter = letter_set[:vsize_per_class, :, :]
valid_dataset[start_v:end_v, :, :] = valid_letter
valid_labels[start_v:end_v] = label
start_v += vsize_per_class
end_v += vsize_per_class
train_letter = letter_set[vsize_per_class:end_l, :, :]
train_dataset[start_t:end_t, :, :] = train_letter
train_labels[start_t:end_t] = label
start_t += tsize_per_class
end_t += tsize_per_class
except Exception as e:
print('Unable to process data from', pickle_file, ':', e)
raise
return valid_dataset, valid_labels, train_dataset, train_labels
train_size = 200000
valid_size = 10000
test_size = 10000
valid_dataset, valid_labels, train_dataset, train_labels = merge_datasets(
train_datasets, train_size, valid_size)
_, _, test_dataset, test_labels = merge_datasets(test_datasets, test_size)
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
print('train labels:', count_labels(train_labels))
print('valid labels:', count_labels(valid_labels))
print('test labels:', count_labels(test_labels))
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation,:,:]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
train_dataset, train_labels = randomize(train_dataset, train_labels)
test_dataset, test_labels = randomize(test_dataset, test_labels)
valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
print('train labels:', count_labels(train_labels))
print('valid labels:', count_labels(valid_labels))
print('test labels:', count_labels(test_labels))
def show_data(dataset, labels, size=3):
print('=============================================')
for lbl, img_arr in zip(labels[:size], dataset[:size]):
print(map_dict[str(lbl)])
plt.figure(figsize=(1,1))
plt.imshow(img_arr)
plt.show()
show_data(train_dataset, train_labels)
show_data(test_dataset, test_labels)
show_data(valid_dataset, valid_labels)
pickle_file = 'notMNIST.pickle'
try:
f = open(pickle_file, 'wb')
save = {
'train_dataset': train_dataset,
'train_labels': train_labels,
'valid_dataset': valid_dataset,
'valid_labels': valid_labels,
'test_dataset': test_dataset,
'test_labels': test_labels,
}
pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
f.close()
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
statinfo = os.stat(pickle_file)
print('Compressed pickle size:', statinfo.st_size)
bins_size = 28 * 4
def calc_histogram(dataset, bins = bins_size):
start_time = time.clock()
hist_list = []
for item in dataset:
hist = np.histogram(item, bins=bins)
hist_list.append(hist[0])
end_time = time.clock()
return (hist_list, timedelta(seconds=end_time - start_time))
train_histogram, calc_duration = calc_histogram(train_dataset, bins_size)
print('Histograms for train dataset calculates in', calc_duration)
valid_histogram, calc_duration = calc_histogram(valid_dataset, bins_size)
print('Histograms for validation dataset calculates in', calc_duration)
test_histogram, calc_duration = calc_histogram(test_dataset, bins_size)
print('Histograms for test dataset calculates in', calc_duration)
# pickle_hist_file = 'notMNIST.hist.pickle'
# try:
# f = open(pickle_hist_file, 'wb')
# save = {
# 'train_histogram': train_histogram,
# 'valid_histogram': valid_histogram,
# 'test_histogram': test_histogram,
# }
# pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
# f.close()
# except Exception as e:
# print('Unable to save data to', pickle_hist_file, ':', e)
# raise
# statinfo = os.stat(pickle_hist_file)
# print('Compressed histograms pickle size:', statinfo.st_size)
pickle_file = 'notMNIST.pickle'
pickle_hist_file = 'notMNIST.hist.pickle'
try:
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
print('Training:', train_dataset.shape, train_labels.shape)
print('Validation:', valid_dataset.shape, valid_labels.shape)
print('Testing:', test_dataset.shape, test_labels.shape)
except Exception as e:
print('Unable to load full dataset to', pickle_file, ':', e)
raise
# try:
# with open(pickle_hist_file, 'rb') as f:
# save = pickle.load(f)
# train_histogram = save['train_histogram']
# valid_histogram = save['valid_histogram']
# test_histogram = save['test_histogram']
# print('Training histogram:', len(train_histogram))
# print('Validation histogram:', len(valid_histogram))
# print('Testing histogram:', len(test_histogram))
# except Exception as e:
# print('Unable to load full dataset to', pickle_file, ':', e)
# raise
start_time = time.clock()
train_tuple_lst = wrap_tuples(train_labels, train_dataset)
valid_tuple_lst = wrap_tuples(valid_labels, valid_dataset)
test_tuple_lst = wrap_tuples(test_labels, test_dataset)
end_time = time.clock()
print('Labels and data sets to tuples time:', timedelta(seconds=end_time - start_time))
distance_overlapping = 10
start_time = time.clock()
overlap_valid_test = count_equal_tuples(valid_tuple_lst, test_tuple_lst)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets during', duration)
display_overlap(overlap_valid_test)
start_time = time.clock()
overlap_valid_test_near = count_equal_tuples(valid_tuple_lst, test_tuple_lst, distance_overlapping)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets (with overlaping distance) during', duration)
display_overlap(overlap_valid_test_near)
start_time = time.clock()
overlap_valid_test = count_equal_tuples_parallel(valid_tuple_lst, test_tuple_lst)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets during', duration)
display_overlap(overlap_valid_test)
overlapping_comparison(overlap_valid_test, valid_dataset, test_dataset)
start_time = time.clock()
overlap_valid_test_near = count_equal_tuples_parallel(valid_tuple_lst, test_tuple_lst, distance_overlapping)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets (with overlaping distance) during', duration)
display_overlap(overlap_valid_test_near)
overlapping_comparison(overlap_valid_test_near, valid_dataset, test_dataset)
start_time = time.clock()
overlap_valid_test_far = count_equal_tuples_parallel(valid_tuple_lst, test_tuple_lst, 110, 100)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets (with overlaping interval) during', duration)
display_overlap(overlap_valid_test_far)
overlapping_comparison(overlap_valid_test_far, valid_dataset, test_dataset)
start_time = time.clock()
overlap_train_valid = count_equal_tuples_parallel(train_tuple_lst, valid_tuple_lst)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets during', duration)
display_overlap(overlap_train_valid)
overlapping_comparison(overlap_train_valid, train_dataset, valid_dataset)
start_time = time.clock()
overlap_train_valid_near = count_equal_tuples_parallel(train_tuple_lst, valid_tuple_lst, distance_overlapping)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets (with overlaping distance) during', duration)
display_overlap(overlap_train_valid_near)
overlapping_comparison(overlap_train_valid_near, train_dataset, valid_dataset)
start_time = time.clock()
overlap_train_test = count_equal_tuples_parallel(train_tuple_lst, test_tuple_lst)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets during', duration)
display_overlap(overlap_train_test)
overlapping_comparison(overlap_train_test, train_dataset, test_dataset)
start_time = time.clock()
overlap_train_test_near = count_equal_tuples_parallel(train_tuple_lst, test_tuple_lst, distance_overlapping)
end_time = time.clock()
duration = timedelta(seconds=end_time - start_time)
print('Counting overlapping between validation and test datasets (with overlaping distance) during', duration)
display_overlap(overlap_train_test_near)
overlapping_comparison(overlap_train_test_near, train_dataset, test_dataset)
item_a, item_b = valid_dataset[8], valid_dataset[9]
%timeit is_equal_comparison(item_a, item_b)
%timeit manhattan_distance(valid_histogram[8], valid_histogram[9])
%timeit distance.cityblock(item_a.flatten(), item_b.flatten())
from sklearn.linear_model import LogisticRegression
%run label_util.py
# print('train labels:', count_labels(train_labels))
# print('valid labels:', count_labels(valid_labels))
# print('test labels:', count_labels(test_labels))
# show_data(train_dataset, train_labels)
# show_data(test_dataset, test_labels)
# show_data(valid_dataset, valid_labels)
from collections import Counter
cnt = Counter(valid_labels)
keys = cnt.keys()
one_class_size = 50 // len(keys)
for key in keys:
class_indexes = np.where(valid_labels == key)[0][:one_class_size]
print(type(valid_labels[class_indexes]), valid_labels[class_indexes])
valid_labels.shape
logreg = LogisticRegression()
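# A minimal completion sketch (assumption: a 5000-sample subset keeps training fast):
# flatten the images, fit the classifier defined above, and score it on the test set.
n_train = 5000
X_train = train_dataset[:n_train].reshape(n_train, -1)
y_train = train_labels[:n_train]
X_test = test_dataset.reshape(len(test_dataset), -1)
logreg.fit(X_train, y_train)
print('Test accuracy:', logreg.score(X_test, test_labels))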
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Optimization
Step2: Curve fitting using least squares
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def f(X):
Cost function.
return (X**2).sum()
X0 = [1.,1.] # Initial guess
sol = optimize.minimize(f, X0, method = "nelder-mead")
X = sol.x
print "Solution: ", X
def func(x, omega, tau):
return np.exp(-x / tau) * np.sin(omega * x)
xdata = np.linspace(0, 3., 100)
y = func(xdata, omega = 2. * np.pi, tau = 10.)
ydata = y + .5 * np.random.normal(size=len(xdata))
params, cov = optimize.curve_fit(func, xdata, ydata)
omega, tau = params
ysol = func(xdata, omega, tau)
fig = plt.figure(0)
plt.clf()
plt.plot(xdata, y, label = "Target")
plt.plot(xdata, ydata, "o", label = "Target + noise")
plt.plot(xdata, ysol, label = "Solution")
plt.grid()
plt.xlabel("$x$")
plt.ylabel("$y$")
plt.legend()
plt.show()
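# Optional follow-up (an assumption, not in the original snippet): the covariance
# matrix returned by curve_fit gives 1-sigma uncertainties on the fitted parameters.
perr = np.sqrt(np.diag(cov))
print("omega = %.3f +/- %.3f" % (omega, perr[0]))
print("tau = %.3f +/- %.3f" % (tau, perr[1]))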
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this tutorial, we will use the MNIST hand-written digits data to show how images can be encoded and decoded (restored) using feed-forward networks. We will visualize the original and the restored images. We illustrate feed-forward networks based on two autoencoders: a simple autoencoder and a deep autoencoder.
Step2: There are two run modes
Step3: Data reading
Step4: <a id='#Model Creation'></a>
Step5: The input data is a set of hand written digits images each of 28 x 28 pixels. In this tutorial, we will consider each image as a linear array of 784 pixel values. These pixels are considered as an input having 784 dimensions, one per pixel. Since the goal of the autoencoder is to compress the data and reconstruct the original image, the output dimension is same as the input dimension. We will compress the input to mere 32 dimensions (referred to as the encoding_dim). Additionally, since the maximum input value is 255, we normalize the input between 0 and 1.
Step6: Setup the network for training and testing
Step7: Let us train the simple autoencoder. We create a training and a test reader
Step8: Visualize the simple autoencoder results
Step9: Let us plot the original and the decoded image. They should look visually similar.
Step10: Deep autoencoder
Step11: The encoding dimensions are 128, 64 and 32 while the decoding dimensions are symmetrically opposite 64, 128 and 784. This increases the number of parameters used to model the transformation and achieves lower error rates at the cost of longer training duration and memory footprint. If we train this deep encoder for larger number iterations by turning the isFast flag to be False, we get a lower error and the reconstructed images are also marginally better.
Step12: Visualize the deep autoencoder results
Step13: Let us plot the original and the decoded image with the deep autoencoder. They should look visually similar.
Step14: We have shown how to encode and decode an input. In this section we will explore how we can compare one to another and also show how to extract an encoded input for a given input. For visualizing high dimension data in 2D, t-SNE is probably one of the best methods. However, it typically requires relatively low-dimensional data. So a good strategy for visualizing similarity relationships in high-dimensional data is to encode data into a low-dimensional space (e.g. 32 dimensional) using an autoencoder first, extract the encoding of the input data followed by using t-SNE for mapping the compressed data to a 2D plane.
Step15: We will compute cosine distance between two images using scipy.
Step16: Note
Step17: Let us compare the distance between different digits.
Step18: Print the results of the deep encoder test error for regression testing
|
<ASSISTANT_TASK:>
Python Code:
from IPython.display import Image
# Figure 1
Image(url="http://cntk.ai/jup/MNIST-image.jpg", width=300, height=300)
# Import the relevant modules
from __future__ import print_function # Use a function definition from future version (say 3.x from 2.7 interpreter)
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# Import CNTK
import cntk as C
#import cntk.tests.test_utils
#cntk.tests.test_utils.set_device_from_pytest_env() # (only needed for our build system)
C.cntk_py.set_fixed_random_seed(1) # fix a random seed for CNTK components
%matplotlib inline
isFast = True
# Read a CTF formatted text (as mentioned above) using the CTF deserializer from a file
def create_reader(path, is_training, input_dim, num_label_classes):
return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
labels_viz = C.io.StreamDef(field='labels', shape=num_label_classes, is_sparse=False),
features = C.io.StreamDef(field='features', shape=input_dim, is_sparse=False)
)), randomize = is_training, max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1)
# Ensure the training and test data is generated and available for this tutorial.
# We search in two locations in the toolkit for the cached MNIST data set.
data_found = False
for data_dir in [os.path.join("..", "Examples", "Image", "DataSets", "MNIST"),
os.path.join("data", "MNIST"),
r'E:\local\TestData\mnist-data\Data']:
train_file = os.path.join(data_dir, "Train-28x28_cntk_text.txt")
test_file = os.path.join(data_dir, "Test-28x28_cntk_text.txt")
if os.path.isfile(train_file) and os.path.isfile(test_file):
data_found = True
break
if not data_found:
raise ValueError("Please generate the data by completing CNTK 103 Part A")
print("Data directory is {0}".format(data_dir))
# Figure 2
Image(url="http://cntk.ai/jup/SimpleAEfig.jpg", width=200, height=200)
input_dim = 784
encoding_dim = 64
output_dim = input_dim
def create_model(features):
with C.layers.default_options(init = C.glorot_uniform()):
# We scale the input pixels to 0-1 range
encode = C.layers.Dense(encoding_dim, activation = C.relu)(features/255.0)
decode = C.layers.Dense(input_dim, activation = C.sigmoid)(encode)
return decode
def train_and_test(reader_train, reader_test, model_func):
###############################################
# Training the model
###############################################
# Instantiate the input and the label variables
input = C.input_variable(input_dim)
label = C.input_variable(input_dim)
# Create the model function
model = model_func(input)
# The labels for this network is same as the input MNIST image.
# Note: Inside the model we are scaling the input to 0-1 range
# Hence we rescale the label to the same range
# We show how one can use their custom loss function
# loss = -(y* log(p)+ (1-y) * log(1-p)) where p = model output and y = target
# We have normalized the input between 0-1. Hence we scale the target to same range
target = label/255.0
loss = -(target * C.log(model) + (1 - target) * C.log(1 - model))
label_error = C.classification_error(model, target)
# training config
epoch_size = 30000 # 30000 samples is half the dataset size
minibatch_size = 64
num_sweeps_to_train_with = 5 if isFast else 100
num_samples_per_sweep = 60000
num_minibatches_to_train = (num_samples_per_sweep * num_sweeps_to_train_with) // minibatch_size
# Instantiate the trainer object to drive the model training
lr_per_sample = [0.00003]
lr_schedule = C.learning_rate_schedule(lr_per_sample, C.UnitType.sample, epoch_size)
# Momentum
momentum_as_time_constant = C.momentum_as_time_constant_schedule(700)
# We use a variant of the Adam optimizer which is known to work well on this dataset
# Feel free to try other optimizers from
# https://www.cntk.ai/pythondocs/cntk.learner.html#module-cntk.learner
learner = C.fsadagrad(model.parameters,
lr=lr_schedule, momentum=momentum_as_time_constant)
# Instantiate the trainer
progress_printer = C.logging.ProgressPrinter(0)
trainer = C.Trainer(model, (loss, label_error), learner, progress_printer)
# Map the data streams to the input and labels.
# Note: for autoencoders input == label
input_map = {
input : reader_train.streams.features,
label : reader_train.streams.features
}
aggregate_metric = 0
for i in range(num_minibatches_to_train):
# Read a mini batch from the training data file
data = reader_train.next_minibatch(minibatch_size, input_map = input_map)
# Run the trainer on and perform model training
trainer.train_minibatch(data)
samples = trainer.previous_minibatch_sample_count
aggregate_metric += trainer.previous_minibatch_evaluation_average * samples
train_error = (aggregate_metric*100.0) / (trainer.total_number_of_samples_seen)
print("Average training error: {0:0.2f}%".format(train_error))
#############################################################################
# Testing the model
# Note: we use a test file reader to read data different from a training data
#############################################################################
# Test data for trained model
test_minibatch_size = 32
num_samples = 10000
num_minibatches_to_test = num_samples / test_minibatch_size
test_result = 0.0
# Test error metric calculation
metric_numer = 0
metric_denom = 0
test_input_map = {
input : reader_test.streams.features,
label : reader_test.streams.features
}
for i in range(0, int(num_minibatches_to_test)):
# We are loading test data in batches specified by test_minibatch_size
# Each data point in the minibatch is a MNIST digit image of 784 dimensions
# with one pixel per dimension that we will encode / decode with the
# trained model.
data = reader_test.next_minibatch(test_minibatch_size,
input_map = test_input_map)
# Specify the mapping of input variables in the model to actual
# minibatch data to be tested with
eval_error = trainer.test_minibatch(data)
        metric_numer += np.abs(eval_error * test_minibatch_size)
metric_denom += test_minibatch_size
# Average of evaluation errors of all test minibatches
test_error = (metric_numer*100.0) / (metric_denom)
print("Average test error: {0:0.2f}%".format(test_error))
return model, train_error, test_error
num_label_classes = 10
reader_train = create_reader(train_file, True, input_dim, num_label_classes)
reader_test = create_reader(test_file, False, input_dim, num_label_classes)
model, simple_ae_train_error, simple_ae_test_error = train_and_test(reader_train,
reader_test,
model_func = create_model )
# Read some data to run the eval
num_label_classes = 10
reader_eval = create_reader(test_file, False, input_dim, num_label_classes)
eval_minibatch_size = 50
eval_input_map = { input : reader_eval.streams.features }
eval_data = reader_eval.next_minibatch(eval_minibatch_size,
input_map = eval_input_map)
img_data = eval_data[input].asarray()
# Select a random image
np.random.seed(0)
idx = np.random.choice(eval_minibatch_size)
orig_image = img_data[idx,:,:]
decoded_image = model.eval(orig_image)[0]*255
# Print image statistics
def print_image_stats(img, text):
print(text)
print("Max: {0:.2f}, Median: {1:.2f}, Mean: {2:.2f}, Min: {3:.2f}".format(np.max(img),
np.median(img),
np.mean(img),
np.min(img)))
# Print original image
print_image_stats(orig_image, "Original image statistics:")
# Print decoded image
print_image_stats(decoded_image, "Decoded image statistics:")
# Define a helper function to plot a pair of images
def plot_image_pair(img1, text1, img2, text2):
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6, 6))
axes[0].imshow(img1, cmap="gray")
axes[0].set_title(text1)
axes[0].axis("off")
axes[1].imshow(img2, cmap="gray")
axes[1].set_title(text2)
axes[1].axis("off")
# Plot the original and the decoded image
img1 = orig_image.reshape(28,28)
text1 = 'Original image'
img2 = decoded_image.reshape(28,28)
text2 = 'Decoded image'
plot_image_pair(img1, text1, img2, text2)
# Figure 3
Image(url="http://cntk.ai/jup/DeepAEfig.jpg", width=500, height=300)
input_dim = 784
encoding_dims = [512, 256, 128,64,32]
decoding_dims = [64,128,256,512]
encoded_model = None
def create_deep_model(features):
with C.layers.default_options(init = C.layers.glorot_uniform()):
encode = C.element_times(C.constant(1.0/255.0), features)
for encoding_dim in encoding_dims:
encode = C.layers.Dense(encoding_dim, activation = C.relu)(encode)
global encoded_model
encoded_model= encode
decode = encode
for decoding_dim in decoding_dims:
decode = C.layers.Dense(decoding_dim, activation = C.relu)(decode)
decode = C.layers.Dense(input_dim, activation = C.sigmoid)(decode)
return decode
num_label_classes = 10
reader_train = create_reader(train_file, True, input_dim, num_label_classes)
reader_test = create_reader(test_file, False, input_dim, num_label_classes)
model, deep_ae_train_error, deep_ae_test_error = train_and_test(reader_train,
reader_test,
model_func = create_deep_model)
# Run the same image as the simple autoencoder through the deep encoder
orig_image = img_data[idx,:,:]
decoded_image = model.eval(orig_image)[0]*255
# Print image statistics
def print_image_stats(img, text):
print(text)
print("Max: {0:.2f}, Median: {1:.2f}, Mean: {2:.2f}, Min: {3:.2f}".format(np.max(img),
np.median(img),
np.mean(img),
np.min(img)))
# Print original image
print_image_stats(orig_image, "Original image statistics:")
# Print decoded image
print_image_stats(decoded_image, "Decoded image statistics:")
# Plot the original and the decoded image
img1 = orig_image.reshape(28,28)
text1 = 'Original image'
img2 = decoded_image.reshape(28,28)
text2 = 'Decoded image'
plot_image_pair(img1, text1, img2, text2)
# Read some data to run get the image data and the corresponding labels
num_label_classes = 10
reader_viz = create_reader(test_file, False, input_dim, num_label_classes)
image = C.input_variable(input_dim)
image_label = C.input_variable(num_label_classes)
viz_minibatch_size = 50
viz_input_map = {
image : reader_viz.streams.features,
image_label : reader_viz.streams.labels_viz
}
viz_data = reader_eval.next_minibatch(viz_minibatch_size,
input_map = viz_input_map)
img_data = viz_data[image].asarray()
imglabel_raw = viz_data[image_label].asarray()
# Map the image labels into indices in minibatch array
img_labels = [np.argmax(imglabel_raw[i,:,:]) for i in range(0, imglabel_raw.shape[0])]
from collections import defaultdict
label_dict=defaultdict(list)
for img_idx, img_label, in enumerate(img_labels):
label_dict[img_label].append(img_idx)
# Print indices corresponding to 3 digits
randIdx = [1, 3, 9]
for i in randIdx:
print("{0}: {1}".format(i, label_dict[i]))
from scipy import spatial
def image_pair_cosine_distance(img1, img2):
if img1.size != img2.size:
raise ValueError("Two images need to be of same dimension")
return 1 - spatial.distance.cosine(img1, img2)
# Let's compute the distance between two images of the same number
digit_of_interest = 6
digit_index_list = label_dict[digit_of_interest]
if len(digit_index_list) < 2:
print("Need at least two images to compare")
else:
imgA = img_data[digit_index_list[0],:,:][0]
imgB = img_data[digit_index_list[1],:,:][0]
    # Print the distance between the original images
    imgA_B_dist = image_pair_cosine_distance(imgA, imgB)
    print("Distance between the two original images: {0:.3f}".format(imgA_B_dist))
# Plot the two images
img1 = imgA.reshape(28,28)
text1 = 'Original image 1'
img2 = imgB.reshape(28,28)
text2 = 'Original image 2'
plot_image_pair(img1, text1, img2, text2)
# Decode the encoded stream
imgA_decoded = model.eval([imgA])[0]
imgB_decoded = model.eval([imgB]) [0]
imgA_B_decoded_dist = image_pair_cosine_distance(imgA_decoded, imgB_decoded)
    # Print the distance between the decoded images
    print("Distance between the two decoded images: {0:.3f}".format(imgA_B_decoded_dist))
# Plot the two images
# Plot the original and the decoded image
img1 = imgA_decoded.reshape(28,28)
text1 = 'Decoded image 1'
img2 = imgB_decoded.reshape(28,28)
text2 = 'Decoded image 2'
plot_image_pair(img1, text1, img2, text2)
imgA = img_data[digit_index_list[0],:,:][0]
imgA_encoded = encoded_model.eval([imgA])
print("Length of the original image is {0:3d} and the encoded image is {1:3d}".format(len(imgA),
len(imgA_encoded[0])))
print("\nThe encoded image: ")
print(imgA_encoded[0])
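# A minimal sketch (not part of the original notebook) of the t-SNE step described
# earlier: encode the visualization minibatch with the trained deep autoencoder, then
# map the low-dimensional encodings to 2D with scikit-learn's t-SNE, coloured by digit.
# Assumes scikit-learn is installed; img_data and img_labels come from the cells above.
from sklearn.manifold import TSNE
encoded_batch = np.array([encoded_model.eval([img_data[i, :, :][0]])[0]
                          for i in range(img_data.shape[0])])
embedding_2d = TSNE(n_components=2, perplexity=10, random_state=0).fit_transform(encoded_batch)
plt.figure(figsize=(6, 6))
plt.scatter(embedding_2d[:, 0], embedding_2d[:, 1], c=img_labels, cmap="tab10")
plt.colorbar()
plt.title("t-SNE of the autoencoder encodings (sketch)")
plt.show()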
digitA = 3
digitB = 8
digitA_index = label_dict[digitA]
digitB_index = label_dict[digitB]
imgA = img_data[digitA_index[0],:,:][0]
imgB = img_data[digitB_index[0],:,:][0]
# Print the distance between the original images
imgA_B_dist = image_pair_cosine_distance(imgA, imgB)
print("Distance between the two original images: {0:.3f}".format(imgA_B_dist))
# Plot the two images
img1 = imgA.reshape(28,28)
text1 = 'Original image 1'
img2 = imgB.reshape(28,28)
text2 = 'Original image 2'
plot_image_pair(img1, text1, img2, text2)
# Decode the encoded stream
imgA_decoded = model.eval([imgA])[0]
imgB_decoded = model.eval([imgB])[0]
imgA_B_decoded_dist = image_pair_cosine_distance(imgA_decoded, imgB_decoded)
# Print the distance between the decoded images
print("Distance between the two decoded images: {0:.3f}".format(imgA_B_decoded_dist))
# Plot the original and the decoded image
img1 = imgA_decoded.reshape(28,28)
text1 = 'Decoded image 1'
img2 = imgB_decoded.reshape(28,28)
text2 = 'Decoded image 2'
plot_image_pair(img1, text1, img2, text2)
# Simple autoencoder test error
print(simple_ae_test_error)
# Deep autoencoder test error
print(deep_ae_test_error)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 🔗 Connect Your Google Cloud Project
Step2: ☁ Configure Your Google Cloud Project
Step3: Enable the Cloud SQL Admin API within your project.
Step4: ☁️ Setting up Cloud SQL
Step5: 🎬 Create a Movies Database
Step6: 🥷 Create Batman Database User
Step7: (image omitted)
Step8: 🐬 Connect to a MySQL Instance
Step9: ✅ Basic Usage
Step10: To use this connector with SQLAlchemy, we use the creator argument for sqlalchemy.create_engine
Step11: You have successfully been able to connect to a Cloud SQL instance from this notebook and make a query. YOU DID IT! 🕺 🎊 💃
Step12: 🪪 IAM Database Authentication
Step13: Now the current IAM user can be added to the Cloud SQL instance as an IAM database user.
Step14: Finally, let's update our getconn function to connect to our Cloud SQL instance with IAM database authentication enabled.
Step15: Success! You were able to connect to Cloud SQL as an IAM-authenticated user using the Cloud SQL Python Connector! 🍾 👏 🏆
Step16: ✍ Appendix
|
<ASSISTANT_TASK:>
Python Code:
from google.colab import auth
auth.authenticate_user()
#@markdown Please fill in the value below with your GCP project ID and then run the cell.
# Please fill in these values.
project_id = "" #@param {type:"string"}
# Quick input validations.
assert project_id, "⚠️ Please provide a Google Cloud project ID"
# Configure gcloud.
!gcloud config set project {project_id}
# grant Cloud SQL Client role to authenticated user
current_user = !gcloud auth list --filter=status:ACTIVE --format="value(account)"
!gcloud projects add-iam-policy-binding {project_id} \
--member=user:{current_user[0]} \
--role="roles/cloudsql.client"
# enable Cloud SQL Admin API
!gcloud services enable sqladmin.googleapis.com
#@markdown Please fill in the both the Google Cloud region and name for your Cloud SQL instance. Once filled in, run the cell.
# Please fill in these values.
region = "us-central1" #@param {type:"string"}
instance_name = "" #@param {type:"string"}
# Quick input validations.
assert region, "⚠️ Please provide a Google Cloud region"
assert instance_name, "⚠️ Please provide the name of your instance"
# check if Cloud SQL instance exists in the provided region
database_version = !gcloud sql instances describe {instance_name} --format="value(databaseVersion)"
if database_version[0].startswith("MYSQL"):
print("Found existing MySQL Cloud SQL Instance!")
else:
print("Creating new Cloud SQL instance...")
password = input("Please provide a password to be used for database 'root' user: ")
!gcloud sql instances create {instance_name} --database-version=MYSQL_8_0 \
--region={region} --cpu=1 --memory=4GB --root-password={password} \
--database-flags=cloudsql_iam_authentication=On
!gcloud sql databases create movies --instance={instance_name}
!gcloud sql users create batman \
--instance={instance_name} \
--password="robin"
# install dependencies
import sys
!{sys.executable} -m pip install cloud-sql-python-connector["pymysql"] SQLAlchemy
# initialize parameters
INSTANCE_CONNECTION_NAME = f"{project_id}:{region}:{instance_name}" # i.e demo-project:us-central1:demo-instance
print(f"Your instance connection name is: {INSTANCE_CONNECTION_NAME}")
DB_USER = "batman"
DB_PASS = "robin"
DB_NAME = "movies"
from google.cloud.sql.connector import Connector
import sqlalchemy
# initialize Connector object
connector = Connector()
# function to return the database connection object
def getconn():
conn = connector.connect(
INSTANCE_CONNECTION_NAME,
"pymysql",
user=DB_USER,
password=DB_PASS,
db=DB_NAME
)
return conn
# create connection pool with 'creator' argument to our connection object function
pool = sqlalchemy.create_engine(
"mysql+pymysql://",
creator=getconn,
)
# connect to connection pool
with pool.connect() as db_conn:
# create ratings table in our movies database
db_conn.execute(
"CREATE TABLE IF NOT EXISTS ratings "
"( id SERIAL NOT NULL, title VARCHAR(255) NOT NULL, "
"genre VARCHAR(255) NOT NULL, rating FLOAT NOT NULL, "
"PRIMARY KEY (id));"
)
# insert data into our ratings table
insert_stmt = sqlalchemy.text(
"INSERT INTO ratings (title, genre, rating) VALUES (:title, :genre, :rating)",
)
# insert entries into table
db_conn.execute(insert_stmt, title="Batman Begins", genre="Action", rating=8.5)
db_conn.execute(insert_stmt, title="Star Wars: Return of the Jedi", genre="Action", rating=9.1)
db_conn.execute(insert_stmt, title="The Breakfast Club", genre="Drama", rating=8.3)
# query and fetch ratings table
results = db_conn.execute("SELECT * FROM ratings").fetchall()
# show results
for row in results:
print(row)
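    # Illustrative extra query (not part of the original walkthrough): fetch only the
    # "Action" rows using a bound parameter with sqlalchemy.text, reusing db_conn.
    genre_stmt = sqlalchemy.text("SELECT title, rating FROM ratings WHERE genre = :genre")
    for row in db_conn.execute(genre_stmt, genre="Action").fetchall():
        print(row)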
# cleanup connector object
connector.close()
# add Cloud SQL Instance User role to current logged in IAM user
!gcloud projects add-iam-policy-binding {project_id} \
--member=user:{current_user[0]} \
--role="roles/cloudsql.instanceUser"
# add current logged in IAM user to database
!gcloud sql users create {current_user[0]} \
--instance={instance_name} \
--type=cloud_iam_user
from google.cloud.sql.connector import Connector
import google.auth
from google.auth.transport.requests import Request
import sqlalchemy
# IAM database user parameter (IAM user's email before the "@" sign, mysql truncates usernames)
# ex. IAM user with email "demo-user@test.com" would have database user name "demo-user"
IAM_USER = current_user[0].split("@")[0]
# get application default credentials of IAM user (current logged in user)
credentials, project = google.auth.default()
# refresh credentials if expired
if not credentials.valid:
request = Request()
credentials.refresh(request)
# initialize connector
connector = Connector()
# getconn now using IAM user and OAuth 2 token with IAM Auth enabled
def getconn():
conn = connector.connect(
INSTANCE_CONNECTION_NAME,
"pymysql",
user=IAM_USER,
password=credentials.token,
db="", # log in to instance but don't connect to specific database
enable_iam_auth=True
)
return conn
# create connection pool
pool = sqlalchemy.create_engine(
"mysql+pymysql://",
creator=getconn,
)
# connect to connection pool
with pool.connect() as db_conn:
# get current datetime from database
results = db_conn.execute("SELECT NOW()").fetchone()
# output time
print("Current time: ", results[0])
# cleanup connector
connector.close()
# delete Cloud SQL instance
!gcloud sql instances delete {instance_name}
from google.cloud.sql.connector import Connector, IPTypes
import sqlalchemy
# initialize connector
connector = Connector()
# getconn now set to private IP
def getconn():
conn = connector.connect(
INSTANCE_CONNECTION_NAME, # <PROJECT-ID>:<REGION>:<INSTANCE-NAME>
"pymysql",
user=DB_USER,
password=DB_PASS,
db=DB_NAME,
ip_type=IPTypes.PRIVATE
)
return conn
# create connection pool
pool = sqlalchemy.create_engine(
"mysql+pymysql://",
creator=getconn,
)
# connect to connection pool
with pool.connect() as db_conn:
# query database and fetch results
results = db_conn.execute("SELECT * FROM ratings").fetchall()
# show results
for row in results:
print(row)
# cleanup connector
connector.close()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The print function
Step2: Variables
Step3: Other Value Types
Step4: Selecting / Slicing
Step5: To access a single value in a list use this syntax
Step6: To select multiple values from a list use this syntax
Step7: Notice
Step8: You can use negative indexing in selecting multiple values.
Step9: The third position in the slice is the step. If the step is negative, the list is returned in descending order.
Step10: Working with Strings
Step11: Notice
Step12: Unicode
Step13: String Formatting
Step14: Other formatters could be used to format numbers
Step15: To find unicode symbols
Step16: Using format(*args, **kwargs) function
Step17: Mathematics
Step18: Notice
Step19: To raise a number to any power use the double asterisk **. To represent $a^{n}$
Step20: To calculate the remainder (modulo operator) use %. To represent $a \mod b = r$
Step21: You can
Step22: Loops
Step23: range
Step24: Notice
Step25: Notice
Step26: While Loop
Step27: If .. Else
Step28: If you like Math
Step30: Functions
|
<ASSISTANT_TASK:>
Python Code:
1+2
1+1
1+2
print(1+2)
a = 4
b = 1.5
c = 121212121212121212121212121212121212121212121212121212121212121212121212121212121212121212121212121212121212
d = 1j
e = 1/3
f = True
a+b
a*c
(b+d)*a
a+f
type(1.5)
my_name = "Roshan"
print(my_name)
my_list = [1,2,3,4,5]
my_list
my_list + [6]
my_list
my_list += [6,7,8]
my_list
my_list.append(9)
my_list
my_tuple = (1,2,3)
my_tuple
my_tuple + (4,5,6)
my_dict = {"name":"Roshan", "credit":100}
my_dict
my_dict["name"]
my_dict["level"] = 4
my_dict
my_dict.values()
my_dict.keys()
len(my_list)
my_list[0]
my_list[1:2]
my_list[:3]
my_list[3:]
my_list[-1]
my_list[-2]
my_list[-2:]
my_list[:-2]
my_list[3:-1]
my_list[::2]
my_list[3::2]
my_list[::-1]
my_name
my_name[0]
my_name[:2]
# Sorted by most spoken languages in order
divide_by_zero = {"zho":"你不能除以零",
"eng":"You cannot divide by zero",
"esp":"No se puede dividir por cero",
"hin":"आप शून्य से विभाजित नहीं किया जा सकता \u2248",
"arb":"لا يمكن القسمة على صفر"}
print(divide_by_zero["hin"])
type(divide_by_zero["hin"])
first_name = "Roshan"
last_name = "Rush"
formatted_name = "%s, %s." % (last_name, first_name[0])
print(formatted_name)
print("π ≈ %.2f" % 3.14159)
homeworks = 15.75
midterm = 22
final = 51
total = homeworks + midterm + final
print("Homeworks: %.2f\nMid-term: %.2f\nFinal: %.2f\nTotal: %.2f/100" % (homeworks, midterm, final, total))
url = "http://{language}.wikipedia.org/"
url = url.format(language="en")
url
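# format() also accepts positional arguments, and they can be mixed with keywords;
# a small illustrative example (not part of the original notebook):
greeting = "{0}, you have {credit} credits left.".format(my_name, credit=100)
greeting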
1+1
4-5
14/5
14//5
2*5
2**3
10 % 3
import math
n=52
k=1
math.factorial(n) / (math.factorial(k) * math.factorial(n-k))
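# The same binomial coefficient is available directly as math.comb on Python 3.8+;
# shown here only as a cross-check of the factorial formula above.
math.comb(n, k)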
for counter in [1,2,3,4]:
print(counter)
for counter in range(5):
print(counter)
list(range(1,10)) == list(range(1,5)) + list(range(5,10))
for counter in range(1,5):
print(counter)
for counter in range(2,10,2):
print(counter)
counter =1
while counter < 5:
print(counter)
counter += 1
if math.pi == 3.2:
print("Edward J. Goodwin was right!")
else:
print("π is irrational")
if math.sqrt(2) == (10/7):
print("Edward J. Goodwin was right!")
elif math.sqrt(2) != (10/7):
print("Square root of 2 is irrational")
probability = 0.3
if probability >= 0.75:
print("Sure thing")
elif probability >= 0.5:
print("Maybe")
elif probability >= 0.25:
print("Unusual")
else:
print("No way")
def get_circumference(r):
return math.pi * r * 2
get_circumference(5)
def binomial_coef(n, k):
    """Return the binomial coefficient n!/(k!*(n-k)!).

    Parameters
    ==========
    n, k : int
    """
    value = math.factorial(n)/(math.factorial(k)*math.factorial(n-k))
    return value
binomial_coef(52,2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Driving the Robot - Forwards, Backwards, Turns
Step2: In the code cell below, see if you can write a programme that drives the robot forwards at speed 0.5 for 3 seconds.
Step3: How would you get the robot to drive forwards for 2 seconds and then backwards for 3 seconds? Modify your programme in the cell above and try it out.
Step4: A Simple Square
Step5: Once you have found some values that allow your robot to turn through ninety degrees or so, see if you can get your robot to draw out a square.
Step6: So how did you do?
|
<ASSISTANT_TASK:>
Python Code:
%run 'Set-up.ipynb'
%run 'Loading scenes.ipynb'
%run 'vrep_models/PioneerP3DX.ipynb'
%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX
# Use the time library to set a wait duration
import time
#Tell the robot to move forward by setting both motors to speed 1
robot.move_forward(1)
#Wait for two seconds
time.sleep(2)
#At the end of the programme the simulation stops
#The robot returns to its original location
%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX
# Use the time library to set a wait duration
import time
#YOUR CODE HERE
%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX
# Use the time library to set a wait duration
import time
#YOUR CODE HERE
#FOR EXAMPLE, TO DRIVE CLOCKWISE, USE: robot.rotate_right()
#DON'T FORGET TO USE time.sleep(TIME_IN_SECONDS) to give the robot time to turn
%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX
import time
#try to get the robot to draw an L shape: forward, right angle turn, forward
%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX
import time
#Program to draw a square
%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX
import time
#side 1
robot.move_forward()
time.sleep(1)
#turn 1
robot.rotate_left(1.8)
time.sleep(0.45)
#side 2
robot.move_forward()
time.sleep(1)
#turn 2
robot.rotate_left(1.8)
time.sleep(0.45)
#side 3
robot.move_forward()
time.sleep(1)
#turn 3
robot.rotate_left(1.8)
time.sleep(0.45)
#side 4
robot.move_forward()
time.sleep(1)
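%%vrepsim '../scenes/OU_Pioneer.ttt' PioneerP3DX
import time
# An alternative, illustrative version of the square using a loop, assuming the same
# robot API and the same timing values as the explicit programme above.
for side in range(4):
    robot.move_forward()
    time.sleep(1)
    robot.rotate_left(1.8)
    time.sleep(0.45)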
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Affine coupling layer
Step4: Real NVP
Step5: Model training
Step6: Performance evaluation
|
<ASSISTANT_TASK:>
Python Code:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import regularizers
from sklearn.datasets import make_moons
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
data = make_moons(3000, noise=0.05)[0].astype("float32")
norm = layers.Normalization()
norm.adapt(data)
normalized_data = norm(data)
# Creating a custom layer with keras API.
output_dim = 256
reg = 0.01
def Coupling(input_shape):
input = keras.layers.Input(shape=input_shape)
t_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
t_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_1)
t_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_2)
t_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(t_layer_3)
t_layer_5 = keras.layers.Dense(
input_shape, activation="linear", kernel_regularizer=regularizers.l2(reg)
)(t_layer_4)
s_layer_1 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(input)
s_layer_2 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_1)
s_layer_3 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_2)
s_layer_4 = keras.layers.Dense(
output_dim, activation="relu", kernel_regularizer=regularizers.l2(reg)
)(s_layer_3)
s_layer_5 = keras.layers.Dense(
input_shape, activation="tanh", kernel_regularizer=regularizers.l2(reg)
)(s_layer_4)
return keras.Model(inputs=input, outputs=[s_layer_5, t_layer_5])
class RealNVP(keras.Model):
def __init__(self, num_coupling_layers):
super(RealNVP, self).__init__()
self.num_coupling_layers = num_coupling_layers
# Distribution of the latent space.
self.distribution = tfp.distributions.MultivariateNormalDiag(
loc=[0.0, 0.0], scale_diag=[1.0, 1.0]
)
self.masks = np.array(
[[0, 1], [1, 0]] * (num_coupling_layers // 2), dtype="float32"
)
self.loss_tracker = keras.metrics.Mean(name="loss")
self.layers_list = [Coupling(2) for i in range(num_coupling_layers)]
@property
def metrics(self):
        """List of the model's metrics.

        We make sure the loss tracker is listed as part of `model.metrics`
        so that `fit()` and `evaluate()` are able to `reset()` the loss tracker
        at the start of each epoch and at the start of an `evaluate()` call.
        """
return [self.loss_tracker]
def call(self, x, training=True):
log_det_inv = 0
direction = 1
if training:
direction = -1
for i in range(self.num_coupling_layers)[::direction]:
x_masked = x * self.masks[i]
reversed_mask = 1 - self.masks[i]
s, t = self.layers_list[i](x_masked)
s *= reversed_mask
t *= reversed_mask
gate = (direction - 1) / 2
x = (
reversed_mask
* (x * tf.exp(direction * s) + direction * t * tf.exp(gate * s))
+ x_masked
)
log_det_inv += gate * tf.reduce_sum(s, [1])
return x, log_det_inv
# Log likelihood of the normal distribution plus the log determinant of the jacobian.
def log_loss(self, x):
y, logdet = self(x)
log_likelihood = self.distribution.log_prob(y) + logdet
return -tf.reduce_mean(log_likelihood)
def train_step(self, data):
with tf.GradientTape() as tape:
loss = self.log_loss(data)
g = tape.gradient(loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(g, self.trainable_variables))
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
def test_step(self, data):
loss = self.log_loss(data)
self.loss_tracker.update_state(loss)
return {"loss": self.loss_tracker.result()}
model = RealNVP(num_coupling_layers=6)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.0001))
history = model.fit(
normalized_data, batch_size=256, epochs=300, verbose=2, validation_split=0.2
)
plt.figure(figsize=(15, 10))
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.legend(["train", "validation"], loc="upper right")
plt.ylabel("loss")
plt.xlabel("epoch")
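# Optional check (not part of the original example): report the average negative
# log-likelihood over the full normalized dataset, using the test_step defined above.
nll = model.evaluate(normalized_data, batch_size=256, verbose=0)
print("Average negative log-likelihood:", nll)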
# From data to latent space.
z, _ = model(normalized_data)
# From latent space to data.
samples = model.distribution.sample(3000)
x, _ = model.predict(samples)
f, axes = plt.subplots(2, 2)
f.set_size_inches(20, 15)
axes[0, 0].scatter(normalized_data[:, 0], normalized_data[:, 1], color="r")
axes[0, 0].set(title="Inference data space X", xlabel="x", ylabel="y")
axes[0, 1].scatter(z[:, 0], z[:, 1], color="r")
axes[0, 1].set(title="Inference latent space Z", xlabel="x", ylabel="y")
axes[0, 1].set_xlim([-3.5, 4])
axes[0, 1].set_ylim([-4, 4])
axes[1, 0].scatter(samples[:, 0], samples[:, 1], color="g")
axes[1, 0].set(title="Generated latent space Z", xlabel="x", ylabel="y")
axes[1, 1].scatter(x[:, 0], x[:, 1], color="g")
axes[1, 1].set(title="Generated data space X", label="x", ylabel="y")
axes[1, 1].set_xlim([-2, 2])
axes[1, 1].set_ylim([-2, 2])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Tokenizing Social Media
Step2: N-grams
Step3: Stop-words
Step4: Sentiment
|
<ASSISTANT_TASK:>
Python Code:
from nltk.tokenize import TreebankWordTokenizer
sentence = "How does nltk tokenize this sentence?"
tokenizer = TreebankWordTokenizer()
tokenizer.tokenize(sentence)
from nltk.tokenize.casual import casual_tokenize
tweet = "OMG @twitterguy that was sooooooooo cool :D :D :D!!!!"
print(casual_tokenize(tweet))
casual_tokenize(tweet, reduce_len=True, strip_handles=True)
from nltk.util import ngrams
list(ngrams(sentence.split(), 2))
import nltk
nltk.download("stopwords")
stop_words = nltk.corpus.stopwords.words("english")
stop_words[:10]
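# Illustrative follow-up: drop English stop-words from the tokenized tweet,
# reusing the casual_tokenize call and stop_words list defined above.
tokens = casual_tokenize(tweet, reduce_len=True, strip_handles=True)
[token for token in tokens if token.lower() not in stop_words]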
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sia = SentimentIntensityAnalyzer()
negative_sentence = "This is the worst!!! I hate it so much :( :("
sia.polarity_scores(negative_sentence)
sia.polarity_scores(tweet)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Emcee has multithreadding support. Set this to the number of cores you would like to use. In this demo we will use the python multiprocessing module support built in to emcee. Emcee can also use MPI if you're working on a cluster and want to distribute the job across nodes. See the documentation for that.
Step2: Fitting a Line
Step3: Least-Squares Fit
Step4: Maximum likelihood
Step5: What about the Errors?
Step6: That took about 10 seconds on my desktop (3.4 GHz Core i7).
Step7: This acceptance rate is okay. If it is too low, the emcee documentation suggests increasing the number of walkers until the acceptance fraction is at the desired level.
Step8: It looks like the walkers have "burned in" by 50 steps, so keep only those samples after 50 steps.
Step9: What does this look like? Let's visualize with the traditional corner plot.
Step10: Now let's plot a bunch of sample fits from the MCMC chain, on top of our data and other models.
Step12: Astrophysical Example
Step13: MCMC
Step14: Because of the larger parameter space and more complex model, this will take longer to run.
Step15: Again, look at the distribution of parameter estimates. But here, show the estimated parameters from the maximum likelihood model as the "true" values.
Step16: The offsets between the MCMC median values and the maximum likelihood are at least partially a consequence of our chosen prior on the temperature.
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import emcee
import corner
nthreads = 2
# define our true relation
m_true = 1.7
b_true = 2.7
f_true = 0.3
# generate some data
N = 30
x = np.sort(10*np.random.rand(N))
yerr = 0.2+0.6*np.random.rand(N)
y = m_true*x+b_true
y += np.abs(f_true*y) * np.random.randn(N)
y += yerr * np.random.randn(N)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.errorbar(x, y,
yerr=yerr,
ls='',
marker='.',
color='gray',
label='Data')
ax.plot(x, m_true*x + b_true,
color='black',
ls='-',
label='True Relation')
ax.set_ylabel('y', fontsize='x-large')
ax.set_xlabel('x', fontsize='x-large')
ax.minorticks_on()
ax.legend(loc='best')
A = np.vstack((np.ones_like(x), x)).T
C = np.diag(yerr * yerr)
cov = np.linalg.inv(np.dot(A.T, np.linalg.solve(C, A)))
b_ls, m_ls = np.dot(cov, np.dot(A.T, np.linalg.solve(C, y)))
print('Least squares fitting result:')
print('slope: {0:1.2f}'.format(m_ls))
print('y-intercept: {0:1.2f}'.format(b_ls))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.errorbar(x, y,
yerr=yerr,
ls='',
marker='.',
color='gray',
label='Data')
ax.plot(x, m_true*x + b_true,
color='black',
ls='-',
label='True Relation')
ax.plot(x, m_ls * x + b_ls,
color='red',
ls=':',
label='Least Squares')
ax.set_ylabel('y', fontsize='x-large')
ax.set_xlabel('x', fontsize='x-large')
ax.minorticks_on()
ax.legend(loc='best')
import scipy.optimize as op
def lnlike(theta, x, y, yerr):
b, m, lnf = theta
model = m * x + b
inv_sigma2 = 1.0/(yerr**2 + model**2*np.exp(2*lnf))
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
# let's make some initial guesses for our parameters
# the parameters are the y-intercept b, the slope m, and ln(f)
p2 = [b_true, m_true, f_true]
nll = lambda *args: -lnlike(*args)
result = op.minimize(nll,
p2,
args=(x, y, yerr))
if not(result['success']):
print("Max likelihood failed.")
print(result['message'])
ml_b, ml_m, ml_f = result['x']
print("Maximum likelihood result:")
print("slope: {0:1.2f}".format(ml_m))
print("y-intercept: {0:1.2f}".format(ml_b))
print("ln(f): {0:1.2f}".format(ml_f))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.errorbar(x, y,
yerr=yerr,
ls='',
marker='.',
color='gray',
label='Data')
ax.plot(x, m_true*x + b_true,
color='black',
ls='-',
label='True Relation')
ax.plot(x, m_ls * x + b_ls,
color='red',
ls=':',
label='Least Squares')
ax.plot(x, ml_m * x + ml_b,
color='blue',
ls='--',
label='Max likelihood')
ax.set_ylabel('y', fontsize='x-large')
ax.set_xlabel('x', fontsize='x-large')
ax.minorticks_on()
ax.legend(loc='best')
def lnprior(theta):
b, m, lnf = theta
if lnf >= 0.0:
return -np.inf
return 0.0
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
# now let's set up the MCMC chains
ndim = 3
nwalkers = 500
steps = 500
# initialize the walkers to the vicinity of the parameters derived from
# ML
pos = [result["x"] + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
# initialze the sampler
sampler = emcee.EnsembleSampler(nwalkers,
ndim,
lnprob,
args=(x,
y,
yerr),
threads=nthreads)
# go! go! go!
# run the sampler for 500 steps
sampler.run_mcmc(pos, steps)
samples = sampler.chain
print("Mean acceptance rate is: {0:1.2f}".format(np.mean(sampler.acceptance_fraction)))
fig = plt.figure()
dim_name = [r'$b$', r'$m$', r'$\ln f$']
for dim in range(ndim):
ax = fig.add_subplot(ndim, 1, dim+1)
for i in range(nwalkers):
ax.plot(np.arange(steps),
samples[i, :, dim],
ls='-',
color='black',
alpha=10./nwalkers)
ax.set_ylabel(dim_name[dim], fontsize='large')
ax.set_xlabel('step', fontsize='large')
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
fig = corner.corner(samples,
labels=[r"$b$", r"$m$", r"$\ln\,f$"],
quantiles=[0.16, 0.5, 0.84],
truths=[b_true,
m_true,
np.log(f_true)],
show_titles=True)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.errorbar(x, y,
yerr=yerr,
ls='',
marker='.',
color='gray',
label='Data')
ax.plot(x, m_true*x + b_true,
color='black',
ls='-',
label='True Relation')
ax.plot(x, m_ls * x + b_ls,
color='red',
ls=':',
label='Least Squares')
ax.plot(x, ml_m * x + ml_b,
color='blue',
ls='--',
label='Max likelihood')
for b, m, lnf in samples[np.random.randint(len(samples), size=100)]:
ax.plot(x,
m * x + b,
color='green',
alpha=0.1)
ax.set_ylabel('y', fontsize='x-large')
ax.set_xlabel('x', fontsize='x-large')
ax.minorticks_on()
ax.legend(loc='best')
samples[:, 2] = np.exp(samples[:, 2])
b_mcmc, m_mcmc, f_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print("MCMC Parameter estimates:")
print("slope: {0:1.2f} (+{1:1.2f}, -{2:1.2f})".format(m_mcmc[0],
m_mcmc[1],
m_mcmc[2]))
print("y-intercept: {0:1.2f} (+{1:1.2f}, -{2:1.2f})".format(b_mcmc[0],
b_mcmc[1],
b_mcmc[2]))
print("\nTrue values:")
print("slope: {0:1.2f}".format(m_true))
print("y-intercept: {0:1.2f}".format(b_true))
from dust_emissivity.blackbody import modified_blackbody
import astropy.units as u
def fit_bb(x, *p):
    """Simple wrapper function to get the units right.

    I don't care about the absolute amplitude, so the 1e-9 factor is just
    for numerical happiness.
    """
return 1.e-9* p[1] * modified_blackbody((x*u.micron).to(u.Hz, equivalencies=u.spectral()),
p[0] * u.K,
beta=p[2],
kappa0=0.48*u.m**2/u.kg,
nu0=(250*u.micron).to('Hz', u.spectral())).to('Jy').value
FIRm = np.array([(70., 50., 2.6),
(100., 55., 2.3),
(160., 34., 1.6),
(250., 12., 0.8),
(350., 4.6, 0.3),
(500., 1.3, 0.1)],
dtype=[('wave', float),
('flux', float),
('dflux', float)])
plotrange = np.arange(FIRm['wave'][0],
FIRm['wave'][-1],
1)
def lnlike(theta, x, y, yerr):
T, amp, beta, lnf = theta
model = fit_bb(x, T, amp, beta)
inv_sigma2 = 1.0 / (yerr**2 + model**2*np.exp(2*lnf))
return -0.5 * np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2))
# initial guesses. 25K, arbitrary
p0 = [25, 1, 1.8, -1]
nll = lambda *args: -lnlike(*args)
maxlike = op.minimize(nll, p0,
args=(FIRm['wave'],
FIRm['flux'],
FIRm['dflux']))
Tfit, Ampfit, betafit, lnffit = maxlike["x"]
print("Max likelihood:")
print("T: {0:1.1f} K".format(Tfit))
print("amp: {0:1.1f}".format(Ampfit))
print("beta: {0:1.2f}".format(betafit))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.errorbar(FIRm['wave'],
FIRm['flux'],
yerr=FIRm['dflux'],
ls='',
marker='.',
color='black',
label='Herschel PACS+SPIRE')
ax.plot(plotrange,
fit_bb(plotrange,
Tfit,
Ampfit,
betafit),
color='red',
label='Max likelihood')
ax.set_ylabel(r'F$_{\nu}$')
ax.set_xlabel('$\lambda$ ($\mu m$)')
ax.set_xlim([60, 600])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(loc='best')
def lnprior(theta):
T, amp, lnf, beta = theta
if T >= 2.73 and amp > 0.:
return -1 * (T - 25)**2 / (2 * 2.5**2)
return -np.inf
def lnprob(theta, x, y, yerr):
lp = lnprior(theta)
if not(np.isfinite(lp)):
return -np.inf
return lp + lnlike(theta, x, y, yerr)
ndim, nwalkers = 4, 300
pos = [maxlike["x"] + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(FIRm['wave'],
FIRm['flux'],
FIRm['dflux']),
threads=nthreads)
sampler.run_mcmc(pos, 1000)
samples = sampler.chain[:, 100:, :].reshape((-1, ndim))
# show best-fit values as the "truth" values
fig = corner.corner(samples,
labels=["T", "Amp", r"$\beta$", r"$\ln\,f$"],
quantiles=[0.16, 0.5, 0.84],
show_titles=True,
truths=[Tfit, Ampfit, betafit, lnffit])
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.errorbar(FIRm['wave'],
FIRm['flux'],
yerr=FIRm['dflux'],
ls='',
marker='.',
color='black',
label='Herschel PACS+SPIRE')
ax.plot(plotrange,
fit_bb(plotrange,
Tfit,
Ampfit,
betafit),
color='red',
label='Max likelihood')
for T, A, b, lnf in samples[np.random.randint(len(samples), size=100)]:
ax.plot(plotrange,
fit_bb(plotrange, T, A, b),
color='green',
alpha=0.05)
ax.set_ylabel(r'F$_{\nu}$')
ax.set_xlabel('$\lambda$ ($\mu m$)')
ax.set_xlim([60, 600])
ax.set_yscale('log')
ax.set_xscale('log')
ax.legend(loc='best')
samples[:, 3] = np.exp(samples[:, 3])
T_mcmc, A_mcmc, beta_mcmc, f_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print("MCMC Parameter estimates:")
print("T: {0:1.2f} (+{1:1.2f}, -{2:1.2f}) K".format(T_mcmc[0],
T_mcmc[1],
T_mcmc[2]))
print("beta: {0:1.2f} (+{1:1.2f}, -{2:1.2f})".format(beta_mcmc[0],
beta_mcmc[1],
beta_mcmc[2]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'pcmdi', 'sandbox-2', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run training
Step2: Run evaluation
Step3: See also ks_spectral.nc and kdv_spectral.nc in the same directory for reference simulations with KS and KdV equations.
Step4: Very simple evaluation
Step5: An example solution from our reference model, at high resolution
Step6: Coarse-grained simulation with our neural network
Step7: Difference between the neural network results and coarse-grained reference results
|
<ASSISTANT_TASK:>
Python Code:
! pip install -q -U xarray matplotlib
! rm -rf data-driven-discretization-1d
! git clone https://github.com/google/data-driven-discretization-1d.git
! pip install -q -e data-driven-discretization-1d
# install the seaborn bug-fix from https://github.com/mwaskom/seaborn/pull/1602
! pip install -U -q git+git://github.com/stfnrpplngr/seaborn.git@309a9de383fac4db1c66dbf87815c4ba0c439c59
# Ensure we're using Tensorflow 1.x in Colab. If not using Colab, remove this magic.
%tensorflow_version 1.x
import tensorflow as tf
assert tf.__version__[:2] == '1.'
! gsutil cp gs://data-driven-discretization-public/training-data/burgers.h5 .
%%time
! python data-driven-discretization-1d/pde_superresolution/scripts/run_training.py \
--checkpoint_dir burgers-checkpoints \
--equation burgers \
--hparams resample_factor=16,learning_stops=[5000,10000] \
--input_path burgers.h5
# Use pre-computed "exact" solution from WENO.
# You could also run this yourself using scripts/create_exact_data.py
! gsutil cp gs://data-driven-discretization-public/time-evolution/exact/burgers_weno.nc .
import xarray
# remove extra samples, so evaluation runs faster
reference = xarray.open_dataset('burgers_weno.nc').isel(sample=slice(10)).load()
reference.to_netcdf('burgers_weno_10samples.nc')
%%time
! python data-driven-discretization-1d/pde_superresolution/scripts/run_evaluation.py \
--checkpoint_dir burgers-checkpoints \
--exact_solution_path burgers_weno_10samples.nc \
--equation_name burgers \
--stop_times "[10]" \
--num_samples 10 \
--warmup 10 \
--time_delta 0.1 \
--time_max 50 \
--logtostderr
! gsutil cp gs://data-driven-discretization-public/time-evolution/model/burgers_16x_samples.nc .
import xarray
results = xarray.open_dataset('burgers_16x_samples.nc').load()
results
reference
reference.y[0].sel(time=slice(10, 60)).plot.imshow()
results.y[0].plot.imshow()
(results.y.sel(sample=0)
- reference.y.sel(sample=0, time=slice(10, 60)).coarsen(x=16).mean()
.assign_coords(x=results.x)).plot.imshow()
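# Optional check (added, not in the original notebook): summarise the mismatch between
# the coarse-grained reference and the neural-network run as a single number.
coarse_ref = (reference.y.sel(sample=0, time=slice(10, 60)).coarsen(x=16).mean()
              .assign_coords(x=results.x))
print("mean absolute difference:", float(abs(results.y.sel(sample=0) - coarse_ref).mean()))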
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
<ASSISTANT_TASK:>
Python Code::
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import make_column_transformer
ohe = OneHotEncoder()
df = pd.read_csv('onehotend_data.csv')
ohe.fit(df[['town']])
ct = make_column_transformer((OneHotEncoder(categories = ohe.categories_), ['town']), remainder = 'passthrough')
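# Usage sketch (added): apply the fitted column transformer to the same DataFrame.
# This assumes the CSV really contains a 'town' column, as implied by the fit above.
X_encoded = ct.fit_transform(df)
print(X_encoded.shape)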
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <a id='sample_matrices'></a>
Step3: <a id='sample_node_metrics'></a>
Step4: <a id='components'></a>
Step5: <a id='gc_parameters'></a>
Step6: <a id='edge_metrics'></a>
Step7: <a id='external_node'></a>
Step8: <a id='networkx_graph'></a>
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
from scipy import stats
from IPython.display import Image #this is for displaying the widgets in the github repo
from shaolin.dashboards.graph import GraphCalculator
forex_data = pd.read_hdf('gcalculator_data/forex_sample.h5')
forex_data.items,forex_data.minor_axis,forex_data.major_axis
fund = forex_data['fund']
fund.head()
matrices = {}
matrices['corr'] = fund.corr()
matrices['cov'] = fund.cov()
matrices['exchange'] = fund.corr()#alpha version hack, needs to be a matrix called exchange. you dont
#need to use it
matrix_panel = pd.Panel(matrices)
matrix_panel
def calculate_pdf(x,n=1000):
    """Fits a Gaussian Kernel, resamples n values and returns (X, p(x=X))."""
try:
kernel = stats.gaussian_kde(x,bw_method='scott')
X= kernel.resample(n)
p = kernel(X)
except:
X= x
p = np.ones(len(x))*1.0/len(x)
return X.flatten(),p.flatten()
def shannon_entropy(x,n=1000):
X,p = calculate_pdf(x,n)
ent = p*np.log2(p)
return -ent.sum()/(10*n)# this way we dont get values too big.
#After all, entropy is a purely arbitrary measure ;)
def mean_rets(x):
return x.pct_change().mean()*10e4
def std_rets(x):
    return x.pct_change().std()*10e4
def total_rets(x):
return x.pct_change().sum()
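# Quick illustration (added): the helper metrics applied to a single currency series.
# Assumes `fund` is the DataFrame loaded above.
print(shannon_entropy(fund.iloc[:, 0]))
print(mean_rets(fund.iloc[:, 0]), std_rets(fund.iloc[:, 0]))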
node_metrics = pd.DataFrame(columns=fund.columns)
funcs = [total_rets, np.mean, np.std, shannon_entropy,mean_rets,std_rets]
for fun in funcs:
node_metrics.ix[fun.__name__] = fund.apply(fun)
node_metrics
gc = GraphCalculator(node_metrics=node_metrics,matrix_panel=matrix_panel)
gc.widget
Image(filename='gcalculator_data/img_1.png')
gc.node = gc.node.dropna(axis=1)
gc.node.describe()
gc.node.head()
gc.edge
gc.edge.items
gc.edge['cov']
gc.edge['edge_betweenness']
gc.node_metrics
gc.G.node
gc.G.edge
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Print the version of pandas that has been imported.
Step2: 3. Print out all the version information of the libraries that are required by the pandas library.
Step3: DataFrame basics
Step4: 5. Display a summary of the basic information about this DataFrame and its data.
Step5: 6. Return the first 3 rows of the DataFrame df.
Step6: 7. Select just the 'animal' and 'age' columns from the DataFrame df.
Step7: 8. Select the data in rows [3, 4, 8] and in columns ['animal', 'age'].
Step8: 9. Select only the rows where the number of visits is greater than 3.
Step9: 10. Select the rows where the age is missing, i.e. is NaN.
Step10: 11. Select the rows where the animal is a cat and the age is less than 3.
Step11: 12. Select the rows where the age is between 2 and 4 (inclusive).
Step12: 13. Change the age in row 'f' to 1.5.
Step13: 14. Calculate the sum of all visits (the total number of visits).
Step14: 15. Calculate the mean age for each different animal in df.
Step15: 16. Append a new row 'k' to df with your choice of values for each column. Then delete that row to return the original DataFrame.
Step16: 17. Count the number of each type of animal in df.
Step17: 18. Sort df first by the values in the 'age' column in descending order, then by the value in the 'visits' column in ascending order.
Step18: 19. The 'priority' column contains the values 'yes' and 'no'. Replace this column with a column of boolean values
Step19: 20. In the 'animal' column, change the 'snake' entries to 'python'.
Step20: 21. For each animal type and each number of visits, find the mean age. In other words, each row is an animal, each column is a number of visits and the values are the mean ages (hint
Step21: DataFrames
Step22: 23. Given a DataFrame of numeric values, say
Step23: 24. Suppose you have a DataFrame with 10 columns of real numbers, for example
Step24: 25. How do you count how many unique rows a DataFrame has (i.e. ignore all rows that are duplicates)?
Step25: The next three puzzles are slightly harder...
Step26: 27. A DataFrame has a column of groups 'grps' and a column of numbers 'vals'. For example
Step27: 28. A DataFrame has two integer columns 'A' and 'B'. The values in 'A' are between 1 and 100 (inclusive). For each group of 10 consecutive integers in 'A' (i.e. (0, 10], (10, 20], ...), calculate the sum of the corresponding values in column 'B'.
Step28: DataFrames
Step29: Here's an alternative approach based on a cookbook recipe
Step30: And another approach using a groupby
Step31: 30. Consider a DataFrame containing rows and columns of purely numerical data. Create a list of the row-column index locations of the 3 largest values.
Step32: 31. Given a DataFrame with a column of group IDs, 'grps', and a column of corresponding integer values, 'vals', replace any negative values in 'vals' with the group mean.
Step33: 32. Implement a rolling mean over groups with window size 3, which ignores NaN values. For example consider the following DataFrame
Step34: Series and DatetimeIndex
Step35: 34. Find the sum of the values in s for every Wednesday.
Step36: 35. For each calendar month in s, find the mean of values.
Step37: 36. For each group of four consecutive calendar months in s, find the date on which the highest value occurred.
Step38: 37. Create a DateTimeIndex consisting of the third Thursday in each month for the years 2015 and 2016.
Step39: Cleaning Data
Step40: 39. The From_To column would be better as two separate columns! Split each string on the underscore delimiter _ to give a new temporary DataFrame with the correct values. Assign the correct column names to this temporary DataFrame.
Step41: 40. Notice how the capitalisation of the city names is all mixed up in this temporary DataFrame. Standardise the strings so that only the first letter is uppercase (e.g. "londON" should become "London".)
Step42: 41. Delete the From_To column from df and attach the temporary DataFrame from the previous questions.
Step43: 42. In the Airline column, you can see some extra punctuation and symbols have appeared around the airline names. Pull out just the airline name. E.g. '(British Airways. )' should become 'British Airways'.
Step44: 43. In the RecentDelays column, the values have been entered into the DataFrame as a list. We would like each first value in its own column, each second value in its own column, and so on. If there isn't an Nth value, the value should be NaN.
Step45: The DataFrame should look much better now.
Step46: 45. Check the index of s is lexicographically sorted (this is a necessary property for indexing to work correctly with a MultiIndex).
Step47: 46. Select the labels 1, 3 and 6 from the second level of the MultiIndexed Series.
Step48: 47. Slice the Series s; slice up to label 'B' for the first level and from label 5 onwards for the second level.
Step49: 48. Sum the values in s for each label in the first level (you should have Series giving you a total for labels A, B and C).
Step50: 49. Suppose that sum() (and other methods) did not accept a level keyword argument. How else could you perform the equivalent of s.sum(level=1)?
Step51: 50. Exchange the levels of the MultiIndex so we have an index of the form (letters, numbers). Is this new Series properly lexsorted? If not, sort it.
Step52: Minesweeper
Step53: 52. For this DataFrame df, create a new column of zeros (safe) and ones (mine). The probability of a mine occurring at each location should be 0.4.
Step54: 53. Now create a new column for this DataFrame called 'adjacent'. This column should contain the number of mines found on adjacent squares in the grid.
Step55: 54. For rows of the DataFrame that contain a mine, set the value in the 'adjacent' column to NaN.
Step56: 55. Finally, convert the DataFrame to a grid of the adjacent mine counts
Step57: Plotting
Step58: 57. Columns in your DataFrame can also be used to modify colors and sizes. Bill has been keeping track of his performance at work over time, as well as how good he was feeling that day, and whether he had a cup of coffee in the morning. Make a plot which incorporates all four features of this DataFrame.
Step59: 58. What if we want to plot multiple things? Pandas allows you to pass in a matplotlib Axis object for plots, and plots will also return an Axis object.
Step60: Now we're finally ready to create a candlestick chart, which is a very common tool used to analyze stock price data. A candlestick chart shows the opening, closing, highest, and lowest price for a stock during a time window. The color of the "candle" (the thick part of the bar) is green if the stock closed above its opening price, or red if below.
Step61: 59. Generate a day's worth of random stock data, and aggregate / reformat it so that it has hourly summaries of the opening, highest, lowest, and closing prices
Step62: 60. Now that you have your properly-formatted data, try to plot it yourself as a candlestick chart. Use the plot_candlestick(df) function above, or matplotlib's plot documentation if you get stuck.
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
pd.__version__
pd.show_versions()
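# Note (added): `data` and `labels` come from the puzzle statement and are not repeated
# in these solutions. An illustrative definition (values assumed, shown only so the
# snippets below can run standalone) would be:
data = {'animal': ['cat', 'cat', 'snake', 'dog', 'dog', 'cat', 'snake', 'cat', 'dog', 'dog'],
        'age': [2.5, 3, 0.5, np.nan, 5, 2, 4.5, np.nan, 7, 3],
        'visits': [1, 3, 2, 3, 2, 3, 1, 1, 2, 1],
        'priority': ['yes', 'yes', 'no', 'yes', 'no', 'no', 'no', 'yes', 'no', 'no']}
labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']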
df = pd.DataFrame(data, index=labels)
df.info()
# ...or...
df.describe()
df.iloc[:3]
# or equivalently
df.head(3)
df.loc[:, ['animal', 'age']]
# or
df[['animal', 'age']]
df.loc[df.index[[3, 4, 8]], ['animal', 'age']]
df[df['visits'] > 3]
df[df['age'].isnull()]
df[(df['animal'] == 'cat') & (df['age'] < 3)]
df[df['age'].between(2, 4)]
df.loc['f', 'age'] = 1.5
df['visits'].sum()
df.groupby('animal')['age'].mean()
df.loc['k'] = [5.5, 'dog', 'no', 2]
# and then deleting the new row...
df = df.drop('k')
df['animal'].value_counts()
df.sort_values(by=['age', 'visits'], ascending=[False, True])
df['priority'] = df['priority'].map({'yes': True, 'no': False})
df['animal'] = df['animal'].replace('snake', 'python')
df.pivot_table(index='animal', columns='visits', values='age', aggfunc='mean')
df.loc[df['A'].shift() != df['A']]
df.sub(df.mean(axis=1), axis=0)
df.sum().idxmin()
len(df) - df.duplicated(keep=False).sum()
# or perhaps more simply...
len(df.drop_duplicates(keep=False))
(df.isnull().cumsum(axis=1) == 3).idxmax(axis=1)
df.groupby('grps')['vals'].nlargest(3).sum(level=0)
df.groupby(pd.cut(df['A'], np.arange(0, 101, 10)))['B'].sum()
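# Note (added): the next few answers assume a DataFrame with an integer column 'X';
# an illustrative input (values assumed from the puzzle statement) is:
df = pd.DataFrame({'X': [7, 2, 0, 3, 4, 2, 5, 0, 3, 4]})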
izero = np.r_[-1, (df['X'] == 0).nonzero()[0]] # indices of zeros
idx = np.arange(len(df))
df['Y'] = idx - izero[np.searchsorted(izero - 1, idx) - 1]
# http://stackoverflow.com/questions/30730981/how-to-count-distance-to-the-previous-zero-in-pandas-series/
# credit: Behzad Nouri
x = (df['X'] != 0).cumsum()
y = x != x.shift()
df['Y'] = y.groupby((y != y.shift()).cumsum()).cumsum()
df['Y'] = df.groupby((df['X'] == 0).cumsum()).cumcount()
# We're off by one before we reach the first zero.
first_zero_idx = (df['X'] == 0).idxmax()
df['Y'].iloc[0:first_zero_idx] += 1
df.unstack().sort_values()[-3:].index.tolist()
# http://stackoverflow.com/questions/14941261/index-and-column-for-the-max-value-in-pandas-dataframe/
# credit: DSM
def replace(group):
mask = group<0
group[mask] = group[~mask].mean()
return group
df.groupby(['grps'])['vals'].transform(replace)
# http://stackoverflow.com/questions/14760757/replacing-values-with-groupby-means/
# credit: unutbu
g1 = df.groupby(['group'])['value'] # group values
g2 = df.fillna(0).groupby(['group'])['value'] # fillna, then group values
s = g2.rolling(3, min_periods=1).sum() / g1.rolling(3, min_periods=1).count() # compute means
s.reset_index(level=0, drop=True).sort_index() # drop/sort index
# http://stackoverflow.com/questions/36988123/pandas-groupby-and-rolling-apply-ignoring-nans/
dti = pd.date_range(start='2015-01-01', end='2015-12-31', freq='B')
s = pd.Series(np.random.rand(len(dti)), index=dti)
s[s.index.weekday == 2].sum()
s.resample('M').mean()
s.groupby(pd.TimeGrouper('4M')).idxmax()
pd.date_range('2015-01-01', '2016-12-31', freq='WOM-3THU')
df['FlightNumber'] = df['FlightNumber'].interpolate().astype(int)
temp = df.From_To.str.split('_', expand=True)
temp.columns = ['From', 'To']
temp['From'] = temp['From'].str.capitalize()
temp['To'] = temp['To'].str.capitalize()
df = df.drop('From_To', axis=1)
df = df.join(temp)
df['Airline'] = df['Airline'].str.extract('([a-zA-Z\s]+)', expand=False).str.strip()
# note: using .strip() gets rid of any leading/trailing spaces
# there are several ways to do this, but the following approach is possibly the simplest
delays = df['RecentDelays'].apply(pd.Series)
delays.columns = ['delay_{}'.format(n) for n in range(1, len(delays.columns)+1)]
df = df.drop('RecentDelays', axis=1).join(delays)
letters = ['A', 'B', 'C']
numbers = list(range(10))
mi = pd.MultiIndex.from_product([letters, numbers])
s = pd.Series(np.random.rand(30), index=mi)
s.index.is_lexsorted()
# or more verbosely...
s.index.lexsort_depth == s.index.nlevels
s.loc[:, [1, 3, 6]]
s.loc[pd.IndexSlice[:'B', 5:]]
# or equivalently without IndexSlice...
s.loc[slice(None, 'B'), slice(5, None)]
s.sum(level=0)
# One way is to use .unstack()...
# This method should convince you that s is essentially
# just a regular DataFrame in disguise!
s.unstack().sum(axis=0)
new_s = s.swaplevel(0, 1)
# check
new_s.index.is_lexsorted()
# sort
new_s = new_s.sort_index()
p = pd.tools.util.cartesian_product([np.arange(X), np.arange(Y)])
df = pd.DataFrame(np.asarray(p).T, columns=['x', 'y'])
# One way is to draw samples from a binomial distribution.
df['mine'] = np.random.binomial(1, 0.4, X*Y)
# Here is one way to solve using merges.
# It's not necessary the optimal way, just
# the solution I thought of first...
df['adjacent'] = \
df.merge(df + [ 1, 1, 0], on=['x', 'y'], how='left')\
.merge(df + [ 1, -1, 0], on=['x', 'y'], how='left')\
.merge(df + [-1, 1, 0], on=['x', 'y'], how='left')\
.merge(df + [-1, -1, 0], on=['x', 'y'], how='left')\
.merge(df + [ 1, 0, 0], on=['x', 'y'], how='left')\
.merge(df + [-1, 0, 0], on=['x', 'y'], how='left')\
.merge(df + [ 0, 1, 0], on=['x', 'y'], how='left')\
.merge(df + [ 0, -1, 0], on=['x', 'y'], how='left')\
.iloc[:, 3:]\
.sum(axis=1)
# An alternative solution is to pivot the DataFrame
# to form the "actual" grid of mines and use convolution.
# See https://github.com/jakevdp/matplotlib_pydata2013/blob/master/examples/minesweeper.py
from scipy.signal import convolve2d
mine_grid = df.pivot_table(columns='x', index='y', values='mine')
counts = convolve2d(mine_grid.astype(complex), np.ones((3, 3)), mode='same').real.astype(int)
df['adjacent'] = (counts - mine_grid).ravel('F')
df.loc[df['mine'] == 1, 'adjacent'] = np.nan
df.drop('mine', axis=1)\
.set_index(['y', 'x']).unstack()
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
df = pd.DataFrame({"xs":[1,5,2,8,1], "ys":[4,2,1,9,6]})
df.plot.scatter("xs", "ys", color = "black", marker = "x")
df = pd.DataFrame({"productivity":[5,2,3,1,4,5,6,7,8,3,4,8,9],
"hours_in" :[1,9,6,5,3,9,2,9,1,7,4,2,2],
"happiness" :[2,1,3,2,3,1,2,3,1,2,2,1,3],
"caffienated" :[0,0,1,1,0,0,0,0,1,1,0,1,0]})
df.plot.scatter("hours_in", "productivity", s = df.happiness * 30, c = df.caffienated)
df = pd.DataFrame({"revenue":[57,68,63,71,72,90,80,62,59,51,47,52],
"advertising":[2.1,1.9,2.7,3.0,3.6,3.2,2.7,2.4,1.8,1.6,1.3,1.9],
"month":range(12)
})
ax = df.plot.bar("month", "revenue", color = "green")
df.plot.line("month", "advertising", secondary_y = True, ax = ax)
ax.set_xlim((-1,12))
#This function is designed to create semi-interesting random stock price data
import numpy as np
def float_to_time(x):
return str(int(x)) + ":" + str(int(x%1 * 60)).zfill(2) + ":" + str(int(x*60 % 1 * 60)).zfill(2)
def day_stock_data():
#NYSE is open from 9:30 to 4:00
time = 9.5
price = 100
results = [(float_to_time(time), price)]
while time < 16:
elapsed = np.random.exponential(.001)
time += elapsed
if time > 16:
break
price_diff = np.random.uniform(.999, 1.001)
price *= price_diff
results.append((float_to_time(time), price))
df = pd.DataFrame(results, columns = ['time','price'])
df.time = pd.to_datetime(df.time)
return df
def plot_candlestick(agg):
fig, ax = plt.subplots()
for time in agg.index:
ax.plot([time.hour] * 2, agg.loc[time, ["high","low"]].values, color = "black")
ax.plot([time.hour] * 2, agg.loc[time, ["open","close"]].values, color = agg.loc[time, "color"], linewidth = 10)
ax.set_xlim((8,16))
ax.set_ylabel("Price")
ax.set_xlabel("Hour")
ax.set_title("OHLC of Stock Value During Trading Day")
plt.show()
df = day_stock_data()
df.head()
df.set_index("time", inplace = True)
agg = df.resample("H").ohlc()
agg.columns = agg.columns.droplevel()
agg["color"] = (agg.close > agg.open).map({True:"green",False:"red"})
agg.head()
plot_candlestick(agg)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Creating features
Step2: Contain any of the following
Step3: Length of the URL
Step4: Create Model
Step5: Save model
Step6: Part 2
Step7: Part 3
Step8: Create api
Step9: Load model and create function that predicts an URL
Step10: Run API
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import zipfile
with zipfile.ZipFile('../datasets/phishing.csv.zip', 'r') as z:
f = z.open('phishing.csv')
data = pd.read_csv(f, index_col=False)
data.head()
data.phishing.value_counts()
data.url[data.phishing==1].sample(50, random_state=1).tolist()
keywords = ['https', 'login', '.php', '.html', '@', 'sign']
for keyword in keywords:
data['keyword_' + keyword] = data.url.str.contains(keyword).astype(int)
data['lenght'] = data.url.str.len() - 2
domain = data.url.str.split('/', expand=True).iloc[:, 2]
data['lenght_domain'] = domain.str.len()
domain.head(12)
data['isIP'] = (domain.str.replace('.', '') * 1).str.isnumeric().astype(int)
data['count_com'] = data.url.str.count('com')
data.sample(15, random_state=4)
X = data.drop(['url', 'phishing'], axis=1)
y = data.phishing
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
clf = RandomForestClassifier(n_jobs=-1, n_estimators=100)
cross_val_score(clf, X, y, cv=10)
clf.fit(X, y)
from sklearn.externals import joblib
joblib.dump(clf, '22_clf_rf.pkl', compress=3)
from m22_model_deployment import predict_proba
predict_proba('http://www.vipturismolondres.com/com.br/?atendimento=Cliente&/LgSgkszm64/B8aNzHa8Aj.php')
from flask import Flask
from flask.ext.restplus import Api
from flask.ext.restplus import fields
from sklearn.externals import joblib
from flask.ext.restplus import Resource
from sklearn.externals import joblib
import pandas as pd
app = Flask(__name__)
api = Api(
app,
version='1.0',
title='Phishing Prediction API',
description='Phishing Prediction API')
ns = api.namespace('predict',
description='Phishing Classifier')
parser = api.parser()
parser.add_argument(
'URL',
type=str,
required=True,
help='URL to be analyzed',
location='args')
resource_fields = api.model('Resource', {
'result': fields.String,
})
clf = joblib.load('22_clf_rf.pkl')
@ns.route('/')
class PhishingApi(Resource):
@api.doc(parser=parser)
@api.marshal_with(resource_fields)
def get(self):
args = parser.parse_args()
result = self.predict_proba(args)
return result, 200
def predict_proba(self, args):
url = args['URL']
url_ = pd.DataFrame([url], columns=['url'])
# Create features
keywords = ['https', 'login', '.php', '.html', '@', 'sign']
for keyword in keywords:
url_['keyword_' + keyword] = url_.url.str.contains(keyword).astype(int)
url_['lenght'] = url_.url.str.len() - 2
domain = url_.url.str.split('/', expand=True).iloc[:, 2]
url_['lenght_domain'] = domain.str.len()
url_['isIP'] = (url_.url.str.replace('.', '') * 1).str.isnumeric().astype(int)
url_['count_com'] = url_.url.str.count('com')
# Make prediction
p1 = clf.predict_proba(url_.drop('url', axis=1))[0,1]
print('url=', url,'| p1=', p1)
return {
"result": p1
}
app.run(debug=True, use_reloader=False, host='0.0.0.0', port=5000)
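# Illustrative client call (added). Once the API above is running, it can be queried with
# the 'URL' query parameter defined by the parser. Kept commented out because app.run()
# blocks this process; run it from a separate session, e.g.:
# import requests
# r = requests.get('http://localhost:5000/predict/', params={'URL': 'http://example.com/login.php'})
# print(r.json())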
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Querying the MP for garnets
Step2: Structure matching
Step3: Sorting out experimental data
Step4: Other garnet structures
Step5: Storing in SMACT compatible database
|
<ASSISTANT_TASK:>
Python Code:
from smact.structure_prediction import prediction, database, mutation, probability_models, structure, utilities
import json
import itertools
from itertools import zip_longest
import smact
# An optional utility to display a progress bar
# for long-running loops. `pip install tqdm`.
from tqdm import tqdm
from ipywidgets import widgets
from pymatgen.ext.matproj import MPRester
from pprint import pprint
from pymatgen.analysis import structure_matcher
from pymatgen.util.plotting import pretty_plot
import pandas as pd
#Use the API Key
m = MPRester()
#We use a csv file downloaded from the web version of MP with the MP ids
mp_df=pd.read_csv("_Materials Project .csv")
#Extracts a list of mp-ids
mp_ids=mp_df["Materials Id"].to_list()
print(f"We have {len(mp_ids)} entries with formula A3B2C3O12")
mp_df.head(10)
#Query materials project
data=m.query(criteria={"task_id": {"$in": mp_ids}}, properties=["pretty_formula","material_id","spacegroup.symbol","icsd_ids","e_above_hull","exp","structure","cif"])
#get structure of Ca3Fe2(SiO4)3
SM=structure_matcher.StructureMatcher(attempt_supercell=True)
known_garnet=m.query("mp-6672", properties=["pretty_formula","material_id","spacegroup.symbol","icsd_ids","e_above_hull","exp","structure","cif"])
known_garnet_structure=known_garnet[0]["structure"]
#Iterate over query data and verify if they have the correct structure
fitted_data=[]
for i in data:
if SM.fit_anonymous(i['structure'], known_garnet_structure):
fitted_data.append(i)
print(len(fitted_data))
experimental_list=[]
theoretical_list=[]
for i in fitted_data:
if len(i["icsd_ids"])!=0:
experimental_list.append(i)
else:
theoretical_list.append(i)
print(len(experimental_list))
print(len(theoretical_list))
#Query materials project for X3Y5O12
data_2=m.query("*3*5O12", properties=["pretty_formula","material_id","spacegroup.symbol","icsd_ids","e_above_hull","exp","structure","cif"])
print(f"There are {len(data_2)} materials with formula X3Y2Y3O12 in the MP")
print("")
#get structure of YAG
YAG=m.query("mp-3050", properties=["pretty_formula","material_id","spacegroup.symbol","icsd_ids","e_above_hull","exp","structure","cif"])
YAG_structure=YAG[0]["structure"]
#Iterate over query data and verify if they have the correct structure
fitted_data_2=[]
for i in data_2:
if SM.fit_anonymous(i['structure'], YAG_structure):
fitted_data_2.append(i)
print(f"Of the {len(data_2)} materials, {len(fitted_data_2)} match the structure of YAG")
print("")
#Find the number of experimental and theoretical materials
experimental_list_2=[]
theoretical_list_2=[]
for i in fitted_data_2:
if len(i["icsd_ids"])!=0:
experimental_list_2.append(i)
else:
theoretical_list_2.append(i)
print(f"There are {len(experimental_list_2)} experimental garnet structures with formula X3Y5O12")
print(f"There are {len(theoretical_list_2)} theoretical structures with formula X3Y5O12")
print(f"Considering formulas A3B2C3O12 and X3Y5O12, suggest there are {len(fitted_data)+len(fitted_data_2)} materials with the garnet structure in the Materials Project")
print("")
print(f"Of these {len(fitted_data)+len(fitted_data_2)} materials, {len(experimental_list)+len(experimental_list_2)} are experimental ")
print("")
print(f"Of these {len(fitted_data)+len(fitted_data_2)} materials, {len(theoretical_list)+len(theoretical_list_2)} are theoretical ")
new_fitted_data=fitted_data+fitted_data_2
new_theoretical_list=theoretical_list+theoretical_list_2
new_experimental_list=experimental_list+experimental_list_2
df_new=pd.DataFrame(new_fitted_data)
df_new.to_csv("MP_garnets.csv", index=False)
print(df_new.shape)
df_new.head()
theoretical=[]
for i in df_new["icsd_ids"]:
if len(i)!=0:
theoretical.append("No")
else:
theoretical.append("Yes")
df_new["theoretical?"]=theoretical
#This creates the database object
DB=database.StructureDB("Garnets.db")
#These create tables within the database
DB.add_table("Garnets")
DB.add_table("Experimental")
DB.add_table("Theoretical")
#Create an iterable of the query data
structs=[]
for i in new_fitted_data:
structs.append(database.parse_mprest(i))
exp_structs=[]
for i in new_experimental_list:
exp_structs.append(database.parse_mprest(i))
theo_structs=[]
for i in new_theoretical_list:
theo_structs.append(database.parse_mprest(i))
#Add the parsed structures to their tables (this only needs to be run the first time you build the database)
DB.add_structs(structs, "Garnets")
DB.add_structs(exp_structs, "Experimental")
DB.add_structs(theo_structs, "Theoretical")
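# Optional sanity check (added): StructureDB is backed by a SQLite file, so we can count
# the stored rows directly. This assumes add_table created tables with the names used above.
import sqlite3
with sqlite3.connect("Garnets.db") as conn:
    for table in ("Garnets", "Experimental", "Theoretical"):
        n = conn.execute(f"SELECT COUNT(*) FROM {table}").fetchone()[0]
        print(f"{table}: {n} structures stored")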
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We'll make a couple helper functions for "pretty printing" our responses
Step2: Now let's use the Requests library to get the Subscription. We'll get the Subscription using its id, which is a Universally Unique Identifier (UUID) string.
Step3: Let's take a look at the Subscription we just requested
Step4: The above output should look familiar from the previous tutorial. We see the title, AOI, TOI, and other metadata about the Subscription.
Step5: Subscription Results Items
Step6: We'll use the url to make our request
Step7: Nice! Now we've got our first set of Results data. The Results for a Subscription come to us as a GeoJSON FeatureCollection.
Step8: The request we made to get these items is actually a paged list of results.
Step9: The "rel"
Step10: We can use our helper function to get the first "next" link
Step11: Now let's iterate through our data and follow each "next" link to get all the data associated with our Subscription's Results Collection.
Step12: We can also increase the number of results returned on each page (default 250, max 10000)
Step13: 3. Working with Planet Analytics Results
Step14: When we created the DataFrame with the .from_features() method, GeoPandas automatically converted our GeoJSON geometries to Shapely shapes.
Step15: Most of these properties should be self-explanatory, and different Feed types may include different properties.
Step16: Let's visualize our Results. For starters, we can create a simple plot using GeoPandas' built-in plot method. Later on we'll make some more interesting interactive visualizations.
Step17: Inspecting a single detection
Step18: Let's look at the first detection's geometry visually. When we created our GeoDataFrame from our collection items data, the features were automatically converted to Shapely Geometries
Step19: Visualizing Results with GeoViews
Step20: Now let's plot the detection geometry on a basemap
Step21: Nice! GeoViews natively handles our Shapely Geometries and has no issue plotting our detection using an interactive map with Bokeh.
Step22: Getting observation source imagery data
Step23: The result should be a GeoJSON Feature with the following notable fields
Step24: There's a nice preview of our image, so we can take a peek without having to download the entire asset!
Step25: Using our helper function, we can construct the preview URLs
Step26: View a single detection with its source Planet Imagery
Step27: We now have an interactive map with our detection geometry and Planet source imagery. Pan around the map to see more of the scene!
Step28: Now let's use our helper function to render a Holoviews Layout. We'll render the first twenty detections
Step29: Interactive Detections Plot
Step30: Visualize by Time Dimension
Step31: Next let's define the render function to use for each day
Step32: Finally let's create our plot
Step33: 4. Segmentation Results
Step34: Save startTime and endTime as variables to filter our results below.
Step35: We'll use the links property to get the link to the Subscription's Feed and then request the Feed from the API
Step36: Now that we have our Feed data for the Subscription, let's use the target property to determine our mosaic series
Step37: Using the Mosaic Series Id, we can make a request to the Planet Mosaics API using the following endpoint https
Step38: We can see some metadata here about our series, including the first_acquired and last_acquired dates and the interval at which a new mosaic gets created.
Step39: The response from our last request includes a mosaics property that contains an array/list of individual mosaics available for the series.
Step40: Let's get the two pieces of information that are most useful for us here, the tiles links and the last_acquired date
Step41: Here's a small function to help us format our tile urls for HoloViews.
Step42: Now we can make an interactive visualization of each interval's tiles using the HoloMap. We'll also add a query parameter to the target tiles url that allows us to style the target tiles
Step43: We're also able to show the source imagery that the Subscription output was detected against! Remember how we got the Subscription's target mosaic series? We can do the same for the source mosaic series
Step44: Now let's prepare our Source Mosaic data
Step45: Let's merge the two tile url DataFrames
Step46: Finally, let's create the visualization with both source and target imagery
Step47: With the above visualization, you're able to compare the source imagery with the Subscription's output!
Step48: Each Feature in the response FeatureCollection represents a quad, and each Feature's geometry is the footprint of that quad.
Step49: We can see that the geometries of the quads all intersect with our Subscription AOI.
Step50: We see the links for both target-quad (the result output), and source-quad (the source imagery that was used to create the detections)
|
<ASSISTANT_TASK:>
Python Code:
import os
# Configure Auth and Base URL
# Planet Analytics API Base URL
PAA_BASE_URL = "https://api.planet.com/analytics/"
# API Key Config
API_KEY = os.environ['PL_API_KEY']
# Alternatively, you can just set your API key directly as a string variable:
# API_KEY = "YOUR_PLANET_API_KEY_HERE"
# Setup Auth
BASIC_AUTH = (API_KEY, '')
# Pretty Print Helper
import pprint
pp = pprint.PrettyPrinter(indent=4)
# JSON Pretty Print Helper
import json
def jpp(data):
print(json.dumps(data, indent=4))
# Request a Subscription
import requests
# Define the Subscription UUID, which in this case is for Ship Detections
subscription_id = "9db92275-1d89-4d3b-a0b6-68abd2e94142"
# Construct the URL for the Subscription
subscription_url = PAA_BASE_URL + 'subscriptions/' + subscription_id
print("Request URL: {}".format(subscription_url))
# Make the GET request for Subscriptions list
subscription = requests.get(subscription_url, auth=BASIC_AUTH).json()
# Print some data
print("{} \n{}\nSubscription Id: {}\n".format(subscription['title'], subscription['description'], subscription['id']))
# Print the subscription object
print(json.dumps(subscription, sort_keys=True, indent=4))
# Construct the URL for the subscription's Results collection
subscription_results_url = PAA_BASE_URL + 'collections/' + subscription['id']
print("Request URL: {}".format(subscription_results_url))
# Get subscription results collection
subscription_results = requests.get(subscription_results_url, auth=BASIC_AUTH).json()
# Pretty Print response JSON
print(json.dumps(subscription_results, sort_keys=True, indent=4))
# Construct the URL
subscription_items_url = list(filter(lambda link: link['rel'] == 'results', subscription['links']))[0]['href']
print("Subscription Items URL:\n{}\n".format(subscription_items_url))
# Request subscription Items
# Get request Subscription features (collection items)
subscription_items = requests.get(subscription_items_url, auth=BASIC_AUTH).json()
# Pretty Print response JSON
print("Subscription Results Collection Items: (GeoJSON FeatureCollection):\n {}".format(json.dumps(subscription_items, sort_keys=True, indent=4)))
# Print the number of Features in the subscription's collection:
print("\nNumber of Items (Features): {} \n".format(len(subscription_items['features'])))
# Pretty Print the links
pp.pprint(subscription_items['links'])
# Helper to get the "next" link
def get_next_link(links):
for link in links:
if link['rel'] == 'next':
return link['href']
# Get the first "next" link
next_link = get_next_link(subscription_items['links'])
page_number = 2
print(next_link)
# Loop to get all pages of features
while next_link:
print("Getting page number {} at {}".format(page_number, next_link))
# Use "next" link to request next page
next_collection = requests.get(next_link, auth=BASIC_AUTH).json()
print("Found {} additional Features \n".format(len(next_collection['features'])))
# Get the next "next" link
next_link = get_next_link(next_collection["links"])
# Add features from new page to our original list of features
subscription_items['features'].extend(next_collection['features'])
# increment the page number
page_number += 1
print("Total Subscription Items: {}".format(len(subscription_items['features'])))
# Now we have our complete GeoJSON Feature Collection
subscription_items
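# Optional (added): save the merged FeatureCollection to disk so it can be reused
# without re-paging through the API. The file name here is arbitrary.
with open('subscription_results_items.geojson', 'w') as f:
    json.dump(subscription_items, f)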
import geopandas as gpd
import pandas as pd
# Make a GeoPandas data frame from our collection items
# Since we're importing a GeoJSON file, we can use the `from_features` method
gdf = gpd.GeoDataFrame.from_features(subscription_items['features'])
# Show the first five results from our GeoDataFrame
gdf.head()
# List the item properties (columns)
for column in gdf.columns.values:
print(column)
# Convert observed column into a datetime datatype
gdf["observed"] = pd.to_datetime(gdf["observed"])
# Simple GeoPandas Plot of features
gdf.plot();
# Select the first detection by index
first_detection = gdf.iloc[0]
first_detection
# Use shapely to show the first geometry
first_detection['geometry']
# Import GeoViews
import geoviews as gv
# Import HoloViews
import holoviews as hv
from holoviews import opts as hvOpts
# Set rendering backends
gv.extension('bokeh')
hv.extension('bokeh')
# Create a basemap to use in our visualizations
basemap = gv.tile_sources.CartoLight
# List built in GeoViews tile sources
# help(gv.tile_sources)
# Plot the detection using GeoViews Shape element with a basemap
gv.Shape(first_detection['geometry']).opts(padding=0.5, width=500, height=400) * basemap
# Get the sensor type
item_type = first_detection['source_item_type']
# Get the item id
scene_id = first_detection['source_item_id']
print("Item Type: {} \nScene Id: {}".format(item_type, scene_id))
# Import the Planet Data API module
from planet import api as PlanetAPI
# Create a client for the API
planet_client = PlanetAPI.ClientV1(api_key=API_KEY)
# Get data for our item
source_scene = planet_client.get_item(item_type, scene_id).get()
# Pretty Print our scene info
jpp(source_scene)
from IPython.display import Image
# Get the thumbnail URL
# Append our api key to authenticate
thumb_url = source_scene['_links']['thumbnail'] + '?api_key=' + API_KEY
# Render the preview image with IPython.display
Image(url=thumb_url)
# Get source imagery helper utility
# Takes type xyz or thumb
def get_source_imagery_url(scene_id, scene_sensor, preview_type="xyz"):
# Determine parts of URL
prefix = "" if preview_type == "xyz" else "item-types/"
middle = "/" if preview_type == "xyz" else "/items/"
suffix = "/{Z}/{X}/{Y}.png" if preview_type == "xyz" else "/thumb"
# Construct the URL
source_imagery_url = "https://tiles.planet.com/data/v1/" + prefix + scene_sensor + \
middle + scene_id + suffix + "?api_key=" + API_KEY
return source_imagery_url
from IPython.core.display import HTML
# Get the scene thumbnail URL
thumb_url = get_source_imagery_url(scene_id, item_type, 'thumb')
# Get the scene tiles URL
tiles_url = get_source_imagery_url(scene_id, item_type, 'xyz')
# Print the thumbnail link
print("Thumbnail URL:")
HTML('<a href="{}" target="_blank">Thumbnail URL</a>'.format(thumb_url))
# Plot the detection geometry
single_detection = gv.Shape(first_detection['geometry'], label="Detection in {} - {}".format(item_type, scene_id)).opts(
fill_alpha=0,
line_color="red",
width=600, height=500, padding=3,
)
# Plot with web map tiles
single_detection * basemap * gv.WMTS(data=tiles_url)
# Helper function to render a thumbnail for an item
# Takes an index (row) from DataFrame
def render_item_thumb(index):
# Get the item row from DataFrame
item = gdf.iloc[index]
# Construct tile URL
tile_url = get_source_imagery_url(item['source_item_id'], item['source_item_type'], 'xyz')
# Create detection plot
detection = gv.Shape(item['geometry']).opts(
fill_alpha=0,
line_color="red",
width=200, height=200,
default_tools=[],
xaxis=None,
yaxis=None
)
return (detection * basemap * gv.WMTS(data=tile_url))
# Iterate through the first 20 thumbs
thumbs_list = [render_item_thumb(i) for i in list(gdf.index[0:20])]
# Create a Layout visualization
layout = hv.Layout(thumbs_list).opts(shared_axes=False).cols(4)
layout
# Plot all features on a map
# Pick the properties (dimensions) we want to include
items_dimensions = ['score', 'object_diagonal_m', 'observed']
# Plot the features with GeoViews
all_features_plot = gv.Polygons(gdf, vdims=items_dimensions).opts(
colorbar_position="bottom", colorbar_opts={"title": "score"},
cmap="RdYlBu_r",
width=600, height=500,
colorbar=True,
title="All Detections for {}".format(subscription['title']))
# Plot the features map with a histogram and basemap
all_features_plot.hist() * basemap
# Bucket the data by time interval
# Set the interval
interval = "D" # resampling frequency: W = week, D = day, M = month
time_bucketed_data = gdf.set_index('observed')[[
"source_item_id",
"geometry",
"source_cloud_cover",
"score"]].groupby([pd.Grouper(freq=interval)]).agg({
'geometry': 'count',
'source_item_id': 'nunique',
'source_cloud_cover': "mean",
'score':'mean'
}).fillna(0).rename(index=str, columns={"geometry": "detection_count", "source_item_id": "imagery_count", "source_cloud_cover":"mean_cloud_cover", "score":"mean_score"})
# Remove rows without detections
time_bucketed_data = time_bucketed_data[time_bucketed_data["detection_count"] != 0]
from datetime import timedelta
# Render Features for a date
def plot_features_for_date(start_date):
# Increment start date by one day
end_date = start_date + timedelta(days=1)
# Slice our data to only include features between start and end date
mask = (gdf["observed"] > start_date) & (gdf["observed"] <= end_date)
sliced_data = gdf.loc[mask]
# Create the Polygons Plot
return gv.Polygons(sliced_data, vdims=['score']).opts(fill_alpha=0, nonselection_line_alpha=0.1, line_color="score", selection_line_color="green", cmap="RdYlBu_r", width=600, height=400, tools=["hover", "tap"], title="Detections for {} on {}".format(subscription['title'], start_date))
# Render HoloViews HoloMap
# Define the date dimension
kdims = [hv.Dimension(('start_date', 'Observation Date'))]
# Create all the plots
features_for_date = {(date): plot_features_for_date(date) for date in list(pd.to_datetime(time_bucketed_data.index))}
# Create the HoloMap
features_by_day_plot = hv.HoloMap(features_for_date, kdims=kdims)
# Add a basemap to the features plot and render
features_by_day_plot * basemap
# Define the Subscription UUID, which in this case is for Monthly Road Detections
subscription_id = 'e0c33581-5080-4e81-98a1-2d555e91b41e'
# Construct the URL for a Segmentation Feed Subscription
seg_subscription_url = PAA_BASE_URL + 'subscriptions/' + subscription_id
print("Request URL: {}".format(subscription_url))
# Make the GET request for Subscription
seg_subscription = requests.get(seg_subscription_url, auth=BASIC_AUTH).json()
seg_subscription
# We only need the year, month, and day for monthly Basemaps
start_time = seg_subscription['startTime'].split('T')[0]
end_time = seg_subscription['endTime'].split('T')[0]
print('Subscription startTime: {}'.format(start_time))
print('Subscription endTime: {}'.format(end_time))
# Construct the Feed request url from the Subscription link
seg_subscription_feed_url = list(filter(lambda link: link['rel'] == 'feed', seg_subscription['links']))[0]['href']
print(seg_subscription_feed_url)
# Make the request for the Subscription's Feed
seg_subscription_feed = requests.get(seg_subscription_feed_url, auth=BASIC_AUTH).json()
seg_subscription_feed
# Get the mosaics series id
mosaic_target_series_id = seg_subscription_feed['target']['config']['series_id']
mosaic_target_series_id
# Construct the mosaics series url
mosaic_target_series_url = "https://api.planet.com/basemaps/v1/series/" + mosaic_target_series_id
print(mosaic_target_series_url)
# Make a request to the Mosaics API
target_mosaic_series = requests.get(mosaic_target_series_url, auth=BASIC_AUTH).json()
target_mosaic_series
# Make a request to get the subscription's mosaics
target_mosaics = requests.get(target_mosaic_series['_links']['mosaics'], auth=BASIC_AUTH).json()['mosaics']
# Filter the target mosaics to return only those that fit within our subscription's startTime and endTime
subscription_mosaics = [m for m in target_mosaics if m['first_acquired'] >= start_time and m['last_acquired'] <= end_time]
jpp(subscription_mosaics)
from pandas import json_normalize
# Create a dataframe from list of mosaics (normalize json to break out individual links)
mosaics_df = pd.DataFrame(json_normalize(subscription_mosaics))
mosaics_df.head()
# Create a new DataFrame with limited columns
target_tiles_df = mosaics_df.loc[:, ['_links.tiles', 'last_acquired']]
# Rename the tiles column
target_tiles_df.rename(columns={'_links.tiles':'target_tileurl', 'last_acquired':'date'}, inplace=True)
# Only use year/month for date
target_tiles_df['date'] = pd.to_datetime(target_tiles_df["date"])
target_tiles_df['date'] = target_tiles_df['date'].dt.strftime('%Y/%m')
target_tiles_df.head()
# Helper to format target tile urls for HoloViews
def format_target_mosaic_url(in_url):
url = in_url.replace("{x}", "{X}").replace("{y}", "{Y}").replace("{z}", "{Z}")
return url
from shapely.geometry import shape
# Get the extent of our subscription
seg_subscription_bounds = shape(seg_subscription['geometry']).bounds
# Style the target mosaic
target_style = "&exp=bincat:0|a50f15"
# Create data for HoloMap
mosaics = {(mosaic['date']):gv.WMTS(format_target_mosaic_url(mosaic['target_tileurl']) + target_style, extents=seg_subscription_bounds) for index, mosaic in target_tiles_df.iterrows() }
# Create the visualization
subscription_mosaics_plot = hv.HoloMap(mosaics, kdims = [hv.Dimension(('date', 'Date'))]).opts(width=600, height=500)
basemap * subscription_mosaics_plot
# Get the source mosaics series id
mosaic_source_series_id = seg_subscription_feed['source'][0]['config']['series_id']
print("Source Mosaic Series Id: {}\n".format(mosaic_source_series_id))
# Construct the source mosaics series url
mosaic_source_series_url = "https://api.planet.com/basemaps/v1/series/" + mosaic_source_series_id
print("Source Mosaic Series Url: {}\n".format(mosaic_source_series_url))
# Make a request to the Mosaics API for source mosaics series
source_mosaic_series = requests.get(mosaic_source_series_url, auth=BASIC_AUTH).json()
# Make a request to the Mosaics API for source mosaics
source_mosaics = requests.get(source_mosaic_series['_links']['mosaics'], auth=BASIC_AUTH).json()
# Create a dataframe from list of mosaics (normalize json to break out individual links)
src_mosaics_df = pd.DataFrame(json_normalize(source_mosaics['mosaics']))
# Create a new DataFrame with limited columns
src_tiles_df = src_mosaics_df.loc[:, ['_links.tiles', 'last_acquired']]
# Rename the tiles column
src_tiles_df.rename(columns={'_links.tiles':'src_tileurl', 'last_acquired':'date'}, inplace=True)
# Only use year/month for date
src_tiles_df['date'] = pd.to_datetime(src_tiles_df["date"])
src_tiles_df['date'] = src_tiles_df['date'].dt.strftime('%Y/%m')
src_tiles_df.head()
# Merge src and target tile urls
combined_tiles = pd.merge(target_tiles_df, src_tiles_df, on='date')
# Set datetime index
combined_tiles['index'] = pd.to_datetime(combined_tiles['date'])
combined_tiles.set_index('index', inplace=True)
combined_tiles.head()
# Create data for HoloMap
mosaics = {(mosaic['date']): gv.WMTS(format_target_mosaic_url(mosaic['target_tileurl']) + target_style, extents=seg_subscription_bounds) + gv.WMTS(format_target_mosaic_url(mosaic['src_tileurl'])) for index, mosaic in combined_tiles.iterrows() }
# Create the visualization
combined_mosaics_plot = hv.HoloMap(mosaics, kdims = [hv.Dimension(('date', 'Date'))]).opts(width=600, height=500).collate()
basemap * combined_mosaics_plot
seg_subscription_results_url = PAA_BASE_URL + 'collections/' + seg_subscription['id'] + '/items'
print("Request URL: {}".format(seg_subscription_results_url))
# Make the GET request for Subscription Results
seg_subscription_results = requests.get(seg_subscription_results_url, auth=BASIC_AUTH).json()
seg_subscription_results
print(len(seg_subscription_results['features']))
# Create a GeoDataFrame of quads
quads_gdf = gpd.GeoDataFrame.from_features(seg_subscription_results['features'])
# Plot the quads
quads_plot = gv.Polygons(quads_gdf, label="Quads").opts(fill_alpha=0.1, fill_color="yellow", show_legend=True)
# Plot the subscription AOI
subscription_aoi = gv.Shape(shape(seg_subscription['geometry']), label="AOI").opts(fill_alpha=0.5, line_color="red", fill_color="red", show_legend=True)
# Plot our visualization
(quads_plot * subscription_aoi * basemap).opts(width=600, height=400)
seg_sub_results_links = seg_subscription_results['features'][0]['links']
seg_sub_results_links
# Construct the URL to source quad
source_quad = list(filter(lambda link: link['rel'] == 'source-quad', seg_sub_results_links))[0]['href']
print("Source Quad URL:\n{}\n".format(source_quad))
# Construct the URL to target quad
target_quad = list(filter(lambda link: link['rel'] == 'target-quad', seg_sub_results_links))[0]['href']
print("Target (Result) Quad URL:\n{}\n".format(target_quad))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the above example, it is clear that $ts1$ and $ts2$ are most similar (they are both $sin$ functions under different transformations). $ts3$ is clearly the most different. Let's compute the Euclidean distance $d(ts1,ts2)$ and $d(ts1,ts3)$ to see if the Euclidean distance measure agrees with what our intuition tells us. Let's first create a function that computes the Euclidean distance between two time series.
Step2: Let's now find the Euclidean distance between $ts1$ and $ts2$
Step3: and the Euclidean distance between $ts1$ and $ts3$
Step4: This is not good because, according to the Euclidean distance measure, $ts1$ is more similar to $ts3$ than to $ts2$, which contradicts our intuition. This is the problem with using the Euclidean distance measure: it often produces pessimistic similarity measures when it encounters distortion in the time axis. The way to deal with this is to use dynamic time warping.
Step5: Now let's compute the Euclidean distance between $ts1$ and $ts2$ using dynamic time warping.
Step6: and now the dynamic time warping distance between $ts1$ and $ts3$
Step7: As you can see, our results have changed from when we only used the Euclidean distance measure. Now, in agreement with our intuition, $ts2$ is shown to be more similar to $ts1$ than $ts3$ is.
Step8: Let's test this faster version.
Step9: Another way to speed things up is to use the LB Keogh lower bound of dynamic time warping. It is defined as $$LBKeogh(Q,C)=\sum_{i=1}^n (c_i-U_i)^2I(c_i > U_i)+(c_i-L_i)^2I(c_i < L_i)$$
Step10: Let's now test on $ts1$ and $ts2$
Step11: and now $ts1$ and $ts3$.
Step12: The LB Keogh lower bound method is linear in complexity whereas dynamic time warping is quadratic, which makes it very advantageous for searching over large sets of time series.
Step13: Now let's test it on some data. We will use a window size of 4. Although the code is sped up with the use of the LB Keogh bound and the dynamic time warping locality constraint, it may still take a few minutes to run.
Step14: The same idea can also be applied to k-means clustering. In this algorithm, the number of clusters is set a priori and similar time series are clustered together.
Step17: Let's test it on the entire data set (i.e. the training set and the test set stacked together).
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
x=np.linspace(0,50,100)
ts1=pd.Series(3.1*np.sin(x/1.5)+3.5)
ts2=pd.Series(2.2*np.sin(x/3.5+2.4)+3.2)
ts3=pd.Series(0.04*x+3.0)
#ts1.plot()
#ts2.plot()
#ts3.plot()
#plt.ylim(-2,10)
#plt.legend(['ts1','ts2','ts3'])
#plt.show()
def euclid_dist(t1,t2):
return sqrt(sum((t1-t2)**2))
#print euclid_dist(ts1,ts2)
#print euclid_dist(ts1,ts3)
def DTWDistance(s1, s2):
DTW={}
for i in range(len(s1)):
DTW[(i, -1)] = float('inf')
for i in range(len(s2)):
DTW[(-1, i)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(len(s1)):
for j in range(len(s2)):
dist= (s1[i]-s2[j])**2
DTW[(i, j)] = dist + min(DTW[(i-1, j)],DTW[(i, j-1)], DTW[(i-1, j-1)])
return sqrt(DTW[len(s1)-1, len(s2)-1])
#print DTWDistance(ts1,ts2)
#print DTWDistance(ts1,ts3)
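# Added illustration: the plain DTW distances for the three series defined above.
# sqrt is imported here because the original notebook only imports it further down.
from math import sqrt
print(DTWDistance(ts1, ts2))
print(DTWDistance(ts1, ts3))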
def DTWDistance(s1, s2, w=5):
    DTW={}
    # window size w can now be passed in; the knn and k-means functions below call this with three arguments
    w = max(w, abs(len(s1)-len(s2)))
for i in range(-1,len(s1)):
for j in range(-1,len(s2)):
DTW[(i, j)] = float('inf')
DTW[(-1, -1)] = 0
for i in range(len(s1)):
for j in range(max(0, i-w), min(len(s2), i+w)):
dist= (s1[i]-s2[j])**2
DTW[(i, j)] = dist + min(DTW[(i-1, j)],DTW[(i, j-1)], DTW[(i-1, j-1)])
return sqrt(DTW[len(s1)-1, len(s2)-1])
#print(DTWDistance(ts1,ts2,10))
#print(DTWDistance(ts1,ts3,10))
def LB_Keogh(s1,s2,r):
LB_sum=0
for ind,i in enumerate(s1):
#print(ind -r, ind+r)
lower_bound=min(s2[(ind-r if ind-r>=0 else 0):(ind+r)])
upper_bound=max(s2[(ind-r if ind-r>=0 else 0):(ind+r)])
if i>upper_bound:
LB_sum=LB_sum+(i-upper_bound)**2
elif i<lower_bound:
LB_sum=LB_sum+(i-lower_bound)**2
return sqrt(LB_sum)
#print LB_Keogh(ts1,ts2,20)
#print LB_Keogh(ts1,ts3,20)
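# Added illustration: the LB Keogh lower bounds for the same pairs (r=20 is the reach of the envelope).
from math import sqrt
print(LB_Keogh(ts1, ts2, 20))
print(LB_Keogh(ts1, ts3, 20))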
from sklearn.metrics import classification_report  # used by knn() below
from math import sqrt
def knn(train,test,w):
preds=[]
for ind,i in enumerate(test):
min_dist=float('inf')
closest_seq=[]
#print ind
for j in train:
if LB_Keogh(i[:-1],j[:-1],5)<min_dist:
dist=DTWDistance(i[:-1],j[:-1],w)
if dist<min_dist:
min_dist=dist
closest_seq=j
preds.append(closest_seq[-1])
return classification_report(test[:,-1],preds)
train = np.genfromtxt('datasets/train.csv', delimiter='\t')
test = np.genfromtxt('datasets/test.csv', delimiter='\t')
#print (knn(train,test,4))
import random
def k_means_clust(data,num_clust,num_iter,w=5):
centroids=random.sample(list(data),num_clust)  # random.sample needs a plain sequence, not an ndarray
counter=0
for n in range(num_iter):
counter+=1
print (counter)
assignments={}
#assign data points to clusters
for ind,i in enumerate(data):
min_dist=float('inf')
closest_clust=None
for c_ind,j in enumerate(centroids):
if LB_Keogh(i,j,200)<min_dist:
cur_dist=DTWDistance(i,j,w)
if cur_dist<min_dist:
min_dist=cur_dist
closest_clust=c_ind
if closest_clust in assignments:
assignments[closest_clust].append(ind)
else:
assignments[closest_clust]=[ind]  # start the new cluster with this series instead of dropping it
#recalculate centroids of clusters
for key in assignments:
clust_sum=0
for k in assignments[key]:
clust_sum= clust_sum+data[k]
print("DEBUG")
for m in clust_sum:
#print(m)
t = m/float(len(assignments[key]))
centroids[key] = m/float(len(assignments[key])) #centroids[key]=[m/float(len(assignments[key])) for m in clust_sum]
return centroids
train = np.genfromtxt('datasets/train.csv', delimiter='\t')
test = np.genfromtxt('datasets/test.csv', delimiter='\t')
data1=np.vstack((train[:,:-1],test[:,:-1]))
#print(type(train))
#print(np.fromfile("ndarray.csv"))
#print("origi dataset")
df = pd.read_csv("vmarrays.csv", index_col=0)  # DataFrame.from_csv is deprecated
#data = np.ndarray(df)
#numpyMatrix = df.as_matrix()
data1=np.vstack((train[:,:-1],test[:,:-1]))
print(data1[0])
print(type(data1[0]))
data = np.fromfile("prices.csv")
data = np.vstack(data)
print(data[0])
print(type(data[0]))
d = df.values.tolist()
#data = np.vstack(d)
#for i in range(26):
# if np.isnan(d[i][-1]):
# d[i][-1] = 0.1
#data = np.ndarray(d)
#type(data1[0])
#input = data1
y=np.array([np.array(di)[:100] for di in d])
#ts1 = y[0]
#ts2 = y[1]
#print LB_Keogh(ts1,ts3,2)
#print(data1[1])
#y = np.delete(y, 25, 0)
# len of ts in example is 60 - range=5
# len of ts in datset is 1416 - 100
#for i in range(26):
# ts = y[i][:60]
# y[i] = ts
#(y[1][-1])
#print(y[24])
#len(y[1])
y
import matplotlib.pylab as plt
centroids=k_means_clust(data1,4,10,4) #data,num_clust,num_iter,w=5
print("centroids" ,centroids)
for i in centroids:
plt.plot(i)
plt.show()
import numpy as np;
import seaborn as sns;
import pandas as pd
from scipy import stats
import scipy.cluster.hierarchy as hac
import matplotlib.pyplot as plt
num_samples = 61
group_size = 10
df = pd.read_csv("ndarray.csv", index_col=0)  # DataFrame.from_csv is deprecated
d = df.values.tolist()
data = np.vstack(d)
for i in range(26):
d[i][-1] = 0
#data = np.ndarray(d)
#type(data1[0])
input = data1
y=np.array([np.array(di)[:60] for di in d])
for i in range(26):
timeseries = y[i]
timeSeries = (timeseries-timeseries.min())/(timeseries.max()-timeseries.min())
y[i] = timeSeries
timeSeries = pd.DataFrame()
#timeSeries = (timeseries-timeseries.min())/(timeseries.max()-timeseries.min())
ax = None
for arr in y:
#for arr in data1:
#arr = arr + np.random.rand(group_size, num_samples) + (np.random.randn(group_size, 1)*3)
df = pd.DataFrame(arr)
#print(df)
timeSeries = timeSeries.append(df)
# We use seaborn to plot what we have
#ax = sns.tsplot(ax=ax, data=df.values, ci=[68, 95])
#ax = sns.tsplot(ax=ax, data=df.values, err_style="unit_traces")
# Just one line :)
import numpy as np;
import seaborn as sns;
import pandas as pd
from scipy import stats
import scipy.cluster.hierarchy as hac
import matplotlib.pyplot as plt
timeSeries = pd.DataFrame()
#timeSeries = (timeseries-timeseries.min())/(timeseries.max()-timeseries.min())
ax = None
for arr in y:
#for arr in data1:
#arr = arr + np.random.rand(group_size, num_samples) + (np.random.randn(group_size, 1)*3)
df = pd.DataFrame(arr)
#print(df)
timeSeries = timeSeries.append(df)
#Z = hac.linkage(timeSeries, 'single', 'correlation')
Z = hac.linkage(timeSeries, 'ward')
import sys
sys.setrecursionlimit(15000) # DON'T TOUCH IT, IT's MAGIC
#sys.setrecursionlimit(10000)
# Plot the dendogram
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
hac.dendrogram(
Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=14., # font size for the x axis labels
)
plt.show()
print("method is ward")
# Just one line :)
import numpy as np;
import seaborn as sns;
import pandas as pd
from scipy import stats
import scipy.cluster.hierarchy as hac
import matplotlib.pyplot as plt
timeSeries = pd.DataFrame()
#timeSeries = (timeseries-timeseries.min())/(timeseries.max()-timeseries.min())
ax = None
for arr in y:
#for arr in data1:
#arr = arr + np.random.rand(group_size, num_samples) + (np.random.randn(group_size, 1)*3)
df = pd.DataFrame(arr)
#print(df)
timeSeries = timeSeries.append(df)
#Z = hac.linkage(timeSeries, 'single', 'correlation')
Z = hac.linkage(timeSeries, 'complete')
import sys
sys.setrecursionlimit(15000) # DON'T TOUCH IT, IT's MAGIC
#sys.setrecursionlimit(10000)
# Plot the dendogram
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
hac.dendrogram(
Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=14., # font size for the x axis labels
)
plt.show()
print("method is complete")
# Just one line :)
import numpy as np;
import seaborn as sns;
import pandas as pd
from scipy import stats
import scipy.cluster.hierarchy as hac
import matplotlib.pyplot as plt
timeSeries = pd.DataFrame()
#timeSeries = (timeseries-timeseries.min())/(timeseries.max()-timeseries.min())
ax = None
for arr in y:
#for arr in data1:
#arr = arr + np.random.rand(group_size, num_samples) + (np.random.randn(group_size, 1)*3)
df = pd.DataFrame(arr)
#print(df)
timeSeries = timeSeries.append(df)
#Z = hac.linkage(timeSeries, 'single', 'correlation')
Z = hac.linkage(timeSeries, 'centroid')
import sys
sys.setrecursionlimit(15000) # DON'T TOUCH IT, IT's MAGIC
#sys.setrecursionlimit(10000)
# Plot the dendogram
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
hac.dendrogram(
Z,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=14., # font size for the x axis labels
)
plt.show()
print("method is centroid")
# Just one line :)
import numpy as np;
import seaborn as sns;
import pandas as pd
from scipy import stats
import scipy.cluster.hierarchy as hac
import matplotlib.pyplot as plt
timeSeries = pd.DataFrame()
#timeSeries = (timeseries-timeseries.min())/(timeseries.max()-timeseries.min())
ax = None
for arr in y:
#for arr in data1:
#arr = arr + np.random.rand(group_size, num_samples) + (np.random.randn(group_size, 1)*3)
df = pd.DataFrame(arr)
#print(df)
timeSeries = timeSeries.append(df)
#Z = hac.linkage(timeSeries, 'single', 'correlation')
Z = hac.linkage(timeSeries, 'single', DTWDistance)
import sys
sys.setrecursionlimit(15000) # DON'T TOUCH IT, IT's MAGIC
#sys.setrecursionlimit(10000)
# Plot the dendogram
plt.figure(figsize=(25, 10))
plt.title('Hierarchical Clustering Dendrogram')
plt.xlabel('sample index')
plt.ylabel('distance')
hac.dendrogram(
Z,
show_leaf_counts=False,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=14., # font size for the x axis labels
show_contracted=True,
)
plt.show()
print("method is single")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Variable scopes control variable (re)use
Step2: You’ll need to use reuse_variables() to implement Deep Networks
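A minimal sketch of how variable reuse enables weight sharing between two branches of a network. It assumes the TF 1.x graph-mode API used in the snippets below; the helper name linear and the placeholder shapes are made up for illustration:
import tensorflow as tf

def linear(x, in_dim, out_dim):
    # get_variable creates "w"/"b" on the first call and reuses them once reuse is enabled
    w = tf.get_variable("w", [in_dim, out_dim])
    b = tf.get_variable("b", [out_dim], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b

x1 = tf.placeholder(tf.float32, [None, 10])
x2 = tf.placeholder(tf.float32, [None, 10])
with tf.variable_scope("shared") as scope:
    y1 = linear(x1, 10, 5)      # creates shared/w and shared/b
    scope.reuse_variables()
    y2 = linear(x2, 10, 5)      # reuses the same variables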
Step3: Case 2
Step4: <br/>
|
<ASSISTANT_TASK:>
Python Code:
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
tf.get_variable_scope().reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
assert v.name == "foo/v:0"
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
saver = tf.train.Saver()
# sess and model_path are assumed to come from the surrounding training code
save_path = saver.save(sess, model_path)
print("Model saved in file: %s" % save_path)
saver.restore(sess, model_path)
print("Model restored from file: %s" % save_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the data
Step2: Build the model and cross-validate
Step3: Part 1 - For a high learning rate, find the most suitable number of estimators
Step4: Tune subsample and colsample_bytree
Step5: tune subsample
Step6: Cross-validate the regularization parameters
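The searches below only tune reg_alpha (L1 regularization); reg_lambda (L2) can be tuned with exactly the same pattern. A sketch — the candidate values are arbitrary, and train, predictors and target are the objects defined in the code below:
param_test_lambda = {'reg_lambda': [0.01, 0.1, 1, 10, 100]}
gsearch_lambda = GridSearchCV(
    estimator=XGBClassifier(learning_rate=0.1, n_estimators=177, max_depth=4,
                            min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
                            objective='binary:logistic', nthread=4, scale_pos_weight=1, seed=27),
    param_grid=param_test_lambda, scoring='roc_auc', n_jobs=4, iid=False, cv=5)
gsearch_lambda.fit(train[predictors], train[target])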
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
from sklearn import cross_validation, metrics
from sklearn.grid_search import GridSearchCV
import matplotlib.pylab as plt
%matplotlib inline
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4
train = pd.read_csv('train_modified.csv')
test = pd.read_csv('test_modified.csv')
train.shape, test.shape
target='Disbursed'
IDcol = 'ID'
train['Disbursed'].value_counts()
#test_results = pd.read_csv('test_results.csv')
def modelfit(alg, dtrain, dtest, predictors,useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
if useTrainCV:
xgb_param = alg.get_xgb_params()
xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)
xgtest = xgb.DMatrix(dtest[predictors].values)
cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
early_stopping_rounds=early_stopping_rounds, show_progress=False)
alg.set_params(n_estimators=cvresult.shape[0])
# Fit the algorithm on the data
alg.fit(dtrain[predictors], dtrain['Disbursed'],eval_metric='auc')
# Predict on the training set
dtrain_predictions = alg.predict(dtrain[predictors])
dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
# Print a short model report
print "\nModel report"
print "Accuracy : %.4g" % metrics.accuracy_score(dtrain['Disbursed'].values, dtrain_predictions)
print "AUC score (training set): %f" % metrics.roc_auc_score(dtrain['Disbursed'], dtrain_predprob)
feat_imp = pd.Series(alg.booster().get_fscore()).sort_values(ascending=False)
feat_imp.plot(kind='bar', title='Feature Importances')
plt.ylabel('Feature Importance Score')
predictors = [x for x in train.columns if x not in [target, IDcol]]
xgb1 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=5,
min_child_weight=1,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb1, train, test, predictors)
# Grid search for the best max_depth and min_child_weight
param_test1 = {
'max_depth':range(3,10,2),
'min_child_weight':range(1,6,2)
}
gsearch1 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=5,
min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1, seed=27),
param_grid = param_test1, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch1.fit(train[predictors],train[target])
gsearch1.grid_scores_, gsearch1.best_params_, gsearch1.best_score_
# Narrow down max_depth and min_child_weight around the best values
param_test2 = {
'max_depth':[4,5,6],
'min_child_weight':[4,5,6]
}
gsearch2 = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=5,
min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test2, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch2.fit(train[predictors],train[target])
gsearch2.grid_scores_, gsearch2.best_params_, gsearch2.best_score_
# Cross-validate to find the most suitable min_child_weight
param_test2b = {
'min_child_weight':[6,8,10,12]
}
gsearch2b = GridSearchCV(estimator = XGBClassifier( learning_rate=0.1, n_estimators=140, max_depth=4,
min_child_weight=2, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test2b, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch2b.fit(train[predictors],train[target])
gsearch2b.grid_scores_, gsearch2b.best_params_, gsearch2b.best_score_
# Grid search for a suitable gamma
param_test3 = {
'gamma':[i/10.0 for i in range(0,5)]
}
gsearch3 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=140, max_depth=4,
min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test3, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch3.fit(train[predictors],train[target])
gsearch3.grid_scores_, gsearch3.best_params_, gsearch3.best_score_
predictors = [x for x in train.columns if x not in [target, IDcol]]
xgb2 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=4,
min_child_weight=6,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb2, train, test, predictors)
# Grid search for the best subsample and colsample_bytree
param_test4 = {
'subsample':[i/10.0 for i in range(6,10)],
'colsample_bytree':[i/10.0 for i in range(6,10)]
}
gsearch4 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test4, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch4.fit(train[predictors],train[target])
gsearch4.grid_scores_, gsearch4.best_params_, gsearch4.best_score_
# Same as above, with a finer grid
param_test5 = {
'subsample':[i/100.0 for i in range(75,90,5)],
'colsample_bytree':[i/100.0 for i in range(75,90,5)]
}
gsearch5 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test5, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch5.fit(train[predictors],train[target])
gsearch5.grid_scores_, gsearch5.best_params_, gsearch5.best_score_
# Grid search for the most suitable reg_alpha
param_test6 = {
'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100]
}
gsearch6 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test6, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch6.fit(train[predictors],train[target])
gsearch6.grid_scores_, gsearch6.best_params_, gsearch6.best_score_
# Grid search reg_alpha again with a different set of candidate values
param_test7 = {
'reg_alpha':[0, 0.001, 0.005, 0.01, 0.05]
}
gsearch7 = GridSearchCV(estimator = XGBClassifier( learning_rate =0.1, n_estimators=177, max_depth=4,
min_child_weight=6, gamma=0.1, subsample=0.8, colsample_bytree=0.8,
objective= 'binary:logistic', nthread=4, scale_pos_weight=1,seed=27),
param_grid = param_test7, scoring='roc_auc',n_jobs=4,iid=False, cv=5)
gsearch7.fit(train[predictors],train[target])
gsearch7.grid_scores_, gsearch7.best_params_, gsearch7.best_score_
xgb3 = XGBClassifier(
learning_rate =0.1,
n_estimators=1000,
max_depth=4,
min_child_weight=6,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
reg_alpha=0.005,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb3, train, test, predictors)
xgb4 = XGBClassifier(
learning_rate =0.01,
n_estimators=5000,
max_depth=4,
min_child_weight=6,
gamma=0,
subsample=0.8,
colsample_bytree=0.8,
reg_alpha=0.005,
objective= 'binary:logistic',
nthread=4,
scale_pos_weight=1,
seed=27)
modelfit(xgb4, train, test, predictors)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Functions
Step5: Functions can be passed in as arguments
Step6: $$ f(x) = 3x^2 + 5x + 3$$
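For reference, the analytic derivative of this function is $f'(x) = 6x + 5$, so the numerical estimate derivative(2, some_func) in the code below should come out very close to $f'(2) = 17$.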
Step9: Functions can also be returned by functions
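A small sketch of a function that returns a function — the closure remembers the value of n; the name make_power is my own, the notebook's code only demonstrates passing functions in:
def make_power(n):
    # returns a new function that raises its argument to the n-th power
    def power(x):
        return x ** n
    return power

cube_fn = make_power(3)
print(cube_fn(2))   # 8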
Step11: Higher order functions<a id='higherorder'></a>
Step12: ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Step13: 2) Using reduce and map, write a python program to find the largest element in the list of integers, floats or strings (that are numbers). <br>
Step14: ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Step15: ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Step16: ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Step18: Recursion<a id='recursion'></a>
Step21: Fibonacci sequence
Step23: This is because it makes many duplicate function calls. For example
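fib1(5) evaluates fib1(3) twice and fib1(2) three times, and the duplication grows exponentially with n. One standard remedy is to cache results (memoization) — a sketch of my own, not part of the original notebook:
def fib_memo(n, cache={0: 1, 1: 1}):
    # the shared default dict acts as a cache, so each value is computed only once
    if n not in cache:
        cache[n] = fib_memo(n - 1) + fib_memo(n - 2)
    return cache[n]

print(fib_memo(20))   # 10946, the same value fib1(20) and fib2(20) return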
Step24: ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Step25: Iterators<a id='iterators'></a>
Step26: ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Step28: Review Problems<a id='hmwk'></a>
Step30: Q2. Write a function, normalize(x) that takes in a vector x and outputs a normalized vector x_norm in the following way
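The definition that is cut off here can be recovered from the solution code below: each component is standardized to zero mean and unit (population) standard deviation,
$$x^{norm}_i = \frac{x_i - \bar{x}}{\sigma_x}, \qquad \bar{x} = \frac{1}{n}\sum_{i=1}^{n} x_i, \qquad \sigma_x = \sqrt{\frac{1}{n}\sum_{i=1}^{n} (x_i - \bar{x})^2}$$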
Step31: Q3. Matrix Multiplication and list comprehension
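The solution below computes the standard matrix product, where $A$ has shape rows × shared, $B$ has shape shared × cols, and each entry of the result is
$$C_{ij} = \sum_{k} A_{ik} B_{kj}$$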
Step32: Q4.
Step33: Q5. Pascal's triangle
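The recursive solution below implements Pascal's rule, $\binom{n}{k} = \binom{n-1}{k-1} + \binom{n-1}{k}$, with pascal(c, r) returning $\binom{r-1}{c-1}$ and the edges of each row fixed at 1.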
Step34: Bonus (Hard!) Questions
Step36: Q2.
|
<ASSISTANT_TASK:>
Python Code:
def square(x):
"""Square of x."""
return x*x
def cube(x):
"""Cube of x."""
return x*x*x
def root(x):
"""Square root of x."""
return x**.5
# create a dictionary of functions
funcs = {
'square': square,
'cube': cube,
'root': root,
}
x = 2
print square(x)
print cube(x)
print root(x)
# print function name and output, sorted by function name
for func_name in sorted(funcs):
print func_name, funcs[func_name](x)
def derivative(x, f, h=0.01):
"""Calculate the derivative of any continuous, differentiable function"""
return (f(x+h) - f(x-h))/(2*h)
def some_func(x):
return 3*x**2 + 5*x + 3
derivative(2, some_func) # passing in function f
import time
def sum_squares(n):
"""Sum of the squares from 1 to n"""
s = sum([x*x for x in range(n)])
return s
def timer(f,n):
"""time how long it takes to evaluate function"""
start = time.clock()
result = f(n)
elapsed = time.clock() - start
return result, elapsed
n = 1000000
timer(sum_squares, n)
# The map function applies a function to each member of a collection
# map(aFunction, aSequence)
map(square, range(10))
# The filter function applies a predicate to each member of a collection,
# retaining only those members where the predicate is True
def is_even(x):
return x % 2 == 0
filter(is_even, range(10))
# It is common to combine map and filter
map(square, filter(is_even, range(10)))
# The reduce function reduces a collection using a binary operator to combine items two at a time
def my_add(x, y):
return x + y
# another implementation of the sum function - like a running total
reduce(my_add, [1,2,3,4,5])
def custom_sum(xs, func):
"""Returns the sum of xs after a user specified transform."""
return sum(map(func, xs))
xs = range(10)
print custom_sum(xs, square)
print custom_sum(xs, cube)
print custom_sum(xs, root)
## SOLUTION - 1
map(len,['Donald','Ted','Hilary','Joe','Bernie'])
## SOLUTION - 2
reduce(max, map(float, [2, '3', 4.0, 2, -1, '10', 9, -4.3, 8, 7, 11, 3]))  # float keeps fractional values from being truncated
# Using standard functions
n = 10
def square(x):
return x*x
square(n)
map(square, range(n))
# Using lambda function
sqr = lambda x: x*x
sqr(n)
map(sqr, range(n))
# what does this function do?
s1 = reduce(lambda x, y: x+y, map(lambda x: x**2, range(1,10)))
print(s1)
# functional expressions and lambdas are cool
# but can be difficult to read when over-used
# Here is a more comprehensible version
s2 = sum(x**2 for x in range(1, 10))
print(s2)
ans = map(lambda x: x*x, filter(lambda x: x%2 == 0, range(10)))
print ans
## SOLUTION
ans = [x*x for x in range(10) if x%2 == 0]
print ans
def fact(n):
"""Returns the factorial of n."""
# base case
if n==0:
return 1
# recursive case
else:
return n * fact(n-1)
[(n,fact(n)) for n in range(1,10)]
def fib1(n):
"""Fib with recursion."""
# base case
if n==0 or n==1:
return 1
# recursive caae
else:
return fib1(n-1) + fib1(n-2)
[(i,fib1(i)) for i in range(20)]
# In Python, a more efficient version that does not use recursion is
def fib2(n):
"""Fib without recursion."""
a, b = 0, 1
for i in range(1, n+1):
a, b = b, a+b
return b
[(i,fib2(i)) for i in range(20)]
# Note that the recursive version is much slower than the non-recursive version
%timeit fib1(20)
%timeit fib2(20)
# Recursion is used to show off the divide-and-conquer paradigm
def quick_sort(xs):
"""Classic quick sort"""
# base case
if xs == []:
return xs
# recursive case
else:
pivot = xs[0] # choose starting pivot to be on the left
less_than = [x for x in xs[1:] if x <= pivot]
more_than = [x for x in xs[1:] if x > pivot]
return quick_sort(less_than) + [pivot] + quick_sort(more_than)
xs = [11,3,1,4,1,5,9,2,6,5,3,5,9,0,10,4,3,7,4,5,8,-1]
print quick_sort(xs)
from __future__ import division
## SOLUTION
def gcd(a,b):
if b == 0:
return a
else:
return gcd(b, a % b)
print gcd(17384,1928)
def lcm(a,b):
return a*b/gcd(a,b)
print lcm(17384,1928)
# Iterators can be created from sequences with the built-in function iter()
xs = [1,2,3]
x_iter = iter(xs)
print x_iter.next() # python "remembers" where the pointer is
print x_iter.next()
print x_iter.next()
print x_iter.next()  # raises StopIteration - the iterator is exhausted
# Most commonly, iterators are used (automatically) within a for loop
# which terminates when it encouters a StopIteration exception
x_iter = iter(xs)
for x in x_iter:
print x
## SOLUTION
# using a for loop
for_list = []
for i in range(1,20):
if i % 2 != 0:
for_list.append(i**2)
print for_list
# using list comprehension
print [i**2 for i in range(1,20) if i % 2 != 0]
# using map and filter
def sqr(x):
return x**2
def is_odd(x):
return x % 2 != 0
print map(sqr, filter(is_odd, range(1,20)))
# using lambda
print map(lambda a: a**2, (filter(lambda x: x % 2 !=0, range(1,20))))
## SOLUTION
def fact1(n):
"""Returns the factorial of n."""
return reduce(lambda x, y: x*y, range(1, n+1))
for i in range(1,11):
print fact1(i),
## SOLUTION
def normalize(x):
mean_x = sum(x)/len(x)
std_ = 0
for i in x:
std_ += (i-mean_x)**2
std_ = std_/len(x)
x_prime = []
for i in x:
x_prime.append((i - mean_x)/(std_**0.5))
return x_prime
x = [1,2,3,4.]
normalize(x)
## SOLUTION
def dot_product(A, B):
rows = len(A)
shared = len(B)
cols = len(B[0])
return [[sum(A[i][k]*B[k][j] for k in range(shared)) for j in range(cols)] for i in range(rows)]
dot_product(A,B)  # A and B are assumed to be defined as nested lists in an omitted cell
## SOLUTION
from collections import OrderedDict
a = {
"key1" : 1,
"key3" : "snafu",
"key2" : 5,
"key5" : 7,
"key4" : 0,
}
b = {
"key2" : 6,
"key1" : 8,
"key4" : "bar",
"key3" : 9,
"key5" : "foo"
}
a = OrderedDict(sorted(a.items()))
b = OrderedDict(sorted(b.items()))
# combine graph_attr and unique_attr. Order is the same because of OrderedDict
c = {}
for e, (k, v) in enumerate(a.items()):
c[k] = zip(a.values(), b.values())[e]
c
## SOLUTION
def pascal(c,r):
assert c <= r, "Bad parameters - column cannot be greater than row"
if c == 1 or c == r:
return 1
else:
return pascal(c-1, r-1) + pascal(c, r-1)
depth = 10
for row in range(1, depth+1):
for col in range(1, row+1):
print pascal(col, row),
print
BIG_NUM = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
max_num = 0 # this holds the largest number for the current iteration of the loop
NUM_ADJACENT = 13
def product_consecutive(ind):
''' Calculate the product of consecutive digits'''
prod = 1
for digit in str(BIG_NUM)[ind:ind + NUM_ADJACENT]: # loop over every digit in the 13 consecutive digits
prod *= int(digit)
return prod
for ind in range(len(str(BIG_NUM))): # loop over the range of digits in big_num
current_product = product_consecutive(ind)
if current_product > max_num:
max_num = current_product
print(max_num)
## SOLUTION
from itertools import groupby
def calc_runs_dict(trials):
"""Returns the counts for runs of length k for each k observed in a dictionary"""
runs = {}
count_consecutive = lambda x: [(k, sum(1 for i in g)) for k, g in groupby(x)] # function to group consecutive values
grouped_counts = count_consecutive(trials)
run_values = [i[1] for i in grouped_counts if i[0] == 1] # filter out 0's
update_dictionary = lambda x: runs.update({x:runs.get(x,0)+1})
list(map(update_dictionary, run_values)) # update runs dictionary for new consecutive 1's
return runs
trials = [0, 1, 0, 1, 1, 0, 0, 0, 0, 1]
calc_runs_dict(trials)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Also the
Step2: In case a user wants to borrow a book, we could write a
Step3: Things can quickly become complex when
Step4: creates an (empty) set object, just like
Step5: is nothing more than the shorthand notation for
Step6: which creates a new list object.
Step7: Classes therefore define data types.
Step8: Once we have defined the class, we can create new objects from it
Step9: So we really have created a new data type, Student.
Step10: Here we can already see that freely assigning attributes to existing objects is not entirely unproblematic, because it gives us objects of the same type that may have different attributes, which undermines the concept of a type. We should therefore rather use the class itself to define all required attributes.
Step11: Inside the __init__() method we assign the passed values to the object (referenced via the name self). The values thereby become attribute values of the object.
Step12: If we equip a class with an __init__() method, an object has to be created with the corresponding arguments. That is why the following code does not work
Step13: Since the __init__() method is nothing more than a function attached to the object, everything we already learned about functions applies here as well. For example, default values can be defined
Step14: Methods
Step15: get_area() is a function that is not available everywhere in the program,
Step16: but only in the context of an object of this class
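Tying this back to the library example in the code below, the dictionary-based users could be modelled as objects as well — a sketch of my own, not part of the original notebook:
class User:
    def __init__(self, firstname, lastname, address, status):
        self.firstname = firstname
        self.lastname = lastname
        self.address = address
        self.status = status
        self.borrowed_books = []

    def borrow(self, book):
        # the borrowing logic now lives next to the data it changes
        self.borrowed_books.append(book)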
|
<ASSISTANT_TASK:>
Python Code:
books = [
("Klein, Bernd", "Einführung in Python", "Hanser", "3", 2017),
("Sweigart, Al", "Automate the Boring Stuff with Python", "No Starch Press", "1", 2015),
("Weigend, Michael", "Python", "mitp", "6., erw. Aufl.", 2016),
("Downey, Allen B.", "Programmieren lernen mit Python", "O'Reilly", "1", 2014)
]
users = [
{'firstname': 'Anton',
'lastname': 'Huber',
'address': 'Maygasse 12, 8010 Graz',
'status': 'Student',
'borrowed_books': []
},
{'firstname': 'Anna',
'lastname': 'Schmidt',
'address': 'Rosenberg 5, 8010 Graz',
'status': 'Professor',
'borrowed_books': []
},
]
def entlehne(user, book):
user["borrowed_books"].append(book)
distinct_names = set()
names = []
names = list()
type(names)
class Student:
pass
hans = Student()
anna = Student()
hans
type(hans)
hans.firstname = 'Hans'
anna.lastname = 'Huber'
hans.firstname
anna.firstname
class Student:
def __init__(self, firstname, lastname):
self.firstname = firstname
self.lastname = lastname
hans = Student('Hans', 'Meier')
hans.firstname
hans.lastname
anna = Student()
import random
class Student:
def __init__(self, firstname, lastname, matrikelnummer=None):
self.firstname = firstname
self.lastname = lastname
# if matrikelnummer is None, generate it randomly
if matrikelnummer is None:
self.matrikelnummer = '017{}'.format(random.randint(100000, 999999))
else:
self.matrikelnummer = matrikelnummer
hans = Student('Hans', 'Meier', '017542345')
anna = Student('Anna', 'Huber')
hans.matrikelnummer
anna.matrikelnummer
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
def get_area(self):
return self.length * self.width
get_area()   # NameError: the function only exists as a method of Rectangle
a_rect = Rectangle(80, 50)
a_rect.get_area()
<END_TASK>
|