Unnamed: 0
int64 0
15.9k
| cleaned_code
stringlengths 67
124k
⌀ | cleaned_prompt
stringlengths 168
30.3k
⌀ |
|---|---|---|
2,000
|
<ASSISTANT_TASK:>
Python Code:
def power(x, y, mod):
    """Return (x ** y) % mod via fast modular exponentiation.

    Parameters:
        x, y: non-negative integer base and exponent.
        mod: modulus (> 0).

    Returns:
        (x ** y) % mod.
    """
    # Python's built-in three-argument pow implements binary (square-and-
    # multiply) exponentiation in C — the same algorithm as the original
    # loop, including the edge case power(x, 0, 1) == 0.
    return pow(x, y, mod)
def ncr(n, r, mod):
    """Return C(n, r) modulo ``mod``, where ``mod`` must be prime.

    Multiplies (n - i + 1) / i for i = 1..r, doing the division with the
    Fermat modular inverse i^(mod-2) mod ``mod``.

    Parameters:
        n, r: non-negative integers with r <= n.
        mod: a prime modulus.

    Returns:
        C(n, r) % mod.
    """
    res = 1
    for i in range(1, r + 1):
        res = res * (n - i + 1) % mod
        # Built-in 3-arg pow computes the Fermat inverse directly, so this
        # block no longer depends on the sibling power() helper.
        res = res * pow(i, mod - 2, mod) % mod
    return res % mod
def NoOfChords(A):
    """Number of ways to draw A non-intersecting chords on 2A points.

    This is the A-th Catalan number, C(2A, A) / (A + 1), reduced
    modulo 1e9 + 7 (a prime), with the division performed as a
    multiplication by the Fermat modular inverse of (A + 1).

    Parameters:
        A: non-negative number of chords.

    Returns:
        Catalan(A) % (10**9 + 7).
    """
    from math import comb  # local import: keeps module-level deps unchanged
    mod = 10 ** 9 + 7
    ans = comb(2 * A, A) % mod
    ans = ans * pow(A + 1, mod - 2, mod) % mod
    return ans

N = 2
print(NoOfChords(N))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,001
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd

df = pd.DataFrame({'Sp': ['MM2', 'MM2', 'MM4', 'MM4', 'MM4'],
                   'Mt': ['S4', 'S4', 'S2', 'S2', 'S2'],
                   'Value': ['bg', 'dgd', 'rd', 'cb', 'uyi'],
                   'count': [10, 1, 2, 8, 8]})

def g(df):
    """Return the rows whose 'count' equals the maximum 'count' of
    their (Sp, Mt) group; ties keep every row at the maximum."""
    group_max = df.groupby(['Sp', 'Mt'])['count'].transform('max')
    return df[df['count'] == group_max]

result = g(df.copy())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,002
|
<ASSISTANT_TASK:>
Python Code:
# Returning a function as a value (closure over *args)
def lazy_sum(*args):
    def sum():
        ax = 0
        for n in args:
            ax = ax + n
        return ax
    return sum
f = lazy_sum(1, 3, 5, 7, 9)
print(f())
# Base conversion with int()
print(int(12345))
print(int('1000',base=2))
print(int('1A',base=16))
import functools
int2 = functools.partial(int, base=2)
print(int2('1000000'))
print(int2('1010101'))
# Partial-function example
# The original function
def func(x=2,y=3,z=4):
    return x+y+z
print(func(x=3))
print(func(y=6))
print(func(x=4,y=10))
print(func(2,3))
# Build a partial function with preset default values
import functools
f1 = functools.partial(func, x=2,z=3)
print(f1(y=3))
print(f1(y=2))
print(f1(2)) # raises TypeError: the positional 2 binds to x, which is already preset by the partial
# map() examples
def f(x):
    return x * x
r = map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9])
for i in r:
    print(i)
# f(x) can be more complex and contain more logic
def f(x):
    y = x * x + 3
    return y
r = map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9])
for i in r:
    print(i)
# The data fed to map can also be more complex
def f(x):
    y = x * x + 3
    return y
list1 = [x for x in range(1,100,7) if x % 2 ==0]
print(list1)
# The main program stays concise
r = map(f, list1)
for i in r:
    print(i)
# map can also operate on two sequences at once
def addition(x, y):
    return x + y
numbers1 = [5, 6, 2, 8]
numbers2 = [7, 1, 4, 9]
result = map(addition, numbers1, numbers2)
print(list(result))
# A more advanced use of map: applying a list of functions to each value
def multiply(x):
    return (x*x)
def add(x):
    return (x+x)
func = [multiply, add]
for i in range(5):
    value = list(map(lambda x: x(i), func))
    print(value)
# reduce example: an addition function
from functools import reduce
def add(x, y):
    return x + y
print(reduce(add, [1, 3, 5, 7, 9]))
# reduce: simulating a string-to-integer conversion
from functools import reduce
def f(x, y):
    return x * 10 + y
def char2int(s):
    return {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5,
            '6': 6, '7': 7, '8': 8, '9': 9}[s]
def str2int(s):
    return reduce(f, map(char2int, s))
print(str2int('13579'))
print(type(str2int('13579')))
# Breaking the function above apart: first the map step
list1 = map(char2int, '13579')
for i in list1:
    print(i,type(i))
# ... then the reduce step
list1 = map(char2int, '13579')
print(reduce(f,list1))
# filter example: drop even numbers from a list, keep only the odd ones
# Is n odd?
def is_odd(n):
    return n % 2 == 1
print(list(filter(is_odd, [1, 2, 4, 5, 6])))
# Filter out the blank elements of a list
# NOTE(review): despite the name, is_empty returns True for NON-blank
# strings, so filter() below KEEPS the non-blank elements.
def is_empty(s):
    # strip() removes leading/trailing characters (whitespace by default)
    if len(s.strip()) ==0:
        return False
    else:
        return True
print(list(filter(is_empty, ['A', '', 'B','C', ' '])))
# Numbers in a range divisible by neither 2 nor 3
def f(x):
    return x % 2 != 0 and x % 3 != 0
print(list(filter(f, range(2, 30))))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 闭包
Step2: 虽然默认参数还是很容易使用,但是如果我们在某个场景需要大量调用的话,还是有点不方便,特别是对于有很多参数的函数来说,会让程序显得复杂。还记得之前那个 max min 的程序举例么?我们可以用偏函数来解决整个问题。
Step3: map() 函数
Step4: reduce() 函数
Step5: filter() 函数
|
2,003
|
<ASSISTANT_TASK:>
Python Code:
# --- Novel-taxa simulations: parameter sweep over QIIME 1 classifiers ---
from os import system
from os.path import join, expandvars
from joblib import Parallel, delayed
from glob import glob
from tax_credit.framework_functions import (recall_novel_taxa_dirs,
                                            parameter_sweep,
                                            move_results_to_repository)

project_dir = "../.."
analysis_name= "novel-taxa-simulations"
results_dir = join('..', '..', 'novel-taxa-tmp')
iterations = 10
data_dir = join(project_dir, "data", analysis_name)
# databases is a list of names given as dictionary keys in the second
# cell of the database generation notebook. Just list the names here.
databases = ['B1-REF', 'F1-REF']
# Generate a list of input directories
(dataset_reference_combinations, reference_dbs) = recall_novel_taxa_dirs(data_dir, databases, iterations)
# Method name -> {parameter name: list of values to sweep}
method_parameters_combinations = { # probabilistic classifiers
    'rdp': {'confidence': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5,
                           0.6, 0.7, 0.8, 0.9, 1.0]},
    # global alignment classifiers
    'uclust': {'min_consensus_fraction': [0.51, 0.76, 1.0],
               'similarity': [0.8, 0.9],
               'uclust_max_accepts': [1, 3, 5]},
    # local alignment classifiers
    'sortmerna': {'sortmerna_e_value': [1.0],
                  'min_consensus_fraction': [0.51, 0.76, 1.0],
                  'similarity': [0.8, 0.9],
                  # NOTE(review): trailing space in this key is preserved
                  # from the original — confirm parameter_sweep expects it
                  'sortmerna_best_N_alignments ': [1, 3, 5],
                  'sortmerna_coverage' : [0.8, 0.9]},
    'blast' : {'blast_e_value' : [0.0000000001, 0.001, 1, 1000]}
}
# Placeholders filled by parameter_sweep:
# {0}=output dir, {1}=input file, {2}=ref seqs, {3}=ref taxonomy, {4}=method, {5}=extra params
command_template = 'bash -c "source activate qiime1; source ./.bashrc; mkdir -p {0} ; assign_taxonomy.py -v -i {1} -o {0} -r {2} -t {3} -m {4} {5} --rdp_max_memory 16000"'
commands = parameter_sweep(data_dir, results_dir, reference_dbs,
                           dataset_reference_combinations,
                           method_parameters_combinations, command_template,
                           infile='query.fasta', output_name='query_tax_assignments.txt')
# Sanity check: print one generated command per method, and the total count
for method in method_parameters_combinations:
    print(method)
    for command in commands:
        if '/'+method+'/' in command:
            print(command)
            break
print(len(commands))
# Run every assignment command in parallel through the shell
Parallel(n_jobs=23)(delayed(system)(command) for command in commands);
# --- QIIME 2 consensus classifiers: BLAST+ then VSEARCH ---
# Re-scan input dirs, this time pointing at the .qza artifact references
(dataset_reference_combinations, reference_dbs) = recall_novel_taxa_dirs(
    data_dir, databases, iterations, ref_seqs='ref_seqs.qza', ref_taxa='ref_taxa.qza')
method_parameters_combinations = {
    'blast+' : {'p-evalue': [0.001],
                'p-maxaccepts': [1, 10],
                'p-perc-identity': [0.80, 0.97, 0.99],
                'p-min-consensus': [0.51, 0.75, 0.99]}
}
# Classify, export the .qza result, and rename to the expected .txt
command_template = ("mkdir -p {0}; "
                    "qiime feature-classifier classify-consensus-blast --i-query {1} --o-classification "
                    "{0}/rep_seqs_tax_assignments.qza --i-reference-reads {2} --i-reference-taxonomy {3} {5}; "
                    "qiime tools export {0}/rep_seqs_tax_assignments.qza --output-dir {0}; "
                    "mv {0}/taxonomy.tsv {0}/query_tax_assignments.txt")
commands = parameter_sweep(data_dir, results_dir, reference_dbs,
                           dataset_reference_combinations,
                           method_parameters_combinations, command_template,
                           infile='query.qza', output_name='rep_seqs_tax_assignments.qza')
Parallel(n_jobs=23)(delayed(system)(command) for command in commands);
# Same sweep for the VSEARCH consensus classifier
method_parameters_combinations = {
    'vsearch' : {'p-maxaccepts': [1, 10],
                 'p-perc-identity': [0.80, 0.90, 0.97, 0.99],
                 'p-min-consensus': [0.51, 0.99]}
}
command_template = ("mkdir -p {0}; "
                    "qiime feature-classifier classify-consensus-vsearch --i-query {1} --o-classification "
                    "{0}/rep_seqs_tax_assignments.qza --i-reference-reads {2} --i-reference-taxonomy {3} {5}; "
                    "qiime tools export {0}/rep_seqs_tax_assignments.qza --output-dir {0}; "
                    "mv {0}/taxonomy.tsv {0}/query_tax_assignments.txt")
commands = parameter_sweep(data_dir, results_dir, reference_dbs,
                           dataset_reference_combinations,
                           method_parameters_combinations, command_template,
                           infile='query.qza', output_name='rep_seqs_tax_assignments.qza')
Parallel(n_jobs=23)(delayed(system)(command) for command in commands);
# --- scikit-learn naive Bayes classifiers ---
method_parameters_combinations = {
    'naive-bayes' : {'p-feat-ext--ngram-range':
                     ['[4,4]', '[6,6]', '[8,8]', '[16,16]', '[32,32]',
                      '[7,7]', '[9,9]', '[10,10]', '[11,11]',
                      '[12,12]', '[14,14]', '[18,18]'],
                     'p-classify--alpha': [0.001]},
    'naive-bayes-bespoke' : {'p-feat-ext--ngram-range':
                             ['[4,4]', '[6,6]', '[8,8]', '[16,16]', '[32,32]',
                              '[7,7]', '[9,9]', '[10,10]', '[11,11]',
                              '[12,12]', '[14,14]', '[18,18]'],
                             'p-classify--alpha': [0.001],
                             'p-classify--fit-prior': ['']}
}
# First fit one classifier per parameter combination...
command_template = ('mkdir -p "{0}"; '
                    'qiime feature-classifier fit-classifier-naive-bayes --o-classifier '
                    '"{0}/classifier.qza" --i-reference-reads {2} --i-reference-taxonomy {3} {5}; ')
confidences = [0.0, 0.5, 0.7, 0.9, 0.92, 0.94,
               0.96, 0.98, 1.0]
# ...then reuse that classifier at every confidence level, exporting each
# result into its own "{output_dir}:{confidence}" directory
command_template += ''.join(
    'mkdir -p "{0}:' + str(c) + '"; '
    'qiime feature-classifier classify-sklearn '
    '--o-classification "{0}:' + str(c) + '/rep_seqs_tax_assignments.qza" '
    '--i-classifier "{0}/classifier.qza" '
    '--i-reads {1} --p-confidence ' + str(c) + '; '
    'qiime tools export "{0}:' + str(c) + '/rep_seqs_tax_assignments.qza" --output-dir "{0}:' + str(c) + '"; '
    'mv "{0}:' + str(c) + '/taxonomy.tsv" "{0}:' + str(c) + '/query_tax_assignments.txt"; '
    for c in confidences)
# Finally drop the (large) fitted classifier and its now-empty directory
command_template += 'rm "{0}/classifier.qza"; rmdir "{0}"'
commands = parameter_sweep(data_dir, results_dir, reference_dbs,
                           dataset_reference_combinations,
                           method_parameters_combinations, command_template,
                           infile='query.qza', output_name='rep_seqs_tax_assignments.qza')
print(len(commands), 'commands')
print('\n\n'.join(commands[0].split(';')))
Parallel(n_jobs=23)(delayed(system)(command) for command in commands);
# Move result files into the repository's precomputed-results tree
precomputed_results_dir = join(project_dir, "data", "precomputed-results", analysis_name)
method_dirs = glob(join(results_dir, '*', '*', '*', '*'))
move_results_to_repository(method_dirs, precomputed_results_dir)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing data set sweep
Step2: Preparing the method/parameter combinations and generating commands
Step3: Now enter the template of the command to sweep, and generate a list of commands with parameter_sweep().
Step4: As a sanity check, we can look at the first command that was generated and the number of commands generated.
Step5: Finally, we run our commands.
Step6: BLAST+
Step7: VSEARCH
Step8: scikit-learn
Step9: Move result files to repository
|
2,004
|
<ASSISTANT_TASK:>
Python Code:
%%bash
# NOTE(review): this installs the 'sh' package and upgrades pip in one call —
# confirm 'pip install sh; pip install --upgrade pip' was the intent
pip install sh --upgrade pip # needed to execute shell scripts later
import os
PROJECT = 'PROJECT' # REPLACE WITH YOUR PROJECT ID
REGION = 'us-central1' # REPLACE WITH YOUR REGION e.g. us-central1
# do not change these
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = 'recserve_' + PROJECT
os.environ['REGION] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
# create GCS bucket with recserve_PROJECT_NAME if not exists
exists=$(gsutil ls -d | grep -w gs://${BUCKET}/)
if [ -n "$exists" ]; then
    echo "Not creating recserve_bucket since it already exists."
else
    echo "Creating recserve_bucket"
    gsutil mb -l ${REGION} gs://${BUCKET}
fi
# %%bash
# run app engine creation commands
# gcloud app create --region ${REGION} # see: https://cloud.google.com/compute/docs/regions-zones/
# gcloud app update --no-split-health-checks
%%bash
# copy sample session data and training events into the project bucket
gsutil -m cp gs://cloud-training-demos/courses/machine_learning/deepdive/10_recommendation/endtoend/data/ga_sessions_sample.json.gz gs://${BUCKET}/data/ga_sessions_sample.json.gz
gsutil -m cp gs://cloud-training-demos/courses/machine_learning/deepdive/10_recommendation/endtoend/data/recommendation_events.csv data/recommendation_events.csv
gsutil -m cp gs://cloud-training-demos/courses/machine_learning/deepdive/10_recommendation/endtoend/data/recommendation_events.csv gs://${BUCKET}/data/recommendation_events.csv
%%bash
# create BigQuery dataset if it doesn't already exist
exists=$(bq ls -d | grep -w GA360_test)
if [ -n "$exists" ]; then
    echo "Not creating GA360_test since it already exists."
else
    echo "Creating GA360_test dataset."
    bq --project_id=${PROJECT} mk GA360_test
fi
# create the schema and load our sample Google Analytics session data
bq load --source_format=NEWLINE_DELIMITED_JSON \
    GA360_test.ga_sessions_sample \
    gs://${BUCKET}/data/ga_sessions_sample.json.gz \
    data/ga_sessions_sample_schema.json # can't load schema files from GCS
%%bash
# package the WALS trainer and copy it to the bucket
cd wals_ml_engine
echo "creating distributable package"
python setup.py sdist
echo "copying ML package to bucket"
gsutil cp dist/wals_ml_engine-0.1.tar.gz gs://${BUCKET}/code/
%%bash
# view the ML train local script before running
cat wals_ml_engine/mltrain.sh
%%bash
cd wals_ml_engine
# train locally with unoptimized hyperparams
./mltrain.sh local ../data/recommendation_events.csv --data-type web_views --use-optimized
# Options if we wanted to train on CMLE. We will do this with Cloud Composer later
# train on ML Engine with optimized hyperparams
# ./mltrain.sh train ../data/recommendation_events.csv --data-type web_views --use-optimized
# tune hyperparams on ML Engine:
# ./mltrain.sh tune ../data/recommendation_events.csv --data-type web_views
ls wals_ml_engine/jobs
%%bash
# copy the most recent job's saved model arrays into the bucket
export JOB_MODEL=$(find wals_ml_engine/jobs -name "model" | tail -1)
gsutil cp ${JOB_MODEL}/* gs://${BUCKET}/model/
echo "Recommendation model file numpy arrays in bucket:"
gsutil ls gs://${BUCKET}/model/
%%bash
cd scripts
cat prepare_deploy_api.sh
%%bash
printf "\nCopy and run the deploy script generated below:\n"
cd scripts
./prepare_deploy_api.sh # Prepare config file for the API.
%%bash
gcloud endpoints services deploy [REPLACE_WITH_TEMP_FILE_NAME.yaml]
%%bash
# view the app deployment script
cat scripts/prepare_deploy_app.sh
%%bash
# prepare to deploy
cd scripts
./prepare_deploy_app.sh
%%bash
gcloud -q app deploy app/app_template.yaml_deploy.yaml
%%bash
cd scripts
./query_api.sh # Query the API.
#./generate_traffic.sh # Send traffic to the API.
AIRFLOW_BUCKET = 'us-central1-composer-21587538-bucket' # REPLACE WITH AIRFLOW BUCKET NAME
os.environ['AIRFLOW_BUCKET'] = AIRFLOW_BUCKET
%%writefile airflow/dags/training.py
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DAG definition for recserv model training.
import airflow
from airflow import DAG
# Reference for all available airflow operators:
# https://github.com/apache/incubator-airflow/tree/master/airflow/contrib/operators
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.hooks.base_hook import BaseHook
# from airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator
# above mlengine_operator currently doesnt support custom MasterType so we import our own plugins:
# custom plugins
from airflow.operators.app_engine_admin_plugin import AppEngineVersionOperator
from airflow.operators.ml_engine_plugin import MLEngineTrainingOperator
import datetime
def _get_project_id():
Get project ID from default GCP connection.
extras = BaseHook.get_connection('google_cloud_default').extra_dejson
key = 'extra__google_cloud_platform__project'
if key in extras:
project_id = extras[key]
else:
raise ('Must configure project_id in google_cloud_default '
'connection from Airflow Console')
return project_id
PROJECT_ID = _get_project_id()
# Data set constants, used in BigQuery tasks. You can change these
# to conform to your data.
# TODO: Specify your BigQuery dataset name and table name
DATASET = 'GA360_test'
TABLE_NAME = 'ga_sessions_sample'
ARTICLE_CUSTOM_DIMENSION = '10'
# TODO: Confirm bucket name and region
# GCS bucket names and region, can also be changed.
BUCKET = 'gs://recserve_' + PROJECT_ID
REGION = 'us-east1'
# The code package name comes from the model code in the wals_ml_engine
# directory of the solution code base.
PACKAGE_URI = BUCKET + '/code/wals_ml_engine-0.1.tar.gz'
JOB_DIR = BUCKET + '/jobs'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['airflow@example.com'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 5,
'retry_delay': datetime.timedelta(minutes=5)
}
# Default schedule interval using cronjob syntax - can be customized here
# or in the Airflow console.
# TODO: Specify a schedule interval in CRON syntax to run once a day at 2100 hours (9pm)
# Reference: https://airflow.apache.org/scheduler.html
schedule_interval = '00 21 * * *'
# TODO: Title your DAG to be recommendations_training_v1
dag = DAG('recommendations_training_v1',
default_args=default_args,
schedule_interval=schedule_interval)
dag.doc_md = __doc__
#
#
# Task Definition
#
#
# BigQuery training data query
bql='''
#legacySql
SELECT
fullVisitorId as clientId,
ArticleID as contentId,
(nextTime - hits.time) as timeOnPage,
FROM(
SELECT
fullVisitorId,
hits.time,
MAX(IF(hits.customDimensions.index={0},
hits.customDimensions.value,NULL)) WITHIN hits AS ArticleID,
LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId, visitNumber
ORDER BY hits.time ASC) as nextTime
FROM [{1}.{2}.{3}]
WHERE hits.type = "PAGE"
) HAVING timeOnPage is not null and contentId is not null;
'''
bql = bql.format(ARTICLE_CUSTOM_DIMENSION, PROJECT_ID, DATASET, TABLE_NAME)
# TODO: Complete the BigQueryOperator task to truncate the table if it already exists before writing
# Reference: https://airflow.apache.org/integration.html#bigqueryoperator
t1 = BigQueryOperator(
task_id='bq_rec_training_data',
bql=bql,
destination_dataset_table='%s.recommendation_events' % DATASET,
write_disposition='WRITE_TRUNCATE', # specify to truncate on writes
dag=dag)
# BigQuery training data export to GCS
# TODO: Fill in the missing operator name for task #2 which
# takes a BigQuery dataset and table as input and exports it to GCS as a CSV
training_file = BUCKET + '/data/recommendation_events.csv'
t2 = BigQueryToCloudStorageOperator(
task_id='bq_export_op',
source_project_dataset_table='%s.recommendation_events' % DATASET,
destination_cloud_storage_uris=[training_file],
export_format='CSV',
dag=dag
)
# ML Engine training job
job_id = 'recserve_{0}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M'))
job_dir = BUCKET + '/jobs/' + job_id
output_dir = BUCKET
training_args = ['--job-dir', job_dir,
'--train-files', training_file,
'--output-dir', output_dir,
'--data-type', 'web_views',
'--use-optimized']
# TODO: Fill in the missing operator name for task #3 which will
# start a new training job to Cloud ML Engine
# Reference: https://airflow.apache.org/integration.html#cloud-ml-engine
# https://cloud.google.com/ml-engine/docs/tensorflow/machine-types
t3 = MLEngineTrainingOperator(
task_id='ml_engine_training_op',
project_id=PROJECT_ID,
job_id=job_id,
package_uris=[PACKAGE_URI],
training_python_module='trainer.task',
training_args=training_args,
region=REGION,
scale_tier='CUSTOM',
master_type='complex_model_m_gpu',
dag=dag
)
# App Engine deploy new version
t4 = AppEngineVersionOperator(
task_id='app_engine_deploy_version',
project_id=PROJECT_ID,
service_id='default',
region=REGION,
service_spec=None,
dag=dag
)
# TODO: Be sure to set_upstream dependencies for all tasks
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
%%bash
# Upload the DAG and the custom operator plugins to the Composer bucket
gsutil cp airflow/dags/training.py gs://${AIRFLOW_BUCKET}/dags # overwrite if it exists
gsutil cp -r airflow/plugins gs://${AIRFLOW_BUCKET} # copy custom plugins
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Setup environment variables
Step2: Setup Google App Engine permissions
Step3: Part One
Step4: 2. Create empty BigQuery dataset and load sample JSON data
Step5: Install WALS model training package and model data
Step6: 2. Run the WALS model on the sample data set
Step7: This will take a couple minutes, and create a job directory under wals_ml_engine/jobs like "wals_ml_local_20180102_012345/model", containing the model files saved as numpy arrays.
Step8: 3. Copy the model files from this directory to the model folder in the project bucket
Step9: Install the recserve endpoint
Step10: This will output something like
Step11: 3. Prepare the deploy template for the App Engine App
Step12: You can ignore the script output "ERROR
Step13: This will take 7 - 10 minutes to deploy the app. While you wait, consider starting on Part Two below and completing the Cloud Composer DAG file.
Step14: If the call is successful, you will see the article IDs recommended for that specific user by the WALS ML model <br/>
Step17: Complete the training.py DAG file
Step18: Copy local Airflow DAG file and plugins into the DAGs folder
|
2,005
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
!head -n 30 open_exoplanet_catalogue.txt
data = np.genfromtxt(fname = 'open_exoplanet_catalogue.txt', delimiter = ',')
data[np.isnan(data)] = 0
assert data.shape==(1993,24)
fig = plt.figure(figsize=(7,7))
plt.hist(x=data[::1,2], bins = 1500)
plt.title('Planetary Masses Histogram')
plt.tick_params(top=False, right=False)
plt.xlabel('Planetary Mass(Jupiter Mass)')
plt.ylabel('Number of Planets')
plt.ylim(0,80)
plt.xlim(0,270)
plt.tick_params(axis='both', direction = 'out')
a = []
b = []
c = []
for x in data[::1,2]:
if x > 0 and x < 1:
a.append(x)
elif x > 1 and x < 12:
b.append(x)
elif x > 12:
c.append(x)
fig = plt.figure(figsize=(10,7))
plt.subplot(1,3,1)
plt.hist(a)
plt.ylabel('Number of Planets')
plt.xlabel('Planetary Mass(Jupiter Mass)')
plt.tick_params(top=False, right=False)
plt.tick_params(axis='both', direction = 'out')
plt.subplot(1,3,2)
plt.hist(b)
plt.title('Planetary Masses Histogram')
plt.xlabel('Planetary Mass(Jupiter Mass)')
plt.tick_params(top=False, right=False)
plt.tick_params(axis='both', direction = 'out')
plt.subplot(1,3,3)
plt.hist(c)
plt.xlabel('Planetary Mass(Jupiter Mass)')
plt.tick_params(top=False, right=False)
plt.tick_params(axis='both', direction = 'out')
plt.tight_layout()
assert True # leave for grading
fig= plt.figure(figsize=(8,5))
plt.scatter(data[::1,5], data[::1,6], marker = '.')
plt.title('Scatter Plot of Obrbital Eccentricity vs. Semimajor Axis')
plt.tick_params(right=False, top=False)
plt.ylabel('Orbital Eccentricity')
plt.xlabel('Semimajor Axis')
#plt.semilogx()
plt.xlim(0,2)
plt.ylim(-.01,1)
plt.tick_params(axis='both', direction = 'out')
assert True # leave for grading
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exoplanet properties
Step2: Use np.genfromtxt with a delimiter of ',' to read the data into a NumPy array called data
Step3: Looked this up on stackoverflow to replace nans in array with zeros
Step4: Make a histogram of the distribution of planetary masses. This will reproduce Figure 2 in the original paper.
Step5: Make a scatter plot of the orbital eccentricity (y) versus the semimajor axis. This will reproduce Figure 4 of the original paper. Use a log scale on the x axis.
|
2,006
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats
from statsmodels.stats.weightstats import *
from statsmodels.stats.proportion import proportion_confint
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# Water hardness vs mortality data (tab-separated file)
water = pd.read_csv('water.txt', delimiter='\t')
water.info()
water.describe()
water.head()
sns.jointplot('hardness', 'mortality', water, kind="reg");
# Correlation between hardness and mortality over the full sample
print('Pearson correlation: %.4f' % stats.pearsonr(water.hardness, water.mortality)[0])
print('Spearman correlation: %.4f' % stats.spearmanr(water.hardness, water.mortality)[0])
# Split by region and recompute the Pearson correlation per subsample
water_south = water[water.location == 'South']
water_north = water[water.location == 'North']
water_south.shape
water_north.shape
print('Pearson "South" correlation: %.4f' % stats.pearsonr(water_south.hardness, water_south.mortality)[0])
print('Pearson "North" correlation: %.4f' % stats.pearsonr(water_north.hardness, water_north.mortality)[0])
# 2x2 contingency table of survey answers by sex
# NOTE(review): the exact row/column semantics are not visible here —
# confirm against the source survey before interpreting signs.
bars_sex = np.array([[203., 239.], [718., 515.]])
def matthewsr(a, b, c, d):
    """Matthews correlation coefficient of the 2x2 table [[a, b], [c, d]]."""
    numerator = a * d - b * c
    denominator = np.sqrt((a + b) * (a + c) * (b + d) * (c + d))
    return numerator / denominator
# Matthews correlation for the sex-by-answer table, plus its chi2 p-value
matthews_coeff = matthewsr(*bars_sex.flatten())
print('Matthews correlation: %.4f' % matthews_coeff)
bars_sex.shape
print('Matthews significance p-value: %f' % stats.chi2_contingency(bars_sex)[1])
def proportions_diff_confint_ind(sample1, sample2, alpha = 0.05):
    """Normal-approximation confidence interval for p1 - p2
    (independent samples).

    Each sample is a pair of counts whose first element is the
    "success" count; the interval has coverage 1 - alpha.
    """
    z = stats.norm.ppf(1 - alpha / 2.)
    n1, n2 = np.sum(sample1), np.sum(sample2)
    p1, p2 = sample1[0] / n1, sample2[0] / n2
    diff = p1 - p2
    half_width = z * np.sqrt(p1 * (1 - p1) / n1 + p2 * (1 - p2) / n2)
    return (diff - half_width, diff + half_width)
# Interval for the difference in proportions between the two table columns
print('95%% confidence interval for a difference of men and women: [%.4f, %.4f]' %
      proportions_diff_confint_ind(bars_sex[:,1], bars_sex[:,0]))
def proportions_diff_z_stat_ind(sample1, sample2):
    """Z statistic for H0: p1 == p2 (independent samples, pooled variance)."""
    n1, n2 = np.sum(sample1), np.sum(sample2)
    p1, p2 = sample1[0] / n1, sample2[0] / n2
    pooled = float(p1 * n1 + p2 * n2) / (n1 + n2)
    standard_error = np.sqrt(pooled * (1 - pooled) * (1. / n1 + 1. / n2))
    return (p1 - p2) / standard_error
def proportions_diff_z_test(z_stat, alternative = 'two-sided'):
    """p-value of the z statistic for the requested alternative.

    Raises ValueError for an unknown ``alternative``.
    """
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized\n"
                         "should be 'two-sided', 'less' or 'greater'")
    if alternative == 'less':
        return stats.norm.cdf(z_stat)
    if alternative == 'greater':
        return 1 - stats.norm.cdf(z_stat)
    # two-sided
    return 2 * (1 - stats.norm.cdf(np.abs(z_stat)))
# p-value for equality of proportions between the two table columns
print('p-value: %f' % proportions_diff_z_test(proportions_diff_z_stat_ind(bars_sex[:,1], bars_sex[:,0])))
# 3x3 contingency table; chi2 independence test
happiness = np.array( [[197., 111., 33. ],
                       [382., 685., 331.],
                       [110., 342., 333.]] )
stats.chi2_contingency(happiness)
print('Chi2 stat value: %.4f' % stats.chi2_contingency(happiness)[0])
print('Chi2 stat p-value: %.62f' % stats.chi2_contingency(happiness)[1])
def cramers_stat(confusion_matrix):
    """Cramer's V association measure for a contingency table."""
    chi2_value = stats.chi2_contingency(confusion_matrix)[0]
    total = confusion_matrix.sum()
    dof_factor = min(confusion_matrix.shape) - 1
    return np.sqrt(chi2_value / (total * dof_factor))
# Cramer's V for the happiness table
print('V Cramer stat value: %.4f' % cramers_stat(happiness))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <b>
Step2: Сохраняется ли связь между признаками, если разбить выборку на северные и южные города? Посчитайте значения корреляции Пирсона между средней годовой смертностью и жёсткостью воды в каждой из двух подвыборок, введите наименьшее по модулю из двух значений, округлив его до четырёх знаков после десятичной точки.
Step3: <b>
Step4: В предыдущей задаче проверьте, значимо ли коэффициент корреляции Мэтьюса отличается от нуля. Посчитайте достигаемый уровень значимости; используйте функцию scipy.stats.chi2_contingency. Введите номер первой значащей цифры (например, если вы получили 5.5×10−8, нужно ввести 8).
Step5: В предыдущей задаче давайте попробуем ответить на немного другой вопрос
Step6: Проверьте гипотезу о равенстве долей любителей часто проводить вечера в баре среди мужчин и женщин. Посчитайте достигаемый уровень значимости, используя двустороннюю альтернативу. Введите номер первой значащей цифры (например, если вы получили 5.5×10−8, нужно ввести 8).
Step7: <b>
Step8: На данных из предыдущего вопроса посчитайте значение достигаемого уровня значимости. Введите номер первой значащей цифры (например, если вы получили 5.5×10−8, нужно ввести 8).
Step9: Чему в предыдущей задаче равно значение коэффициента V Крамера для рассматриваемых признаков? Округлите ответ до четырёх знаков после десятичной точки.
|
2,007
|
<ASSISTANT_TASK:>
Python Code:
# Weighted adjacency map: graph[u][v] = cost of the edge u -> v
graph = {'A': {'B': 14, 'C': 9, 'D': 7},
         'B': {'A': 14, 'C': 2, 'F': 9},
         'C': {'A': 9, 'B': 2, 'D': 7, 'E': 11},
         'D': {'A': 7, 'C':10, 'E':15},
         'E': {'C': 11, 'D':15, 'F': 6},
         'F': {'B': 9, 'E': 6}
}
# NOTE(review): the graph is not symmetric — graph['C']['D'] is 7 while
# graph['D']['C'] is 10; confirm whether that asymmetry is intended.
graph['C']['B']
# equivalently:
graph['B']['C']
# infinity compares greater than any finite number (used as "unreached" cost)
float('inf') > 99
def dijkstra(graph, start, destination):
    """Dijkstra shortest paths from ``start``, stopping early once
    ``destination`` is settled.

    Parameters:
        graph: dict-of-dicts adjacency map, graph[u][v] = non-negative
            edge cost of u -> v.
        start: source node.
        destination: target node used only for early termination.

    Returns:
        (costs, parent_nodes) — ``costs`` maps every node to its best
        known distance from ``start`` (final for all settled nodes,
        including ``destination``); ``parent_nodes`` maps each reached
        node to its predecessor on the shortest path.

    Bug fixed: the original broke out of the search as soon as the
    destination's cost was first *relaxed*. A node's cost is only final
    when it is *settled* (popped as the minimum-cost unvisited node), so
    the early result could be suboptimal; it also returned without
    marking the current node visited.
    """
    costs = {node: float('inf') for node in graph}
    costs[start] = 0
    # parent_nodes tracks the chain of predecessors forming shortest paths
    parent_nodes = {}
    settled = set()
    while len(settled) < len(graph):
        # pick the unvisited node with the lowest tentative cost
        best_cost, best_node = float('inf'), None
        for node, cost in costs.items():
            if node not in settled and cost < best_cost:
                best_cost, best_node = cost, node
        if best_node is None:
            break  # remaining nodes are unreachable from start
        if best_node == destination:
            break  # destination settled: its cost is now final
        # relax every neighbor through the newly settled node
        for neighbor, weight in graph[best_node].items():
            candidate = best_cost + weight
            if candidate < costs[neighbor]:
                costs[neighbor] = candidate
                parent_nodes[neighbor] = best_node
        settled.add(best_node)
    return costs, parent_nodes
# Shortest path search from A to F on the demo graph
costs, parent_nodes = dijkstra(graph, start='A', destination='F')
print('Costs:', costs)
print('Parent Nodes:', parent_nodes)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: For example, to get the cost of the edge connecting C and B, we can use the dictionary as follows
|
2,008
|
<ASSISTANT_TASK:>
Python Code:
# NOTE(review): Python 2 notebook (statement-form print, %pylab magic)
import numpy as np
import pandas as pd
import itertools
from scipy import stats
from statsmodels.stats.descriptivestats import sign_test
from statsmodels.stats.weightstats import *
%pylab inline
# Fraction of time each mouse spent in the mirrored cage half (one column)
mouses_data = pd.read_csv('mirror_mouses.txt', header = None)
mouses_data.columns = ['proportion_of_time']
mouses_data
mouses_data.describe()
pylab.hist(mouses_data.proportion_of_time)
pylab.show()
# One-sample criteria against the "no preference" value 0.5
print '95%% confidence interval for the median time: [%f, %f]' % zconfint(mouses_data)
print "M: %d, p-value: %f" % sign_test(mouses_data, 0.5)
m0 = 0.5
stats.wilcoxon(mouses_data.proportion_of_time - m0)
def permutation_t_stat_1sample(sample, mean):
    """One-sample permutation statistic: total deviation of ``sample``
    from ``mean``."""
    return sum(x - mean for x in sample)
# Observed statistic under H0: median proportion of time == 0.5
permutation_t_stat_1sample(mouses_data.proportion_of_time, 0.5)
def permutation_zero_distr_1sample(sample, mean, max_permutations = None):
    """Null distribution of the one-sample permutation statistic.

    With ``max_permutations`` None, enumerates all 2**len(sample) sign
    assignments (exact distribution); otherwise draws that many random
    sign vectors and deduplicates them.

    Returns a list of statistic values, one per sign assignment.
    """
    # A list comprehension rather than a bare map(): under Python 3,
    # map() returns a one-shot iterator, which would silently break the
    # repeated numpy multiplication below.
    centered_sample = [x - mean for x in sample]
    if max_permutations:
        # random subset of sign assignments; set() removes duplicates
        signs_array = set([tuple(x) for x in 2 * np.random.randint(2, size = (max_permutations,
                                                                              len(sample))) - 1 ])
    else:
        # full enumeration of {-1, +1}^n
        signs_array = itertools.product([-1, 1], repeat = len(sample))
    distr = [sum(centered_sample * np.array(signs)) for signs in signs_array]
    return distr
# Visualize the permutation null distribution of the statistic
pylab.hist(permutation_zero_distr_1sample(mouses_data.proportion_of_time, 0.5), bins = 15)
pylab.show()
def permutation_test(sample, mean, max_permutations = None, alternative = 'two-sided'):
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
t_stat = permutation_t_stat_1sample(sample, mean)
zero_distr = permutation_zero_distr_1sample(sample, mean, max_permutations)
if alternative == 'two-sided':
return sum([1. if abs(x) >= abs(t_stat) else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'less':
return sum([1. if x <= t_stat else 0. for x in zero_distr]) / len(zero_distr)
if alternative == 'greater':
return sum([1. if x >= t_stat else 0. for x in zero_distr]) / len(zero_distr)
print "p-value: %f" % permutation_test(mouses_data.proportion_of_time, 0.5)
print "p-value: %f" % permutation_test(mouses_data.proportion_of_time, 0.5, 10000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Загрузка данных
Step2: Одновыборочные критерии
Step3: Критерий знаков
Step4: Критерий знаковых рангов Вилкоксона
Step5: Перестановочный критерий
|
2,009
|
<ASSISTANT_TASK:>
Python Code:
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.conv_learner import *
PATH = 'data/planet/'
# Data preparation steps if you are using Crestle:
os.makedirs('data/planet/models', exist_ok=True)
os.makedirs('/cache/planet/tmp', exist_ok=True)
!ln -s /datasets/kaggle/planet-understanding-the-amazon-from-space/train-jpg {PATH}
!ln -s /datasets/kaggle/planet-understanding-the-amazon-from-space/test-jpg {PATH}
!ln -s /datasets/kaggle/planet-understanding-the-amazon-from-space/train_v2.csv {PATH}
!ln -s /cache/planet/tmp {PATH}
ls {PATH}
from fastai.plots import *
def get_1st(path): return glob(f'{path}/*.*')[0]
dc_path = "data/dogscats/valid/"
list_paths = [get_1st(f"{dc_path}cats"), get_1st(f"{dc_path}dogs")]
plots_from_files(list_paths, titles=["cat", "dog"], maintitle="Single-label classification")
list_paths = [f"{PATH}train-jpg/train_0.jpg", f"{PATH}train-jpg/train_1.jpg"]
titles=["haze primary", "agriculture clear primary water"]
plots_from_files(list_paths, titles=titles, maintitle="Multi-label classification")
from planet import f2
metrics=[f2]
f_model = resnet34
label_csv = f'{PATH}train_v2.csv'
n = len(list(open(label_csv)))-1
val_idxs = get_cv_idxs(n)
def get_data(sz):
tfms = tfms_from_model(f_model, sz, aug_tfms=transforms_top_down, max_zoom=1.05)
return ImageClassifierData.from_csv(PATH, 'train-jpg', label_csv, tfms=tfms,
suffix='.jpg', val_idxs=val_idxs, test_name='test-jpg')
data = get_data(256)
x,y = next(iter(data.val_dl))
y
list(zip(data.classes, y[0]))
plt.imshow(data.val_ds.denorm(to_np(x))[0]*1.4);
sz=64
data = get_data(sz)
data = data.resize(int(sz*1.3), 'tmp')
learn = ConvLearner.pretrained(f_model, data, metrics=metrics)
lrf=learn.lr_find()
learn.sched.plot()
lr = 0.2
learn.fit(lr, 3, cycle_len=1, cycle_mult=2)
lrs = np.array([lr/9,lr/3,lr])
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)
learn.save(f'{sz}')
learn.sched.plot_loss()
sz=128
learn.set_data(get_data(sz))
learn.freeze()
learn.fit(lr, 3, cycle_len=1, cycle_mult=2)
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)
learn.save(f'{sz}')
sz=256
learn.set_data(get_data(sz))
learn.freeze()
learn.fit(lr, 3, cycle_len=1, cycle_mult=2)
learn.unfreeze()
learn.fit(lrs, 3, cycle_len=1, cycle_mult=2)
learn.save(f'{sz}')
multi_preds, y = learn.TTA()
preds = np.mean(multi_preds, 0)
f2(preds,y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Multi-label versus single-label classification
Step2: In single-label classification each sample belongs to one class. In the previous example, each image is either a dog or a cat.
Step3: In multi-label classification each sample can belong to one or more clases. In the previous example, the first images belongs to two clases
Step4: We use a different set of data augmentations for this dataset - we also allow vertical flips, since we don't expect vertical orientation of satellite images to change our classifications.
|
2,010
|
<ASSISTANT_TASK:>
Python Code:
USE_VISUAL=False
#
# Either use this cell, in which case you will be using VPython
# Note: VPython only works if you have it installed on your local
# computer. Also, stopping a VPython simulation appears to restart the kernel. Save first!
#
import numpy as np
if USE_VISUAL:
import vpython as vp
#
# Or use this one, in which case you will be using pylab
#
if not USE_VISUAL:
%matplotlib inline
import matplotlib.pyplot as pl
#
# Basic functions and definitions used by VPython and pylab versions
#
L=10
kb=10.0
ks=1.0
m=1.0
dt=0.03
t=0.0
#
# Note that these displacements do *not* match an eigenvector, so the resulting
# motion (frequency) is a superposition of two eigenvalues (frequencies)
# What happens when you initialize the displacements with an eigenvector?
#
x1i=0.4*L/10 # initial displacement from equil, m1
v1i=0.0 # initial vel, m1
x2i=1*L/10 # initial displacement from equil, m2
v2i=0.0 # initial vel, m2
s=np.array([x1i,v1i,x2i,v2i]) # initial state
def derivs_2m(s, t):
x1=s[0] # get the variables from the state
v1=s[1]
x2=s[2]
v2=s[3]
a1 = (-(kb+ks)*x1 + ks*x2)/m
a2 = (-(kb+ks)*x2 + ks*x1)/m
return np.array([v1, a1, v2, a2])
def RK4Step(s, dt, t, derivs):
Take a single RK4 step.
f1 = derivs(s, t)
f2 = derivs(s+f1*dt/2.0, t+dt/2.0)
f3 = derivs(s+f2*dt/2.0, t+dt/2.0)
f4 = derivs(s+f3*dt, t+dt)
return s + (f1+2*f2+2*f3+f4)*dt/6.0
#
# VPython: Simulation of 2 coupled masses on springs.
#
if USE_VISUAL:
vp.canvas()
wallLeft = vp.box(pos=vp.vec(-L,0,0), height=L/2, width=L/2, length=L/10, color=vp.color.green)
wallRight = vp.box(pos=vp.vec(L,0,0), height=L/2, width=L/2, length=L/10, color=vp.color.green)
m1 = vp.box(pos=vp.vec(-L/3,0,0), height=L/5, width=L/5, length=L/5, color=vp.color.red)
m2 = vp.box(pos=vp.vec(+L/3,0,0), height=L/5, width=L/5, length=L/5, color=vp.color.red)
vWall=vp.vector(wallLeft.length/2,0,0) # sorta like axis of wallLeft?
vMass=vp.vector(m1.length/2,0,0) # same for the masses
s1 = vp.helix(pos=wallLeft.pos+vWall, axis=(m1.pos-vMass)-(wallLeft.pos+vWall), radius=0.5)
s2 = vp.helix(pos=m1.pos+vMass, axis=(m2.pos-vMass)-(m1.pos+vMass), radius=0.5)
s3 = vp.helix(pos=m2.pos+vMass, axis=(wallRight.pos-vWall)-(m2.pos+vMass), radius=0.5)
x10=m1.pos.x # equilibrium pos of m1
x20=m2.pos.x # equilibrium pos of m2
#
# VPython: Simulation of 2 coupled masses on springs.
#
if USE_VISUAL:
def updateScreen(s):
x1=s[0] # get the variables from the state
v1=s[1]
x2=s[2]
v2=s[3]
m1.pos.x = x10+x1 # update mass positions
m2.pos.x = x20+x2
#
# now just update spring positions and axes
#
s1.axis = (m1.pos-vMass)-(wallLeft.pos+vWall)
s2.pos = m1.pos+vMass
s2.axis = (m2.pos-vMass)-(m1.pos+vMass)
s3.pos = m2.pos+vMass
s3.axis = (wallRight.pos-vWall)-(m2.pos+vMass)
updateScreen(s)
#
# VPython: Simulation of 2 coupled masses on springs.
#
if USE_VISUAL:
while True:
vp.rate(30)
s = RK4Step(s, dt, t, derivs_2m)
t += dt
updateScreen(s)
if not USE_VISUAL:
x1list=[s[0]]
x2list=[s[2]]
tlist=[0.0]
t=0.0
while t<6:
s = RK4Step(s, dt, t, derivs_2m)
t += dt
x1list.append(s[0])
x2list.append(s[2])
tlist.append(t)
pl.subplot(211)
pl.ylabel("x1")
pl.title("motion of coupled masses")
pl.plot(tlist,x1list,label="x1")
pl.subplot(212)
pl.ylabel("x2")
pl.xlabel("t")
pl.plot(tlist,x2list,label="x2")
Mat = np.array([[(kb+ks)/m, -ks/m],
[-ks/m, (kb+ks)/m]])
vals, vecs = np.linalg.eig(Mat)
print("values:", vals)
print("vec(0):", vecs[:,0])
print("vec(1):", vecs[:,1])
if not USE_VISUAL:
pl.figure()
ax = pl.axes()
ax.set_aspect('equal')
ax.arrow(0, 0, vecs[0,0], vecs[1,0], head_width=0.04, fc='k', ec='k')
ax.arrow(0, 0, vecs[0,1], vecs[1,1], head_width=0.04, fc='k', ec='k')
pl.xlim([-1,1])
pl.ylim([-1,1])
pl.xlabel("x1")
pl.ylabel("x2")
pl.grid()
if not USE_VISUAL:
pl.figure()
ax = pl.axes()
ax.set_aspect('equal')
ax.arrow(0, 0, vecs[0,0], vecs[1,0], head_width=0.04, fc='k', ec='k')
ax.arrow(0, 0, vecs[0,1], vecs[1,1], head_width=0.04, fc='k', ec='k')
pl.xlim([-0.7,1.5])
pl.ylim([-1,1.5])
pl.grid()
x0 = np.array([.4,1])
c0 = vecs[:,0].dot(x0)
c1 = vecs[:,1].dot(x0)
ax.arrow(0, 0, c0*vecs[0,0], c0*vecs[1,0], head_width=0.04,fc='r', ec='r')
ax.arrow(0, 0, c1*vecs[0,1], c1*vecs[1,1], head_width=0.04,fc='b', ec='b')
ax.arrow(0, 0, x0[0], x0[1], head_width=0.04, fc='g', ec='g')
print("c1 = ", c1)
print("c0 = ", c0)
if not USE_VISUAL:
Tf = 15*2*np.pi/np.sqrt(vals[0]) # take 5 periods of the high freq
t = np.linspace(0,Tf,500)
x = c0*np.array([vecs[:,0]]).T*np.cos(np.sqrt(vals[0])*t) + c1*np.array([vecs[:,1]]).T*np.cos(np.sqrt(vals[1])*t)
pl.subplot(211)
pl.ylabel("x1")
pl.title("motion of coupled masses")
pl.plot(t,x[0,:],label="x1")
pl.grid()
pl.subplot(212)
pl.ylabel("x2")
pl.xlabel("t")
pl.plot(t,x[1,:],label="x2")
pl.grid()
from scipy.optimize import curve_fit
def cosFit(t, A, omega, phi):
Function def for a cosine fit
return A*np.cos(omega*t+phi)
x1a=np.array(x1list)
ta=np.array(tlist)
popt, pcov = curve_fit(cosFit, ta, x1a, p0=(0.707, np.sqrt(10.0), 0.0))
A=popt[0]
omega=popt[1]
phi=popt[2]
print("A =>", A)
print("omega**2 =>", omega**2)
print("phi =>", phi)
pl.title('Fit to find frequency.')
pl.xlabel('t')
pl.ylabel('x1')
pl.plot(ta, cosFit(ta, A, omega, phi), 'b-', label="fit")
pl.plot(ta, x1a, 'r.', label='data')
pl.legend()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Eigenvectors
Step2: We can also sort out what's happening using the matrix formulation developed in the slides. The eigenvalue problem
Step4: Project 10 (option A)
|
2,011
|
<ASSISTANT_TASK:>
Python Code:
import os
import numpy as np
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_evk_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis-ave.fif')
evokeds_list = mne.read_evokeds(sample_data_evk_file, baseline=(None, 0),
proj=True, verbose=False)
# show the condition names
for e in evokeds_list:
print(e.comment)
conds = ('aud/left', 'aud/right', 'vis/left', 'vis/right')
evks = dict(zip(conds, evokeds_list))
# ‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾‾ this is equivalent to:
# {'aud/left': evokeds_list[0], 'aud/right': evokeds_list[1],
# 'vis/left': evokeds_list[2], 'vis/right': evokeds_list[3]}
evks['aud/left'].plot(exclude=[])
evks['aud/left'].plot(picks='mag', spatial_colors=True, gfp=True)
times = np.linspace(0.05, 0.13, 5)
evks['aud/left'].plot_topomap(ch_type='mag', times=times, colorbar=True)
fig = evks['aud/left'].plot_topomap(ch_type='mag', times=0.09, average=0.1)
fig.text(0.5, 0.05, 'average from 40-140 ms', ha='center')
mags = evks['aud/left'].copy().pick_types(meg='mag')
mne.viz.plot_arrowmap(mags.data[:, 175], mags.info, extrapolate='local')
evks['vis/right'].plot_joint()
def custom_func(x):
return x.max(axis=1)
for combine in ('mean', 'median', 'gfp', custom_func):
mne.viz.plot_compare_evokeds(evks, picks='eeg', combine=combine)
mne.viz.plot_compare_evokeds(evks, picks='MEG 1811', colors=dict(aud=0, vis=1),
linestyles=dict(left='solid', right='dashed'))
temp_list = list()
for idx, _comment in enumerate(('foo', 'foo', '', None, 'bar'), start=1):
_evk = evokeds_list[0].copy()
_evk.comment = _comment
_evk.data *= idx # so we can tell the traces apart
temp_list.append(_evk)
mne.viz.plot_compare_evokeds(temp_list, picks='mag')
evks['vis/right'].plot_image(picks='meg')
mne.viz.plot_compare_evokeds(evks, picks='eeg', colors=dict(aud=0, vis=1),
linestyles=dict(left='solid', right='dashed'),
axes='topo', styles=dict(aud=dict(linewidth=1),
vis=dict(linewidth=1)))
mne.viz.plot_evoked_topo(evokeds_list)
subjects_dir = os.path.join(sample_data_folder, 'subjects')
sample_data_trans_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
maps = mne.make_field_map(evks['aud/left'], trans=sample_data_trans_file,
subject='sample', subjects_dir=subjects_dir)
evks['aud/left'].plot_field(maps, time=0.1)
for ch_type in ('mag', 'grad', 'eeg'):
evk = evks['aud/right'].copy().pick(ch_type)
_map = mne.make_field_map(evk, trans=sample_data_trans_file,
subject='sample', subjects_dir=subjects_dir,
meg_surf='head')
fig = evk.plot_field(_map, time=0.1)
mne.viz.set_3d_title(fig, ch_type, size=20)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Instead of creating the ~mne.Evoked object from an ~mne.Epochs object,
Step2: To make our life easier, let's convert that list of ~mne.Evoked
Step3: Plotting signal traces
Step4: Notice the completely flat EEG channel and the noisy gradiometer channel
Step5: Plotting scalp topographies
Step6: Additional examples of plotting scalp topographies can be found in
Step7: Joint plots
Step8: Like ~mne.Evoked.plot_topomap you can specify the times at which
Step9: One nice feature of ~mne.viz.plot_compare_evokeds is that when
Step10: The legends generated by ~mne.viz.plot_compare_evokeds above used the
Step11: Image plots
Step12: Topographical subplots
Step13: For larger numbers of sensors, the method evoked.plot_topo()
Step14: By default, ~mne.viz.plot_evoked_topo will plot all MEG sensors (if
Step15: By default, MEG sensors will be used to estimate the field on the helmet
Step16: You can also use MEG sensors to estimate the scalp field by passing
|
2,012
|
<ASSISTANT_TASK:>
Python Code:
import time
from IPython.display import IFrame
SERVER = 'labs.graphistry.com'
current_time = str(int(time.time()))
dataset='Facebook'
# We add the current time to the end of the workbook name to ensure it is unique
workbook = 'popularCommunities' + current_time
current_time = str(int(time.time()))
url = 'http://' + SERVER + '/graph/graph.html?dataset=' + dataset + '&workbook=' + workbook + '&splashAfter=' + current_time
IFrame(url, width=1000, height=500)
current_time = str(int(time.time()))
dataset='Marvel'
url = 'http://' + SERVER + '/graph/graph.html?dataset=' + dataset + '&workbook=' + workbook + '&splashAfter=' + current_time
IFrame(url, width=1000, height=500)
current_time = str(int(time.time()))
dataset='Marvel'
url = 'http://' + SERVER + '/graph/graph.html?dataset=' + dataset + '&splashAfter=' + current_time
IFrame(url, width=1000, height=500)
import pandas as pd
import graphistry
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
edges_1_df = pd.DataFrame({'s': [0,1,2], 'd': [1,2,0]})
edges_2_df = pd.DataFrame({'s': [0,1,2, 3], 'd': [1,2,0,1]})
g = graphistry.bind(source='s', destination='d').settings(url_params={'workbook': 'my_' + workbook})
g.plot(edges_1_df)
g.plot(edges_2_df)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Set the location of the graphistry server
Step2: Let's first take a look at a subgraph of Facebook's social network, and create a new workbook named popularCommunities
Step3: In this vizualization, let's create a notebook with a filter to find communities of popular characters.
Step4: Confirm the filter has been persisted, by clicking on the filters button, and checking that 'point
Step5: Use with pygraphistry
|
2,013
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import problem_unittests as tests
import tarfile
cifar10_dataset_folder_path = 'cifar-10-batches-py'
# Use Floyd's cifar-10 dataset if present
floyd_cifar10_location = '/input/cifar-10/python.tar.gz'
if isfile(floyd_cifar10_location):
tar_gz_path = floyd_cifar10_location
else:
tar_gz_path = 'cifar-10-python.tar.gz'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(tar_gz_path):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc='CIFAR-10 Dataset') as pbar:
urlretrieve(
'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz',
tar_gz_path,
pbar.hook)
if not isdir(cifar10_dataset_folder_path):
with tarfile.open(tar_gz_path) as tar:
tar.extractall()
tar.close()
tests.test_folder_path(cifar10_dataset_folder_path)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import numpy as np
# Explore the dataset
batch_id = 1
sample_id = 5
helper.display_stats(cifar10_dataset_folder_path, batch_id, sample_id)
def normalize(x, range_min=0, range_max=255):
Normalize a list of sample image data in the range of 0 to 1
: x: List of image data. The image shape is (32, 32, 3)
: return: Numpy array of normalize data
# Avoiding exactly zero and one, due to possible saturation issues with some activation functions
# or risks of underflow
a = 0
b = 1.0
range_min = 0
range_max = 255
return a + ( ( (x - range_min)*(b - a) )/( range_max - range_min ) )
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_normalize(normalize)
def one_hot_encode(x, n_labels=10):
One hot encode a list of sample labels. Return a one-hot encoded vector for each label.
: x: List of sample Labels
: return: Numpy array of one-hot encoded labels
# ohe via identity matrix for labels times examples
# should not change between uses unless labels change and there is
# no need for outer scope mutation of variables
return np.eye(n_labels)[x]
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_one_hot_encode(one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
# Preprocess Training, Validation, and Testing Data
helper.preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
DON'T MODIFY ANYTHING IN THIS CELL
import pickle
import problem_unittests as tests
import helper
# Load the Preprocessed Validation data
valid_features, valid_labels = pickle.load(open('preprocess_validation.p', mode='rb'))
import tensorflow as tf
def neural_net_image_input(image_shape):
Return a Tensor for a batch of image input
: image_shape: Shape of the images
: return: Tensor for image input.
return tf.placeholder(tf.float32, shape=[None, *image_shape], name="x")
def neural_net_label_input(n_classes, channels=3):
Return a Tensor for a batch of label input
: n_classes: Number of classes
: return: Tensor for label input.
return tf.placeholder(tf.float32, shape=[None, n_classes,], name="y")
def neural_net_keep_prob_input():
Return a Tensor for keep probability
: return: Tensor for keep probability.
return tf.placeholder(tf.float32, name="keep_prob")
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tf.reset_default_graph()
tests.test_nn_image_inputs(neural_net_image_input)
tests.test_nn_label_inputs(neural_net_label_input)
tests.test_nn_keep_prob_inputs(neural_net_keep_prob_input)
def conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides):
Apply convolution then max pooling to x_tensor
:param x_tensor: TensorFlow Tensor
:param conv_num_outputs: Number of outputs for the convolutional layer
:param conv_ksize: kernal size 2-D Tuple for the convolutional layer
:param conv_strides: Stride 2-D Tuple for convolution
:param pool_ksize: kernal size 2-D Tuple for pool
:param pool_strides: Stride 2-D Tuple for pool
: return: A tensor that represents convolution and max pooling of x_tensor
W = tf.Variable(tf.random_normal(
shape=[conv_ksize[0], conv_ksize[1], x_tensor.get_shape().as_list()[3], conv_num_outputs],
mean=0.0,
stddev=0.01,
dtype=tf.float32))
b = tf.Variable(tf.zeros([conv_num_outputs]))
#print(conv_strides)
conv = tf.nn.conv2d(x_tensor, W, strides=[1, *conv_strides, 1], padding="SAME")
conv = tf.nn.bias_add(conv, b)
conv = tf.nn.relu(conv)
conv = tf.nn.max_pool(conv,
[1, *pool_ksize, 1],
[1, *pool_strides, 1],
padding="SAME")
return conv
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_con_pool(conv2d_maxpool)
def flatten(x_tensor):
Flatten x_tensor to (Batch Size, Flattened Image Size)
: x_tensor: A tensor of size (Batch Size, ...), where ... are the image dimensions.
: return: A tensor of size (Batch Size, Flattened Image Size).
# Highlevel is nice
return tf.contrib.layers.flatten(x_tensor)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_flatten(flatten)
def fully_conn(x_tensor, num_outputs):
Apply a fully connected layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
return tf.contrib.layers.fully_connected(x_tensor,
num_outputs,
weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1),
#biased in favor of activating, with biases > 0, since we use relu
biases_initializer=tf.random_normal_initializer(mean=0.1, stddev=0.01),
activation_fn=tf.nn.relu)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_fully_conn(fully_conn)
def output(x_tensor, num_outputs):
Apply a output layer to x_tensor using weight and bias
: x_tensor: A 2-D tensor where the first dimension is batch size.
: num_outputs: The number of output that the new tensor should be.
: return: A 2-D tensor where the second dimension is num_outputs.
return tf.contrib.layers.fully_connected(x_tensor,
num_outputs,
weights_initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01),
biases_initializer=tf.zeros_initializer(), activation_fn=None)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_output(output)
def conv_net(x, keep_prob):
Create a convolutional neural network model
: x: Placeholder tensor that holds image data.
: keep_prob: Placeholder tensor that hold dropout keep probability.
: return: Tensor that represents logits
#x_ = tf.cast(x, tf.float32)
# TODO: Apply 1, 2, or 3 Convolution and Max Pool layers
# Play around with different number of outputs, kernel size and stride
# Function Definition from Above:
# conv2d_maxpool(x_tensor, conv_num_outputs, conv_ksize, conv_strides, pool_ksize, pool_strides)
conv1 = conv2d_maxpool(x, 32, (2,2), (2,2), (3,3), (2,2))
conv2 = conv2d_maxpool(conv1, 64, (2,2), (2,2), (1,1), (1,1))
conv3 = conv2d_maxpool(conv2, 128, (2,2), (2,2), (1,1), (1,1))
# TODO: Apply a Flatten Layer
# Function Definition from Above:
# flatten(x_tensor)
f1 = flatten(conv3)
# TODO: Apply 1, 2, or 3 Fully Connected Layers
# Play around with different number of outputs
# Function Definition from Above:
# fully_conn(x_tensor, num_outputs)
net = fully_conn(f1,400)
drop1 = tf.nn.dropout(net, keep_prob)
net2 = fully_conn(drop1,200)
drop2 = tf.nn.dropout(net2, keep_prob)
net3 = fully_conn(drop2,100)
drop3 = tf.nn.dropout(net3, keep_prob)
# TODO: Apply an Output Layer
# Set this to the number of classes
# Function Definition from Above:
# output(x_tensor, num_outputs)
return output(drop3,10)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
##############################
## Build the Neural Network ##
##############################
# Remove previous weights, bias, inputs, etc..
tf.reset_default_graph()
# Inputs
x = neural_net_image_input((32, 32, 3))
y = neural_net_label_input(10)
keep_prob = neural_net_keep_prob_input()
# Model
logits = conv_net(x, keep_prob)
# Name logits Tensor, so that is can be loaded from disk after training
logits = tf.identity(logits, name='logits')
# Loss and Optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.AdamOptimizer().minimize(cost)
# Accuracy
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name='accuracy')
tests.test_conv_net(conv_net)
def train_neural_network(session, optimizer, keep_probability, feature_batch, label_batch):
Optimize the session on a batch of images and labels
: session: Current TensorFlow session|
: optimizer: TensorFlow optimizer function
: keep_probability: keep probability
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
# Just the side-effect
session.run(optimizer, feed_dict={x:feature_batch, y:label_batch, keep_prob:keep_probability})
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_train_nn(train_neural_network)
def print_stats(session, feature_batch, label_batch, cost, accuracy):
Print information about loss and validation accuracy
: session: Current TensorFlow session
: feature_batch: Batch of Numpy image data
: label_batch: Batch of Numpy label data
: cost: TensorFlow cost function
: accuracy: TensorFlow accuracy function
# TODO: Implement Function
loss = session.run(cost, feed_dict={x: feature_batch, y:label_batch, keep_prob:1.0})
valid_acc = session.run(accuracy, feed_dict={x:valid_features, y:valid_labels, keep_prob:1.0})
print("Current loss: {0}, validation accuracy: {1}".format(loss, valid_acc))
# TODO: Tune Parameters
epochs = 100
batch_size = 1024 # 1080 TI
keep_probability = 0.5
DON'T MODIFY ANYTHING IN THIS CELL
print('Checking the Training on a Single Batch...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
batch_i = 1
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
DON'T MODIFY ANYTHING IN THIS CELL
save_model_path = './image_classification'
print('Training...')
with tf.Session() as sess:
# Initializing the variables
sess.run(tf.global_variables_initializer())
# Training cycle
for epoch in range(epochs):
# Loop over all batches
n_batches = 5
for batch_i in range(1, n_batches + 1):
for batch_features, batch_labels in helper.load_preprocess_training_batch(batch_i, batch_size):
train_neural_network(sess, optimizer, keep_probability, batch_features, batch_labels)
print('Epoch {:>2}, CIFAR-10 Batch {}: '.format(epoch + 1, batch_i), end='')
print_stats(sess, batch_features, batch_labels, cost, accuracy)
# Save Model
saver = tf.train.Saver()
save_path = saver.save(sess, save_model_path)
DON'T MODIFY ANYTHING IN THIS CELL
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import tensorflow as tf
import pickle
import helper
import random
# Set batch size if not already set
try:
if batch_size:
pass
except NameError:
batch_size = 64
save_model_path = './image_classification'
n_samples = 4
top_n_predictions = 3
def test_model():
Test the saved model against the test dataset
test_features, test_labels = pickle.load(open('preprocess_test.p', mode='rb'))
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load model
loader = tf.train.import_meta_graph(save_model_path + '.meta')
loader.restore(sess, save_model_path)
# Get Tensors from loaded model
loaded_x = loaded_graph.get_tensor_by_name('x:0')
loaded_y = loaded_graph.get_tensor_by_name('y:0')
loaded_keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
loaded_logits = loaded_graph.get_tensor_by_name('logits:0')
loaded_acc = loaded_graph.get_tensor_by_name('accuracy:0')
# Get accuracy in batches for memory limitations
test_batch_acc_total = 0
test_batch_count = 0
for test_feature_batch, test_label_batch in helper.batch_features_labels(test_features, test_labels, batch_size):
test_batch_acc_total += sess.run(
loaded_acc,
feed_dict={loaded_x: test_feature_batch, loaded_y: test_label_batch, loaded_keep_prob: 1.0})
test_batch_count += 1
print('Testing Accuracy: {}\n'.format(test_batch_acc_total/test_batch_count))
# Print Random Samples
random_test_features, random_test_labels = tuple(zip(*random.sample(list(zip(test_features, test_labels)), n_samples)))
random_test_predictions = sess.run(
tf.nn.top_k(tf.nn.softmax(loaded_logits), top_n_predictions),
feed_dict={loaded_x: random_test_features, loaded_y: random_test_labels, loaded_keep_prob: 1.0})
helper.display_image_predictions(random_test_features, random_test_labels, random_test_predictions)
test_model()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Image Classification
Step2: Explore the Data
Step5: Implement Preprocess Functions
Step8: One-hot encode
Step10: Randomize Data
Step12: Check Point
Step17: Build the network
Step20: Convolution and Max Pooling Layer
Step23: Flatten Layer
Step26: Fully-Connected Layer
Step29: Output Layer
Step32: Create Convolutional Model
Step35: Train the Neural Network
Step37: Show Stats
Step38: Hyperparameters
Step40: Train on a Single CIFAR-10 Batch
Step42: Fully Train the Model
Step45: Checkpoint
|
2,014
|
<ASSISTANT_TASK:>
Python Code:
#codes here
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("https://raw.githubusercontent.com/Yorko/mlcourse.ai/master/data/telecom_churn.csv")
df.head()
#codes here
df.dtypes
#codes here
plt.figure(figsize=(10,5))
plt.hist(df['Total day minutes'])
plt.xlabel('Total Day Minutes')
plt.ylabel('Frequency')
plt.title('Histogram of Total Day Minutes')
plt.show()
#codes here
plt.figure(figsize=(10,5))
plt.hist(df['Total intl calls'])
plt.xlabel('Total Intl Calls')
plt.ylabel('Frequency')
plt.title('Histogram of Total Intl Calls')
plt.show()
#codes here
from scipy.stats import kde
data = df['Total day minutes']
density = kde.gaussian_kde(data)
x = np.linspace(0,350,20)
y = density(x)
plt.plot(x,y)
plt.title("Density Plot of Total Day Minutes")
plt.show()
#codes here
data = df['Total intl calls']
density = kde.gaussian_kde(data)
x = np.linspace(0,20,300)
y = density(x)
plt.plot(x,y)
plt.title("Density Plot of the Total Intl Calls")
plt.show()
# Boxplot
plt.figure(figsize=(10,5))
plt.boxplot(df['Total intl calls'])
plt.ylabel('Total Intl Calls')
plt.title('Boxplot of Total Intl Calls')
plt.xticks([])
plt.show()
# Violin Plot
plt.figure(figsize=(10,5))
plt.violinplot(df['Total intl calls'])
plt.xlabel('Probability')
plt.ylabel('Total Intl Calls')
plt.title('Violin plot of Total Intl Calls')
plt.show()
# Distplot
sns.set(rc={"figure.figsize": (8, 4)}); np.random.seed(0)
x = df['Total intl calls']
ax = sns.distplot(x)
plt.show()
#codes here
p = sns.countplot(data=df, x = 'Churn')
#codes here
p = sns.countplot(data=df, x = 'Customer service calls')
#codes here
sns.heatmap(df.corr())
plt.show()
#codes here
plt.figure(figsize=(10,10))
plt.subplot(2,1,1)
plt.scatter(df['Total day minutes'],df['Total night minutes'])
plt.xlabel('Total day minutes calls')
plt.ylabel('Total night minutes')
plt.title('Total day minutes vs Total night minutes')
plt.show()
#codes here
sns.catplot(
x="Churn",
y="Total day minutes",
col="Customer service calls",
data=df[df["Customer service calls"] < 8],
kind="box",
col_wrap=4,
height=3,
aspect=0.8,
);
#codes here
fig, axs = plt.subplots(7, 2, figsize=(7, 7))
sns.histplot(data=df, x="Number vmail messages", kde=True, color="violet", ax=axs[0, 0])
sns.histplot(data=df, x="Total day minutes", kde=True, color="indigo", ax=axs[0, 1])
sns.histplot(data=df, x="Total day calls", kde=True, color="blue", ax=axs[1, 0])
sns.histplot(data=df, x="Total day charge", kde=True, color="green", ax=axs[1, 1])
sns.histplot(data=df, x="Total eve minutes", kde=True, color="yellow", ax=axs[2, 0])
sns.histplot(data=df, x="Total eve calls", kde=True, color="orange", ax=axs[2, 1])
sns.histplot(data=df, x="Total eve charge", kde=True, color="red", ax=axs[3, 0])
sns.histplot(data=df, x="Total night minutes", kde=True, color="lightblue", ax=axs[3, 1])
sns.histplot(data=df, x="Total night calls", kde=True, color="black", ax=axs[4, 0])
sns.histplot(data=df, x="Total night charge", kde=True, color="purple", ax=axs[4, 1])
sns.histplot(data=df, x="Total intl minutes", kde=True, color="navy", ax=axs[5, 0])
sns.histplot(data=df, x="Total intl calls", kde=True, color="coral", ax=axs[5, 1])
sns.histplot(data=df, x="Total intl charge", kde=True, color="cyan", ax=axs[6, 0])
sns.histplot(data=df, x="Customer service calls", kde=True, color="magenta", ax=axs[6, 1])
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2. Check the types of the variable that you take into account along the way.
Step2: 3. Draw the histogram of total day minutes and total intl calls and interpret the result.
Step3: The above histogram shows us the frequency of the variable "Total day minutes" in the telecom_churn dataset. The histogram reads as follows
Step4: The above histogram shows us the frequency of the variable "Total intl calls" in the telecom_churn dataset. The histogram reads as follows
Step5: Density plots are, by definition, smoothed-out versions of the respective histograms. We get roughly the same information from the density plots as we do from the histograms, which is that "Total day minutes" is normally distributed, whereas "Total intl calls" has a significant right skew.
Step6: A box plot helps us to understand the extent to which data spreads out. We see from the above box plot of "Total intl calls" that
Step7: The above violin plot of "Total intl calls" includes a rotated kernel density plot on each side. It shows us the full distribution of the data, and confirms that the data are most dense between 2.5 and 5.0 calls.
Step8: The above distplot of "Total intl calls" shows us similar information as the violin plot, which is that the data are most dense between 2.5 and 5.0 calls.
Step9: Most customers do not churn.
Step10: The most frequent value for 'Customer service calls' is 1.0, followed by 2, 0, 3, 4, 5, 6, and 7. It's interesting that a significant number of customers don't seem to make customer service calls, since 0 is the third most frequent number of calls.
Step11: Total day minutes is strongly correlated with Total day charge.
Step12: There does not appear to be a linear relationship between Total day minutes and Total night minutes.
Step13: From the above catplot, we see that the median number of total day minutes for customers who have churned is higher than the median number of total day minutes for customers who have not churned, for customer service calls under 4. Starting at 4 calls, the trend reverses and the median number of total day minutes for customers who have churned is lower than the median number of total day minutes for customers who have not churned.
|
2,015
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import IPython.display as display
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (12,12)
mpl.rcParams['axes.grid'] = False
import numpy as np
import PIL.Image
import time
import functools
def tensor_to_image(tensor):
    """Convert a float tensor with values in [0, 1] to an 8-bit PIL image.

    A leading batch dimension is stripped, and is asserted to have size 1.
    """
    pixels = np.array(tensor * 255, dtype=np.uint8)
    if np.ndim(pixels) > 3:
        assert pixels.shape[0] == 1
        pixels = pixels[0]
    return PIL.Image.fromarray(pixels)
content_path = tf.keras.utils.get_file('YellowLabradorLooking_new.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg')
style_path = tf.keras.utils.get_file('kandinsky5.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg')
def load_img(path_to_img):
    """Load an image file and scale it so its longest side is 512 pixels.

    Returns a float32 tensor of shape (1, height, width, 3) with values
    in [0, 1].
    """
    max_dim = 512
    raw = tf.io.read_file(path_to_img)
    decoded = tf.image.decode_image(raw, channels=3)
    img = tf.image.convert_image_dtype(decoded, tf.float32)

    # Scale so the longest side becomes max_dim, preserving aspect ratio.
    shape = tf.cast(tf.shape(img)[:-1], tf.float32)
    scale = max_dim / max(shape)
    new_shape = tf.cast(shape * scale, tf.int32)
    resized = tf.image.resize(img, new_shape)

    # Add a leading batch dimension.
    return resized[tf.newaxis, :]
def imshow(image, title=None):
    """Display an image tensor, squeezing a leading batch axis if present."""
    to_show = tf.squeeze(image, axis=0) if len(image.shape) > 3 else image
    plt.imshow(to_show)
    if title:
        plt.title(title)
content_image = load_img(content_path)
style_image = load_img(style_path)
plt.subplot(1, 2, 1)
imshow(content_image, 'Content Image')
plt.subplot(1, 2, 2)
imshow(style_image, 'Style Image')
import tensorflow_hub as hub
hub_model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
stylized_image = hub_model(tf.constant(content_image), tf.constant(style_image))[0]
tensor_to_image(stylized_image)
x = tf.keras.applications.vgg19.preprocess_input(content_image*255)
x = tf.image.resize(x, (224, 224))
vgg = tf.keras.applications.VGG19(include_top=True, weights='imagenet')
prediction_probabilities = vgg(x)
prediction_probabilities.shape
predicted_top_5 = tf.keras.applications.vgg19.decode_predictions(prediction_probabilities.numpy())[0]
[(class_name, prob) for (number, class_name, prob) in predicted_top_5]
vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
print()
for layer in vgg.layers:
print(layer.name)
content_layers = ['block5_conv2']
style_layers = ['block1_conv1',
'block2_conv1',
'block3_conv1',
'block4_conv1',
'block5_conv1']
num_content_layers = len(content_layers)
num_style_layers = len(style_layers)
def vgg_layers(layer_names):
    """Creates a vgg model that returns a list of intermediate output values.

    Args:
      layer_names: names of the VGG19 layers whose outputs to expose.

    Returns:
      A non-trainable tf.keras.Model mapping an image batch to the
      requested layers' activations.
    """
    # The original line here was a bare, unquoted docstring (a syntax
    # error); it is restored as a proper triple-quoted docstring above.
    # Load our model. Load pretrained VGG, trained on imagenet data.
    vgg = tf.keras.applications.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False

    outputs = [vgg.get_layer(name).output for name in layer_names]

    model = tf.keras.Model([vgg.input], outputs)
    return model
style_extractor = vgg_layers(style_layers)
style_outputs = style_extractor(style_image*255)
#Look at the statistics of each layer's output
for name, output in zip(style_layers, style_outputs):
print(name)
print(" shape: ", output.numpy().shape)
print(" min: ", output.numpy().min())
print(" max: ", output.numpy().max())
print(" mean: ", output.numpy().mean())
print()
def gram_matrix(input_tensor):
    """Gram matrix of feature maps, averaged over spatial locations.

    Input has shape (batch, height, width, channels); the result has
    shape (batch, channels, channels).
    """
    gram = tf.linalg.einsum('bijc,bijd->bcd', input_tensor, input_tensor)
    shape = tf.shape(input_tensor)
    n_locations = tf.cast(shape[1] * shape[2], tf.float32)
    return gram / n_locations
class StyleContentModel(tf.keras.models.Model):
    """Keras model extracting style (Gram matrices) and content activations.

    Wraps a frozen VGG19 feature extractor built by `vgg_layers`; calling
    the model returns a dict with 'style' and 'content' sub-dicts keyed
    by layer name.
    """

    def __init__(self, style_layers, content_layers):
        super(StyleContentModel, self).__init__()
        # A single VGG pass computes both style and content layer outputs.
        self.vgg = vgg_layers(style_layers + content_layers)
        self.style_layers = style_layers
        self.content_layers = content_layers
        self.num_style_layers = len(style_layers)
        self.vgg.trainable = False

    def call(self, inputs):
        "Expects float input in [0,1]"
        # VGG19 preprocessing expects pixel values in [0, 255].
        inputs = inputs*255.0
        preprocessed_input = tf.keras.applications.vgg19.preprocess_input(inputs)
        outputs = self.vgg(preprocessed_input)
        # The first num_style_layers outputs are style layers; the rest
        # are content layers (matches the order passed to vgg_layers).
        style_outputs, content_outputs = (outputs[:self.num_style_layers],
                                          outputs[self.num_style_layers:])

        # Style is represented by the Gram matrix of each style layer.
        style_outputs = [gram_matrix(style_output)
                         for style_output in style_outputs]

        content_dict = {content_name:value
                        for content_name, value
                        in zip(self.content_layers, content_outputs)}

        style_dict = {style_name:value
                      for style_name, value
                      in zip(self.style_layers, style_outputs)}

        return {'content':content_dict, 'style':style_dict}
extractor = StyleContentModel(style_layers, content_layers)
results = extractor(tf.constant(content_image))
print('Styles:')
for name, output in sorted(results['style'].items()):
print(" ", name)
print(" shape: ", output.numpy().shape)
print(" min: ", output.numpy().min())
print(" max: ", output.numpy().max())
print(" mean: ", output.numpy().mean())
print()
print("Contents:")
for name, output in sorted(results['content'].items()):
print(" ", name)
print(" shape: ", output.numpy().shape)
print(" min: ", output.numpy().min())
print(" max: ", output.numpy().max())
print(" mean: ", output.numpy().mean())
style_targets = extractor(style_image)['style']
content_targets = extractor(content_image)['content']
image = tf.Variable(content_image)
def clip_0_1(image):
    """Clamp pixel values to the valid [0, 1] range."""
    return tf.clip_by_value(image, 0.0, 1.0)
opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)
style_weight=1e-2
content_weight=1e4
def style_content_loss(outputs):
    """Weighted MSE between extracted style/content and their targets.

    Relies on the module-level globals `style_targets`, `content_targets`,
    `style_weight`, `content_weight`, `num_style_layers` and
    `num_content_layers`.
    """
    s_out = outputs['style']
    c_out = outputs['content']

    s_loss = tf.add_n([tf.reduce_mean((s_out[name] - style_targets[name]) ** 2)
                       for name in s_out.keys()])
    c_loss = tf.add_n([tf.reduce_mean((c_out[name] - content_targets[name]) ** 2)
                       for name in c_out.keys()])

    # Normalize each term by its layer count before weighting.
    s_loss *= style_weight / num_style_layers
    c_loss *= content_weight / num_content_layers
    return s_loss + c_loss
@tf.function()
def train_step(image):
    """One optimization step: update `image` in place to reduce the
    style/content loss computed by the global `extractor` and `opt`."""
    with tf.GradientTape() as tape:
        outputs = extractor(image)
        loss = style_content_loss(outputs)

    # Gradient w.r.t. the image pixels themselves, not model weights.
    grad = tape.gradient(loss, image)
    opt.apply_gradients([(grad, image)])
    # Keep pixel values in the valid [0, 1] range after the update.
    image.assign(clip_0_1(image))
# Quick sanity check: run a few manual optimization steps and look at
# the partially stylized image.
train_step(image)
train_step(image)
train_step(image)
tensor_to_image(image)

# Full optimization run: epochs * steps_per_epoch total steps, showing
# the intermediate image once per epoch.
import time
start = time.time()

epochs = 10
steps_per_epoch = 100

step = 0
for n in range(epochs):
    for m in range(steps_per_epoch):
        step += 1
        train_step(image)
        print(".", end='', flush=True)
    display.clear_output(wait=True)
    display.display(tensor_to_image(image))
    print("Train step: {}".format(step))

end = time.time()
print("Total time: {:.1f}".format(end-start))
def high_pass_x_y(image):
    """Return (horizontal, vertical) neighbor differences of a 4-D image batch.

    The input is indexed as (batch, height, width, channels); the outputs
    are one element shorter along the differenced axis.
    """
    horizontal = image[:, :, 1:, :] - image[:, :, :-1, :]
    vertical = image[:, 1:, :, :] - image[:, :-1, :, :]
    return horizontal, vertical
x_deltas, y_deltas = high_pass_x_y(content_image)
plt.figure(figsize=(14,10))
plt.subplot(2,2,1)
imshow(clip_0_1(2*y_deltas+0.5), "Horizontal Deltas: Original")
plt.subplot(2,2,2)
imshow(clip_0_1(2*x_deltas+0.5), "Vertical Deltas: Original")
x_deltas, y_deltas = high_pass_x_y(image)
plt.subplot(2,2,3)
imshow(clip_0_1(2*y_deltas+0.5), "Horizontal Deltas: Styled")
plt.subplot(2,2,4)
imshow(clip_0_1(2*x_deltas+0.5), "Vertical Deltas: Styled")
plt.figure(figsize=(14,10))
sobel = tf.image.sobel_edges(content_image)
plt.subplot(1,2,1)
imshow(clip_0_1(sobel[...,0]/4+0.5), "Horizontal Sobel-edges")
plt.subplot(1,2,2)
imshow(clip_0_1(sobel[...,1]/4+0.5), "Vertical Sobel-edges")
def total_variation_loss(image):
    """Sum of absolute neighbor differences (anisotropic total variation)."""
    dx, dy = high_pass_x_y(image)
    return tf.reduce_sum(tf.abs(dx)) + tf.reduce_sum(tf.abs(dy))
total_variation_loss(image).numpy()
tf.image.total_variation(image).numpy()
total_variation_weight=30
@tf.function()
def train_step(image):
    """One optimization step including a total-variation regularization term.

    Redefines the earlier `train_step`; the added penalty suppresses
    high-frequency noise in the generated image."""
    with tf.GradientTape() as tape:
        outputs = extractor(image)
        loss = style_content_loss(outputs)
        loss += total_variation_weight*tf.image.total_variation(image)

    grad = tape.gradient(loss, image)
    opt.apply_gradients([(grad, image)])
    # Keep pixel values in the valid [0, 1] range after the update.
    image.assign(clip_0_1(image))
image = tf.Variable(content_image)
import time
start = time.time()
epochs = 10
steps_per_epoch = 100
step = 0
for n in range(epochs):
for m in range(steps_per_epoch):
step += 1
train_step(image)
print(".", end='', flush=True)
display.clear_output(wait=True)
display.display(tensor_to_image(image))
print("Train step: {}".format(step))
end = time.time()
print("Total time: {:.1f}".format(end-start))
file_name = 'stylized-image.png'
tensor_to_image(image).save(file_name)
try:
from google.colab import files
except ImportError:
pass
else:
files.download(file_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 神经风格迁移
Step2: 下载图像并选择风格图像和内容图像:
Step3: 将输入可视化
Step4: 创建一个简单的函数来显示图像:
Step5: 使用 TF-Hub 进行快速风格迁移
Step6: 定义内容和风格的表示
Step7: 现在,加载没有分类部分的 VGG19 ,并列出各层的名称:
Step8: 从网络中选择中间层的输出以表示图像的风格和内容:
Step10: 用于表示风格和内容的中间层
Step11: 然后建立模型:
Step12: 风格计算
Step13: 提取风格和内容
Step14: 在图像上调用此模型,可以返回 style_layers 的 gram 矩阵(风格)和 content_layers 的内容:
Step15: 梯度下降
Step16: 定义一个 tf.Variable 来表示要优化的图像。 为了快速实现这一点,使用内容图像对其进行初始化( tf.Variable 必须与内容图像的形状相同)
Step17: 由于这是一个浮点图像,因此我们定义一个函数来保持像素值在 0 和 1 之间:
Step18: 创建一个 optimizer 。 本教程推荐 LBFGS,但 Adam 也可以正常工作:
Step19: 为了优化它,我们使用两个损失的加权组合来获得总损失:
Step20: 使用 tf.GradientTape 来更新图像。
Step21: 现在,我们运行几个步来测试一下:
Step22: 运行正常,我们来执行一个更长的优化:
Step23: 总变分损失
Step24: 这显示了高频分量如何增加。
Step25: 与此相关的正则化损失是这些值的平方和:
Step26: 这展示了它的作用。但是没有必要自己去实现它,因为 TensorFlow 包括一个标准的实现:
Step27: 重新进行优化
Step28: 现在,将它加入 train_step 函数中:
Step29: 重新初始化优化的变量:
Step30: 并进行优化:
Step31: 最后,保存结果:
|
2,016
|
<ASSISTANT_TASK:>
Python Code:
# NumPy basics walkthrough: array creation, built-in constructors,
# random sampling, and common array attributes/methods.
import numpy as np

# Creating arrays from Python lists.
my_list = [1,2,3]
my_list
np.array(my_list)
my_matrix = [[1,2,3],[4,5,6],[7,8,9]]
my_matrix
np.array(my_matrix)

# arange: evenly spaced values within a half-open interval [start, stop).
np.arange(0,10)
np.arange(0,11,2)

# zeros and ones: arrays filled with a constant value.
np.zeros(3)
np.zeros((5,5))
np.ones(3)
np.ones((3,3))

# linspace: a fixed number of evenly spaced points, stop inclusive.
np.linspace(0,10,3)
np.linspace(0,5,20)
np.linspace(0,5,21)

# eye: identity matrix.
np.eye(4)

# Random sampling: uniform [0, 1), standard normal, and integers.
np.random.rand(2)
np.random.rand(5,5)
np.random.randn(2)
np.random.randn(5,5)
np.random.randint(1,100)
np.random.randint(1,100,10)

# Seeding makes the random stream reproducible: both calls below
# produce the same values.
np.random.seed(42)
np.random.rand(4)
np.random.seed(42)
np.random.rand(4)

# Array attributes and methods.
arr = np.arange(25)
ranarr = np.random.randint(0,50,10)
arr
ranarr
# reshape: same data viewed with a new shape (element count must match).
arr.reshape(5,5)
ranarr
# max/min return extreme values; argmax/argmin their index positions.
ranarr.max()
ranarr.argmax()
ranarr.min()
ranarr.argmin()
# Vector
arr.shape
# Notice the two sets of brackets
arr.reshape(1,25)
arr.reshape(1,25).shape
arr.reshape(25,1)
arr.reshape(25,1).shape
# dtype: the element type stored in the array.
arr.dtype
arr2 = np.array([1.2, 3.4, 5.6])
arr2.dtype
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NumPy has many built-in functions and capabilities. We won't cover them all but instead we will focus on some of the most important aspects of NumPy
Step2: Built-in Methods
Step3: zeros and ones
Step4: linspace
Step5: <font color=green>Note that .linspace() includes the stop value. To obtain an array of common fractions, increase the number of items
Step6: eye
Step7: Random
Step8: randn
Step9: randint
Step10: seed
Step11: Array Attributes and Methods
Step12: Reshape
Step13: max, min, argmax, argmin
Step14: Shape
Step15: dtype
|
2,017
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccr-iitm', 'sandbox-2', 'atmoschem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Chemistry Scheme Scope
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables Form
Step9: 1.6. Number Of Tracers
Step10: 1.7. Family Approach
Step11: 1.8. Coupling With Chemical Reactivity
Step12: 2. Key Properties --> Software Properties
Step13: 2.2. Code Version
Step14: 2.3. Code Languages
Step15: 3. Key Properties --> Timestep Framework
Step16: 3.2. Split Operator Advection Timestep
Step17: 3.3. Split Operator Physical Timestep
Step18: 3.4. Split Operator Chemistry Timestep
Step19: 3.5. Split Operator Alternate Order
Step20: 3.6. Integrated Timestep
Step21: 3.7. Integrated Scheme Type
Step22: 4. Key Properties --> Timestep Framework --> Split Operator Order
Step23: 4.2. Convection
Step24: 4.3. Precipitation
Step25: 4.4. Emissions
Step26: 4.5. Deposition
Step27: 4.6. Gas Phase Chemistry
Step28: 4.7. Tropospheric Heterogeneous Phase Chemistry
Step29: 4.8. Stratospheric Heterogeneous Phase Chemistry
Step30: 4.9. Photo Chemistry
Step31: 4.10. Aerosols
Step32: 5. Key Properties --> Tuning Applied
Step33: 5.2. Global Mean Metrics Used
Step34: 5.3. Regional Metrics Used
Step35: 5.4. Trend Metrics Used
Step36: 6. Grid
Step37: 6.2. Matches Atmosphere Grid
Step38: 7. Grid --> Resolution
Step39: 7.2. Canonical Horizontal Resolution
Step40: 7.3. Number Of Horizontal Gridpoints
Step41: 7.4. Number Of Vertical Levels
Step42: 7.5. Is Adaptive Grid
Step43: 8. Transport
Step44: 8.2. Use Atmospheric Transport
Step45: 8.3. Transport Details
Step46: 9. Emissions Concentrations
Step47: 10. Emissions Concentrations --> Surface Emissions
Step48: 10.2. Method
Step49: 10.3. Prescribed Climatology Emitted Species
Step50: 10.4. Prescribed Spatially Uniform Emitted Species
Step51: 10.5. Interactive Emitted Species
Step52: 10.6. Other Emitted Species
Step53: 11. Emissions Concentrations --> Atmospheric Emissions
Step54: 11.2. Method
Step55: 11.3. Prescribed Climatology Emitted Species
Step56: 11.4. Prescribed Spatially Uniform Emitted Species
Step57: 11.5. Interactive Emitted Species
Step58: 11.6. Other Emitted Species
Step59: 12. Emissions Concentrations --> Concentrations
Step60: 12.2. Prescribed Upper Boundary
Step61: 13. Gas Phase Chemistry
Step62: 13.2. Species
Step63: 13.3. Number Of Bimolecular Reactions
Step64: 13.4. Number Of Termolecular Reactions
Step65: 13.5. Number Of Tropospheric Heterogenous Reactions
Step66: 13.6. Number Of Stratospheric Heterogenous Reactions
Step67: 13.7. Number Of Advected Species
Step68: 13.8. Number Of Steady State Species
Step69: 13.9. Interactive Dry Deposition
Step70: 13.10. Wet Deposition
Step71: 13.11. Wet Oxidation
Step72: 14. Stratospheric Heterogeneous Chemistry
Step73: 14.2. Gas Phase Species
Step74: 14.3. Aerosol Species
Step75: 14.4. Number Of Steady State Species
Step76: 14.5. Sedimentation
Step77: 14.6. Coagulation
Step78: 15. Tropospheric Heterogeneous Chemistry
Step79: 15.2. Gas Phase Species
Step80: 15.3. Aerosol Species
Step81: 15.4. Number Of Steady State Species
Step82: 15.5. Interactive Dry Deposition
Step83: 15.6. Coagulation
Step84: 16. Photo Chemistry
Step85: 16.2. Number Of Reactions
Step86: 17. Photo Chemistry --> Photolysis
Step87: 17.2. Environmental Conditions
|
2,018
|
<ASSISTANT_TASK:>
Python Code:
%%javascript
IPython.load_extensions('calico-document-tools');
!date
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
help(pg.opengl.GLLinePlotItem)
help(pg.opengl.GLGridItem)
help(pg.QtGui.QGraphicsRectItem)
image_shape = (4,4)
uniform_values = np.ones(image_shape) * 255
uniform_image = pg.makeARGB(uniform_values)
print uniform_values
print uniform_image
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 200
w.show()
w.setWindowTitle('pyqtgraph example: GLImageItem')
## create volume data set to slice three images from
shape = (100,100,70)
data = np.random.normal(size=shape)
#data += pg.gaussianFilter(np.random.normal(size=shape), (15,15,15))*15
## slice out three planes, convert to RGBA for OpenGL texture
levels = (-0.08, 0.08)
tex1 = pg.makeRGBA(data[shape[0]/2], levels=levels)[0] # yz plane
tex2 = pg.makeRGBA(data[:,shape[1]/2], levels=levels)[0] # xz plane
tex3 = pg.makeRGBA(data[:,:,shape[2]/2], levels=levels)[0] # xy plane
#tex1[:,:,3] = 128
tex2[:,:,3] = 128
#tex3[:,:,3] = 128
## Create three image items from textures, add to view
v1 = gl.GLImageItem(tex1)
v1.translate(-shape[1]/2, -shape[2]/2, 0)
v1.rotate(90, 0,0,1)
v1.rotate(-90, 0,1,0)
#w.addItem(v1)
v2 = gl.GLImageItem(tex1)
v2.translate(-shape[0]/2, -shape[2]/2, 0)
v2.rotate(-90, 1,0,0)
w.addItem(v2)
v3 = gl.GLImageItem(tex3)
v3.translate(-shape[0]/2, -shape[1]/2, 0)
#w.addItem(v3)
ax = gl.GLAxisItem()
w.addItem(ax)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
print shape[0], shape[1], shape[2]
print len(data[shape[0]/2]), len(data[:,shape[1]/2])
shape = (5,4,3)
data = np.random.normal(size=shape)
print data
print data[shape[0]/2]
print data[:,shape[1]/2]
print data[:,:,shape[2]/2]
tex = pg.makeRGBA(data[shape[2]/2])[0]
print tex
image_shape = (3,5)
uniform_values = np.ones(image_shape) * 255
uniform_image = pg.makeARGB(uniform_values)[0]
uniform_image[:,:,3] = 128
print uniform_image
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import numpy as np
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 20
w.show()
w.setWindowTitle('pyqtgraph example: GLImageItem')
## create volume data set to slice three images from
shape = (100,100,70)
data = np.random.normal(size=shape)
#data += pg.gaussianFilter(np.random.normal(size=shape), (15,15,15))*15
## make images
image_shape = (6,6)
uniform_values = np.ones(image_shape) * 255
uniform_image = pg.makeARGB(uniform_values)[0]
uniform_image[:,:,1] = 128
uniform_image_transparent = pg.makeARGB(uniform_values)[0]
uniform_image_transparent[:,:,3] = 128
## Create image items from textures, add to view
v2 = gl.GLImageItem(uniform_image)
v2.translate(-image_shape[0]/2, -image_shape[1]/2, 0)
v2.rotate(90, 1,0,0)
v2.translate(0, -2, 0)
w.addItem(v2)
v1 = gl.GLImageItem(uniform_image_transparent)
v1.translate(-image_shape[0]/2, -image_shape[1]/2, 0)
v1.rotate(90, 1,0,0)
w.addItem(v1)
ax = gl.GLAxisItem()
w.addItem(ax)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
x = np.linspace(0,2,3)
y = np.linspace(10,12,3)
z = np.linspace(20,22,3)
print x, '\n', y, '\n', z, '\n'
pts = np.vstack([x,y,z])
print pts, '\n'
pts = pts.transpose()
print pts
x = np.linspace(0,3,4)
y = np.linspace(10,13,4)
z = np.linspace(20,23,4)
#print x, '\n', y, '\n', z, '\n'
pts = np.vstack([x,y,z])
#print pts, '\n'
pts = pts.transpose()
print pts
print pts.shape
pts2 = np.zeros(shape=(2*pts.shape[0], pts.shape[1]))
print pts2
print pts2.shape
for i in range(pts.shape[0]):
pts2[2*i,2] = pts[i,2]
pts2[2*i + 1,:] = pts[i,:]
print pts2
# Function to create new array from old
# where new array is formatted to prepare to
# draw lines perpendicular from z-axis to
# curve defined by input array
def preptomakelines(pts):
    """Interleave each input point with a (0, 0, z) anchor row.

    Produces an array twice as long as `pts`, where even rows hold only the
    z-coordinate of the matching point (x and y zeroed) and odd rows hold the
    point itself — the format needed to draw lines perpendicular from the
    z-axis out to the curve.
    """
    n_points, n_cols = pts.shape
    paired = np.zeros((2 * n_points, n_cols))
    # Even rows: z-axis anchors carrying only the z value.
    paired[0::2, 2] = pts[:, 2]
    # Odd rows: the original points, unchanged.
    paired[1::2, :] = pts
    return paired
pts2 = preptomakelines(pts)
print pts, '\n\n', pts2
x = np.linspace(0,3,4)
y = np.linspace(10,13,4)
z = np.linspace(20,23,4)
pts = np.vstack([x,y,z])
pts = pts.transpose()
print pts
temp2Darray = [[0, 0, 1],
[1, 0, 0],
[0, 1, 0]]
rot_efield_coord = np.array(temp2Darray)
print rot_efield_coord
pts_efield_coord = np.dot(pts, rot_efield_coord)
print pts_efield_coord
temp2Darray = [[1, 0, 0],
[0, 0, 1],
[0, 1, 0]]
rot_hfield_coord = np.array(temp2Darray)
print rot_hfield_coord
pts_hfield_coord = np.dot(pts, rot_hfield_coord)
print pts_hfield_coord
print pts
pts = np.dot(pts, rot_efield_coord)
print pts
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Objective
Step2: Figure out what makeARGB is doing
Step3: Make a semi-transparent rectangle (image)
Step4: What is np.vstack.transpose() doing?
Step5: Answer
Step6: Simple coordinate transformation
|
2,019
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import pandas as pd
%matplotlib inline
# Read data from data/coffees.csv
data = pd.read_csv("data/coffees.csv")
data
# .head()
data.head()
# .loc or .iloc
data.loc[2]
# [] indexing on a series
data.coffees[:5]
print("Dataset length :")
# len()
print(len(data))
# .describe()
data.describe()
# .isnull() and boolean indexing with []
data[data.coffees.isnull()]
# .dtypes
data.dtypes
# print the first element of the series with [] indexing
print(data.timestamp[0])
# print its type()
print(type(data.timestamp[0]))
# cast the coffees column using pd.to_numeric, and coerce errors
data.coffees = pd.to_numeric(data.coffees, errors="coerce")
data.head()
# Use .dropna() using a subset, and pass inplace
data.dropna(subset=["coffees"], inplace=True)
data.head()
# Cast to int using .astype()
data.coffees = data.coffees.astype(int)
data.head()
# pd.to_datetime()
data.timestamp = pd.to_datetime(data.timestamp)
# Confirm dtypes
data.dtypes
# .describe(), passing the include kwarg to see all information
data.describe(include="all")
# What do the first few rows look like ?
data.head()
# .plot() on the coffees series
data.coffees.plot()
# .plot() on the dataframe, setting x to the timestamp, with dot-dash style
data.plot(x=data.timestamp, style=".-")
# .tail() with ten rows
data.tail(n=10)
# Use conditional indexing against the timestamp
data = data[data.timestamp < "2013-03-01"]
data.tail()
# Once again, plot the data against the timestamp
data.plot(x=data.timestamp, style=".-")
# .value_counts()
data.contributor.value_counts()
# .plot() a bar chart from the value counts
data.contributor.value_counts().plot(kind="bar")
# Create a series of the weekdays for each entry using .dt.weekday
weekdays = data.timestamp.dt.weekday
# assign() it to our dataframe
data = data.assign(weekdays=weekdays)
data.head()
weekday_names = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
weekday_dict = {key: weekday_names[key] for key in range(7)}
# Use .apply() to apply a custom function to the weekdays column
data.weekdays = data.weekdays.apply(lambda x: weekday_dict[x])
data.head()
# .groupby() the weekdays and then .count() rows in each group
weekday_counts = data.groupby("weekdays").count()
# We can reorder this dataframe by our weekday_names list
weekday_counts = weekday_counts.loc[weekday_names]
weekday_counts
# .plot() a bar chart of data in weekday_counts
weekday_counts.timestamp.plot(kind="bar", title="Datapoints added on each weekday")
# Set the dataframe's .index property
data.index = data.timestamp
# Let's drop the timestamp column, as we no longer need it
data.drop(["timestamp"], axis=1, inplace=True)
data.head()
# pd.date_range, with daily frequency, and normalisation
midnights = pd.date_range(data.index[0], data.index[-1], freq="D", normalize=True)
midnights
# Take the union of the existing and new indices
new_index = midnights.union(data.index)
new_index
# .reindex() the dataframe
upsampled_data = data.reindex(new_index)
upsampled_data.head(10)
# .interpolate the upsampled_data using the time method
upsampled_data = upsampled_data.interpolate(method="time")
upsampled_data.head(10)
# .resample() followed by .asfreq()
daily_data = upsampled_data.resample("D").asfreq()
# Drop the contributor column, we no longer need it
daily_data = daily_data.drop(["contributor"], axis=1)
# Generate a column of weekday names
daily_data["weekdays"] = daily_data.index.weekday_name # We did it the slow way before...
daily_data.head()
# Let's plot the data once more, to see how we're doing
daily_data.plot(figsize=(15, 4), style=".")
# Use .diff() on the coffees column; follow up with .shift()
coffees_made = daily_data.coffees.diff().shift(-1)
# Add this as a column to the dataframe
daily_data["coffees_made_today"] = coffees_made
daily_data.head()
# .groupby weekdays, take the mean, and grab the coffees_made_today column
coffees_by_day = daily_data.groupby("weekdays").mean().coffees_made_today
coffees_by_day
# Sort coffees_by_day by our list of weekday names
coffees_by_day = coffees_by_day[weekday_names]
# Plot a bar chart
coffees_by_day.plot(kind="bar")
# Bring in data/department_members.csv;
# have the first column be the index, and parse the dates
people = pd.read_csv("data/department_members.csv", index_col=[0], parse_dates=True)
people.head()
# Use an outer join, then interpolate over missing values using nearest values
daily_data = daily_data.join(people, how="outer").interpolate(method="nearest")
daily_data.head()
# New column is the ratio of coffees made on a given day to number of members in the department
daily_data["coffees_per_person"] = daily_data.coffees_made_today / daily_data.members
# Let's drop those remaining NaNs while we're at it
daily_data.dropna(inplace=True)
daily_data.head()
# Plot the coffees_per_person column
daily_data.coffees_per_person.plot()
# pd.read_csv(); try using data/coffee_status.csv
# parse_dates as kwarg; also pass index_col
machine_status = pd.read_csv("data/coffee_status.csv", parse_dates=["date"], index_col="date")
machine_status.head()
# .value_counts()
machine_status.status.value_counts()
# Make a pd.Series from the status series where things are OK
numerical_status = machine_status.status == "OK"
numerical_status.plot()
# .join()
daily_data = daily_data.join(machine_status)
daily_data.head()
# Column depicting when the status was "OK"
# Cast the series to ints before as you create a new column in the dataframe
daily_data["numerical_status"] = (daily_data.status == "OK").astype(int)
daily_data.head()
# Plot both columns on the same graph, using default args
daily_data[["coffees_per_person", "numerical_status"]].plot()
# Resample weekly, taking the mean of each week to get a weekly value
weekly_data = daily_data.resample("W").mean()
weekly_data[["coffees_per_person", "numerical_status"]].plot()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Note
Step2: Note
Step3: Note
Step4: Let's just look at the first few rows.
Step5: We have an index, and three columns
Step6: Definitely a string. We'll note this as something to fix after we finish looking around.
Step7: Note
Step8: What else can we find out ?
Step9: Looks like we also have some missing data - we have 671 rows, but the coffees column only has 658 entries.
Step10: Note
Step11: The contributor column makes sense as object, because we expect strings there; but surely the timestamp should be a timestamp-type, and coffees should be numerical ?
Step12: It looks like the timestamp field was read from CSV as a string. That makes sense - CSV files are very basic. We'll have pandas interpret these strings as datetimes for us automatically.
Step13: #### The coffees column contains NaNs.
Step14: The coffees column is of type float.
Step15: Let's have pandas parse the timestamp strings to datetime objects.
Step16: So where do we stand ?
Step17: Note
Step18: pandas is plotting the coffees against the index, which is just a series of integers.
Step19: We have some very uneven spacing in places. We might start by cutting off the last few points of this time-series, which is missing a lot of data.
Step20: After mid-March, things start getting spaced rather erratically.
Step21: Note
Step22: 1. Contributions to the time-series
Step23: Note
Step24: On which weekdays were contributions made ?
Step25: Can we replace these integers with actual weekdays ?
Step26: Let's group by these weekdays.
Step27: Note
Step28: 2. Weekday trends
Step29: Let's add some rows at midnight on every day.
Step30: Note
Step31: Note
Step32: Note
Step33: We're now ready to resample the time-series at a daily frequency.
Step34: Let's begin by figuring out how many coffees are made on any given day.
Step35: Note
Step36: Let's order this series and then plot it.
Step37: Wednesdays were seminar days...
Step38: Let's join the datasets.
Step39: Note
Step40: We can now plot this column.
Step41: Those are strange plateaus. We'll pull in another dataset, telling us when the machine was broken.
Step42: Note
Step43: A quick trick to plot this as a time-series...
Step44: Note
Step45: We'll bring in this numerical representation of status column into our dataframe too.
Step46: Let's plot both the coffees per person and the numerical status.
Step47: We see a strong weekday-weekend effect. Resampling weekly will fix that.
|
2,020
|
<ASSISTANT_TASK:>
Python Code:
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import basename, exists
def download(url):
    """Fetch `url` into the working directory unless the file already exists."""
    target = basename(url)
    if exists(target):
        return
    from urllib.request import urlretrieve
    saved, _ = urlretrieve(url, target)
    print('Downloaded ' + saved)
download('https://raw.githubusercontent.com/AllenDowney/' +
'ModSimPy/master/modsim.py')
# import functions from modsim
from modsim import *
download('https://github.com/AllenDowney/ModSimPy/raw/master/' +
'chap11.py')
# import code from previous notebooks
from chap11 import make_system
from chap11 import update_func
from chap11 import run_simulation
def add_immunization(system, fraction):
    """Move `fraction` of the population from susceptible (S) to recovered (R).

    Mutates `system.init` in place; immunized people start the simulation
    already in the recovered compartment.
    """
    system.init.S = system.init.S - fraction
    system.init.R = system.init.R + fraction
tc = 3 # time between contacts in days
tr = 4 # recovery time in days
beta = 1 / tc # contact rate in per day
gamma = 1 / tr # recovery rate in per day
system = make_system(beta, gamma)
def calc_total_infected(results, system):
    """Fraction of the population infected over the whole simulation.

    Everyone who left the susceptible pool between t=0 and t_end was
    infected at some point, so the drop in S measures total infections.
    """
    return results.S[0] - results.S[system.t_end]
from numpy import exp
def logistic(x, A=0, B=1, C=1, M=0, K=1, Q=1, nu=1):
    """Compute the generalized logistic function.

    A: controls the lower bound
    B: controls the steepness of the transition
    C: not all that useful, AFAIK
    M: controls the location of the transition
    K: controls the upper bound
    Q: shift the transition left or right
    nu: affects the symmetry of the transition

    returns: float or array
    """
    # The extraction had stripped the triple quotes from this docstring,
    # leaving bare text that made the function a syntax error; restored here.
    exponent = -B * (x - M)
    denom = C + Q * exp(exponent)
    return A + (K - A) / denom ** (1 / nu)
spending = linspace(0, 1200, 21)
def compute_factor(spending):
    """Reduction factor as a function of spending.

    spending: dollars from 0 to 1200

    returns: fractional reduction in beta
    """
    # Docstring quotes were stripped by extraction (syntax error); restored.
    return logistic(spending, M=500, K=0.2, B=0.01)
percent_reduction = compute_factor(spending) * 100
make_series(spending, percent_reduction).plot()
decorate(xlabel='Hand-washing campaign spending (USD)',
ylabel='Percent reduction in infection rate',
title='Effect of hand washing on infection rate')
def compute_factor(spending):
    # Fractional reduction in the contact rate for this campaign budget.
    curve_params = dict(M=500, K=0.2, B=0.01)
    return logistic(spending, **curve_params)
def add_hand_washing(system, spending):
    """Scale the system's contact rate down by the spending-dependent factor."""
    reduction = compute_factor(spending)
    system.beta = system.beta * (1 - reduction)
def sweep_hand_washing(spending_array):
    """Total fraction infected for each campaign budget in `spending_array`.

    Runs one fresh simulation per budget level and records the result,
    indexed by spending, in a SweepSeries.
    """
    infected_by_spending = SweepSeries()
    for budget_level in spending_array:
        sim = make_system(beta, gamma)
        add_hand_washing(sim, budget_level)
        run = run_simulation(sim, update_func)
        infected_by_spending[budget_level] = calc_total_infected(run, sim)
    return infected_by_spending
from numpy import linspace
spending_array = linspace(0, 1200, 20)
infected_sweep2 = sweep_hand_washing(spending_array)
infected_sweep2.plot()
decorate(xlabel='Hand-washing campaign spending (USD)',
ylabel='Total fraction infected',
title='Effect of hand washing on total infections')
num_students = 90
budget = 1200
price_per_dose = 100
max_doses = int(budget / price_per_dose)
max_doses
dose_array = linrange(max_doses)
def sweep_doses(dose_array):
    """Total fraction infected for each way of splitting the budget.

    Each dose bought immunizes one student; whatever money remains goes
    to the hand-washing campaign. Results are indexed by number of doses.
    """
    infected_by_doses = SweepSeries()
    for n_doses in dose_array:
        immunized_fraction = n_doses / num_students
        leftover = budget - n_doses * price_per_dose
        sim = make_system(beta, gamma)
        add_immunization(sim, immunized_fraction)
        add_hand_washing(sim, leftover)
        run = run_simulation(sim, update_func)
        infected_by_doses[n_doses] = calc_total_infected(run, sim)
    return infected_by_doses
infected_sweep3 = sweep_doses(dose_array)
infected_sweep3.plot()
decorate(xlabel='Doses of vaccine',
ylabel='Total fraction infected',
title='Total infections vs. doses')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Click here to run this case study on Colab
Step3: Hand washing
Step4: The following array represents the range of possible spending.
Step6: compute_factor computes the reduction in beta for a given level of campaign spending.
Step7: Here's what it looks like.
Step8: The result is the following function, which
Step9: I use compute_factor to write add_hand_washing, which takes a
Step10: Now we can sweep a range of values for spending and use the simulation
Step11: Here's how we run it
Step12: The following figure shows the result.
Step13: Below \$200, the campaign has little effect.
Step14: The fraction budget/price_per_dose might not be an integer. int is a built-in function that converts its argument to an integer, rounding down.
Step15: In this example we call linrange with only one argument; it returns a NumPy array with the integers from 0 to max_doses, including both.
Step16: For each number of doses, we compute the fraction of students we can
|
2,021
|
<ASSISTANT_TASK:>
Python Code:
from ipysankeywidget import SankeyWidget
from ipywidgets import Layout
layout = Layout(width="300", height="200")
def sankey(margin_top=10, **value):
    """Show SankeyWidget with default values for size and margins."""
    # Docstring quotes were stripped by extraction (syntax error); restored.
    return SankeyWidget(layout=layout,
                        margins=dict(top=margin_top, bottom=0, left=30, right=60),
                        **value)
links = [
{'source': 'A', 'target': 'B', 'value': 1},
{'source': 'B', 'target': 'C', 'value': 1},
{'source': 'A', 'target': 'D', 'value': 1},
]
sankey(links=links)
rank_sets = [
{ 'type': 'same', 'nodes': ['C', 'D'] }
]
sankey(links=links, rank_sets=rank_sets)
order = [
['A'],
['D', 'B'],
['C'],
]
sankey(links=links, order=order)
order = [
[ [ ], ['A'], [], ],
[ ['B'], [ ], ['D'] ],
[ [ ], ['C'], [] ],
]
sankey(links=links, order=order)
links = [
{'source': 'A', 'target': 'B', 'value': 1},
{'source': 'B', 'target': 'C', 'value': 1},
{'source': 'C', 'target': 'D', 'value': 1},
{'source': 'A', 'target': 'E', 'value': 0.5},
]
nodes = [
{'id': 'C', 'direction': 'l'},
{'id': 'D', 'direction': 'l'},
]
sankey(links=links, nodes=nodes)
nodes = [
{'id': 'C', 'direction': 'r'},
{'id': 'D', 'direction': 'l'},
]
sankey(links=links, nodes=nodes)
nodes = [
{'id': 'C', 'direction': 'l'},
{'id': 'D', 'direction': 'r'},
]
sankey(links=links, nodes=nodes)
links = [
{'source': 'A', 'target': 'B', 'value': 3, 'type': 'x'},
{'source': 'B', 'target': 'C', 'value': 2, 'type': 'y'},
{'source': 'B', 'target': 'D', 'value': 1, 'type': 'z'},
]
sankey(links=links)
links = [
{'source': 'A', 'target': 'B', 'value': 3, 'color': 'steelblue'},
{'source': 'B', 'target': 'C', 'value': 2, 'color': '#aaa'},
{'source': 'B', 'target': 'D', 'value': 1, 'color': 'goldenrod'},
]
sankey(links=links)
nodes = [
{'id': 'B', 'title': 'Middle node', 'style': 'process' },
]
sankey(links=links, nodes=nodes)
%%html
<style>
.sankey .node {
font-style: italic;
}
</style>
links = [
{'source': 'A1', 'target': 'B', 'value': 1.5, 'type': 'x'},
{'source': 'A1', 'target': 'B', 'value': 0.5, 'type': 'y'},
{'source': 'A2', 'target': 'B', 'value': 0.5, 'type': 'x'},
{'source': 'A2', 'target': 'B', 'value': 1.5, 'type': 'y'},
{'source': 'B', 'target': 'C', 'value': 2.0, 'type': 'x'},
{'source': 'B', 'target': 'C', 'value': 2.0, 'type': 'y'},
]
sankey(links=links, nodes=[])
sankey(links=links, align_link_types=True)
order = [
['A2', 'A1'],
['B'],
['C'],
]
sankey(links=links, align_link_types=True, order=order)
from ipywidgets import Button, VBox
links = [
{'source': 'A', 'target': 'B', 'value': 1},
{'source': 'B', 'target': 'C', 'value': 1},
{'source': 'A', 'target': 'D', 'value': 1},
]
order = [
['A'],
['D', 'B'],
['C'],
]
s = sankey(links=links, order=order)
def swap(x):
    """Button callback: mirror the node order within every layer."""
    global order
    order = [layer[::-1] for layer in order]
    s.order = order
b = Button(description='Swap')
b.on_click(swap)
VBox([b, s])
links = [
{'source': 'A', 'target': 'B', 'value': 3, 'type': 'x'},
{'source': 'B', 'target': 'C', 'value': 2, 'type': 'y'},
{'source': 'B', 'target': 'D', 'value': 1, 'type': 'z'},
]
groups = [
{'id': 'G', 'title': 'Group', 'nodes': ['C', 'D']}
]
sankey(links=links, nodes=[], groups=groups, margin_top=30)
sankey(links=links, linkLabelFormat='.1f')
links[2]['value'] = 0.1
links[1]['value'] = 2.9
sankey(links=links, linkLabelFormat='.1f')
sankey(links=links, linkLabelFormat='.1f', linkLabelMinWidth=4)
links[0]['marker'] = 2.5
sankey(links=links)
links = [
{'source': 'A', 'target': 'B', 'value': 3, 'type': 'x', 'info_html': 'Hi!'},
{'source': 'B', 'target': 'C', 'value': 2, 'type': 'y', 'info_html': 'B <b>to</b> C'},
{'source': 'B', 'target': 'D', 'value': 1, 'type': 'z'},
]
sankey(links=links, show_link_info_html=True)
links = [
{'source': 'A', 'target': 'B', 'value': 30},
{'source': 'B', 'target': 'C', 'value': 20},
{'source': 'B', 'target': 'D', 'value': 10},
]
nodes = [
{'id': 'A', 'position': [0, 50]},
{'id': 'B', 'position': [100, 50]},
{'id': 'C', 'position': [200, 30]},
{'id': 'D', 'position': [200, 100]},
]
w = sankey(
links=links,
nodes=nodes,
node_position_attr='position'
)
w
# Try changing this
w.scale = 2
# Try changing this
w.nodes[0]['position'] = [50, 50]
w.send_state()
# w.node_position_attr = None
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step2: <i class="fa fa-gears fa-2x fa-fw text-info"></i> A convenience factory function
Step3: Rank assignment
Step4: Reversed nodes
Step5: Variations
Step6: Styling
Step7: You can also set the colours directly
Step8: Process titles default to their ids, but can be overridden. There are also one built-in alternative "style" of node
Step9: Of course, you can also use CSS to adjust the styling
Step10: Aligning link types
Step11: Dynamic updating
Step12: Node groups
Step13: Link labels
Step14: By default the labels for small links are hidden, but you can customize this using linkLabelMinWidth
Step15: Link markers
Step16: Extra link info
Step17: Custom layout
Step18: The positions are in display coordinates, within the margins specified. The scale is set to 1 by default, if not specified. When node positions are specified manually, they are not affected by the scale -- only the width of the lines is scaled.
|
2,022
|
<ASSISTANT_TASK:>
Python Code:
DON'T MODIFY ANYTHING IN THIS CELL
import helper
import problem_unittests as tests
source_path = 'data/small_vocab_en'
target_path = 'data/small_vocab_fr'
source_text = helper.load_data(source_path)
target_text = helper.load_data(target_path)
view_sentence_range = (0, 10)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
print('Dataset Stats')
print('Roughly the number of unique words: {}'.format(len({word: None for word in source_text.split()})))
sentences = source_text.split('\n')
word_counts = [len(sentence.split()) for sentence in sentences]
print('Number of sentences: {}'.format(len(sentences)))
print('Average number of words in a sentence: {}'.format(np.average(word_counts)))
print()
print('English sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(source_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
print()
print('French sentences {} to {}:'.format(*view_sentence_range))
print('\n'.join(target_text.split('\n')[view_sentence_range[0]:view_sentence_range[1]]))
def text_to_ids(source_text, target_text, source_vocab_to_int, target_vocab_to_int):
    """Convert source and target text to proper word ids.

    :param source_text: String that contains all the source text.
    :param target_text: String that contains all the target text.
    :param source_vocab_to_int: Dictionary to go from the source words to an id
    :param target_vocab_to_int: Dictionary to go from the target words to an id
    :return: A tuple of lists (source_id_text, target_id_text)
    """
    # Docstring quotes were stripped by extraction (syntax error); restored.
    # One id-list per newline-separated sentence; the target side gets an
    # explicit <EOS> token appended so the decoder knows where sentences end.
    source_id_text = [[source_vocab_to_int[word] for word in sent.split()]
                      for sent in source_text.split("\n")]
    target_id_text = [[target_vocab_to_int[word] for word in (sent + ' <EOS>').split()]
                      for sent in target_text.split("\n")]
    return (source_id_text, target_id_text)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_text_to_ids(text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
# Tokenize both corpora to id lists and cache them to disk for later cells.
helper.preprocess_and_save_data(source_path, target_path, text_to_ids)
DON'T MODIFY ANYTHING IN THIS CELL
import numpy as np
import helper
# Checkpoint: reload the cached preprocessed data so earlier cells can be skipped.
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
DON'T MODIFY ANYTHING IN THIS CELL
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
# Check TensorFlow Version
# This notebook uses the TF 1.0.x tf.contrib.seq2seq API, which was removed later.
assert LooseVersion(tf.__version__) in [LooseVersion('1.0.0'), LooseVersion('1.0.1')], 'This project requires TensorFlow version 1.0 You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def model_inputs():
Create TF Placeholders for input, targets, and learning rate.
:return: Tuple (input, targets, learning rate, keep probability)
# TODO: Implement Function
# 'input' and 'keep_prob' get explicit names so the inference cell can fetch
# them by tensor name ('input:0', 'keep_prob:0') after reloading the graph.
input = tf.placeholder(tf.int32, shape=(None, None), name='input')
# NOTE(review): 'targets' and the learning-rate placeholder are unnamed, so
# they can only be fed through these Python references, not retrieved by name.
targets = tf.placeholder(tf.int32, shape=(None, None))
lr = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
return (input, targets, lr, keep_prob)
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_model_inputs(model_inputs)
def process_decoding_input(target_data, target_vocab_to_int, batch_size):
Preprocess target data for decoding
:param target_data: Target Placeholder
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param batch_size: Batch Size
:return: Preprocessed target data
# TODO: Implement Function
# Drop the last time step of every target sequence...
ending = tf.strided_slice(target_data, begin=[0, 0], end=[batch_size, -1], strides=[1, 1])
# ...and prepend a <GO> id, shifting the decoder input right by one token.
dec_input = tf.concat([tf.fill([batch_size, 1], target_vocab_to_int['<GO>']), ending], 1)
return dec_input
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_process_decoding_input(process_decoding_input)
def encoding_layer(rnn_inputs, rnn_size, num_layers, keep_prob):
Create encoding layer
:param rnn_inputs: Inputs for the RNN
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param keep_prob: Dropout keep probability
:return: RNN state
# TODO: Implement Function
# Stack of LSTM cells with output dropout; only the final state is returned
# (the per-step encoder outputs are discarded).
enc_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
dropout = tf.contrib.rnn.DropoutWrapper(enc_cell, output_keep_prob=keep_prob)
_, enc_state = tf.nn.dynamic_rnn(dropout, rnn_inputs, dtype=tf.float32)
return enc_state
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_encoding_layer(encoding_layer)
def decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob):
Create a decoding layer for training
:param encoder_state: Encoder State
:param dec_cell: Decoder RNN Cell
:param dec_embed_input: Decoder embedded input
:param sequence_length: Sequence Length
:param decoding_scope: TenorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Train Logits
# TODO: Implement Function
# drop out
dec_cell = tf.contrib.rnn.DropoutWrapper(dec_cell, output_keep_prob=keep_prob)
# generates a decoder fn
# Training decoder: feeds the ground-truth embedded targets (teacher forcing),
# starting from the encoder's final state.
dynamic_fn_train = tf.contrib.seq2seq.simple_decoder_fn_train(encoder_state)
outputs_train, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(
cell=dec_cell, decoder_fn=dynamic_fn_train, inputs=dec_embed_input,
sequence_length=sequence_length, scope=decoding_scope
)
# Apply output function
# Project RNN outputs to vocabulary-sized logits.
train_logits = output_fn(outputs_train)
return train_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_train(decoding_layer_train)
def decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, start_of_sequence_id, end_of_sequence_id,
maximum_length, vocab_size, decoding_scope, output_fn, keep_prob):
Create a decoding layer for inference
:param encoder_state: Encoder state
:param dec_cell: Decoder RNN Cell
:param dec_embeddings: Decoder embeddings
:param start_of_sequence_id: GO ID
:param end_of_sequence_id: EOS Id
:param maximum_length: The maximum allowed time steps to decode
:param vocab_size: Size of vocabulary
:param decoding_scope: TensorFlow Variable Scope for decoding
:param output_fn: Function to apply the output layer
:param keep_prob: Dropout keep probability
:return: Inference Logits
# TODO: Implement Function
# Inference decoder: feeds back its own previous prediction each step,
# starting from <GO> and stopping at <EOS> or maximum_length - 1 steps.
# NOTE(review): keep_prob is accepted but never applied here; inference is
# run with keep_prob=1.0 in the translate cell, so this is harmless.
dynamic_decoder_fn_inf = tf.contrib.seq2seq.simple_decoder_fn_inference(
output_fn, encoder_state, dec_embeddings, start_of_sequence_id,
end_of_sequence_id, maximum_length - 1, vocab_size)
inference_logits, _, _ = tf.contrib.seq2seq.dynamic_rnn_decoder(dec_cell, dynamic_decoder_fn_inf, scope=decoding_scope)
return inference_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer_infer(decoding_layer_infer)
def decoding_layer(dec_embed_input, dec_embeddings, encoder_state, vocab_size, sequence_length, rnn_size,
num_layers, target_vocab_to_int, keep_prob):
Create decoding layer
:param dec_embed_input: Decoder embedded input
:param dec_embeddings: Decoder embeddings
:param encoder_state: The encoded state
:param vocab_size: Size of vocabulary
:param sequence_length: Sequence Length
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:param keep_prob: Dropout keep probability
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
# dec cell
dec_cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(rnn_size) for _ in range(num_layers)])
with tf.variable_scope("decoding") as decoding_scope:
# output layer, None for linear act. fn
output_fn = lambda x: tf.contrib.layers.fully_connected(x, vocab_size, None, scope=decoding_scope)
train_logits = decoding_layer_train(encoder_state, dec_cell, dec_embed_input, sequence_length, decoding_scope,
output_fn, keep_prob)
# Re-enter the same variable scope with reuse=True so the inference decoder
# shares the weights trained by the training decoder above.
with tf.variable_scope("decoding", reuse=True) as decoding_scope:
inf_logits = decoding_layer_infer(encoder_state, dec_cell, dec_embeddings, target_vocab_to_int['<GO>'],
target_vocab_to_int['<EOS>'], sequence_length,
vocab_size, decoding_scope, output_fn, keep_prob)
return train_logits, inf_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_decoding_layer(decoding_layer)
def seq2seq_model(input_data, target_data, keep_prob, batch_size, sequence_length, source_vocab_size, target_vocab_size,
enc_embedding_size, dec_embedding_size, rnn_size, num_layers, target_vocab_to_int):
Build the Sequence-to-Sequence part of the neural network
:param input_data: Input placeholder
:param target_data: Target placeholder
:param keep_prob: Dropout keep probability placeholder
:param batch_size: Batch Size
:param sequence_length: Sequence Length
:param source_vocab_size: Source vocabulary size
:param target_vocab_size: Target vocabulary size
:param enc_embedding_size: Decoder embedding size
:param dec_embedding_size: Encoder embedding size
:param rnn_size: RNN Size
:param num_layers: Number of layers
:param target_vocab_to_int: Dictionary to go from the target words to an id
:return: Tuple of (Training Logits, Inference Logits)
# TODO: Implement Function
# NOTE(review): the enc_/dec_embedding_size descriptions above are swapped —
# enc_embedding_size is the ENCODER embedding size and vice versa.
# Encoder: embed source ids and run the encoding RNN to get the final state.
enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, enc_embedding_size)
enc_state = encoding_layer(enc_embed_input, rnn_size, num_layers, keep_prob)
# Decoder: <GO>-shifted targets, with a separate trainable embedding table.
dec_input = process_decoding_input(target_data, target_vocab_to_int, batch_size)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, dec_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
train_logits, inf_logits = decoding_layer(dec_embed_input, dec_embeddings, enc_state, target_vocab_size,
sequence_length, rnn_size, num_layers, target_vocab_to_int, keep_prob)
return train_logits, inf_logits
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_seq2seq_model(seq2seq_model)
# --- Hyperparameters ---
# Number of Epochs
epochs = 10
# Batch Size
batch_size = 256
# RNN Size
rnn_size = 256
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 100
decoding_embedding_size = 100
# Learning Rate
learning_rate = 0.002
# Dropout Keep Probability
keep_probability = 0.7
DON'T MODIFY ANYTHING IN THIS CELL
save_path = 'checkpoints/dev'
(source_int_text, target_int_text), (source_vocab_to_int, target_vocab_to_int), _ = helper.load_preprocess()
max_source_sentence_length = max([len(sentence) for sentence in source_int_text])
train_graph = tf.Graph()
with train_graph.as_default():
input_data, targets, lr, keep_prob = model_inputs()
# sequence_length defaults to the longest source sentence but is overridden
# per batch during training (fed with target_batch.shape[1]).
sequence_length = tf.placeholder_with_default(max_source_sentence_length, None, name='sequence_length')
input_shape = tf.shape(input_data)
# Source sequences are reversed along the time axis before encoding.
train_logits, inference_logits = seq2seq_model(
tf.reverse(input_data, [-1]), targets, keep_prob, batch_size, sequence_length, len(source_vocab_to_int), len(target_vocab_to_int),
encoding_embedding_size, decoding_embedding_size, rnn_size, num_layers, target_vocab_to_int)
# Name the inference output so it can be fetched as 'logits:0' after reload.
tf.identity(inference_logits, 'logits')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
train_logits,
targets,
tf.ones([input_shape[0], sequence_length]))
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
# Clip each gradient elementwise to [-1, 1] to stabilize RNN training.
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
DON'T MODIFY ANYTHING IN THIS CELL
import time
def get_accuracy(target, logits):
    """Calculate accuracy: fraction of positions where argmax(logits) matches target.

    :param target: int array of shape (batch, seq_t) with target word ids
    :param logits: array of shape (batch, seq_l, vocab) with unnormalized scores
    :return: mean per-position match rate after zero-padding both to a common length

    The docstring delimiters were lost in this copy ("Calculate accuracy" was a
    bare line); restored as a proper docstring. Logic unchanged.
    """
    max_seq = max(target.shape[1], logits.shape[1])
    # Zero-pad the shorter time axis so target and predictions align elementwise.
    if max_seq - target.shape[1]:
        target = np.pad(
            target,
            [(0, 0), (0, max_seq - target.shape[1])],
            'constant')
    if max_seq - logits.shape[1]:
        logits = np.pad(
            logits,
            [(0, 0), (0, max_seq - logits.shape[1]), (0, 0)],
            'constant')
    return np.mean(np.equal(target, np.argmax(logits, 2)))
# --- Train/validation split: the first batch_size sentences become the
# fixed validation set; everything else is training data. ---
train_source = source_int_text[batch_size:]
train_target = target_int_text[batch_size:]
valid_source = helper.pad_sentence_batch(source_int_text[:batch_size])
valid_target = helper.pad_sentence_batch(target_int_text[:batch_size])
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(epochs):
for batch_i, (source_batch, target_batch) in enumerate(
helper.batch_data(train_source, train_target, batch_size)):
start_time = time.time()
_, loss = sess.run(
[train_op, cost],
{input_data: source_batch,
targets: target_batch,
lr: learning_rate,
sequence_length: target_batch.shape[1],
keep_prob: keep_probability})
# Every 200 batches, report accuracy with dropout disabled (keep_prob=1.0).
if batch_i % 200 == 0 and batch_i > 0:
batch_train_logits = sess.run(
inference_logits,
{input_data: source_batch, keep_prob: 1.0})
batch_valid_logits = sess.run(
inference_logits,
{input_data: valid_source, keep_prob: 1.0})
train_acc = get_accuracy(target_batch, batch_train_logits)
valid_acc = get_accuracy(np.array(valid_target), batch_valid_logits)
end_time = time.time()
print('Epoch {:>3} Batch {:>4}/{} - Train Accuracy: {:>6.3f}, Validation Accuracy: {:>6.3f}, Loss: {:>6.3f}'
.format(epoch_i, batch_i, len(source_int_text) // batch_size, train_acc, valid_acc, loss))
# Save Model
saver = tf.train.Saver()
saver.save(sess, save_path)
print('Model Trained and Saved')
DON'T MODIFY ANYTHING IN THIS CELL
# Save parameters for checkpoint
helper.save_params(save_path)
DON'T MODIFY ANYTHING IN THIS CELL
# Checkpoint: reload vocab mappings and the saved-model path for inference.
import tensorflow as tf
import numpy as np
import helper
import problem_unittests as tests
_, (source_vocab_to_int, target_vocab_to_int), (source_int_to_vocab, target_int_to_vocab) = helper.load_preprocess()
load_path = helper.load_params()
def sentence_to_seq(sentence, vocab_to_int):
    """Convert a sentence to a sequence of ids.

    :param sentence: String
    :param vocab_to_int: Dictionary to go from the words to an id
    :return: List of word ids

    The docstring delimiters were lost in this copy; restored. Logic unchanged.
    """
    # Lower-case to match the vocabulary; out-of-vocabulary words map to <UNK>.
    sent = sentence.lower()
    unk_id = vocab_to_int['<UNK>']
    ids = [vocab_to_int.get(word, unk_id) for word in sent.split()]
    return ids
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
tests.test_sentence_to_seq(sentence_to_seq)
translate_sentence = 'he saw a old yellow truck .'
DON'T MODIFY ANYTHING IN THIS CELL
translate_sentence = sentence_to_seq(translate_sentence, source_vocab_to_int)
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
# Load saved model
loader = tf.train.import_meta_graph(load_path + '.meta')
loader.restore(sess, load_path)
# Tensors are recovered by the names assigned when the graph was built.
input_data = loaded_graph.get_tensor_by_name('input:0')
logits = loaded_graph.get_tensor_by_name('logits:0')
keep_prob = loaded_graph.get_tensor_by_name('keep_prob:0')
# Dropout disabled at inference time (keep_prob=1.0); single-sentence batch.
translate_logits = sess.run(logits, {input_data: [translate_sentence], keep_prob: 1.0})[0]
print('Input')
print(' Word Ids: {}'.format([i for i in translate_sentence]))
print(' English Words: {}'.format([source_int_to_vocab[i] for i in translate_sentence]))
print('\nPrediction')
print(' Word Ids: {}'.format([i for i in np.argmax(translate_logits, 1)]))
print(' French Words: {}'.format([target_int_to_vocab[i] for i in np.argmax(translate_logits, 1)]))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Language Translation
Step3: Explore the Data
Step6: Implement Preprocessing Function
Step8: Preprocess all the data and save it
Step10: Check Point
Step12: Check the Version of TensorFlow and Access to GPU
Step15: Build the Neural Network
Step18: Process Decoding Input
Step21: Encoding
Step24: Decoding - Training
Step27: Decoding - Inference
Step30: Build the Decoding Layer
Step33: Build the Neural Network
Step34: Neural Network Training
Step36: Build the Graph
Step39: Train
Step41: Save Parameters
Step43: Checkpoint
Step46: Sentence to Sequence
Step48: Translate
|
2,023
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
%load_ext autoreload
%autoreload 2
from importlib import reload
import numpy as np
import matplotlib.pyplot as plt
from keras import models, layers, optimizers
from keras.layers import Dense, Input, Conv1D, Reshape, Flatten
from keras.models import Model
from keras.optimizers import Adam
lr = 0.0002
adam = Adam(lr=lr, beta_1=0.5)
# Shared compile helper: binary cross-entropy for the real/fake decision.
# NOTE(review): the same Adam instance is reused to compile D, G, and GD —
# confirm this optimizer sharing is intentional for your Keras version.
model_compile = lambda model: model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
# GAN over 1-D vectors: D is a 2-hidden-layer MLP classifier (real vs. fake);
# G maps a noise vector through 1x1 Conv1D layers back to the same shape.
class GAN:
def __init__(self, ni_D, nh_D, nh_G):
D = models.Sequential()
D.add(Dense(nh_D, activation='relu', input_shape=(ni_D,)))
D.add(Dense(nh_D, activation='relu'))
D.add(Dense(1, activation='sigmoid'))
model_compile(D)
G = models.Sequential() # (Batch, ni_D)
G.add(Reshape((ni_D, 1), input_shape=(ni_D,))) # (Batch, steps=ni_D, input_dim=1)
G.add(Conv1D(nh_G, 1)) # (Batch, ni_D, nh_G)
G.add(Conv1D(nh_G, 1)) # (Batch, ni_D, nh_G)
G.add(Conv1D(1, 1)) # (Batch, ni_D, 1)
G.add(Flatten()) # (Batch, ni_D)
model_compile(G)
# Stacked model G -> D used for generator updates; D is frozen while GD is
# compiled so generator training does not move the discriminator weights.
GD = models.Sequential()
GD.add(G)
GD.add(D)
D.trainable = False
model_compile(GD)
D.trainable = True
self.D, self.G, self.GD = D, G, GD
def D_train_on_batch(self, Real, Gen):
# One discriminator step on a batch of real (label 1) + generated (label 0).
D = self.D
X = np.concatenate([Real, Gen], axis=0)
y = [1] * Real.shape[0] + [0] * Gen.shape[0]
D.train_on_batch(X, y)
def GD_train_on_batch(self, Z):
# One generator step: label generated samples as "real" (1) through frozen D.
GD, D = self.GD, self.D
y = [1] * Z.shape[0]
GD.train_on_batch(Z, y)
# Smoke-test: build the GAN stack once with a 100-dim input.
gan = GAN(ni_D=100, nh_D=50, nh_G=50)
class Data:
    """Samplers for the GAN: 'real' Gaussian batches and uniform noise seeds."""

    def __init__(self, mu, sigma, ni_D):
        # Each sampler returns an array of shape (n_batch, ni_D).
        def real_sample(n_batch):
            return np.random.normal(mu, sigma, (n_batch, ni_D))

        def in_sample(n_batch):
            return np.random.rand(n_batch, ni_D)

        self.real_sample = real_sample
        self.in_sample = in_sample
class Machine:
    """Drives alternating discriminator/generator training on Gaussian data.

    Restored the docstrings whose delimiters were lost in this copy, and fixed
    run_loop to call self.run instead of the module-level `machine` global
    (the original only worked because a global of that name happened to exist).
    """

    def __init__(self, n_batch=10, ni_D=100):
        # Real data ~ N(0, 1), ni_D values per sample.
        self.data = Data(0, 1, ni_D)
        self.gan = GAN(ni_D=ni_D, nh_D=50, nh_G=50)
        self.n_batch = n_batch

    def train_D(self):
        """One discriminator update on a half-real / half-generated batch."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        # Real data
        Real = data.real_sample(n_batch)  # (n_batch, ni_D)
        # Generated data
        Z = data.in_sample(n_batch)  # (n_batch, ni_D)
        Gen = gan.G.predict(Z)  # (n_batch, ni_D)
        gan.D.trainable = True
        gan.D_train_on_batch(Real, Gen)

    def train_GD(self):
        """One generator update through the frozen discriminator."""
        gan = self.gan
        n_batch = self.n_batch
        data = self.data
        # Seed data for data generation
        Z = data.in_sample(n_batch)
        gan.D.trainable = False
        gan.GD_train_on_batch(Z)

    def train_each(self):
        """One D step followed by one G step."""
        self.train_D()
        self.train_GD()

    def train(self, epochs):
        for epoch in range(epochs):
            self.train_each()

    def test(self, n_test):
        """Generate n_test samples; returns (generated, noise_inputs)."""
        gan = self.gan
        data = self.data
        Z = data.in_sample(n_test)
        Gen = gan.G.predict(Z)
        return Gen, Z

    def show_hist(self, Real, Gen, Z):
        """Overlay histograms of real, generated, and input distributions."""
        plt.hist(Real.reshape(-1), histtype='step', label='Real')
        plt.hist(Gen.reshape(-1), histtype='step', label='Generated')
        plt.hist(Z.reshape(-1), histtype='step', label='Input')
        plt.legend(loc=0)

    def test_and_show(self, n_test):
        data = self.data
        Gen, Z = self.test(n_test)
        Real = data.real_sample(n_test)
        self.show_hist(Real, Gen, Z)

    def run(self, epochs, n_test):
        """Train GAN and show the results.

        For showing, the original and the artificial results will be compared.
        """
        self.train(epochs)
        self.test_and_show(n_test)

    def run_loop(self, n_iter=100, epochs_each=1000, n_test=1000):
        for ii in range(n_iter):
            print('Stage', ii)
            # Fixed: was `machine.run(...)` (module-level global); use self so
            # run_loop works on any instance.
            self.run(epochs_each, n_test)
            plt.show()
# Train with 1000-dim samples: 100 reporting stages of 1000 D/G step pairs each.
machine = Machine(n_batch=10, ni_D=1000)
machine.run_loop(100, 1000, 1000)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Define Model
Step2: Load Data
Step5: Test train
|
2,024
|
<ASSISTANT_TASK:>
Python Code:
# Import libraries necessary for this project
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
# Load the wholesale customers dataset
# NOTE(review): Python 2 print statements; the bare `except:` also swallows
# every error (including typos), not just a missing file.
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
# Display a description of the dataset
display(data.describe())
# TODO: Select three indices of your choice you wish to sample from the dataset
import random
random.seed(14)
# NOTE(review): random.randint's upper bound is inclusive, so data.shape[0]
# itself can be drawn — one past the last row label; verify .loc tolerates it.
indices = [random.randint(0, data.shape[0]) for x in range(3)]
sampleIndices = indices
print("Indices: {}".format(indices))
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeRegressor
def find_relevance(data, target_label):
    """R^2 score for predicting `target_label` from the remaining features.

    A high score means the feature is largely explained by the other columns
    and therefore carries little independent information.
    """
    # Use every other column as predictors for the chosen feature.
    predictors = data.drop([target_label], axis=1, inplace=False)
    response = data[target_label]
    # Hold out a quarter of the rows for scoring; fixed seed for repeatability.
    X_train, X_test, y_train, y_test = train_test_split(
        predictors, response, test_size=0.25, random_state=14)
    model = DecisionTreeRegressor(random_state=14)
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)
# Score how predictable each feature is from the others.
for target_label in data.columns:
score = find_relevance(data, target_label)
print("{:>20s}: {:+0.3f}".format(target_label, score))
# Produce a scatter matrix for each pair of features in the data
pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# TODO: Scale the data using the natural logarithm
log_data = np.log(data)
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(samples)
# Produce a scatter matrix for each pair of newly-transformed features
pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# Display the log-transformed sample data
display(log_samples)
# Tukey outlier detection: flag rows beyond 1.5*IQR per feature, and count
# in outlierDict how many features flag each row.
features = log_data.columns
outlierLimitDict = {}
outlierDict = {}
# For each feature find the data points with extreme high or low values
for feature in features:
# TODO: Calculate Q1 (25th percentile of the data) for the given feature
Q1 = np.percentile(log_data[feature], 25)
# TODO: Calculate Q3 (75th percentile of the data) for the given feature
Q3 = np.percentile(log_data[feature], 75)
# TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
iqr = Q3 - Q1
step = 1.5 * iqr
outlierLimitDict[feature] = (Q1 - step, Q3 + step)
# Display the outliers
outliers = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
for index in outliers.index:
originalCount = outlierDict.get(index, 0)
outlierDict[index] = originalCount + 1
print "Data points considered outliers for the feature '{}':".format(feature)
display(outliers)
# Print indices of rows that are outliers for multiple features
for index in sorted(outlierDict.keys()):
if outlierDict[index] > 1:
print("{:3}: {}".format(index, outlierDict[index]))
# OPTIONAL: Select the indices for data points you wish to remove
# NOTE(review): `outliers` is rebound to an empty list here, so no rows are
# actually dropped below — the detection above is informational only.
outliers = []
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
# Make sure samples don't contain these indices
for index in sampleIndices:
if index in outliers:
raise Exception("The samples contain an outlier (index {})".format(index))
# Color a scatter-matrix point by outlier status. Reads the module-level
# `outliers` (rows chosen for removal -> red) and `outlierDict` (rows flagged
# by at least one feature -> green); everything else is black.
def color_point(row):
if row.name in outliers:
return "red"
if row.name in outlierDict.keys():
return "green"
return "black"
# Re-plot the scatter matrix with outlier-status coloring.
pd.scatter_matrix(log_data, figsize = (14,8), diagonal = 'kde', alpha=1, lw=0, c=log_data.apply(color_point, axis=1));
from sklearn.decomposition import PCA
# TODO: Apply PCA by fitting the good data with the same number of dimensions as features
n = min(good_data.shape)
pca = PCA(n_components=n)
pca.fit(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Generate PCA results plot
pca_results = vs.pca_results(good_data, pca)
# Cumulative explained variance of the first i components.
for i in range(1,n+1):
print("The total variance explained by the first {} principle component{} is {}.".format(
i,
" " if i == 1 else "s",
sum(pca.explained_variance_ratio_[0:i])
))
# Display sample log-data after having a PCA transformation applied
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
# TODO: Apply PCA by fitting the good data with only two dimensions
pca = PCA(n_components=2)
# TODO: Transform the good data using the PCA fit above
reduced_data = pca.fit_transform(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
# Display sample log-data after applying PCA transformation in two dimensions
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
# Create a biplot
vs.biplot(good_data, reduced_data, pca)
from sklearn.mixture import GMM
from sklearn.metrics import silhouette_score
# TODO: Apply your clustering algorithm of choice to the reduced data
# Fit an n-component Gaussian mixture and rate the hard cluster assignment
# with the mean silhouette coefficient.
# NOTE(review): sklearn.mixture.GMM is the pre-0.18 API (removed in 0.20);
# modern scikit-learn uses GaussianMixture — confirm the pinned version.
def getGmmSilhouetteScore(n, data, samples):
clusterer = GMM(n_components=n, random_state=14)
clusterer.fit(data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(data)
# TODO: Find the cluster centers
centers = clusterer.means_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(samples)
# TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
score = silhouette_score(data, preds)
return score, centers, preds, sample_preds
# Sweep cluster counts 2..maxN-1 and keep the best silhouette score.
bestSilhouetteScoreN = 0
bestSilhouetteScore = -1
maxN = 10
for n in range(2, maxN):
score, _, _, _ = getGmmSilhouetteScore(n, reduced_data, pca_samples)
if score > bestSilhouetteScore:
bestSilhouetteScore = score
bestSilhouetteScoreN = n
print("Sillhouette score for n={}: {}".format(n, score))
# Refit at the winning n to recover centers and predictions.
score, centers, preds, sample_preds = getGmmSilhouetteScore(bestSilhouetteScoreN, reduced_data, pca_samples)
print("")
print("Best n is {} with a silhouette score of {}.".format(bestSilhouetteScoreN, score))
# Display the results of the clustering from implementation
vs.cluster_results(reduced_data, preds, centers, pca_samples)
# TODO: Inverse transform the centers
# Undo the 2-D PCA and the earlier log transform to express the centers in
# original spending units.
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
# Display the predictions
for i, pred in enumerate(sample_preds):
print "Sample point", i, "predicted to be in Cluster", pred
# Display the clustering results based on 'Channel' data
vs.channel_results(reduced_data, outliers, pca_samples)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data Exploration
Step2: Implementation
Step3: Question 1
Step4: Question 2
Step5: Question 3
Step6: Observation
Step7: Implementation
Step8: Question 4
Step9: Question 5
Step10: Implementation
Step11: Observation
Step12: Visualizing a Biplot
Step13: Observation
Step14: Question 7
Step15: Implementation
Step16: Question 8
Step17: Answer
|
2,025
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@test {"skip": true}
!pip install tensorflow-lattice
import tensorflow as tf
import logging
import numpy as np
import pandas as pd
import sys
import tensorflow_lattice as tfl
from tensorflow import feature_column as fc
from tensorflow_estimator.python.estimator.canned import optimizers
from tensorflow_estimator.python.estimator.head import binary_class_head
# Silence ALL logging output (every level up to sys.maxsize).
logging.disable(sys.maxsize)
# Download the UCI Statlog (Heart) dataset; 'target' is the binary label.
csv_file = tf.keras.utils.get_file(
'heart.csv', 'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')
df = pd.read_csv(csv_file)
target = df.pop('target')
# 80/20 sequential train/test split (no shuffling before the split).
train_size = int(len(df) * 0.8)
train_x = df[:train_size]
train_y = target[:train_size]
test_x = df[train_size:]
test_y = target[train_size:]
df.head()
LEARNING_RATE = 0.1
BATCH_SIZE = 128
NUM_EPOCHS = 1000
# Feature columns.
# - age
# - sex
# - ca number of major vessels (0-3) colored by flourosopy
# - thal 3 = normal; 6 = fixed defect; 7 = reversable defect
feature_columns = [
fc.numeric_column('age', default_value=-1),
fc.categorical_column_with_vocabulary_list('sex', [0, 1]),
fc.numeric_column('ca'),
fc.categorical_column_with_vocabulary_list(
'thal', ['normal', 'fixed', 'reversible']),
]
# Pandas-backed input pipelines: shuffled multi-epoch for training,
# single-pass in order for evaluation.
train_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
x=train_x,
y=train_y,
shuffle=True,
batch_size=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
num_threads=1)
test_input_fn = tf.compat.v1.estimator.inputs.pandas_input_fn(
x=test_x,
y=test_y,
shuffle=False,
batch_size=BATCH_SIZE,
num_epochs=1,
num_threads=1)
# Custom estimator model_fn: per-feature calibration layers feeding a
# monotonic TFL Lattice, with a binary-classification head.
def model_fn(features, labels, mode, config):
model_fn for the custom estimator.
del config
input_tensors = tfl.estimators.transform_features(features, feature_columns)
inputs = {
key: tf.keras.layers.Input(shape=(1,), name=key) for key in input_tensors
}
# Lattice vertex counts per dimension, in input order: age, sex, ca, thal.
lattice_sizes = [3, 2, 2, 2]
lattice_monotonicities = ['increasing', 'none', 'increasing', 'increasing']
lattice_input = tf.keras.layers.Concatenate(axis=1)([
tfl.layers.PWLCalibration(
input_keypoints=np.linspace(10, 100, num=8, dtype=np.float32),
# The output range of the calibrator should be the input range of
# the following lattice dimension.
output_min=0.0,
output_max=lattice_sizes[0] - 1.0,
monotonicity='increasing',
)(inputs['age']),
tfl.layers.CategoricalCalibration(
# Number of categories including any missing/default category.
num_buckets=2,
output_min=0.0,
output_max=lattice_sizes[1] - 1.0,
)(inputs['sex']),
tfl.layers.PWLCalibration(
input_keypoints=[0.0, 1.0, 2.0, 3.0],
output_min=0.0,
# NOTE(review): 'ca' is the THIRD lattice dimension (lattice_sizes[2] == 2),
# but its output_max uses lattice_sizes[0] - 1.0 == 2.0 instead of 1.0 —
# confirm this range mismatch is intended.
output_max=lattice_sizes[0] - 1.0,
# You can specify TFL regularizers as tuple
# ('regularizer name', l1, l2).
kernel_regularizer=('hessian', 0.0, 1e-4),
monotonicity='increasing',
)(inputs['ca']),
tfl.layers.CategoricalCalibration(
num_buckets=3,
output_min=0.0,
output_max=lattice_sizes[1] - 1.0,
# Categorical monotonicity can be partial order.
# (i, j) indicates that we must have output(i) <= output(j).
# Make sure to set the lattice monotonicity to 'increasing' for this
# dimension.
monotonicities=[(0, 1), (0, 2)],
)(inputs['thal']),
])
output = tfl.layers.Lattice(
lattice_sizes=lattice_sizes, monotonicities=lattice_monotonicities)(
lattice_input)
training = (mode == tf.estimator.ModeKeys.TRAIN)
model = tf.keras.Model(inputs=inputs, outputs=output)
logits = model(input_tensors, training=training)
# Only build an optimizer in TRAIN mode; eval/predict pass None.
if training:
optimizer = optimizers.get_optimizer_instance_v2('Adagrad', LEARNING_RATE)
else:
optimizer = None
head = binary_class_head.BinaryClassHead()
return head.create_estimator_spec(
features=features,
mode=mode,
labels=labels,
optimizer=optimizer,
logits=logits,
trainable_variables=model.trainable_variables,
update_ops=model.updates)
# Train the custom estimator, then report held-out AUC.
estimator = tf.estimator.Estimator(model_fn=model_fn)
estimator.train(input_fn=train_input_fn)
results = estimator.evaluate(input_fn=test_input_fn)
print('AUC: {}'.format(results['auc']))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: TF Lattice Custom Estimators
Step2: Importing required packages
Step3: Downloading the UCI Statlog (Heart) dataset
Step4: Setting the default values used for training in this guide
Step5: Feature Columns
Step6: Note that categorical features do not need to be wrapped by a dense feature column, since tfl.laysers.CategoricalCalibration layer can directly consume category indices.
Step8: Creating model_fn
Step9: Training and Estimator
|
2,026
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# --- Bank Marketing dataset EDA: load, split features by type, plot. ---
DATAFILE = '/home/data/archive.ics.uci.edu/BankMarketing/bank.csv'
###DATAFILE = 'data/bank.csv' ### using locally
df = pd.read_csv(DATAFILE, sep=';')
list(df.columns)
### use sets and '-' difference operation 'A-B'. Also there is a symmetric different '^'
# Numeric features are exactly those describe() reports on; the rest (minus
# the target 'y') are categorical.
all_features = set(df.columns)-set(['y'])
num_features = set(df.describe().columns)
cat_features = all_features-num_features
print("All features: ", ", ".join(all_features), "\nNumerical features: ", ", ".join(num_features), "\nCategorical features: ", ", ".join(cat_features))
set(df.columns)-set(df.describe().columns)-set('y')
### Describe Columns
help(pd.DataFrame.describe)
### Let's get the description of the numeric data for each of the target values separately.
### We need to rename the columns before we can properly join the tables. The column names may look strange...
# NOTE(review): rename_axis with a mapper function and reindex_axis are
# long-deprecated pandas APIs (use df.rename(columns=...) / .reindex(...));
# this code targets an old pandas release.
desc_yes = df[df.y=='yes'].describe().rename_axis(lambda c: "%s|A"%c, axis='columns')
desc_no = df[df.y=='no'].describe().rename_axis(lambda c: "%s|B"%c, axis='columns')
### ...but this way we can get them in the desired order...
desc = desc_yes.join(desc_no).reindex_axis(sorted(desc_yes.columns), axis=1)
### ...because we're changing them anyway:
#desc.set_axis(1, [sorted(list(num_features)*2), ['yes', 'no']*len(num_features)])
#desc
%matplotlib inline
# Histogram of each numeric feature in a 2x4 grid.
fig = plt.figure(figsize=(32, 8))
for i in range(len(num_features)):
f = list(num_features)[i]
plt.subplot(2, 4, i+1)
hst = plt.hist(df[f], alpha=0.5)
plt.title(f)
plt.suptitle('Distribution of Numeric Values', fontsize=20)
None
# Level counts for each categorical feature, one line per feature.
for f in cat_features:
tab = df[f].value_counts()
print('%s:\t%s' % (f, ', '.join([ ("%s(%d)" %(tab.index[i], tab.values[i])) for i in range(len(tab))]) ))
mat = pd.DataFrame(
[ df[f].value_counts() for f in list(cat_features) ],
index=list(cat_features)
).stack()
pd.DataFrame(mat.values, index=mat.index)
help(pd.DataFrame.as_matrix)
## We copy our original dataframe into a new one, and then perform replacements on categorical levels.
## We may also keep track of our replacement
level_substitution = {}
def levels2index(levels):
    """Map each categorical level to its position: levels[i] -> i."""
    return {level: pos for pos, level in enumerate(levels)}
df_num = df.copy()
for c in cat_features:
level_substitution[c] = levels2index(df[c].unique())
df_num[c].replace(level_substitution[c], inplace=True)
## same for target
df_num.y.replace({'no':0, 'yes':1}, inplace=True)
df_num
level_substitution
X = df_num[list(all_features)].as_matrix()
y = df_num.y.as_matrix()
X, y
### Scikit-learn provides us with a nice function to split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)
from sklearn.tree import DecisionTreeClassifier
clf = DecisionTreeClassifier(max_depth=5)
clf.fit(X_train, y_train)
score_train = clf.score(X_train, y_train)
score_test = clf.score(X_test, y_test)
print('Ratio of correctly classified samples for:\n\tTraining-set:\t%f\n\tTest-set:\t%f'%(score_train, score_test))
import sklearn.tree
import pydot_ng as pdot
dot_data = sklearn.tree.export_graphviz(clf, out_file=None, feature_names = list(all_features), class_names=['no', 'yes'])
graph = pdot.graph_from_dot_data(dot_data)
#--- we can save the graph into a file ... preferrably vector graphics
#graph.write_svg('mydt.svg')
graph.write_pdf('/home/pmolnar/public_html/mydt.pdf')
#--- or display right here
##from IPython.display import HTML
HTML(str(graph.create_svg().decode('utf-8')))
ŷ = clf.predict(X_test)
## a function that produces the confusion matrix: 1. parameter y=actual target, 2. parameter ŷ=predicted
def binary_confusion_matrix(y,ŷ):
    """Build a 2x2 confusion matrix for binary (0/1) labels.

    y is the actual target, ŷ the prediction. Rows are predictions,
    columns are actual values, laid out [[TP, FP], [FN, TN]].
    """
    # Count each outcome with boolean masks over the 0/1 label arrays.
    tp = ((y == 1) & (ŷ == 1)).sum()
    tn = ((y == 0) & (ŷ == 0)).sum()
    fp = ((y == 0) & (ŷ == 1)).sum()
    fn = ((y == 1) & (ŷ == 0)).sum()
    row_labels = [['Prediction', 'Prediction'], ['Yes', 'No']]
    col_labels = [['Actual', 'Actual'], ['Yes', 'No']]
    return pd.DataFrame([[tp, fp], [fn, tn]], index=row_labels, columns=col_labels)
cm = binary_confusion_matrix(y_test, ŷ)
cm
### Scikit-Learn can do that too ... so so nice though
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, ŷ)
cm
### Here are some metrics
from sklearn.metrics import classification_report
print(classification_report(y_test, ŷ))
### http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
import itertools
np.set_printoptions(precision=2)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    # NOTE(review): the original docstring had its triple quotes stripped,
    # leaving bare prose statements (a SyntaxError); restored here.
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    if normalize:
        # Normalize each row (true class) so values are per-class proportions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    # Annotate each cell; flip the text color for readability on dark cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
%matplotlib inline
fig = plt.figure()
plot_confusion_matrix(cm, classes=['No', 'Yes'], normalize=True, title='Normalized confusion matrix')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Step 1
Step2: Let's look at the distribution of numerical features...
Step3: Now, let's look at the categorical variables and their distribution...
Step4: Results in a data frame
Step5: Step 2
Step6: Step 3
Step7: score returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted. For binary classification it means percentage of correctly classified samples.
Step 4
Step9: Now, we use out classifier and predict on the test set (In order to get the ŷ character type
|
2,027
|
<ASSISTANT_TASK:>
Python Code:
import datacube
dc = datacube.Datacube(app='load-data-example')
data = dc.load(product='ls5_nbar_albers', x=(149.25, 149.5), y=(-36.25, -36.5),
time=('2008-01-01', '2009-01-01'))
data
data = dc.load(product='ls5_nbar_albers', x=(1543137.5, 1569137.5), y=(-4065537.5, -4096037.5),
time=('2008-01-01', '2009-01-01'), crs='EPSG:3577')
data
data = dc.load(product='ls5_nbar_albers', x=(149.25, 149.5), y=(-36.25, -36.5),
time=('2008-01-01', '2009-01-01'), measurements=['red', 'nir'])
data
help(dc.load)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Loading data
Step2: Load data via a products native co-ordinate system
Step3: Load specific measurements of a given product
Step4: Additional help can be found by calling help(dc.load)
|
2,028
|
<ASSISTANT_TASK:>
Python Code:
# print("Hello World)
# Lots...
# and lots...
# of comments...
print("this works") # this works because the "#" symbol is placed AFTER the bit of code we want to run!
"abc" * 4 # ???
"a" * 3 # string * number repeats the character. Thus "a" * 2 = "aa" and "az" * 2 = "azaz".
print( 4 * 2 ) # Very simply and clear code, you can tell what it does just by looking at it.
print( int(chr(52)).__mul__(int(chr(50))) ) # A TERRIBLE and confusing way to calculate "4 * 2"
# print("\n * ,MMM8&&&. *\n MMMM88&&&&& .\n MMMM88&&&&&&&\n * MMM88&&&&&&&&\n MMM88&&&&&&&&\n 'MMM88&&&&&&'\n 'MMM8&&&' *\n |\\___/|\n ) ( . '\n =\\ /=\n )===( *\n / \\\n | |\n / \\\n \\ /\n _/\\_/\\_/\\__ _/_/\\_/\\_/\\_/\\_/\\_/\\_/\\_/\\_/\\_\n | | | |( ( | | | | | | | | | |\n | | | | ) ) | | | | | | | | | |\n | | | |(_( | | | | | | | | | |\n | | | | | | | | | | | | | | |\n | | | | | | | | | | | | | | |\n")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Woah !? Nothing happened!? Why is that?
Step2: You can also place comments after some code, in which case the code executes. Here, let me show you
Step3: What are comments for?
Step4: In later lectures I'll explain how multiplying strings work. But for now just notice that in the first case you didn't know what was going on (because there were no helpful comments), but you understand what is happening in the second case because the comment explains the code.
Step5: Complex code often requires comments to explain what it does, and that's why comments are (sort-of) bad.
|
2,029
|
<ASSISTANT_TASK:>
Python Code:
f = open("files/simple-file.txt")
for l in f.readlines():
print(l,end="")
f.close()
with open("files/simple-file.txt") as f:
for l in f:
print(l.strip())
with open("files/simple-file.txt.gz") as f:
for l in f:
print(l.strip())
import gzip
with gzip.open("files/simple-file.txt.gz") as f:
for l in f:
l_ascii = l.decode("ascii")
print(l_ascii.strip())
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Problem
Step2: python
Step3: Use the gzip module
|
2,030
|
<ASSISTANT_TASK:>
Python Code:
import os, sys
sys.path = [os.path.abspath("../../")] + sys.path
from perception4e import *
from notebook4e import *
import matplotlib.pyplot as plt
plt.imshow(gray_scale_image, cmap='gray', vmin=0, vmax=255)
plt.axis('off')
plt.show()
gray_img = gen_gray_scale_picture(100, 5)
plt.imshow(gray_img, cmap='gray', vmin=0, vmax=255)
plt.axis('off')
plt.show()
discs = gen_discs(100, 1)
fig=plt.figure(figsize=(10, 10))
for i in range(8):
img = discs[0][i]
fig.add_subplot(1, 8, i+1)
plt.axis('off')
plt.imshow(img, cmap='gray', vmin=0, vmax=255)
plt.show()
discs = gen_discs(10, 1)
contours = probability_contour_detection(gray_img, discs[0])
show_edges(contours)
contours = group_contour_detection(gray_scale_image, 3)
show_edges(contours)
import numpy as np
import matplotlib.image as mpimg
stapler_img = mpimg.imread('images/stapler.png', format="gray")
contours = group_contour_detection(stapler_img, 5)
plt.axis('off')
plt.imshow(contours, cmap="gray")
contours = group_contour_detection(stapler_img, 15)
plt.axis('off')
plt.imshow(contours, cmap="gray")
image = gen_gray_scale_picture(size=10, level=2)
show_edges(image)
graph = Graph(image)
graph.min_cut((0,0), (9,9))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's take a look at it
Step2: You can also generate your own grayscale images by calling gen_gray_scale_picture and pass the image size and grayscale levels needed
Step3: Now let's generate the discs we are going to use as sampling masks to tell the intensity difference between two half of the care area of an image. We can generate the discs of size 100 pixels and show them
Step4: The white part of disc images is of value 1 while dark places are of value 0. Thus convolving the half-disc image with the corresponding area of an image will yield only half of its content. Of course, discs of size 100 is too large for an image of the same size. We will use discs of size 10 and pass them to the detector.
Step5: As we are using discs of size 10 and some boundary conditions are not dealt with in our naive algorithm, the extracted contour has a bold edge with missings near the image border. But the main structures of contours are extracted correctly which shows the ability of this algorithm.
Step6: Now let's show the extracted contours
Step7: It is not obvious as our generated image already has very clear boundaries. Let's apply the algorithm on the stapler example to see whether it will be more obvious
Step8: The segmentation is very rough when using only 5 clusters. Adding to the cluster number will increase the degree of subtle of each group thus the whole picture will be more alike the original one
Step9: Minimum Cut Segmentation
|
2,031
|
<ASSISTANT_TASK:>
Python Code:
# Load libraries
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
import numpy as np
# Load data
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Make class highly imbalanced by removing first 40 observations
X = X[40:,:]
y = y[40:]
# Create target vector indicating if class 0, otherwise 1
y = np.where((y == 0), 0, 1)
# Standarize features
scaler = StandardScaler()
X_std = scaler.fit_transform(X)
# Create decision tree classifer object
clf = LogisticRegression(random_state=0, class_weight='balanced')
# Train model
model = clf.fit(X_std, y)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Iris Flower Dataset
Step2: Make Classes Imbalanced
Step3: Standardize Features
Step4: Train A Logistic Regression With Weighted Classes
|
2,032
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
raw_data = pd.read_csv("heightWeightData.txt", header=None, names=["gender", "height", "weight"])
raw_data.info()
raw_data.head()
male_data = raw_data[raw_data.gender == 1]
male_data.head()
mu_male = male_data.mean(axis=0)[1:].as_matrix() # remove gender
male_mean_diff = male_data.iloc[:,1:].as_matrix() - mu_male
covariance_male = np.dot(male_mean_diff.T, male_mean_diff)/len(male_data)
print(mu_male)
print(covariance_male)
plt.figure(figsize=(6,6))
plt.plot(male_data.height, male_data.weight, 'ko')
plt.axis([60,80,120,285])
plt.title('raw')
plt.xlabel('height')
plt.ylabel('weight')
plt.axes().set_aspect(0.2)
plt.grid(True)
plt.show()
def calculate_2d_gaussian_confidence_region(mu, Sigma, p = 0.95, points = 200):
    """Returns a points x 2 numpy.ndarray tracing the p-confidence ellipse.

    Keyword arguments:
    mu -- mean
    Sigma -- covariance matrix
    p -- percent confidence
    points -- number of points to interpolate
    """
    # NOTE(review): the original docstring lost its triple quotes during
    # extraction, leaving bare prose statements (a SyntaxError); restored here.
    assert(len(mu) == len(Sigma))
    assert(np.all(Sigma == Sigma.T))
    eigenvalues, S = np.linalg.eig(Sigma)
    # Sort eigenpairs by decreasing eigenvalue so the major axis comes first.
    S = S[:,eigenvalues.argsort()[::-1]]
    eigenvalues = eigenvalues[eigenvalues.argsort()[::-1]]
    theta = np.linspace(0, 2*np.pi, num = points)
    # Semi-axis lengths follow from the chi-square quantile with 2 dof.
    x = np.sqrt(eigenvalues[0]*stats.chi2.ppf(p, df=2))*np.cos(theta)
    y = np.sqrt(eigenvalues[1]*stats.chi2.ppf(p, df=2))*np.sin(theta)
    # Rotate the axis-aligned ellipse into the eigenbasis, then translate to mu.
    return np.dot(S, np.array([x,y])).T + mu
def plot_raw_males(ax=None):
    """Plot the male height/weight samples (labelled by row index) together
    with the 95% Gaussian confidence ellipse, in raw data units.

    Reads the module-level mu_male, covariance_male and male_data.
    If no axes object is given, draws on the current axes.
    """
    if ax == None:
        ax = plt.gca()
    gaussian_fit_male = calculate_2d_gaussian_confidence_region(mu_male, covariance_male, p = 0.95, points = 100)
    ax.axis([60,80,90,285])
    ax.set_title('raw')
    ax.set_xlabel('height')
    ax.set_ylabel('weight')
    # Label each sample with its dataframe index instead of a marker.
    for row in male_data.itertuples():
        ax.text(row.height, row.weight, row.Index, horizontalalignment='center', verticalalignment='center')
    ax.set_aspect(0.2)
    # 95% confidence ellipse and the mean, both in red.
    ax.plot(gaussian_fit_male[:,0], gaussian_fit_male[:,1], linewidth=3, color='red')
    ax.plot(mu_male[0], mu_male[1], 'rx', markersize=10, markeredgewidth=3)
    ax.grid(True)
plt.show()
def standardize(x, mean, sd):
    """Standardize x to a z-score, (x - mean)/sd, assuming x is normally distributed."""
    # NOTE(review): the original docstring had its quotes stripped, leaving a
    # bare prose line (a SyntaxError); restored as a proper docstring.
    return (x - mean)/sd
def plot_standardized_males(ax=None):
    """Plot the male samples and the 95% confidence ellipse after per-axis
    standardization (z-scores using the marginal means and std devs).

    Reads the module-level mu_male, covariance_male and male_data.
    If no axes object is given, draws on the current axes.
    """
    if ax == None:
        ax = plt.gca()
    gaussian_fit_male = calculate_2d_gaussian_confidence_region(mu_male, covariance_male, p = 0.95, points = 100)
    ax.set_title('standardized')
    ax.set_xlabel('height')
    ax.set_ylabel('weight')
    # Each axis is standardized independently with its own mean and std dev.
    ax.plot(standardize(male_data.height, mu_male[0], np.sqrt(covariance_male[0,0])),
            standardize(male_data.weight, mu_male[1], np.sqrt(covariance_male[1,1])),
            " ")
    for row in male_data.itertuples():
        ax.text(standardize(row.height, mu_male[0], np.sqrt(covariance_male[0,0])),
                standardize(row.weight, mu_male[1], np.sqrt(covariance_male[1,1])),
                row.Index, horizontalalignment='center', verticalalignment='center')
    ax.set_aspect('equal')
    # Standardize the ellipse coordinates the same way as the data.
    ax.plot(standardize(gaussian_fit_male[:,0], mu_male[0], np.sqrt(covariance_male[0,0])),
            standardize(gaussian_fit_male[:,1], mu_male[1], np.sqrt(covariance_male[1,1])),
            linewidth=3, color='red')
    ax.plot(0, 0, 'rx', markersize=10, markeredgewidth=3)
    ax.grid(True)
plt.show()
def whiten(X, mu, Sigma):
    """Whiten X: center by mu, rotate into the eigenbasis of Sigma, and
    rescale each axis to unit variance."""
    assert(len(mu) == len(Sigma))
    assert(np.all(Sigma == Sigma.T))
    vals, vecs = np.linalg.eig(Sigma)
    # Order eigenpairs by decreasing eigenvalue.
    order = vals.argsort()[::-1]
    vals, vecs = vals[order], vecs[:, order]
    # Project the centered data onto the eigenvectors, then divide each
    # coordinate by the corresponding standard deviation sqrt(lambda).
    return np.dot(X - mu, vecs) / np.sqrt(vals)
def plot_whitened_males(ax=None):
    """Plot the male samples and the 95% confidence ellipse after whitening
    (rotation into the covariance eigenbasis plus rescaling to unit variance),
    so the ellipse becomes a circle centered at the origin.

    Reads the module-level mu_male, covariance_male and male_data.
    If no axes object is given, draws on the current axes.
    """
    if ax == None:
        ax = plt.gca()
    gaussian_fit_male = calculate_2d_gaussian_confidence_region(mu_male, covariance_male, p = 0.95, points = 100)
    whitened_gaussian_fit_male = whiten(gaussian_fit_male, mu_male, covariance_male)
    ax.set_title('whitened')
    ax.set_xlabel('height')
    ax.set_ylabel('weight')
    # Apply the same whitening transform to the data points as to the ellipse.
    whitened_male_data = whiten(np.array([male_data.height, male_data.weight]).T, mu_male, covariance_male)
    ax.plot(whitened_male_data[:,0], whitened_male_data[:,1], " ")
    for i in range(len(whitened_male_data)):
        ax.text(whitened_male_data[i, 0], whitened_male_data[i, 1],
                male_data.index[i], horizontalalignment='center', verticalalignment='center')
    ax.set_aspect('equal')
    ax.plot(whitened_gaussian_fit_male[:,0], whitened_gaussian_fit_male[:,1],
            linewidth=3, color='red')
    ax.plot(0, 0, 'rx', markersize=10, markeredgewidth=3)
    ax.grid(True)
plt.show()
fig = plt.figure(figsize=(20,8))
ax1 = fig.add_subplot(1,3,1)
ax2 = fig.add_subplot(1,3,2)
ax3 = fig.add_subplot(1,3,3)
plot_raw_males(ax1)
plot_standardized_males(ax2)
plot_whitened_males(ax3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, just read in data, and take a peek. The data can be found on GitHub.
Step2: We're told that for gender, 1 is male, and 2 is female. Part (a) says to extract the height/weight data corresponding to the males. Then, we fit a 2d Gaussian to the male data, using the empirical mean and covariance. Then, we'll plot this data.
Step3: Next, we'll calculate the empirical mean and covariance.
Step4: Let's plot the data now.
Step6: Let $\mathbf{x} \sim \mathcal{N}\left(\boldsymbol\mu, \Sigma\right)$, where $\mathbf{x} \in \mathbb{R}^p$. We can write $\Sigma = SDS^\intercal$ by the spectral theorem, where the columns of $S$ are orthonormal eigenvectors, and $D$ is a diagonal matrix of eigenvectors, $\lambda_1, \lambda_2,\ldots,\lambda_n$.
Step8: For part (b) says to do the same thing with standardized data.
Step9: Part (c) deals with whitening or sphereing the data. This involves transforming the data so that the dimensions are uncorrelated and have equal variances along the axes. Recall that
Step10: Now, we can plot all three figures together just like in the textbook.
|
2,033
|
<ASSISTANT_TASK:>
Python Code:
# Load the libraries
import numpy as np
import pandas as pd
from scipy import stats
from sklearn import linear_model
# Load the data again!
df = pd.read_csv("data/Weed_Price.csv", parse_dates=[-1])
df.sort(columns=['State','date'], inplace=True)
df1 = df[df.State=="California"].copy()
df1.set_index("date", inplace=True)
print df1.shape
idx = pd.date_range(df1.index.min(), df1.index.max())
df1 = df1.reindex(idx)
df1.fillna(method = "ffill", inplace=True)
print df1.shape
df1.head()
#Reading demographics data
demographics = pd.DataFrame.from_csv("data/Demographics_State.csv",header=0,index_col=False,sep=',')
demographics.rename(columns={'region':'State'}, inplace=True)
demographics.head()
df['State'] = df['State'].str.lower()
df.head()
df_demo = pd.merge(df, demographics, how="inner", on="State")
df_demo.head()
corr_bw_percapita_highq = stats.pearsonr(df_demo.per_capita_income, df_demo.HighQ)[0]
print corr_bw_percapita_highq
state_location = pd.read_csv("data/State_Location.csv")
state_location.head()
pd.unique(state_location.status)
df['year'] = pd.DatetimeIndex(df['date']).year
df['month'] = pd.DatetimeIndex(df['date']).month
df['week'] = pd.DatetimeIndex(df['date']).week
df['weekday'] = pd.DatetimeIndex(df['date']).weekday
df_demo_ca = df_demo[df_demo.State=="california"].copy()
df_demo_ca['year'] = pd.DatetimeIndex(df_demo_ca['date']).year
df_demo_ca['month'] = pd.DatetimeIndex(df_demo_ca['date']).month
df_demo_ca['week'] = pd.DatetimeIndex(df_demo_ca['date']).week
df_demo_ca['weekday'] = pd.DatetimeIndex(df_demo_ca['date']).weekday
df_demo_ca.head()
df_demo_ca.groupby("weekday").HighQ.mean()
df.groupby(["State", "weekday"]).HighQ.mean()
df_st_wk = df.groupby(["State", "weekday"]).HighQ.mean()
df_st_wk.reset_index()
#Answer:
model_data = df1.loc[:,['HighQ']].copy()
idx = pd.date_range(model_data.index.min(), model_data.index.max()+ 30)
model_data.reset_index(inplace=True)
model_data.set_index("index", inplace=True)
model_data = model_data.reindex(idx)
model_data.tail(35)
model_data['IND'] = np.arange(model_data.shape[0])
model_data.tail(35)
model_data['IND_SQ'] = model_data['IND']**2
x = model_data.ix[0:532, ["IND","IND_SQ"]]
y = model_data.ix[0:532, "HighQ"]
x_test = model_data.ix[532:, ["IND","IND_SQ"]]
print x.shape, y.shape
ols = linear_model.LinearRegression(fit_intercept=True)
ols.fit(x, y)
ols_predict = ols.predict(x_test)
ols_predict
ols.coef_
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Correlation
Step2: Exercise Find correlation between percent_white and highQ
Step3: Exercise Find mean prices of HighQ weed for states that are legal and for states that are illegal
Step4: Exercise If I need to buy weed on a wednesday, which state should I be in?
Step5: Regression
|
2,034
|
<ASSISTANT_TASK:>
Python Code:
def maxSetBitCount(s, k):
    """Return the maximum number of '1' characters in any window of length k of s."""
    n = len(s)
    # Count set bits in the first window s[0:k].
    window_ones = 0
    for idx in range(k):
        if s[idx] == '1':
            window_ones += 1
    best = window_ones
    # Slide the window one step at a time, updating the count incrementally:
    # add the entering character, drop the leaving one.
    for right in range(k, n):
        window_ones += (s[right] == '1') - (s[right - k] == '1')
        best = max(best, window_ones)
    return best
if __name__ == '__main__':
    # Demo: the densest length-3 window of "100111010" holds 3 set bits.
    # Bug fix: the original compared __name__ to ' __main __' (garbled with
    # spaces), which is never equal, so this demo never ran.
    s = "100111010"
    k = 3
    print(maxSetBitCount(s, k))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,035
|
<ASSISTANT_TASK:>
Python Code:
# Run cell with Ctrl + Enter
# Import main pycoQC module
from pycoQC.Barcode_split import Barcode_split
# Import helper functions from pycoQC
from pycoQC.common import jhelp, head, ls
jhelp(Barcode_split)
Barcode_split (
summary_file="./data/Guppy-2.2.4-basecall-1D-DNA_sequencing_summary+barcode.txt.gz",
output_unclassified=True,
output_dir="./results/")
Barcode_split (
summary_file="./data/Guppy-basecall-1D-DNA_sequencing_summary.txt.gz",
barcode_file="./data/Guppy-basecall-1D-DNA_deepbinner_barcoding_summary.txt.gz",
output_dir="./results/")
Barcode_split (
summary_file="./data/Guppy-basecall-1D-DNA_sequencing_summary.txt.gz",
output_dir="./results/")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Running Barcode_split
Step2: Basic usage
Step3: With externaly provided barcodes
Step4: If no barcode an error is raised
|
2,036
|
<ASSISTANT_TASK:>
Python Code:
# A bit of setup
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.neural_net import TwoLayerNet
from __future__ import print_function
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Returns the maximum relative error between x and y,
    guarding the denominator away from zero with 1e-8."""
    # NOTE(review): the original docstring had its quotes stripped, leaving a
    # bare prose line (a SyntaxError); restored as a proper docstring.
    return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5
def init_toy_model():
    """Create a small TwoLayerNet with a fixed random seed for repeatable checks.

    Uses the module-level input_size/hidden_size/num_classes constants.
    """
    np.random.seed(0)
    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)
def init_toy_data():
    """Generate a tiny seeded random input matrix X and fixed labels y
    (shapes driven by the module-level num_inputs/input_size constants)."""
    np.random.seed(1)
    X = 10 * np.random.randn(num_inputs, input_size)
    y = np.array([0, 1, 2, 2, 1])
    return X, y
net = init_toy_model()
X, y = init_toy_data()
scores = net.loss(X)
print('Your scores:')
print(scores)
print()
print('correct scores:')
correct_scores = np.asarray([
[-0.81233741, -1.27654624, -0.70335995],
[-0.17129677, -1.18803311, -0.47310444],
[-0.51590475, -1.01354314, -0.8504215 ],
[-0.15419291, -0.48629638, -0.52901952],
[-0.00618733, -0.12435261, -0.15226949]])
print(correct_scores)
print()
# The difference should be very small. We get < 1e-7
print('Difference between your scores and correct scores:')
print(np.sum(np.abs(scores - correct_scores)))
loss, _ = net.loss(X, y, reg=0.05)
correct_loss = 1.30378789133
# should be very small, we get < 1e-12
print('Difference between your loss and correct loss:')
print(np.sum(np.abs(loss - correct_loss)))
from cs231n.gradient_check import eval_numerical_gradient
# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = net.loss(X, y, reg=0.05)
# these should all be less than 1e-8 or so
for param_name in grads:
f = lambda W: net.loss(X, y, reg=0.05)[0]
param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)
print('%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name])))
net = init_toy_model()
stats = net.train(X, y, X, y,
learning_rate=1e-1, reg=5e-6,
num_iters=100, verbose=False)
print('Final training loss: ', stats['loss_history'][-1])
# plot the loss history
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
    # NOTE(review): the original docstring had its triple quotes stripped,
    # leaving bare prose statements (a SyntaxError); restored here.
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data
    mask = list(range(num_training, num_training + num_validation))
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = list(range(num_training))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Normalize the data: subtract the mean image (computed on training data
    # only, then applied to all three splits)
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # Reshape data to rows (flatten each image to a single feature vector)
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)

    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=2000, batch_size=200,
learning_rate=1e-4, learning_rate_decay=0.95,
reg=0.25, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)
# Plot the loss function and train / validation accuracies
def fig1():
    """Plot the training-loss history (top) and the train/val classification
    accuracy curves (bottom), reading the module-level `stats` dict."""
    plt.subplot(2, 1, 1)
    plt.plot(stats['loss_history'])
    plt.title('Loss history')
    plt.xlabel('Iteration')
    plt.ylabel('Loss')

    plt.subplot(2, 1, 2)
    plt.plot(stats['train_acc_history'], label='train')
    plt.plot(stats['val_acc_history'], label='val')
    plt.title('Classification accuracy history')
    plt.xlabel('Epoch')
    plt.ylabel('Clasification accuracy')
    plt.show()
fig1()
from cs231n.vis_utils import visualize_grid
# Visualize the weights of the network
def show_net_weights(net):
    """Visualize the network's first-layer weights W1 as a grid of 32x32x3 tiles."""
    W1 = net.params['W1']
    # Reshape each hidden unit's weight vector back into image shape,
    # with units along the leading axis.
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.show()
show_net_weights(net)
best_net = None # store the best model into this
#################################################################################
# TODO: Tune hyperparameters using the validation set. Store your best trained #
# model in best_net. #
# #
# To help debug your network, it may help to use visualizations similar to the #
# ones we used above; these visualizations will have significant qualitative #
# differences from the ones we saw above for the poorly tuned network. #
# #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to #
# write code to sweep through possible combinations of hyperparameters #
# automatically like we did on the previous exercises. #
#################################################################################
input_size = 32 * 32 * 3
hidden_size = 70
num_classes = 10
best_net = net = TwoLayerNet(input_size, hidden_size, num_classes)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=4000, batch_size=200,
learning_rate=9e-4, learning_rate_decay=0.95,
reg=0.4, verbose=True)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print('Validation accuracy: ', val_acc)
fig1()
show_net_weights(net)
#################################################################################
# END OF YOUR CODE #
#################################################################################
# visualize the weights of the best network
show_net_weights(best_net)
test_acc = (best_net.predict(X_test) == y_test).mean()
print('Test accuracy: ', test_acc)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Implementing a Neural Network
Step2: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
Step3: Forward pass
Step4: Forward pass
Step5: Backward pass
Step6: Train the network
Step8: Load the data
Step9: Train a network
Step10: Debug the training
Step11: Tune your hyperparameters
Step12: Run on the test set
|
2,037
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.0,<2.1"
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
b.set_value('sma@binary', 20)
b.set_value('q', 0.8)
b.set_value('ecc', 0.8)
b.set_value('per0', 45)
print phoebe.__version__
b.get_parameter('t0_supconj', context='component')
b.get_parameter('t0_perpass', context='component')
b.get_parameter('t0_perpass', context='constraint')
b.get_parameter('t0_ref', context='component')
b.get_parameter('t0_ref', context='constraint')
b.get_parameter('t0', context='system')
b.add_dataset('orb', times=np.linspace(-1,1,1001))
b.run_compute(ltte=False)
axs, artists = b.plot(x='xs', y='zs', time='t0_supconj')
axs, artists = b.plot(x='xs', y='zs', time='t0_perpass')
axs, artists = b.plot(x='xs', y='zs', time='t0_ref')
b.to_phase(0.0)
b.to_phase(0.0, component='binary', t0='t0_supconj')
b.to_phase(0.0, component='binary', t0='t0_perpass')
b.to_phase(0.0, component='binary', t0='t0_ref')
b.add_dataset('lc', times=np.linspace(0,1,51), ld_func='linear', ld_coeffs=[0.0])
b.run_compute(ltte=False, irrad_method='none', atm='blackbody')
axs, artists = b['lc01@model'].plot(x='phases', t0='t0_supconj', xlim=(-0.3,0.3))
axs, artists = b['lc01@model'].plot(x='phases', t0='t0_perpass', xlim=(-0.3,0.3))
axs, artists = b['lc01@model'].plot(x='phases', t0='t0_ref', xlim=(-0.3,0.3))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step2: And let's make our system a little more interesting so that we can discriminate between the various t0s
Step3: t0 Parameters
Step4: There are three t0 parameters that are available to define an orbit (but only one of which is editable at any given time), as well as a t0 parameter for the entire system. Let's first access the three t0 parameters for our binary orbit.
Step5: 't0_perpass' defines the time at which both components in our orbit is at periastron passage. By default this parameter is constrained by 't0_supconj'. For more details or information on how to change which parameter is editable, see the Constraints Tutorial.
Step6: The 't0_ref' defines the time at which the primary component in our orbit passes an arbitrary reference point. This 't0_ref' is defined in the same way as PHOEBE legacy's 'HJD0' parameter, so is included for convenience translating between the two.
Step7: In addition, there is a single 't0' parameter that is system-wide. This parameter simply defines the time at which all parameters are defined and therefore at which all computations start. The value of this parameter begins to play an important role if any parameter is given a time-derivative (see apsidal motion for an example) or when using N-body instead of Keplerian dynamics (coming in a future release).
Step8: Influence on Oribits (positions)
Step9: To visualize where these times are with respect to the orbits, we can plot the model orbit and highlight the positions of each star at the times defined by these parameters. Note here that the observer is in the positive z-direction.
Step10: Influence on Phasing
Step11: Similarly, if plotting phases on any axis, passing the 't0' keyword will set the zero-phase accordingly. To see this, let's compute a light curve and phase it with the various t0s shown in the orbits above.
|
2,038
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.datasets as data
%matplotlib inline
sns.set_context('poster')
sns.set_style('white')
sns.set_color_codes()
plot_kwds = {'alpha' : 0.5, 's' : 80, 'linewidths':0}
moons, _ = data.make_moons(n_samples=50, noise=0.05)
blobs, _ = data.make_blobs(n_samples=50, centers=[(-0.75,2.25), (1.0, 2.0)], cluster_std=0.25)
test_data = np.vstack([moons, blobs])
plt.scatter(test_data.T[0], test_data.T[1], color='b', **plot_kwds)
import hdbscan
clusterer = hdbscan.HDBSCAN(min_cluster_size=5, gen_min_span_tree=True)
clusterer.fit(test_data)
clusterer.minimum_spanning_tree_.plot(edge_cmap='viridis',
edge_alpha=0.6,
node_size=80,
edge_linewidth=2)
clusterer.single_linkage_tree_.plot(cmap='viridis', colorbar=True)
clusterer.condensed_tree_.plot()
clusterer.condensed_tree_.plot(select_clusters=True, selection_palette=sns.color_palette())
palette = sns.color_palette()
cluster_colors = [sns.desaturate(palette[col], sat)
if col >= 0 else (0.5, 0.5, 0.5) for col, sat in
zip(clusterer.labels_, clusterer.probabilities_)]
plt.scatter(test_data.T[0], test_data.T[1], c=cluster_colors, **plot_kwds)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The next thing we'll need is some data. To make for an illustrative example we'll need the data size to be fairly small so we can see what is going on. It will also be useful to have several clusters, preferably of different kinds. Fortunately sklearn has facilities for generating sample clustering data so I'll make use of that and make a dataset of one hundred data points.
Step2: Now, the best way to explain HDBSCAN is actually just use it and then go through the steps that occurred along the way teasing out what is happening at each step. So let's load up the hdbscan library and get to work.
Step3: So now that we have clustered the data -- what actually happened? We can break it out into a series of steps
Step4: Build the cluster hierarchy
Step5: This brings us to the point where robust single linkage stops. We want more though; a cluster hierarchy is good, but we really want a set of flat clusters. We could do that by drawing a a horizontal line through the above diagram and selecting the clusters that it cuts through. This is in practice what DBSCAN effectively does (declaring any singleton clusters at the cut level as noise). The question is, how do we know where to draw that line? DBSCAN simply leaves that as a (very unintuitive) parameter. Worse, we really want to deal with variable density clusters and any choice of cut line is a choice of mutual reachability distance to cut at, and hence a single fixed density level. Ideally we want to be able to cut the tree at different places to select our clusters. This is where the next steps of HDBSCAN begin and create the difference from robust single linkage.
Step6: This is much easier to look at and deal with, particularly in as simple a clustering problem as our current test dataset. However we still need to pick out clusters to use as a flat clustering. Looking at the plot above should give you some ideas about how one might go about doing this.
Step7: Now that we have the clusters it is a simple enough matter to turn that into cluster labelling as per the sklearn API. Any point not in a selected cluster is simply a noise point (and assigned the label -1). We can do a little more though
|
2,039
|
<ASSISTANT_TASK:>
Python Code:
# --- Solve Ax = b for a square, invertible 3x3 system using the explicit inverse ---
A = np.array([[1, 3, -2], [3, 5, 6], [2, 4, 3]])
A
b = np.array([[5], [7], [8]])
b
Ainv = np.linalg.inv(A)
Ainv
x = np.dot(Ainv, b) # multiply the inverse from the left: x = A^-1 b
x
np.dot(A, x) - b # not exactly zero due to floating-point error; inv() is not used to solve systems in practice, only to inspect the inverse itself
x, resid, rank, s = np.linalg.lstsq(A, b) # same answer here because A is well-conditioned
x
# --- Determinant of a random 3x3 matrix ---
np.random.seed(0)
A = np.random.randn(3, 3)
A
np.linalg.det(A)
# --- Least squares for an overdetermined system (3 equations, 2 unknowns) ---
A = np.array([[2, 0], [-1, 1], [0, 2]])
A
b = np.array([[1], [0], [-1]])
b
Apinv = np.dot(np.linalg.inv(np.dot(A.T, A)), A.T)  # normal-equations pseudoinverse: (A^T A)^-1 A^T
Apinv
x = np.dot(Apinv, b)
x
np.dot(A, x) - b
x, resid, rank, s = np.linalg.lstsq(A, b) # resid = residual (error), rank = matrix rank, s = singular values
x
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 위 해결 방법에는 두 가지 의문이 존재한다. 우선 역행렬이 존재하는지 어떻게 알 수 있는가? 또 두 번째 만약 미지수의 수와 방정식의 수가 다르다면 어떻게 되는가?
Step2: 행렬식과 역행렬 사이에는 다음의 관계가 있다.
|
2,040
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
!pip install --upgrade pip==21.3
!pip install -U seaborn scikit-learn model-card-toolkit
from datetime import date
from io import BytesIO
from IPython import display
import model_card_toolkit as mctlib
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_roc_curve, plot_confusion_matrix
import base64
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import uuid
cancer = load_breast_cancer()
X = pd.DataFrame(cancer.data, columns=cancer.feature_names)
y = pd.Series(cancer.target)
X_train, X_test, y_train, y_test = train_test_split(X, y)
X_train.head()
y_train.head()
# Helper that serializes the active matplotlib figure into the base64 PNG
# string format the model card accepts.
def plot_to_str():
    """Return the current matplotlib figure as a base64-encoded PNG string."""
    buffer = BytesIO()
    plt.savefig(buffer, format='png')
    encoded = base64.encodebytes(buffer.getvalue())
    return encoded.decode('utf-8')
# Plot the mean radius feature for both the train and test sets
sns.displot(x=X_train['mean radius'], hue=y_train)
mean_radius_train = plot_to_str()
sns.displot(x=X_test['mean radius'], hue=y_test)
mean_radius_test = plot_to_str()
# Plot the mean texture feature for both the train and test sets
sns.displot(x=X_train['mean texture'], hue=y_train)
mean_texture_train = plot_to_str()
sns.displot(x=X_test['mean texture'], hue=y_test)
mean_texture_test = plot_to_str()
# Create a classifier and fit the training data
clf = GradientBoostingClassifier().fit(X_train, y_train)
# Plot a ROC curve
plot_roc_curve(clf, X_test, y_test)
roc_curve = plot_to_str()
# Plot a confusion matrix
plot_confusion_matrix(clf, X_test, y_test)
confusion_matrix = plot_to_str()
mct = mctlib.ModelCardToolkit()
model_card = mct.scaffold_assets()
model_card.model_details.name = 'Breast Cancer Wisconsin (Diagnostic) Dataset'
model_card.model_details.overview = (
'This model predicts whether breast cancer is benign or malignant based on '
'image measurements.')
model_card.model_details.owners = [
mctlib.Owner(name= 'Model Cards Team', contact='model-cards@google.com')
]
model_card.model_details.references = [
mctlib.Reference(reference='https://archive.ics.uci.edu/ml/datasets/Breast+Cancer+Wisconsin+(Diagnostic)'),
mctlib.Reference(reference='https://minds.wisconsin.edu/bitstream/handle/1793/59692/TR1131.pdf')
]
model_card.model_details.version.name = str(uuid.uuid4())
model_card.model_details.version.date = str(date.today())
model_card.considerations.ethical_considerations = [mctlib.Risk(
name=('Manual selection of image sections to digitize could create '
'selection bias'),
mitigation_strategy='Automate the selection process'
)]
model_card.considerations.limitations = [mctlib.Limitation(description='Breast cancer diagnosis')]
model_card.considerations.use_cases = [mctlib.UseCase(description='Breast cancer diagnosis')]
model_card.considerations.users = [mctlib.User(description='Medical professionals'), mctlib.User(description='ML researchers')]
model_card.model_parameters.data.append(mctlib.Dataset())
model_card.model_parameters.data[0].graphics.description = (
f'{len(X_train)} rows with {len(X_train.columns)} features')
model_card.model_parameters.data[0].graphics.collection = [
mctlib.Graphic(image=mean_radius_train),
mctlib.Graphic(image=mean_texture_train)
]
model_card.model_parameters.data.append(mctlib.Dataset())
model_card.model_parameters.data[1].graphics.description = (
f'{len(X_test)} rows with {len(X_test.columns)} features')
model_card.model_parameters.data[1].graphics.collection = [
mctlib.Graphic(image=mean_radius_test),
mctlib.Graphic(image=mean_texture_test)
]
model_card.quantitative_analysis.graphics.description = (
'ROC curve and confusion matrix')
model_card.quantitative_analysis.graphics.collection = [
mctlib.Graphic(image=roc_curve),
mctlib.Graphic(image=confusion_matrix)
]
mct.update_model_card(model_card)
# Return the model card document as an HTML page
html = mct.export_format()
display.display(display.HTML(html))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Scikit-Learn Model Card Toolkit Demo
Step2: Did you restart the runtime?
Step3: Load data
Step4: Plot data
Step5: Train model
Step6: Evaluate model
Step7: Create a model card
Step8: Annotate information into model card
Step9: Generate model card
|
2,041
|
<ASSISTANT_TASK:>
Python Code:
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Install jdk8
!apt-get install openjdk-8-jdk-headless -qq > /dev/null
import os
# Set environment variable JAVA_HOME.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
!update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
!java -version
import sys
# Set current python version
python_version = f"3.7.10"
# Install Miniconda
!wget https://repo.continuum.io/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh
!chmod +x Miniconda3-4.5.4-Linux-x86_64.sh
!./Miniconda3-4.5.4-Linux-x86_64.sh -b -f -p /usr/local
# Update Conda
!conda install --channel defaults conda python=$python_version --yes
!conda update --channel defaults --all --yes
# Append to the sys.path
_ = (sys.path
.append(f"/usr/local/lib/python3.7/site-packages"))
os.environ['PYTHONHOME']="/usr/local"
# Install latest pre-release version of Analytics Zoo
# Installing Analytics Zoo from pip will automatically install pyspark, bigdl, and their dependencies.
!pip install --pre --upgrade analytics-zoo
# Install python dependencies
!pip install torch==1.7.1 torchvision==0.8.2
!pip install six cloudpickle
!pip install jep==3.9.0
# import necesary libraries and modules
from __future__ import print_function
import os
import argparse
from zoo.orca import init_orca_context, stop_orca_context
from zoo.orca import OrcaContext
# recommended to set it to True when running Analytics Zoo in Jupyter notebook.
OrcaContext.log_output = True # (this will display terminal's stdout and stderr in the Jupyter notebook).
cluster_mode = "local"
if cluster_mode == "local":
init_orca_context(cores=1, memory="2g") # run in local mode
elif cluster_mode == "k8s":
init_orca_context(cluster_mode="k8s", num_nodes=2, cores=4) # run on K8s cluster
elif cluster_mode == "yarn":
init_orca_context(
cluster_mode="yarn-client", cores=4, num_nodes=2, memory="2g",
driver_memory="10g", driver_cores=1,
conf={"spark.rpc.message.maxSize": "1024",
"spark.task.maxFailures": "1",
"spark.driver.extraJavaOptions": "-Dbigdl.failure.retryTimes=1"}) # run on Hadoop YARN cluster
import torch
import torch.nn as nn
import torch.nn.functional as F
class LeNet(nn.Module):
    """Classic LeNet-style CNN for 28x28 single-channel images, 10 output classes."""

    def __init__(self):
        super(LeNet, self).__init__()
        # Two convolutional feature extractors followed by a two-layer classifier.
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4*4*50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        # conv -> ReLU -> 2x2 max-pool, applied twice
        features = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        features = F.max_pool2d(F.relu(self.conv2(features)), 2, 2)
        # Flatten to (batch, 4*4*50) and classify.
        flat = features.view(-1, 4*4*50)
        hidden = F.relu(self.fc1(flat))
        logits = self.fc2(hidden)
        return F.log_softmax(logits, dim=1)
model = LeNet()
model.train()
criterion = nn.NLLLoss()
lr = 0.001
adam = torch.optim.Adam(model.parameters(), lr)
import torch
from torchvision import datasets, transforms
torch.manual_seed(0)
dir='/tmp/dataset'
batch_size=320
test_batch_size=320
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(dir, train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size= batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(dir, train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=test_batch_size, shuffle=False)
from zoo.orca.learn.pytorch import Estimator
from zoo.orca.learn.metrics import Accuracy
est = Estimator.from_torch(model=model, optimizer=adam, loss=criterion, metrics=[Accuracy()])
from zoo.orca.learn.trigger import EveryEpoch
est.fit(data=train_loader, epochs=1, validation_data=test_loader,
checkpoint_trigger=EveryEpoch())
result = est.evaluate(data=test_loader)
for r in result:
print(r, ":", result[r])
# stop orca context when program finishes
stop_orca_context()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Environment Preparation
Step2: Install Analytics Zoo
Step3: You can install the latest pre-release version using pip install --pre --upgrade analytics-zoo.
Step4: Distributed PyTorch using Orca APIs
Step5: Step 1
Step6: This is the only place where you need to specify local or distributed mode. View Orca Context for more details.
Step7: Step 3
Step8: Step 4
Step9: Next, fit and evaluate using the Estimator.
Step10: Finally, evaluate using the Estimator.
Step11: The accuracy of this model has reached 98%.
|
2,042
|
<ASSISTANT_TASK:>
Python Code:
with open('example_run.csv') as f: s = f.read()
N = 10
runs = [[1/N for _ in range(N)]]
for line in s.split('\n'):
line = line.strip('[]')
if len(line) > 0:
li = [float(i) for i in line.split(',')]
runs.append(li)
for i, r in enumerate(runs):
plt.bar(list(range(10)), r)
plt.xlabel('Location')
plt.ylabel('Likelihood after {} iterations'.format(i))
plt.xticks(range(N))
plt.show()
fig, ax = plt.subplots()
# fig.set_tight_layout(True)
ax.set_xlim((0, 10))
ax.set_ylim((0, 1))
line, = ax.plot([], [])
x = list(range(N))
ylabel_func = lambda i: 'Likelihood after {} iterations'.format(i)
def init():
    """FuncAnimation init callback: start the animation with an empty line artist."""
    line.set_data([], [])
    return (line, )
def animate(i):
    """FuncAnimation frame callback: plot the i-th belief distribution from `runs`."""
    y = runs[i]  # likelihood values after i iterations (module-level `runs`)
    line.set_data(x, y)
    return (line,)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=72, interval=40, blit=True)
# HTML(anim.to_html5_video())
rc('animation', html='html5')
anim
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In the next plots you will see that, at the beginning, the likelihood for the fault location is evenly distributed, since no observations have been made yet.
|
2,043
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_iris
iris = load_iris()
test_idx = [0, 50, 100]
train_y = np.delete(iris.target, test_idx)
train_X = np.delete(iris.data, test_idx, axis=0)
test_y = iris.target[test_idx]
test_X = iris.data[test_idx]
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(train_X,train_y)
from sklearn.externals.six import StringIO
import pydot
import matplotlib.image as mpimg
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data, feature_names=iris.feature_names,
class_names=iris.target_names,
filled=True, rounded=True, impurity=False)
pydot_graph = pydot.graph_from_dot_data(dot_data.getvalue())
png_str = pydot_graph.create_png(prog='dot')
# treat the dot output string as an image file
sio = StringIO()
sio.write(png_str)
sio.seek(0)
img = mpimg.imread(sio)
# plot the image
f, axes = plt.subplots(1, 1, figsize=(12,12))
imgplot = axes.imshow(img, aspect='equal')
plt.show()
from sklearn.metrics import accuracy_score
print(accuracy_score(test_y, clf.predict(test_X)))
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier()
clf = clf.fit(train_X,train_y)
print(accuracy_score(test_y, clf.predict(test_X)))
from scipy.spatial import distance
class ScrappyKNN(object):
    """Minimal 1-nearest-neighbor classifier with a scikit-learn style API.

    Memorizes the training set verbatim and labels each query point with the
    label of its single closest training example (Euclidean distance).
    """

    def fit(self, X_train, y_train):
        """Store the training examples and their labels; return self for chaining."""
        self.X_train = X_train
        self.y_train = y_train
        return self

    def predict(self, X_test):
        """Return a list of predicted labels, one per row of X_test."""
        return [self.closest(row) for row in X_test]

    def closest(self, row):
        """Return the label of the stored training example nearest to `row`."""
        # min() over indices replaces the hand-rolled best-so-far loop.
        best_index = min(range(len(self.X_train)),
                         key=lambda i: distance.euclidean(row, self.X_train[i]))
        return self.y_train[best_index]
clf = ScrappyKNN()
clf = clf.fit(train_X,train_y)
print(accuracy_score(test_y, clf.predict(test_X)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Choosing a dataset
Step2: Splitting the dataset
Step3: Decision Tree Classifier
Step4: Visualize the decision tree
Step5: Evaluating the model
Step6: KNN-Classifier
Step7: Implementing your own KNN
|
2,044
|
<ASSISTANT_TASK:>
Python Code:
1 % 2
# code goes here
# code for 1
import numpy as np
random_number = np.random.randint(35, 76, 1)
# put your code below here
# code for 2
import numpy as np
data = np.random.randint(0, 10, 100) # generate 100 integers between 0 & 10 (both included)
# put your code below here
# Below is function, we get to them soon
# Remember to change the last parameter of the "check_while_loop"
def check_while_loop(orignial_data, new_data):
    """Print whether new_data is exactly orignial_data with every element raised by one."""
    difference = new_data - orignial_data
    if difference.max() == 1:
        print("success!")
    else:
        print("something went wrong")
check_while_loop(data, your_data_here)
n = 10
for i in range(n): # *i* = counter, *range(n)* the sequence
print(i)
# code for 1
import numpy as np
random_number = np.random.randint(35, 76, 1)
# put your code below here
# code for 2
import numpy as np
data = np.random.randint(0, 10, 100) # generate 100 integers between 0 & 10 (both included)
# put your code below here
# Remember to change the last parameter of the "check_for_loop"
def check_for_loop(orignial_data, new_data):
    """Print whether new_data equals orignial_data shifted up by exactly one."""
    delta = new_data - orignial_data
    message = "something went wrong" if delta.max() != 1 else "success!"
    print(message)
check_for_loop(data, your_data_here)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercises
Step2: Boolean expressions
Step3: For loop example
Step4: Exercises
|
2,045
|
<ASSISTANT_TASK:>
Python Code:
%pylab inline
# Import libraries
from __future__ import absolute_import, division, print_function
# Ignore warnings
import warnings
warnings.filterwarnings('ignore')
import sys
sys.path.append('tools/')
import numpy as np
import pandas as pd
import math
# Graphing Libraries
import matplotlib.pyplot as pyplt
import seaborn as sns
sns.set_style("white")
# Configure for presentation
np.set_printoptions(threshold=50, linewidth=50)
import matplotlib as mpl
mpl.rc('font', size=16)
from IPython.display import display
def sample(num_sample, top):
    """Draw a random sample (with replacement) of row indices from a table.

    The original docstring had lost its triple quotes, leaving bare text in
    the function body (a SyntaxError); restored here.

    Parameters
    ----------
    num_sample : int
        Number of indices to draw.
    top : pandas.DataFrame
        Table to sample from.

    Returns
    -------
    list of int
        `num_sample` row positions, each drawn uniformly from [0, len(top)).
    """
    # Each index is drawn independently, so the same row may appear twice.
    return [np.random.randint(0, len(top)) for _ in range(num_sample)]
def sample_no_replacement(num_sample, top):
    """Draw a random sample (without replacement) of row indices from a table.

    Replaces the manual draw-then-`np.setdiff1d` loop with a single
    `np.random.choice(..., replace=False)` call, and restores the docstring
    quoting lost in the original (bare text in the body was a SyntaxError).

    Parameters
    ----------
    num_sample : int
        Number of distinct indices to draw (must be <= len(top)).
    top : pandas.DataFrame
        Table to sample from.

    Returns
    -------
    list of int
        `num_sample` distinct row positions from [0, len(top)).
    """
    return list(np.random.choice(len(top), size=num_sample, replace=False))
die = pd.DataFrame()
die["Face"] = [1,2,3,4,5,6]
die
coin = pd.DataFrame()
coin["Face"] = [1,2]
coin
index_ = sample(3, die)
df = die.ix[index_, :]
df
index_ = sample(1, coin)
df = coin.ix[index_, :]
df
def sum_draws(n, box):
    """Plot a histogram of the sum of n draws (with replacement) from a box model.

    Simulates n sums, each the total of n tickets drawn uniformly with
    replacement from box.Content, and plots their normalized histogram.
    Fixes from the original: restored the triple-quoted docstring (bare text
    in the body was a SyntaxError), `numpy.zeros` -> `np.zeros` for
    consistency with the file's alias, `.ix` -> `.iloc` (removed from
    pandas), and `normed=` -> `density=` (removed from matplotlib).

    Parameters
    ----------
    n : int
        Number of draws per sum, and also the number of simulated sums.
    box : pandas.DataFrame
        Box model with a 'Content' column of ticket values.

    Raises
    ------
    ValueError
        If n is not positive.
    """
    if n <= 0:
        raise ValueError('n has to be greater than 0')
    data = np.zeros(shape=(n, 1))
    for i in range(n):
        # One simulated sum: n tickets drawn uniformly with replacement.
        index_ = np.random.randint(0, len(box), n)
        df = box.iloc[index_, :]
        data[i] = df.Content.sum()
    bins = np.arange(data.min() - 0.5, data.max() + 1, 1)
    pyplt.hist(data, bins=bins, density=True)
    pyplt.ylabel('percent per unit')
    pyplt.xlabel('Number on ticket')
    pyplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
box = pd.DataFrame()
box["Content"] = [0,1,2,3,4]
pyplt.rcParams['figure.figsize'] = (4, 3)
sum_draws(100, box)
pyplt.rcParams['figure.figsize'] = (4, 3)
low, high = box.Content.min() - 0.5, box.Content.max() + 1
bins = np.arange(low, high, 1)
box.plot.hist(bins=bins, normed=True)
pyplt.ylabel('percent per unit')
pyplt.xlabel('Number on ticket')
pyplt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.);
sum_draws(1000, box)
def number_of_heads(n, box):
    """Simulate n coin tosses and return the total number of heads.

    Fixes from the original: restored the triple-quoted docstring (bare text
    in the body was a SyntaxError), and the draw now sums the actual ticket
    values from box.Content — the original summed the raw row *indices*,
    which was only correct by coincidence for a box whose Content is [0, 1].

    Parameters
    ----------
    n : int
        Number of tosses (must be positive).
    box : pandas.DataFrame
        Box model with a 'Content' column (e.g. [0, 1] where 1 = heads).

    Returns
    -------
    Sum of the n drawn ticket values.

    Raises
    ------
    ValueError
        If n is not positive.
    """
    if n <= 0:
        raise ValueError('n has to be greater than 0')
    indices = np.random.randint(0, len(box), n)
    data = box.Content.values[indices]
    return data.sum()
box = pd.DataFrame()
box["Content"] = [0,1]
low, high, step = 100, 10000, 2
length = len(range(low, high, step))
num_tosses = numpy.zeros(shape=(length,1))
num_heads = numpy.zeros(shape=(length,1))
chance_error = numpy.zeros(shape=(length,1))
percentage_difference = numpy.zeros(shape=(length,1))
i= 0
for n in range(low, high, step):
observed = number_of_heads(n, box)
expected = n//2
num_tosses[i] = n
num_heads[i] = observed
chance_error[i] = math.fabs(expected - observed)
percentage_difference[i] = math.fabs(((num_heads[i] / num_tosses[i]) * 100) - 50)
i += 1
avg_heads = pd.DataFrame(index= range(low, high, step) )
avg_heads['num_tosses'] = num_tosses
avg_heads['num_heads'] = num_heads
avg_heads['chance_error'] = chance_error
avg_heads['percentage_difference'] = percentage_difference
avg_heads.reset_index(inplace=True)
pyplt.rcParams['figure.figsize'] = (8, 3)
pyplt.plot(avg_heads.chance_error, 'ro', markersize=1)
pyplt.ylim(-50, 500)
pyplt.title('Modeling the Law of Averages')
pyplt.ylabel('Difference between \nObserved versus Expected')
pyplt.xlabel('Number of Tosses');
pyplt.rcParams['figure.figsize'] = (8, 4)
ax = pyplt.plot(avg_heads.percentage_difference, 'bo', markersize=1)
pyplt.ylim(-5, 20)
pyplt.ylabel('The Percentage Difference\n Between Observed and Expected')
pyplt.xlabel('Number of Tosses');
pyplt.rcParams['figure.figsize'] = (4, 3)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: Uniform Sample
Step4: Dice
Step5: Coin
Step7: We can simulate the act of rolling dice by just pulling out rows
Step9: Modeling the Law of Averages
|
2,046
|
<ASSISTANT_TASK:>
Python Code:
%%bash
cat /root/src/main/python/debug/debug_model_cpu.py
%%bash
cat /root/src/main/python/debug/debug_model_gpu.py
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Run the following in the Terminal (CPU)
|
2,047
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
pd.set_option('max_rows', 5)
from learntools.core import binder; binder.bind(globals())
from learntools.pandas.creating_reading_and_writing import *
print("Setup complete.")
# Your code goes here. Create a dataframe matching the above diagram and assign it to the variable fruits.
fruits = ____
# Check your answer
q1.check()
fruits
#%%RM_IF(PROD)%%
dat = [[30, 21]]
cols = ['Apples', 'Bananas']
fruits = pd.DataFrame(dat, columns=cols)
q1.assert_check_passed()
#%%RM_IF(PROD)%%
fruits = pd.DataFrame({'Apples': [30], 'Bananas': [21]})
q1.assert_check_passed()
#%%RM_IF(PROD)%%
fruits = pd.DataFrame({'Apples': [30.], 'Bananas': [21.]})
q1.assert_check_failed()
#%%RM_IF(PROD)%%
fruits = pd.DataFrame({'Apples': [30], 'bananas': [21]})
q1.assert_check_failed()
#_COMMENT_IF(PROD)_
q1.hint()
#_COMMENT_IF(PROD)_
q1.solution()
# Your code goes here. Create a dataframe matching the above diagram and assign it to the variable fruit_sales.
fruit_sales = ____
# Check your answer
q2.check()
fruit_sales
#%%RM_IF(PROD)%%
fruit_sales = pd.DataFrame([[35, 21], [41, 34]], columns=['Apples', 'Bananas'],
index=['2017 Sales', '2018 Sales'])
q2.assert_check_passed()
#%%RM_IF(PROD)%%
fruit_sales = pd.DataFrame([[35, 21], [41, 34]][::-1], columns=['Apples', 'Bananas'],
index=['2017 Sales', '2018 Sales'])
q2.assert_check_failed()
#_COMMENT_IF(PROD)_
q2.hint()
#_COMMENT_IF(PROD)_
q2.solution()
ingredients = ____
# Check your answer
q3.check()
ingredients
#%%RM_IF(PROD)%%
quantities = ['4 cups', '1 cup', '2 large', '1 can']
items = ['Flour', 'Milk', 'Eggs', 'Spam']
ingredients = pd.Series(quantities, index=items, name='Dinner')
q3.assert_check_passed()
#%%RM_IF(PROD)%%
quantities = ['4 cups', '1 cup', '2 large', '1 can']
items = ['Flour', 'Milk', 'Eggs', 'Spam']
ingredients = pd.Series(quantities, index=items)
q3.assert_check_failed()
#%%RM_IF(PROD)%%
quantities = ['4 cups', '1 cup', '2 large', '1 can'][::-1]
items = ['Flour', 'Milk', 'Eggs', 'Spam'][::-1]
ingredients = pd.Series(quantities, index=items, name='Dinner')
q3.assert_check_failed()
#_COMMENT_IF(PROD)_
q3.hint()
#_COMMENT_IF(PROD)_
q3.solution()
reviews = ____
# Check your answer
q4.check()
reviews
#%%RM_IF(PROD)%%
reviews = pd.read_csv('../input/wine-reviews/winemag-data_first150k.csv', index_col=0)
q4.assert_check_passed()
#%%RM_IF(PROD)%%
reviews = pd.read_csv('../input/wine-reviews/winemag-data_first150k.csv')
q4.assert_check_failed()
#_COMMENT_IF(PROD)_
q4.hint()
#_COMMENT_IF(PROD)_
q4.solution()
#%%RM_IF(PROD)%%
import os
def cleanup_ungulates():
    """Remove cows_and_goats.csv if it exists.

    Resets file-system state between exercise checks; a missing file is not
    an error. (Restores the triple-quoted docstring that had degraded into
    bare text — a SyntaxError — in the original.)
    """
    try:
        os.remove('cows_and_goats.csv')
    except FileNotFoundError:
        pass
cleanup_ungulates()
animals = pd.DataFrame({'Cows': [12, 20], 'Goats': [22, 19]}, index=['Year 1', 'Year 2'])
animals
# Your code goes here
# Check your answer
q5.check()
#%%RM_IF(PROD)%%
animals.to_csv("cows_and_goats.csv")
q5.assert_check_passed()
cleanup_ungulates()
#%%RM_IF(PROD)%%
animals.to_csv("cows_and_goats.csv", index=False)
q5.assert_check_failed()
cleanup_ungulates()
#_COMMENT_IF(PROD)_
q5.hint()
#_COMMENT_IF(PROD)_
q5.solution()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercises
Step2: 2.
Step3: 3.
Step5: 4.
Step6: 5.
Step7: In the cell below, write code to save this DataFrame to disk as a csv file with the name cows_and_goats.csv.
|
2,048
|
<ASSISTANT_TASK:>
Python Code:
#|all_slow
#|all_multicuda
from fastai.vision.all import *
from fastai.text.all import *
from fastai.tabular.all import *
from fastai.collab import *
from accelerate import notebook_launcher
from fastai.distributed import *
# from accelerate.utils import write_basic_config
# write_basic_config()
path = untar_data(URLs.PETS)/'images'
def train():
    """Fine-tune a ResNet-34 pet classifier for one epoch under distributed training.

    Intended to be launched via accelerate's notebook_launcher; relies on the
    module-level `path` pointing at the extracted Pets images.
    """
    # Label rule: filenames starting with an uppercase letter form one class.
    # NOTE(review): presumably the Pets dataset's cat/dog naming convention — confirm.
    dls = ImageDataLoaders.from_name_func(
        path, get_image_files(path), valid_pct=0.2,
        label_func=lambda x: x[0].isupper(), item_tfms=Resize(224))
    learn = vision_learner(dls, resnet34, metrics=error_rate).to_fp16()
    # distrib_ctx wraps training for multi-process execution; sync_bn=False
    # skips synchronized batch norm across workers.
    with learn.distrib_ctx(in_notebook=True, sync_bn=False):
        learn.fine_tune(1)
notebook_launcher(train, num_processes=2)
path = untar_data(URLs.CAMVID_TINY)
def train():
dls = SegmentationDataLoaders.from_label_func(
path, bs=8, fnames = get_image_files(path/"images"),
label_func = lambda o: path/'labels'/f'{o.stem}_P{o.suffix}',
codes = np.loadtxt(path/'codes.txt', dtype=str)
)
learn = unet_learner(dls, resnet34)
with learn.distrib_ctx(in_notebook=True, sync_bn=False):
learn.fine_tune(8)
notebook_launcher(train, num_processes=2)
path = untar_data(URLs.IMDB_SAMPLE)
df = pd.read_csv(path/'texts.csv')
def train():
imdb_clas = DataBlock(blocks=(TextBlock.from_df('text', seq_len=72), CategoryBlock),
get_x=ColReader('text'), get_y=ColReader('label'), splitter=ColSplitter())
dls = imdb_clas.dataloaders(df, bs=64)
learn = rank0_first(lambda: text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5, metrics=accuracy))
with learn.distrib_ctx(in_notebook=True):
learn.fine_tune(4, 1e-2)
notebook_launcher(train, num_processes=2)
path = untar_data(URLs.ADULT_SAMPLE)
df = pd.read_csv(path/'adult.csv')
def train():
dls = TabularDataLoaders.from_csv(path/'adult.csv', path=path, y_names="salary",
cat_names = ['workclass', 'education', 'marital-status', 'occupation',
'relationship', 'race'],
cont_names = ['age', 'fnlwgt', 'education-num'],
procs = [Categorify, FillMissing, Normalize])
learn = tabular_learner(dls, metrics=accuracy)
with learn.distrib_ctx(in_notebook=True):
learn.fit_one_cycle(3)
notebook_launcher(train, num_processes=2)
path = untar_data(URLs.ML_SAMPLE)
df = pd.read_csv(path/'ratings.csv')
def train():
dls = CollabDataLoaders.from_df(df)
learn = collab_learner(dls, y_range=(0.5,5.5))
with learn.distrib_ctx(in_notebook=True):
learn.fine_tune(6)
notebook_launcher(train, num_processes=2)
path = untar_data(URLs.BIWI_HEAD_POSE)
def img2pose(x): return Path(f'{str(x)[:-7]}pose.txt')  # swap the 7-char image suffix for 'pose.txt'; assumes filenames end in e.g. 'rgb.jpg' — TODO confirm
def get_ctr(f):
    """Return the 2D image-plane center point for image file `f`.

    Reads the 3D head center from the matching pose file and projects it
    using the module-level calibration matrix `cal` (loaded from rgb.cal).
    """
    ctr = np.genfromtxt(img2pose(f), skip_header=3)
    # Projection: scale x/y by cal's diagonal entries, divide by depth (ctr[2]),
    # then offset by cal's third column.
    # NOTE(review): looks like a pinhole model with cal holding focal lengths and
    # principal point — verify against the rgb.cal file layout.
    c1 = ctr[0] * cal[0][0]/ctr[2] + cal[0][2]
    c2 = ctr[1] * cal[1][1]/ctr[2] + cal[1][2]
    return tensor([c1,c2])
img_files = get_image_files(path)
cal = np.genfromtxt(path/'01'/'rgb.cal', skip_footer=6)
def train():
biwi = DataBlock(
blocks=(ImageBlock, PointBlock),
get_items=get_image_files,
get_y=get_ctr,
splitter=FuncSplitter(lambda o: o.parent.name=='13'),
batch_tfms=[*aug_transforms(size=(240,320)),
Normalize.from_stats(*imagenet_stats)])
dls = biwi.dataloaders(path)
learn = vision_learner(dls, resnet18, y_range=(-1,1))
with learn.distrib_ctx(in_notebook=True, sync_bn=False):
learn.fine_tune(1)
notebook_launcher(train, num_processes=2)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Important
Step2: Image Classification
Step3: Image Segmentation
Step4: Text Classification
Step5: Tabular
Step6: Collab Filtering
Step7: Keypoints
|
2,049
|
<ASSISTANT_TASK:>
Python Code:
import sys, os
from numpy import *
from matplotlib.pyplot import *
%matplotlib inline
matplotlib.rcParams['savefig.dpi'] = 100
%load_ext autoreload
%autoreload 2
from rnnlm import RNNLM
# Gradient check on toy data, for speed
random.seed(10)
wv_dummy = random.randn(10,50)
model = RNNLM(L0 = wv_dummy, U0 = wv_dummy,
alpha=0.005, rseed=10, bptt=4)
model.grad_check(array([1,2,3]), array([2,3,4]))
from data_utils import utils as du
import pandas as pd
# Load the vocabulary
vocab = pd.read_table("data/lm/vocab.ptb.txt", header=None, sep="\s+",
index_col=0, names=['count', 'freq'], )
# Choose how many top words to keep
vocabsize = 2000
num_to_word = dict(enumerate(vocab.index[:vocabsize]))
word_to_num = du.invert_dict(num_to_word)
##
# Below needed for 'adj_loss': DO NOT CHANGE
fraction_lost = float(sum([vocab['count'][word] for word in vocab.index
if (not word in word_to_num)
and (not word == "UUUNKKK")]))
fraction_lost /= sum([vocab['count'][word] for word in vocab.index
if (not word == "UUUNKKK")])
print "Retained %d words from %d (%.02f%% of all tokens)" % (vocabsize, len(vocab),
100*(1-fraction_lost))
# Load the training set
docs = du.load_dataset('data/lm/ptb-train.txt')
S_train = du.docs_to_indices(docs, word_to_num)
X_train, Y_train = du.seqs_to_lmXY(S_train)
# Load the dev set (for tuning hyperparameters)
docs = du.load_dataset('data/lm/ptb-dev.txt')
S_dev = du.docs_to_indices(docs, word_to_num)
X_dev, Y_dev = du.seqs_to_lmXY(S_dev)
# Load the test set (final evaluation only)
docs = du.load_dataset('data/lm/ptb-test.txt')
S_test = du.docs_to_indices(docs, word_to_num)
X_test, Y_test = du.seqs_to_lmXY(S_test)
# Display some sample data
print " ".join(d[0] for d in docs[7])
print S_test[7]
hdim = 100 # dimension of hidden layer = dimension of word vectors
random.seed(10)
L0 = zeros((vocabsize, hdim)) # replace with random init,
# or do in RNNLM.__init__()
# test parameters; you probably want to change these
model = RNNLM(L0, U0 = L0, alpha=0.1, rseed=10, bptt=1)
# Gradient check is going to take a *long* time here
# since it's quadratic-time in the number of parameters.
# run at your own risk... (but do check this!)
# model.grad_check(array([1,2,3]), array([2,3,4]))
#### YOUR CODE HERE ####
##
# Pare down to a smaller dataset, for speed
# (optional - recommended to not do this for your final model)
ntrain = len(Y_train)
X = X_train[:ntrain]
Y = Y_train[:ntrain]
#### END YOUR CODE ####
## Evaluate cross-entropy loss on the dev set,
## then convert to perplexity for your writeup
dev_loss = model.compute_mean_loss(X_dev, Y_dev)
## DO NOT CHANGE THIS CELL ##
# Report your numbers, after computing dev_loss above.
def adjust_loss(loss, funk, q, mode='basic'):
if mode == 'basic':
# remove freebies only: score if had no UUUNKKK
return (loss + funk*log(funk))/(1 - funk)
else:
# remove freebies, replace with best prediction on remaining
return loss + funk*log(funk) - funk*log(q)
# q = best unigram frequency from omitted vocab
# this is the best expected loss out of that set
q = vocab.freq[vocabsize] / sum(vocab.freq[vocabsize:])
print "Unadjusted: %.03f" % exp(dev_loss)
print "Adjusted for missing vocab: %.03f" % exp(adjust_loss(dev_loss, fraction_lost, q))
##
# Save to .npy files; should only be a few MB total
assert(min(model.sparams.L.shape) <= 100) # don't be too big
assert(max(model.sparams.L.shape) <= 5000) # don't be too big
save("rnnlm.L.npy", model.sparams.L)
save("rnnlm.U.npy", model.params.U)
save("rnnlm.H.npy", model.params.H)
def seq_to_words(seq):
return [num_to_word[s] for s in seq]
seq, J = model.generate_sequence(word_to_num["<s>"],
word_to_num["</s>"],
maxlen=100)
print J
# print seq
print " ".join(seq_to_words(seq))
# Replace UUUNKKK with a random unigram,
# drawn from vocab that we skipped
from nn.math import MultinomialSampler, multinomial_sample
def fill_unknowns(words):
#### YOUR CODE HERE ####
ret = words # do nothing; replace this
#### END YOUR CODE ####
return ret
print " ".join(fill_unknowns(seq_to_words(seq)))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: (e)
Step2: Prepare Vocabulary and Load PTB Data
Step3: Load the datasets, using the vocabulary in word_to_num. Our starter code handles this for you, and also generates lists of lists X and Y, corresponding to input words and target words*.
Step4: (f)
Step5: The performance of the model is skewed somewhat by the large number of UUUNKKK tokens; if these are 1/6 of the dataset, then that's a sizeable fraction that we're just waving our hands at. Naively, our model gets credit for these that's not really deserved; the formula below roughly removes this contribution from the average loss. Don't worry about how it's derived, but do report both scores - it helps us compare across models with different vocabulary sizes.
Step6: Save Model Parameters
Step7: (g)
Step8: BONUS
|
2,050
|
<ASSISTANT_TASK:>
Python Code:
# Authors: Eric Larson <larson.eric.d@gmail.com>
# Chris Holdgraf <choldgraf@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat
import mne
from mne.viz import plot_alignment, snapshot_brain_montage
print(__doc__)
mat = loadmat(mne.datasets.misc.data_path() + '/ecog/sample_ecog.mat')
ch_names = mat['ch_names'].tolist()
elec = mat['elec'] # electrode positions given in meters
# Now we make a montage stating that the sEEG contacts are in head
# coordinate system (although they are in MRI). This is compensated
# by the fact that below we do not specicty a trans file so the Head<->MRI
# transform is the identity.
montage = mne.channels.make_dig_montage(ch_pos=dict(zip(ch_names, elec)),
coord_frame='head')
print('Created %s channel positions' % len(ch_names))
info = mne.create_info(ch_names, 1000., 'ecog').set_montage(montage)
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
fig = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces=['pial'])
mne.viz.set_3d_view(fig, 200, 70)
# We'll once again plot the surface, then take a snapshot.
fig_scatter = plot_alignment(info, subject='sample', subjects_dir=subjects_dir,
surfaces='pial')
mne.viz.set_3d_view(fig_scatter, 200, 70)
xy, im = snapshot_brain_montage(fig_scatter, montage)
# Convert from a dictionary to array to plot
xy_pts = np.vstack([xy[ch] for ch in info['ch_names']])
# Define an arbitrary "activity" pattern for viz
activity = np.linspace(100, 200, xy_pts.shape[0])
# This allows us to use matplotlib to create arbitrary 2d scatterplots
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(im)
ax.scatter(*xy_pts.T, c=activity, s=200, cmap='coolwarm')
ax.set_axis_off()
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's load some ECoG electrode locations and names, and turn them into
Step2: Now that we have our electrode positions in MRI coordinates, we can create
Step3: We can then plot the locations of our electrodes on our subject's brain.
Step4: Sometimes it is useful to make a scatterplot for the current figure view.
|
2,051
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from qutip import *
N = 15
w0 = 1.0 * 2 * np.pi
A = 0.1 * 2 * np.pi
times = np.linspace(0, 15, 301)
gamma = 0.25
ntraj = 150
nsubsteps = 50
a = destroy(N)
x = a + a.dag()
y = -1.0j*(a - a.dag())
H = w0 * a.dag() * a + A * (a + a.dag())
rho0 = coherent(N, np.sqrt(5.0), method='analytic')
c_ops = [np.sqrt(gamma) * a]
e_ops = [a.dag() * a, x, y]
result_ref = mesolve(H, rho0, times, c_ops, e_ops)
plot_expectation_values(result_ref);
from qutip.expect import expect_rho_vec
L = liouvillian(H)
D = lindblad_dissipator(c_ops[0])
d1_operator = L + D
def d1_rho_func(t, rho_vec):
return d1_operator * rho_vec
B1 = spre(c_ops[0]) + spost(c_ops[0].dag())
B2 = spre(c_ops[0]) + spost(c_ops[0].dag())
def d2_rho_func(t, rho_vec):
e1 = expect_rho_vec(B1.data, rho_vec, False)
drho1 = B1 * rho_vec - e1 * rho_vec
e1 = expect_rho_vec(B2.data, rho_vec, False)
drho2 = B2 * rho_vec - e1 * rho_vec
return np.vstack([1.0/np.sqrt(2) * drho1, -1.0j/np.sqrt(2) * drho2])
result = general_stochastic(ket2dm(rho0), times, d1_rho_func, d2_rho_func,
e_ops=[spre(op) for op in e_ops],
len_d2=2, ntraj=ntraj, nsubsteps=nsubsteps*2, solver="platen",
dW_factors=[np.sqrt(2/gamma), np.sqrt(2/gamma)],
m_ops=[spre(x), spre(y)],
store_measurement=True, map_func=parallel_map)
plot_expectation_values([result, result_ref]);
fig, ax = plt.subplots(figsize=(8,4))
for m in result.measurement:
ax.plot(times, m[:, 0].real, 'b', alpha=0.05)
ax.plot(times, m[:, 1].real, 'r', alpha=0.05)
ax.plot(times, result_ref.expect[1], 'b', lw=2);
ax.plot(times, result_ref.expect[2], 'r', lw=2);
ax.set_ylim(-10, 10)
ax.set_xlim(0, times.max())
ax.set_xlabel('time', fontsize=12)
ax.plot(times, np.array(result.measurement).mean(axis=0)[:,0].real, 'k', lw=2);
ax.plot(times, np.array(result.measurement).mean(axis=0)[:,1].real, 'k', lw=2);
opt = Options()
opt.store_states = True
result = smesolve(H, rho0, times, [], [np.sqrt(gamma/2) * a, -1.0j * np.sqrt(gamma/2) * a],
e_ops, ntraj=100, nsubsteps=nsubsteps, solver="taylor15",
m_ops=[x, y], dW_factors=[np.sqrt(2/gamma), np.sqrt(2/gamma)],
method='homodyne', store_measurement=True,
map_func=parallel_map)
plot_expectation_values([result, result_ref])
fig, ax = plt.subplots(figsize=(8,4))
for m in result.measurement:
ax.plot(times, m[:, 0].real, 'b', alpha=0.05)
ax.plot(times, m[:, 1].real, 'r', alpha=0.05)
ax.plot(times, result_ref.expect[1], 'b', lw=2);
ax.plot(times, result_ref.expect[2], 'r', lw=2);
ax.set_xlim(0, times.max())
ax.set_ylim(-25, 25)
ax.set_xlabel('time', fontsize=12)
ax.plot(times, np.array(result.measurement).mean(axis=0)[:,0].real, 'k', lw=2);
ax.plot(times, np.array(result.measurement).mean(axis=0)[:,1].real, 'k', lw=2);
result = smesolve(H, rho0, times, [], [np.sqrt(gamma) * a],
e_ops, ntraj=ntraj, nsubsteps=nsubsteps, solver="taylor15",
method='heterodyne', store_measurement=True,
map_func=parallel_map)
plot_expectation_values([result, result_ref]);
fig, ax = plt.subplots(figsize=(8,4))
for m in result.measurement:
ax.plot(times, m[:, 0, 0].real / np.sqrt(gamma), 'b', alpha=0.05)
ax.plot(times, m[:, 0, 1].real / np.sqrt(gamma), 'r', alpha=0.05)
ax.plot(times, result_ref.expect[1], 'b', lw=2);
ax.plot(times, result_ref.expect[2], 'r', lw=2);
ax.set_xlim(0, times.max())
ax.set_ylim(-15, 15)
ax.set_xlabel('time', fontsize=12)
ax.plot(times, np.array(result.measurement).mean(axis=0)[:, 0, 0].real / np.sqrt(gamma), 'k', lw=2);
ax.plot(times, np.array(result.measurement).mean(axis=0)[:, 0, 1].real / np.sqrt(gamma), 'k', lw=2);
N = 5
w0 = 1.0 * 2 * np.pi
A = 0.1 * 2 * np.pi
times = np.linspace(0, 15, 301)
gamma = 0.25
ntraj = 150
nsubsteps = 50
a = destroy(N)
x = a + a.dag()
y = -1.0j*(a - a.dag())
H = w0 * a.dag() * a + A * (a + a.dag())
rho0 = coherent(N, np.sqrt(5.0), method='analytic')
c_ops = [np.sqrt(gamma) * a]
e_ops = [a.dag() * a, x, y]
opt = Options()
opt.store_states = True
result = smesolve(H, rho0, times, [], [np.sqrt(gamma) * a],
e_ops, ntraj=1, nsubsteps=5, solver="euler",
method='heterodyne', store_measurement=True,
map_func=parallel_map, options=opt, normalize=False)
result.states[0][100]
sp.linalg.eigh(result.states[0][10].full())
help(stochastic_solvers)
from qutip.ipynbtools import version_table
version_table()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Introduction
Step2: Heterodyne implementation #1
Step3: $D_{2}^{(1)}[A]\rho = \frac{1}{\sqrt{2}} \sqrt{\gamma} \mathcal{H}[a] \rho =
Step4: The heterodyne currents for the $x$ and $y$ quadratures are
Step5: Heterodyne implementation #2
Step6: Implementation #3
Step7: Common problem
Step8: Using smaller integration steps by increasing the nsubstep will lower the numerical errors.
Step9: Versions
|
2,052
|
<ASSISTANT_TASK:>
Python Code:
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
%matplotlib inline
import qp
import numpy as np
import scipy.stats as sps
P = qp.PDF(funcform=sps.norm(loc=0.0, scale=1.0))
x, sigma = 2.0, 1.0
Q = qp.PDF(funcform=sps.norm(loc=x, scale=sigma))
infinity = 100.0
D = qp.metrics.calculate_kld(P, Q, limits=(-infinity,infinity), vb=False)
print D
x, sigma = 0.0, 4.37
Q = qp.PDF(funcform=sps.norm(loc=x, scale=sigma))
D = qp.metrics.calculate_kld(P, Q, limits=(-infinity,infinity), vb=False)
print D
widths = np.logspace(-2.0,2.0,13)
D = np.empty_like(widths)
E = np.empty_like(widths)
x = 0.0
infinity = 1000.0
for k,sigma in enumerate(widths):
Q = qp.PDF(funcform=sps.norm(loc=x, scale=sigma))
D[k] = qp.metrics.calculate_kld(P, Q, limits=(-infinity,infinity), vb=False)
E[k] = qp.metrics.calculate_rmse(P, Q, limits=(-infinity,infinity), vb=False)
print zip(widths, D)
x = widths
y = np.log(widths*(2.0/np.pi))
# plt.plot(widths, D, color='black', linestyle='-', lw=2.0, alpha=1.0, label='Offset=0.0')
# plt.xscale('log')
# plt.ylim(0.0,32.0)
# plt.xlabel('Width of approximating Gaussian $\sigma$')
# plt.ylabel('KL divergence (nats)')
# l = plt.legend(loc='upper right')
# plt.show()
# plt.plot(widths, E, color='black', linestyle='-', lw=2.0, alpha=1.0, label='Offset=0.0')
# plt.xscale('log')
# plt.xlabel('Width of approximating Gaussian $\sigma$')
# plt.ylabel('RMSE')
# l = plt.legend(loc='upper right')
# plt.show()
fig, ax = plt.subplots()
ax.plot(x, y, color='gray', linestyle='-', lw=8.0, alpha=0.5, label=r'$\log[2\sigma/\pi\sigma_{0}]$')
ax.set_xscale('log')
ax.set_xlabel(r'root variance ratio $\sigma/\sigma_{0}$')
ax.set_xlim(1e-2, np.max(widths))
ax.set_ylim(0, 20)
ax.plot(widths, D, color='k', linestyle='-', lw=2.0, alpha=1.0, label='KLD')
ax.set_ylabel('KLD (nats)')
l = ax.legend(loc='upper right')
axr = ax.twinx()
axr.set_ylim(0, 0.1)
axr.plot(widths, E, color='k', linestyle=':', lw=2.0, alpha=1.0, label='RMSE')
axr.set_ylabel('RMSE', rotation=270, labelpad=15)
l = axr.legend(loc= 'lower left')
fig.show()
fig.savefig('precision.pdf', dpi=250)
separations = np.linspace(0.0,15.0,16)
D = np.empty_like(separations)
E = np.empty_like(separations)
sigma = 1.0
infinity = 100.0
for k,x0 in enumerate(separations):
Q = qp.PDF(funcform=sps.norm(loc=x0, scale=sigma))
D[k] = qp.metrics.calculate_kld(P, Q, limits=(-infinity,infinity), vb=False)
E[k] = qp.metrics.calculate_rmse(P, Q, limits=(-infinity,infinity), vb=False)
print zip(separations, D)
fig, ax = plt.subplots()
ax.set_xlabel(r'separation $|\mu-\mu_{0}|$')
ax.set_xlim(0, 10)
ax.set_xlim(0, np.max(D))
ax.plot(separations, D, color='k', linestyle='-', lw=2.0, alpha=1.0, label='KLD')
ax.set_ylabel('KLD (nats)')
l = ax.legend(loc='lower right')
axr = ax.twinx()
axr.plot(separations, E, color='k', linestyle=':', lw=2.0, alpha=1.0, label='RMSE')
axr.set_ylabel('RMSE', rotation=270, labelpad=15)
axr.set_xlim(0, 10)
l = axr.legend(loc='upper left')
fig.show()
import sys
print np.log(sys.float_info.epsilon)
infinity = 100.0
widths = np.array([1.0,1.5,2.0,2.5,3.0,3.5,4.0])
separations = np.linspace(0.0,7.0,15)
D = np.zeros([7,len(separations)])
E = np.zeros([7,len(separations)])
tensions = np.empty_like(D)
for j,sigma in enumerate(widths):
for k,x0 in enumerate(separations):
Q = qp.PDF(funcform=sps.norm(loc=x0, scale=sigma))
D[j,k] = qp.metrics.calculate_kld(P, Q, limits=(-infinity,infinity), vb=False)
E[j,k] = qp.metrics.calculate_rmse(P, Q, limits=(-infinity,infinity), vb=False)
tensions[j,k] = x0 / np.sqrt(sigma*sigma + 1.0)
x = tensions[0,:]
y = x**2
fig, ax = plt.subplots()
ax.plot(x, y, color='gray', linestyle='-', lw=8.0, alpha=0.5, label='$t^2$')
ax.set_xlabel('tension $t$ (sigma)')
ax.set_xlim(0, np.max(tensions))
ax.plot([-1], [-1], color='black', linestyle='-', lw=2.0, alpha=1.0, label='KLD')
ax.plot([-1], [-1], color='black', linestyle=':', lw=2.0, alpha=1.0, label='RMSE')
colors = {'blueviolet':1.0, 'forestgreen':2.0, 'darkorange':3.0}
for item in colors.keys():
ax.scatter([0], [0], color=item, label='Width='+str(colors[item])[0]+r'$\sigma$')
ax.plot(tensions[0,:], D[0,:], color='blueviolet', linestyle='-', lw=2.0, alpha=1.0)#, label='Width=1.0')
# ax.plot(tensions[1,:], D[1,:], color='violet', linestyle='-', lw=2.0, alpha=1.0, label='Width=1,5')
ax.plot(tensions[2,:], D[2,:], color='forestgreen', linestyle='-', lw=2.0, alpha=1.0)#, label='Width=2.0')
# ax.plot(tensions[3,:], D[3,:], color='green', linestyle='-', lw=2.0, alpha=1.0, label='Width=2.5')
ax.plot(tensions[4,:], D[4,:], color='darkorange', linestyle='-', lw=2.0, alpha=1.0)#, label='Width=3.0')
# ax.plot(tensions[5,:], D[5,:], color='orange', linestyle='-', lw=2.0, alpha=1.0, label='Width=3.5')
# ax.plot(tensions[6,:], D[6,:], color='forestgreen', linestyle='-', lw=2.0, alpha=1.0, label='Width=4.0')
ax.set_ylabel('KLD (nats)')
l = ax.legend(loc='lower right')
axr = ax.twinx()
axr.plot(tensions[0,:], E[0,:], color='blueviolet', linestyle=':', lw=2.0, alpha=1.0)#, label='Width=1.0')
# axr.plot(tensions[1,:], E[1,:], color='violet', linestyle=':', lw=2.0, alpha=1.0, label='Width=1,5')
axr.plot(tensions[2,:], E[2,:], color='forestgreen', linestyle=':', lw=2.0, alpha=1.0)#, label='Width=2.0')
# axr.plot(tensions[3,:], E[3,:], color='green', linestyle=':', lw=2.0, alpha=1.0, label='Width=2.5')
axr.plot(tensions[4,:], E[4,:], color='darkorange', linestyle=':', lw=2.0, alpha=1.0)#, label='Width=3.0')
# axr.plot(tensions[5,:], E[5,:], color='orange', linestyle=':', lw=2.0, alpha=1.0, label='Width=3.5')
# axr.plot(tensions[6,:], E[6,:], color='forestgreen', linestyle=':', lw=2.0, alpha=1.0, label='Width=4.0')
axr.set_ylabel('RMSE', rotation=270, labelpad=15)
axr.set_xlim(0, np.max(tensions))
fig.show()
fig.savefig('tension.pdf', dpi=250)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: i.e. Two equal-width Gaussians overlapping at their 1-sigma points have a KLD of 2 nats.
Step2: i.e. Two concentric 1D Gaussian PDFs differing in width by a factor of 4.37 have a KLD of 1 nat.
Step3: It looks as though using an increasingly broad approximation distribution leads to logarithmically increasing information loss.
Step4: For separations greater than about 7 sigma, numerical precision starts to matter
Step5: Probably the precision analysis of the previous section suffered from the same type of numerical error, at very low approximation distribution widths.
|
2,053
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
n = 100
prob = 0.75
F_H0 = stats.binom(n, prob)
x = np.linspace(0,100,101)
plt.bar(x, F_H0.pmf(x), align = 'center')
plt.xlim(60, 90)
plt.show()
print('p-value: %.4f' % stats.binom_test(67, 100, prob))
print('p-value: %.10f' % stats.binom_test(22, 50, prob))
pines_data = pd.read_table('pines.txt')
pines_data.describe()
pines_data.head()
sns.pairplot(pines_data, size=4);
sn_num, we_num = 5, 5
trees_bins = stats.binned_statistic_2d(pines_data.sn, pines_data.we, None, statistic='count', bins=[sn_num, we_num])
trees_squares_num = trees_bins.statistic
trees_squares_num
trees_bins.x_edge
trees_bins.y_edge
mean_trees_num = np.sum(trees_squares_num) / 25
print(mean_trees_num)
stats.chisquare(trees_squares_num.flatten(), ddof = 0)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: <b>
Step2: <b>
Step3: <b>
|
2,054
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'inpe', 'sandbox-3', 'ocean')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Family
Step7: 1.4. Basic Approximations
Step8: 1.5. Prognostic Variables
Step9: 2. Key Properties --> Seawater Properties
Step10: 2.2. Eos Functional Temp
Step11: 2.3. Eos Functional Salt
Step12: 2.4. Eos Functional Depth
Step13: 2.5. Ocean Freezing Point
Step14: 2.6. Ocean Specific Heat
Step15: 2.7. Ocean Reference Density
Step16: 3. Key Properties --> Bathymetry
Step17: 3.2. Type
Step18: 3.3. Ocean Smoothing
Step19: 3.4. Source
Step20: 4. Key Properties --> Nonoceanic Waters
Step21: 4.2. River Mouth
Step22: 5. Key Properties --> Software Properties
Step23: 5.2. Code Version
Step24: 5.3. Code Languages
Step25: 6. Key Properties --> Resolution
Step26: 6.2. Canonical Horizontal Resolution
Step27: 6.3. Range Horizontal Resolution
Step28: 6.4. Number Of Horizontal Gridpoints
Step29: 6.5. Number Of Vertical Levels
Step30: 6.6. Is Adaptive Grid
Step31: 6.7. Thickness Level 1
Step32: 7. Key Properties --> Tuning Applied
Step33: 7.2. Global Mean Metrics Used
Step34: 7.3. Regional Metrics Used
Step35: 7.4. Trend Metrics Used
Step36: 8. Key Properties --> Conservation
Step37: 8.2. Scheme
Step38: 8.3. Consistency Properties
Step39: 8.4. Corrected Conserved Prognostic Variables
Step40: 8.5. Was Flux Correction Used
Step41: 9. Grid
Step42: 10. Grid --> Discretisation --> Vertical
Step43: 10.2. Partial Steps
Step44: 11. Grid --> Discretisation --> Horizontal
Step45: 11.2. Staggering
Step46: 11.3. Scheme
Step47: 12. Timestepping Framework
Step48: 12.2. Diurnal Cycle
Step49: 13. Timestepping Framework --> Tracers
Step50: 13.2. Time Step
Step51: 14. Timestepping Framework --> Baroclinic Dynamics
Step52: 14.2. Scheme
Step53: 14.3. Time Step
Step54: 15. Timestepping Framework --> Barotropic
Step55: 15.2. Time Step
Step56: 16. Timestepping Framework --> Vertical Physics
Step57: 17. Advection
Step58: 18. Advection --> Momentum
Step59: 18.2. Scheme Name
Step60: 18.3. ALE
Step61: 19. Advection --> Lateral Tracers
Step62: 19.2. Flux Limiter
Step63: 19.3. Effective Order
Step64: 19.4. Name
Step65: 19.5. Passive Tracers
Step66: 19.6. Passive Tracers Advection
Step67: 20. Advection --> Vertical Tracers
Step68: 20.2. Flux Limiter
Step69: 21. Lateral Physics
Step70: 21.2. Scheme
Step71: 22. Lateral Physics --> Momentum --> Operator
Step72: 22.2. Order
Step73: 22.3. Discretisation
Step74: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Step75: 23.2. Constant Coefficient
Step76: 23.3. Variable Coefficient
Step77: 23.4. Coeff Background
Step78: 23.5. Coeff Backscatter
Step79: 24. Lateral Physics --> Tracers
Step80: 24.2. Submesoscale Mixing
Step81: 25. Lateral Physics --> Tracers --> Operator
Step82: 25.2. Order
Step83: 25.3. Discretisation
Step84: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Step85: 26.2. Constant Coefficient
Step86: 26.3. Variable Coefficient
Step87: 26.4. Coeff Background
Step88: 26.5. Coeff Backscatter
Step89: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Step90: 27.2. Constant Val
Step91: 27.3. Flux Type
Step92: 27.4. Added Diffusivity
Step93: 28. Vertical Physics
Step94: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Step95: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
Step96: 30.2. Closure Order
Step97: 30.3. Constant
Step98: 30.4. Background
Step99: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
Step100: 31.2. Closure Order
Step101: 31.3. Constant
Step102: 31.4. Background
Step103: 32. Vertical Physics --> Interior Mixing --> Details
Step104: 32.2. Tide Induced Mixing
Step105: 32.3. Double Diffusion
Step106: 32.4. Shear Mixing
Step107: 33. Vertical Physics --> Interior Mixing --> Tracers
Step108: 33.2. Constant
Step109: 33.3. Profile
Step110: 33.4. Background
Step111: 34. Vertical Physics --> Interior Mixing --> Momentum
Step112: 34.2. Constant
Step113: 34.3. Profile
Step114: 34.4. Background
Step115: 35. Uplow Boundaries --> Free Surface
Step116: 35.2. Scheme
Step117: 35.3. Embeded Seaice
Step118: 36. Uplow Boundaries --> Bottom Boundary Layer
Step119: 36.2. Type Of Bbl
Step120: 36.3. Lateral Mixing Coef
Step121: 36.4. Sill Overflow
Step122: 37. Boundary Forcing
Step123: 37.2. Surface Pressure
Step124: 37.3. Momentum Flux Correction
Step125: 37.4. Tracers Flux Correction
Step126: 37.5. Wave Effects
Step127: 37.6. River Runoff Budget
Step128: 37.7. Geothermal Heating
Step129: 38. Boundary Forcing --> Momentum --> Bottom Friction
Step130: 39. Boundary Forcing --> Momentum --> Lateral Friction
Step131: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Step132: 40.2. Ocean Colour
Step133: 40.3. Extinction Depth
Step134: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Step135: 41.2. From Sea Ice
Step136: 41.3. Forced Mode Restoring
|
2,055
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from scipy.interpolate import interp1d
# YOUR CODE HERE
raise NotImplementedError()
assert isinstance(x, np.ndarray) and len(x)==40
assert isinstance(y, np.ndarray) and len(y)==40
assert isinstance(t, np.ndarray) and len(t)==40
# YOUR CODE HERE
raise NotImplementedError()
assert newt[0]==t.min()
assert newt[-1]==t.max()
assert len(newt)==200
assert len(newx)==200
assert len(newy)==200
# YOUR CODE HERE
raise NotImplementedError()
assert True # leave this to grade the trajectory plot
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 2D trajectory interpolation
Step2: Use these arrays to create interpolated functions $x(t)$ and $y(t)$. Then use those functions to create the following arrays
Step3: Make a parametric plot of ${x(t),y(t)}$ that shows the interpolated values and the original points
|
2,056
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
# Import neurom module
import neurom as nm
# Import neurom visualization module
from neurom import viewer
# Load a single morphology
neuron = nm.load_neuron('../test_data/valid_set/Neuron.swc')
# Load a population of morphologies from a set of files
pop = nm.load_neurons('../test_data/valid_set/')
# Get a single morphology from the population
single_neuron = pop.neurons[0]
# Visualize a morphology in two dimensions
fig, ax = viewer.draw(neuron)
# Visualize a morphology in three dimensions
fig, ax = viewer.draw(neuron, mode='3d')
# Visualize a single tree in three dimensions
fig, ax = viewer.draw(neuron.neurites[0], mode='3d')
# Visualize the dendrogram of a morphology
fig, ax = viewer.draw(neuron, mode='dendrogram')
# Extract the total number of neurites (basal and apical dendrites, and axons)
number_of_neurites = nm.get('number_of_neurites', neuron)
# Extract the total number of sections
number_of_sections = nm.get('number_of_sections', neuron)
# Extract the soma radius
soma_radius = neuron.soma.radius
# Extract the number of sections per neurite
number_of_sections_per_neurite = nm.get('number_of_sections_per_neurite', neuron)
# Print result
print "Neuron id : {0} \n\
Number of neurites : {1} \n\
Soma radius : {2:.2f} \n\
Number of sections : {3}".format(neuron.name, number_of_neurites[0], soma_radius, number_of_sections[0])
print
print "Neurite type \t\t\t| Number of sections"
for i, neurite in enumerate(neuron.neurites):
print "{0:31} | {1}".format(str(neurite.type), number_of_sections_per_neurite[i])
# Extract the lengths of the sections
section_lengths = nm.get('section_lengths', neuron)
# Extract the lengths of the segments
segment_lengths = nm.get('segment_lengths', neuron)
# Extract the local bifurcation angles
local_bif_angles = nm.get('local_bifurcation_angles', neuron)
# Extract the remote bifurcation angles
remote_bif_angles = nm.get('remote_bifurcation_angles', neuron)
# Extract the radial distances of the sections
section_radial_distances = nm.get('section_radial_distances', neuron)
# Extract the path distances of the sections
section_path_distances = nm.get('section_path_distances', neuron)
# Print result
features = (segment_lengths, section_lengths, local_bif_angles,
remote_bif_angles, section_path_distances, section_radial_distances)
def check(feature_list, n):
return '{0:.2f}'.format(feature_list[n]) if n < len(feature_list) else ''
print '|sg_len|sc_len|lc_bif_angles|rm_bif_angles|sc_path_dists|sc_rad_dists|'
for n in range(0, 50):
args = (check(f, n) for f in features)
print '|{0:^6}|{1:^6}|{2:^13}|{3:^13}|{4:^13}|{5:^12}|'.format(*args)
# Extract the section lengths of axonal trees
ax_section_lengths = nm.get('section_lengths', neuron, neurite_type=nm.AXON)
# Extract the section lengths of basal dendrite trees
ba_section_lengths = nm.get('section_lengths', neuron, neurite_type=nm.BASAL_DENDRITE)
# Extract the section lengths of apical dendrite trees
ap_section_lengths = nm.get('section_lengths', neuron, neurite_type=nm.APICAL_DENDRITE)
print '\nAxonal section lengths = ', ax_section_lengths
print '\nBasal section lengths = ', ba_section_lengths
print '\nApical section lengths = ', ap_section_lengths
import numpy as np
# We can get the mean section length
mean_sl = np.mean(section_lengths)
# We can get the standard deviation of the section lengths
std_sl = np.std(section_lengths)
# We can get the minimum section length
min_sl = np.min(section_lengths)
# ... and the maximum section length
max_sl = np.max(section_lengths)
print 'Section length statistics:'
print ' mean = {0:.2f} +- {1:.2f}'.format(mean_sl, std_sl)
print ' [min, max]: [{0:.2f}, {1:.2f}]'.format(min_sl, max_sl)
import matplotlib.pyplot as plt
# Select the feature of choice
feature = nm.get('segment_lengths', neuron)
# Create empty figure
fig = plt.figure(figsize=(11,3))
# Create histogram
ax = fig.add_subplot('131')
ax.hist(feature, bins=25, edgecolor='black')
# Create cumulative histogram
ax = fig.add_subplot('132')
ax.hist(feature, bins=25, cumulative=True, edgecolor='black')
# Create boxplot; flier points are indicated with green dots
ax = fig.add_subplot('133')
_ = ax.boxplot(feature, sym='g.')
from neurom import stats
data = nm.get('segment_lengths', neuron)
# Let’s start with a normal distribution. We will fit the data that we extracted above with a normal distribution
p = stats.fit(data, distribution='norm')
# The output of the function is a named tuple of type FitResults
print 'Fit output type : ', type(p)
# The parameters are stored in the variable params, which in the case of the normal distribution stores the mu and sigma
# of the normal distribution
mu, sigma = p.params
ks_dist, pvalue = p.errs
# Print result
print '[mu, sigma] : [{0:.2f}, {1:.2f}]\n'.format(mu, sigma)
# We need to check the statistical error of the performed fit to evaluate the accuracy of the
# selected model. To do so we use the errors variable of FitResults:
print 'Kolmogorov-Smirnov distance : {0:.2f}'.format(ks_dist)
print 'P-value : {0:.2f}'.format(pvalue)
from scipy.stats import norm
# Create a histogram as above
fig = plt.figure()
plt.hist(data, bins=25, normed=True, edgecolor='black')
# Plot range: 5 standard deviations around the mean
norm_range = np.arange(mu - 5.*sigma, mu + 5.*sigma, 0.001)
# Plot the normal pdf with the given range, mu and sigma
_ = plt.plot(norm_range, norm.pdf(norm_range, mu, sigma), linewidth=3., c='r', alpha=0.8)
p = stats.optimal_distribution(data, distr_to_check=('lognorm', 'logistic', 'norm'))
print 'Fit results:', p
# Threshold value
threshold = 10
# Get the ids of sections which length exceeds the threshold
selected_ids = np.where(section_lengths > threshold)
# Get the values of section lengths that exceed the threshold
section_lengths[selected_ids]
# Get the length of all sections with a radial distance between 0.0 and 60.0
section_indices = np.where((section_radial_distances >= 0.0) & (section_radial_distances < 60.0))
selected_section_lengths = section_lengths[section_indices]
print selected_section_lengths
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1. Loading a morphology or a population
Step2: 2. Morphology visualization
Step3: 3. Morphology analysis
Step4: 3.2 Analyze different types of trees
Step5: 3.3 Perform statistical analysis on extracted measurements
Step6: 3.4 Generate plots from the extracted morphometrics
Step7: 3.5 Fit the extracted data with a statistical distribution
Step8: The result of the fitting can be visualized
Step9: It is also possible to find the optimal distribution that best fits the data, among a number of distributions that are
Step10: 3.6 Apply more advanced manipulation on extracted data
Step11: 3.7 Combine morphometrics
|
2,057
|
<ASSISTANT_TASK:>
Python Code:
# Setup plotting
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
plt.rc('animation', html='html5')
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning_intro.ex6 import *
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer
hotel = pd.read_csv('../input/dl-course-data/hotel.csv')
X = hotel.copy()
y = X.pop('is_canceled')
X['arrival_date_month'] = \
X['arrival_date_month'].map(
{'January':1, 'February': 2, 'March':3,
'April':4, 'May':5, 'June':6, 'July':7,
'August':8, 'September':9, 'October':10,
'November':11, 'December':12}
)
features_num = [
"lead_time", "arrival_date_week_number",
"arrival_date_day_of_month", "stays_in_weekend_nights",
"stays_in_week_nights", "adults", "children", "babies",
"is_repeated_guest", "previous_cancellations",
"previous_bookings_not_canceled", "required_car_parking_spaces",
"total_of_special_requests", "adr",
]
features_cat = [
"hotel", "arrival_date_month", "meal",
"market_segment", "distribution_channel",
"reserved_room_type", "deposit_type", "customer_type",
]
transformer_num = make_pipeline(
SimpleImputer(strategy="constant"), # there are a few missing values
StandardScaler(),
)
transformer_cat = make_pipeline(
SimpleImputer(strategy="constant", fill_value="NA"),
OneHotEncoder(handle_unknown='ignore'),
)
preprocessor = make_column_transformer(
(transformer_num, features_num),
(transformer_cat, features_cat),
)
# stratify - make sure classes are evenly represented across splits
X_train, X_valid, y_train, y_valid = \
train_test_split(X, y, stratify=y, train_size=0.75)
X_train = preprocessor.fit_transform(X_train)
X_valid = preprocessor.transform(X_valid)
input_shape = [X_train.shape[1]]
from tensorflow import keras
from tensorflow.keras import layers
# YOUR CODE HERE: define the model given in the diagram
model = ____
# Check your answer
q_1.check()
#%%RM_IF(PROD)%%
from tensorflow import keras
from tensorflow.keras import layers
# Wrong activations
model = keras.Sequential([
layers.BatchNormalization(input_shape=input_shape),
layers.Dense(256, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Dense(256, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Dense(1),
])
q_1.assert_check_failed()
#%%RM_IF(PROD)%%
from tensorflow import keras
from tensorflow.keras import layers
# Wrong layers
model = keras.Sequential([
layers.Dense(256, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Dense(256, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Dense(1, activation='sigmoid'),
])
q_1.assert_check_failed()
#%%RM_IF(PROD)%%
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential([
layers.BatchNormalization(input_shape=input_shape),
layers.Dense(256, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Dense(256, activation='relu'),
layers.BatchNormalization(),
layers.Dropout(0.3),
layers.Dense(1, activation='sigmoid'),
])
q_1.assert_check_passed()
# YOUR CODE HERE
____
# Check your answer
q_2.check()
#%%RM_IF(PROD)%%
# missing loss
model.compile(
optimizer='adam',
metrics=['binary_accuracy']
)
q_2.assert_check_failed()
#%%RM_IF(PROD)%%
# missing optimizer
model.compile(
loss='binary_crossentropy',
metrics=['binary_accuracy']
)
q_2.assert_check_failed()
#%%RM_IF(PROD)%%
# wrong loss
model.compile(
optimizer='adam',
loss='mae',
metrics=['binary_accuracy']
)
q_2.assert_check_failed()
#%%RM_IF(PROD)%%
# wrong optimizer
model.compile(
optimizer='sgd',
loss='binary_crossentropy',
metrics=['binary_accuracy']
)
q_2.assert_check_failed()
#%%RM_IF(PROD)%%
# wrong metrics
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['mse']
)
q_2.assert_check_failed()
#%%RM_IF(PROD)%%
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['binary_accuracy']
)
q_2.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
q_2.hint()
#_COMMENT_IF(PROD)_
q_2.solution()
early_stopping = keras.callbacks.EarlyStopping(
patience=5,
min_delta=0.001,
restore_best_weights=True,
)
history = model.fit(
X_train, y_train,
validation_data=(X_valid, y_valid),
batch_size=512,
epochs=200,
callbacks=[early_stopping],
)
history_df = pd.DataFrame(history.history)
history_df.loc[:, ['loss', 'val_loss']].plot(title="Cross-entropy")
history_df.loc[:, ['binary_accuracy', 'val_binary_accuracy']].plot(title="Accuracy")
# View the solution (Run this cell to receive credit!)
q_3.check()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First, load the Hotel Cancellations dataset.
Step2: 1) Define Model
Step3: 2) Add Optimizer, Loss, and Metric
Step4: Finally, run this cell to train the model and view the learning curves. It may run for around 60 to 70 epochs, which could take a minute or two.
Step5: 3) Train and Evaluate
|
2,058
|
<ASSISTANT_TASK:>
Python Code:
import pymatgen.core as mg
si = mg.Element("Si")
print("Atomic mass of Si is {}".format(si.atomic_mass))
print("Si has a melting point of {}".format(si.melting_point))
print("Ionic radii for Si: {}".format(si.ionic_radii))
print("Atomic mass of Si in kg: {}".format(si.atomic_mass.to("kg")))
fe2 = mg.Species("Fe", 2)
print(fe2.atomic_mass)
print(fe2.ionic_radius)
comp = mg.Composition("Fe2O3")
print("Weight of Fe2O3 is {}".format(comp.weight))
print("Amount of Fe in Fe2O3 is {}".format(comp["Fe"]))
print("Atomic fraction of Fe is {}".format(comp.get_atomic_fraction("Fe")))
print("Weight fraction of Fe is {}".format(comp.get_wt_fraction("Fe")))
# Creates cubic Lattice with lattice parameter 4.2
lattice = mg.Lattice.cubic(4.2)
print(lattice.parameters)
structure = mg.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
print("Unit cell vol = {}".format(structure.volume))
print("First site of the structure is {}".format(structure[0]))
structure.make_supercell([2, 2, 1]) #Make a 3 x 2 x 1 supercell of the structure
del structure[0] #Remove the first site
structure.append("Na", [0,0,0]) #Append a Na atom.
structure[-1] = "Li" #Change the last added atom to Li.
structure[0] = "Cs", [0.01, 0.5, 0] #Shift the first atom by 0.01 in fractional coordinates in the x-direction.
immutable_structure = mg.IStructure.from_sites(structure) #Create an immutable structure (cannot be modified).
print(immutable_structure)
#Determining the symmetry
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
finder = SpacegroupAnalyzer(structure)
print("The spacegroup is {}".format(finder.get_space_group_symbol()))
from pymatgen.analysis.structure_matcher import StructureMatcher
#Let's create two structures which are the same topologically, but with different elements, and one lattice is larger.
s1 = mg.Structure(lattice, ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
s2 = mg.Structure(mg.Lattice.cubic(5), ["Rb", "F"], [[0, 0, 0], [0.5, 0.5, 0.5]])
m = StructureMatcher()
print(m.fit_anonymous(s1, s2)) #Returns a mapping which maps s1 and s2 onto each other. Strict element fitting is also available.
#Convenient IO to various formats. Format is intelligently determined from file name and extension.
structure.to(filename="POSCAR")
structure.to(filename="CsCl.cif")
#Or if you just supply fmt, you simply get a string.
print(structure.to(fmt="poscar"))
print(structure.to(fmt="cif"))
#Reading a structure from a file.
structure = mg.Structure.from_file("POSCAR")
from pymatgen.io.vasp.sets import MPRelaxSet
v = MPRelaxSet(structure)
v.write_input("MyInputFiles") #Writes a complete set of input files for structure to the directory MyInputFiles
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Basic Element, Specie and Composition objects
Step2: You can see that units are printed for atomic masses and ionic radii. Pymatgen comes with a complete system of managing units in pymatgen.core.unit. A Unit is a subclass of float that attaches units and handles conversions. For example,
Step3: Please refer to the Units example for more information on units. Species are like Elements, except they have an explicit oxidation state. They can be used wherever Element is used for the most part.
Step4: A Composition is essentially an immutable mapping of Elements/Species with amounts, and useful properties like molecular weight, get_atomic_fraction, etc. Note that you can conveniently either use an Element/Specie object or a string as keys (this is a feature).
Step5: Lattice & Structure objects
Step6: A Structure object represents a crystal structure (lattice + basis). A Structure is essentially a list of PeriodicSites with the same Lattice. Let us now create a CsCl structure.
Step7: The Structure object contains many useful manipulation functions. Since Structure is essentially a list, it contains a simple pythonic API for manipulation its sites. Some examples are given below. Please note that there is an immutable version of Structure known as IStructure, for the use case where you really need to enforce that the structure does not change. Conversion between these forms of Structure can be performed using from_sites().
Step8: Basic analyses
Step9: We also have an extremely powerful structure matching tool.
Step10: Input/output
Step11: The vaspio_set module provides a means to obtain a complete set of VASP input files for performing calculations. Several useful presets based on the parameters used in the Materials Project are provided.
|
2,059
|
<ASSISTANT_TASK:>
Python Code:
from google.cloud import aiplatform
REGION = "us-central1"
PROJECT_ID = !(gcloud config get-value project)
PROJECT_ID = PROJECT_ID[0]
# Set `PATH` to include the directory containing KFP CLI
PATH = %env PATH
%env PATH=/home/jupyter/.local/bin:{PATH}
!cat trainer_image_vertex/Dockerfile
IMAGE_NAME = "trainer_image_covertype_vertex"
TAG = "latest"
TRAINING_CONTAINER_IMAGE_URI = f"gcr.io/{PROJECT_ID}/{IMAGE_NAME}:{TAG}"
TRAINING_CONTAINER_IMAGE_URI
!gcloud builds submit --timeout 15m --tag $TRAINING_CONTAINER_IMAGE_URI trainer_image_vertex
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/sklearn-cpu.0-20:latest"
)
%%writefile ./pipeline_vertex/pipeline.py
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
Kubeflow Covertype Pipeline.
import os
from kfp import dsl
from training_lightweight_component import train_and_deploy
from tuning_lightweight_component import tune_hyperparameters
PIPELINE_ROOT = os.getenv("PIPELINE_ROOT")
PROJECT_ID = os.getenv("PROJECT_ID")
REGION = os.getenv("REGION")
TRAINING_CONTAINER_IMAGE_URI = os.getenv("TRAINING_CONTAINER_IMAGE_URI")
SERVING_CONTAINER_IMAGE_URI = os.getenv("SERVING_CONTAINER_IMAGE_URI")
TRAINING_FILE_PATH = os.getenv("TRAINING_FILE_PATH")
VALIDATION_FILE_PATH = os.getenv("VALIDATION_FILE_PATH")
MAX_TRIAL_COUNT = int(os.getenv("MAX_TRIAL_COUNT", "5"))
PARALLEL_TRIAL_COUNT = int(os.getenv("PARALLEL_TRIAL_COUNT", "5"))
THRESHOLD = float(os.getenv("THRESHOLD", "0.6"))
@dsl.pipeline(
    name="covertype-kfp-pipeline",
    description="The pipeline training and deploying the Covertype classifier",
    pipeline_root=PIPELINE_ROOT,
)
def covertype_train(
    training_container_uri: str = TRAINING_CONTAINER_IMAGE_URI,
    serving_container_uri: str = SERVING_CONTAINER_IMAGE_URI,
    training_file_path: str = TRAINING_FILE_PATH,
    validation_file_path: str = VALIDATION_FILE_PATH,
    accuracy_deployment_threshold: float = THRESHOLD,
    max_trial_count: int = MAX_TRIAL_COUNT,
    parallel_trial_count: int = PARALLEL_TRIAL_COUNT,
    pipeline_root: str = PIPELINE_ROOT,
):
    """Tune, then conditionally train and deploy the Covertype classifier.

    Runs the hyperparameter-tuning component first; only if the best
    trial's accuracy reaches ``accuracy_deployment_threshold`` does the
    pipeline train the final model with the best hyperparameters and
    deploy it.
    """
    # All intermediate artifacts are staged under the pipeline root.
    staging_bucket = f"{pipeline_root}/staging"
    # Step 1: hyperparameter search (its outputs expose best_alpha,
    # best_max_iter and best_accuracy for the downstream steps).
    tuning_op = tune_hyperparameters(
        project=PROJECT_ID,
        location=REGION,
        container_uri=training_container_uri,
        training_file_path=training_file_path,
        validation_file_path=validation_file_path,
        staging_bucket=staging_bucket,
        max_trial_count=max_trial_count,
        parallel_trial_count=parallel_trial_count,
    )
    accuracy = tuning_op.outputs["best_accuracy"]
    # Step 2: gate training/deployment on the tuned accuracy.
    with dsl.Condition(
        accuracy >= accuracy_deployment_threshold, name="deploy_decision"
    ):
        train_and_deploy_op = (  # pylint: disable=unused-variable
            train_and_deploy(
                project=PROJECT_ID,
                location=REGION,
                container_uri=training_container_uri,
                serving_container_uri=serving_container_uri,
                training_file_path=training_file_path,
                validation_file_path=validation_file_path,
                staging_bucket=staging_bucket,
                alpha=tuning_op.outputs["best_alpha"],
                max_iter=tuning_op.outputs["best_max_iter"],
            )
        )
ARTIFACT_STORE = f"gs://{PROJECT_ID}-kfp-artifact-store"
PIPELINE_ROOT = f"{ARTIFACT_STORE}/pipeline"
DATA_ROOT = f"{ARTIFACT_STORE}/data"
TRAINING_FILE_PATH = f"{DATA_ROOT}/training/dataset.csv"
VALIDATION_FILE_PATH = f"{DATA_ROOT}/validation/dataset.csv"
%env PIPELINE_ROOT={PIPELINE_ROOT}
%env PROJECT_ID={PROJECT_ID}
%env REGION={REGION}
%env SERVING_CONTAINER_IMAGE_URI={SERVING_CONTAINER_IMAGE_URI}
%env TRAINING_CONTAINER_IMAGE_URI={TRAINING_CONTAINER_IMAGE_URI}
%env TRAINING_FILE_PATH={TRAINING_FILE_PATH}
%env VALIDATION_FILE_PATH={VALIDATION_FILE_PATH}
!gsutil ls | grep ^{ARTIFACT_STORE}/$ || gsutil mb -l {REGION} {ARTIFACT_STORE}
PIPELINE_JSON = "covertype_kfp_pipeline.json"
!dsl-compile-v2 --py pipeline_vertex/pipeline.py --output $PIPELINE_JSON
!head {PIPELINE_JSON}
aiplatform.init(project=PROJECT_ID, location=REGION)
pipeline = aiplatform.PipelineJob(
display_name="covertype_kfp_pipeline",
template_path=PIPELINE_JSON,
enable_caching=False,
)
pipeline.run()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Understanding the pipeline design
Step2: Let's now build and push this trainer container to the container registry
Step3: To match the ml framework version we use at training time while serving the model, we will have to supply the following serving container to the pipeline
Step5: Note
Step6: Compile the pipeline
Step7: Let us make sure that the ARTIFACT_STORE has been created, and let us create it if not
Step8: Note
Step9: Note
Step10: Deploy the pipeline package
|
2,060
|
<ASSISTANT_TASK:>
Python Code:
from __future__ import division, print_function
import pylab as plt
import matplotlib.pyplot as mpl
from pymatgen.core import Element, Composition
%matplotlib inline
import csv
with open("ICSD/icsd-ternaries.csv", "r") as f:
csv_reader = csv.reader(f, dialect = csv.excel_tab)
data = [line for line in csv_reader]
formulas = [line[2] for line in data]
compositions = [Composition(x) for x in formulas]
unique_formulas = list(set(formulas))
unique_compositions = set(Composition(f).formula for f in unique_formulas)
unique_data=[]
found_comps=[]
for line in data:
form=Composition(line[2]).formula
if form not in found_comps:
unique_data.append(line)
found_comps.append(form)
with open("ICSD/Unique_ICSD.dat","w") as f:
for line in unique_data:
print("\t".join(line),end='\n',file=f)
print("Number of unique compositions found by Chuck:", len(unique_compositions))
print("Number of lines we just wrote to file:",len(unique_data))
with open('ICSD/Unique_ICSD.dat','r') as f:
data_1=csv.reader(f,"excel-tab")
list_data1=[[element.strip() for element in row] for row in data_1]
for row1 in list_data1:
row1[1]=row1[1].replace(' ','')
list_space=[row1[1].rstrip('Z').rstrip('S').rstrip("H").rstrip('R') for row1 in list_data1]
with open("ICSD/spacegroups.dat",'r') as f:
dat=csv.reader(f,dialect='excel-tab',quoting=csv.QUOTE_NONE)
list_dat=[element.strip() for row in dat for element in row ]
list1=[[int(list_dat[i*2]),list_dat[i*2+1]] for i in range(int(len(list_dat)/2))]
dict_space={}
for i in range(len(list1)):
dict_space[list1[i][1]]=list1[i][0]
with open('ICSD/spacegroups_2.dat','r') as f1:
f=f1.readlines()
for line in f:
data2=[element.strip() for element in line.split()]
if data2[1] not in dict_space.keys():
dict_space[data2[1]]=int(data2[0])
with open('ICSD/spacegroups_3.dat','r') as f1:
f=f1.readlines()
for line in f:
data3=[element.strip() for element in line.split()]
if data3[0] not in dict_space.keys():
dict_space[data3[0]]=int(data3[1])
plt.figure(figsize = (8,5))
list_nf=[]
count_f=plt.array([0]*230)
count_not=0
for s in list_space:
if s in dict_space.keys():
#print "Found Element in dictionary for space_group_name {0}, with space_group number {1}".format(s,dict_space[s])
count_f[dict_space[s]-1]+=1
else:
#print "Entry not found for space group name ",s
list_nf.append(s)
print("Found Entries={0}, Not Found Entries={1}".format(sum(count_f),len(list_space)-sum(count_f)))
print("Found No Entries for these spacegroups",plt.array(plt.where(count_f==0))+1)
plt.xlabel("Space Group #")
plt.ylabel("Number of instances")
plt.title("Frequency distibution of data from New Data file based on unique coompositions")
plt.plot(plt.arange(230),count_f,'bo-')
sg_counts = sorted(enumerate(count_f,1), key = lambda x: x[1], reverse = True)
print(" SG Count")
print("--- -----")
for i in range(20):
sg,count = sg_counts[i]
print("{:3} {:4}".format(sg, count))
plt.semilogy(range(len(sg_counts)), [e[1] for e in sg_counts], "o-")
with open('ICSD/icsd-ternaries.csv','r') as f:
data=csv.reader(f,"excel-tab")
list_data=[[element.strip() for element in row] for row in data]
for row in list_data:
row[1]=row[1].replace(' ','')
list_space_old=[row[1].rstrip('Z').rstrip('S').rstrip("H").rstrip('R') for row in list_data]
plt.figure(figsize = (8,5))
list_nf_old=[]
count_f_old=plt.array([0]*230)
count_not_old=0
for s in list_space_old:
if s in dict_space.keys():
#print "Found Element in dictionary for space_group_name {0}, with space_group number {1}".format(s,dict_space[s])
count_f_old[dict_space[s]-1]+=1
else:
#print "Entry not found for space group name ",s
list_nf_old.append(s)
print("Found Entries={0}, Not Found Entries={1}".format(sum(count_f_old),len(list_space_old)-sum(count_f_old)))
print("Found No Entries for these spacegroups",plt.array(plt.where(count_f_old==0))+1)
plt.xlabel("Space Group #")
plt.ylabel("Number of instances")
plt.title("Frequency distibution of data from New Data file based on unique coompositions")
plt.plot(plt.arange(230),count_f_old,'bo-')
for a in [27,48,89,93,153,170,171,172,179,184,192,207,211]:
print(a,count_f_old[a-1])
from pymatgen.matproj.rest import MPRester
def desired_element(elem):
    """Return True when *elem* should be kept in the element universe.

    Excludes noble gases, actinoids, and a short list of heavy
    radioactive elements (Po, At, Rn, Fr, Ra).
    """
    # BUGFIX: the original body tested a (leaked) global name `e` instead of
    # the `elem` parameter, so the filter never looked at its argument.
    omit = ['Po', 'At', 'Rn', 'Fr', 'Ra']
    return not elem.is_noble_gas and not elem.is_actinoid and elem.symbol not in omit
#element_universe = [str(e) for e in Element if desired_element(e)]
element_universe = [str(e) for e in Element]
dict_element={}
for i,j in enumerate(element_universe):
dict_element[str(j)]=i
print("Number of included elements =", len(element_universe))
dict_element['D']=103
dict_element['T']=104
print(dict_element.keys())
import numpy as np
stoich_array=np.zeros((len(list_data1),len(dict_element)),dtype=float)
for index,entry in enumerate(list_data1):
comp=Composition(entry[2])
temp_dict=dict(comp.get_el_amt_dict())
#print(index,temp_dict.keys())
for key in temp_dict.keys():
if dict_element.has_key(key):
stoich_array[index][dict_element[key]]= temp_dict[key]
else:
print("For line_number {0}, we did not find element {1} in formula {2} in line with entry number {3}".format(index,key,comp.formula,entry[0]))
print("Entry Number Element list Number of occurances ")
print("------------ -------------- ------------------------- ")
dict_inverse = dict ( (v,k) for k, v in dict_element.items() )
for i, entry in enumerate(stoich_array[0:20]):
nzentries=np.where(entry!=0)[0]
present_els=[dict_inverse[ent] for ent in nzentries]
print("{:<13} {:<22} {:<10}".format(i,present_els,entry[nzentries]))
import scipy.sparse
sparse_stoich=scipy.sparse.csr_matrix(stoich_array)
print(sparse_stoich[0:10])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: We import all the data and check the unique compositions by string matching of the pymatgen formulas. We then make a list out of all the unique entries and write them to a file called Unique_ICSD.dat in the ICSD subfolder.
Step2: Just to check that everything worked out fine, we check that the number of entries in the list we just wrote to file is the same as number of unique compositions found by chuck in the ICSD_ternaries notebook.
Step3: From now on, this becomes our default datafile. Let us now try to import it in the same way as in Cleaning_spacegroups.ipynb and see if the spacegroup number parsing works.
Step4: By comparing the output from Cleaning_spacegroups.ipynb, we see that there are 5 new spacegroups that now have no instances. Also the number of instances of each of the spacegroups has dropped drastically. So some compositions clearly exist in multiple spacegroups and therefore just using compositions to mark unique entries is probably a bad idea. Let us import the mother datafile and see how many entries the newly empty spacegroups had originally.
Step5: We see that the newly empty groups had very little data in the first place. But we definitely need to have more sophisticated methods for catching data duplication.
Step6: Some Compositions have Deutorium and Tritium. Right now I am creating new elements entry for D and T with array indices 103 and 104. We might want to map these to Hydrogen later. In that case the cell below would be
Step7: Storing this array as a sparse csr matrix and outputting the first 10 entries just to show how the storage is done.
|
2,061
|
<ASSISTANT_TASK:>
Python Code:
def checkPalindrome(str):
    """True when *str* is at most one character change away from a palindrome."""
    half = len(str) // 2
    # Count positions where the string disagrees with its mirror image.
    mismatches = sum(1 for i in range(half) if str[i] != str[len(str) - i - 1])
    return mismatches <= 1
str = "abccaa "
if(checkPalindrome(str ) ) :
print("Yes ")
else :
print("No ")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
|
2,062
|
<ASSISTANT_TASK:>
Python Code:
from gensim.corpora.wikicorpus import WikiCorpus
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from pprint import pprint
import multiprocessing
wiki = WikiCorpus("enwiki-latest-pages-articles.xml.bz2")
#wiki = WikiCorpus("enwiki-YYYYMMDD-pages-articles.xml.bz2")
class TaggedWikiDocument(object):
    """Iterable adapter: streams each Wikipedia page as a TaggedDocument
    whose words are the page tokens and whose single tag is the page title."""
    def __init__(self, wiki):
        self.wiki = wiki
        # Ask the corpus to yield (page_id, title) metadata with each text.
        self.wiki.metadata = True
    def __iter__(self):
        for content, (page_id, title) in self.wiki.get_texts():
            words = [token.decode("utf-8") for token in content]
            yield TaggedDocument(words, [title])
documents = TaggedWikiDocument(wiki)
pre = Doc2Vec(min_count=0)
pre.scan_vocab(documents)
for num in range(0, 20):
print('min_count: {}, size of vocab: '.format(num), pre.scale_vocab(min_count=num, dry_run=True)['memory']['vocab']/700)
cores = multiprocessing.cpu_count()
models = [
# PV-DBOW
Doc2Vec(dm=0, dbow_words=1, size=200, window=8, min_count=19, iter=10, workers=cores),
# PV-DM w/average
Doc2Vec(dm=1, dm_mean=1, size=200, window=8, min_count=19, iter =10, workers=cores),
]
models[0].build_vocab(documents)
print(str(models[0]))
models[1].reset_from(models[0])
print(str(models[1]))
for model in models:
%%time model.train(documents, total_examples=model.corpus_count, epochs=model.iter)
for model in models:
print(str(model))
pprint(model.docvecs.most_similar(positive=["Machine learning"], topn=20))
for model in models:
print(str(model))
pprint(model.docvecs.most_similar(positive=["Lady Gaga"], topn=10))
for model in models:
print(str(model))
vec = [model.docvecs["Lady Gaga"] - model["american"] + model["japanese"]]
pprint([m for m in model.docvecs.most_similar(vec, topn=11) if m[0] != "Lady Gaga"])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Preparing the corpus
Step2: Define TaggedWikiDocument class to convert WikiCorpus into suitable form for Doc2Vec.
Step3: Preprocessing
Step4: In the original paper, they set the vocabulary size 915,715. It seems similar size of vocabulary if we set min_count = 19. (size of vocab = 898,725)
Step5: Now we’re ready to train Doc2Vec of the English Wikipedia.
Step6: Similarity interface
Step7: The DBOW model interprets the word 'Machine Learning' as part of the Computer Science field, and the DM model as a Data Science related field.
Step8: DBOW model reveal the similar singer in the U.S., and DM model understand that many of Lady Gaga's songs are similar with the word "Lady Gaga".
|
2,063
|
<ASSISTANT_TASK:>
Python Code:
# Imports
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import glob
import csv
import calendar
import webbrowser
from datetime import datetime
# Constants
DATA_FOLDER = 'Data/'
'''
Functions needed to solve task 1
'''
#read every CSV under DATA_FOLDER/<path> and stack them into one DataFrame
def importdata(path, date):
    """Concatenate all CSV files in DATA_FOLDER/path, parsing *date* as datetimes."""
    csv_files = glob.glob(DATA_FOLDER + path + '/*.csv')
    frames = [pd.read_csv(csv_file, parse_dates=[date]) for csv_file in csv_files]
    return pd.concat(frames)
#add a 'Month' column (month names derived from the Date column) to a copy of df
def add_month(df):
    """Return a copy of *df* with a 'Month' column naming each Date's month."""
    result = df.copy()
    result['Month'] = [calendar.month_name[entry.month] for entry in result.Date]
    return result
#select the rows matching a country and a description,
#then keep only the ones belonging to the requested month
def chooseCountry_month(dataframe, country, descr, month):
    """Rows of *dataframe* for one country/description pair, restricted to *month*."""
    country_mask = (dataframe['Country'] == country) & (dataframe['Description'] == descr)
    country_rows = dataframe.loc[country_mask]
    return country_rows.loc[country_rows['Month'] == month]
# Create a dataframe with the number of death, the new cases and the daily infos for a country and a specified month
def getmonthresults(dataframe, country, month):
    """Summarise one month of Ebola reports for one country.

    Returns a one-row DataFrame with the new confirmed cases, the new
    deaths, and their daily averages over the reporting window.
    Negative deltas (caused by inconsistent source spreadsheets) are
    reported as NaN; a zero-day window yields 'notEnoughdatas'.
    """
    # Each country's spreadsheets label the cumulative counters differently.
    if country == 'Liberia':
        descr_kill = 'Total death/s in confirmed cases'
        descr_cases = 'Total confirmed cases'
    if country == 'Guinea':
        descr_kill = 'Total deaths of confirmed'
        descr_cases = 'Total cases of confirmed'
    if country == 'Sierra Leone':
        descr_kill = 'death_confirmed'
        descr_cases = 'cum_confirmed'
    df_kill = chooseCountry_month(dataframe, country, descr_kill, month)
    df_cases = chooseCountry_month(dataframe, country, descr_cases, month)
    # New cases/deaths over the month = last cumulative value - first one.
    res_kill = int(df_kill.iloc[len(df_kill) - 1].Totals) - int(df_kill.iloc[0].Totals)
    res_cases = int(df_cases.iloc[len(df_cases) - 1].Totals) - int(df_cases.iloc[0].Totals)
    # Number of days covered = last report day - first report day.
    nb_day = df_kill.iloc[len(df_kill) - 1].Date.day - df_kill.iloc[0].Date.day
    # BUGFIX: test the zero-day window FIRST -- the original checked it after
    # branches that divide by nb_day, so a single-day window combined with a
    # negative delta raised ZeroDivisionError before the guard was reached.
    if nb_day == 0:
        monthreport = pd.DataFrame({'New cases': 'notEnoughdatas', 'Deaths': 'notEnoughdatas', 'daily average of New cases': 'notEnoughdatas', 'daily average of Deaths': 'notEnoughdatas', 'month': [month], 'Country': [country]})
    # Some source spreadsheets are inconsistent and produce negative deltas;
    # replace any negative figure (and its daily average) by NaN.
    elif (res_cases < 0) & (res_kill < 0):
        monthreport = pd.DataFrame({'New cases': [np.nan], 'Deaths': [np.nan], 'daily average of New cases': [np.nan], 'daily average of Deaths': [np.nan], 'month': [month], 'Country': [country]})
    elif (res_cases >= 0) & (res_kill < 0):
        monthreport = pd.DataFrame({'New cases': [res_cases], 'Deaths': [np.nan], 'daily average of New cases': [res_cases / nb_day], 'daily average of Deaths': [np.nan], 'month': [month], 'Country': [country]})
    elif (res_cases < 0) & (res_kill >= 0):
        monthreport = pd.DataFrame({'New cases': [np.nan], 'Deaths': [res_kill], 'daily average of New cases': [np.nan], 'daily average of Deaths': [res_kill / nb_day], 'month': [month], 'Country': [country]})
    else:
        monthreport = pd.DataFrame({'New cases': [res_cases], 'Deaths': [res_kill], 'daily average of New cases': [res_cases / nb_day], 'daily average of Deaths': [res_kill / nb_day], 'month': [month], 'Country': [country]})
    return monthreport
#check whether the dataframe holds any rows for the given month and country
def checkData(df, month, country):
    """Rows of *df* matching both *country* and *month* (empty if absent)."""
    return df.loc[(df['Country'] == country) & (df['Month'] == month)]
#return a dataframe with all the infos(daily new cases, daily death) for each month and each country
def getResults(data):
    """Build the full monthly report for every country in the data set.

    For each (country, month) pair that actually appears in *data*,
    compute the month summary via getmonthresults() and concatenate
    everything into a single DataFrame.
    """
    # Removed the unused `compteur` counter kept by the original.
    Countries = ['Guinea', 'Liberia', 'Sierra Leone']
    Months = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
              'August', 'September', 'October', 'November', 'December']
    results = []
    for country in Countries:
        for month in Months:
            # Only report pairs for which the data set holds at least one row.
            if not checkData(data, month, country).empty:
                results.append(getmonthresults(data, country, month))
    return pd.concat(results)
# import data from guinea
path_guinea = 'Ebola/guinea_data/'
data_guinea = importdata(path_guinea,'Date')
# set the new order / change the columns / keep only the relevant datas / add the name of the country
data_guinea = data_guinea[['Date', 'Description','Totals']]
data_guinea['Country'] = ['Guinea']*len(data_guinea)
#search for New cases and death!!
#descr(newcases): "Total cases of confirmed" // descr(deaths): "Total deaths of confirmed"
data_guinea = data_guinea.loc[(data_guinea.Description=='Total cases of confirmed')|(data_guinea.Description=='Total deaths of confirmed')]
#import data from liberia
path_liberia = 'Ebola/liberia_data/'
data_liberia = importdata(path_liberia,'Date')
# set the new order / change the columns / keep only the relevant datas / add the name of the country
data_liberia = data_liberia[['Date', 'Variable','National']]
data_liberia['Country'] = ['Liberia']*len(data_liberia)
#search for New cases and death!!
#descr(newcases): "Total confirmed cases" // descr(deaths): "Total death/s in confirmed cases"
data_liberia = data_liberia.loc[(data_liberia.Variable=='Total confirmed cases')|(data_liberia.Variable=='Total death/s in confirmed cases')]
#change the name of the columns to be able merge the 3 data sets
data_liberia = data_liberia.rename(columns={'Date': 'Date', 'Variable': 'Description','National':'Totals'})
#import data from sierra leonne
path_sl = 'Ebola/sl_data/'
data_sl = importdata(path_sl,'date')
# set the new order / change the columns / keep only the relevant datas / add the name of the country
data_sl = data_sl[['date', 'variable','National']]
data_sl['Country'] = ['Sierra Leone']*len(data_sl)
#search for new cases and death
#descr(newcases): "cum_confirmed" // descr(deaths): "death_confirmed"
data_sl = data_sl.loc[(data_sl.variable=='cum_confirmed')|(data_sl.variable=='death_confirmed')]
#change the name of the columns to be able merge the 3 data sets
data_sl = data_sl.rename(columns={'date': 'Date', 'variable': 'Description','National':'Totals'})
#merge the 3 dataframe into ONE which we'll apply our analysis
dataFrame = [data_guinea,data_liberia,data_sl]
data = pd.concat(dataFrame)
# Replace the NaN by 0;
data = data.fillna(0)
#add a column with the month
data = add_month(data)
#we now show the whole merged dataframe with the input of each file
data
#get the results from the data set -> see the function
results = getResults(data)
#print the resuults
results
Sheet10_Meta = pd.read_excel(DATA_FOLDER +'microbiome/metadata.xls')
allFiles = glob.glob(DATA_FOLDER + 'microbiome' + "/MID*.xls")
allFiles
# Read every MID*.xls spreadsheet, attach its metadata row, and stack the frames.
# Collect per-file frames in a list and concat once: DataFrame.append was
# removed in pandas 2.0 and repeated appends were O(n^2) anyway.
frames = []
for k, path in enumerate(allFiles):
    # Each spreadsheet has no header row: column 0 = taxon name, column 1 = count.
    df = pd.read_excel(path, header=None)
    # Attach the metadata of the corresponding spreadsheet (k-th row of the legend).
    df['BARCODE'] = Sheet10_Meta.at[k, 'BARCODE']
    df['GROUP'] = Sheet10_Meta.at[k, 'GROUP']
    df['SAMPLE'] = Sheet10_Meta.at[k, 'SAMPLE']
    frames.append(df)
Combined_data = pd.concat(frames) if frames else pd.DataFrame()
# Renaming the columns with meaningful names
Combined_data.columns = ['Name', 'Value', 'BARCODE', 'GROUP', 'SAMPLE']
Combined_data.head()
# Replacing the NaN values with unknown
Combined_data = Combined_data.fillna('unknown')
# Reseting the index
Combined_data = Combined_data.set_index('Name')
# Showing the result
Combined_data
'''
Here is a sample of the information in the titanic dataframe
'''
# Importing titanic.xls info with Pandas
titanic = pd.read_excel('Data/titanic.xls')
# printing only the 30 first and last rows of information
print(titanic.head)
'''
To describe the INTENDED values and types of the data we will show you the titanic.html file that was provided to us
Notice:
- 'age' is of type double, so someone can be 17.5 years old, mostly used with babies that are 0.x years old
- 'cabin' is stored as integer, but it har characters and letters
- By this model, embarked is stored as an integer, witch has to be interpreted as the 3 different embarkation ports
- It says that 'boat' is stored as a integer even though it has spaces and letters, it should be stored as string
PS: it might be that the information stored as integer is supposed to be categorical data,
...because they have a "small" amount of valid options
'''
# Display html info in Jupyter Notebook
from IPython.core.display import display, HTML
htmlFile = 'Data/titanic.html'
display(HTML(htmlFile))
'''
The default types of the data after import:
Notice:
- the strings and characters are imported as objects
- 'survived' is imported as int instead of double (which is in our opinion better since it's only 0 and 1
- 'sex' is imported as object not integer because it is a string
'''
titanic.dtypes
'''
Below you can see the value range of the different numerical values.
name, sex, ticket, cabin, embarked, boat and home.dest is not included because they can't be quantified numerically.
'''
titanic.describe()
'''
Additional information that is important to remember when manipulation the data
is if/where there are NaN values in the dataset
'''
# This displays the number of NaN there is in different attributes
print(pd.isnull(titanic).sum())
'''
Some of this data is missing while some is meant to describe 'No' or something of meaning.
Example:
Cabin has 1014 NaN in its column, it might be that every passenger had a cabin and the data is missing.
Or it could mean that most passengers did not have a cabin or a mix. The displayed titanic.html file
give us some insight if it is correct. It says that there are 0 NaN in the column. This indicates that
there are 1014 people without a cabin. Boat has also 823 NaN's, while the titanic lists 0 NaN's.
It is probably because most of those who died probably weren't in a boat.
'''
'''
What attributes should be stored as categorical information?
Categorical data is essentially 8-bit integers which means it can store up to 2^8 = 256 categories
Benefit is that it makes memory usage lower and it has a performance increase in calculations.
'''
print('Number of unique values in... :')
for attr in titanic:
print(" {attr}: {u}".format(attr=attr, u=len(titanic[attr].unique())))
'''
We think it will be smart to categorize: 'pclass', 'survived', 'sex', 'cabin', 'embarked' and 'boat'
because they have under 256 categories and don't have a strong numerical value like 'age'
'survived' is a bordercase because it might be more practical to work with integers in some settings
'''
# changing the attributes to categorical data
titanic.pclass = titanic.pclass.astype('category')
titanic.survived = titanic.survived.astype('category')
titanic.sex = titanic.sex.astype('category')
titanic.cabin = titanic.cabin.astype('category')
titanic.embarked = titanic.embarked.astype('category')
titanic.boat = titanic.boat.astype('category')
#Illustrate the change by printing out the new types
titanic.dtypes
#Plotting the ratio different classes(1st, 2nd and 3rd class) the passengers have
pc = titanic.pclass.value_counts().sort_index().plot(kind='bar')
pc.set_title('Travel classes')
pc.set_ylabel('Number of passengers')
pc.set_xlabel('Travel class')
pc.set_xticklabels(('1st class', '2nd class', '3rd class'))
plt.show(pc)
#Plotting the amount of people that embarked from different cities(C=Cherbourg, Q=Queenstown, S=Southampton)
em = titanic.embarked.value_counts().sort_index().plot(kind='bar')
em.set_title('Ports of embarkation')
em.set_ylabel('Number of passengers')
em.set_xlabel('Port of embarkation')
em.set_xticklabels(('Cherbourg', 'Queenstown', 'Southampton'))
plt.show(em)
#Plotting what sex the passengers are
sex = titanic.sex.value_counts().plot(kind='bar')
sex.set_title('Gender of the passengers')
sex.set_ylabel('Number of Passengers')
sex.set_xlabel('Gender')
sex.set_xticklabels(('Female', 'Male'))
plt.show(sex)
#Plotting agegroup of passengers
bins = [0,10,20,30,40,50,60,70,80]
age_grouped = pd.DataFrame(pd.cut(titanic.age, bins))
ag = age_grouped.age.value_counts().sort_index().plot.bar()
ag.set_title('Age of Passengers ')
ag.set_ylabel('Number of passengers')
ag.set_xlabel('Age groups')
plt.show(ag)
'''
Parsing the cabinfloor, into floors A, B, C, D, E, F, G, T and display in a pie chart
'''
# Dropping NaN (People without cabin)
cabin_floors = titanic.cabin.dropna()
# removes digits and spaces, leaving only the deck letters (e.g. "C123 C125" -> "CC")
# NOTE(review): pandas >= 1.2 requires regex=True for pattern replacement — confirm version
cabin_floors = cabin_floors.str.replace(r'[\d ]+', '')
# removes duplicate letters and leave unique (CC -> C) (FG -> G)
# lookahead keeps only the LAST occurrence of each repeated character
cabin_floors = cabin_floors.str.replace(r'(.)(?=.*\1)', '')
# removes ambigous data from the dataset (FE -> NaN)(FG -> NaN)
# any entry still longer than one letter spans multiple decks and is discarded
cabin_floors = cabin_floors.str.replace(r'([A-Z]{1})\w+', 'NaN' )
# Recategorizing (Since we altered the entries, we messed with the categories)
cabin_floors = cabin_floors.astype('category')
# Removing NaN (in this case ambiguous data) — drop the placeholder category, then the rows
cabin_floors = cabin_floors.cat.remove_categories('NaN')
cabin_floors = cabin_floors.dropna()
# Preparing data for plt.pie: per-deck counts and matching labels
numberOfCabinPlaces = cabin_floors.count()
grouped = cabin_floors.groupby(cabin_floors).count()
sizes = np.array(grouped)
labels = np.array(grouped.index)
# Plotting the pie chart
plt.pie(sizes, labels=labels, autopct='%1.1f%%', pctdistance=0.75, labeldistance=1.1)
print("There are {cabin} passengers that have cabins and {nocabin} passengers without a cabin"
      .format(cabin=numberOfCabinPlaces, nocabin=(len(titanic) - numberOfCabinPlaces)))
# function that returns the number of people that survived and died given a specific travelclass
def survivedPerClass(pclass):
    """Count survivors and fatalities among passengers of one travel class.

    Returns [survived, died]. Uses a single combined boolean mask instead of
    the original chained boolean indexing, which filtered the series twice
    and relies on fragile index alignment of the second mask.
    """
    in_class = titanic.pclass == pclass
    survived = int(((titanic.survived == 1) & in_class).sum())
    died = int(((titanic.survived == 0) & in_class).sum())
    return [survived, died]
# Fixing the layout horizontal
the_grid = plt.GridSpec(1, 3)
labels = ["Survived", "Died"]
# Each iteration plots a pie chart
for p in titanic.pclass.unique():
sizes = survivedPerClass(p)
plt.subplot(the_grid[0, p-1], aspect=1 )
plt.pie(sizes, labels=labels, autopct='%1.1f%%')
plt.show()
# group by selected data and get a count for each category
survivalrate = titanic.groupby(['pclass', 'sex', 'survived']).size()
# calculate percentage
survivalpercentage = survivalrate.groupby(level=['pclass', 'sex']).apply(lambda x: x / x.sum() * 100)
# plotting in a histogram
histogram = survivalpercentage.filter(like='1', axis=0).plot(kind='bar')
histogram.set_title('Proportion of the passengers that survived by travel class and sex')
histogram.set_ylabel('Percent likelyhood of surviving titanic')
histogram.set_xlabel('class/gender group')
plt.show(histogram)
#drop NaN rows
age_without_nan = titanic.age.dropna()
#categorizing
age_categories = pd.qcut(age_without_nan, 2, labels=["Younger", "Older"])
#Numbers to explain difference
median = int(np.float64(age_without_nan.median()))
amount = int(age_without_nan[median])
print("The Median age is {median} years old".format(median = median))
print("and there are {amount} passengers that are {median} year old \n".format(amount=amount, median=median))
print(age_categories.groupby(age_categories).count())
print("\nAs you can see the pd.qcut does not cut into entirely equal sized bins, because the age is of a discreet nature")
# imported for the sake of surpressing some warnings
import warnings
warnings.filterwarnings('ignore')
# extract relevant attributes
csas = titanic[['pclass', 'sex', 'age', 'survived']]
csas.dropna(subset=['age'], inplace=True)
# Defining the categories
csas['age_group'] = csas.age > csas.age.median()
csas['age_group'] = csas['age_group'].map(lambda age_category: 'older' if age_category else "younger")
# Converting to int to make it able to aggregate and give percentage
csas.survived = csas.survived.astype(int)
g_categories = csas.groupby(['pclass', 'age_group', 'sex'])
result = pd.DataFrame(g_categories.survived.mean()).rename(columns={'survived': 'survived proportion'})
# reset current index and spesify the unique index
result.reset_index(inplace=True)
unique_index = result.pclass.astype(str) + ': ' + result.age_group.astype(str) + ' ' + result.sex.astype(str)
# Finalize the unique index dataframe
result_w_unique = result[['survived proportion']]
result_w_unique.set_index(unique_index, inplace=True)
print(result_w_unique)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Task 1. Compiling Ebola Data
Step2: Task 2. RNA Sequences
Step3: Creating and filling the DataFrame
Step4: 3. Cleaning and reindexing
Step5: Task 3. Class War in Titanic
Step6: Question 3.2
Step7: Question 3.3
Step8: Question 3.4
Step9: Question 3.5
Step10: Question 3.6
|
2,064
|
<ASSISTANT_TASK:>
Python Code:
def least_squares(y, tx):
    """Solve the normal equations for ordinary least squares.

    Returns the weight vector w that minimizes ||y - tx @ w||^2.
    """
    gram = tx.T @ tx
    rhs = tx.T @ y
    return np.linalg.solve(gram, rhs)
from helpers import *
def test_your_least_squares():
    """Fit a least-squares line to the height/weight data and print the weights."""
    height, weight, gender = load_data_from_ex02(sub_sample=False, add_outlier=False)
    x, mean_x, std_x = standardize(height)
    y, tx = build_model_data(x, weight)
    print(least_squares(y, tx))

test_your_least_squares()
# load dataset
x, y = load_data()
print("shape of x {}".format(x.shape))
print("shape of y {}".format(y.shape))
def build_poly(x, degree):
    """Polynomial feature expansion of x: columns x^0, x^1, ..., x^degree."""
    columns = [np.ones(len(x))]
    columns += [np.power(x, d) for d in range(1, degree + 1)]
    return np.column_stack(columns)
from costs import compute_mse
from plots import *
def polynomial_regression():
    """Fit least-squares models on polynomial expansions of the data.

    Runs one regression per degree, reports the training RMSE, and draws
    the fitted curves on a 2x2 grid of subplots saved to disk.
    """
    degrees = [1, 3, 7, 12]
    num_row, num_col = 2, 2
    f, axs = plt.subplots(num_row, num_col)
    for ind, degree in enumerate(degrees):
        # expand the raw inputs, then solve the least-squares problem
        tx = build_poly(x, degree)
        weights = least_squares(y, tx)
        rmse = np.sqrt(2 * compute_mse(y, tx, weights))
        print("Processing {i}th experiment, degree={d}, rmse={loss}".format(
            i=ind + 1, d=degree, loss=rmse))
        row, col = divmod(ind, num_col)
        plot_fitted_curve(y, x, weights, degree, axs[row][col])
    plt.tight_layout()
    plt.savefig("visualize_polynomial_regression")
    plt.show()

polynomial_regression()
def split_data(x, y, ratio, seed=1):
    """Randomly split (x, y) into train and test parts.

    `ratio` is the fraction assigned to the training split; `seed` fixes the
    permutation for reproducibility. Returns (x_train, x_test, y_train, y_test).
    """
    np.random.seed(seed)
    shuffled = np.random.permutation(len(y))
    cut = int(np.floor(ratio * len(y)))
    train_idx, test_idx = shuffled[:cut], shuffled[cut:]
    return x[train_idx], x[test_idx], y[train_idx], y[test_idx]
def train_test_split_demo(x, y, degree, ratio, seed):
    """Fit a degree-`degree` polynomial on a train split and report RMSE on both splits."""
    x_tr, x_te, y_tr, y_te = split_data(x, y, ratio, seed)
    tx_tr = build_poly(x_tr, degree)
    tx_te = build_poly(x_te, degree)
    weight = least_squares(y_tr, tx_tr)
    # root-mean-square error on each split for the single fitted weight vector
    rmse = lambda yy, txx: np.sqrt(2 * compute_mse(yy, txx, weight))
    print("proportion={p}, degree={d}, Training RMSE={tr:.3f}, Testing RMSE={te:.3f}".format(
        p=ratio, d=degree, tr=rmse(y_tr, tx_tr), te=rmse(y_te, tx_te)))

seed = 6
degrees = [1, 3, 7, 12]
split_ratios = [0.9, 0.5, 0.1]
for split_ratio in split_ratios:
    for degree in degrees:
        train_test_split_demo(x, y, degree, split_ratio, seed)
def ridge_regression(y, tx, lambda_):
    """Closed-form ridge regression with penalty matrix 2*N*lambda_*I."""
    n_samples, n_features = tx.shape
    penalized_gram = tx.T @ tx + 2 * n_samples * lambda_ * np.eye(n_features)
    return np.linalg.solve(penalized_gram, tx.T @ y)
def ridge_regression_demo(x, y, degree, ratio, seed):
    """Sweep the ridge penalty and plot train/test RMSE as a function of lambda."""
    lambdas = np.logspace(-5, 0, 15)
    x_tr, x_te, y_tr, y_te = split_data(x, y, ratio, seed)
    tx_tr = build_poly(x_tr, degree)
    tx_te = build_poly(x_te, degree)
    rmse_tr = []
    rmse_te = []
    for lambda_ in lambdas:
        weight = ridge_regression(y_tr, tx_tr, lambda_)
        train_rmse = np.sqrt(2 * compute_mse(y_tr, tx_tr, weight))
        test_rmse = np.sqrt(2 * compute_mse(y_te, tx_te, weight))
        rmse_tr.append(train_rmse)
        rmse_te.append(test_rmse)
        print("proportion={p}, degree={d}, lambda={l:.3f}, Training RMSE={tr:.3f}, Testing RMSE={te:.3f}".format(
            p=ratio, d=degree, l=lambda_, tr=train_rmse, te=test_rmse))
    plot_train_test(rmse_tr, rmse_te, lambdas, degree)

seed = 56
degree = 7
split_ratio = 0.5
ridge_regression_demo(x, y, degree, split_ratio, seed)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 1 Least squares and linear basis functions models
Step2: Load the data
Step3: Test it here
Step5: 1.2 Least squares with a linear basis function model
Step7: Let us play with polynomial regression. Note that we will use your implemented function compute_mse. Please copy and paste your implementation from exercise02.
Step8: Run polynomial regression
Step10: 2 Evaluating model predication performance
Step12: Then, test your split_data function below.
Step13: Demo time
Step16: Ridge Regression
Step17: Demo time
|
2,065
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('seaborn')
# Import the example plot from the figures directory
from fig_code import plot_sgd_separator
plot_sgd_separator()
from fig_code import plot_linear_regression
plot_linear_regression()
from IPython.core.display import Image, display
display(Image(filename='images/iris_setosa.jpg'))
print("Iris Setosa\n")
display(Image(filename='images/iris_versicolor.jpg'))
print("Iris Versicolor\n")
display(Image(filename='images/iris_virginica.jpg'))
print("Iris Virginica")
from sklearn.datasets import load_iris
iris = load_iris()
iris.keys()
n_samples, n_features = iris.data.shape
print((n_samples, n_features))
print(iris.data[10])
print(iris.data.shape)
print(iris.target.shape)
print(iris.target)
print(iris.target_names)
import numpy as np
import matplotlib.pyplot as plt
x_index = 2
y_index = 1
# this formatter will label the colorbar with the correct target names
formatter = plt.FuncFormatter(lambda i, *args: iris.target_names[int(i)])
plt.scatter(iris.data[:, x_index], iris.data[:, y_index],
c=iris.target, cmap=plt.cm.get_cmap('RdYlBu', 3))
plt.colorbar(ticks=[0, 1, 2], format=formatter)
plt.clim(-0.5, 2.5)
plt.xlabel(iris.feature_names[x_index])
plt.ylabel(iris.feature_names[y_index]);
from sklearn import datasets
# Type datasets.fetch_<TAB> or datasets.load_<TAB> in IPython to see all possibilities
# datasets.fetch_
# datasets.load_
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: This may seem like a trivial task, but it is a simple version of a very important concept.
Step2: Again, this is an example of fitting a model to data, such that the model can make
Step3: Quick Question
Step4: This data is four dimensional, but we can visualize two of the dimensions
Step5: Quick Exercise
|
2,066
|
<ASSISTANT_TASK:>
Python Code:
import datetime
import Image
import gc
import numpy as np
import os
import random
from scipy import misc
import string
import time
# Set some Theano config before initializing
os.environ["THEANO_FLAGS"] = "mode=FAST_RUN,device=cpu,floatX=float32,allow_gc=False,openmp=True"
import theano
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import emotion_model
import dwdii_transforms
random.seed(20275)
print "device:", theano.config.device
print "floatX:", theano.config.floatX
print "mode:", theano.config.mode
print "openmp:", theano.config.openmp
print "allow_gc:", theano.config.allow_gc
imagePath = "/root/facial_expressions/images"
dataPath = "/root/facial_expressions/data/legend.csv"
imgResize = (150, 150)
os.listdir('/root/facial_expressions/data')
maxData = 1845
X_data, Y_data = dwdii_transforms.load_data(dataPath, imagePath, maxData = maxData, verboseFreq = 200, imgResize=imgResize)
print X_data.shape
print Y_data.shape
imgDataGenCount = 12
transformCount = 9 + imgDataGenCount
newImgs = np.zeros([X_data.shape[0] * transformCount, X_data.shape[1], X_data.shape[2]])
newYs = np.zeros([Y_data.shape[0] * transformCount, Y_data.shape[1]], dtype=np.int8)
print newImgs.shape
print newYs.shape
img = X_data[0]
img.shape
ndx = 0
# The nine fixed transforms, applied in the original order. reflectY appears
# twice on purpose: the original pipeline used it for both slot 0 and slot 9,
# so transformCount = 9 static + imgDataGenCount generated images per input.
static_transforms = [
    dwdii_transforms.reflectY,
    dwdii_transforms.cvDilate,
    dwdii_transforms.cvErode,
    dwdii_transforms.cvDilate2,
    # dwdii_transforms.cvMedianBlur,  # disabled in the original pipeline
    dwdii_transforms.cvExcessiveSharpening,
    dwdii_transforms.cvEdgeEnhancement,
    dwdii_transforms.cvBlurMotion1,
    dwdii_transforms.cvBlurMotion2,
    dwdii_transforms.reflectY,
]
for i in range(X_data.shape[0]):
    img = X_data[i]
    # deterministic transforms: one augmented copy per entry in the list
    for transform in static_transforms:
        newImgs[ndx] = transform(img)
        newYs[ndx] = Y_data[i]
        ndx += 1
    # stochastic transforms from the Keras-style image data generator
    for n in range(imgDataGenCount):
        imgX = emotion_model.imageDataGenTransform(img, Y_data[i])
        # generator returns a (150, 150, 1)-style array; flatten back to 2-D
        newImgs[ndx] = imgX.reshape(150, 150)
        newYs[ndx] = Y_data[i]
        ndx += 1

print("Done", str(datetime.datetime.now()))
import numpy
print numpy.version.version
print numpy.__version__
gc.collect()
X_data2 = np.concatenate((X_data, newImgs))
Y_data2 = np.concatenate((Y_data, newYs))
print X_data2.shape
print Y_data2.shape
skippedTransforms = False
if skippedTransforms:
X_data2 = X_data
Y_data2 = Y_data
gc.collect()
def unison_shuffled_copies(a, b):
    """Shuffle two equal-length arrays with the same random permutation.

    Adapted from http://stackoverflow.com/a/4602224/2604144
    """
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]
# First shuffle the data
X_data2, Y_data2 = unison_shuffled_copies(X_data2, Y_data2)
# Split the data into Training and Test sets
trainNdx = int(X_data2.shape[0] * .8)
print trainNdx
X_train, X_test = np.split(X_data2, [trainNdx])
Y_train, Y_test = np.split(Y_data2, [trainNdx])
print X_train.shape
print X_test.shape
print Y_train.shape
print Y_test.shape
# Map the emotions to integers for categorization later.
emotions = dwdii_transforms.emotionNumerics()
print emotions
print len(emotions)
#model = emotion_model.emotion_model_v3_2(len(emotions), verbose=True)
model = emotion_model.emotion_model_jh_v5(len(emotions), verbose=True,
input_shape=(1,X_train.shape[1],X_train.shape[2]))
#print(model.summary())
# Reshape to the appropriate shape for the CNN input
testX = X_test.reshape(X_test.shape[0], 1, X_train.shape[1],X_train.shape[2])
trainX = X_train.reshape(X_train.shape[0], 1, X_train.shape[1],X_train.shape[2])
loadWeights = False
if loadWeights:
model.load_weights("dwdii-emo-01vjh-1-Cloud.hdf5")
print "Training start: " + str(datetime.datetime.now())
m, h = emotion_model.run_network([trainX, testX, Y_train, Y_test], model, batch=200, epochs=30, verbosity=1)
model.save_weights("dwdii-emo-150-jhv5-21tf-30e-Cloud.hdf5", overwrite=True)
predictOutput = model.predict(testX)
predictOutput[0]
import collections
prMetrics = {}
# For each emotion
for e in emotions.keys():
prMetrics[e] = collections.defaultdict(int)
print prMetrics
numEmo = dwdii_transforms.numericEmotions()
print numEmo
# For each predicted image
for i in range(len(predictOutput)):
arPred = np.array(predictOutput[i])
predictionProb = arPred.max()
predictionNdx = arPred.argmax()
predictedEmo = numEmo[predictionNdx]
# True Positives
if predictionNdx == Y_test[i]:
prMetrics[predictedEmo]["TruePos"] += 1.0
# False Positives
else:
prMetrics[predictedEmo]["FalsePos"] += 1.0
# Look for false negatives
for i in range(len(Y_test)):
arPred = np.array(predictOutput[i])
predictionProb = arPred.max()
predictionNdx = arPred.argmax()
predictedEmo = numEmo[predictionNdx]
yEmo = numEmo[int(Y_test[i])]
if Y_test[i] == predictionNdx:
# Ok
pass
else:
prMetrics[yEmo]["FalseNeg"] += 1.0
prMetrics
emotionPrecision = {}
emotionRecall = {}
for p in prMetrics:
emotionPrecision[p] = prMetrics[p]["TruePos"] / ( prMetrics[p]["TruePos"] + prMetrics[p]["FalsePos"])
emotionRecall[p] = prMetrics[p]["TruePos"] /( prMetrics[p]["TruePos"] + prMetrics[p]["FalseNeg"])
print "Precision by Emotion"
print "--------------------"
for e in emotionPrecision:
print e, emotionPrecision[e]
print
print "Recall by Emotion"
print "--------------------"
for e in emotionRecall:
print e, emotionRecall[e]
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load Training and Test Data
Step2: Transformations
Step4: Split Training/Test Sets
Step5: Define the Model
Step6: Our model is a convolutional neural network with 4 hidden layers.
Step7: Training the Model
Step8: Model v2
Step9: Precision & Recall
|
2,067
|
<ASSISTANT_TASK:>
Python Code:
print("Hello World")
# sample function
def add(op1, op2):
    """Return the sum (or concatenation) of the two operands."""
    total = op1 + op2
    return total
# Integers
var1 = 10
var2 = 20
var3 = add(var1, var2)
print(var3)
# Floats
var1, var2 = 1.5, 2.6 # multiple assignment
print(add(var1, var2))
# Strings
var1 = "ABCD"
var2 = "EFGH"
var3 = add(var1, var2)
print(var3)
print(">> With great power comes great responsibility!")
x = "Hello"
print(type(x))
x = 10
print(type(x))
x = 1e10
print(type(x))
x = True
print(type(x))
x = None
print(type(x))
class User(object):
    """Simple record holding a user's display name and e-mail address."""

    def __init__(self, name, email):
        # Plain attribute assignment; no validation is performed.
        self.name = name
        self.email = email
u1 = User(name="TG", email="tg@isi.edu")
print(u1.name, u1.email)
u1 = None # in python equivalent of NULL is None
print(u1)
# GC will free the above object when it needs memory
import json
u1 = User(name="TG", email="tg@isi.edu")
print(json.dumps(u1.__dict__))
# or, another way
®®from json import dumps
print(dumps(u1.__dict__))
# aliasing, yet another way
from json import dumps as to_json
print(to_json(u1.__dict__))
# Another example
import numpy as np
A = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
print(A)
AT_A = np.matmul(A.transpose(), A)
print(AT_A)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Two major version branches
Step2: Mis-Conceptions
Step3: Automatic Memory Management
Step4: General Purpose
|
2,068
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import Online_temporal_clustering_JSI_release as OTC
import Utilities_JSI_release as Util
from sklearn.preprocessing import scale
###########################################
# parameters
np.random.seed(2)
tolerance = 22
activePool = 3
minDur = 16
OTC.deltaT = tolerance #bigger number bigger clusters, tends to combine small clusters with big ones
OTC.memoryDelta = tolerance +1 #constant
OTC.num_clusterss = activePool #bigger number scattered clusters, lots of empty space... if you increase this, also increase the memory parameters
OTC.threshold_cluster_size = minDur
# Load the data (features already extracted)
# data Format: [timestamp, f1, f2, f3, ... fn, label]
data_features = np.loadtxt('data_JSI/data_features_1.csv', delimiter=';')
features_list = [1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 17, 18, 19, 20, 21, 22, 23, 24]
data_features = sorted(data_features, key=lambda a_entry: a_entry[0])
data_array = np.array(data_features)
# Select features
data_array[:, features_list] = scale(data_array[:, features_list])
dataAll = np.column_stack((data_array[:, [3, 5]], data_array[:, 0], data_array[:, -1]))
points = data_array[:, features_list]
timestamps = dataAll[:, [2]]
n = len(points)
start = time.time()
# Perform the clustering
c = OTC.OnlineCluster(OTC.num_clusterss)
for ind1, point in enumerate(points):
c.cluster(point, timestamps[ind1])
clusters = c.trimclusters()
n_clusters_ = len(clusters)
print "Clustered %d points in %.2f seconds and found %d clusters." % (n, time.time() - start, n_clusters_)
# Validation and Visualization of the clusters
clusters = Util.removeContained(clusters)
data_array2 = Util.remove_small_activities(data_array, dataAll[:, [3]], minDur)
dataAll2 = np.column_stack((data_array2[:, [3, 5]], data_array2[:, 0], data_array2[:, -1]))
activity_means = Util.get_activity_means(np.column_stack((data_array2[:, features_list], data_array2[:, [0, -1]])))
activities_set = list(set(dataAll2[:, [3]].T[0]))
dict_activity_index_colour = dict(zip(activities_set, np.arange(len(activities_set)))) # {1:0, 2:1, 6:2, 32:3}
# find the closest activity to each cluster and assign color
cluster_segments, cluster_segments_complex, cluster_colors_set, cluster_array, ratios = \
Util.findClosestActivity(clusters, activity_means, dict_activity_index_colour)
#Validate and visualize
confusion_matrix_detailed, hungarian_matrix, result = \
Util.validation(cluster_colors_set, dataAll, dict_activity_index_colour, activities_set,
cluster_segments_complex, True, [], cluster_array, [], n_clusters_,
cluster_segments, minDur, True)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Data
Step2: Clustering
Step3: Validation and Visualization
|
2,069
|
<ASSISTANT_TASK:>
Python Code:
import locale
import glob
import os.path
import requests
import tarfile
import sys
import codecs
import smart_open
dirname = 'aclImdb'
filename = 'aclImdb_v1.tar.gz'
locale.setlocale(locale.LC_ALL, 'C')
if sys.version > '3':
control_chars = [chr(0x85)]
else:
control_chars = [unichr(0x85)]
# Convert text to lower-case and strip punctuation/symbols from words
def normalize_text(text):
    """Lower-case `text`, replace HTML line breaks with spaces, and pad each
    punctuation mark with surrounding spaces so it tokenizes on its own."""
    lowered = text.lower().replace('<br />', ' ')
    # one-pass char -> " char " expansion instead of nine chained replaces
    pad_table = str.maketrans({mark: ' ' + mark + ' ' for mark in '."(),!?;:'})
    return lowered.translate(pad_table)
import time
import smart_open
start = time.clock()
if not os.path.isfile('aclImdb/alldata-id.txt'):
if not os.path.isdir(dirname):
if not os.path.isfile(filename):
# Download IMDB archive
print("Downloading IMDB archive...")
url = u'http://ai.stanford.edu/~amaas/data/sentiment/' + filename
r = requests.get(url)
with smart_open.smart_open(filename, 'wb') as f:
f.write(r.content)
tar = tarfile.open(filename, mode='r')
tar.extractall()
tar.close()
# Concatenate and normalize test/train data
print("Cleaning up dataset...")
folders = ['train/pos', 'train/neg', 'test/pos', 'test/neg', 'train/unsup']
alldata = u''
for fol in folders:
temp = u''
output = fol.replace('/', '-') + '.txt'
# Is there a better pattern to use?
txt_files = glob.glob(os.path.join(dirname, fol, '*.txt'))
for txt in txt_files:
with smart_open.smart_open(txt, "rb") as t:
t_clean = t.read().decode("utf-8")
for c in control_chars:
t_clean = t_clean.replace(c, ' ')
temp += t_clean
temp += "\n"
temp_norm = normalize_text(temp)
with smart_open.smart_open(os.path.join(dirname, output), "wb") as n:
n.write(temp_norm.encode("utf-8"))
alldata += temp_norm
with smart_open.smart_open(os.path.join(dirname, 'alldata-id.txt'), 'wb') as f:
for idx, line in enumerate(alldata.splitlines()):
num_line = u"_*{0} {1}\n".format(idx, line)
f.write(num_line.encode("utf-8"))
end = time.clock()
print ("Total running time: ", end-start)
import os.path
assert os.path.isfile("aclImdb/alldata-id.txt"), "alldata-id.txt unavailable"
import gensim
from gensim.models.doc2vec import TaggedDocument
from collections import namedtuple
from smart_open import smart_open
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')

alldocs = []  # Will hold all docs in original order
with smart_open('aclImdb/alldata-id.txt', 'rb') as alldata:
    # BUG FIX: split the decoded content into lines. The original rebound
    # `alldata` to the whole string and enumerated it, which iterates over
    # individual *characters*, not documents.
    for line_no, line in enumerate(alldata.read().decode('utf-8').splitlines()):
        tokens = gensim.utils.to_unicode(line).split()
        words = tokens[1:]
        tags = [line_no]  # 'tags = [tokens[0]]' would also work at extra memory cost
        split = ['train', 'test', 'extra', 'extra'][line_no//25000]  # 25k train, 25k test, 25k extra
        sentiment = [1.0, 0.0, 1.0, 0.0, None, None, None, None][line_no//12500]  # [12.5K pos, 12.5K neg]*2 then unknown
        alldocs.append(SentimentDocument(words, tags, split, sentiment))

train_docs = [doc for doc in alldocs if doc.split == 'train']
test_docs = [doc for doc in alldocs if doc.split == 'test']
doc_list = alldocs[:]  # For reshuffling per pass

print('%d docs: %d train-sentiment, %d test-sentiment' % (len(doc_list), len(train_docs), len(test_docs)))
from gensim.models import Doc2Vec
import gensim.models.doc2vec
from collections import OrderedDict
import multiprocessing
cores = multiprocessing.cpu_count()
assert gensim.models.doc2vec.FAST_VERSION > -1, "This will be painfully slow otherwise"
simple_models = [
# PV-DM w/ concatenation - window=5 (both sides) approximates paper's 10-word total window size
Doc2Vec(dm=1, dm_concat=1, size=100, window=5, negative=5, hs=0, min_count=2, workers=cores),
# PV-DBOW
Doc2Vec(dm=0, size=100, negative=5, hs=0, min_count=2, workers=cores),
# PV-DM w/ average
Doc2Vec(dm=1, dm_mean=1, size=100, window=10, negative=5, hs=0, min_count=2, workers=cores),
]
# Speed up setup by sharing results of the 1st model's vocabulary scan
simple_models[0].build_vocab(alldocs) # PV-DM w/ concat requires one special NULL word so it serves as template
print(simple_models[0])
for model in simple_models[1:]:
model.reset_from(simple_models[0])
print(model)
models_by_name = OrderedDict((str(model), model) for model in simple_models)
from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
models_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[2]])
models_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[0]])
import numpy as np
import statsmodels.api as sm
from random import sample
# For timing
from contextlib import contextmanager
from timeit import default_timer
import time
@contextmanager
def elapsed_timer():
    """Context manager yielding a zero-argument callable reporting elapsed seconds.

    While the managed block is running, the callable returns the time elapsed
    so far; after the block exits, it returns the frozen total duration.
    """
    start = default_timer()
    elapser = lambda: default_timer() - start
    # Yield an indirection rather than `elapser` itself: the yielded lambda
    # looks up `elapser` late, so rebinding it below retroactively changes
    # what callers of the yielded function see once the block has finished.
    yield lambda: elapser()
    end = default_timer()
    # Freeze the result: from now on the yielded callable reports end - start.
    elapser = lambda: end-start
def logistic_predictor_from_data(train_targets, train_regressors):
    """Fit and return a statsmodels logistic-regression predictor (silent fit)."""
    model = sm.Logit(train_targets, train_regressors)
    return model.fit(disp=0)
def error_rate_for_model(test_model, train_set, test_set, infer=False, infer_steps=3, infer_alpha=0.1, infer_subsample=0.1):
    """Report the sentiment-prediction error rate of ``test_model``.

    A logistic-regression classifier is fit on the stored training document
    vectors, then evaluated on ``test_set``.  When ``infer`` is true, test
    vectors are re-inferred from the raw words (optionally on a random
    subsample) instead of using the vectors stored during training.

    Returns a tuple ``(error_rate, error_count, evaluated_count, predictor)``.
    """
    train_targets, train_regressors = zip(*[(doc.sentiment, test_model.docvecs[doc.tags[0]]) for doc in train_set])
    train_regressors = sm.add_constant(train_regressors)
    predictor = logistic_predictor_from_data(train_targets, train_regressors)

    test_data = test_set
    if infer:
        if infer_subsample < 1.0:
            test_data = sample(test_data, int(infer_subsample * len(test_data)))
        test_regressors = [test_model.infer_vector(doc.words, steps=infer_steps, alpha=infer_alpha) for doc in test_data]
    else:
        # Bug fix: the original iterated the *global* `test_docs` here instead
        # of the `test_set` argument, silently ignoring the caller's test set.
        test_regressors = [test_model.docvecs[doc.tags[0]] for doc in test_data]
    test_regressors = sm.add_constant(test_regressors)

    # Predict & evaluate: round the predicted probabilities to 0/1 labels.
    test_predictions = predictor.predict(test_regressors)
    corrects = sum(np.rint(test_predictions) == [doc.sentiment for doc in test_data])
    errors = len(test_predictions) - corrects
    error_rate = float(errors) / len(test_predictions)
    return (error_rate, errors, len(test_predictions), predictor)
from collections import defaultdict
best_error = defaultdict(lambda: 1.0) # To selectively print only best errors achieved
from random import shuffle
import datetime
alpha, min_alpha, passes = (0.025, 0.001, 20)
alpha_delta = (alpha - min_alpha) / passes
print("START %s" % datetime.datetime.now())
for epoch in range(passes):
shuffle(doc_list) # Shuffling gets best results
for name, train_model in models_by_name.items():
# Train
duration = 'na'
train_model.alpha, train_model.min_alpha = alpha, alpha
with elapsed_timer() as elapsed:
train_model.train(doc_list, total_examples=len(doc_list), epochs=1)
duration = '%.1f' % elapsed()
# Evaluate
eval_duration = ''
with elapsed_timer() as eval_elapsed:
err, err_count, test_count, predictor = error_rate_for_model(train_model, train_docs, test_docs)
eval_duration = '%.1f' % eval_elapsed()
best_indicator = ' '
if err <= best_error[name]:
best_error[name] = err
best_indicator = '*'
print("%s%f : %i passes : %s %ss %ss" % (best_indicator, err, epoch + 1, name, duration, eval_duration))
if ((epoch + 1) % 5) == 0 or epoch == 0:
eval_duration = ''
with elapsed_timer() as eval_elapsed:
infer_err, err_count, test_count, predictor = error_rate_for_model(train_model, train_docs, test_docs, infer=True)
eval_duration = '%.1f' % eval_elapsed()
best_indicator = ' '
if infer_err < best_error[name + '_inferred']:
best_error[name + '_inferred'] = infer_err
best_indicator = '*'
print("%s%f : %i passes : %s %ss %ss" % (best_indicator, infer_err, epoch + 1, name + '_inferred', duration, eval_duration))
print('Completed pass %i at alpha %f' % (epoch + 1, alpha))
alpha -= alpha_delta
print("END %s" % str(datetime.datetime.now()))
# Print best error rates achieved
print("Err rate Model")
for rate, name in sorted((rate, name) for name, rate in best_error.items()):
print("%f %s" % (rate, name))
doc_id = np.random.randint(simple_models[0].docvecs.count) # Pick random doc; re-run cell for more examples
print('for doc %d...' % doc_id)
for model in simple_models:
inferred_docvec = model.infer_vector(alldocs[doc_id].words)
print('%s:\n %s' % (model, model.docvecs.most_similar([inferred_docvec], topn=3)))
import random
doc_id = np.random.randint(simple_models[0].docvecs.count) # pick random doc, re-run cell for more examples
model = random.choice(simple_models) # and a random model
sims = model.docvecs.most_similar(doc_id, topn=model.docvecs.count) # get *all* similar documents
print(u'TARGET (%d): «%s»\n' % (doc_id, ' '.join(alldocs[doc_id].words)))
print(u'SIMILAR/DISSIMILAR DOCS PER MODEL %s:\n' % model)
for label, index in [('MOST', 0), ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
print(u'%s %s: «%s»\n' % (label, sims[index], ' '.join(alldocs[sims[index][0]].words)))
word_models = simple_models[:]
import random
from IPython.display import HTML
# pick a random word with a suitable number of occurences
while True:
word = random.choice(word_models[0].wv.index2word)
if word_models[0].wv.vocab[word].count > 10:
break
# or uncomment below line, to just pick a word from the relevant domain:
#word = 'comedy/drama'
similars_per_model = [str(model.most_similar(word, topn=20)).replace('), ','),<br>\n') for model in word_models]
similar_table = ("<table><tr><th>" +
"</th><th>".join([str(model) for model in word_models]) +
"</th></tr><tr><td>" +
"</td><td>".join(similars_per_model) +
"</td></tr></table>")
print("most similar words for '%s' (%d occurences)" % (word, simple_models[0].wv.vocab[word].count))
HTML(similar_table)
# Download this file: https://github.com/nicholas-leonard/word2vec/blob/master/questions-words.txt
# and place it in the local directory
# Note: this takes many minutes
if os.path.isfile('questions-words.txt'):
for model in word_models:
sections = model.accuracy('questions-words.txt')
correct, incorrect = len(sections[-1]['correct']), len(sections[-1]['incorrect'])
print('%s: %0.2f%% correct (%d of %d)' % (model, float(correct*100)/(correct+incorrect), correct, correct+incorrect))
This cell left intentionally erroneous.
from gensim.models import KeyedVectors
w2v_g100b = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True)
w2v_g100b.compact_name = 'w2v_g100b'
word_models.append(w2v_g100b)
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
rootLogger = logging.getLogger()
rootLogger.setLevel(logging.INFO)
%load_ext autoreload
%autoreload 2
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The text data is small enough to be read into memory.
Step2: Set-up Doc2Vec Training & Evaluation Models
Step3: Le and Mikolov notes that combining a paragraph vector from Distributed Bag of Words (DBOW) and Distributed Memory (DM) improves performance. We will follow, pairing the models together for evaluation. Here, we concatenate the paragraph vectors obtained from each model.
Step5: Predictive Evaluation Methods
Step6: Bulk Training
Step7: Achieved Sentiment-Prediction Accuracy
Step8: In our testing, contrary to the results of the paper, PV-DBOW performs best. Concatenating vectors from different models only offers a small predictive improvement over averaging vectors. The best results reproduced are just under 10% error rate, still a long way from the paper's reported 7.42% error rate.
Step9: (Yes, here the stored vector from 20 epochs of training is usually one of the closest to a freshly-inferred vector for the same words. Note the defaults for inference are very abbreviated – just 3 steps starting at a high alpha – and likely need tuning for other applications.)
Step10: (Somewhat, in terms of reviewer tone, movie genre, etc... the MOST cosine-similar docs usually seem more like the TARGET than the MEDIAN or LEAST.)
Step11: Do the DBOW words look meaningless? That's because the gensim DBOW model doesn't train word vectors – they remain at their random initialized values – unless you ask with the dbow_words=1 initialization parameter. Concurrent word-training slows DBOW mode significantly, and offers little improvement (and sometimes a little worsening) of the error rate on this IMDB sentiment-prediction task.
Step12: Even though this is a tiny, domain-specific dataset, it shows some meager capability on the general word analogies – at least for the DM/concat and DM/mean models which actually train word vectors. (The untrained random-initialized words of the DBOW model of course fail miserably.)
Step13: To mix the Google dataset (if locally available) into the word tests...
Step14: To get copious logging output from above steps...
Step15: To auto-reload python code while developing...
|
2,070
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import sys
from sklearn import linear_model
import matplotlib.pyplot as plt
%matplotlib inline
dtype_dict = {'bathrooms':float, 'waterfront':int, 'sqft_above':int, 'sqft_living15':float, 'grade':int, 'yr_renovated':int, 'price':float, 'bedrooms':float, 'zipcode':str, 'long':float, 'sqft_lot15':float, 'sqft_living':float, 'floors':str, 'condition':int, 'lat':float, 'date':str, 'sqft_basement':int, 'yr_built':int, 'id':str, 'sqft_lot':int, 'view':int}
sales = pd.read_csv('kc_house_data.csv', dtype=dtype_dict)
train_data = pd.read_csv('kc_house_train_data.csv', dtype=dtype_dict)
test_data = pd.read_csv('kc_house_test_data.csv', dtype=dtype_dict)
print(sales['sqft_living'].values.dtype)
def get_numpy_data(dataset, features, output_name):
    """Build a design matrix and target column from a DataFrame.

    Adds (in place) a 'constant' column of ones to ``dataset``, then returns
    a tuple ``(feature_matrix, output)`` where ``feature_matrix`` has the
    constant column first followed by ``features``, and ``output`` is the
    ``output_name`` column reshaped to an (n, 1) array.
    """
    dataset['constant'] = 1
    target = dataset[[output_name]].values
    n_rows = len(target)
    columns = ['constant'] + features
    matrix = dataset[columns].values.reshape((n_rows, len(columns)))
    return (matrix, target.reshape((n_rows, 1)))
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
print(example_features[:5])
print(example_output[:5])
def predict_output(X, w):
    """Return the predictions for design matrix X under weights w (matrix product X·w)."""
    return np.dot(X, w)
def feature_derivative_ridge(errors, feature, weight, l2_penalty, feature_is_constant):
    """Partial derivative of the ridge cost with respect to one weight.

    The data term is 2 * feature^T · errors.  For the constant feature the L2
    penalty is omitted; otherwise the gradient picks up 2 * l2_penalty * weight.
    """
    data_term = 2 * feature.T.dot(errors)
    if feature_is_constant:
        return data_term
    return data_term + 2 * l2_penalty * weight
(example_features, example_output) = get_numpy_data(sales, ['sqft_living'], 'price')
my_weights = np.array([1., 10.], dtype=np.float16).reshape((2,1))
test_predictions = predict_output(example_features, my_weights)
errors = test_predictions - example_output # prediction errors
# next two lines should print the same values
print(feature_derivative_ridge(errors, example_features[:,1].reshape((len(example_features[:,1]), 1)), my_weights[1], 1, False))
print(np.sum(errors*example_features[:,1].reshape((len(example_features[:,1]), 1)))*2+20.)
# next two lines should print the same values
print(feature_derivative_ridge(errors, example_features[:,0].reshape((len(example_features[:,0]), 1)), my_weights[0], 1, True))
print(np.sum(errors)*2.)
def ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations=100):
    """Minimise the ridge (L2-regularised) RSS cost by batch gradient descent.

    Parameters
    ----------
    feature_matrix : ndarray, shape (n, d)
        Design matrix; column 0 is assumed to be the constant feature, which
        is excluded from the L2 penalty.
    output : ndarray, shape (n, 1)
        Target values.
    initial_weights : array-like of length d
        Starting weights (cast to float so integer inputs are not truncated).
    step_size : float
        Gradient-descent learning rate.
    l2_penalty : float
        Strength of the L2 regularisation.
    max_iterations : int, optional
        Number of full passes over the data (default 100).

    Returns
    -------
    ndarray, shape (d, 1)
        The learned weight vector.
    """
    # Cast to float explicitly: with an integer initial_weights array the
    # in-place `-=` updates below would truncate every step to an integer.
    # (The original also dumped the feature matrix to stdout here; that
    # leftover debug print has been removed.)
    weights = np.array(initial_weights, dtype=float).reshape((len(initial_weights), 1))
    for _ in range(max_iterations):
        # Predictions and residuals for the current weights.
        predictions = predict_output(feature_matrix, weights)
        errors = predictions - output
        # Freeze the weights used for the penalty terms so every coordinate's
        # gradient is evaluated at the same point within one pass.
        old_weights = np.copy(weights)
        for i in range(len(weights)):
            # feature_matrix[:, i] is the feature column paired with weights[i];
            # i == 0 is the constant feature and carries no L2 penalty.
            derivative = feature_derivative_ridge(errors, feature_matrix[:, i], old_weights[i, 0], l2_penalty, i == 0)
            weights[i, 0] -= step_size * derivative
    return weights
simple_features = ['sqft_living']
my_output = 'price'
(simple_feature_matrix, output) = get_numpy_data(train_data, simple_features, my_output)
(simple_test_feature_matrix, test_output) = get_numpy_data(test_data, simple_features, my_output)
initial_weights = np.array([0., 0.])
step_size = 1e-12
max_iterations=1000
l2_penalty = 0
simple_weights_0_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
l2_penalty = 1e11
simple_weights_high_penalty = ridge_regression_gradient_descent(simple_feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
print(simple_weights_0_penalty)
print(simple_weights_high_penalty)
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(simple_feature_matrix,output,'k.',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_0_penalty),'b-',
simple_feature_matrix,predict_output(simple_feature_matrix, simple_weights_high_penalty),'r-')
(test_features, test_output) = get_numpy_data(test_data, ['sqft_living'], 'price')
no_regularization_prediction = predict_output(test_features, simple_weights_0_penalty)
test_errors = no_regularization_prediction - test_output
RSS_no_penalty = test_errors.T.dot(test_errors)
print(RSS_no_penalty)
high_regularization_prediction = predict_output(test_features, simple_weights_high_penalty)
test_errors = high_regularization_prediction - test_output
RSS_high_penalty = test_errors.T.dot(test_errors)
print(RSS_high_penalty)
print(simple_weights_0_penalty[1,0])
print(simple_weights_high_penalty[1,0])
model_features = ['sqft_living', 'sqft_living15'] # sqft_living15 is the average squarefeet for the nearest 15 neighbors.
my_output = 'price'
(feature_matrix, output) = get_numpy_data(train_data, model_features, my_output)
(test_feature_matrix, test_output) = get_numpy_data(test_data, model_features, my_output)
initial_weights = np.array([0.0,0.0,0.0])
step_size = 1e-12
max_iterations = 1000
l2_penalty=0.0
multiple_weights_0_penalty = ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
l2_penalty=1e11
multiple_weights_high_penalty = ridge_regression_gradient_descent(feature_matrix, output, initial_weights, step_size, l2_penalty, max_iterations)
all_zeros_weights = np.array([[0],[0],[0]])
test_predictions_all_zeros = predict_output(test_feature_matrix, all_zeros_weights)
test_errors = test_predictions_all_zeros - test_output
RSS_all_zeros_penalty = test_errors.T.dot(test_errors)
print(RSS_all_zeros_penalty)
test_predictions_no = predict_output(test_feature_matrix, multiple_weights_0_penalty)
test_errors = test_predictions_no - test_output
RSS_no_penalty = test_errors.T.dot(test_errors)
print(RSS_no_penalty)
test_predictions_high = predict_output(test_feature_matrix, multiple_weights_high_penalty)
test_errors = test_predictions_high - test_output
RSS_high_penalty = test_errors.T.dot(test_errors)
print(RSS_high_penalty)
print(test_predictions_no[0] - test_output[0])
print(test_predictions_high[0] - test_output[0])
print(multiple_weights_0_penalty[1])
print(multiple_weights_high_penalty[1])
RSS_no_penalty[0][0]
RSS_high_penalty[0,0]
sales = pd.read_csv('kc_house_data.csv', dtype=dtype_dict)
sales = sales.sort(['sqft_living','price'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load in house sales data
Step2: If we want to do any "feature engineering" like creating new features or adjusting existing ones we should do this directly using the SFrames as seen in the first notebook of Week 2. For this notebook, however, we will work with the existing features.
Step3: Also, copy and paste the predict_output() function to compute the predictions for an entire matrix of features given the matrix and the weights
Step4: Computing the Derivative
Step5: To test your feature derivartive run the following
Step6: Gradient Descent
Step7: Visualizing effect of L2 penalty
Step8: In this part, we will only use 'sqft_living' to predict 'price'. Use the get_numpy_data function to get a Numpy versions of your data with only this feature, for both the train_data and the test_data.
Step9: Let's set the parameters for our optimization
Step10: First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step11: Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step12: This code will plot the two learned models. (The blue line is for the model with no regularization and the red line is for the one with high regularization.)
Step13: Compute the RSS on the TEST data for the following three sets of weights
Step14: QUIZ QUESTIONS
Step15: Running a multiple regression with L2 penalty
Step16: We need to re-inialize the weights, since we have one extra parameter. Let us also set the step size and maximum number of iterations.
Step17: First, let's consider no regularization. Set the l2_penalty to 0.0 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step18: Next, let's consider high regularization. Set the l2_penalty to 1e11 and run your ridge regression algorithm to learn the weights of your model. Call your weights
Step19: Compute the RSS on the TEST data for the following three sets of weights
Step20: Predict the house price for the 1st house in the test set using the no regularization and high regularization models. (Remember that python starts indexing from 0.) How far is the prediction from the actual price? Which weights perform best for the 1st house?
Step21: QUIZ QUESTIONS
Step22: Estimating 1 assignment
|
2,071
|
<ASSISTANT_TASK:>
Python Code:
# Train log-transform model
training_samples = []
logz = np.log(0.001 + z)
vw = pyvw.vw("-b 2 --loss_function squared -l 0.1 --holdout_off -f vw.log.model --readable_model vw.readable.log.model")
for i in range(len(logz)):
training_samples.append("{label} | x:{x} y:{y}".format(label=logz[i], x=x[i], y=y[i]))
# Do hundred passes over the data and store the model in vw.log.model
for iteration in range(100):
for i in range(len(training_samples)):
vw.learn(training_samples[i])
vw.finish()
# Generate predictions from the log-transform model
vw = pyvw.vw("-i vw.log.model -t")
log_predictions = [vw.predict(sample) for sample in training_samples]
# Measure bias in the log-domain
log_bias = np.mean(log_predictions - logz)
bias = np.mean(np.exp(log_predictions) - z)
# Train original domain model using poisson regression
training_samples = []
vw = pyvw.vw("-b 2 --loss_function poisson -l 0.1 --holdout_off -f vw.poisson.model --readable_model vw.readable.poisson.model")
for i in range(len(z)):
training_samples.append("{label} | x:{x} y:{y}".format(label=z[i], x=x[i], y=y[i]))
# Do hundred passes over the data and store the model in vw.log.model
for iteration in range(100):
for i in range(len(training_samples)):
vw.learn(training_samples[i])
vw.finish()
# Generate predictions from the poisson model
vw = pyvw.vw("-i vw.poisson.model")
poisson_predictions = [np.exp(vw.predict(sample)) for sample in training_samples]
poisson_bias = np.mean(poisson_predictions - z)
plt.figure(figsize=(18,6))
# Measure bias in the log-domain
plt.subplot(131)
plt.plot(logz, log_predictions, '.')
plt.plot(logz, logz, 'r')
plt.title('Log-domain bias:%f'%(log_bias))
plt.xlabel('label')
plt.ylabel('prediction')
plt.subplot(132)
plt.plot(z, np.exp(log_predictions), '.')
plt.plot(z, z, 'r')
plt.title('Original-domain bias:%f'%(bias))
plt.xlabel('label')
plt.ylabel('prediction')
plt.subplot(133)
plt.plot(z, poisson_predictions, '.')
plt.plot(z, z, 'r')
plt.title('Poisson bias:%f'%(poisson_bias))
plt.xlabel('label')
plt.ylabel('prediction')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Although the model is relatively unbiased in the log-domain where we trained our model, in the original domain there is underprediction as we expected from Jensenn's inequality
|
2,072
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Image
from IPython.html.widgets import interact, interactive, fixed
Image('fermidist.png')
def fermidist(energy, mu, kT):
    """Compute the Fermi distribution at energy, mu and kT.

    Parameters
    ----------
    energy : float or ndarray
        Particle energy (same units as ``mu`` and ``kT``).
    mu : float
        Chemical potential.
    kT : float
        Temperature in energy units (Boltzmann constant times temperature).

    Returns
    -------
    float or ndarray
        Occupation probability 1 / (exp((energy - mu) / kT) + 1).
    """
    # The original's docstring had lost its triple quotes, leaving a bare
    # sentence as the first statement — a SyntaxError; restored here.
    return 1 / (np.exp((energy - mu) / kT) + 1)
assert np.allclose(fermidist(0.5, 1.0, 10.0), 0.51249739648421033)
assert np.allclose(fermidist(np.linspace(0.0,1.0,10), 1.0, 10.0),
np.array([ 0.52497919, 0.5222076 , 0.51943465, 0.5166605 , 0.51388532,
0.51110928, 0.50833256, 0.50555533, 0.50277775, 0.5 ]))
def plot_fermidist(mu, kT):
    """Plot the Fermi distribution over energies 0..10 for the given mu and kT."""
    axes = plt.gca()
    energies = np.arange(0, 11.0)
    plt.plot(energies, fermidist(energies, mu, kT))
    plt.ylim(0, 2.0)
    # Despine the top and right edges for a cleaner look.
    for side in ('right', 'top'):
        axes.spines[side].set_visible(False)
    axes.get_xaxis().tick_bottom()
    axes.get_yaxis().tick_left()
plot_fermidist(4.0, 1.0)
assert True # leave this for grading the plot_fermidist function
interact(plot_fermidist,mu = [0.0,5.0], kT = [0.1,10.0])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exploring the Fermi distribution
Step3: In this equation
Step4: Write a function plot_fermidist(mu, kT) that plots the Fermi distribution $F(\epsilon)$ as a function of $\epsilon$ as a line plot for the parameters mu and kT.
Step5: Use interact with plot_fermidist to explore the distribution
|
2,073
|
<ASSISTANT_TASK:>
Python Code:
import collections
import glob
import os
from os import path
import matplotlib_venn
import pandas as pd
rome_path = path.join(os.getenv('DATA_FOLDER'), 'rome/csv')
OLD_VERSION = '343'
NEW_VERSION = '344'
old_version_files = frozenset(glob.glob(rome_path + '/*{}*'.format(OLD_VERSION)))
new_version_files = frozenset(glob.glob(rome_path + '/*{}*'.format(NEW_VERSION)))
new_files = new_version_files - frozenset(f.replace(OLD_VERSION, NEW_VERSION) for f in old_version_files)
deleted_files = old_version_files - frozenset(f.replace(NEW_VERSION, OLD_VERSION) for f in new_version_files)
print('{:d} new files'.format(len(new_files)))
print('{:d} deleted files'.format(len(deleted_files)))
# Load all ROME datasets for the two versions we compare.
VersionedDataset = collections.namedtuple('VersionedDataset', ['basename', 'old', 'new'])
def read_csv(filename):
    """Load ``filename`` with pandas, reporting which file failed on a parse error.

    Parameters
    ----------
    filename : str or file-like
        Path (or buffer) of the CSV file to read.

    Returns
    -------
    pandas.DataFrame
    """
    try:
        return pd.read_csv(filename)
    except pd.errors.ParserError:
        # Bug fix: the original message was a placeholder f-string with no
        # field ("While parsing: (unknown)"); show the actual offending path
        # so the failing dataset can be identified before re-raising.
        display(f'While parsing: {filename}')
        raise
rome_data = [VersionedDataset(
basename=path.basename(f),
old=read_csv(f.replace(NEW_VERSION, OLD_VERSION)),
new=read_csv(f))
for f in sorted(new_version_files)]
def find_rome_dataset_by_name(data, partial_name):
    """Return the dataset whose basename matches ``partial_name``, else raise ValueError."""
    wanted = 'unix_{}_v{}_utf8.csv'.format(partial_name, NEW_VERSION)
    for candidate in data:
        if candidate.basename == wanted:
            return candidate
    raise ValueError('No dataset named {}, the list is\n{}'.format(partial_name, [d.basename for d in data]))
for dataset in rome_data:
if set(dataset.old.columns) != set(dataset.new.columns):
print('Columns of {} have changed.'.format(dataset.basename))
same_row_count_files = 0
for dataset in rome_data:
diff = len(dataset.new.index) - len(dataset.old.index)
if diff > 0:
print('{:d}/{:d} values added in {}'.format(
diff, len(dataset.new.index), dataset.basename))
elif diff < 0:
print('{:d}/{:d} values removed in {}'.format(
-diff, len(dataset.old.index), dataset.basename))
else:
same_row_count_files += 1
print('{:d}/{:d} files with the same number of rows'.format(
same_row_count_files, len(rome_data)))
jobs = find_rome_dataset_by_name(rome_data, 'referentiel_appellation')
new_jobs = set(jobs.new.code_ogr) - set(jobs.old.code_ogr)
obsolete_jobs = set(jobs.old.code_ogr) - set(jobs.new.code_ogr)
stable_jobs = set(jobs.new.code_ogr) & set(jobs.old.code_ogr)
matplotlib_venn.venn2((len(obsolete_jobs), len(new_jobs), len(stable_jobs)), (OLD_VERSION, NEW_VERSION));
pd.options.display.max_colwidth = 2000
jobs.new[jobs.new.code_ogr.isin(new_jobs)][['code_ogr', 'libelle_appellation_long', 'code_rome']]
items = find_rome_dataset_by_name(rome_data, 'item')
new_items = set(items.new.code_ogr) - set(items.old.code_ogr)
obsolete_items = set(items.old.code_ogr) - set(items.new.code_ogr)
stable_items = set(items.new.code_ogr) & set(items.old.code_ogr)
matplotlib_venn.venn2((len(obsolete_items), len(new_items), len(stable_items)), (OLD_VERSION, NEW_VERSION));
items.new[items.new.code_ogr.isin(new_items)].head()
links = find_rome_dataset_by_name(rome_data, 'liens_rome_referentiels')
old_links_on_stable_items = links.old[links.old.code_ogr.isin(stable_items)]
new_links_on_stable_items = links.new[links.new.code_ogr.isin(stable_items)]
old = old_links_on_stable_items[['code_rome', 'code_ogr']]
new = new_links_on_stable_items[['code_rome', 'code_ogr']]
links_merged = old.merge(new, how='outer', indicator=True)
links_merged['_diff'] = links_merged._merge.map({'left_only': 'removed', 'right_only': 'added'})
links_merged._diff.value_counts()
job_group_names = find_rome_dataset_by_name(rome_data, 'referentiel_code_rome').new.set_index('code_rome').libelle_rome
item_names = items.new.set_index('code_ogr').libelle.drop_duplicates()
links_merged['job_group_name'] = links_merged.code_rome.map(job_group_names)
links_merged['item_name'] = links_merged.code_ogr.map(item_names)
display(links_merged[links_merged._diff == 'removed'].dropna().head(5))
links_merged[links_merged._diff == 'added'].dropna().head(5)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: First let's check if there are new or deleted files (only matching by file names).
Step2: Cool, no new nor deleted files.
Step3: Let's make sure the structure hasn't changed
Step4: OK no columns have changed.
Step5: There are some minor changes in many files, but based on my knowledge of ROME, none from the main files.
Step6: Alright, so the only change seems to be 15 new jobs added. Let's take a look (only showing interesting fields)
Step7: Those are indeed new jobs. Some are related to COVID-19 sneaking in.
Step8: As anticipated it is a very minor change (hard to see it visually)
Step9: The new ones seem legit to me and related to the new jobs.
Step10: So in addition to the added items, there are few fixes. Let's have a look at them
|
2,074
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
import numpy as np
#read csv as data frame
df_gdp_raw = pd.read_csv("../data/countries_GDP.csv")
#select columns and use these that have data in 'Unamed:0', which
#actually is the country code
df_gdp = df_gdp_raw[[0,1,3,4]][df_gdp_raw['Unnamed: 0'].notnull()]
#rename columns and index
df_gdp.columns=["country_code","position","country_name","gdp"]
df_gdp.index = range(df_gdp.shape[0])
#show head
print df_gdp.head()
#show types, take into account that gdp should be integer
print df_gdp.dtypes
#change gdp dtype to numeric
df_gdp.gdp = df_gdp.gdp.apply(lambda x: x.replace(",","").strip(" "))
df_gdp.gdp = pd.to_numeric(df_gdp.gdp,errors="coerce")
print df_gdp.gdp.dtype
print df_gdp.head()
print df_gdp.tail()
#save as csv, set header as false
df_gdp.to_csv("../data/countries_GDP_clean.csv",header=False,sep=";")
import pandas as pd
import numpy as np
df_country_raw = pd.read_csv("../data/countries_data.csv",sep=";")
df_country_raw.head(15)
df_country_raw.to_csv("../data/countries_data_clean.csv",header=False)
import pandas as pd
import numpy as np
def generate_users_df(num_users, num_topics):
    """Build a synthetic users DataFrame with 'username' and 'topics' columns.

    Usernames run "user0".."user{num_users-1}"; each user gets a random
    "|"-joined subset of topics read from ../data/news_topics.csv.
    Not deterministic (uses np.random) and requires that CSV to exist.
    """
    # Usernames: "user" concatenated with the row index, e.g. "user0", "user1", ...
    usernames_df = pd.Series(["user"]*num_users).str.cat(pd.Series(np.arange(num_users)).map(str))
    # Topic vocabulary comes from a local CSV with one topic per row.
    news_topics = pd.read_csv("../data/news_topics.csv",header=None)
    # Per-user topic count: randint's half-open range makes this uniform over 1..num_topics.
    rand_ints = pd.Series(np.random.randint(1,num_topics+1,num_users))
    # NOTE(review): np.random.choice(..., replace=False) raises ValueError when
    # x > len(news_topics) — this assumes num_topics <= rows in the CSV; confirm.
    topics_df = rand_ints.apply(lambda x: "|".join(np.random.choice(news_topics.T[0],x,replace=False)))
    return pd.concat({'username':usernames_df,'topics':topics_df},axis=1)
M = 5
N = 100
users_df = generate_users_df(N,M)
users_df.head(10)
import csv
M = 20
N = 1000
users_df = generate_users_df(N,M)
users_df.to_csv("../data/users_events_example/user_info_%susers_%stopics.csv" % (N,M),
columns=["username","topics"],
header=None,
index=None)
#quoting=csv.QUOTE_MINIMAL)
import datetime
def generate_user_events(date_start, num_files, num_users, num_events):
    """Simulate a 5-minute window of user/topic events starting at ``date_start``.

    Returns a DataFrame indexed by ``num_events`` evenly spaced timestamps
    spanning 300 seconds, with columns 'user' and 'event', both sampled
    uniformly at random with replacement.  ``num_files`` is accepted but
    unused in this function.  Requires ../data/news_topics.csv; not
    deterministic (uses np.random).
    """
    # Usernames "user0".."user{num_users-1}", matching generate_users_df.
    usernames_df = pd.Series(["user"]*num_users).str.cat(pd.Series(np.arange(num_users)).map(str))
    # Topic vocabulary from the local CSV (transposed so column 0 holds the topics).
    news_topics = pd.read_csv("../data/news_topics.csv",header=None,lineterminator="\n").T
    # Evenly spaced timestamps covering a 5-minute (5*60 s) window.
    df_index = pd.date_range(date_start,
                             periods=num_events,
                             freq=pd.DateOffset(seconds=float(5*60)/num_events))
    # Sample users and events independently, uniformly, with replacement.
    event_data = {"user" : np.random.choice(usernames_df,num_events,replace=True),
                  "event" : np.random.choice(news_topics[0],num_events,replace=True)}
    return pd.DataFrame(event_data, index = df_index, columns=["user", "event"])
num_files = 10
num_users = 100
num_events = 1000
date_start = datetime.datetime.strptime('1/1/2016', '%d/%m/%Y')
for idx,i in enumerate(range(num_files)):
print "File ",idx+1," of ", num_files, " at ",date_start
userevent_df = generate_user_events(date_start, num_files, num_users, num_events)
file_name = "../data/users_events_example/userevents_" + date_start.strftime("%d%m%Y%H%M%S") + ".log"
userevent_df.to_csv(file_name, header=None)
date_start = date_start + datetime.timedelta(0,300)
import csv, re
import pandas as pd
import numpy as np
f = file("../data/papers.lst","rb")
papers = []
for idx,l in enumerate(f.readlines()):
t = re.match("(\d+)(\s*)(.\d*)(\s*)(\w+)(\s*)(.*)",l)
if t:
#print "|",t.group(1),"|",t.group(3),"|",t.group(5),"|",t.group(7),"|"
papers.append([t.group(1),t.group(3),t.group(5),t.group(7)])
papers_df = pd.DataFrame(papers)
papers_df.to_csv("../data/papers.csv", header = None)
N = papers_df.shape[0]
#let's assume that a paper can have 30 references at max and 5 at min
M = 30
papers_references = pd.DataFrame(np.arange(N))
papers_references[1] = papers_references[0].apply(
lambda x:
";".join(
[str(x) for x in np.random.choice(papers_references[0],np.random.randint(5,M))]))
papers_references.columns = ["paper_id","references"]
papers_references.to_csv("../data/papers_references.csv",header=None,index=None)
import pandas as pd
cc_df0 = pd.read_excel("../data/country_info_worldbank.xls")
#delete unnececary rows
cc_df1 = cc_df0[cc_df0["Unnamed: 2"].notnull()]
#get columnames and set to dataframe
colnames = cc_df1.iloc[0].tolist()
colnames[0] = "Order"
cc_df1.columns = colnames
#delete void columns
cc_df2 = cc_df1.loc[:,cc_df1.iloc[1].notnull()]
#delete first row as it is colnames
cc_df3 = cc_df2.iloc[1:]
#reindex
cc_df3.index = np.arange(cc_df3.shape[0])
cc_df3[:]["Economy"] = cc_df3.Economy.str.encode('utf-8')
cc_df3.to_csv("../data/worldbank_countrycodes_clean.csv")
import pandas as pd
est_df = pd.read_csv("../data/estacions_meteo.tsv",sep="\t")
est_df.head()
est_df.columns = est_df.columns.str.lower().\
str.replace("\[codi\]","").\
str.replace("\(m\)","").str.strip()
est_df.longitud = est_df.longitud.str.replace(",",".")
est_df.latitud = est_df.latitud.str.replace(",",".")
est_df.longitud = pd.to_numeric(est_df.longitud)
est_df.latitud = pd.to_numeric(est_df.latitud)
import pandas as pd
df = pd.read_csv("../data/iqsize.csv", na_values="n/a")
df.dtypes
#clean piq
errors = pd.to_numeric(df.piq, errors="coerce")
print df["piq"][errors.isnull()]
df["piq"] = pd.to_numeric(df["piq"].str.replace("'","."))
df.dtypes
errors = pd.to_numeric(df.height, errors="coerce")
print df["height"][errors.isnull()]
df["height"] = pd.to_numeric(df["height"].str.replace("'","."))
df.dtypes
df.sex.unique()
df.sex = df.sex.str.replace("Woman","Female")
df.sex = df.sex.str.replace("woman","Female")
df.sex = df.sex.str.replace("woman","Female")
df.sex = df.sex.str.replace("man","Male")
df.sex = df.sex.str.replace("Man","Male")
df.sex.unique()
df.to_csv("../data/iqsize_clean.csv",index=None)
df = pd.read_csv("../data/iqsize_clean.csv")
df
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Exercice
Step2: Exercice
Step3: Exercice
Step4: Exercice
Step5: Exercice
Step6: Exercice
Step7: Exercice
Step8: Exercice
|
2,075
|
<ASSISTANT_TASK:>
Python Code:
from pygchem import datasets
bmk_root = '/home/bovy/geoschem'
%cd {bmk_root}/1yr_benchmarks/v10-01/v10-01c/Run1
filename = 'bpch/ctm.bpch.v10-01c-geosfp-Run1.20120801'
dataset = datasets.load(filename)
print dataset[-20:]
filename = 'netcdf/v10-01c-geosfp-Run1.20120801.nc'
clb = datasets.load_callbacks['gamap_bpch2coards']
dataset = datasets.load(filename, callback=clb)
print dataset[-20:]
% cd {bmk_root}/1yr_benchmarks/v9-02/v9-02r/geos5/Run0
filename = 'netcdf/v9-02r-geos5-Run0.20050101.nc'
clb = datasets.load_callbacks['gamap_bpch2nc']
dataset = datasets.load(filename, callback=clb)
print dataset[-20:]
%cd {bmk_root}/1yr_benchmarks/v10-01/v10-01c/Run1
filename = 'netcdf/v10-01c-geosfp-Run1.20120801.nc'
clb = datasets.load_callbacks['gamap_bpch2coards']
dataset = datasets.load(filename, "IJ_AVG_S__O3",
callback=clb)
print dataset
dataset = datasets.load(filename, ["IJ_AVG_S__O3", "IJ_AVG_S__NO"],
callback=clb)
print dataset
import iris
check_ij_avg = lambda cube: cube.name().startswith("IJ_AVG_S")
ij_avg = iris.Constraint(cube_func=check_ij_avg)
dataset = datasets.load(filename, ij_avg,
callback=clb)
print dataset
def lon_subset(cell):
    """Return True or False as to whether the cell
    center in question should be kept (longitudes in the open interval (0, 20))."""
    # Fix: the docstring's triple quotes were lost, leaving bare prose in the
    # body, which is a syntax error; restore them. Logic is unchanged.
    return cell > 0. and cell < 20.
lon_cst = iris.Constraint(longitude=lon_subset)
dataset = datasets.load(filename,
"IJ_AVG_S__O3" & lon_cst,
callback=clb)
print dataset
# note the reduced grid-size for the longitude
# note the wildcard character in the filename
# (UNIX expressions are supported)
filename = 'bpch/ctm.bpch.v10-01c-geosfp-Run1.*'
diagnostics = ["BXHGHT_S__BXHEIGHT",
"BXHGHT_S__N(AIR)",
"IJ_AVG_S__NO2"]
dataset = datasets.load(filename, diagnostics)
print dataset
# note the additional time dimension
dataset_nomerge = datasets.load_raw(filename, diagnostics)
print dataset_nomerge
dataset_lon_subset = dataset.extract(lon_cst)
print dataset_lon_subset
no2_avg = dataset.extract_strict("IJ_AVG_S__NO2")
print no2_avg
outfile = 'netcdf/test.nc'
datasets.save(dataset, outfile)
!ncdump -h netcdf/test.nc
print datasets.load('netcdf/test.nc')
print no2_avg
no2_avg.name()
no2_avg.standard_name
no2_avg.long_name
no2_avg.var_name
no2_avg.attributes
no2_avg.units
no2_avg.convert_units('ppmv')
print no2_avg
lat_coord = no2_avg.coord('latitude')
lat_coord.points
lat_coord.bounds
lat_coord.units
print no2_avg
# Get the first element of the 1st and last dimensions (time and model level number)
no2_avg_t0_l1 = no2_avg[0, :, :, 0]
print no2_avg_t0_l1
no2_avg_time_slices = no2_avg.slices(['longitude', 'latitude', 'model_level_number'])
for s in no2_avg_time_slices:
print s
import iris.analysis
no2_avg_sum_levels = no2_avg.collapsed('model_level_number', iris.analysis.SUM)
print no2_avg_sum_levels
# extract the data fields (cubes) needed to compute the tracer columns
box_heights = dataset.extract_strict("BXHGHT_S__BXHEIGHT")
n_air = dataset.extract_strict("BXHGHT_S__N(AIR)")
# convert units back to ppbv for the NO2 tracer
no2_avg.convert_units('ppbv')
# calculate the columns
no2_avg_columns = (box_heights * n_air * no2_avg).collapsed('model_level_number',
iris.analysis.SUM)
# set name convert units to count/cm2 (count is used for #molecules)
no2_avg_columns.rename("NO2 columns")
no2_avg_columns.convert_units('count/cm2')
# string repr
print no2_avg_columns
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import iris.quickplot as qplt
%matplotlib inline
fig = plt.figure(figsize=(10, 8))
qplt.contourf(no2_avg_columns[0], 15)
plt.gca().coastlines()
import matplotlib.dates as mdates
no2_hovmoller = no2_avg_columns.collapsed('latitude',
iris.analysis.MEAN)
fig = plt.figure(figsize=(10, 8))
qplt.contourf(no2_hovmoller, 20)
# fine control over time axis ticks and labels
plt.gca().yaxis.set_major_locator(mdates.MonthLocator())
plt.gca().yaxis.set_major_formatter(mdates.DateFormatter('%m-%Y'))
plt.gca().set_ylabel("Time")
from pygchem import diagnostics
dinfo = diagnostics.CTMDiagnosticInfo(diaginfo_file='diaginfo.dat',
tracerinfo_file='tracerinfo.dat')
dinfo.categories
# get the 1st category (a Record like object)
cat_ij_avg = dinfo.categories[0]
cat_ij_avg
cat_ij_avg.name
cat_ij_avg.offset
# convert the record object to a dict
cat_ij_avg.to_dict()
# select a category based on its name (key)
dinfo.categories.select_item("NS-FLX-$")
# select a diagnostic (tracer) based on its number (key)
dinfo.diagnostics.select_item(11)
# select categories based on other attributes
dinfo.categories.select(offset=3000)
# advanced selection
dinfo.diagnostics.select(lambda d: d.unit == 'ppbC' and d.number > 10)
new_tracer = diagnostics.CTMDiagnostic(9999, 'NEW', full_name='a new tracer')
dinfo.diagnostics.append(new_tracer)
dinfo.diagnostics[-1]
# select the new tracer added to the list
s = dinfo.diagnostics.select(9999)
# remove the selected entry
s.selection_remove()
dinfo.diagnostics[-1]
dinfo.save_diaginfo('diaginfo_test.dat')
dinfo.save_tracerinfo('tracerinfo_test.dat')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NOTE
Step2: Loading datasets
Step3: Simple (unconstrained) loading
Step4: The line below print the list of the 20 lasts data fields of the list (name, units, dimensions and coordinates). Note that the field name may be reformatted during the loading so that it is consistent with the name set by the GAMAP's netCDF writing routines. Note also that the units may also be reformatted as required by the udunits2 library (used by Iris).
Step5: The same function can be used for loading the content of the netCDF files. The netCDF files available in this simulation were created by the 'BPCH2COARDS' GAMAP routine. As there are differences between the COARDS Conventions and the CF Conventions, we have to use a callback function 'gamap_bpch2coards' to properly load the content of the netCDF file into Iris cubes
Step6: PyGChem also provides a callback for loading netCDF files created by the GAMAP routine 'BPCH2NC'
Step7: Constrained loading
Step8: We can specify multiple variables
Step9: It is also possible to define more advanced constraints. For example, to load all "IJ-AVG-$" diagnostics
Step11: A more advanced example, combining constraints and extracting data subsets
Step12: Loading multiple files
Step13: Merging fields may take a long time. If speed matters, it is still possible to load the fields without any merging
Step14: Apply contraints to the field list after loading
Step15: To select only one field (cube), the extract_strict method can be used
Step16: Saving datasets
Step17: A text representation of (the header information of) the written netCDF file using the ncdump utility (provided with the netCDF4 package)
Step18: Loading the written file using the load function
Step19: It is also possible to write the datasets to the BPCH format, using the low-level function write_bpch in the module pygchem.io.bpch (not yet documented).
Step20: Field name(s)
Step21: PyGChem considers that the GEOS-Chem variable name (category + tracer) is a standard name, although it is not CF-compliant (i.e., not listed in the standard name table of the udunits package).
Step22: long_name is the full name of the diagnostic
Step23: var_name is the (netCDF) variable name
Step24: Attributes
Step25: Field units
Step26: It is easy to change the units of the field (data values are re-computed accordingly)
Step27: Coordinates
Step28: Coordinate data and metadata
Step29: Data
Step30: Note that another way to extract a subset is by applying one or more constraints on the cube (see above).
Step31: Collapsing data dimensions (statistics)
Step32: Basic cube mathematics
Step33: Plotting datasets
Step34: Plot the NO2 total columns for the first time slice
Step35: A Hovmoller diagram example
Step36: The CTM diagnostics
Step37: To load a couple of files
Step38: A CTMDiagnosticInfo object contains all information stored in those files. The attributes categories and diagnostics contains each record (line) in diaginfo.dat and tracerinfo.dat, respectively
Step39: These attributes behave like a Python list, with added key reference and database lookup-like capabilities. Each item of the list coorespond to a record.
Step40: It is aslo possible to filter the data (queries)
Step41: We can add or remove entries
Step42: Exporting to diaginfo and tracerinfo files
|
2,076
|
<ASSISTANT_TASK:>
Python Code:
import pandas as pd
data_dir = "/Users/seddont/Dropbox/Tom/MIDS/W209_work/Tom_project/"
# Get sample of the full database to understand what columns we want
smp = pd.read_csv(data_dir+"en.openfoodfacts.org.products.csv", sep = "\t", nrows = 100)
for c in smp.columns:
print(c)
# Specify what columns we need for the demonstration visualizations
demo_cols = ['code', 'creator', 'product_name', 'generic_name', 'quantity',
'brands', 'brands_tags', 'categories', 'categories_tags', 'serving_size',
'serving_size', 'energy_100g', 'energyfromfat_100g', 'fat_100g',
'saturatedfat_100g', 'monounsaturatedfat_100g',
'polyunsaturatedfat_100g', 'omega3fat_100g', 'omega6fat_100g',
'omega9fat_100g', 'oleicacid_100g', 'transfat_100g', 'cholesterol_100g',
'carbohydrates_100g', 'sugars_100g', 'sucrose_100g', 'glucose_100g',
'fructose_100g', 'lactose_100g', 'maltose_100g', 'starch_100g',
'fiber_100g', 'proteins_100g', 'salt_100g', 'sodium_100g',
'alcohol_100g', 'vitamina_100g', 'betacarotene_100g', 'vitamind_100g',
'vitamine_100g', 'vitamink_100g', 'vitaminc_100g', 'vitaminb1_100g',
'vitaminb2_100g', 'vitaminpp_100g', 'vitaminb6_100g', 'vitaminb9_100g',
'folates_100g', 'vitaminb12_100g', 'bicarbonate_100g', 'potassium_100g',
'chloride_100g', 'calcium_100g', 'iron_100g', 'fluoride_100g',
'iodine_100g', 'caffeine_100g', 'cocoa_100g',
'ingredients_list']
# Create a list of columns to drop
drop_cols = [c for c in smp.columns if c not in demo_cols]
print(drop_cols)
# Pull in full dataset
df = pd.read_csv(data_dir+"en.openfoodfacts.org.products.csv", sep = "\t")
# Drop unwanted columns
df.drop(drop_cols, axis = 1, inplace = True)
# Take a quick look
df
# Drop all rows that are not from the usda ndb import
df = df[df.creator == "usda-ndb-import"]
df
df[df["product_name"].str.lower().str.contains("baked donut", na = False)]
df[df["product_name"].str.lower().str.contains("cracker", na = False)]
df[df["product_name"].str.lower().str.contains("cereal", na = False)]
# reminder on column names remaining
df.columns
# Words we want to find that indicate product type
cat_words = ["donut", "cracker", "cereal"]
# Some of these generate confusion, so also have an 'exclude' dictionary
# This is pretty crude, but seems ok for generating demo
exclude_dict = {"donut": "coffee",
"cracker": "Nut",
"cereal": "Bar"}
# What we want to get variation on
pick_factors = ['fat_100g', 'sugars_100g', 'proteins_100g', 'sodium_100g']
# Points we want to pick (percentiles). Can tune this to get more or fewer picks.
pick_percentiles = [0.1, 0.5, 0.9]
# pick_percentiles = [0, 0.25, 0.5, 0.75, 1.0]
demo_picks = []
for cat in cat_words:
# first get all the items containing the cat word
catf = df[df["product_name"].str.lower().str.contains(cat, na = False)]
# then exclude any of these that contain the relevant exclude word
catf = catf[~catf["product_name"].str.lower().str.contains(exclude_dict[cat], na = False)]
# Identify what rank each product is in that category, for each main factor
for p in pick_factors:
catf[p + "_rank"] = catf[p].rank(method = "first")
# Select five products, at quintiles on each
high = catf[p + "_rank"].max()
pick_index = [max(1, round(n * high)) for n in pick_percentiles]
demo_picks.extend(catf[catf[p+"_rank"].isin(pick_index)].code)
demo_df = df[df.code.isin(demo_picks)]
# Add in category identifier
demo_df["demo_cat"] = "None"
for w in cat_words:
is_cat = demo_df.product_name.str.lower().str.contains(w)
demo_df["demo_cat"][is_cat] = w
# Take a look at what we built
demo_df
# Now write it out to disk
outfile = "demo_food_data.csv"
demo_df.to_csv(data_dir+outfile)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Working from the full database, because the usda_imports_filtered.csv file in the shared drive does not have brand information, which will be useful for displaying.
Step2: Now down to a manageable number of rows and columns. Going to explore for a few typical items to use as demo data. Let's take a look at donuts, crackers and cereal -- the three categories used in the paper prototype.
Step3: Looks like there are plenty of options for these. For demo purposes I want to pick 12 of each with a reasonable range of variation on the key factors of sugar, fat, sodium, protein, so that I can have one plus up to 11 comparison products.
Step4: Now going to go through and find items that have certain category words in the product name. Then filter these to exclude the most often word that is confused in there (e.g. donut flavor coffee gets picked up under donut).
|
2,077
|
<ASSISTANT_TASK:>
Python Code:
# <help>
# <api>
from collections import defaultdict
import datetime
import pandas as pd
import numpy as np
def load_data(clean=True, us=True):
    """Load front-page text blocks and newspaper metadata from the local postgres DB.

    Args:
        clean: strip whitespace, drop empty/one-character texts, drop the
            Newseum "Day without News" date, and remove duplicated
            (slug, text) rows via dedupe_text.
        us: restrict both frames to newspapers whose country is 'USA'.

    Returns:
        (df, df_newspapers) tuple of DataFrames; `df` gains integer-rounded
        page-size columns and a coarse `aspect_ratio` column.
    """
    df = pd.read_sql_table('frontpage_texts', 'postgres:///frontpages')
    df_newspapers = pd.read_sql_table('newspapers', 'postgres:///frontpages')
    if clean:
        df['text'] = df['text'].str.strip()
        # Keep only texts longer than one character (this also drops NaN texts,
        # since NaN .str.len() compares False).
        df = df[df['text'].str.len() > 1]
        # This is the date that the Newseum had a "Day without News":
        # http://www.newseum.org/withoutnews/
        df = df[df.date != datetime.datetime(2017, 6, 5)]
        df = dedupe_text(df)
    if us:
        df_newspapers = df_newspapers[df_newspapers.country == 'USA']
        df = df[df.slug.isin(set(df_newspapers.slug))]
    # Integer-truncated page dimensions, plus versions bucketed down to multiples of 10.
    df['page_height_round'] = df['page_height'].apply(int)
    df['page_width_round'] = df['page_width'].apply(int)
    df['page_width_round_10'] = df['page_width'].apply(lambda w: int(w/10)*10)
    df['page_height_round_10'] = df['page_height'].apply(lambda w: int(w/10)*10)
    # Coarse width/height ratio, rounded to one decimal place.
    df['aspect_ratio'] = np.round(df['page_width_round_10'] / df['page_height_round_10'], decimals=1)
    return df, df_newspapers
def dedupe_text(df):
    """Drop every row whose exact text appears more than once within the same paper (slug).

    Note: ALL copies of a duplicated (slug, text) pair are removed, not just
    the extras beyond the first.
    """
    per_slug_counts = df.groupby(['slug']).text.value_counts()
    repeated = per_slug_counts[per_slug_counts > 1].reset_index(name='count').drop('count', axis=1)
    repeated_texts = defaultdict(set)
    for _, pair in repeated.iterrows():
        repeated_texts[pair.slug].add(pair.text)
    keep_mask = df.apply(lambda row: row.text not in repeated_texts[row.slug], axis=1)
    return df[keep_mask]
df, df_newspapers = load_data()
df_clean = dedupe_text(df)
df_newspapers.head()
us_newspapers_df = df_newspapers[df_newspapers.country == 'USA']
print('''We have metadata for {} newspapers.
There are {} total countries represented. The top 5 are:
{}.
Within the US, there is representation from {} states. The states with the most newspapers are:
{}
And the least:
{}
'''.format(
df_newspapers.shape[0],
df_newspapers.country.nunique(),
df_newspapers.country.value_counts()[:5],
us_newspapers_df.state.nunique(),
us_newspapers_df.state.value_counts()[:5],
us_newspapers_df.state.value_counts()[-5:],
))
df_us = df[df.slug.isin(set(us_newspapers_df.slug))]
newspapers_in_df = df_newspapers[df_newspapers.slug.isin(set(df_us.slug))]
print('''Currently, there are:
{} rows of text
{} days of scrapes
(earliest: {}
latest : {})
{} total newspapers (not all the pdfs were extractable).
Filtering down to the US, there are now:
{} newspapers
{} rows of text
For those newspapers that are available in the US, there are:
{} states
states with most newspapers:
{}
with least:
{}
with none:
{}
'''.format(
df.shape[0],
df.date.nunique(),
df.date.min(),
df.date.max(),
df.slug.nunique(),
df_us.slug.nunique(),
df_us.shape[0],
newspapers_in_df.state.nunique(),
newspapers_in_df.state.value_counts()[:5],
newspapers_in_df.state.value_counts()[-5:],
set(df_newspapers.state) - set(newspapers_in_df.state)
))
print('''Fonts are often written in a format like this: {}.
Out of {} rows...
{} of the fonts have non-empty text
{} of the fonts have a '+'
{} of the fonts have a '-'
'''.format(
df.fontface.iloc[0],
df.shape[0],
(df.fontface.str.len() > 0).sum(),
df.fontface.str.contains('\+').sum(),
df.fontface.str.contains('-').sum()
))
print('''This seems to mean that we can break apart the font into:
[optional-leading-thing]+[font-family]-[font-weight]
''')
font_partition = df.fontface.str.rpartition('+')
df['font_family_weight'] = font_partition[2]
font_family_partition = df['font_family_weight'].str.partition('-')
df['font_leading_thing'] = font_partition[0]
df['font_family'] = font_family_partition[0]
df['font_weight'] = font_family_partition[2]
print('''After doing that,
There are...
{} unique font families
{} unique font weights
{} unique optional-leading-things'''.format(
df.font_family.nunique(),
df.font_weight.nunique(),
df.font_leading_thing.nunique()
))
df_us = df[df.slug.isin(set(us_newspapers_df.slug))]
# Let's do something with a Denver paper
df_newspapers[df_newspapers.city == 'Denver']
import numpy as np
df_denver_post = df_us[df_us.slug == 'CO_DP']
font_stats = df_denver_post.groupby(['font_family_weight']).fontsize.agg({'count': len, 'min': np.min, 'max': np.max, 'avg': np.mean})
print('''We have {} days of scraped Denver Post front pages.
We have {} unique font-weight combos. Here is a mapping of each font family to their min, average, and max font size.
{}
'''.format(
df_denver_post.date.nunique(),
df_denver_post.groupby(['font_family_weight']).first().shape[0],
font_stats
))
font_days = df_denver_post.groupby(['font_family_weight']).date.nunique().sort_values(ascending=False)
print('''Fonts by number of days on which they appear
{}
'''.format(
font_days
))
%matplotlib inline
import matplotlib.pyplot as plt
font_stats['days_present'] = font_days
plt.suptitle('Number of days a font appears, vs. total font appearances')
plt.scatter(font_stats.days_present, font_stats['count'])
df_denver_post.sort_values(['date', 'avg_character_area'], ascending=False).groupby('date').head(5).head(10)
# <api>
import pprint
import string
from nltk import word_tokenize
# Tokens must contain a minimum number of ASCII letters to be kept.
chars = set(string.ascii_letters)
def include_word(word):
    """True when *word* contains at least three ASCII-letter characters."""
    return sum(1 for c in word if c in chars) >= 3
def preprocess_text(text):
    """Lowercase, repair hyphenated line breaks, tokenize, and filter short tokens."""
    cleaned = text.strip().lower()
    # Re-join words that were split across lines ("hel-\nlo" -> "hello").
    cleaned = ''.join(cleaned.split('-\n'))
    # Remaining newlines are plain word separators.
    cleaned = cleaned.replace('\n', ' ')
    return [token for token in word_tokenize(cleaned) if include_word(token)]
def bag_of_words(text):
    '''Return the preprocessed tokens of *text* as a set, for fast single-token membership tests.'''
    return set(preprocess_text(text))
def preprocess_all(texts):
    """Lazily yield (original_text, preprocessed_token_list) pairs."""
    for raw in texts:
        yield raw, preprocess_text(raw)
print('''For text preprocessing, we consider a few cases:
* Newlines should be stripped
* Everything should be lower-cased
* We should return a tokenized list
* Tokens without a certain number of ascii characters (US-English analysis for now) will be rejected
The extraction from PDFs still contains word-continuations across line breaks.
For now, we'll consider all lines that end with "-" as continuations, and
link the text from before and after.
Newlines without continuations will be replaced with spaces.
Examples:
{}
'''.format(
pprint.pformat(list(preprocess_all([
'Hel-\nlo, bye\nnow\n',
*df_denver_post.text.sample(3)
])))
))
df_us['bow'] = df_us.text.apply(bag_of_words)
df_denver_post_latest = df_us[(df_us.slug == 'CO_DP') & (df_us.date == df_us.date.max())]
def percent_of_page(unigram, one_paper_df):
    """Total fraction of a front page occupied by text blocks containing *unigram*.

    The query is lowercased/stripped to match the lowercased bag-of-words sets.
    """
    needle = unigram.lower().strip()
    has_word = one_paper_df.bow.apply(lambda bag: needle in bag)
    return one_paper_df[has_word].percent_of_page.sum()
print('''Now we write a method to get the percent of page that a unigram occupies, for a particular front page.
Syria, Denver Post, latest day: {}
garbage input, should be 0: {}'''.format(
percent_of_page('Syria', df_denver_post_latest),
percent_of_page('asdflkjasdflasdfkjasdf', df_denver_post_latest)
))
# filter down to newspapers with entries with more than 3 days
days_of_newspapers = df_us.groupby('slug').date.nunique()
df_us_3plus = df_us[df_us.slug.isin(set(days_of_newspapers[days_of_newspapers > 3].index))]
print('''Number of newspapers with >3 days: {}
(Number of total newspapers: {})
'''.format(
df_us_3plus.slug.nunique(),
df_us.slug.nunique()
))
from functools import partial
def unigram_percent_of_page(query, dataframe):
    """Per (slug, date) front page, the page fraction devoted to *query*."""
    return dataframe.groupby(['slug', 'date']).apply(lambda page: percent_of_page(query, page))
def _reshape_percent_of_day_series(percent_of_page):
return percent_of_page.reset_index().rename(columns={0: 'percent_of_page'})
def percent_of_page_by_day(percent_of_page_df):
    """Mean page fraction per day, averaged across newspapers."""
    flat = _reshape_percent_of_day_series(percent_of_page_df)
    return flat.groupby('date').percent_of_page.mean()
def percent_of_papers_with_mention(percent_of_page_df, threshold=0):
    """Fraction of papers per day whose max page coverage exceeds *threshold*."""
    flat = _reshape_percent_of_day_series(percent_of_page_df)
    exceeded = (flat.groupby(['slug', 'date']).percent_of_page.max() > threshold).reset_index()
    # NOTE(review): `.mean()` over a frame that still carries the string `slug`
    # column relies on older pandas silently skipping non-numeric columns;
    # confirm behavior (or add numeric_only=True) on pandas >= 2.0.
    return exceeded.groupby('date').mean()
# Average mentions per day
syria_results = unigram_percent_of_page('Syria', df_us_3plus)
print('''Percent of papers that mentioned Syria by day:
{}
Average percent of newspaper front page devoted to Syria by day:
{}'''.format(
percent_of_papers_with_mention(syria_results),
percent_of_page_by_day(syria_results),
))
df_population = pd.read_csv('~/data/sub-est2015_all.csv', encoding='ISO-8859-2')
df_cities = df_population[df_population.NAME.str.endswith('city') | df_population.NAME.str.endswith('town')]
df_cities['city'] = df_cities.NAME.str.slice(0, -5).str.lower()
df_cities['place_name'] = df_cities.city + ', ' + df_cities.STNAME.str.lower()
df_cities = df_cities.sort_values('POPESTIMATE2015').groupby('place_name').head(1)
df_cities.head()
state_abbreviation_to_name = {}
with open('files/states.csv') as f:
next(f) # skip header
for line in f:
state, abbrev = line.strip().split(',')
state_abbreviation_to_name[abbrev.strip('"')] = state.strip('"').lower()
us_newspapers_df['place_name'] = us_newspapers_df.city.str.lower() + ', ' + us_newspapers_df.state.apply(state_abbreviation_to_name.get)
us_newspapers_with_pop = pd.merge(us_newspapers_df, df_cities[['place_name', 'POPESTIMATE2015']], how='left', on='place_name', copy=False)
print('''{} out of {} newspapers had places found in the census.
Examples of ones that didn't:
{}
'''.format(
us_newspapers_with_pop.POPESTIMATE2015.count(),
us_newspapers_with_pop.shape[0],
us_newspapers_with_pop[us_newspapers_with_pop.POPESTIMATE2015.isnull()].place_name.head()
))
us_newspapers_df.head()
unidentified_map = {}
unidentified_places = us_newspapers_with_pop[us_newspapers_with_pop.POPESTIMATE2015.isnull()]
for i, row in unidentified_places.iterrows():
matches = (df_population.STNAME == row.state) & (df_population.NAME.str.lower().str.contains(row.city.lower()))
if matches.sum() == 0:
continue
pops = df_population[matches].sort_values('POPESTIMATE2015').iloc[0]
unidentified_map[row.place_name] = (pops.NAME, pops.POPESTIMATE2015)
print('''Out of {} unidentified places, we found {} by looking for substrings.'''.format(
unidentified_places.shape[0],
len(unidentified_map)
))
import numpy as np
def set_from_map_if_null(row):
    """Fill a missing 2015 population estimate from the substring-matched lookup table.

    Rows that already carry POPESTIMATE2015 are returned unchanged; otherwise the
    module-level `unidentified_map` is consulted, defaulting to NaN.
    """
    if not pd.isnull(row.POPESTIMATE2015):
        return row.POPESTIMATE2015
    return unidentified_map.get(row.place_name, [np.nan, np.nan])[1]
us_newspapers_with_pop['population_est_2015'] = us_newspapers_with_pop.apply(set_from_map_if_null, 1)
print('''So now {} out of {} newspapers have populations.
Largest newspapers by population:
{}
'''.format(
us_newspapers_with_pop.population_est_2015.count(),
us_newspapers_with_pop.shape[0],
us_newspapers_with_pop.sort_values('population_est_2015', ascending=False).head(5)[['title', 'state']]
))
# First, without any idf weighting, we'll calculate the contribution of individual words
from collections import Counter
def vocab_weights_by_word(df):
    """Aggregate, per unigram, the average character area of every textbox containing it."""
    weights = Counter()
    for _, row in df.iterrows():
        for token in row.bow:
            # Weight by character area rather than total word area so word length
            # doesn't matter; the area is already normalized by page size, which
            # keeps differently-sized newspapers comparable.
            weights[token] += row.avg_character_area
    return weights
sorted(vocab_weights_by_word(df_denver_post_latest).items(), key=lambda x: x[1], reverse=True)[:5]
import string
import operator
from collections import Counter
from nltk.corpus import reuters
import numpy as np
doc_freq_counter = Counter()
for fid in reuters.fileids():
bow = set(map(operator.methodcaller('lower'), reuters.words(fid)))
bow = bow - set(string.punctuation) - set(string.digits)
doc_freq_counter.update(bow)
idfs = {}
for word, count in doc_freq_counter.items():
idfs[word] = np.log(float(len(reuters.fileids())) / count)
print('''We'll calculate document frequencies across the {} articles in the Reuters corpus.
The most common words in the corpus are:
{}
As idfs:
{}
'''.format(
len(reuters.fileids()),
sorted(doc_freq_counter.items(), key=operator.itemgetter(1), reverse=True)[:5],
sorted(idfs.items(), key=operator.itemgetter(1))[:5],
))
# again, this time with idf weighting
def vocab_weights_by_word(df, idf=None, method='by_char'):
    '''Aggregate per-unigram weights over every textbox in *df*.

    Methods:
        `by_char`: average character size of the textbox in which a string is embedded
        `by_word_area`: average character size * len of string
        `by_block`: area of the block in which the string is embedded

    Args:
        df: frame with `bow`, `avg_character_area` and `percent_of_page` columns.
        idf: optional word -> inverse-document-frequency mapping; words missing
            from the mapping fall back to the maximum idf (treated as rarest).
        method: one of the method names above.

    Returns:
        Counter mapping word -> aggregated weight.

    Raises:
        ValueError: if `method` is not one of the recognized names.
    '''
    if method not in ('by_char', 'by_word_area', 'by_block'):
        # Bug fix: previously raised `ArgumentError`, an undefined name, which
        # produced a NameError instead of the intended validation error.
        raise ValueError('method needs to be one of "by_char", "by_word_area", "by_block"')
    counter = Counter()
    # Bug fix: only compute the missing-word fallback when an idf mapping was
    # supplied; the old unconditional max(idf.values()) crashed for idf=None.
    max_idf = max(idf.values()) if idf else None
    # Hoisted loop-invariant: characters we never want to count as words.
    skip = set(string.punctuation) | set(string.digits)
    for i, row in df.iterrows():
        for word in set(row.bow) - skip:
            # we won't multiply by the number of characters to get closer to "true"
            # word real estate because we don't care about the length of words
            # (except in `by_word_area`). but we will divide by the total area of
            # the page to normalize across newspapers that are different sizes.
            if method in ('by_char', 'by_word_area'):
                weight = row.avg_character_area
                if method == 'by_word_area':
                    weight *= len(word)
            else:  # 'by_block'
                weight = row.percent_of_page
            if idf:
                weight *= idf.get(word, max_idf)
            counter[word] += weight
    return counter
print('''The top words in the latest Denver Post by aggregate word "real estate",
weighted by inverse document frequency:
{}
With word areas taken into consideration (longer words get weighted higher):
{}
Using the area of the entire block:
{}
'''.format(
pprint.pformat(sorted(vocab_weights_by_word(df_denver_post_latest, idfs).items(), key=operator.itemgetter(1), reverse=True)[:10]),
pprint.pformat(sorted(vocab_weights_by_word(df_denver_post_latest, idfs, method='by_word_area').items(), key=operator.itemgetter(1), reverse=True)[:10]),
pprint.pformat(sorted(vocab_weights_by_word(df_denver_post_latest, idfs, method='by_block').items(), key=operator.itemgetter(1), reverse=True)[:10])
))
import numpy as np
def make_idfs(docs):
    """Compute inverse document frequencies over a collection of bag-of-words sets.

    Args:
        docs: sized iterable (e.g. list or Series) of token sets, one per document.

    Returns:
        dict mapping word -> log(N_docs / document_frequency).
    """
    doc_counts = Counter()
    for doc in docs:
        # Bug fix: the original accumulated `row.bow` -- a name leaked from the
        # surrounding notebook scope -- instead of the current document.
        doc_counts.update(doc)
    n_docs = float(len(docs))
    idfs = {}
    for word, count in doc_counts.items():
        idfs[word] = np.log(n_docs / count)
    # Bug fix: the original never returned, so callers received None.
    return idfs
article_idfs = make_idfs(df_us.bow)
print('''Vocabulary size of these two different idf datasets:
Reuters: {}
Front pages: {}
Most common front page words:
{}
'''.format(
len(idfs),
len(article_idfs),
pprint.pformat(sorted(article_idfs.items(), key=operator.itemgetter(1))[:10])
))
from sklearn.feature_extraction import DictVectorizer
all_vocab_weights = {}
todays_papers = df_us_3plus[df_us_3plus.date == df_us_3plus.date.max()]
print('Total papers: ', todays_papers.slug.nunique())
for i, (slug, paper) in enumerate(todays_papers.groupby('slug')):
if i % 50 == 0:
print('.', end='')
all_vocab_weights[slug] = vocab_weights_by_word(paper, article_idfs, method='by_word_area')
vectorizer = DictVectorizer(sparse=False)
X = vectorizer.fit_transform(all_vocab_weights.values())
print('Top results with word area:')
sorted(zip(vectorizer.feature_names_, X.mean(axis=0)), key=operator.itemgetter(1), reverse=True)[:10]
all_vocab_weights = {}
todays_papers = df_us_3plus[df_us_3plus.date == df_us_3plus.date.max()]
print('Total papers: ', todays_papers.slug.nunique())
for i, (slug, paper) in enumerate(todays_papers.groupby('slug')):
if i % 50 == 0:
print('.', end='')
all_vocab_weights[slug] = vocab_weights_by_word(paper, article_idfs, method='by_char')
vectorizer = DictVectorizer(sparse=False)
X = vectorizer.fit_transform(all_vocab_weights.values())
print('Top results with character area:')
sorted(zip(vectorizer.feature_names_, X.mean(axis=0)), key=operator.itemgetter(1), reverse=True)[:10]
all_vocab_weights = {}
todays_papers = df_us_3plus[df_us_3plus.date == df_us_3plus.date.max()]
print('Total papers: ', todays_papers.slug.nunique())
for i, (slug, paper) in enumerate(todays_papers.groupby('slug')):
if i % 50 == 0:
print('.', end='')
all_vocab_weights[slug] = vocab_weights_by_word(paper, article_idfs, method='by_block')
vectorizer = DictVectorizer(sparse=False)
X = vectorizer.fit_transform(all_vocab_weights.values())
print('Top results with block area:')
sorted(zip(vectorizer.feature_names_, X.mean(axis=0)), key=operator.itemgetter(1), reverse=True)[:10]
df_us_3plus['page_height_round'] = df_us_3plus.page_height.apply(int)
df_us_3plus['page_width_round'] = df_us_3plus.page_width.apply(int)
import utils
def plot_word(dataframe, word, date=None, paper=None):
    """Plot the average page-position intensity grid for text blocks containing *word*.

    Args:
        dataframe: frame with `bow`, `date`, `slug` and rounded page-size columns.
        word: unigram to look up; must be lowercase to match the lowercased
            bag-of-words sets built by bag_of_words.
        date: optional single date to restrict to.
        paper: optional paper display name (resolved to a slug via utils).

    Returns:
        Whatever utils.plot_intensity returns for the averaged grid.
    """
    title = 'Appearances of {}'.format(word)
    if date:
        dataframe = dataframe[dataframe.date == date]
        title += ' on {}'.format(date)
    if paper:
        dataframe = dataframe[dataframe.slug == utils.slug_for_newspaper(paper)]
        title += ' on {}'.format(paper)
    # Keep only blocks whose bag-of-words contains the query word.
    relevant_df = dataframe[dataframe.bow.apply(lambda bow: word in bow)]
    grids = []
    for (date, slug), paper in relevant_df.groupby(['date', 'slug']):
        # NOTE(review): this passes `relevant_df` (all dates/papers) rather than
        # the per-group frame `paper`, so every appended grid is identical and
        # the averaging below is a no-op -- likely intended `paper`; confirm.
        grids.append(utils.make_intensity_grid(relevant_df, relevant_df.page_height_round.max(), relevant_df.page_width_round.max()))
    # Element-wise average of the per-(date, slug) grids.
    avg_intensity = sum([x / len(grids) for x in grids])
    return utils.plot_intensity(avg_intensity, title)
plot_word(df_us_3plus, 'syria')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Fonts
Step2: Denver Post
Step3: Unigram "percent of page" analysis
Step4: Now we run this method across all the newspapers, across all days!
Step5: Connecting newspapers with population metadata
Step6: Good enough!
Step7: Oof. Looks like population might not work so well, since large cities often have several, lesser-read newspapers.
Step8: Clearly there needs to be some kind of weighting, or else words like "by" will dominate.
Step9: Better document frequencies
Step10: Finding "front-page-est" words
Step11: Ah! So it looks like
|
2,078
|
<ASSISTANT_TASK:>
Python Code:
# Required to see plots when running on mybinder
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Python standard-libraries to download data from the web
from urllib.parse import urlencode
from urllib.request import urlretrieve
#Some astropy submodules that you know already
from astropy import units as u
from astropy import coordinates as coords
from astropy.coordinates import SkyCoord
from astropy.io import fits
#only here to display images
from IPython.display import Image
# These are the new modules for this notebook
from astroquery.simbad import Simbad
from astroquery.sdss import SDSS
galaxy_name = 'NGC5406'
galaxy = SkyCoord.from_name(galaxy_name)
pos = coords.SkyCoord(galaxy.ra, galaxy.dec, frame='icrs')
print(pos)
im_size = 3*u.arcmin # get a 25 arcmin square
im_pixels = 1024
cutoutbaseurl = 'http://skyservice.pha.jhu.edu/DR12/ImgCutout/getjpeg.aspx'
query_string = urlencode(dict(ra=galaxy.ra.deg,
dec=galaxy.dec.deg,
width=im_pixels, height=im_pixels,
scale=im_size.to(u.arcsec).value/im_pixels))
url = cutoutbaseurl + '?' + query_string
# this downloads the image
image_name = galaxy_name+'_SDSS_cutout.jpg'
urlretrieve(url, image_name)
Image(image_name) #load the image into the notebook
xid = SDSS.query_region(pos, spectro=True)
print(xid)
spectra = SDSS.get_spectra(matches=xid)
spectra[0]
spectra_data = spectra[0][1].data
spectra_data
plt.plot(10**spectra_data['loglam'], spectra_data['flux'])
plt.xlabel('wavelenght (Angstrom)')
plt.ylabel('flux (nanomaggies)')
plt.title('SDSS spectra of '+galaxy_name)
spectra[0][3].data
lines = spectra[0][3].data
lines['LINENAME']
for n in ['[O_II] 3727', '[O_III] 5007', 'H_alpha']:
print(n, " ->", lines['LINEWAVE'][lines['LINENAME']==n])
plt.plot(10**spectra_data['loglam'], spectra_data['flux'], color='black')
plt.axvline(x=lines['LINEWAVE'][lines['LINENAME']=='[O_II] 3727'], label=r'O[II]', color='blue')
plt.axvline(x=lines['LINEWAVE'][lines['LINENAME']=='[O_III] 5007'], label=r'O[III]', color='red')
plt.axvline(x=lines['LINEWAVE'][lines['LINENAME']=='H_alpha'], label=r'H$\alpha$', color='green')
plt.xlabel('wavelenght (Angstrom)')
plt.ylabel('flux (nanomaggies)')
plt.title('SDSS spectra of '+galaxy_name)
plt.legend()
images = SDSS.get_images(matches=xid, band='g')
image_data = images[0][0].data
plt.figure(figsize=(10,10))
plt.imshow(image_data)
plt.colorbar()
clipped_image = image_data.copy()
clipped_image[clipped_image>1.0]=1.0
plt.figure(figsize=(10,10))
plt.imshow(clipped_image)
plt.colorbar()
plt.figure(figsize=(10,10))
plt.imshow(np.log10(image_data[125:475,750:1100]))
plt.colorbar()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The first thing is getting the coordinates for an object of interest, in this case NCG5406
Step2: We can now get a picture from the SDSS DR12 image service
Step3: Now we need to get the identification numbers to grab the data from SDSS
Step4: We can finally dowload the data. The spectra in this case.
Step5: The spectrum is stored as a table in the second item of the list.
Step6: If we pass spectra_data to the interpreter we can see the structure of that table.
Step7: The fourth record stores the positions of some emission lines
Step8: Let's print the wavelenght for some of them
Step9: Overplotting these lines on the spectrum
Step10: We can also get the images in the different SDSS bands (u,g,r,i,z)
Step11: That wasn't very nice! Where is the galaxy? What happens is that the flux values in some of the pixels are very high compared to the typical flux.
Step12: This looks better now. We can now see where the galaxy is.
|
2,079
|
<ASSISTANT_TASK:>
Python Code:
import os
import pytesmo.validation_framework.temporal_matchers as temporal_matchers
import pytesmo.validation_framework.metric_calculators as metrics_calculators
from datetime import datetime
from pytesmo.io.sat.ascat import AscatH25_SSM
from pytesmo.io.ismn.interface import ISMN_Interface
from pytesmo.validation_framework.validation import Validation
from examples.data_preparation_ASCAT_ISMN import DataPreparation
ascat_data_folder = os.path.join('/media/sf_R', 'Datapool_processed', 'WARP', 'WARP5.5',
'IRMA1_WARP5.5_P2', 'R1', '080_ssm', 'netcdf')
ascat_grid_folder = os.path.join('/media/sf_R', 'Datapool_processed', 'WARP',
'ancillary', 'warp5_grid')
ascat_reader = AscatH25_SSM(ascat_data_folder, ascat_grid_folder)
ascat_reader.read_bulk = True
ascat_reader._load_grid_info()
ismn_data_folder = os.path.join('/media/sf_D', 'ISMN', 'data')
ismn_reader = ISMN_Interface(ismn_data_folder)
jobs = []
ids = ismn_reader.get_dataset_ids(variable='soil moisture', min_depth=0, max_depth=0.1)
for idx in ids:
metadata = ismn_reader.metadata[idx]
jobs.append((idx, metadata['longitude'], metadata['latitude']))
save_path = os.path.join('/media/sf_D', 'validation_framework', 'test_ASCAT_ISMN')
datasets = {'ISMN': {'class': ismn_reader, 'columns': ['soil moisture'],
'type': 'reference', 'args': [], 'kwargs': {}},
'ASCAT': {'class': ascat_reader, 'columns': ['sm'], 'type': 'other',
'args': [], 'kwargs': {}, 'grids_compatible': False,
'use_lut': False, 'lut_max_dist': 30000}
}
period = [datetime(2007, 1, 1), datetime(2014, 12, 31)]
process = Validation(datasets=datasets, data_prep=DataPreparation(),
temporal_matcher=temporal_matchers.BasicTemporalMatching(window=1/24.0, reverse=True),
scaling='lin_cdf_match', scale_to_other=True,
metrics_calculator=metrics_calculators.BasicMetrics(),
period=period, cell_based_jobs=False)
def start_processing(job):
    """Run the validation for one job, retrying once on RuntimeError.

    Relies on the module-level `process` Validation object defined above.
    A second RuntimeError propagates to the caller.
    """
    try:
        return process.calc(job)
    except RuntimeError:
        # Retry once -- presumably to paper over transient read errors; confirm.
        return process.calc(job)
# if __name__ == '__main__':
#
# from pytesmo.validation_framework.results_manager import netcdf_results_manager
#
# for job in jobs:
# results = process.calc(job)
# netcdf_results_manager(results, save_path)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Initialize ASCAT reader
Step2: Initialize ISMN reader
Step3: Create the variable jobs which is a list containing either cell numbers (for a cell based process) or grid point index information tuple(gpi, longitude, latitude). For ISMN gpi is replaced by idx which is an index used to read time series of variables such as soil moisture. DO NOT CHANGE the name jobs because it will be searched during the parallel processing!
Step4: Create the variable save_path which is a string representing the path where the results will be saved. DO NOT CHANGE the name save_path because it will be searched during the parallel processing!
Step5: Create the validation object.
Step6: If you decide to use the ipython parallel processing to perform the validation please ADD the start_processing function to your code. Then move to pytesmo.validation_framework.start_validation, change the path to your setup code and start the validation.
Step7: If you chose to perform the validation normally then please ADD the uncommented main method to your code.
|
2,080
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import json
loans = pd.read_csv('lending-club-data.csv')
loans.head(2)
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
loans = loans.drop('bad_loans', axis=1)
features = ['grade', # grade of the loan
'term', # the term of the loan
'home_ownership', # home_ownership status: own, mortgage or rent
'emp_length', # number of years of employment
]
target = 'safe_loans'
loans = loans[features + [target]]
categorical_variables = []
for feat_name, feat_type in zip(loans.columns, loans.dtypes):
if feat_type == object:
categorical_variables.append(feat_name)
for feature in categorical_variables:
loans_one_hot_encoded = pd.get_dummies(loans[feature],prefix=feature)
loans_one_hot_encoded.fillna(0)
#print loans_one_hot_encoded
loans = loans.drop(feature, axis=1)
for col in loans_one_hot_encoded.columns:
loans[col] = loans_one_hot_encoded[col]
print loans.head(2)
print loans.columns
loans.iloc[122602]
with open('module-6-assignment-train-idx.json') as train_data_file:
train_idx = json.load(train_data_file)
with open('module-6-assignment-validation-idx.json') as validation_data_file:
validation_idx = json.load(validation_data_file)
print train_idx[:3]
print validation_idx[:3]
print len(train_idx)
print len(validation_idx)
train_data = loans.iloc[train_idx]
validation_data = loans.iloc[validation_idx]
print len(loans.dtypes )
def reached_minimum_node_size(data, min_node_size):
    """Early-stopping check: True when `data` has at most `min_node_size` rows.

    Parameters
    ----------
    data : sized collection (e.g. the DataFrame of this node's data points)
    min_node_size : int, minimum number of points required to keep splitting

    Returns
    -------
    bool
    """
    # `<=` already yields a bool; no need for an if/else over True/False.
    return len(data) <= min_node_size
def error_reduction(error_before_split, error_after_split):
    """Return how much the classification error drops after a split."""
    reduction = error_before_split - error_after_split
    return reduction
def intermediate_node_num_mistakes(labels_in_node):
    """Number of mistakes a majority-class classifier makes at this node.

    `labels_in_node` is an array-like of +1/-1 labels (Series or ndarray).
    """
    # Empty node: the majority classifier makes no mistakes by convention.
    if len(labels_in_node) == 0:
        return 0
    num_safe = (labels_in_node == +1).sum()
    num_risky = (labels_in_node == -1).sum()
    # The majority classifier mislabels whichever class is smaller.
    return min(num_safe, num_risky)
def best_splitting_feature(data, features, target):
    """Return the feature in `features` whose 0/1 split of `data` minimizes
    classification error on the `target` column.

    Each candidate feature is assumed to be binary (one-hot encoded); the
    left branch takes value 0, the right branch value 1. Ties keep the
    earliest-listed feature.
    """
    target_values = data[target]
    best_feature = None # Keep track of the best feature
    best_error = 10     # Keep track of the best error so far
    # Note: Since error is always <= 1, best_error is initialized with
    # something larger than 1 so the first candidate always wins.
    # Convert to float to make sure error gets computed correctly.
    num_data_points = float(len(data))

    # Loop through each feature to consider splitting on that feature
    for feature in features:

        # The left split will have all data points where the feature value is 0
        left_split = data[data[feature] == 0]

        # The right split will have all data points where the feature value is 1
        right_split = data[data[feature] == 1]

        # Number of misclassified examples in each split: the majority
        # classifier in a node mislabels the minority class.
        left_mistakes = intermediate_node_num_mistakes(left_split[target])
        right_mistakes = intermediate_node_num_mistakes(right_split[target])

        # Compute the classification error of this split.
        # Error = (# of mistakes (left) + # of mistakes (right)) / (# of data points)
        error = (left_mistakes + right_mistakes) / num_data_points

        # Strictly-better comparison keeps the first feature on ties.
        if error < best_error:
            best_feature = feature
            best_error = error

    return best_feature # Return the best feature we found
def create_leaf(target_values):
    """Build a leaf node predicting the majority class of `target_values`.

    Returns a dict with 'splitting_feature', 'left', 'right' set to None,
    'is_leaf' True, and 'prediction' set to +1 or -1.
    """
    num_positive = len(target_values[target_values == +1])
    num_negative = len(target_values[target_values == -1])
    # Majority vote; ties (and all-negative nodes) predict -1.
    majority = 1 if num_positive > num_negative else -1
    return {
        'splitting_feature': None,
        'left': None,
        'right': None,
        'is_leaf': True,
        'prediction': majority,
    }
def decision_tree_create(data, features, target, current_depth = 0,
                         max_depth = 10, min_node_size=1,
                         min_error_reduction=0.0):
    """Recursively build a binary decision tree over one-hot `features`.

    Early-stopping conditions (in addition to the two base cases):
      1. `current_depth` reaches `max_depth`;
      2. the node holds at most `min_node_size` points;
      3. the best split reduces classification error by no more than
         `min_error_reduction`.

    Returns a nested dict with keys 'is_leaf', 'prediction',
    'splitting_feature', 'left' and 'right'.
    """
    remaining_features = features[:] # Make a copy of the features.

    target_values = data[target]
    print "--------------------------------------------------------------------"
    print "Subtree, depth = %s (%s data points)." % (current_depth, len(target_values))

    # Stopping condition 1: All nodes are of the same type.
    if intermediate_node_num_mistakes(target_values) == 0:
        print "Stopping condition 1 reached. All data points have the same target value."
        return create_leaf(target_values)

    # Stopping condition 2: No more features to split on.
    if remaining_features == []:
        print "Stopping condition 2 reached. No remaining features."
        return create_leaf(target_values)

    # Early stopping condition 1: Reached max depth limit.
    if current_depth >= max_depth:
        print "Early stopping condition 1 reached. Reached maximum depth."
        return create_leaf(target_values)

    # Early stopping condition 2: Reached the minimum node size.
    # If the number of data points is less than or equal to the minimum size, return a leaf.
    if reached_minimum_node_size(data, min_node_size):
        print "Early stopping condition 2 reached. Reached minimum node size."
        return create_leaf(target_values)

    # Find the best splitting feature
    splitting_feature = best_splitting_feature(data, features, target)

    # Split on the best feature that we found.
    left_split = data[data[splitting_feature] == 0]
    right_split = data[data[splitting_feature] == 1]

    # Early stopping condition 3: Minimum error reduction
    # Error before splitting: fraction of points the majority class mislabels.
    error_before_split = intermediate_node_num_mistakes(target_values) / float(len(data))

    # Error after splitting: combined mistakes of both children over all points.
    left_mistakes = intermediate_node_num_mistakes(left_split[target])
    right_mistakes = intermediate_node_num_mistakes(right_split[target])
    error_after_split = (left_mistakes + right_mistakes) / float(len(data))

    # If the error reduction is LESS THAN OR EQUAL TO min_error_reduction, return a leaf.
    if error_reduction(error_before_split, error_after_split) <= min_error_reduction:
        print "Early stopping condition 3 reached. Minimum error reduction."
        return create_leaf(target_values)

    remaining_features.remove(splitting_feature)
    print "Split on feature %s. (%s, %s)" % (\
                      splitting_feature, len(left_split), len(right_split))

    # Repeat (recurse) on left and right subtrees
    left_tree = decision_tree_create(left_split, remaining_features, target,
                                     current_depth + 1, max_depth, min_node_size, min_error_reduction)
    right_tree = decision_tree_create(right_split, remaining_features, target,
                                      current_depth + 1, max_depth, min_node_size, min_error_reduction)

    return {'is_leaf'          : False,
            'prediction'       : None,
            'splitting_feature': splitting_feature,
            'left'             : left_tree,
            'right'            : right_tree}
def count_nodes(tree):
    """Total number of nodes (internal + leaf) in a decision-tree dict."""
    pending = [tree]
    total = 0
    while pending:
        node = pending.pop()
        total += 1
        if not node['is_leaf']:
            pending.append(node['left'])
            pending.append(node['right'])
    return total
features = list(train_data.columns)
features.remove('safe_loans')
print list(train_data.columns)
print features
small_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth = 2,
min_node_size = 10, min_error_reduction=0.0)
if count_nodes(small_decision_tree) == 7:
print 'Test passed!'
else:
print 'Test failed... try again!'
print 'Number of nodes found :', count_nodes(small_decision_tree)
print 'Number of nodes that should be there : 7'
my_decision_tree_new = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 100, min_error_reduction=0.0)
my_decision_tree_old = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 0, min_error_reduction=-1)
def classify(tree, x, annotate = False):
    """Predict the label for a single data point `x` by walking the tree.

    `x` must be subscriptable by feature name; set `annotate` to print the
    path taken through the tree.
    """
    if tree['is_leaf']:
        if annotate:
            print("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']
    # Internal node: route left on feature value 0, right on 1.
    split_feature_value = x[tree['splitting_feature']]
    if annotate:
        print("Split on %s = %s" % (tree['splitting_feature'], split_feature_value))
    subtree = tree['left'] if split_feature_value == 0 else tree['right']
    return classify(subtree, x, annotate)
validation_data.iloc[0]
print 'Predicted class: %s ' % classify(my_decision_tree_new, validation_data.iloc[0])
classify(my_decision_tree_new, validation_data.iloc[0], annotate = True)
classify(my_decision_tree_old, validation_data.iloc[0], annotate = True)
def evaluate_classification_error(tree, data, target):
    """Fraction of rows in `data` that `tree` misclassifies on column `target`."""
    # Apply the classify(tree, x) to each row in your data
    prediction = data.apply(lambda x: classify(tree, x), axis=1)

    # Compare predictions against the true labels and average the mismatches.
    return (data[target] != np.array(prediction)).values.sum() / float(len(data))
evaluate_classification_error(my_decision_tree_new, validation_data, target)
evaluate_classification_error(my_decision_tree_old, validation_data, target)
model_1 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 2,
min_node_size = 0, min_error_reduction=-1)
model_2 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 0, min_error_reduction=-1)
model_3 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 14,
min_node_size = 0, min_error_reduction=-1)
print "Training data, classification error (model 1):", evaluate_classification_error(model_1, train_data, target)
print "Training data, classification error (model 2):", evaluate_classification_error(model_2, train_data, target)
print "Training data, classification error (model 3):", evaluate_classification_error(model_3, train_data, target)
print "Validation data, classification error (model 1):", evaluate_classification_error(model_1, validation_data, target)
print "Validation data, classification error (model 2):", evaluate_classification_error(model_2, validation_data, target)
print "Validation data, classification error (model 3):", evaluate_classification_error(model_3, validation_data, target)
def count_leaves(tree):
    """Number of leaf nodes in a decision-tree dict (iterative traversal)."""
    stack = [tree]
    leaves = 0
    while stack:
        node = stack.pop()
        if node['is_leaf']:
            leaves += 1
        else:
            stack.append(node['left'])
            stack.append(node['right'])
    return leaves
print "number of leaves in model_1 is : {}".format(count_leaves(model_1))
print "number of leaves in model_2 is : {}".format(count_leaves(model_2))
print "number of leaves in model_3 is : {}".format(count_leaves(model_3))
model_4 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 0, min_error_reduction=-1)
model_5 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 0, min_error_reduction=0)
model_6 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 0, min_error_reduction=5)
print "Validation data, classification error (model 4):", evaluate_classification_error(model_4, validation_data, target)
print "Validation data, classification error (model 5):", evaluate_classification_error(model_5, validation_data, target)
print "Validation data, classification error (model 6):", evaluate_classification_error(model_6, validation_data, target)
print "number of leaves in model_4 is : {}".format(count_leaves(model_4))
print "number of leaves in model_5 is : {}".format(count_leaves(model_5))
print "number of leaves in model_6 is : {}".format(count_leaves(model_6))
model_7 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 0, min_error_reduction=-1)
model_8 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 2000, min_error_reduction=-1)
model_9 = decision_tree_create(train_data, features, 'safe_loans', max_depth = 6,
min_node_size = 50000, min_error_reduction=-1)
print "Validation data, classification error (model 7):", evaluate_classification_error(model_7, validation_data, target)
print "Validation data, classification error (model 8):", evaluate_classification_error(model_8, validation_data, target)
print "Validation data, classification error (model 9):", evaluate_classification_error(model_9, validation_data, target)
print "number of leaves in model_7 is : {}".format(count_leaves(model_7))
print "number of leaves in model_8 is : {}".format(count_leaves(model_8))
print "number of leaves in model_9 is : {}".format(count_leaves(model_9))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load LendingClub Dataset
Step2: As before, we reassign the labels to have +1 for a safe loan, and -1 for a risky (bad) loan.
Step3: We will be using the same 4 categorical features as in the previous assignment
Step4: Transform categorical data into binary features
Step5: Train-Validation split
Step6: Early stopping methods for decision trees
Step7: Quiz Question
Step8: Quiz Question
Step9: We then wrote a function best_splitting_feature that finds the best feature to split on given the data and a list of features to consider.
Step10: Finally, recall the function create_leaf from the previous assignment, which creates a leaf node given a set of target values.
Step11: Incorporating new early stopping conditions in binary decision tree implementation
Step12: Here is a function to count the nodes in your tree
Step13: Run the following test code to check your implementation. Make sure you get 'Test passed' before proceeding.
Step14: Build a tree!
Step15: Let's now train a tree model ignoring early stopping conditions 2 and 3 so that we get the same tree as in the previous assignment. To ignore these conditions, we set min_node_size=0 and min_error_reduction=-1 (a negative value).
Step16: Making predictions
Step17: Now, let's consider the first example of the validation set and see what the my_decision_tree_new model predicts for this data point.
Step18: Let's add some annotations to our prediction to see what the prediction path was that lead to this predicted class
Step19: Let's now recall the prediction path for the decision tree learned in the previous assignment, which we recreated here as my_decision_tree_old.
Step20: Quiz Question
Step21: Now, let's use this function to evaluate the classification error of my_decision_tree_new on the validation_set.
Step22: Now, evaluate the validation error using my_decision_tree_old.
Step23: Quiz Question
Step24: Evaluating the models
Step25: Now evaluate the classification error on the validation data.
Step26: Quiz Question
Step27: Compute the number of nodes in model_1, model_2, and model_3.
Step28: Quiz Question
Step29: Calculate the accuracy of each model (model_4, model_5, or model_6) on the validation set.
Step30: Using the count_leaves function, compute the number of leaves in each of each models in (model_4, model_5, and model_6).
Step31: Quiz Question
Step32: Now, let us evaluate the models (model_7, model_8, or model_9) on the validation_set.
Step33: Using the count_leaves function, compute the number of leaves in each of each models (model_7, model_8, and model_9).
|
2,081
|
<ASSISTANT_TASK:>
Python Code:
# Import modules
import sys
import math
import numpy as np
from matplotlib import pyplot as plt
from scipy import linalg
from scipy import sparse
A = np.array([1, -4, 2, 3, 2, 2]).reshape(3, 2)
b = np.array([-3, 15, 9])
x = linalg.lstsq(A, b)
print(x[0])
A = np.array([1, 1, 1, -1, 1, 1]).reshape(3, 2)
b = np.array([2, 1, 3])
x = linalg.lstsq(A, b)
print(x[0])
def classical_gram_schmidt_orthogonalization(A):
    """Reduced QR factorization of A via classical Gram-Schmidt.

    Returns (Q, R) where Q has orthonormal columns and R is upper
    triangular, so that A = Q @ R.
    """
    m, n = A.shape
    Q = np.zeros((m, n))
    R = np.zeros((n, n))
    for col in range(n):
        # Classical variant: project the *original* column onto each q_i.
        residual = A[:, col]
        for row in range(col):
            R[row, col] = np.dot(Q[:, row], A[:, col])
            residual = residual - R[row, col] * Q[:, row]
        R[col, col] = np.linalg.norm(residual, 2)
        Q[:, col] = residual / R[col, col]
    return Q, R
A = np.array([1, -4, 2, 3, 2, 2]).reshape(3, 2)
Q, R = classical_gram_schmidt_orthogonalization(A)
print('Q =')
print(Q)
print('R =')
print(R)
A = np.array([1, -4, 2, 3, 2, 2]).reshape(3, 2)
Q, R = linalg.qr(A)
print('Q =')
print(Q)
print('R =')
print(R)
A = np.array([1, -4, 2, 3, 2, 2]).reshape(3, 2)
b = np.array([-3, 15, 9]).T
Q, R = linalg.qr(A)
lu, piv = linalg.lu_factor(R[:2,:])
x = linalg.lu_solve([lu, piv], np.matmul(Q.T, b).reshape(3, 1)[:2])
print('x = %s' %x.T)
def modified_gram_schmidt_orthogonalization(A):
    """Reduced QR factorization of A via *modified* Gram-Schmidt.

    Numerically more stable than the classical variant: each projection
    coefficient is computed against the partially-orthogonalized residual
    rather than the original column.
    """
    m, n = A.shape
    Q = np.zeros((m, n))
    R = np.zeros((n, n))
    for col in range(n):
        residual = A[:, col]
        for row in range(col):
            # Modified variant: dot against the current residual, not A[:, col].
            R[row, col] = np.dot(Q[:, row], residual)
            residual = residual - R[row, col] * Q[:, row]
        R[col, col] = np.linalg.norm(residual, 2)
        Q[:, col] = residual / R[col, col]
    return Q, R
x = np.array([3, 4]).reshape(2, 1)
w = np.array([5, 0]).reshape(2, 1)
v = w - x
# Projection matrix
P = np.matmul(v, v.T) / np.matmul(v.T, v)
# Householder reflector
H = np.identity(P.shape[0]) - 2 * P
print('H=\n', H)
def householder_reflector(x):
    """Householder reflector H mapping x onto (||x||, 0, ..., 0).

    H is symmetric and orthogonal: H @ x has all of its energy in the
    first component, and H @ H = I.
    """
    target = np.zeros(x.size)
    target[0] = np.linalg.norm(x, 2)
    v = (target - x).reshape(-1, 1)
    # Projector onto span(v), then reflect: H = I - 2 * v v^T / (v^T v).
    projector = np.matmul(v, v.T) / np.matmul(v.T, v)
    return np.identity(x.size) - 2 * projector
A = np.array([3, 1, 4, 3]).reshape(2, 2)
H1 = householder_reflector(A[:,0])
R = np.matmul(H1, A)
Q = H1
print('Q=\n', Q)
print('R=\n', R)
A = np.array([1, -4, 2, 3, 2, 2]).reshape(3, 2)
H1 = householder_reflector(A[:, 0])
TEMP = np.matmul(H1, A)
H2 = householder_reflector(TEMP[1:, 1])
H2_Ext = np.identity(H1.shape[0])
H2_Ext[-H2_TMP.shape[0]:, -H2_TMP.shape[1]:] = H2
R = np.matmul(np.matmul(H2_Ext, H1), A)
Q = np.matmul(H1, H2_Ext)
print('Q=\n', Q)
print('R=\n', R)
A = np.array([1, 1, 0, 0, 1, 0, 1, 1, 1]).reshape(3, 3)
b = np.array([1, 0, 0]).reshape(3, 1)
x0 = np.zeros(3).reshape(3, 1)
x, info = sparse.linalg.gmres(A, b, x0)
print('x = %s' %x)
A = np.arange(1, 10).reshape(3, 3)
D = np.diag(A.diagonal())
print(D)
print(linalg.inv(D))
def R_xy(x, y):
    """Residual vector for trilateration at position (x, y).

    Each component is (distance to a fixed beacon) minus that beacon's
    measured range; all residuals vanish at the true position.
    """
    beacons = [(-1, 0, 1), (1, 0.5, 0.5), (1, -0.5, 0.5)]
    residuals = np.zeros(3)
    for i, (xf, yf, rng) in enumerate(beacons):
        residuals[i] = math.sqrt((x - xf) ** 2 + (y - yf) ** 2) - rng
    return residuals
def DR_xy(x, y):
    """Jacobian (3x2) of the trilateration residual R_xy at (x, y).

    Row i holds the partial derivatives of residual i with respect to x
    and y; each row is the unit vector pointing from beacon i to (x, y).
    """
    A = np.zeros(6).reshape(3, 2)
    # d/dx and d/dy of sqrt((x-xf)^2 + (y-yf)^2); the constant -R term vanishes.
    fx = lambda xf, yf : (x - xf) / math.sqrt(pow(x - xf, 2) + pow(y - yf, 2))
    fy = lambda xf, yf : (y - yf) / math.sqrt(pow(x - xf, 2) + pow(y - yf, 2))
    A[0][0] = fx(-1, 0)
    A[0][1] = fy(-1, 0)
    A[1][0] = fx(1, 0.5)
    A[1][1] = fy(1, 0.5)
    A[2][0] = fx(1, -0.5)
    A[2][1] = fy(1, -0.5)
    return A
def gauss_newton_method(x0, y0, k):
    """Run k Gauss-Newton iterations on the trilateration residuals R_xy.

    Starting from (x0, y0), each step solves the normal equations
    (A^T A) v = -A^T r for the update v, where A = DR_xy and r = R_xy.
    Returns the final iterate as a length-2 array.
    """
    xk = np.array([x0, y0])
    for _ in range(k):
        x = xk[0]
        y = xk[1]
        A = DR_xy(x, y)
        r = R_xy(x, y)
        # Normal-equation solve via explicit inverse (fine for a 2x2 system).
        v = np.matmul(linalg.inv(np.matmul(A.T, A)), -np.matmul(A.T, r))
        xk = xk + v
    return xk
x = gauss_newton_method(0, 0, 8)
print('x = %s' %x)
def R_xy(c):
    """Residuals of the Gaussian model c1*exp(-c2*(t - c3)^2) on fixed data.

    NOTE(review): this redefines the earlier trilateration R_xy, and the
    five (t, y) points are hard-coded here while DR_xy below takes them as
    a `data` argument -- confirm the two stay in sync.
    """
    c1 = c[0]
    c2 = c[1]
    c3 = c[2]
    r = np.zeros(5)
    # Residual: model prediction minus observed y.
    f = lambda t, y : c1 * math.exp( -c2 * pow(t - c3, 2) ) - y
    r[0] = f(1, 3)
    r[1] = f(2, 5)
    r[2] = f(2, 7)
    r[3] = f(3, 5)
    r[4] = f(4, 1)
    return r
def DR_xy(data, c):
    """Jacobian (5x3) of the Gaussian-model residuals w.r.t. (c1, c2, c3).

    Only the t-coordinate (data[i][0]) of each point is used, since the
    observed y enters each residual linearly and drops out of the
    derivatives.
    """
    c1 = c[0]
    c2 = c[1]
    c3 = c[2]
    DR = np.zeros(15).reshape(5, 3)
    # Partial derivatives of c1*exp(-c2*(t-c3)^2) w.r.t. c1, c2 and c3.
    f0 = lambda t : math.exp( -c2 * pow(t - c3, 2) )
    f1 = lambda t : -c1 * pow(t - c3, 2) * math.exp( -c2 * pow(t - c3, 2) )
    f2 = lambda t : 2 * c1 * c2 * (t - c3) * math.exp( -c2 * pow(t - c3, 2) )
    for i in range(5):
        t = data[i][0]
        DR[i][0] = f0(t)
        DR[i][1] = f1(t)
        DR[i][2] = f2(t)
    return DR
def levenberg_marquardt_method(data, c, la, k):
    """Run k Levenberg-Marquardt iterations for the Gaussian curve fit.

    `la` is the damping parameter lambda: each step solves
    (A^T A + la * diag(A^T A)) v = -A^T r, blending Gauss-Newton (la -> 0)
    with scaled gradient descent (large la). Returns the final coefficient
    vector.
    """
    ck = c
    for _ in range(k):
        A = DR_xy(data, ck)
        r = R_xy(ck)
        mAr = -np.matmul(A.T, r)
        # Damped normal equations; diag(A^T A) scales each parameter's step.
        invA = np.linalg.inv(np.matmul(A.T, A) + la * np.diag(np.matmul(A.T, A).diagonal()))
        v = np.matmul(invA, mAr)
        ck = ck + v
    return ck
data = np.array([(1, 3), (2, 5), (2, 7), (3, 5), (4, 1)])
c = np.array([1, 1, 1])
c = levenberg_marquardt_method(data, c, 50, 1200)
f = lambda t, c1, c2, c3 : c1 * np.exp( -c2 * np.power(t - c3, 2) )
X = np.linspace(0, 5, 100)
Y = f(X, *c)
plt.plot(X, Y, color='cyan')
plt.plot(data[:,0], data[:,1], linestyle='', markersize=8, marker='.', color='blue')
plt.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: 4.1 Least Squares and the normal equations
Step2: Example
Step3: The best line is $y = \frac{7}{4} + \frac{3}{4}t$
Step4: Example
Step5: Example
Step6: Example
Step7: Modified Gram-Schmidt orthogonalization
Step8: Householder reflector
Step9: Example
Step10: Example
Step11: 4.4 Generalized Minimum Residual (GMRES) Method
Step12: 4.5 Nonlinear Least Squares
Step13: Levenberg-Marquardt Method
|
2,082
|
<ASSISTANT_TASK:>
Python Code:
# set_datalab_project_id('my-project-id')
from google.datalab.stackdriver import monitoring as gcm
groups_dataframe = gcm.Groups().as_dataframe()
# Sort the dataframe by the group name, and reset the index.
groups_dataframe = groups_dataframe.sort_values(by='Group name').reset_index(drop=True)
groups_dataframe.head(5)
import sys
if groups_dataframe.empty:
sys.stderr.write('This project has no Stackdriver groups. The remaining notebook '
'will raise errors!')
else:
first_group_id = groups_dataframe['Group ID'][0]
print('First group ID: %s' % first_group_id)
# Initialize the query for the CPU Utilization metric over the last 2 hours.
query_group = gcm.Query('compute.googleapis.com/instance/cpu/utilization', hours=2)
# Filter the instances to the members of the first group.
query_group = query_group.select_group(first_group_id)
# Aggregate the time series.
query_group = query_group.align(gcm.Aligner.ALIGN_MEAN, minutes=5)
query_group = query_group.reduce(gcm.Reducer.REDUCE_MEAN, 'resource.zone', 'metric.instance_name')
# Create a dataframe with zone and instance name in the headers.
cpu_group_dataframe = query_group.as_dataframe(labels=['zone', 'instance_name'])
cpu_group_dataframe.tail(5)
cpu_group_dataframe_per_zone = cpu_group_dataframe.groupby(level=0, axis=1).mean()
_ = cpu_group_dataframe_per_zone.plot().legend(loc='center left', bbox_to_anchor=(1.0, 0.8))
# Find all unique zones and sort them.
all_zones = sorted(set(cpu_group_dataframe.columns.get_level_values('zone')))
# Find the global min and max so we can set the same range for all y-axes.
min_cpu = cpu_group_dataframe.min().min()
max_cpu = cpu_group_dataframe.max().max()
for zone in all_zones:
zone_plot = cpu_group_dataframe[zone].plot(title=zone, ylim=(min_cpu, max_cpu))
zone_plot.legend(loc='center left', bbox_to_anchor=(1.0, 0.8))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: List the Stackdriver groups
Step2: Extract the first group
Step3: Load the CPU metric data for the instances a given group
Step4: Plot the the mean of the CPU Utilization per zone
Step5: Plot the CPU Utilization of instances
|
2,083
|
<ASSISTANT_TASK:>
Python Code:
import requests
import json
r = requests.get('http://3d-kenya.chordsrt.com/instruments/2.geojson?start=2017-03-01T00:00&end=2017-05-01T00:00')
if r.status_code == 200:
d = r.json()['Data']
else:
print("Please verify that the URL for the weather station is correct. You may just have to try again with a different/smaller date range or different dates.")
d
for o in d:
if o['variable_shortname'] == 'msl1':
print(o['time'], o['value'], o['units'])
davad_tuple = (
'f1',
'f2',
'f3',
'f4',
'f5',
'f6',
'f7',
'f8',
'f9',
'f10',
'f11',
'f12',
'f13',
'f14',
)
def make_data_set(d):
    """Format CHORDS pressure observations as GLOBE DAVAD email-entry lines.

    Keeps only 'msl1' (mean sea-level pressure) records whose timestamp
    falls on a quarter hour, and renders each as a 14-field space-separated
    DAVAD record: protocol header, station id, site id, UTC timestamp
    (YYYYMMDDHHMM), nine 'X' placeholders, then the pressure value.

    Parameters
    ----------
    d : iterable of dicts with 'variable_shortname', 'time' (ISO 8601,
        'YYYY-MM-DDTHH:MM:...') and 'value' keys, as returned by the
        CHORDS geojson API.

    Returns
    -------
    list of str, one DAVAD line per accepted observation.
    """
    data_list = []
    for o in d:
        if o['variable_shortname'] != 'msl1':
            continue
        t = o['time'].split("T")
        tdate = t[0].replace('-', '')
        # 'HH:MM:SS' -> 'HHMM' (drop the seconds field).
        ttime = ''.join(t[1].split(':')[:-1])
        # Only report observations on the quarter hour
        # (endswith accepts a tuple of suffixes).
        if ttime.endswith(('00', '15', '30', '45')):
            davad_fields = ['DAVAD', 'GLID4TT4', 'SITE_ID:45013'] + ['X'] * 11
            davad_fields[3] = tdate + ttime
            davad_fields[13] = str(o['value'])
            data_list.append(' '.join(davad_fields))
    return data_list
make_data_set(d)
def email_data(data_list):
    """Email DAVAD-formatted lines to GLOBE's email data entry server.

    Wraps `data_list` in the required //AA ... //ZZ envelope and sends it
    via the SparkPost API. Requires the SPARKPOST_API_KEY, FROM_EMAIL and
    BCC_EMAIL environment variables to be set.
    """
    import os
    from sparkpost import SparkPost

    FROM_EMAIL = os.getenv('FROM_EMAIL')
    BCC_EMAIL = os.getenv('BCC_EMAIL')
    # Send email using the SparkPost api
    sp = SparkPost() # uses environment variable named SPARKPOST_API_KEY
    response = sp.transmission.send(
        recipients=['data@globe.gov'],
        bcc=[BCC_EMAIL],
        text='//AA\n{}\n//ZZ'.format('\n'.join(data_list)),
        from_email=FROM_EMAIL,
        subject='DATA'
    )
    print(response)
email_data(make_data_set(d))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Now the collected data can be viewed simply by issuing the following command
Step2: This code is useful for looking at a specific measurement dataset
Step3: A modified version of the above code will format the data properly for GLOBE Email Data Entry
Step4: To see the data formatted in GLOBE Email Data Entry format, comment out the return data_list command above, uncomment the print command right above it, then issue the following command
Step5: To email the data set to GLOBE's email data entry server, run the following code.
Step6: Finally, this command sends the email
|
2,084
|
<ASSISTANT_TASK:>
Python Code:
import cobra.test
from cobra.flux_analysis import gapfill
model = cobra.test.create_test_model("salmonella")
universal = cobra.Model("universal_reactions")
for i in [i.id for i in model.metabolites.f6p_c.reactions]:
reaction = model.reactions.get_by_id(i)
universal.add_reaction(reaction.copy())
model.remove_reactions([reaction])
model.optimize().objective_value
solution = gapfill(model, universal, demand_reactions=False)
for reaction in solution[0]:
print(reaction.id)
result = gapfill(model, universal, demand_reactions=False, iterations=4)
for i, entries in enumerate(result):
print("---- Run %d ----" % (i + 1))
for e in entries:
print(e.id)
with model:
model.objective = model.add_boundary(model.metabolites.f6p_c, type='demand')
solution = gapfill(model, universal)
for reaction in solution[0]:
print(reaction.id)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: In this model D-Fructose-6-phosphate is an essential metabolite. We will remove all the reactions using it, and at them to a separate model.
Step2: Now, because of these gaps, the model won't grow.
Step3: We will use can use the model's original objective, growth, to figure out which of the removed reactions are required for the model be feasible again. This is very similar to making the 'no-growth but growth (NGG)' predictions of Kumar et al. 2009.
Step4: We can obtain multiple possible reaction sets by having the algorithm go through multiple iterations.
Step5: We can also instead of using the original objective, specify a given metabolite that we want the model to be able to produce.
|
2,085
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import tensorflow as tf
from tensorflow import feature_column
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
print("TensorFlow version: ",tf.version.VERSION)
URL = 'https://storage.googleapis.com/download.tensorflow.org/data/heart.csv'
dataframe = pd.read_csv(URL)
dataframe.head()
dataframe.info()
# TODO 1a
# TODO: Your code goes here
print(len(train), 'train examples')
print(len(val), 'validation examples')
print(len(test), 'test examples')
# A utility method to create a tf.data dataset from a Pandas Dataframe
def df_to_dataset(dataframe, shuffle=True, batch_size=32):
dataframe = dataframe.copy()
labels = dataframe.pop('target')
ds = # TODO 2a: Your code goes here
if shuffle:
ds = ds.shuffle(buffer_size=len(dataframe))
ds = ds.batch(batch_size)
return ds
batch_size = 5 # A small batch sized is used for demonstration purposes
# TODO 2b
train_ds = # Your code goes here
val_ds = # Your code goes here
test_ds = # Your code goes here
for feature_batch, label_batch in train_ds.take(1):
print('Every feature:', list(feature_batch.keys()))
print('A batch of ages:', feature_batch['age'])
print('A batch of targets:', label_batch)
# We will use this batch to demonstrate several types of feature columns
example_batch = next(iter(train_ds))[0]
# A utility method to create a feature column
# and to transform a batch of data
def demo(feature_column):
feature_layer = layers.DenseFeatures(feature_column)
print(feature_layer(example_batch).numpy())
age = feature_column.numeric_column("age")
tf.feature_column.numeric_column
print(age)
demo(age)
age_buckets = tf.feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
demo(____) # TODO 3a: Replace the blanks with a correct value
thal = tf.feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = tf.feature_column.indicator_column(thal)
demo(thal_one_hot)
# Notice the input to the embedding column is the categorical column
# we previously created
thal_embedding = tf.feature_column.embedding_column(thal, dimension=8)
demo(thal_embedding)
thal_hashed = tf.feature_column.categorical_column_with_hash_bucket(
'thal', hash_bucket_size=1000)
demo(tf.feature_column.indicator_column(thal_hashed))
crossed_feature = tf.feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
demo(tf.feature_column.indicator_column(crossed_feature))
feature_columns = []
# numeric cols
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'slope', 'ca']:
feature_columns.append(feature_column.numeric_column(header))
# bucketized cols
age_buckets = feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)
# indicator cols
thal = feature_column.categorical_column_with_vocabulary_list(
'thal', ['fixed', 'normal', 'reversible'])
thal_one_hot = feature_column.indicator_column(thal)
feature_columns.append(thal_one_hot)
# embedding cols
thal_embedding = feature_column.embedding_column(thal, dimension=8)
feature_columns.append(thal_embedding)
# crossed cols
crossed_feature = feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
crossed_feature = feature_column.indicator_column(crossed_feature)
feature_columns.append(crossed_feature)
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
batch_size = 32
train_ds = df_to_dataset(train, batch_size=batch_size)
val_ds = df_to_dataset(val, shuffle=False, batch_size=batch_size)
test_ds = df_to_dataset(test, shuffle=False, batch_size=batch_size)
model = tf.keras.Sequential([
feature_layer,
layers.Dense(128, activation='relu'),
layers.Dense(128, activation='relu'),
layers.Dense(1)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
history = model.fit(train_ds,
validation_data=val_ds,
epochs=5)
loss, accuracy = model.evaluate(test_ds)
print("Accuracy", accuracy)
def plot_curves(history, metrics):
nrows = 1
ncols = 2
fig = plt.figure(figsize=(10, 5))
for idx, key in enumerate(metrics):
ax = fig.add_subplot(nrows, ncols, idx+1)
plt.plot(history.history[key])
plt.plot(history.history['val_{}'.format(key)])
plt.title('model {}'.format(key))
plt.ylabel(key)
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left');
plot_curves(history, ['loss', 'accuracy'])
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Lab Task 1
Step2: Split the dataframe into train, validation, and test
Step3: Lab Task 2
Step4: Understand the input pipeline
Step5: Lab Task 3
Step6: Numeric columns
Step7: Let's have a look at the output
Step8: Bucketized columns
Step9: Categorical columns
Step10: In a more complex dataset, many columns would be categorical (e.g. strings). Feature columns are most valuable when working with categorical data. Although there is only one categorical column in this dataset, we will use it to demonstrate several important types of feature columns that you could use when working with other datasets.
Step11: Hashed feature columns
Step12: Crossed feature columns
Step13: Choose which columns to use
Step14: How to Input Feature Columns to a Keras Model
Step15: Earlier, we used a small batch size to demonstrate how feature columns worked. We create a new input pipeline with a larger batch size.
Step16: Create, compile, and train the model
Step17: Visualize the model loss curve
|
2,086
|
<ASSISTANT_TASK:>
Python Code:
!pip install -I "phoebe>=2.1,<2.2"
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
times = np.linspace(0,1,51)
b.add_dataset('lc', times=times, dataset='lc01')
b.add_dataset('orb', times=times, dataset='orb01')
b.add_dataset('mesh', times=times, dataset='mesh01', columns=['teffs'])
b.run_compute(irrad_method='none')
afig, mplanim = b.plot(y={'orb': 'ws'},
animate=True, save='animations_1.gif', save_kwargs={'writer': 'imagemagick'})
afig, mplanim = b.plot(y={'orb': 'ws'},
times=times[:-1:2], animate=True, save='animations_2.gif', save_kwargs={'writer': 'imagemagick'})
afig, mplanim = b['lc01@model'].plot(times=times[:-1], uncover=True,\
c='r', linestyle=':',\
highlight_marker='s', highlight_color='g',
animate=True, save='animations_3.gif', save_kwargs={'writer': 'imagemagick'})
b['mesh01@model'].plot(times=times[:-1], fc='teffs', ec='None',
animate=True, save='animations_4.gif', save_kwargs={'writer': 'imagemagick'})
b['lc01@model'].plot(times=times[:-1], uncover=True, xlim='frame',
animate=True, save='animations_5.gif', save_kwargs={'writer': 'imagemagick'})
b['orb01@model'].plot(times=times[:-1], projection='3d', azim=[0, 360], elev=[-20,20],
animate=True, save='animations_6.gif', save_kwargs={'writer': 'imagemagick'})
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
Step2: Default Animations
Step3: Note that like the rest of the examples below, this is simply the animated version of the exact same call to plot
Step4: Plotting Options
Step5:
Step6: Disabling Fixed Limits
Step7: 3D axes
|
2,087
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
from nsaba.nsaba import Nsaba
from nsaba.nsaba.visualizer import NsabaVisualizer
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
import itertools
%load_ext line_profiler
# Simon Path IO
data_dir = '../../data_dir'
os.chdir(data_dir)
Nsaba.aba_load()
Nsaba.ns_load()
#Torben Path IO
ns_path = "/Users/Torben/Documents/ABI analysis/current_data_new/"
aba_path = '/Users/Torben/Documents/ABI analysis/normalized_microarray_donor9861/'
Nsaba.aba_load(aba_path)
Nsaba.ns_load(ns_path)
# Loading gene expression for all ABA registered Entrez IDs.
A = Nsaba()
A.load_ge_pickle('Nsaba_ABA_ge.pkl')
%time A.get_ns_act('attention', thresh=-1)
A.get_ns_act('reward', thresh=-1)
# Testing ge_ratio()
A = Nsaba()
A.ge_ratio((1813,1816))
rand = lambda null: np.random.uniform(-10,10,3).tolist()
coord_num = 20
coords = [rand(0) for i in range(coord_num)]
A.coords_to_ge(coords, entrez_ids=[1813,1816], search_radii=8)
A.get_aba_ge([733,33,88])
A.get_ns_act("attention", thresh=-1, method='knn')
# You can use the sphere method too, if you want to weight by bucket.
# e.g:
# A.get_ns_act("attention", thresh=.3, method='sphere')
A.make_ge_ns_mat('attention', [733, 33, 88])
A.make_ge_ns_mat('attention', [733, 33, 88])
NV = NsabaVisualizer(A)
NV.visualize_ge([1813])
NV.visualize_ns('attention', alpha=.3)
NV.lstsq_ns_ge('attention', [1813])
NV.lstsq_ge_ge(1813, 1816);
NV.lstsq_ns_ns('attention', 'reward')
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Coordinates to gene expression
Step2: Visualization Methods (testing)
|
2,088
|
<ASSISTANT_TASK:>
Python Code:
import time
import numpy as np
import tensorflow as tf
import utils
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
## Your code here
from collections import Counter
import random
counter = Counter(int_words)
total_count = len(int_words)
word_freq = {word: counter[word]/total_count for word in counter}
t = 1e-5
P = lambda word: 1 - np.sqrt(t/word_freq[word])
train_words = [word for word in int_words if P(word) < random.random()] # The final subsampled word list
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# Your code here
R = random.randint(1, window_size+1)
before_words = words[max(0, idx-R): idx]
after_words = words[idx+1:min(len(words), idx+1+R)]
return list(set(before_words + after_words))
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, shape=[None])
labels = tf.placeholder(tf.int32, shape=[None, None])
n_vocab = len(int_to_vocab)
n_embedding = 300 # Number of embedding features
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform([n_vocab, n_embedding], -1.0, 1.0)) # create embedding weight matrix here
embed = tf.nn.embedding_lookup(embedding, inputs) # use tf.nn.embedding_lookup to get the hidden layer output
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(n_vocab))
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b, labels, embed, n_sampled, n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
Step2: Preprocessing
Step3: And here I'm creating dictionaries to covert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
Step4: Subsampling
Step5: Making batches
Step6: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
Step7: Building the graph
Step8: Embedding
Step9: Negative sampling
Step10: Validation
Step11: Training
Step12: Restore the trained network if you need to
Step13: Visualizing the word vectors
|
2,089
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.datasets import load_files
corpus = load_files("../data/")
doc_count = len(corpus.data)
print("Doc count:", doc_count)
assert doc_count is 56, "Wrong number of documents loaded, should be 56 (56 stories)"
from helpers.tokenizer import TextWrangler
from sklearn.feature_extraction.text import CountVectorizer
bow = CountVectorizer(strip_accents="ascii", tokenizer=TextWrangler(kind="lemma"))
X_bow = bow.fit_transform(corpus.data)
from sklearn.cluster import KMeans
kmeans = KMeans(n_jobs=-1, random_state=23)
from yellowbrick.cluster import KElbowVisualizer
viz = KElbowVisualizer(kmeans, k=(2, 28), metric="silhouette")
viz.fit(X_bow)
#viz.poof(outpath="plots/KElbow_bow_lemma_silhoutte.png")
viz.poof()
from yellowbrick.cluster import SilhouetteVisualizer
def plot_silhoutte_plots(max_n):
for i in range(2, max_n + 1):
plt.clf()
n_cluster = i
viz = SilhouetteVisualizer(KMeans(n_clusters=n_cluster, random_state=23))
viz.fit(X_bow)
path = "plots/SilhouetteViz" + str(n_cluster)
viz.poof(outpath=path)
#plot_silhoutte_plots(28)
from yellowbrick.cluster import SilhouetteVisualizer
n_clusters = 3
model = KMeans(n_clusters=n_clusters, n_jobs=-1, random_state=23)
viz = SilhouetteVisualizer(model)
viz.fit(X_bow)
viz.poof()
from sklearn.pipeline import Pipeline
pipe = Pipeline([("bow", bow),
("kmeans", model)])
pipe.fit(corpus.data)
pred = pipe.predict(corpus.data)
from sklearn.metrics import silhouette_score
print("Avg Silhoutte score:", silhouette_score(X_bow, pred), "(novel collections)")
print("AVG Silhoutte score", silhouette_score(X_bow, corpus.target), "(original collections)")
from yellowbrick.text import TSNEVisualizer
# Map target names of original collections to target vals
collections_map = {}
for i, collection_name in enumerate(corpus.target_names):
collections_map[i] = collection_name
# Plot
tsne_original = TSNEVisualizer()
labels = [collections_map[c] for c in corpus.target]
tsne_original.fit(X_bow, labels)
tsne_original.poof()
# Plot
tsne_novel = TSNEVisualizer()
labels = ["c{}".format(c) for c in pipe.named_steps.kmeans.labels_]
tsne_novel.fit(X_bow, labels)
tsne_novel.poof()
# Novel titles, can be more creative ;>
novel_collections_map = {0: "The Unassignable Adventures of Cluster 0",
1: "The Adventures of Sherlock Holmes in Cluster 1",
2: "The Case-Book of Cluster 2"}
orig_assignment = [collections_map[c] for c in corpus.target]
novel_assignment = [novel_collections_map[p] for p in pred]
titles = [" ".join(f_name.split("/")[-1].split(".")[0].split("_"))
for f_name in corpus.filenames]
# Final df, compares original with new assignment
df_documents = pd.DataFrame([orig_assignment, novel_assignment],
columns=titles, index=["Original Collection", "Novel Collection"]).T
df_documents.to_csv("collections.csv")
df_documents
df_documents["Novel Collection"].value_counts()
tsne_novel_named = TSNEVisualizer(colormap="Accent")
tsne_novel_named.fit(X_bow, novel_assignment)
tsne_novel_named.poof(outpath="plots/Novel_Sherlock_Holmes_Collections.png")
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Input
Step2: Vectorizer
Step3: Decided for BOW vectors, containing lemmatized words. BOW results (in this case) in better cluster performance than with tf-idf vectors. Lemmatization worked slightly better than stemming. (-> KElbow plots in plots/ dir).
Step4: Decided for 3 clusters, because of highest avg Silhoutte score compared to other cluster sizes.
Step5: Nonetheless, the assignment isn't perfect. Cluster #1 looks good, but the many negative vals in cluster #0 & #1 suggest that there exist a cluster with more similar docs than in the actual assigned cluster. As a cluster size of 2 also leads to an inhomogen cluster and has a lower avg Silhoutte score, we go with the size of 3. Nevertheless, in general those findings suggest that the Sherlock Holmes stories should be represented in a single collection only.
Step6: Evaluation
Step7: Compared to original collections by Sir Arthur Conan Doyle
Step8: Average Silhoutte coefficient is at least slightly positive and much better than the score of the original assignment (which is even negative). Success.
Step9: ... to the novel collection assignment
Step10: Confirms the findings from the Silhoutte plot above (in the Models section), cluster #1 looks very coherent, cluster #2 is seperated and the two documents of cluster #0 fly somewhere around.
Step11: Let's see how the the books are differently assigned to collections by Sir Arthur Conan Doyle (Original Collection), respectively by the clustering algo (Novel Collection).
Step12: Collections are uneven assigned. Cluster #1 is the predominant one. Looks like cluster #0 subsume the (rational) unassignable stories.
|
2,090
|
<ASSISTANT_TASK:>
Python Code:
descripciones = {
'P0306' : 'Programas de modernización catastral',
'P0307' : 'Disposiciones normativas sustantivas en materia de desarrollo urbano u ordenamiento territorial',
'P1001' : 'Promedio diario de RSU recolectados',
'P1003' : 'Número de municipios con disponibilidad de servicios relacionados con los RSU',
'P1006' : 'Número de municipios con aplicación de programas locales orientados a la GIRSU',
'P1009' : 'Número de municipios con estudios de generación de RSU',
}
# Librerias utilizadas
import pandas as pd
import sys
import urllib
import os
import zipfile
import csv
import pprint
import re
# Configuracion del sistema
print('Python {} on {}'.format(sys.version, sys.platform))
print('Pandas version: {}'.format(pd.__version__))
import platform; print('Running on {} {}'.format(platform.system(), platform.release()))
root = r'http://www.beta.inegi.org.mx/contenidos/proyectos/censosgobierno/municipal/cngmd/2015/datosabiertos/'
links = {
'P0306' : r'm1/Programa_modernizacion_catastral_cngmd2015_csv.zip', # Programas de modernización catastral
'P0307' : r'm2/Marco_regulatorio_cngmd2015_csv.zip', # Disposiciones normativas sustantivas en materia de desarrollo urbano u ordenamiento territorial
'P1001' : r'm6/Rec_RSU_cngmd2015_csv.zip', # Promedio diario de RSU recolectados
'P1006' : r'm6/Prog_gest_int_RSU_cngmd2015_csv.zip', # Número de municipios con aplicación de programas locales orientados a la GIRSU
'P1009' : r'm6/Est_gen_comp_RSU_cngmd2015_csv.zip', # Número de municipios con estudios de generación de RSU
}
P1003links = { # Número de municipios con disponibilidad de servicios relacionados con los RSU
1 : r'm6/Rec_RSU_cngmd2015_csv.zip',
2 : r'm6/Trat_RSU_cngmd2015_csv.zip',
3 : r'm6/Disp_final_RSU_cngmd2015_csv.zip'
}
# Destino local
destino = r'D:\PCCS\00_RawData\01_CSV\cngmd\2015'
# Descarga de zips para parametros que se encuentran en un solo archivo
m_archivos = {} # Diccionario para guardar memoria de descarga
for parametro, fuente in links.items():
file = fuente.split('/')[1]
remote_path = root+fuente
local_path = destino + r'\{}'.format(file)
if os.path.isfile(local_path):
print('Ya existe el archivo: {}'.format(local_path))
m_archivos[parametro] = local_path
else:
print('Descargando {} ... ... ... ... ... '.format(local_path))
urllib.request.urlretrieve(remote_path, local_path) #
m_archivos[parametro] = local_path
print('se descargó {}'.format(local_path))
# Descarga de zips para parametro P1003
m_archivos2 = {} # Diccionario para guardar memoria de descarga
for parametro, fuente in P1003links.items():
file = fuente.split('/')[1]
remote_path = root+fuente
local_path = destino + r'\{}'.format(file)
if os.path.isfile(local_path):
print('Ya existe el archivo: {}'.format(local_path))
m_archivos2[parametro] = local_path
else:
print('Descargando {} ... ... ... ... ... '.format(local_path))
urllib.request.urlretrieve(remote_path, local_path) #
m_archivos2[parametro] = local_path
print('se descargó {}'.format(local_path))
# Descompresión de archivos de m_parametro
unzipped = {}
for parametro, comprimido in m_archivos.items():
target = destino + '\\' + parametro
if os.path.isfile(target):
print('Ya existe el archivo: {}'.format(target))
unzipped[parametro] = target
else:
print('Descomprimiendo {} ... ... ... ... ... '.format(target))
descomprimir = zipfile.ZipFile(comprimido, 'r')
descomprimir.extractall(target)
descomprimir.close
unzipped[parametro] = target
# Descompresión de archivos de m_parametro2
unzipped2 = {}
for parametro, comprimido in m_archivos2.items():
target = destino + '\\P1003\\' + str(parametro)
if os.path.isfile(target):
print('Ya existe el archivo: {}'.format(target))
unzipped2[parametro] = target
else:
print('Descomprimiendo {} ... ... ... ... ... '.format(target))
descomprimir = zipfile.ZipFile(comprimido, 'r')
descomprimir.extractall(target)
descomprimir.close
unzipped2[parametro] = target
# Localizacion de archivos de cada parametro
# Cada parametro tiene rutas y estructuras distintas. En este paso localizo manualmente
# cada tabla y estructura desde los comprimidos. cada valor del diccionario contiene la ruta hacia
# donde se encuentran las tablas.
cd = r'\conjunto_de_datos'
tablas = {
'P0306' : destino + r'\P0306' + cd,
'P0307' : destino + r'\P0307\marco_regulatorio_cngmd2015_dbf' + cd,
'P1001' : destino + r'\P1001\Rec_RSU_cngmd2015_csv' + cd,
'P1006' : destino + r'\P1006\Prog_gest_int_RSU_cngmd2015_csv' + cd,
'P1009' : destino + r'\P1009\Est_gen_comp_RSU_cngmd2015_csv' + cd,
}
# Tablas para P1003
destino2 = destino + r'\P1003'
tablasP1003 = {
'1' : destino2 + r'\1' + r'\Rec_RSU_cngmd2015_csv' + cd,
'2' : destino2 + r'\2' + r'\Trat_RSU_cngmd2015_csv' + cd,
'3' : destino2 + r'\3' + r'\Disp_final_RSU_cngmd2015_csv' + cd,
}
# Script para extraer metadatos:
def getmeta(path, charcoding): # Path es el contenido en las variables 'tablas' para cada parametro
cat = r'\catalogos'
dic = r'\diccionario_de_datos'
metadict = {}
metapath = path.replace(cd, cat)
metafiles = os.listdir(metapath)
dicdict = {}
dicpath = path.replace(cd, dic)
dicfiles = os.listdir(dicpath)
for file in metafiles:
variable = file.replace('.csv', '')
if file.endswith('.csv'):
csvpath = metapath+'\\'+file
metadf = pd.DataFrame.from_csv(csvpath, parse_dates=False)
try:
metadf.index = metadf.index.map(str.lower)
except:
pass
metadict[variable] = metadf
else:
dothis = input('El archivo {} no es csv, que deseas hacer? [DD]etener [CC]ontinuar'.format(file))
dothis = dothis.lower()
if dothis == 'dd':
raise GeneratorExit('Script detenido por el usuario')
elif dothis == 'cc':
continue
else:
raise KeyError('No entendi la instruccion {}'.format(dothis))
for file in dicfiles:
if file.endswith('.csv'):
filename = file.replace('.csv', '')
csvpath = dicpath+'\\'+file
try:
dicdf = pd.read_csv(csvpath, skiprows=2, usecols=[1, 2], index_col=0, parse_dates=False).dropna()
except:
dicdf = pd.read_csv(csvpath, skiprows=2, usecols=[1, 2], index_col=0, parse_dates=False, encoding = charcoding).dropna()
dicdf.index = dicdf.index.map(str.lower)
dicdict[filename] = dicdf
return dicdict, metadict
# Funcion para revisar metadatos
def queryvar(var, tablelen=10, colprint=125, dictio=None, metadat=None):
    """Print the description and up to ``tablelen`` catalog rows of ``var``.

    ``dictio`` / ``metadat`` default to the module-level ``p0306dic`` /
    ``p0306meta``, resolved at call time.  The original bound them as
    def-time defaults, which raises NameError when the cell is executed
    top-to-bottom because those globals are created after this function.
    """
    if dictio is None:
        dictio = p0306dic
    if metadat is None:
        metadat = p0306meta
    pdefault = pd.get_option('display.max_colwidth')
    pd.set_option('display.max_colwidth', colprint)  # widen columns while printing
    print('"{}" :\n{}'.format(var, dictio.loc[var][0].upper()))
    if len(metadat[var]) > tablelen:
        print('{}\nImprimiendo {} de {} registros'.format('-'*40, tablelen, len(metadat[var])))
    print(metadat[var].head(tablelen))
    # Restore pandas' default column-width option.
    pd.set_option('display.max_colwidth', pdefault)
# Creacion de diccionarios con metadatos para cada variable de P0306:
par = 'P0306'
p0306dic, p0306meta = getmeta(tablas['P0306'], 'mbcs')
print('Se extrajeron metadatos para las siguientes variables de {}:'.format(par))
for key in p0306meta.keys(): print(key)
print('\nDiccionarios disponibles para {}:'.format(par))
for key in p0306dic.keys(): print(key)
# Para P0306, solo existe una tabla de descripciones por lo que se convierte a un dataframe unico para poder indexar
p0306dic = p0306dic['diccionario_de_datos_programa_modernizacion_catastral_cngmd2015_dbf']
p0306dic
list(p0306dic.index)
queryvar('acc_modr')
print('** Descripciones de variables **\n'.upper())
for i in p0306dic.index:
queryvar(i)
print('\n')
# Carga de datos
P0306f = tablas['P0306']+'\\'+os.listdir(tablas['P0306'])[0]
df = pd.read_csv(P0306f, dtype={'ubic_geo':'str'})
df = df.rename(columns = {'ubic_geo':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P0306 = df.where((pd.notnull(df)), None)
# subset para pruebas
test = P0306.loc['15045']
test
queryvar('estructu')
# ¿El municipio cuenta con un programa de modernización catastral?
P0306_00 = P0306[P0306['estructu'] == 240500]['prog_mod'].astype('int')
print(P0306_00.head(10))
print('-'*50)
queryvar('prog_mod')
# ¿En que periodo se realizaron las acciones del programa de modernización catastral?
P0306_03 = P0306[P0306['estructu'] == 240503]['perio_ac'].astype('int')
print(P0306_03.head(10))
print('-'*50)
queryvar('perio_ac')
# ¿Qué acciones se realizaron?
P0306_02 = P0306[P0306['estructu'] == 240502]['acc_modr'].astype('int').groupby('CVE_MUN').apply(list)
print(P0306_02.head(10))
queryvar('acc_modr')
# ¿Cuantas acciones se realizaron?
P0306_02b = P0306_02.apply(len).rename('n_acc_modr')
P0306_02b.head(10)
queryvar('inst_enc')
# ¿Que instituciones se han involucrado en la modernizacion catastral, y de qué manera?
P0306_01t = P0306[P0306['estructu'] == 240501][['inst_enc', 'tip_inst']] # tipo de apoyo e institucion
P0306_01t.head()
queryvar('tip_inst')
# Institucion involucrada
instit = {
1:'Administración pública de la entidad federativa',
2:'BANOBRAS',
3:'SEDATU',
4:'OTRA INSTITUCION'
}
P0306_01t['tip_inst'] = P0306_01t['tip_inst'].replace(instit)
P0306_01t.head()
queryvar('inst_enc')
P0306_01t1 = P0306_01t[P0306_01t['inst_enc'] == 1]['tip_inst'].groupby('CVE_MUN').apply(list).rename('i_coord_ejecuta')
P0306_01t2 = P0306_01t[P0306_01t['inst_enc'] == 2]['tip_inst'].groupby('CVE_MUN').apply(list).rename('i_otorga_apoyos')
P0306_01t1.head()
P0306_01t2.head()
# Convertir series en Dataframes
P0306_00 = P0306_00.to_frame()
P0306_03 = P0306_03.to_frame()
P0306_02 = P0306_02.to_frame()
P0306_02b = P0306_02b.to_frame()
P0306_01t1 = P0306_01t1.to_frame()
P0306_01t2 = P0306_01t2.to_frame()
# Unir dataframes
P0306 = P0306_00.join(P0306_03).join(P0306_02).join(P0306_02b).join(P0306_01t1).join(P0306_01t2)
P0306 = P0306.where((pd.notnull(P0306)), None)
P0306.head()
P0306meta = {
'Nombre del Dataset' : 'Censo Nacional de Gobiernos Municipales y Delegacionales 2015',
'Descripcion del dataset' : 'Censo Nacional de Gobiernos Municipales y Delegacionales 2015',
'Disponibilidad Temporal' : '2015',
'Periodo de actualizacion' : 'Bienal',
'Nivel de Desagregacion' : 'Municipal',
'Notas' : 's/n',
'Fuente' : 'INEGI',
'URL_Fuente' : 'http://www.beta.inegi.org.mx/contenidos/proyectos/censosgobierno/municipal/cngmd/2015/datosabiertos/',
'Dataset base' : '"P0306.xlsx" disponible en \nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015',
}
P0306meta = pd.DataFrame.from_dict(P0306meta, orient='index', dtype=None)
P0306meta.columns = ['Descripcion']
P0306meta = P0306meta.rename_axis('Metadato')
P0306meta
list(P0306meta)
P0306.head()
file = r'D:\PCCS\01_Dmine\Datasets\CNGMD\P0306.xlsx'
writer = pd.ExcelWriter(file)
P0306.to_excel(writer, sheet_name = 'P0306')
P0306meta.to_excel(writer, sheet_name ='METADATOS')
writer.save()
# Redefinición de la función para revisar metadatos, porque los datos de la carpeta 'catálogos' de P0307
# no coinciden con los titulos de las columnas en la carpeta 'Conjunto de datos'.
def getmetab(csvpath, textcoding):
    """Build a tidy metadata DataFrame from the P0307 data-dictionary CSV.

    The raw dictionary interleaves per-file header rows with per-column
    rows; this reshapes it so every row describes one column of one
    source file, indexed by the source-file name ('arc').
    """
    # Import the csv; retry with the fallback encoding if the first read fails.
    # NOTE(review): bare except also hides parser errors — consider
    # narrowing to UnicodeDecodeError.
    try: dicdf = pd.read_csv(csvpath,
                     index_col=0,
                     parse_dates=False
                     )
    except: dicdf = pd.read_csv(csvpath,
                     index_col=0,
                     parse_dates=False,
                     encoding = textcoding,
                     )
    # Rename the columns using the header row embedded in the data (row 1).
    dicdf.columns = list(dicdf.iloc[1])
    # Keep the index as a regular column for the text extraction below.
    dicdf['text_arc'] = dicdf.index
    # Extract the source csv name (word after "o: ") into its own column.
    def getarc(x):
        try: return re.search('(?<=(o: ))([A-Z])\w+', x).group()
        except: return None
    dicdf['arc'] = dicdf['text_arc'].apply(lambda x: getarc(x))
    # Extract the file description (text inside parentheses) into its own column.
    def getdescarc(x):
        try: return re.search('\(([^)]+)\)', x).group(1)
        except: return None
    dicdf['desc_arc'] = dicdf['text_arc'].apply(lambda x: getdescarc(x))
    # Count non-null cells in the data columns; file-header rows (which only
    # carried the 'arc'/'desc_arc' text) score 0 and are dropped below.
    dicdf['delete1'] = dicdf[list(dicdf.columns)[1:6]].notnull().sum(axis = 1)
    # Forward-fill so every row carries its file name and description.
    dicdf = dicdf.fillna(method='ffill')
    # Drop the rows marked above.
    dicdf = dicdf[dicdf.delete1 != 0]
    # Drop repeated column-header rows.
    dicdf = dicdf[dicdf.Descripción != 'Descripción']
    # Re-index by source-file name (consumes the 'arc' column).
    dicdf = dicdf.set_index('arc')
    # Drop the delete1 helper column.
    del dicdf['delete1']
    # Rename the code-description column.
    dicdf.columns.values[5] = 'Descripcion codigos'
    # Return the tidied DataFrame.
    return dicdf
# También es necesario redefinir la función para hacer consultas a los metadatos
def queryvar(filename, var='', tablelen=10, colprint=125, dictio=None):
    """Print the description of data file ``filename`` and, optionally,
    of one of its columns ``var``.

    ``dictio`` defaults to the module-level ``metadatos`` frame, resolved
    at call time (the original def-time default raised NameError because
    ``metadatos`` is created after this definition).  ``tablelen`` is kept
    for interface compatibility; it is not used in this version.
    """
    if dictio is None:
        dictio = metadatos
    pdefault = pd.get_option('display.max_colwidth')
    pd.set_option('display.max_colwidth', colprint)  # widen columns while printing
    frame = dictio.loc[filename]
    print('Archivo "{}.csv" {}'.format(filename, '-'*30))  # file name
    print(frame.iloc[0]['desc_arc'])  # file description
    if var != '':
        print('\n{}{}'.format(var.upper(), '-'*30))  # variable name
        # Subset the rows describing the requested column.
        varframe = frame[frame['Nombre de la \ncolumna'] == var.upper()]
        varframe = varframe.set_index('Códigos válidos en la columna')
        print(varframe['Descripción'][0])  # variable description
        print(varframe[['Descripcion codigos']])  # code descriptions
    # Restore pandas' default column width (the original computed pdefault
    # but never restored it, unlike the earlier queryvar).
    pd.set_option('display.max_colwidth', pdefault)
csvpath = r'D:\PCCS\00_RawData\01_CSV\cngmd\2015\P0307\marco_regulatorio_cngmd2015_dbf\diccionario_de_datos\diccionario_de_datos_marco_regulatorio_cngmd2015.csv'
metadatos = getmetab(csvpath, 'mbcs')
# Definición de rutas de archivos
par = 'P0307'
P0307files = {}
for file in os.listdir(tablas[par]):
P0307files[file.replace('.csv', '')] = tablas[par]+'\\'+file
for file in P0307files.keys():
print(file)
queryvar(file.upper())
print('\n')
print('P0307 - {}\n'.format(descripciones['P0307']))
queryvar('m_regula'.upper())
# Carga de datos
P0307f = tablas['P0307']+'\\'+ os.listdir(tablas['P0307'])[4]
df = pd.read_csv(P0307f, dtype={'ubic_geo':'str'})
df = df.rename(columns = {'ubic_geo':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P0307 = df.where((pd.notnull(df)), None)
P0307.head()
P0307.columns
queryvar('m_regula'.upper(), 'tema_nis')
P0307 = P0307[P0307['tema_nis'] == 41]
P0307.head()
# Quita las columnas que estén vacías
P0307 = P0307.dropna(axis=1, how = 'all')
P0307.head()
# Metadatos
meta = P0306meta
meta.at['Dataset base','Descripcion'] = meta.at['Dataset base','Descripcion'].replace('P0306', 'P0307')
meta
par = 'P0307'
file = r'D:\PCCS\01_Dmine\Datasets\CNGMD'+'\\'+par+'.xlsx'
writer = pd.ExcelWriter(file)
P0307.to_excel(writer, sheet_name = par)
meta.to_excel(writer, sheet_name ='METADATOS')
writer.save()
# Rutas de archivos
param = 'P1001'
rutadatos = tablas[param]
rutameta = tablas[param].replace('conjunto_de_datos', 'diccionario_de_datos')
rutameta = rutameta + '\\' + os.listdir(rutameta)[0]
print('{}\n{}'.format(rutadatos, rutameta))
# Obtencion de metadatos
# Cada hoja de metadatos es muy muy similar, pero con muy ligeras variaciones
# La unica parte del proceso que es seguro automatizar es la importación del archivo hacia Python
def getmeta(csvpath, textcoding):
    """Read a metadata CSV and promote its embedded header row to column names.

    Tries UTF-8 first, then falls back to ``textcoding`` when decoding
    fails.  Row 1 of the raw frame holds the real column labels.
    """
    # Import the csv.
    try:
        dicdf = pd.read_csv(csvpath,
                            index_col=0,
                            parse_dates=False
                            )
    except UnicodeDecodeError:
        # Not UTF-8: retry with the caller-supplied fallback encoding.
        # (The original used a bare except, which also hid parser errors.)
        dicdf = pd.read_csv(csvpath,
                            index_col=0,
                            parse_dates=False,
                            encoding=textcoding,
                            )
    # Rename the columns using the header row embedded in the data.
    dicdf.columns = list(dicdf.iloc[1])
    # Return the frame.
    return dicdf
os.listdir(r'D:\PCCS\00_RawData\01_CSV\cngmd\2015\P1001\Rec_RSU_cngmd2015_csv\diccionario_de_datos')
metadatos = getmeta(rutameta, 'mbcs')
# Crea columna con el indice
metadatos['Nombre de la \ncolumna'] = metadatos.index
# Extrae el nombre del csv fuente en una columna independiente
def getarc(x):
    """Return the second space-separated token of ``x`` (the source csv
    name in the metadata text), or None when ``x`` is not a string with
    at least two tokens (e.g. NaN from pandas)."""
    try:
        return x.split(' ')[1]
    except (AttributeError, IndexError):
        # x has no .split (NaN/None/float) or no second token.
        return None
metadatos['archivo'] = metadatos['Nombre de la \ncolumna'].apply(getarc)
# Extrae la descripcion del archivo en una columna independiente
def getdescarc(x):
    """Return the text after the first '(' in ``x`` with all ')' removed
    (the file description), or None when ``x`` is not a string containing
    '(' (e.g. NaN from pandas)."""
    try:
        return x.split('(')[1].replace(')', '')
    except (AttributeError, IndexError):
        # x has no .split (NaN/None/float) or contains no '('.
        return None
metadatos['desc_arc'] = metadatos['Nombre de la \ncolumna'].apply(getdescarc)
# En la columna 'arc', reemplaza las celdas cuyo valor es 'de'
metadatos['archivo'] = metadatos['archivo'].replace({'de':None})
# Marca columnas que se van a eliminar (Las columnas de donde se sacaron las variables 'arc' y 'desc_arc')
metadatos['delete1'] = metadatos[list(metadatos.columns)[1:6]].notnull().sum(axis = 1)
# Rellenar valores NaN
metadatos = metadatos.fillna(method='ffill')
# Eliminar valores marcados previaente
metadatos = metadatos[metadatos.delete1 != 0]
# Eliminar columnas sin datos
metadatos = metadatos.dropna(axis = 1, how = 'all')
# Eliminar encabezados de columna repetidos
metadatos = metadatos[metadatos.Descripción != 'Descripción']
# Asignar nuevo indice y eliminar columna 'text_arc'
metadatos = metadatos.set_index('archivo')
# Elimina columna delete1
del metadatos['delete1']
# Renombra la columna de descripciones de codigos
metadatos.columns.values[3] = 'Descripcion codigos'
# Reordena las columnas
neworder = ['Nombre de la \ncolumna', 'Descripción', 'Tipo de dato', 'Rango válido', 'Descripcion codigos',
'Pregunta textual', 'Página de Cuestionario', 'Definición', 'desc_arc']
metadatos = metadatos.reindex(columns= neworder)
# Renombra las columnas para que funcionen con queryvar
metadatos = metadatos.rename({'Rango válido':'Códigos válidos en la columna'})
metadatos.head(3)
metadatos.loc['secc_i_tr_cngmd15_m6'][metadatos.loc['secc_i_tr_cngmd15_m6']['Nombre de la \ncolumna'] == 'P2_2']
# Definición de rutas a archivos de datos
Paramfiles = {}
for file in os.listdir(rutadatos):
Paramfiles[file.replace('.csv', '')] = rutadatos+'\\'+file
for file, path in Paramfiles.items():
print('{}:\n{}\n'.format(file, path))
# Carga de datos
P1001f = tablas[param]+'\\'+ os.listdir(tablas[param])[0]
df = pd.read_csv(P1001f, dtype={'folio':'str'}, encoding = 'mbcs')
df = df.rename(columns = {'folio':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P1001 = df.where((pd.notnull(df)), None)
P1001.head(1)
P1001 = P1001['p2_2'].to_frame()
P1001.head(1)
# Metadatos
meta = meta # Utiliza el archivo de metadatos que habías definido anteriormente
meta.at['Dataset base','Descripcion'] = '"P1001.xlsx" disponible en \nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'
meta.at['Notas','Descripcion'] = 'p2_2: Cantidad de residuos sólidos recolectada en kilogramos.'
meta
file = r'D:\PCCS\01_Dmine\Datasets\CNGMD'+'\\'+param+'.xlsx'
writer = pd.ExcelWriter(file)
P1001.to_excel(writer, sheet_name = param)
meta.to_excel(writer, sheet_name ='METADATOS')
writer.save()
# Rutas de archivos
param = 'P1006'
rutadatos = tablas[param]
rutameta = tablas[param].replace('conjunto_de_datos', 'diccionario_de_datos')
rutameta = rutameta + '\\' + os.listdir(rutameta)[0]
print('{}\n{}'.format(rutadatos, rutameta))
# Definición de rutas a archivos de datos
Paramfiles = {}
for file in os.listdir(rutadatos):
Paramfiles[file.replace('.csv', '')] = rutadatos+'\\'+file
for file, path in Paramfiles.items():
print('{}:\n{}\n'.format(file, path))
os.listdir(tablas[param])[0]
# Carga de datos
P1006f = tablas[param]+'\\'+ os.listdir(tablas[param])[0]
df = pd.read_csv(P1006f, dtype={'folio':'str'}, encoding = 'mbcs')
df = df.rename(columns = {'folio':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P1006 = df.where((pd.notnull(df)), None)
P1006 = P1006['p13'].to_frame()
# Metadatos
meta = meta # Utiliza el archivo de metadatos que habías definido anteriormente
meta.at['Dataset base','Descripcion'] = '"P1006.xlsx" disponible en \nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'
meta.at['Notas','Descripcion'] = 'En la columna p13, ¿El municipio cuenta con Programas de Gestion de Residuos? 1: Si, 2: No'
meta
file = r'D:\PCCS\01_Dmine\Datasets\CNGMD'+'\\'+param+'.xlsx'
writer = pd.ExcelWriter(file)
P1006.to_excel(writer, sheet_name = param)
meta.to_excel(writer, sheet_name ='METADATOS')
writer.save()
# Rutas de archivos
param = 'P1009'
rutadatos = tablas[param]
rutameta = tablas[param].replace('conjunto_de_datos', 'diccionario_de_datos')
rutameta = rutameta + '\\' + os.listdir(rutameta)[0]
print('{}\n{}'.format(rutadatos, rutameta))
# Definición de rutas a archivos de datos
Paramfiles = {}
for file in os.listdir(rutadatos):
Paramfiles[file.replace('.csv', '')] = rutadatos+'\\'+file
for file, path in Paramfiles.items():
print('{}:\n{}\n'.format(file, path))
# Carga de datos
P1009f = tablas[param]+'\\'+ os.listdir(tablas[param])[0]
df = pd.read_csv(P1009f, dtype={'folio':'str'}, encoding = 'mbcs')
df = df.rename(columns = {'folio':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P1009 = df.where((pd.notnull(df)), None)
del(P1009['entidad'])
del(P1009['municipio'])
meta
# Metadatos
meta = meta # Utiliza el archivo de metadatos que habías definido anteriormente
meta.at['Dataset base','Descripcion'] = '"P1009.xlsx" disponible en \nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'
meta.at['Notas','Descripcion'] = 'Para la columna P12, ¿El Municipio cuenta con estudios de generacion de residuos? 1: Si 2: No'
meta
file = r'D:\PCCS\01_Dmine\Datasets\CNGMD'+'\\'+param+'.xlsx'
writer = pd.ExcelWriter(file)
P1009.to_excel(writer, sheet_name = param)
meta.to_excel(writer, sheet_name ='METADATOS')
writer.save()
tablasP1003
# Rutas de archivos
param = 'P1003'
rutasdatos = list(tablasP1003.values())
for ruta in rutasdatos:
print(ruta)
# Definición de rutas a archivos de datos
Paramfiles = {}
for rutadatos in rutasdatos:
for file in os.listdir(rutadatos):
Paramfiles[file.replace('.csv', '')] = rutadatos+'\\'+file
for file, path in Paramfiles.items():
print('{}:\n{}\n'.format(file, path))
# Carga de datos
# Es necesario hacer 3 dataframes, uno por cada archivo, y después unir las columnas para cada parámetro.
P1003f1 = Paramfiles['secc_i_tr_cngmd15_m6']
df = pd.read_csv(P1003f1, dtype={'folio':'str'}, encoding = 'mbcs')
df = df.rename(columns = {'folio':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P1003f1 = df.where((pd.notnull(df)), None)
P1003f2 = Paramfiles['secc_ii_tr_cngmd15_m6']
df = pd.read_csv(P1003f2, dtype={'folio':'str'}, encoding = 'mbcs')
df = df.rename(columns = {'folio':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P1003f2 = df.where((pd.notnull(df)), None)
# El Parametro en realidad no utiliza el numero de sitios de disposicion de residuos.
# Y no está documentado el significado de NS en la columna P11 lo que dificulta la lectura de los datos
'''
P1003f3 = Paramfiles['secc_iii_tr_cngmd15_m6']
df = pd.read_csv(P1003f3, dtype={'folio':'str'}, encoding = 'mbcs')
df = df.rename(columns = {'folio':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P1003f3 = df.where((pd.notnull(df)), None)
'''
# Aislar datos de interés
P1003 = P1003f1['p1'].to_frame()
P1003['p10'] = P1003f2['p10']
# P1003['p11'] = P1003f3['p11'] #p11 se excluye del analisis por los motivos descritos antes
P1003.head(1)
# Metadatos
meta = meta # Utiliza el archivo de metadatos que habías definido anteriormente
meta.at['Dataset base','Descripcion'] = '"P1003.xlsx" disponible en \nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'
meta.at['Notas','Descripcion'] = 'para p1: ¿Dispone de servicio de recoleccion? (1: Si 2: No)\npara p10: ¿Al menos una fracción de los RSU es enviada a plantas de tratamiento? (1: Si 2: No)\npara p11: ¿A cuantos sitios de disposición final son remitidos los residuos?'
meta
param
file = r'D:\PCCS\01_Dmine\Datasets\CNGMD'+'\\'+param+'.xlsx'
writer = pd.ExcelWriter(file)
P1003.to_excel(writer, sheet_name = param)
meta.to_excel(writer, sheet_name ='METADATOS')
writer.save()
# Carga de datos
P1005f = Paramfiles['secc_i_tr_cngmd15_m6_p6_3_2']
df = pd.read_csv(P1005f, dtype={'FOLIO':'str'}, encoding = 'mbcs')
df = df.rename(columns = {'FOLIO':'CVE_MUN'})
df.set_index('CVE_MUN', inplace = True)
P1005f = df.where((pd.notnull(df)), None)
P1005f.head(1)
P1005 = P1005f['P6_3_2_1_3'].to_frame()
P1005.head(3)
# Metadatos
meta = meta # Utiliza el archivo de metadatos que habías definido anteriormente
meta.at['Dataset base','Descripcion'] = '"P1005.xlsx" disponible en \nhttps://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/CNGMD/2015'
meta.at['Notas','Descripcion'] = 'P6_3_2_1_3: Numero de vehiculos utilizados para la recolección de Residuos Solidos Urbanos'
meta
param = 'P1005'
file = r'D:\PCCS\01_Dmine\Datasets\CNGMD'+'\\'+param+'.xlsx'
writer = pd.ExcelWriter(file)
P1005.to_excel(writer, sheet_name = param)
meta.to_excel(writer, sheet_name ='METADATOS')
writer.save()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: En el caso del parámetro P1003, los datos se extraen desde 3 archivos. Estos archivos son una base de datos para cada servicio relacionado con los RSU, Utilizando nuevamente el archivo que utiliza P1001 y dos adicionales
Step2: Construccion de datasets estándar
Step3: P0306 - Programas de modernización catastral
Step4: El archivo está estructurado de manera inconveniente, teniendo un renglón para cada variable. Lo conveniente es que cada renglón contenga toda la información de un solo municipio.
Step5: Para corregirlo, primero hacemos dataframes separados para cada variable. Afortunadamente, la columna 'Estructu' sirve para agrupar estructuralmente el dataframe
Step6: Se reemplazarán numeros por descripciones en tip_inst
Step7: Y se separará la columna 'inst_enc' en 2
Step8: Finalmente, se unirán todas las series en un solo dataframe
Step9: Metadatos para P0306
Step10: EXPORTAR A EXCEL
Step11: P0307
Step12: El contenido de los archivos en la carpeta "Conjunto de datos" es el siguiente
Step13: La información para el parámetro P0307 se encuentra en el archivo M_REGULA.csv
Step14: ¿Dónde están los datos sobre desarrollo urbano y ordenamiento territorial?
Step15: Los datos de DU y OT estan en la columna TEMA_NIS. El código 41 en esta column indica DU y OT
Step16: Exportar archivo
Step17: P1001 - Promedio diario de RSU recolectados
Step18: ¿Donde estan los datos?
Step19: Los datos se encuentran en el archivo secc_i_tr_cngmd15_m6, en la columna P2_2
Step20: Exportar archivos
Step21: P1006 - Número de municipios con aplicación de programas locales orientados a la GIRSU
Step22: ¿Donde estan los datos?
Step23: Exportar Archivos
Step24: P1009 - Número de municipios con estudios de generación de RSU
Step25: ¿Donde están los datos?
Step26: Exportar archivos
Step27: P1003 - Número de municipios con disponibilidad de servicios relacionados con los RSU
Step28: La Carpeta 1 Contiene 2 archivos
Step29: Exportar archivos
Step30: P1005 - Número de vehículos utilizados para la recolección de residuos sólidos urbanos
Step31: Exportar archivos
|
2,091
|
<ASSISTANT_TASK:>
Python Code:
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
def download(url, file):
    """Download *url* to local path *file*, skipping if it already exists.

    :param url: URL to file
    :param file: Local file path
    """
    # The docstring quotes were lost in this copy, leaving bare text
    # statements; restored so the function is valid Python again.
    if not os.path.isfile(file):
        print('Downloading ' + file + '...')
        urlretrieve(url, file)
        print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """Uncompress features and labels from a zip file.

    :param file: The zip file to extract the data from
    :return: ``(features, labels)`` numpy arrays; each feature is a
        flattened float32 image, each label the first character of the
        image's file name (its letter, A-J).
    """
    # The docstring quotes were lost in this copy, leaving bare text
    # statements; restored so the function is valid Python again.
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # Progress bar over the archive members.
        filenames_pbar = tqdm(zipf.namelist(), unit='files')
        # Get features and labels from all files.
        for filename in filenames_pbar:
            # Skip directory entries.
            if not filename.endswith('/'):
                with zipf.open(filename) as image_file:
                    image = Image.open(image_file)
                    image.load()
                    # Load image data as a 1-dimensional array;
                    # float32 keeps memory usage down.
                    feature = np.array(image, dtype=np.float32).flatten()
                # The letter of the image is the first character of its file name.
                label = os.path.split(filename)[1][0]
                features.append(feature)
                labels.append(label)
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """Normalize grayscale image data with Min-Max scaling to [0.1, 0.9].

    :param image_data: array-like of pixel values in [0, 255]
    :return: numpy array of the same shape, linearly mapped so that
        0 -> 0.1 and 255 -> 0.9
    """
    a, b = 0.1, 0.9          # target range
    x_min, x_max = 0.0, 255.0  # grayscale input range
    # Vectorized Min-Max scaling: replaces the original per-pixel Python
    # loop (very slow on large arrays) and its shadowing of the builtin
    # names `min`/`max`; also restores the stripped docstring quotes.
    return a + (np.asarray(image_data, dtype=np.float64) - x_min) * (b - a) / (x_max - x_min)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
[0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
0.125098039216, 0.128235294118, 0.13137254902, 0.9],
decimal=3)
np.testing.assert_array_almost_equal(
normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
[0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
0.896862745098, 0.9])
if not is_features_normal:
train_features = normalize_grayscale(train_features)
test_features = normalize_grayscale(test_features)
is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation
train_features, valid_features, train_labels, valid_labels = train_test_split(
train_features,
train_labels,
test_size=0.05,
random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open('notMNIST.pickle', 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels,
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
features_count = 784
labels_count = 10
# TODO: Set the features and labels tensors
features = tf.placeholder(tf.float32, [None, features_count])
labels = tf.placeholder(tf.float32, [None, labels_count])
# TODO: Set the weights and biases tensors
# weights =
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
# biases =
biases = tf.Variable(tf.zeros([labels_count]))
### DON'T MODIFY ANYTHING BELOW ###
#Test Cases
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
features._shape.dims[0].value is None and\
features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
labels._shape.dims[0].value is None and\
labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.initialize_all_variables()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# TODO: Find the best parameters for each configuration
epochs = 1
batch_size = 100
learning_rate = 0.1
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
# TODO: Set the epochs, batch_size, and learning_rate with the best parameters from problem 3
epochs = 5
batch_size = 50
learning_rate = 0.1
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(train_features)/batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i*batch_size
batch_features = train_features[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step3: The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
Step5: <img src="image/mean_variance.png" style="height
Step6: Checkpoint
Step7: <img src="image/weight_biases.png" style="height
Step8: <img src="image/learn_rate_tune.png" style="height
Step9: Test
|
2,092
|
<ASSISTANT_TASK:>
Python Code:
%matplotlib inline
import os
import sys
import random
import networkx as nx
## Paths from the file
PROJECT = os.path.join(os.getcwd(), "..")
FIXTURES = os.path.join(PROJECT, "fixtures")
DATASET = os.path.join(FIXTURES, 'activity.csv')
## Append the path for the logbook utilities
sys.path.append(PROJECT)
from logbook.reader import LogReader
from logbook.graph import *
from logbook.compare import *
# Actions to exclude from our graph.
# exclude = None
exclude=['Subscribed to DDL blog', 'Signed up for new course notifications']
# Load dataset and generate graph
# NOTE(review): LogReader and graph_from_triples come from the local `logbook`
# package imported above; later cells access `triple.entity.email`, so each
# record presumably exposes an entity with an email -- confirm against reader.py.
dataset = LogReader(DATASET, exclude=exclude)
G = graph_from_triples(dataset)
print info(G)
# Draw the graph (Python 2 notebook, hence the print statement above);
# `iterations` is presumably the layout relaxation count -- see graph.py.
draw_activity_graph(G, connected=True, iterations=100)
print "Pairwise Comparisons: {}\n\n".format(pairwise_comparisons(G, True))
combos = list(pairwise_comparisons(G, entity='person'))
random.shuffle(combos)
for idx, pair in enumerate(combos):
print "Pair {}:".format(idx + 1)
print " {}\n -- vs --\n {}".format(*pair)
print
if idx >= 4: break
print "Edge Blocked Pairwise Comparisons: {}\n\n".format(edge_blocked_comparisons(G, True))
combos = list(edge_blocked_comparisons(G, entity='person'))
random.shuffle(combos)
for idx, pair in enumerate(combos):
print "Pair {}:".format(idx + 1)
print " {}".format(pair[0])
for detail in G.neighbors(pair[0]):
print " {}".format(detail)
print " -- vs --"
print " {}".format(pair[1])
for detail in G.neighbors(pair[1]):
print " {}".format(detail)
print
if idx >= 4: break
combos = list(edge_blocked_comparisons(G, entity='person'))
combos = filter(lambda pair: fuzzblock(*pair), combos)
print "Fuzz/Edge Blocked Pairwise Comparisons: {}\n\n".format(len(combos))
random.shuffle(combos)
for idx, pair in enumerate(combos):
print "Pair {}:".format(idx + 1)
print " {}".format(pair[0])
for detail in G.neighbors(pair[0]):
print " {}".format(detail)
print " -- vs --"
print " {}".format(pair[1])
for detail in G.neighbors(pair[1]):
print " {}".format(detail)
print
if idx >= 100: break
from collections import Counter
def count_email_domains(triples=None):
    """Count how often each email domain appears among the log triples.

    Parameters
    ----------
    triples : iterable, optional
        Records exposing ``record.entity.email``.  Defaults to the
        module-level ``dataset`` so existing zero-argument callers are
        unchanged; passing an explicit iterable makes the function
        testable without the hidden global.

    Returns
    -------
    collections.Counter
        Maps each domain (everything after the last '@') to its count.
    """
    source = dataset if triples is None else triples
    counter = Counter()
    for triple in source:
        # [-1] keeps this robust to malformed addresses with several '@'.
        domain = triple.entity.email.split("@")[-1]
        counter[domain] += 1
    return counter
# Print every domain with its frequency, most common first
# (Python 2 notebook: print is a statement here).
domains = count_email_domains()
for domain in domains.most_common():
    print "{}: {}".format(*domain)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Graph Structured Pairwise Comparisons
Step2: Edge structured comparisons only yield nodes so long as the itersection of the node's neighborhoods is empty (that is, two entities can't have an action to the same detail).
Step3: Other structural blocking can then be applied.
Step4: Domain Counts
|
2,093
|
<ASSISTANT_TASK:>
Python Code:
# Build a small undirected path graph: a - b - c.
G = nx.Graph()
G.add_nodes_from(['a', 'b', 'c'])
G.add_edges_from([('a','b'), ('b', 'c')])
nx.draw(G, with_labels=True)
# Extend the path into a 4-cycle a-b-c-d-a by adding node d
# and the two closing edges.
G.add_node('d')
G.add_edge('c', 'd')
G.add_edge('d', 'a')
nx.draw(G, with_labels=True)
# Load the network.
G = nx.read_gpickle('Synthetic Social Network.pkl')
nx.draw(G, with_labels=True)
# Example code that shouldn't be too hard to follow.
def in_triangle(G, node):
    """Return True iff *node* belongs to at least one triangle in G.

    A node is in a triangle exactly when two of its neighbors are
    adjacent to each other, so it suffices to test every unordered
    pair of neighbors.  This replaces the original 3-hop walk, which
    built large intermediate lists, left one local (`neighbors`)
    assigned but unused, and partially removed `node` from the
    second-hop list to no effect.  Assumes a simple undirected graph
    (as used in this notebook).
    """
    neighbors = list(G.neighbors(node))
    for i, u in enumerate(neighbors):
        for v in neighbors[i + 1:]:
            # An edge between two neighbors closes the triangle node-u-v.
            if G.has_edge(u, v):
                return True
    return False
in_triangle(G, 3)
nx.triangles(G, 3)
# Possible answer
def get_triangles(G, node):
    """Return the set of nodes that form a triangle with *node*.

    The result contains *node* itself plus every neighbor pair of
    *node* that is mutually adjacent; if *node* is in no triangle the
    empty set is returned.  (The original cell was an exercise stub;
    the notebook's check expects ``{1, 2, 3, 6, 23}`` for node 3 of
    the synthetic network.)
    """
    neighbors = set(G.neighbors(node))
    triangle_nodes = set()
    for u in neighbors:
        # Neighbors shared by `node` and `u` close a triangle node-u-shared.
        shared = neighbors & set(G.neighbors(u))
        if shared:
            triangle_nodes.add(u)
            triangle_nodes.update(shared)
    if triangle_nodes:
        triangle_nodes.add(node)
    return triangle_nodes
# Verify your answer with the following funciton call. Should return:
# {1, 2, 3, 6, 23}
get_triangles(G, 3)
# Then, draw out those nodes.
nx.draw(G.subgraph(get_triangles(G, 3)), with_labels=True)
neighbors3 = G.neighbors(3)
neighbors3.append(3)
nx.draw(G.subgraph(neighbors3), with_labels=True)
# Possible Answer, credit Justin Zabilansky (MIT) for help on this.
def get_open_triangles(G, node):
    """Enumerate every open triangle centered on *node*.

    There are many ways to represent this; here each open triangle is
    an explicit set ``{node, u, v}`` where u and v are neighbors of
    *node* that are NOT adjacent to each other -- the representation
    the notebook later feeds to ``G.subgraph(triplet)``.  Returns an
    empty list when no open triangle exists.  (The original cell was
    an exercise stub.)
    """
    open_triangle_nodes = []
    neighbors = list(G.neighbors(node))
    for i, u in enumerate(neighbors):
        for v in neighbors[i + 1:]:
            # Missing u-v edge means node-u-v is an open (unclosed) triangle.
            if not G.has_edge(u, v):
                open_triangle_nodes.append({node, u, v})
    return open_triangle_nodes
# # Uncomment the following code if you want to draw out each of the triplets.
# nodes = get_open_triangles(G, 2)
# for i, triplet in enumerate(nodes):
# fig = plt.figure(i)
# nx.draw(G.subgraph(triplet), with_labels=True)
print(get_open_triangles(G, 2))
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Let's think of another problem
Step2: The set of relationships involving A, B and C, if closed, involves a triangle in the graph. The set of relationships that also include D form a square.
Step3: Cliques
Step4: In reality, NetworkX already has a function that counts the number of triangles that any given node is involved in. This is probably more useful than knowing whether a node is present in a triangle or not, but the above code was simply for practice.
Step6: Exercise
Step8: Friend Recommendation
|
2,094
|
<ASSISTANT_TASK:>
Python Code:
import numpy as np
def ellipse(s, r, c, theta=0):
    """Boolean image of shape *s* containing a filled, rotated ellipse.

    s     : (rows, cols) shape of the output image
    r     : (row_radius, col_radius) semi-axes before rotation
    c     : (row, col) center of the ellipse
    theta : rotation angle in radians, default 0
    """
    n_rows, n_cols = s[0], s[1]
    row_c, col_c = c[0], c[1]
    # Pixel coordinate grids, shifted so the ellipse center is the origin.
    row_idx, col_idx = np.indices((n_rows, n_cols))
    dr = row_idx - row_c
    dc = col_idx - col_c
    ct, st = np.cos(theta), np.sin(theta)
    # Rotate coordinates into the ellipse frame and normalize by the
    # semi-axes; points with u**2 + v**2 <= 1 lie inside the ellipse.
    u = (ct * dc - st * dr) / r[1]
    v = (st * dc + ct * dr) / r[0]
    return u * u + v * v <= 1
testing = (__name__ == "__main__")
if testing:
! jupyter nbconvert --to python ellipse.ipynb
import numpy as np
import sys,os
import matplotlib.image as mpimg
ia898path = os.path.abspath('../../')
if ia898path not in sys.path:
sys.path.append(ia898path)
import ia898.src as ia
if testing:
g = ia.ellipse([16,16], [2,4], [8,8], np.pi * 0.25)
print('g:\n', g.astype(int))
if testing:
from time import time
t = time()
g = ia.ellipse([300,300], [90,140], [150,150], np.pi * 0.25)
tend = time()
print('Computational time (10k, 10k) is {0:.2f} seconds.'.format(tend - t))
ia.adshow(g, "Ellipse")
if testing:
print('Computational time (10k, 10k) is:')
%timeit ia.ellipse([300,300], [90,140], [150,150], np.pi * 0.25)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Examples
Step2: Measuring time
|
2,095
|
<ASSISTANT_TASK:>
Python Code:
import sys
sys.version_info
import numpy as np
np.__version__
import requests
requests.__version__
import pandas as pd
pd.__version__
import scipy
scipy.__version__
import scidbpy
scidbpy.__version__
from scidbpy import connect
sdb = connect('http://localhost:8080')
# Download the index of matched MIMIC-II waveform records from PhysioNet.
import urllib.request # urllib2 in python2 the lib that handles the url stuff
target_url = "https://www.physionet.org/physiobank/database/mimic2wdb/matched/RECORDS-waveforms"
data = urllib.request.urlopen(target_url) # it's a file like object and works just like a file
lines = data.readlines();
# Pick an arbitrary record (line 100); entries look like b'folder/record\n',
# so strip the bytes-literal artifacts before splitting into folder/record.
line = str(lines[100])
carpeta,onda = line.replace('b\'','').replace('\'','').replace('\\n','').split("/")
onda
# Fetch the signal matrix and its metadata for that record via WFDB.
import wfdb
sig, fields = wfdb.srdsamp(onda,pbdir='mimic2wdb/matched/'+carpeta) #, sampfrom=11000
print(sig)
print("signame: " + str(fields['signame']))
print("units: " + str(fields['units']))
print("fs: " + str(fields['fs']))
print("comments: " + str(fields['comments']))
print("fields: " + str(fields))
# Locate the ECG lead II channel, if the record has one.
signalII = None
try:
    signalII = fields['signame'].index("II")
except ValueError:
    print("List does not contain value")
if(signalII!=None):
    print("List contain value")
# Normalize lead II into [-2, 2], then drop NaNs and leading/trailing zeros.
array = wfdb.processing.normalize(x=sig[:, signalII], lb=-2, ub=2)
arrayNun = array[~np.isnan(array)]
arrayNun = np.trim_zeros(arrayNun)
arrayNun
# SciDB apparently rejects '-' in array names, so swap for '_';
# store the (full, un-trimmed) normalized signal when any data survived.
ondaName = onda.replace("-", "_")
if arrayNun.size>0 :
    sdb.input(upload_data=array).store(ondaName,gc=False)
# sdb.iquery("store(input(<x:int64>[i], '{fn}', 0, '{fmt}'), "+ondaName+")", upload_data=array)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: NumPy
Step2: Requests
Step3: Pandas (optional)
Step4: SciPy (optional)
Step5: 2) Importar scidbpy
Step6: conectarse al servidor de Base de datos
Step7: 3) Leer archivo con cada una de las ondas
Step8: Quitarle caracteres especiales
Step9: 4) Importar WFDB para conectarse a physionet
Step10: Busca la ubicacion de la señal tipo II
Step11: Normaliza la señal y le quita los valores en null
Step12: Cambiar los guiones "-" por raya al piso "_" porque por algun motivo SciDB tiene problemas con estos caracteres
|
2,096
|
<ASSISTANT_TASK:>
Python Code:
import ipywidgets as widgets
import os
# Read the sample JPEG as raw bytes and wrap it in an ipywidgets Image.
image_path = os.path.abspath('../data_files/trees.jpg')
with open(image_path, 'rb') as f:
    raw_image = f.read()
ipyimage = widgets.Image(value=raw_image, format='jpg')
ipyimage
from bqplot import *
# Create the scales for the image coordinates
scales={'x': LinearScale(), 'y': LinearScale()}
# Define the bqplot Image mark
image = Image(image=ipyimage, scales=scales)
# Create the bqplot Figure to display the mark
fig = Figure(title='Trees', marks=[image], padding_x=0, padding_y=0)
fig
# Re-plot with fixed scale ranges and overlay the unit square in red,
# so the image can be repositioned within known axes.
scales = {'x': LinearScale(min=-1, max=2), 'y': LinearScale(min=-0.5, max=2)}
image = Image(image=ipyimage, scales=scales)
lines = Lines(x=[0, 1, 1, 0, 0], y=[0, 0, 1, 1, 0], scales=scales, colors=['red'])
fig = Figure(marks=[image, lines], padding_x=0, padding_y=0, animation_duration=1000)
fig.axes = [Axis(scale=scales['x']), Axis(scale=scales['y'], orientation='vertical')]
fig
# Full screen
image.x = [-1, 2]
image.y = [-.5, 2]
# Same result via the pyplot-style API.
import bqplot.pyplot as bqp
bqp.figure()
bqp.imshow(image_path, 'filename')
bqp.show()
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Displaying the image inside a bqplot Figure
Step2: Mixing with other marks
Step3: Its traits (attributes) will also respond dynamically to a change from the backend
Step4: Pyplot
|
2,097
|
<ASSISTANT_TASK:>
Python Code:
!brew ls --versions gcc
!compgen -c | grep ^gcc
import os
os.environ['CC'] = 'gcc-6'
%%cython -f
# distutils: extra_compile_args = -fopenmp
# distutils: extra_link_args = -fopenmp
# cython: boundscheck = False
from libc.math cimport log
from cython.parallel cimport prange
def f1(double[:] x, double[:] out):
cdef int i, n = x.shape[0]
for i in range(n):
out[i] = log(x[i])
def f2(double[:] x, double[:] out):
cdef int i, n = x.shape[0]
for i in prange(n, nogil=True):
out[i] = log(x[i])
data = np.random.rand(10000000)
out = np.zeros_like(data)
%timeit f1(data, out)
%timeit np.log(data, out=out)
%timeit f2(data, out)
%%cython
# distutils: extra_compile_args = -fopenmp
# distutils: extra_link_args = -fopenmp
# cython: boundscheck = False
from libc.math cimport log
from cython.parallel cimport prange
def f_single(double[:] x):
cdef int i, n = x.shape[0]
cdef double result = 0
for i in range(n):
if x[i] > 0.5:
result += log(x[i])
else:
result += 1.0
return result
def f_parallel(double[:] x):
cdef int i, n = x.shape[0]
cdef double result = 0
for i in prange(n, nogil=True):
if x[i] > 0.5:
result += log(x[i])
else:
result += 1.0
return result
%%cython
# distutils: extra_compile_args = -fopenmp
# distutils: extra_link_args = -fopenmp
# cython: boundscheck = False
from libc.math cimport log
from cython.parallel cimport prange
def g_single(double[:] x):
cdef int i, n = x.shape[0]
cdef double result = 0
for i in range(n):
if x[i] > 0.5:
result += log(x[i])
else:
result += 1.0
return result
def g_parallel(double[:] x):
cdef int i, n = x.shape[0]
cdef double tmp, result = 0
for i in prange(n, nogil=True):
if x[i] > 0.5:
tmp = log(x[i])
else:
tmp = 1.0
result += tmp
return result
print(g_single(data[:3]))
print(g_parallel(data[:3]))
%timeit g_single(data)
%timeit g_parallel(data)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: The gcc command maps back to clang. The "real" GCC is different
Step2: My "real" GCC command is gcc-5
Step3: <div style="margin-top
Step4: Make some data
Step5: Timings
Step6: Some things are tricky
Step7: Confusing explanation from the Cython docs
|
2,098
|
<ASSISTANT_TASK:>
Python Code:
# DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'uhh', 'sandbox-2', 'ocnbgchem')
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Document Authors
Step2: Document Contributors
Step3: Document Publication
Step4: Document Table of Contents
Step5: 1.2. Model Name
Step6: 1.3. Model Type
Step7: 1.4. Elemental Stoichiometry
Step8: 1.5. Elemental Stoichiometry Details
Step9: 1.6. Prognostic Variables
Step10: 1.7. Diagnostic Variables
Step11: 1.8. Damping
Step12: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Step13: 2.2. Timestep If Not From Ocean
Step14: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Step15: 3.2. Timestep If Not From Ocean
Step16: 4. Key Properties --> Transport Scheme
Step17: 4.2. Scheme
Step18: 4.3. Use Different Scheme
Step19: 5. Key Properties --> Boundary Forcing
Step20: 5.2. River Input
Step21: 5.3. Sediments From Boundary Conditions
Step22: 5.4. Sediments From Explicit Model
Step23: 6. Key Properties --> Gas Exchange
Step24: 6.2. CO2 Exchange Type
Step25: 6.3. O2 Exchange Present
Step26: 6.4. O2 Exchange Type
Step27: 6.5. DMS Exchange Present
Step28: 6.6. DMS Exchange Type
Step29: 6.7. N2 Exchange Present
Step30: 6.8. N2 Exchange Type
Step31: 6.9. N2O Exchange Present
Step32: 6.10. N2O Exchange Type
Step33: 6.11. CFC11 Exchange Present
Step34: 6.12. CFC11 Exchange Type
Step35: 6.13. CFC12 Exchange Present
Step36: 6.14. CFC12 Exchange Type
Step37: 6.15. SF6 Exchange Present
Step38: 6.16. SF6 Exchange Type
Step39: 6.17. 13CO2 Exchange Present
Step40: 6.18. 13CO2 Exchange Type
Step41: 6.19. 14CO2 Exchange Present
Step42: 6.20. 14CO2 Exchange Type
Step43: 6.21. Other Gases
Step44: 7. Key Properties --> Carbon Chemistry
Step45: 7.2. PH Scale
Step46: 7.3. Constants If Not OMIP
Step47: 8. Tracers
Step48: 8.2. Sulfur Cycle Present
Step49: 8.3. Nutrients Present
Step50: 8.4. Nitrous Species If N
Step51: 8.5. Nitrous Processes If N
Step52: 9. Tracers --> Ecosystem
Step53: 9.2. Upper Trophic Levels Treatment
Step54: 10. Tracers --> Ecosystem --> Phytoplankton
Step55: 10.2. Pft
Step56: 10.3. Size Classes
Step57: 11. Tracers --> Ecosystem --> Zooplankton
Step58: 11.2. Size Classes
Step59: 12. Tracers --> Disolved Organic Matter
Step60: 12.2. Lability
Step61: 13. Tracers --> Particules
Step62: 13.2. Types If Prognostic
Step63: 13.3. Size If Prognostic
Step64: 13.4. Size If Discrete
Step65: 13.5. Sinking Speed If Prognostic
Step66: 14. Tracers --> Dic Alkalinity
Step67: 14.2. Abiotic Carbon
Step68: 14.3. Alkalinity
|
2,099
|
<ASSISTANT_TASK:>
Python Code:
# NOTE(review): This is a Jupyter-notebook script (IPython `%` line magics,
# "alternatively, for local" cell variants). It is not valid plain Python as a
# .py file; run it cell-by-cell in a notebook. Comments below document the flow.

# make some Python3 functions available on Python2
from __future__ import division, print_function
import sys
print(sys.version_info)
import theano
print(theano.__version__)
import keras
print(keras.__version__)
# FloydHub: check data
%ls /input/dogscats/
# check current directory
%pwd
%ls
# see some files are loaded fine
%cat floyd_requirements.txt
# check no Keras2 specific function is used (when Keras1 is used)
%cat utils.py
#Create references to important directories we will use over and over
import os, sys
current_dir = os.getcwd()
LESSON_HOME_DIR = current_dir
# FloydHub: input data is read-only under /input; outputs must go under /output
DATA_HOME_DIR = "/input/dogscats/"
OUTPUT_HOME_DIR = "/output/"
# alternatively, for local
#DATA_HOME_DIR = current_dir+'/data/redux'
#import modules
# NOTE(review): utils and vgg16 are project-local (fast.ai course files) and
# must be present next to this notebook; star-import brings in save_array,
# load_array, plot_confusion_matrix and np used below.
from utils import *
from vgg16 import Vgg16
#Instantiate plotting tool
#In Jupyter notebooks, you will need to run this command before doing any plotting
%matplotlib inline
%cd $DATA_HOME_DIR
#Set path to sample/ path if desired
path = DATA_HOME_DIR + '/' #'/sample/'
test_path = DATA_HOME_DIR + '/test1/' #We use all the test data
# FloydHub
# data needs to be output under /output
# if results_path cannot be created, execute mkdir directly in the terminal
results_path = OUTPUT_HOME_DIR + '/results/'
# NOTE(review): %mkdir receives the literal string "results_path" (no $ prefix),
# so it creates a directory named results_path in the cwd — presumably intended
# to be %mkdir $results_path; verify before relying on it.
%mkdir results_path
train_path = path + '/train/'
valid_path = path + '/valid/'
# As large as you can, but no larger than 64 is recommended.
#batch_size = 8
batch_size = 64
no_of_epochs=3
vgg = Vgg16()
# Grab a few images at a time for training and validation.
batches = vgg.get_batches(train_path, batch_size=batch_size)
val_batches = vgg.get_batches(valid_path, batch_size=batch_size*2)
# Finetune: note that the vgg model is compiled inside the finetune method.
vgg.finetune(batches)
# Fit: note that we are passing in the validation dataset to the fit() method
# For each epoch we test our model against the validation set
latest_weights_filename = None
# FloydHub (Keras1)
# Weights are checkpointed after every epoch so the latest file can be
# reloaded below for validation/prediction.
for epoch in range(no_of_epochs):
    print("Running epoch: %d" % epoch)
    vgg.fit(batches, val_batches, nb_epoch=1)
    latest_weights_filename = 'ft%d.h5' % epoch
    vgg.model.save_weights(results_path+latest_weights_filename)
print("Completed %s fit operations" % no_of_epochs)
# alternatively, for local (Keras2)
# NOTE(review): this loop is the Keras2 *alternative* to the loop above (extra
# batch_size positional arg in vgg.fit); as written, running the notebook
# top-to-bottom would execute BOTH loops — run only the one matching your
# Keras version.
for epoch in range(no_of_epochs):
    print("Running epoch: %d" % epoch)
    vgg.fit(batches, val_batches, batch_size, nb_epoch=1)
    latest_weights_filename = 'ft%d.h5' % epoch
    vgg.model.save_weights(results_path+latest_weights_filename)
print("Completed %s fit operations" % no_of_epochs)
# OUTPUT_HOME_DIR, not DATA_HOME_DIR due to FloydHub restriction
# Keras flow_from_directory needs images inside a class subfolder, hence
# copying the unlabeled test images into test1/unknown/.
%cd $OUTPUT_HOME_DIR
%mkdir -p test1/unknown
%cd $OUTPUT_HOME_DIR/test1
%cp $test_path/*.jpg unknown/
# rewrite test_path
test_path = OUTPUT_HOME_DIR + '/test1/' #We use all the test data
batches, preds = vgg.test(test_path, batch_size = batch_size*2)
print(preds[:5])
filenames = batches.filenames
print(filenames[:5])
# You can verify the column ordering by viewing some images
from PIL import Image
Image.open(test_path + filenames[2])
#Save our test results arrays so we can use them again later
save_array(results_path + 'test_preds.dat', preds)
save_array(results_path + 'filenames.dat', filenames)
# Reload the last checkpoint saved by the training loop above.
vgg.model.load_weights(results_path+latest_weights_filename)
val_batches, probs = vgg.test(valid_path, batch_size = batch_size)
filenames = val_batches.filenames
expected_labels = val_batches.classes #0 or 1
#Round our predictions to 0/1 to generate labels
# probs[:,0] is the cat-probability column, so 1-probs[:,0] rounds to the
# dog label (class 1).
our_predictions = probs[:,0]
our_labels = np.round(1-our_predictions)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(expected_labels, our_labels)
plot_confusion_matrix(cm, val_batches.class_indices)
#Load our test predictions from file
preds = load_array(results_path + 'test_preds.dat')
filenames = load_array(results_path + 'filenames.dat')
#Grab the dog prediction column
isdog = preds[:,1]
print("Raw Predictions: " + str(isdog[:5]))
print("Mid Predictions: " + str(isdog[(isdog < .6) & (isdog > .4)]))
print("Edge Predictions: " + str(isdog[(isdog == 1) | (isdog == 0)]))
# sneaky trick to round down our edge predictions
# Swap all ones with .95 and all zeros with .05
# (caps log-loss penalty on confidently-wrong predictions for Kaggle scoring)
isdog = isdog.clip(min=0.05, max=0.95)
#Extract imageIds from the filenames in our test/unknown directory
filenames = batches.filenames
# f[8:...] skips the 'unknown/' prefix (8 chars) added by the %cp step above,
# leaving the numeric image id before the '.' extension.
ids = np.array([int(f[8:f.find('.')]) for f in filenames])
subm = np.stack([ids,isdog], axis=1)
subm[:5]
# FloydHub
%cd $OUTPUT_HOME_DIR
# alternatively, for local
#%cd $DATA_HOME_DIR
submission_file_name = 'submission1.csv'
# fmt='%d,%.5f' writes "id,label" rows; comments='' stops np.savetxt from
# prefixing the header with '# '.
np.savetxt(submission_file_name, subm, fmt='%d,%.5f', header='id,label', comments='')
from IPython.display import FileLink
# FloydHub
%cd $OUTPUT_HOME_DIR
FileLink(submission_file_name)
# alternatively, for local
#%cd $LESSON_HOME_DIR
#FileLink('data/redux/'+submission_file_name)
#FileLink('data/redux/'+submission_file_name)
<END_TASK>
|
<SYSTEM_TASK:>
Given the following text description, write Python code to implement the functionality described below step by step
<END_TASK>
<USER_TASK:>
Description:
Step1: Finetuning and Training
Step2: Use a pretrained VGG model with our Vgg16 class
Step4: The original pre-trained Vgg16 class classifies images into one of 1000 categories. This number of categories depends on the dataset on which Vgg16 was trained (ImageNet, in this case).
Step5: Generate Predictions
Step6: Validate Predictions
Step7: (TODO) look at data to improve model
Step8: Submit Predictions to Kaggle!
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.