| code (string, length 38–801k) | repo_path (string, length 6–263) |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="q9bje69Ic765" executionInfo={"status": "ok", "timestamp": 1604094011211, "user_tz": -660, "elapsed": 2570, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}}
import tensorflow as tf
# + id="pqrDACg77_ye" executionInfo={"status": "ok", "timestamp": 1604093902191, "user_tz": -660, "elapsed": 2579, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="b8140c43-1a2f-4933-ff70-a9456af2c982" colab={"base_uri": "https://localhost:8080/"}
def f(w1, w2):
    """Toy quadratic function 3*w1^2 + 2*w1*w2 used to demonstrate autodiff."""
    return 3 * w1 ** 2 + 2 * w1 * w2

w1, w2 = 5, 3
eps = 1e-6
# Finite-difference approximation of the partial derivatives.
# At (5, 3): df/dw1 = 6*w1 + 2*w2 = 36 and df/dw2 = 2*w1 = 10.
# (The notebook export lost the function-body indentation; restored here.)
print((f(w1 + eps, w2) - f(w1, w2)) / eps)
print((f(w1, w2 + eps) - f(w1, w2)) / eps)
# + id="mDqGWxPddCXb" executionInfo={"status": "ok", "timestamp": 1604095071984, "user_tz": -660, "elapsed": 916, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="fa4a5244-68b3-4327-ce1f-eac308797bd8" colab={"base_uri": "https://localhost:8080/"}
w1, w2 = tf.Variable(5.), tf.Variable(3.)
# Record the forward pass; the tape watches tf.Variable objects automatically.
# (The notebook export lost the with-body indentation; restored here.)
with tf.GradientTape() as tape:
    z = f(w1, w2)
# Gradients w.r.t. both variables: [36.0, 10.0] at (5, 3).
gradients = tape.gradient(z, [w1, w2])
gradients
# + id="6H-yMZRbeKgL" executionInfo={"status": "ok", "timestamp": 1604094381663, "user_tz": -660, "elapsed": 943, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="20d74bd6-a717-49e7-b74b-b0822a2d3c1f" colab={"base_uri": "https://localhost:8080/"}
# (Indentation restored — the exported with-body was at column 0.)
with tf.GradientTape() as tape:
    z = f(w1, w2)
# A non-persistent tape is released automatically after the first
# tape.gradient() call; a second call would raise a RuntimeError.
gradients = tape.gradient(z, w1)
gradients
# + id="YwP6EmwPed1K" executionInfo={"status": "ok", "timestamp": 1604094497081, "user_tz": -660, "elapsed": 885, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="b9938f7d-b717-427a-b767-2e20000a013a" colab={"base_uri": "https://localhost:8080/"}
# Using persistent and deleting tape manually
# (Indentation restored — the exported with-body was at column 0.)
with tf.GradientTape(persistent=True) as tape:
    z = f(w1, w2)
# persistent=True allows multiple gradient() calls on the same tape.
gradients_1 = tape.gradient(z, w1)
gradients_2 = tape.gradient(z, w2)
del tape  # release the tape's resources explicitly once done
gradients_1, gradients_2
# + id="CsVjCNw0fBPS" executionInfo={"status": "ok", "timestamp": 1604094906958, "user_tz": -660, "elapsed": 1059, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="94fa6035-1b1e-4b50-8a71-cdc2acea3b02" colab={"base_uri": "https://localhost:8080/"}
c1, c2 = tf.constant(5.), tf.constant(3.)
# (Indentation restored — the exported with-body was at column 0.)
with tf.GradientTape() as tape:
    z = f(w1, w2)
# The tape only watches tf.Variable objects by default, so the gradients
# w.r.t. the constants come back as [None, None].
# NOTE(review): z is computed from w1/w2, not c1/c2 — presumably the intent
# was z = f(c1, c2); confirm against the original Hands-on ML notebook.
gradients = tape.gradient(z, [c1, c2])
gradients
# + id="Ez8DO5nDgdzH" executionInfo={"status": "ok", "timestamp": 1604095092618, "user_tz": -660, "elapsed": 920, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgxpvArds7xLNZB29BI3aCcSZkWbMPAZWZLOm3o=s64", "userId": "03940255243131127536"}} outputId="071d4f54-eb15-44b3-e81e-6ff1bb26b97b" colab={"base_uri": "https://localhost:8080/"}
# To stop gradients from backpropagating through some part of a neural network
def f(w1, w2):
    """Same toy function, but gradients do not flow through the 2*w1*w2 term."""
    return 3 * w1 ** 2 + tf.stop_gradient(2 * w1 * w2)

# (Indentation restored — the exported bodies were at column 0.)
with tf.GradientTape() as tape:
    z = f(w1, w2)
# df/dw1 is now only 6*w1 (stop_gradient hides the second term in the
# backward pass even though it is used in the forward pass).
gradients = tape.gradient(z, [w1, w2])
gradients
# + id="SJg0qHgBhQPp"
# Note: at points where the computation produces infinity, the returned gradient would be NaN
| Hands-on-ML/Code/Chapter 12/autodiff.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''d2'': conda)'
# name: python388jvsc74a57bd01a7670c79b761afbd5c9016e8df3c42e8678373175e5affa484489d2881e0c43
# ---
import os  # used later for os.path.realpath / os.listdir (was missing from this notebook)

import pandas as pd  # used later for pd.read_parquet / pd.DataFrame (was missing from this notebook)

import azureml.core

# Check core SDK version number
print("SDK version:", azureml.core.VERSION)
# +
from azureml.core import Workspace, Dataset
# Load the workspace handle from the local config.json
# (downloaded from the Azure ML portal or created by the CLI).
workspace = Workspace.from_config()
# -
# ### Setup Compute
# #### Create new or use an existing compute
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Choose a name for your CPU cluster
amlcompute_cluster_name = "cpu-cluster"

# Reuse the cluster if it already exists; otherwise provision a new one.
# (Indentation of the try/except bodies restored from the notebook export.)
try:
    aml_compute = ComputeTarget(workspace=workspace, name=amlcompute_cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
                                                           max_nodes=4)
    # Fixed: was `ws`, which is undefined in this notebook — the workspace
    # handle is named `workspace`.
    aml_compute = ComputeTarget.create(workspace, amlcompute_cluster_name, compute_config)

aml_compute.wait_for_completion(show_output=True)
# -
# #### Define RunConfig for the compute
# We will also use `pandas`, `scikit-learn` and `automl`, `pyarrow` for the pipeline steps. Defining the `runconfig` for that.
# +
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
# Run configuration shared by every PythonScriptStep below: runs on the
# AmlCompute cluster inside a Docker image whose conda env is built from
# the package lists given here (not user-managed).
# Create a new runconfig object
aml_run_config = RunConfiguration()
# Use the aml_compute you created above.
aml_run_config.target = aml_compute
# Enable Docker
aml_run_config.environment.docker.enabled = True
# Use conda_dependencies.yml to create a conda environment in the Docker image for execution
aml_run_config.environment.python.user_managed_dependencies = False
# Specify CondaDependencies obj, add necessary packages
aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(
conda_packages=['pandas','scikit-learn','numpy','dask[dataframe]'],
pip_packages=['azureml-sdk[automl]', 'pyarrow','azureml-dataprep[dask]'])
print ("Run configuration created.")
# -
# ### Prepare data
# Now we will prepare for regression modeling by using `pandas`. We run various transformations to filter and combine two different NYC taxi datasets.
#
# We achieve this by creating a separate step for each transformation as this allows us to reuse the steps and saves us from running all over again in case of any change. We will keep data preparation scripts in one subfolder and training scripts in another.
#
# > The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step.
#name = 'tycho_short_parquet'
# Registered dataset used as the raw input of the pipeline.
name = '2020-01-01-till-2021-02-28-tycho-brahe'
dataset = Dataset.get_by_name(workspace, name=name)
# Passed to trip_id.py as --n_rows; presumably 0 means "use all rows" —
# confirm against the script.
n_rows = 0
# +
from azureml.pipeline.core import PipelineData
from azureml.pipeline.steps import PythonScriptStep
# Default datastore
# Intermediate step outputs below are written to the workspace default datastore.
default_store = workspace.get_default_datastore()
# +
# python scripts folder
prepare_data_folder = './scripts/prepdata/trip'
# Define output after trip_id step
data_with_id = PipelineData("data_with_id", datastore=default_store).as_dataset()
print('trip_id script is in {}.'.format(os.path.realpath(prepare_data_folder)))
trip_id_step = PythonScriptStep(
name="Add trip_id",
script_name="trip_id.py",
arguments=["--output_data_with_id", data_with_id,
"--n_rows",n_rows],
inputs=[dataset.as_named_input('blue_flow_raw')],
outputs=[data_with_id],
compute_target=aml_compute,
runconfig=aml_run_config,
source_directory=prepare_data_folder,
allow_reuse=True
)
print("trip_id_step created.")
# +
# python scripts folder
prepare_data_folder = './scripts/prepdata/trip_statistics'
# Define output after trip_id step
trip_statistics = PipelineData("trip_statistics", datastore=default_store).as_dataset()
print('trip_statistics_step script is in {}.'.format(os.path.realpath(prepare_data_folder)))
trip_statistics_step = PythonScriptStep(
name="trip statistics",
script_name="trip_statistics.py",
arguments=["--output_trip_statistics", trip_statistics],
inputs=[data_with_id.parse_parquet_files()],
outputs=[trip_statistics],
compute_target=aml_compute,
runconfig=aml_run_config,
source_directory=prepare_data_folder,
allow_reuse=True
)
print("trip_statistics_step created.")
# -
# Build the pipeline
from azureml.pipeline.core import Pipeline
pipeline = Pipeline(workspace=workspace, steps=[trip_id_step,trip_statistics_step])
#pipeline = Pipeline(workspace=workspace, steps=[trip_id_step])
# +
from azureml.core import Experiment
experiment = Experiment(workspace, 'longterm')
print("Experiment created")
# -
# regenerate_outputs=False lets unchanged steps reuse their cached outputs.
pipeline_run = experiment.submit(pipeline, regenerate_outputs=False)
print("Pipeline submitted for execution.")
from azureml.widgets import RunDetails
# Live progress widget for the submitted pipeline run (notebook only).
RunDetails(pipeline_run).show()
# +
# Name under which the statistics output could be registered as a dataset.
# Fixed: `register_name` was referenced below while its assignment was
# commented out, which raised a NameError.  The registration call itself is
# kept commented out so re-running the notebook does not create duplicate
# dataset versions.
register_name = f'{name}_statistics'
#trip_statistics_step._outputs[0].register(name=register_name)
# -
register_name
# +
from azureml.pipeline.core import PipelineData
from azureml.pipeline.steps import PythonScriptStep
# NOTE(review): this cell and the ones after it are copied from the Azure ML
# NYC Taxi tutorial — `useful_columns` and `green_taxi_data` are never defined
# in this notebook, so running it as-is raises NameError.
# python scripts folder
prepare_data_folder = './scripts/prepdata'
# rename columns as per Azure Machine Learning NYC Taxi tutorial
# Mapping serialized as a string for the script's CLI; commas are replaced by
# semicolons so the dict survives argument parsing.
green_columns = str({
"vendorID": "vendor",
"lpepPickupDatetime": "pickup_datetime",
"lpepDropoffDatetime": "dropoff_datetime",
"storeAndFwdFlag": "store_forward",
"pickupLongitude": "pickup_longitude",
"pickupLatitude": "pickup_latitude",
"dropoffLongitude": "dropoff_longitude",
"dropoffLatitude": "dropoff_latitude",
"passengerCount": "passengers",
"fareAmount": "cost",
"tripDistance": "distance"
}).replace(",", ";")
# Define output after cleansing step
cleansed_green_data = PipelineData("cleansed_green_data", datastore=default_store).as_dataset()
print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))
# cleansing step creation
# See the cleanse.py for details about input and output
cleansingStepGreen = PythonScriptStep(
name="Cleanse Green Taxi Data",
script_name="cleanse.py",
arguments=["--useful_columns", useful_columns,
"--columns", green_columns,
"--output_cleanse", cleansed_green_data],
inputs=[green_taxi_data.as_named_input('raw_data')],
outputs=[cleansed_green_data],
compute_target=aml_compute,
runconfig=aml_run_config,
source_directory=prepare_data_folder,
allow_reuse=True
)
print("cleansingStepGreen created.")
# -
# #### Cleanse Yellow taxi data
# +
# Same cleansing step for the yellow-taxi schema (different raw column names).
# NOTE(review): `useful_columns` and `yellow_taxi_data` are undefined in this
# notebook (tutorial leftovers) — this cell raises NameError if run.
yellow_columns = str({
"vendorID": "vendor",
"tpepPickupDateTime": "pickup_datetime",
"tpepDropoffDateTime": "dropoff_datetime",
"storeAndFwdFlag": "store_forward",
"startLon": "pickup_longitude",
"startLat": "pickup_latitude",
"endLon": "dropoff_longitude",
"endLat": "dropoff_latitude",
"passengerCount": "passengers",
"fareAmount": "cost",
"tripDistance": "distance"
}).replace(",", ";")
# Define output after cleansing step
cleansed_yellow_data = PipelineData("cleansed_yellow_data", datastore=default_store).as_dataset()
print('Cleanse script is in {}.'.format(os.path.realpath(prepare_data_folder)))
# cleansing step creation
# See the cleanse.py for details about input and output
cleansingStepYellow = PythonScriptStep(
name="Cleanse Yellow Taxi Data",
script_name="cleanse.py",
arguments=["--useful_columns", useful_columns,
"--columns", yellow_columns,
"--output_cleanse", cleansed_yellow_data],
inputs=[yellow_taxi_data.as_named_input('raw_data')],
outputs=[cleansed_yellow_data],
compute_target=aml_compute,
runconfig=aml_run_config,
source_directory=prepare_data_folder,
allow_reuse=True
)
print("cleansingStepYellow created.")
# -
# #### Merge cleansed Green and Yellow datasets
# We are creating a single data source by merging the cleansed versions of Green and Yellow taxi data.
# +
# Output dataset produced by the merge step.
merged_data = PipelineData("merged_data", datastore=default_store).as_dataset()
print('Merge script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# Merge the cleansed green and yellow datasets into a single table.
# See merge.py for the details of its input and output.
mergingStep = PythonScriptStep(
    name="Merge Taxi Data",
    script_name="merge.py",
    arguments=["--output_merge", merged_data],
    inputs=[cleansed_green_data.parse_parquet_files(),
            cleansed_yellow_data.parse_parquet_files()],
    outputs=[merged_data],
    compute_target=aml_compute,
    runconfig=aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True,
)
print("mergingStep created.")
# -
# #### Filter data
# This step filters out coordinates for locations that are outside the city border. We use a TypeConverter object to change the latitude and longitude fields to decimal type.
# +
# Output dataset produced by the filter step.
filtered_data = PipelineData("filtered_data", datastore=default_store).as_dataset()
print('Filter script is in {}.'.format(os.path.realpath(prepare_data_folder)))

# Drop records whose coordinates fall outside the city border.
# See filter.py for the details of its input and output.
filterStep = PythonScriptStep(
    name="Filter Taxi Data",
    script_name="filter.py",
    arguments=["--output_filter", filtered_data],
    inputs=[merged_data.parse_parquet_files()],
    outputs=[filtered_data],
    compute_target=aml_compute,
    runconfig=aml_run_config,
    source_directory=prepare_data_folder,
    allow_reuse=True,
)
print("FilterStep created.")
# -
# #### Normalize data
# In this step, we split the pickup and dropoff datetime values into the respective date and time columns and then we rename the columns to use meaningful names.
# +
# Define output after normalize step
normalized_data = PipelineData("normalized_data", datastore=default_store).as_dataset()
print('Normalize script is in {}.'.format(os.path.realpath(prepare_data_folder)))
# normalize step creation
# Splits pickup/dropoff datetimes into date and time columns and renames
# columns to meaningful names.
# See the normalize.py for details about input and output
normalizeStep = PythonScriptStep(
name="Normalize Taxi Data",
script_name="normalize.py",
arguments=["--output_normalize", normalized_data],
inputs=[filtered_data.parse_parquet_files()],
outputs=[normalized_data],
compute_target=aml_compute,
runconfig = aml_run_config,
source_directory=prepare_data_folder,
allow_reuse=True
)
print("normalizeStep created.")
# -
# #### Transform data
# Transform the normalized taxi data to final required format. This steps does the following:
#
# - Split the pickup and dropoff date further into the day of the week, day of the month, and month values.
# - To get the day of the week value, uses the derive_column_by_example() function. The function takes an array parameter of example objects that define the input data, and the preferred output. The function automatically determines the preferred transformation. For the pickup and dropoff time columns, split the time into the hour, minute, and second by using the split_column_by_example() function with no example parameter.
# - After new features are generated, use the drop_columns() function to delete the original fields as the newly generated features are preferred.
# - Rename the rest of the fields to use meaningful descriptions.
# +
# Define output after transform step
transformed_data = PipelineData("transformed_data", datastore=default_store).as_dataset()
print('Transform script is in {}.'.format(os.path.realpath(prepare_data_folder)))
# transform step creation
# Derives weekday/day/month and hour/minute/second features, drops the
# original datetime fields, and renames the remaining columns.
# See the transform.py for details about input and output
transformStep = PythonScriptStep(
name="Transform Taxi Data",
script_name="transform.py",
arguments=["--output_transform", transformed_data],
inputs=[normalized_data.parse_parquet_files()],
outputs=[transformed_data],
compute_target=aml_compute,
runconfig = aml_run_config,
source_directory=prepare_data_folder,
allow_reuse=True
)
print("transformStep created.")
# -
# ### Split the data into train and test sets
# This function segregates the data into dataset for model training and dataset for testing.
# +
train_model_folder = './scripts/trainmodel'
# train and test splits output
output_split_train = PipelineData("output_split_train", datastore=default_store).as_dataset()
output_split_test = PipelineData("output_split_test", datastore=default_store).as_dataset()
print('Data spilt script is in {}.'.format(os.path.realpath(train_model_folder)))
# test train split step creation
# Splits the transformed data into a training set (for AutoML) and a held-out
# test set.
# See the train_test_split.py for details about input and output
testTrainSplitStep = PythonScriptStep(
name="Train Test Data Split",
script_name="train_test_split.py",
arguments=["--output_split_train", output_split_train,
"--output_split_test", output_split_test],
inputs=[transformed_data.parse_parquet_files()],
outputs=[output_split_train, output_split_test],
compute_target=aml_compute,
runconfig = aml_run_config,
source_directory=train_model_folder,
allow_reuse=True
)
print("testTrainSplitStep created.")
# -
# ## Use automated machine learning to build regression model
# Now we will use **automated machine learning** to build the regression model. We will use [AutoMLStep](https://docs.microsoft.com/python/api/azureml-train-automl-runtime/azureml.train.automl.runtime.automl_step.automlstep?view=azure-ml-py) in AML Pipelines for this part. Perform `pip install azureml-sdk[automl]`to get the automated machine learning package. These functions use various features from the data set and allow an automated model to build relationships between the features and the price of a taxi trip.
# ### Automatically train a model
# #### Create experiment
# +
from azureml.core import Experiment

# Fixed: was `Experiment(ws, ...)` — `ws` is undefined in this notebook
# (copied from the NYC Taxi tutorial); the workspace handle is `workspace`.
experiment = Experiment(workspace, 'NYCTaxi_Tutorial_Pipelines')
print("Experiment created")
# -
# #### Define settings for autogeneration and tuning
#
# Here we define the experiment parameter and model settings for autogeneration and tuning. We can specify automl_settings as **kwargs as well.
#
# Use your defined training settings as a parameter to an `AutoMLConfig` object. Additionally, specify your training data and the type of model, which is `regression` in this case.
#
# Note: When using AmlCompute, we can't pass Numpy arrays directly to the fit method.
# +
import logging
from azureml.train.automl import AutoMLConfig
# Change iterations to a reasonable number (50) to get better accuracy
# iterations=2 keeps this demo fast; spearman_correlation is the metric
# AutoML optimizes, with 5-fold cross-validation.
automl_settings = {
"iteration_timeout_minutes" : 10,
"iterations" : 2,
"primary_metric" : 'spearman_correlation',
"n_cross_validations": 5
}
# Train on the pipeline's train split, restricted to the feature columns plus
# the 'cost' label.
training_dataset = output_split_train.parse_parquet_files().keep_columns(['pickup_weekday','pickup_hour', 'distance','passengers', 'vendor', 'cost'])
automl_config = AutoMLConfig(task = 'regression',
debug_log = 'automated_ml_errors.log',
path = train_model_folder,
compute_target = aml_compute,
featurization = 'auto',
training_data = training_dataset,
label_column_name = 'cost',
**automl_settings)
print("AutoML config created.")
# -
# #### Define AutoMLStep
# +
from azureml.pipeline.steps import AutoMLStep
# Pipeline step that runs the whole AutoML sweep defined by automl_config.
trainWithAutomlStep = AutoMLStep(name='AutoML_Regression',
automl_config=automl_config,
allow_reuse=True)
print("trainWithAutomlStep created.")
# -
# #### Build and run the pipeline
# +
from azureml.pipeline.core import Pipeline
from azureml.widgets import RunDetails

pipeline_steps = [trainWithAutomlStep]
# Fixed: was `Pipeline(workspace = ws, ...)` — `ws` is undefined here; the
# workspace handle in this notebook is `workspace`.
pipeline = Pipeline(workspace = workspace, steps=pipeline_steps)
print("Pipeline is built.")
pipeline_run = experiment.submit(pipeline, regenerate_outputs=False)
print("Pipeline submitted for execution.")
# -
from azureml.widgets import RunDetails
# Live progress widget for the AutoML pipeline run (notebook only).
RunDetails(pipeline_run).show()
# ### Explore the results
# +
# Before we proceed we need to wait for the run to complete.
# Blocks until the pipeline finishes; show_output=False keeps the log quiet.
pipeline_run.wait_for_completion(show_output=False)
# functions to download output to local and fetch as dataframe
def get_download_path(download_path, output_name):
    """Return the local path of a step output downloaded under *download_path*.

    Azure ML downloads step outputs into ``<download_path>/azureml/<run-id>/``;
    this helper locates that (single) run folder and appends *output_name*.
    (The notebook export lost the function-body indentation; restored here.)
    """
    output_folder = os.listdir(download_path + '/azureml')[0]
    path = download_path + '/azureml/' + output_folder + '/' + output_name
    return path
def fetch_df(step, output_name):
    """Download a pipeline step output locally and load its processed.parquet.

    (The notebook export lost the function-body indentation; restored here.
    Relies on `pandas` being imported as `pd` at the top of the file.)
    """
    output_data = step.get_output_data(output_name)
    download_path = './outputs/' + output_name
    output_data.download(download_path, overwrite=True)
    df_path = get_download_path(download_path, output_name) + '/processed.parquet'
    return pd.read_parquet(df_path)
# -
# #### View cleansed taxi data
# +
# Inspect each intermediate output by locating its step run in the completed
# pipeline and downloading the produced parquet via fetch_df().
green_cleanse_step = pipeline_run.find_step_run(cleansingStepGreen.name)[0]
yellow_cleanse_step = pipeline_run.find_step_run(cleansingStepYellow.name)[0]
cleansed_green_df = fetch_df(green_cleanse_step, cleansed_green_data.name)
cleansed_yellow_df = fetch_df(yellow_cleanse_step, cleansed_yellow_data.name)
display(cleansed_green_df.head(5))
display(cleansed_yellow_df.head(5))
# -
# #### View the combined taxi data profile
# +
merge_step = pipeline_run.find_step_run(mergingStep.name)[0]
combined_df = fetch_df(merge_step, merged_data.name)
display(combined_df.describe())
# -
# #### View the filtered taxi data profile
# +
filter_step = pipeline_run.find_step_run(filterStep.name)[0]
filtered_df = fetch_df(filter_step, filtered_data.name)
display(filtered_df.describe())
# -
# #### View normalized taxi data
# +
normalize_step = pipeline_run.find_step_run(normalizeStep.name)[0]
normalized_df = fetch_df(normalize_step, normalized_data.name)
display(normalized_df.head(5))
# -
# #### View transformed taxi data
# +
transform_step = pipeline_run.find_step_run(transformStep.name)[0]
transformed_df = fetch_df(transform_step, transformed_data.name)
display(transformed_df.describe())
display(transformed_df.head(5))
# -
# #### View training data used by AutoML
# +
split_step = pipeline_run.find_step_run(testTrainSplitStep.name)[0]
train_split = fetch_df(split_step, output_split_train.name)
display(train_split.describe())
display(train_split.head(5))
# -
# #### View the details of the AutoML run
# +
from azureml.train.automl.run import AutoMLRun
#from azureml.widgets import RunDetails

# Workaround to get the AutoML run: it is the last step in the pipeline and
# get_steps() returns steps from latest to first, so take the first yielded.
# (The notebook export lost the loop-body indentation; restored here.)
for step in pipeline_run.get_steps():
    automl_step_run_id = step.id
    print(step.name)
    print(automl_step_run_id)
    break

automl_run = AutoMLRun(experiment = experiment, run_id=automl_step_run_id)
#RunDetails(automl_run).show()
# -
# #### Retrieve all Child runs
#
# We use SDK methods to fetch all the child runs and see individual metrics that we log.
# +
# Collect the scalar metrics of every AutoML child run into one table,
# one column per iteration.
# (The notebook export lost the loop-body indentation; restored here.)
children = list(automl_run.get_children())
metricslist = {}
for run in children:
    properties = run.get_properties()
    # Keep only scalar (float) metrics for the summary table.
    metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
    metricslist[int(properties['iteration'])] = metrics

# Fixed: positional axis (`sort_index(1)`) was deprecated and removed in
# pandas 2.0 — pass the axis by keyword to sort the iteration columns.
rundata = pd.DataFrame(metricslist).sort_index(axis=1)
rundata
# -
# ### Retrieve the best model
#
# Uncomment the below cell to retrieve the best model
# +
# best_run, fitted_model = automl_run.get_output()
# print(best_run)
# print(fitted_model)
# -
# ### Test the model
# #### Get test data
#
# Uncomment the below cell to get test data
# +
# split_step = pipeline_run.find_step_run(testTrainSplitStep.name)[0]
# x_test = fetch_df(split_step, output_split_test.name)[['distance','passengers', 'vendor','pickup_weekday','pickup_hour']]
# y_test = fetch_df(split_step, output_split_test.name)[['cost']]
# display(x_test.head(5))
# display(y_test.head(5))
# -
# #### Test the best fitted model
#
# Uncomment the below cell to test the best fitted model
# +
# y_predict = fitted_model.predict(x_test)
# y_actual = y_test.values.tolist()
# display(pd.DataFrame({'Actual':y_actual, 'Predicted':y_predict}).head(5))
# +
# import matplotlib.pyplot as plt
# fig = plt.figure(figsize=(14, 10))
# ax1 = fig.add_subplot(111)
# distance_vals = [x[0] for x in x_test.values]
# ax1.scatter(distance_vals[:100], y_predict[:100], s=18, c='b', marker="s", label='Predicted')
# ax1.scatter(distance_vals[:100], y_actual[:100], s=18, c='r', marker="o", label='Actual')
# ax1.set_xlabel('distance (mi)')
# ax1.set_title('Predicted and Actual Cost/Distance')
# ax1.set_ylabel('Cost ($)')
# plt.legend(loc='upper left', prop={'size': 12})
# plt.rcParams.update({'font.size': 14})
# plt.show()
| src/models/pipelines/longterm/01.1_longterm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/egy1st/denmune-clustering-algorithm/blob/main/colab/k_nearest_evolution.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="FZgP6jwmzFtZ"
import pandas as pd
import matplotlib.pyplot as plt
import time
import os.path
import warnings
# Silence library deprecation chatter in the notebook output.
warnings.filterwarnings('ignore')
# + id="TIiMsyouzFth"
# install DenMune clustering algorithm using pip command from the offecial Python repository, PyPi
# from https://pypi.org/project/denmune/
# !pip install denmune
# then import it
from denmune import DenMune
# + colab={"base_uri": "https://localhost:8080/"} id="ezFJy4kJSCH-" outputId="ffd3d9f9-a5c3-4559-8be6-5334794fc190"
# clone datasets from our repository datasets
# Clone once; skip when the folder already exists.
if not os.path.exists('datasets'):
    # In Colab the line below runs as a shell command; as a plain script the
    # commented-out body left the `if` empty (a SyntaxError), so `pass` is
    # required.
    # !git clone https://github.com/egy1st/datasets
    pass
# + colab={"base_uri": "https://localhost:8080/", "height": 277} id="CiMDjLQJzFtj" outputId="8f843184-2ac1-4e50-b634-584329ff2fa6"
data_path = 'datasets/denmune/chameleon/'
chameleon_dataset = "t7.10k"  # ["t4.8k", "t5.8k", "t7.10k", "t8.8k"]

# train file
# Track how the number of detected clusters and both noise counts evolve as
# the k-nearest parameter grows.
detected_clusers = []
noise_type1 = []
noise_type2 = []

data_file = data_path + chameleon_dataset + '.csv'
X_train = pd.read_csv(data_file, sep=',', header=None)

from IPython.display import clear_output

# (The notebook export lost the loop-body indentation; restored here.)
for knn in range(10, 110, 10):
    print("knn", knn)
    clear_output(wait=True)
    dm = DenMune(train_data=X_train, k_nearest=knn, rgn_tsne=False)
    labels, validity = dm.fit_predict(show_analyzer=False)
    n_clusters = dm.analyzer['n_clusters']['detected']
    pre_noise = dm.analyzer['n_points']['noise']['type-1']
    post_noise = dm.analyzer['n_points']['noise']['type-2']
    detected_clusers.append([knn, n_clusters])
    noise_type1.append([knn, pre_noise])
    noise_type2.append([knn, post_noise])
    print('knn:', knn, ' :: we detected', n_clusters, 'clusters:', ' :: pre-noise:', pre_noise, 'post_noise', post_noise)
    time.sleep(0.2)
# + colab={"base_uri": "https://localhost:8080/", "height": 573} id="h5E971-oTzag" outputId="4836b7bf-79e8-496d-b49a-21960cbfc0f2"
# Line plot: detected cluster count vs. k-nearest neighbor setting.
x, y = zip(*detected_clusers)
f1 = plt.figure(1)
# Creating figure and axis objects using subplots()
fig, ax = plt.subplots(figsize=[20, 8])
ax.plot(x, y, marker='.', linewidth=2, label='evolution of detected clusters')
plt.xticks(rotation=60)
ax.set_xlabel('k-nearest neighbor')
ax.set_ylabel('number of detected clusters')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 556} id="Aj3EF_5RU-ku" outputId="d648f5ac-a28f-42d5-94be-5def90e2e990"
# Line plot: type-1 (pre-identified) noise count vs. k-nearest setting.
x, y = zip(*noise_type1)
# Creating figure and axis objects using subplots()
fig, ax = plt.subplots(figsize=[20, 8])
ax.plot(x, y, marker='.', linewidth=2, label='Noise detection')
plt.xticks(rotation=60)
ax.set_xlabel('k-nearest neighbor')
ax.set_ylabel('pre-identified noise')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 556} id="mRya0DSBax5n" outputId="e052146e-2796-4108-8c77-ae1a1b234b05"
# Line plot: type-2 (post-identified) noise count vs. k-nearest setting.
x, y = zip(*noise_type2)
# Creating figure and axis objects using subplots()
fig, ax = plt.subplots(figsize=[20, 8])
ax.plot(x, y, marker='.', linewidth=2, label='Noise detection')
plt.xticks(rotation=60)
ax.set_xlabel('k-nearest neighbor')
ax.set_ylabel('post-identified noise')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 556} id="mfgJkB-ocQI2" outputId="556abfa5-9f1c-472e-96a7-98393ac48c1f"
# Combined plot of all three series on shared axes for direct comparison.
# Creating figure and axis objects using subplots()
fig, ax = plt.subplots(figsize=[20, 8])
x, y = zip(*detected_clusers)
ax.plot(x, y, marker='.', linewidth=2, label='detected clusters')
x, y = zip(*noise_type1)
ax.plot(x, y, marker='.', linewidth=2, label='pre-identified noise')
x, y = zip(*noise_type2)
ax.plot(x, y, marker='.', linewidth=2, label='post-identified noise')
plt.xticks(rotation=60)
ax.set_xlabel('k-nearest neighbor')
ax.set_ylabel('number')
plt.legend()
plt.show()
| colab/k_nearest_evolution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import glob as glob
# +
# Collect every raw CSV file to merge into one time-indexed DataFrame.
csv_data_path = 'rawdata/'
csvfiles = glob.glob(csv_data_path + '*.csv')
print(csvfiles)
df = pd.DataFrame()
all_data = pd.DataFrame()
len(csvfiles)
# -
# The data spans a period of almost two months, February 16th to April 12th, 2010.
# +
# As the datevalues are in 100ns, and according to the paper the dataset starts at 2010-02-16, thats why
# the index is re-indexed for each file.
# As the date values are in 100 ns ticks and, per the paper, the dataset
# starts at 2010-02-16, the index is rebuilt for each file.
# (The notebook export lost the loop indentation; restored here — the
# skip-guard is assumed to cover the whole per-file processing; confirm.)
df = pd.DataFrame()
for i in range(len(csvfiles)):
    print('Processing: ' + str(csvfiles[i].split('/')[-1].split('.')[0]))
    if not 'tude' in csvfiles[i]:  # skip the latitude/longitude files
        df = pd.read_csv(csvfiles[i], header=None, index_col=0, dtype='a')
        print('Converting datetime')
        # .NET ticks (100 ns) -> seconds relative to the first sample,
        # anchored at the known start date 2010-02-16.
        df.index = pd.to_datetime((df.index - 634018095139168000) / 1e7, unit='s', origin='2010-02-16')
        print('Converting datetime, done ...')
        #df.index = pd.to_datetime(df.index)
        df[df.columns[0]] = pd.to_numeric(df[df.columns[0]])
        df.index.names = ['Date']
        # Name the single value column after the source file.
        df = df.rename(columns={df.columns[0]: csvfiles[i].split('/')[-1].split('.')[0]})
        # Fixed: DataFrame.append was removed in pandas 2.0 — use pd.concat.
        all_data = pd.concat([all_data, df], sort=True)
        all_data = all_data.resample('1min').mean()
        # Clean up memory
        del df
    print(str(i + 1) + ' done of ' + str(len(csvfiles)))
print('All done!')
# -
# NOTE(review): this dropna() result is discarded (notebook display only);
# the actual cleanup happens a few lines below.
all_data.dropna()
# +
from matplotlib import pyplot as plt
plt.scatter(all_data['fuelVolumeFlowRate'],all_data['speedKnots'])
# -
all_data = all_data.dropna()
# Persist the resampled dataset; blosc at level 9 keeps the HDF5 file small.
all_data.to_hdf('dtu_data_200210.hd5','table',complib='blosc',complevel=9)
# %pylab inline
all_data['fuelDensity'].plot()
# Round-trip check: reload the stored table and summarise it.
df_in = pd.read_hdf('dtu_data_200210.hd5','table')
df_in.describe()
# +
### Testing ...
# NOTE(review): scratch cell — `df` is whatever the last loop iteration left
# behind (it may even have been deleted); not part of the pipeline.
pd.to_datetime((df.index-634018095139168000)/1e7,unit='s',origin='2010-02-16')
# -
| notebooks/create_db/csv-dtu-local_create_database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from pyecharts.charts import Map
from pyecharts import options as opts
from pyecharts.globals import ThemeType
# Task 1 outputs: login records, study records and user records.
login = pd.read_csv('results/task1_1_1.csv')
study = pd.read_csv('results/task1_1_2.csv')
users = pd.read_csv('results/task1_1_3.csv')
def country(x):
    """Return '中国' for any login place starting with it, else the string unchanged.

    (The notebook export lost the function-body indentation; restored here.)
    """
    if x[:2] == '中国':
        return '中国'
    return x
def province(x):
    """Extract the 2-character province from a '中国<省><市>' place string.

    Returns None for places outside China, which pandas treats as missing.
    (The notebook export lost the function-body indentation; restored here.)
    """
    if x[:2] == '中国':
        return x[2:4]
    return None
def city(x):
    """Extract the city part of a place string.

    For '中国<省><市>' (longer than 4 chars) return everything after the
    province; otherwise fall back to characters 2-3.
    (The notebook export lost the function-body indentation; restored here.)
    """
    if len(x) > 4:
        return x[4:]
    return x[2:4]
# Derive country/province/city columns from the raw login_place string.
login['country'] = login.login_place.apply(country)
login['province'] = login.login_place.apply(province)
login['city'] = login.login_place.apply(city)
login
# Render a China city-level heat map of login counts (scaled down by 10 so
# the visual-map range stays readable) to results/task2_1_cities.html.
temp = login.city.value_counts()/10
c = (
Map(
init_opts=opts.InitOpts(
theme=ThemeType.LIGHT
)
)
.add(
"城市(缩小到1/10)",
[list(z) for z in zip(list(temp.index), list(temp))],
"china-cities",
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="城市"),
visualmap_opts=opts.VisualMapOpts(
min_ = temp.min(),
max_ = temp.max(),
range_text = ['High', 'Low']
),
)
.render("results/task2_1_cities.html")
)
# Same heat map at province level, written to results/task2_1_provinces.html.
temp = login.province.value_counts()/10
c = (
Map(
init_opts=opts.InitOpts(
theme=ThemeType.LIGHT
)
)
.add(
"省(缩小到1/10)",
[list(z) for z in zip(list(temp.index), list(temp))],
"china",
label_opts=opts.LabelOpts(is_show=False),
)
.set_global_opts(
title_opts=opts.TitleOpts(title="省"),
visualmap_opts=opts.VisualMapOpts(
min_ = temp.min(),
max_ = temp.max(),
range_text = ['High', 'Low']
),
)
.render("results/task2_1_provinces.html")
)
| A教育平台的线上课程智能推荐策略/task2_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Yw_NO0uEIvB6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="8922b7f5-588f-4191-bd88-beb047d76698" executionInfo={"status": "ok", "timestamp": 1583084084827, "user_tz": -60, "elapsed": 4706, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# !pip install eli5
# + id="DqHzqxdgKVqr" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
import eli5
from eli5.sklearn import PermutationImportance
from ast import literal_eval
from tqdm import tqdm_notebook
# + id="LofyNje8KDKR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b846e407-0fee-41ba-e66c-effbbc044256" executionInfo={"status": "ok", "timestamp": 1583084724611, "user_tz": -60, "elapsed": 573, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="AHU5CcVrM6ES" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="fa4a48c4-a468-4ea9-9796-582aa2e86468" executionInfo={"status": "ok", "timestamp": 1583084730012, "user_tz": -60, "elapsed": 1566, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# ls
# + id="ix9CIpVIM_Ax" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7a8fbfd4-fb87-4687-d5c6-e13726163a1c" executionInfo={"status": "ok", "timestamp": 1583084775500, "user_tz": -60, "elapsed": 1577, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# ls data
# + id="rpQ2wasdNKHZ" colab_type="code" colab={}
df = pd.read_csv('data/men_shoes.csv', low_memory=False)
# + id="-L19TinZNcOA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="168db4fe-dc97-4901-c511-fb6cec9c6e8a" executionInfo={"status": "ok", "timestamp": 1583084987205, "user_tz": -60, "elapsed": 1177, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
df.columns
# + id="lisNaeCCN9r3" colab_type="code" colab={}
def run_model(feats, model=None):
    """Cross-validate *model* on the global ``df`` using *feats* as inputs.

    Parameters
    ----------
    feats : list[str]
        Column names of ``df`` used as features.
    model : sklearn estimator, optional
        Defaults to a fresh ``DecisionTreeRegressor(max_depth=5)`` per call.
        (Fix: the original used a mutable default argument, so one shared
        estimator instance was reused across every default call.)

    Returns
    -------
    (float, float)
        Mean and standard deviation of the negative-MAE CV scores.
    """
    if model is None:
        model = DecisionTreeRegressor(max_depth=5)
    X = df[feats].values
    y = df['prices_amountmin'].values  # target: minimum listed price
    scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
    return np.mean(scores), np.std(scores)
# + id="YISF3cAqOas_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f3eea1b7-0434-4dff-f263-4658f682d51f" executionInfo={"status": "ok", "timestamp": 1583085733066, "user_tz": -60, "elapsed": 569, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# Integer-encode the (case-folded) brand and score it as a single feature.
df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0]
run_model(['brand_cat'])
# + id="ZypUKUA-PDcF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c9b1a9ac-7cce-4069-bc3a-d8027934676c" executionInfo={"status": "ok", "timestamp": 1583085767097, "user_tz": -60, "elapsed": 3693, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# Same single feature, but with a random forest instead of a single tree.
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)
# + id="hH43XpVdPtzt" colab_type="code" colab={}
def parse_features(x):
    """Parse a raw `features` cell into a {key: first_value} dict.

    Each cell holds a Python-literal list of ``{'key': ..., 'value': [...]}``
    entries with escaped quotes; NaN cells yield an empty dict. Keys and
    values are lower-cased and stripped, and only the first value of each
    entry is kept (later duplicates of a key overwrite earlier ones, as in
    the original loop).
    """
    if str(x) == 'nan':
        return {}
    entries = literal_eval(x.replace('\\"', '"'))
    return {
        entry['key'].lower().strip(): entry['value'][0].lower().strip()
        for entry in entries
    }
# Parse every row's raw features string into a dict (see parse_features).
df['features_parsed'] = df['features'].map(parse_features)
# + id="dXDBoxQuaSj5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5042664e-a3d2-4a2b-a45d-90a3f8910193" executionInfo={"status": "ok", "timestamp": 1583089219259, "user_tz": -60, "elapsed": 563, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# Collect the union of all parsed feature keys (map used only for its
# side effect of updating the set).
keys = set()
df['features_parsed'].map( lambda x: keys.update(x.keys()))
len(keys)
# + id="EqJnbTtqa9f3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["2d7ab7d7f7c7462dace591b82588cef8", "2614619fe56943fb9f1e6970eaea86b6", "<KEY>", "756538632ea6471480c2031200c92e1a", "6102cdb8a0324319be424fa6da7a7b4b", "3e37897750b54c47a907dd548d33dd15", "<KEY>", "<KEY>"]} outputId="209fab59-2888-4c0d-ccf3-284bd871644e" executionInfo={"status": "ok", "timestamp": 1583089658838, "user_tz": -60, "elapsed": 4535, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
def get_name_feat(key):
    """Column name used for the parsed feature *key* ('feat_' prefix)."""
    return f'feat_{key}'
# Materialize one 'feat_<key>' column per parsed key (NaN where absent).
for key in tqdm_notebook(keys):
    df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan)
# + id="SOnTyttXfxml" colab_type="code" colab={}
# Percentage of rows with a non-null value for each parsed feature column.
keys_stat = {}
for key in keys:
    # `.notnull().sum()` is the idiomatic (and single-pass) equivalent of the
    # original `df[False == df[col].isnull()].shape[0]` boolean-mask count.
    keys_stat[key] = df[get_name_feat(key)].notnull().sum() / df.shape[0] * 100
# + id="s4B8wSFXzYIK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="387f2151-93cb-444a-c641-77e79c33fa5b" executionInfo={"status": "ok", "timestamp": 1583094945010, "user_tz": -60, "elapsed": 578, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
{k: v for k,v in keys_stat.items() if v > 30}
# + id="I1xN8ON8yekH" colab_type="code" colab={}
# Integer-encode selected parsed-feature columns.
# NOTE(review): 'feat_brand_gender' is built from feat_gender — the name
# suggests it was meant to be 'feat_gender_cat'; later cells reference the
# misnamed column, so renaming would break them. Also, the loop below
# re-encodes every key anyway, which makes these explicit lines redundant.
df['feat_brand_cat'] = df['feat_brand'].factorize()[0]
df['feat_color_cat'] = df['feat_color'].factorize()[0]
df['feat_brand_gender'] = df['feat_gender'].factorize()[0]
df['feat_manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0]
df['feat_material_cat'] = df['feat_material'].factorize()[0]
df['feat_sport_cat'] = df['feat_sport'].factorize()[0]
df['feat_style_cat'] = df['feat_style'].factorize()[0]
# Encode every parsed feature column as '<col>_cat'.
for key in keys:
    df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]
# + id="6iZE1Ivb08Li" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="97af9b1c-4382-4f07-e498-0b167f6ff598" executionInfo={"status": "ok", "timestamp": 1583095605411, "user_tz": -60, "elapsed": 721, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# Normalize brand casing, then count rows where the catalog brand matches
# the brand parsed out of the features blob.
df['brand'] = df['brand'].map(lambda x: str(x).lower())
df[df.brand == df.feat_brand ].shape
# + id="XaTYgvNJ2wkR" colab_type="code" colab={}
# + id="kmDNLV1U0M2w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f078cda2-6a6a-4527-c872-87292298d1b2" executionInfo={"status": "ok", "timestamp": 1583096062250, "user_tz": -60, "elapsed": 3345, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
model = RandomForestRegressor(max_depth=5, n_estimators=100)
run_model(['brand_cat'],model)
# + id="4veyhd0i83DE" colab_type="code" colab={}
feats_cat = [x for x in df.columns if 'cat' in x]
# + id="oL0aDjHD3AdV" colab_type="code" colab={}
# Hand-picked categorical features for the final random-forest run; the
# commented lines would instead use every '<...>_cat' column.
feats = ['brand_cat', 'feat_brand_cat','feat_brand_gender', 'feat_material_cat', 'feat_style_cat', 'feat_sport_cat' , 'feat_metal type_cat']
#feats += feats_cat
#feats = list(set(feats))
model = RandomForestRegressor(max_depth=5, n_estimators=100)
result = run_model(feats, model)
# + id="SOlNbZMj32To" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="0e76203e-0dac-4b0a-fea6-73b2dabe9193" executionInfo={"status": "ok", "timestamp": 1583097991752, "user_tz": -60, "elapsed": 3908, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# Fit once on the full data to compute permutation importances for `feats`
# (separate from the cross-validated score printed above).
X = df[ feats].values
y = df['prices_amountmin'].values
m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
m.fit(X,y)
print(result)
perm = PermutationImportance(m, random_state=1).fit(X, y);
eli5.show_weights(perm, feature_names=feats)
# + id="p8kWJCuj5Nyd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="cb68279c-7f51-40a1-b4c1-9ba0db69360b" executionInfo={"status": "ok", "timestamp": 1583096653629, "user_tz": -60, "elapsed": 430, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
df[ df['brand'] == 'nike'].features_parsed.head().values
# + id="9Pp0R1cw6eUB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="84887ebf-15b7-4a18-faf7-05e6b625ca85" executionInfo={"status": "ok", "timestamp": 1583098243847, "user_tz": -60, "elapsed": 4441, "user": {"displayName": "Jaros\u0142<NAME>\u0142o\u0144ski", "photoUrl": "", "userId": "09566814463037187670"}}
# !git add day5.ipynb
# !git commit -m "Predykcja cen 2"
# + id="25gclqnWARu4" colab_type="code" colab={}
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "wideowsieci"
# + id="hVexWb5JAgE0" colab_type="code" colab={}
| day5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ##################################################################
# This is a notebook that contains many hash signs.
# Hopefully its python representation is not recognized as a Sphinx Gallery script...
# ##################################################################
# +
# Trivial computation cell; the surrounding runs of hash-sign comments are
# the actual subject of this jupytext round-trip test notebook.
some = 1
code = 2
some+code
##################################################################
# A comment
##################################################################
# Another comment
# -
# ##################################################################
# This is a notebook that contains many hash signs.
# Hopefully its python representation is not recognized as a Sphinx Gallery script...
# ##################################################################
| tests/notebooks/ipynb_py/Notebook with many hash signs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Statistical Mechanics - Distributions: FD, MB, EB
# <NAME> September 30, 2020 University of Toronto For technical issues: <EMAIL>
#
# In the previous module, we looked at the density of states function which described the available states as a function of energy. In this module, we will look at how particles occupy these available states by describing different distribution functions. Deciding which statistics to use to describe the probability distribution of a system depends on the nature of the particles.
#
# Maxwell-Boltzmann statistics: MB statistics describe a system composed of distinguishable and identifiable particles. This distribution is commonly used to describe the energy of atoms or molecules in a contained, ideal gas. This is commonly referred to as 'classical statistics' since particles are distinguishable.
#
# $f(\epsilon,T)_{MB} = exp\left(-\frac{\epsilon}{k_BT}\right)$
#
# Fermi-Dirac statistics: FD statistics describe a system that is composed of indistinguishable particles which obey the Pauli exclusion principle. This implies that no two particles can occupy the same energy state. These particles are Fermions which have half-spin. Common fermions are electrons and holes.
#
# $f(\epsilon, T)_{FD} = \frac1{exp\left(\frac{\epsilon-A}{k_BT}\right)+1}$
#
# Bose-Einstein statistics: BE statistics describe a system of indistinguishable particles which do not follow the Pauli exclusion principle and have full integer spin. Any number of these particles can occupy the same energy state. These particles are known as bosons and common examples include phonons and photons.
#
# $f(\epsilon,T)_{BE} = \frac1{exp\left(\frac{\epsilon-A}{k_BT}\right)-1}$
# In the following figure, all three distributions are shown as a function of energy ($\frac{\epsilon}{k_B}$ $[=]$ K). The slider controls the temperature. View how each of the distributions responds to changing temperature.
# + hide_input=true init_cell=true
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
# Interactive comparison of the three occupation statistics as a function of
# energy, with a slider controlling temperature.
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
plt.subplots_adjust(bottom=0.2)  # leave room for the temperature slider
# Energy axis expressed as E/k_B, in Kelvin.
EkB = np.linspace(0,18000, 100000) #Kelvin
# Set initial temperature to around room temperature.
T0 = 300 # Kelvin
# Chemical-potential-like offset A/k_B (Kelvin) shared by all three curves.
AkB = 10000
# Maxwell-Boltzmann. NOTE(review): this includes the (E - A) offset, while
# the MB formula in the text above has no A term — confirm which is intended.
MB0 = np.exp(-(EkB-AkB)/T0)
# Fermi-Dirac: 1 / (exp((E - A)/k_B T) + 1)
FD0 = (np.exp((EkB - AkB)/T0)+1)**(-1)
# Bose-Einstein: 1 / (exp((E - A)/k_B T) - 1)
BE0 = (np.exp((EkB - AkB)/T0)-1)**(-1)
mb, = ax1.plot(EkB, MB0, 'blue')
fd, = ax1.plot(EkB, FD0, 'red')
be, = ax1.plot(EkB, BE0, 'green')
# Set up axes, legend and the temperature slider (0-10000 K in 100 K steps).
ax1.set(xlabel=r'$\frac{\epsilon}{k_B}[=]K$', ylabel=r'$f(\epsilon, T)$', ylim=(0,1))
ax1.legend(['MB', 'FD', 'BE'], loc = 'upper right')
axcolor = 'lightgoldenrodyellow'
axL = plt.axes([0.25, 0.05, 0.60, 0.03], facecolor=axcolor)
sL = Slider(axL, 'Temperature', 0,10000, valinit=300, valstep = 100, valfmt= ' %0.fK', facecolor='blue')
def update(val):
    """Slider callback: recompute all three distributions at the slider's
    current temperature and lazily redraw the figure."""
    temperature = sL.val
    x = (EkB - AkB) / temperature
    mb.set_ydata(np.exp(-x))
    fd.set_ydata((np.exp(x) + 1) ** (-1))
    be.set_ydata((np.exp(x) - 1) ** (-1))
    fig1.canvas.draw_idle()
# Register the callback and show the interactive figure.
sL.on_changed(update)
plt.show()
| 10Statistical_Mechanics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import time
import copy
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, models
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from tqdm import tqdm
from utils.loss_function import SaliencyLoss
from utils.data_process import MyDataset
# Select the backbone variant. Both modules export the same `TranSalNet`
# class name, so the rest of the notebook is unchanged either way.
flag = 1 # 0 for TranSalNet_Dense, 1 for TranSalNet_Res
if flag:
    from TranSalNet_Res import TranSalNet
else:
    from TranSalNet_Dense import TranSalNet
# -
# ↑↑↑ Set flag=1 to load TranSalNet_Res, set flag=0 to load TranSalNet_Dense.
# +
# Load train/validation sample ids and print one row from each as a sanity check.
train_ids = pd.read_csv(r'datasets/train_ids.csv')
val_ids = pd.read_csv(r'datasets/val_ids.csv')
print(train_ids.iloc[1])
print(val_ids.iloc[1])
# Number of samples per split, used later to average the running loss.
dataset_sizes = {'train':len(train_ids),'val':len(val_ids)}
print(dataset_sizes)
# -
# ↑↑↑Load image id from dataset
# +
# Mini-batch size for both splits.
batch_size = 4
# Normalization uses the ImageNet mean/std expected by the pretrained backbone.
# NOTE(review): the r'datasets\train\...' paths mix backslashes with forward
# slashes and are Windows-specific — confirm/portabilize before running elsewhere.
train_set = MyDataset(ids=train_ids,
                      stimuli_dir=r'datasets\train\train_stimuli/',
                      saliency_dir=r'datasets\train\train_saliency/',
                      fixation_dir=r'datasets\train\train_fixation/',
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                          transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                      ]))
val_set = MyDataset(ids=val_ids,
                    stimuli_dir=r'datasets\val\val_stimuli/',
                    saliency_dir = r'datasets\val\val_saliency/',
                    fixation_dir=r'datasets\val\val_fixation/',
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                    ]))
# Shuffle only the training split.
dataloaders = {'train':DataLoader(train_set, batch_size=batch_size,shuffle=True, num_workers=4)
              ,'val':DataLoader(val_set, batch_size=batch_size,shuffle=False, num_workers=4)}
# -
# ↑↑↑Set batch_size and Load dataset
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = TranSalNet()
model = model.to(device)
# # Train the model below
# +
optimizer = optim.Adam(model.parameters(),lr=1e-5)
scheduler = lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1)
loss_fn = SaliencyLoss()

'''Training'''
# Keep a copy of the best-so-far weights for restoring after training.
best_model_wts = copy.deepcopy(model.state_dict())
num_epochs =30
best_loss = 100
# Fix: `counter` (validation epochs without improvement) was only assigned
# inside the loop, so the first val epoch with loss >= best_loss raised
# NameError. Initialize it before training starts.
counter = 0
# Sanity check: list which parameters are trainable.
for k,v in model.named_parameters():
    print('{}: {}'.format(k, v.requires_grad))
for epoch in range(num_epochs):
    print('Epoch {}/{}'.format(epoch + 1, num_epochs))
    print('-' * 10)
    # Each epoch has a training and validation phase
    for phase in ['train', 'val']:
        if phase == 'train':
            model.train()  # Set model to training mode
        else:
            model.eval()   # Set model to evaluate mode
        running_loss = 0.0
        # Iterate over data.
        for i_batch, sample_batched in tqdm(enumerate(dataloaders[phase])):
            stimuli, smap, fmap = sample_batched['image'], sample_batched['saliency'], sample_batched['fixation']
            # Fix: cast to float32 portably. The original
            # `.type(torch.cuda.FloatTensor)` hard-required CUDA and crashed
            # on CPU-only machines; `.to(device)` below handles placement.
            stimuli, smap, fmap = stimuli.float(), smap.float(), fmap.float()
            stimuli, smap, fmap = stimuli.to(device), smap.to(device), fmap.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward; track gradient history only in the training phase
            with torch.set_grad_enabled(phase == 'train'):
                outputs = model(stimuli)
                # Weighted multi-term saliency loss: maximize CC and SIM
                # (negative weights), minimize KL-divergence, maximize NSS.
                loss = -2*loss_fn(outputs,smap,loss_type='cc')\
                    -1*loss_fn(outputs,smap,loss_type='sim')+\
                    10*loss_fn(outputs,smap,loss_type='kldiv')-1*loss_fn(outputs,fmap,loss_type='nss')
                # backward + optimize only if in training phase
                if phase == 'train':
                    loss.backward()
                    optimizer.step()
            # statistics: accumulate the per-sample loss sum
            running_loss += loss.item() * stimuli.size(0)
        if phase == 'train':
            scheduler.step()
        epoch_loss = running_loss / dataset_sizes[phase]
        print('{} Loss: {:.4f}'.format(
            phase, epoch_loss))
        if phase == 'val' and epoch_loss < best_loss:
            best_loss = epoch_loss
            best_model_wts = copy.deepcopy(model.state_dict())
            counter = 0
        elif phase == 'val' and epoch_loss >= best_loss:
            counter += 1
            # Early-stop after 5 consecutive non-improving validation epochs.
            if counter == 5:
                print('early stop!')
                break
    else:
        # Phase loop finished without an early-stop break: next epoch.
        continue
    # Propagate the early-stop break out of the epoch loop.
    break
print()
print('Best val loss: {:4f}'.format(best_loss))
# Restore the weights of the best validation epoch.
model.load_state_dict(best_model_wts)
# -
# # Save the model below
# Persist the best-validation weights (state_dict only, not the full module).
savepath = r'mymodel.pth'
torch.save(model.state_dict(),savepath)
| fine-tune&train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''pytorch'': conda)'
# name: python38364bitpytorchconda70fdc7f787194f4c972bb3207dd25917
# ---
# + tags=[] id="7yPFc0JZk6ir" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 867} outputId="60b4536d-0c7b-4ccf-ce67-18ed6f85917a"
# !pip install d2l==0.14.3
# + id="y2OX2rAAk6i4" colab_type="code" colab={}
import torch
import torchvision
from torch import nn
from d2l import torch as d2l
# + id="Bm6l3DRPrn-c" colab_type="code" tags=[] colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ee388b93-d623-4176-a667-e720a057f35a"
d2l.DATA_HUB['pokemon'] = (d2l.DATA_URL + 'pokemon.zip',
'c065c0e2593b8b161a2d7873e42418bf6a21106c')
data_dir = d2l.download_extract('pokemon')
# + id="55azPy5irrfU" colab_type="code" colab={}
batch_size = 256
# Resize to 64x64, convert to [0, 1] tensors, then shift/scale to [-1, 1]
# (Normalize(0.5, 0.5)) so real images match the generator's tanh range.
transformer = torchvision.transforms.Compose([
    torchvision.transforms.Resize((64, 64)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize(0.5, 0.5)
])
pokemon = torchvision.datasets.ImageFolder(data_dir, transformer)
data_iter = torch.utils.data.DataLoader(
    pokemon, batch_size=batch_size,
    shuffle=True, num_workers=d2l.get_dataloader_workers())
# + id="o0S-Qbarr5DK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 619} outputId="25961dbc-37a9-4302-e306-0f3a42f377bd"
# Preview the first 20 images of one batch; permute CHW -> HWC and undo the
# [-1, 1] normalization for display.
d2l.set_figsize((4, 4))
for X, y in data_iter:
    imgs = X[0:20,:,:,:].permute(0, 2, 3, 1)/2+0.5
    d2l.show_images(imgs, num_rows=4, num_cols=5)
    break
# + [markdown] id="ty5MX2P9xOc3" colab_type="text"
# https://nerdparadise.com/programming/pythonpil
# + [markdown] id="QudDaByI2JSZ" colab_type="text"
# http://preview.d2l.ai/d2l-en/master/chapter_linear-networks/image-classification-dataset.html
# + id="_ZK2Bm-l2-xA" colab_type="code" colab={}
class G_block(nn.Module):
    """DCGAN generator block: transposed conv -> batch norm -> ReLU.

    Upsamples `nz`-channel input to `channels` feature maps; with the default
    kernel_size=4, strides=2, padding=1 the spatial size doubles.

    `alpha` is kept only for signature compatibility and is ignored.
    Fix: the original wrote ``nn.ReLU(alpha)``, but nn.ReLU's sole argument
    is the boolean `inplace` flag — passing 0.2 silently made the activation
    in-place rather than "leaky". DCGAN generators use a plain ReLU.
    """
    def __init__(self, channels, nz=3, kernel_size=4, strides=2,
                 padding=1, alpha=0.2, **kwargs):
        super(G_block, self).__init__(**kwargs)
        self.conv2d = nn.ConvTranspose2d(
            nz, channels, kernel_size, strides, padding, bias=False)
        self.batch_norm = nn.BatchNorm2d(channels)
        self.activation = nn.ReLU()

    def forward(self, X):
        # Upsample, normalize, then apply the non-linearity.
        return self.activation(self.batch_norm(self.conv2d(X)))
# + id="AVn41eJV81tf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f16d7984-292e-4895-d86e-9631880e5c6a"
x = torch.zeros((2, 3, 16, 16))
g_blk = G_block(20)
g_blk(x).shape
# + id="u18cU6AO84Ew" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="99476dc0-576d-4ca2-fa00-a47de40b16fc"
x = torch.zeros((2, 3, 1, 1))
g_blk = G_block(20, strides=1, padding=0)
g_blk(x).shape
# + id="DUrvU8wm85go" colab_type="code" colab={}
def Conv2DTranspose(channels, kernel_size, strides, padding, use_bias, nc=3):
    """Keras-style wrapper around nn.ConvTranspose2d (`nc` = input channels)."""
    layer = nn.ConvTranspose2d(nc, channels, kernel_size=kernel_size,
                               stride=strides, padding=padding, bias=use_bias)
    return layer
# + id="v8TIo-e386yU" colab_type="code" colab={}
# DCGAN generator: project a 100-d latent vector (shaped 100x1x1) up to a
# 3x64x64 image in [-1, 1] via four upsampling blocks plus a final
# transposed conv and tanh.
n_G = 64
net_G = nn.Sequential(
    G_block(n_G*8, nz=100, strides=1, padding=0), # Output: (64 * 8, 4, 4)
    G_block(n_G*4, n_G*8), # Output: (64 * 4, 8, 8)
    G_block(n_G*2, n_G*4), # Output: (64 * 2, 16, 16)
    G_block(n_G, n_G*2), # Output: (64, 32, 32)
    Conv2DTranspose(
        3, nc=n_G, kernel_size=4, strides=2, padding=1, use_bias=False),
    nn.Tanh() # Output: (3, 64, 64)
)
# + id="85_5mMj388vz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 433} outputId="31e28532-20bf-4220-c5e1-e5d3201c044c"
print(net_G)
# + id="T1G1l3Ms8-cs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="3df54c64-3796-416c-fe3c-5e44da76ef0c"
x = torch.zeros((1, 100, 1, 1))
net_G(x).shape
# + id="-FyT12CN9AQG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 267} outputId="75fcaa79-1ef4-453f-f3be-325467e8baf9"
alphas = [0, 0.2, 0.4, .6, .8, 1]
x = torch.arange(-2, 1, 0.1)
Y = [nn.LeakyReLU(alpha)(x).numpy() for alpha in alphas]
d2l.plot(x.numpy(), Y, 'x', 'y', alphas)
# + id="D07y48-B9Bo7" colab_type="code" colab={}
class D_block(nn.Module):
    """DCGAN discriminator block: strided conv -> batch norm -> LeakyReLU(alpha).

    Halves the spatial size with the default kernel_size=4, strides=2,
    padding=1; `nc` is the number of input channels.
    """
    def __init__(self, channels, nc=3, kernel_size=4, strides=2,
                 padding=1, alpha=0.2, **kwargs):# nc: in_channels
        super(D_block, self).__init__(**kwargs)
        self.conv2d = nn.Conv2d(
            nc, channels, kernel_size, strides, padding, bias=False)
        self.batch_norm = nn.BatchNorm2d(channels)
        self.activation = nn.LeakyReLU(alpha)

    def forward(self, X):
        # Downsample, normalize, then apply the leaky non-linearity.
        features = self.conv2d(X)
        features = self.batch_norm(features)
        return self.activation(features)
# + id="Pbp8QcJd9Dlv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="8a0975d1-71fe-435d-875b-e355fed2db05"
x = torch.zeros((2, 3, 16, 16))
d_blk = D_block(20)
d_blk(x).shape
# + id="-Z_UUCo09GrM" colab_type="code" colab={}
def Conv2D(channels, kernel_size, use_bias, nc=3):
    """Thin wrapper around nn.Conv2d with `nc` input channels."""
    return nn.Conv2d(in_channels=nc, out_channels=channels,
                     kernel_size=kernel_size, bias=use_bias)
# + id="xcBkKur-9Hv_" colab_type="code" colab={}
# DCGAN discriminator: mirror of the generator — four downsampling blocks
# collapse a 3x64x64 image to a single real/fake logit (1x1x1).
n_D = 64
net_D = nn.Sequential(
    D_block(n_D), # Output: (64, 32, 32)
    D_block(n_D*2, n_D), # Output: (64 * 2, 16, 16)
    D_block(n_D*4, n_D*2), # Output: (64 * 4, 8, 8)
    D_block(n_D*8, n_D*4), # Output: (64 * 8, 4, 4)
    Conv2D(1, nc=n_D*8, kernel_size=4, use_bias=False) # Output: (1, 1, 1)
)
# + id="BACO2BXe9Vgi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 416} outputId="583b02c2-6b12-4fd3-c0ff-e8012e7666a0"
print(net_D)
# + id="S5irvzCj9XVT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="dea12e78-9d08-46e7-b636-fd44f15f861c"
x = torch.zeros((1, 3, 64, 64))
net_D(x).shape
# + id="Dv7oVJT09ZCa" colab_type="code" colab={}
def update_D(X, Z, net_D, net_G, loss, trainer_D, device=d2l.try_gpu()): #@save
    """Update discriminator.

    One optimizer step on net_D: push real samples X toward label 1 and
    generated samples net_G(Z) toward label 0; returns the averaged loss.
    """
    n = X.shape[0]
    real_labels = torch.ones((n, 1, 1, 1), device=device)
    fake_labels = torch.zeros((n, 1, 1, 1), device=device)
    trainer_D.zero_grad()
    real_Y = net_D(X)
    fake_X = net_G(Z)
    # detach(): the discriminator step must not backpropagate into net_G.
    fake_Y = net_D(fake_X.detach())
    loss_D = (loss(real_Y, real_labels) + loss(fake_Y, fake_labels)) / 2
    loss_D.backward()
    trainer_D.step()
    return loss_D
# + id="lQmve7kL9lH5" colab_type="code" colab={}
def update_G(Z, net_D, net_G, loss, trainer_G, device=d2l.try_gpu()): #@save
    """Update generator.

    One optimizer step on net_G: push the discriminator's verdict on the
    generated batch toward the "real" label 1; returns the loss.
    """
    real_labels = torch.ones((Z.shape[0], 1, 1, 1), device=device)
    trainer_G.zero_grad()
    # `fake_X` from update_D could in principle be reused, but net_D changed
    # since then, so the discriminator score must be recomputed anyway.
    fake_X = net_G(Z)
    fake_Y = net_D(fake_X)
    loss_G = loss(fake_Y, real_labels)
    loss_G.backward()
    trainer_G.step()
    return loss_G
# + id="Zbeitm7_9uPL" colab_type="code" colab={}
def train(net_D, net_G, data_iter, num_epochs, lr, latent_dim, device=d2l.try_gpu()):
    """Adversarial training loop: alternately update D and G each batch,
    plotting per-epoch average losses and a 3x7 grid of generated samples."""
    loss = nn.BCEWithLogitsLoss()
    # DCGAN initialization: all weights ~ N(0, 0.02).
    for w in net_D.parameters():
        nn.init.normal_(w, 0, 0.02)
    for w in net_G.parameters():
        nn.init.normal_(w, 0, 0.02)
    net_D.zero_grad()
    net_G.zero_grad()
    bata1 = 0.5  # Adam beta1 (sic: variable name should read "beta1")
    trainer_D = torch.optim.Adam(net_D.parameters(), lr=lr, betas=(bata1, 0.999))
    trainer_G = torch.optim.Adam(net_G.parameters(), lr=lr, betas=(bata1, 0.999))
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[1, num_epochs], nrows=2, figsize=(5, 5),
                            legend=['discriminator', 'generator'])
    animator.fig.subplots_adjust(hspace=0.3)
    for epoch in range(1, num_epochs + 1):
        # Train one epoch
        timer = d2l.Timer()
        metric = d2l.Accumulator(3)  # loss_D, loss_G, num_examples
        for X, _ in data_iter:
            batch_size = X.shape[0]
            # Fresh latent noise per batch, shaped for the conv generator.
            Z = torch.normal(0, 1, size=(batch_size, latent_dim, 1, 1))
            X, Z = X.to(device), Z.to(device)
            trainer_D.zero_grad()
            trainer_G.zero_grad()
            # Accumulate summed losses; averaged by example count below.
            metric.add(update_D(X, Z, net_D, net_G, loss, trainer_D),
                       update_G(Z, net_D, net_G, loss, trainer_G),
                       batch_size)
        # Show generated examples
        Z = torch.normal(0, 1, size=(21, latent_dim, 1, 1))
        Z = Z.to(device)
        # Map generator output from [-1, 1] back to [0, 1] for display.
        fake_x = net_G(Z).permute(0, 2, 3, 1) / 2 + 0.5
        # Tile the 21 samples into a 3x7 image grid.
        imgs = torch.cat(
            [torch.cat([fake_x[i * 7 + j] for j in range(7)], dim=1)
             for i in range(len(fake_x)//7)], dim=0)
        animator.axes[1].cla()
        animator.axes[1].imshow(imgs.cpu().detach().numpy())
        # Show the losses
        loss_D, loss_G = metric[0] / metric[2], metric[1] / metric[2]
        animator.add(epoch, (loss_D, loss_G))
    print(f'loss_D {loss_D:.3f}, loss_G {loss_G:.3f}, '
          f'{metric[2] / timer.stop():.1f} examples/sec on {str(device)}')
# + id="zRTso61x9zkp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 156} outputId="48a7b051-d957-4448-c463-bf6853758387"
# Hyperparameters from the d2l DCGAN section; kick off training.
latent_dim, lr, num_epochs = 100, 0.005, 20
train(net_D, net_G, data_iter, num_epochs, lr, latent_dim)
| chapter-generative-adversarial-networks/dcgan_n2train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from collections import deque
class Solution:
    """LeetCode 1091: length of the shortest clear path (8-directional)
    from the top-left to the bottom-right cell of a binary grid, or -1.

    Fix: the original started a BFS from *every* cell of the grid —
    including wall cells — reusing one shared `seen` set. A region that
    reaches the target but is unreachable from (0, 0) could therefore
    produce a bogus positive path length instead of -1 (the notebook's own
    demo grid triggers this). Rewritten as a single BFS from (0, 0).
    """

    def shortestPathBinaryMatrix(self, grid) -> int:
        rows, cols = len(grid), len(grid[0])
        # The path must both start and end on a clear (0) cell.
        if grid[0][0] == 1 or grid[-1][-1] == 1:
            return -1
        if rows == 1 and cols == 1:
            return 1  # start coincides with the target
        dirs = [(1, 0), (0, 1), (-1, 0), (0, -1),
                (1, 1), (-1, -1), (1, -1), (-1, 1)]
        seen = {(0, 0)}
        dq = deque([(0, 0)])
        length = 1  # cells on the path so far (BFS level)
        while dq:
            # Expand one BFS level at a time so `length` counts path cells.
            for _ in range(len(dq)):
                x, y = dq.popleft()
                for a, b in dirs:
                    nx, ny = x + a, y + b
                    if nx < 0 or ny < 0 or nx >= rows or ny >= cols:
                        continue
                    if grid[nx][ny] == 1 or (nx, ny) in seen:
                        continue
                    if (nx, ny) == (rows - 1, cols - 1):
                        return length + 1
                    seen.add((nx, ny))
                    dq.append((nx, ny))
            length += 1
        return -1
# -
solution = Solution()
# NOTE(review): the target's region in this grid is cut off from (0, 0), so
# the true shortest-path answer is -1. If this cell prints a positive number,
# shortestPathBinaryMatrix is starting BFS from cells other than (0, 0).
solution.shortestPathBinaryMatrix([[0,0,0,0,0],
                  [1,1,0,1,0],
                  [0,1,1,1,1],
                  [1,1,1,1,0],
                  [0,1,1,0,0]])
| BFS/1218/1091. Shortest Path in Binary Matrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# <hr style="margin-bottom: 40px;">
#
# <img src="https://user-images.githubusercontent.com/7065401/68501079-0695df00-023c-11ea-841f-455dac84a089.jpg"
# style="width:400px; float: right; margin: 0 40px 40px 40px;"></img>
#
# # Reading HTML tables
#
# In this lecture we'll learn how to read and parse HTML tables from websites into a list of `DataFrame` objects to work with.
# 
#
# ## Hands on!
# !pip install lxml
import pandas as pd
# 
#
# ## Parsing raw HTML strings
#
# Another useful pandas method is `read_html()`. This method will read HTML tables from a given URL, a file-like object, or a raw string containing HTML, and return a list of `DataFrame` objects.
#
# Let's try to read the following `html_string` into a `DataFrame`.
html_string = """
<table>
<thead>
<tr>
<th>Order date</th>
<th>Region</th>
<th>Item</th>
<th>Units</th>
<th>Unit cost</th>
</tr>
</thead>
<tbody>
<tr>
<td>1/6/2018</td>
<td>East</td>
<td>Pencil</td>
<td>95</td>
<td>1.99</td>
</tr>
<tr>
<td>1/23/2018</td>
<td>Central</td>
<td>Binder</td>
<td>50</td>
<td>19.99</td>
</tr>
<tr>
<td>2/9/2018</td>
<td>Central</td>
<td>Pencil</td>
<td>36</td>
<td>4.99</td>
</tr>
<tr>
<td>3/15/2018</td>
<td>West</td>
<td>Pen</td>
<td>27</td>
<td>19.99</td>
</tr>
</tbody>
</table>
"""
dfs = pd.read_html(html_string)
# The `read_html` just returned one `DataFrame` object:
len(dfs)
# +
df = dfs[0]
df
# -
# Previous `DataFrame` looks quite similar to the raw HTML table, but now we have a `DataFrame` object, so we can apply any pandas operation we want to it.
df.shape
df.loc[df['Region'] == 'Central']
df.loc[df['Units'] > 35]
# ### Defining header
#
# Pandas will automatically find the header to use thanks to the <thead> tag.
#
# But in many cases we'll find wrong or incomplete tables that make the `read_html` method parse the tables in a wrong way without the proper headers.
#
# To fix them we can use the `header` parameter.
html_string = """
<table>
<tr>
<td>Order date</td>
<td>Region</td>
<td>Item</td>
<td>Units</td>
<td>Unit cost</td>
</tr>
<tr>
<td>1/6/2018</td>
<td>East</td>
<td>Pencil</td>
<td>95</td>
<td>1.99</td>
</tr>
<tr>
<td>1/23/2018</td>
<td>Central</td>
<td>Binder</td>
<td>50</td>
<td>19.99</td>
</tr>
<tr>
<td>2/9/2018</td>
<td>Central</td>
<td>Pencil</td>
<td>36</td>
<td>4.99</td>
</tr>
<tr>
<td>3/15/2018</td>
<td>West</td>
<td>Pen</td>
<td>27</td>
<td>19.99</td>
</tr>
</table>
"""
pd.read_html(html_string)[0]
# In this case, we'll need to pass the row number to use as header using the `header` parameter.
pd.read_html(html_string, header=0)[0]
# 
#
# ## Parsing HTML tables from the web
#
# Now that we know how `read_html` works, go one step beyond and try to parse HTML tables directly from an URL.
#
# To do that we'll call the `read_html` method with a URL as parameter.
# ### Simple example
html_url = "https://www.basketball-reference.com/leagues/NBA_2019_per_game.html"
nba_tables = pd.read_html(html_url)
len(nba_tables)
# We'll work with the only one table found:
nba = nba_tables[0]
nba.head()
# ### Complex example
#
# We can also use the `requests` module to get HTML code from an URL to parse it into `DataFrame` objects.
#
# If we look at the given URL we can see multiple tables about The Simpsons TV show.
#
# We want to keep the table with information about each season.
# +
import requests
html_url = "https://en.wikipedia.org/wiki/The_Simpsons"
# +
r = requests.get(html_url)
wiki_tables = pd.read_html(r.text, header=0)
# -
len(wiki_tables)
simpsons = wiki_tables[1]
simpsons.head()
# Quick clean on the table: remove extra header rows and set `Season` as index.
# Drop the two spanning header rows that read_html parsed as data rows,
# then index the table by season.
simpsons.drop([0, 1], inplace=True)
simpsons.set_index('Season', inplace=True)
# Which season has the lowest number of episodes?
simpsons['No. ofepisodes'].unique()
# Keep only seasons whose episode count is known.
simpsons = simpsons.loc[simpsons['No. ofepisodes'] != 'TBA']
# Fix: the episode-count column is parsed as strings, so .min() would compare
# lexicographically ('13' < '9'). Convert to integers after dropping 'TBA'.
simpsons['No. ofepisodes'] = pd.to_numeric(simpsons['No. ofepisodes'])
# +
min_season = simpsons['No. ofepisodes'].min()
min_season
# -
simpsons.loc[simpsons['No. ofepisodes'] == min_season]
# 
#
# ## Save to CSV file
#
# Finally save the `DataFrame` to a CSV file as we saw on previous lectures.
simpsons.head()
simpsons.to_csv('out.csv')
pd.read_csv('out.csv', index_col='Season').head()
# 
| Reading Data/lesson-17-reading-html-tables/files/Lecture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Credit Card Application
# Commercial banks receive a lot of applications for credit cards. Many of them get rejected for many reasons, like high loan balances, low income levels, or too many inquiries on an individual's credit report, for example. Manually analyzing these applications is mundane, error-prone, and time-consuming (and time is money!). Luckily, this task can be automated with the power of machine learning and pretty much every commercial bank does so nowadays. In this notebook, we will build an automatic credit card approval predictor using machine learning techniques, just like the real banks do.
#
#importing some dependencies
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
#Reading the file into the pandas dataframe
# header=None: crx.data ships without a header row, so columns are labeled
# 0..15. Missing entries in this file are encoded as '?' (handled below).
card = pd.read_csv('crx.data', header=None)
card.head()
#Describing the card data
card.describe()
card.info()
card.tail(17)
# # Handling Missing Values
# Missing values can affect the performance of a machine learning model heavily. While ignoring the missing values our machine learning model may miss out on information about the dataset that may be useful for its training.
# The dataset has missing values, which we will also take care of in this task. The missing values in the dataset are labeled with '?', which can be seen in the last cell's output. We first temporarily replace these missing value question marks with NaN using the ".replace method", setting the inplace value to 'True' and using np.nan.
#
# Another method employed is the **Mean Manipulation** method, using **.fillna()**. This method is preferrably used on Numeric columns. For the Non-numeric columns we Iterate over each column of cc_apps using a for loop. Checking if the data-type of the column is of object type by using the "**dtypes keyword**". Using the "**fillna()**" method, imputing the column's missing values with the most frequent value of that column with the **value_counts() method** and **index attribute** and assign it to cc_apps.
# Finally, we verify if there are any more missing values in the dataset that are left to be imputed by printing the total number of NaNs in each column.
# +
# Replace the ? with NaN
# '?' is the file's missing-value sentinel; converting to np.nan lets pandas'
# isna()/fillna() machinery see and impute these cells.
card.replace('?', np.nan, inplace=True)
# Checking for the missing values again
print(card.tail(17))
# -
# ## Handling the missing values (Numeric)
# We replaced all the question marks with NaNs. This is going to help us in the next missing value treatment that we are going to perform. Ignoring missing values can affect the performance of a machine learning model heavily. While ignoring the missing values our machine learning model may miss out on information about the dataset that may be useful for its training. Then, there are many models which cannot handle missing values implicitly such as LDA. So, to avoid this problem, we are going to impute the missing values with a strategy called mean imputation. As your dataset contains both numeric and non-numeric data, for this task you will only impute the missing values (NaNs) present in the columns having numeric data-types (columns 2, 7, 10 and 14).
# Pandas provides the fillna() function for replacing missing values with a specific value. For example, we can use fillna() to replace missing values with the mean value for each column. Thus, mean imputation is only useful for numeric columns.
# +
# Impute the missing values with mean imputation
# FIX: pandas >= 2.0 raises TypeError when DataFrame.mean() is called on a
# frame that still contains object (string) columns. Restrict the mean to the
# numeric columns, which are the only ones mean imputation applies to anyway.
card.fillna(card.mean(numeric_only=True), inplace=True)
# Count the number of NaNs in the dataset to verify
# (only the non-numeric columns should still report missing values here)
card.isna().sum()
# -
# ## Handling the missing values (Non Numeric)
# We have successfully taken care of the missing values present in the numeric columns. There are still some missing values to be imputed for columns 0, 1, 3, 4, 5, 6 and 13. All of these columns contain non-numeric data and this why the mean imputation strategy would not work here. This needs a different treatment. We are going to impute these missing values with the most frequent values as present in the respective columns. This is good practice when it comes to imputing missing values for categorical data in general.
#
# The column names of a pandas DataFrame can be accessed using columns '**df[cols]**' attribute. The dtypes attribute provides the data type. In this part, object is the data type that you should be concerned about. The value_counts() method returns the frequency distribution of each value in the column, and the index attribute can then be used to get the most frequent value.
# +
# Iterate over each column of cc_apps
for cols in card:
    # Check if the column is of object type
    if card[cols].dtypes == 'object':
        # BUG FIX: the original called card.fillna(...) on the WHOLE frame,
        # so every remaining NaN in every column was filled with the most
        # frequent value of the first object column reached. Impute only the
        # current column, with its own most frequent value (value_counts()
        # sorts by frequency, so index[0] is that column's mode).
        card[cols] = card[cols].fillna(card[cols].value_counts().index[0])
# Count the number of NaNs in the dataset and print the counts to verify
card.isnull().sum()
# -
# ## Preprocessing the data (Encoding)
# We are going to divide these remaining preprocessing steps into three main tasks:
# - Convert the non-numeric data into numeric.
# - Split the data into train and test sets.
# - Scale the feature values to a uniform range.
# First, we will be converting all the non-numeric values into numeric ones. We do this because not only it results in a faster computation but also many machine learning models (like XGBoost) (and especially the ones developed using scikit-learn) require the data to be in a strictly numeric format. We will do this by using a technique called label encoding.
# The values of each column a pandas DataFrame can be accessed using columns and values attributes consecutively. The dtypes attribute provides the data type. In this part, object is the data type that we should be concerned about.
# Label-encode every remaining string column so the frame is fully numeric.
le = LabelEncoder()
# select_dtypes picks exactly the object-dtype columns the original loop
# filtered for, in the same column order.
for column in card.select_dtypes(include='object').columns:
    # fit_transform re-fits the encoder per column, mapping its distinct
    # string values to integers 0..n-1.
    card[column] = le.fit_transform(card[column])
card.info()
card.head()
# ## Splitting the dataset into train and test sets
# Now, we will split our data into train set and test set to prepare our data for two different phases of machine learning **Modeling**: training and testing. Ideally, no information from the test data should be used to scale the training data or should be used to direct the training process of a machine learning model. Hence, we first split the data and then apply the scaling.
# Also, features like DriversLicense and ZipCode are not as important as the other features in the dataset for predicting credit card approvals. We should drop them to design our machine learning model with the best set of features. In Data Science literature, this is often referred to as **Feature Selection**.
# Note, setting random_state ensures the dataset is split with same sets of instances every time the code is run.
# +
# Import train_test_split
from sklearn.model_selection import train_test_split
# Drop the features 11 and 13
# (per the discussion above these correspond to the DriversLicense and
# ZipCode attributes, judged uninformative for approval prediction)
card = card.drop([11, 13], axis=1)
# Segregate features and labels into separate variables
# Column 15 is the approval decision (label); everything else is a feature.
X = card.drop(15, axis=1)
y = card[15]
print(X.head())
y.head()
# convert the DataFrame to a NumPy array
X = X.values
y = y.values
# Split into train and test sets
# random_state=42 makes the split reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.33,
                                                    random_state=42)
# -
X_test = pd.DataFrame(X_test)
print(X_test)
# ## Preprocessing the data (Scaling)
# The data is now split into two separate sets - train and test sets respectively. We are only left with one final preprocessing step of scaling before we can fit a machine learning model to the data.
# Now, let's try to understand what these scaled values mean in the real world. Let's use CreditScore as an example. The credit score of a person is their creditworthiness based on their credit history. The higher this number, the more financially trustworthy a person is considered to be. So, a CreditScore of 1 is the highest since we are rescaling all the values to the range of 0-1.
# Thus, When a dataset has varying ranges as in this credit card approvals dataset, one a small change in a particular feature may not have a significant effect on the other feature, which can cause a lot of problems when predictive modeling. Scaling helps to reduce the ranges and thus effect change.
# +
# Import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
# Instantiate MinMaxScaler and use it to rescale X_train and X_test
# The scaler is fit on the training set only and merely applied to the test
# set, which avoids leaking test-set statistics into training.
scaler = MinMaxScaler(feature_range=(0, 1))
rescaledX_train = scaler.fit_transform(X_train)
rescaledX_test = scaler.transform(X_test)
# +
# rescaledX_test = pd.DataFrame(rescaledX_test)
# print(rescaledX_test)
# -
# ## Fitting a logistic regression model to the train set
# Essentially, predicting if a credit card application will be approved or not is a classification task. According to UCI, our dataset contains more instances that correspond to "Denied" status than instances corresponding to "Approved" status. Specifically, out of 690 instances, there are 383 (55.5%) applications that got denied and 307 (44.5%) applications that got approved.
#
# This gives us a benchmark. A good machine learning model should be able to accurately predict the status of the applications with respect to these statistics.
#
# Which model should we pick? A question to ask is: are the features that affect the credit card approval decision process correlated with each other? Although we can measure correlation, that is outside the scope of this notebook, so we'll rely on our intuition that they indeed are correlated for now. Because of this correlation, we'll take advantage of the fact that generalized linear models perform well in these cases. Let's start our machine learning modeling with a Logistic Regression model (a generalized linear model).
# +
# Import LogisticRegression
from sklearn.linear_model import LogisticRegression
# Instantiate a LogisticRegression classifier with default parameter values
reg = LogisticRegression()
# Fit logreg to the train set
# (features are the min-max scaled training matrix built above)
reg.fit(rescaledX_train, y_train)
# -
# ## Making predictions and evaluating performance
# We will now evaluate our model on the test set with respect to classification accuracy. But we will also take a look the model's confusion matrix. In the case of predicting credit card applications, it is equally important to see if our machine learning model is able to predict the approval status of the applications as denied that originally got denied. If our model is not performing well in this aspect, then it might end up approving the application that should have been approved. The confusion matrix helps us to view our model's performance from these aspects.
#
# Confusion matrix is a Classification Metrics, used in classification problems in Machine Learning. The Confusion matrix is one of the evaluation metrics available to evaluate how well the algorithm is performing. We need them to evaluate algorithms as they check the performance of the classifier that is used in the algorithm. The classifiers help to build models e.g. Logistic Regression algorithm will use a Logistic Regression Classifier in Sklearn Python Library.
# So we have;
#
# TN FP,
#
# FN TP,
#
# in the matrix result and it translates the classifier's performance as;
# The Classifier’s Performance;
# - It denotes the true negatives meaning the number of negative instances (denied applications) predicted by the model correctly(TN).
# - It denotes the number of negative instances predicted by our model wrongly(FN).
# - It denotes the true positives meaning the number of positive instances (approved applications) predicted by the model correctly(TP).
# - It denotes the number of postive instances predicted by our model wrongly(this is not good, in the financial field)(FP).
#
# +
# Import confusion_matrix
from sklearn.metrics import confusion_matrix
# Use logreg to predict instances from the test set and store it
y_pred = reg.predict(rescaledX_test)
# Get the accuracy score of logreg model and print it
# score() on a classifier returns mean accuracy on the given test data.
print("Accuracy of logistic regression classifier: ", reg.score(rescaledX_test, y_test))
# Print the confusion matrix of the logreg model
# Layout (per the notes above): [[TN, FP], [FN, TP]].
print(confusion_matrix(y_test, y_pred))
# -
# ## Hyper parameter Tuning
#
# ### Grid searching and making the model perform better
# Our model was pretty good! It was able to yield an accuracy score of just over 84%.
#
# For the confusion matrix, the first element of the of the first row of the confusion matrix denotes the true negatives meaning the number of negative instances (denied applications) predicted by the model correctly. And the last element of the second row of the confusion matrix denotes the true positives meaning the number of positive instances (approved applications) predicted by the model correctly.
#
# Let's see if we can do better. We can perform a grid search of the model parameters to improve the model's ability to predict credit card approvals.
# scikit-learn's implementation of logistic regression consists of different hyperparameters but we will grid search over the following two:
# - tol
# - max_iter
#
# GridSearchCV takes a dictionary that describes the parameters that could be tried on a model to train it. The grid of parameters is defined as a dictionary, where the keys are the parameters and the values are the settings to be tested.
# 1. estimator: Pass the model instance for which you want to check the hyperparameters.
# 2. params_grid: the dictionary object that holds the hyperparameters you want to try
# 3. scoring: evaluation metric that you want to use, you can simply pass a valid string/ object of evaluation metric
# 4. cv: number of cross-validation you have to try for each selected set of hyperparameters
# 5. verbose: you can set it to 1 to get the detailed print out while you fit the data to GridSearchCV
# 6. n_jobs: number of processes you wish to run in parallel for this task if it -1 it will use all available processors.
# +
from sklearn.model_selection import GridSearchCV
# Candidate hyperparameter values for the logistic-regression grid search.
tol = [0.01, 0.001, 0.0001]
max_iter = [100, 150, 200]
# GridSearchCV expects a mapping of parameter name -> list of candidate values.
param_grid = {'tol': tol, 'max_iter': max_iter}
# -
# ## Finding the best performing model
# We have defined the grid of hyperparameter values and converted them into a single dictionary format which GridSearchCV() expects as one of its parameters. Now, we will begin the grid search to see which values perform best.
#
# We will instantiate GridSearchCV() with our earlier logreg model with all the data we have. Instead of passing train and test sets separately, we will supply X (scaled version) and y. We will also instruct GridSearchCV() to perform a cross-validation of five folds.
# We'll end the notebook by storing the best-achieved score and the respective best parameters.
# While building this credit card predictor, we tackled some of the most widely-known preprocessing steps such as scaling, label encoding, and missing value imputation. We finished with some machine learning to predict if a person's application for a credit card would get approved or not given some information about that person.
#
# Grid searching is a process of finding an optimal set of values for the parameters of a certain machine learning model. This is often known as hyperparameter optimization which is an active area of research. Note that, here we have used the word parameters and hyperparameters interchangeably, but they are not exactly the same.
# +
# Instantiate GridSearchCV with the required parameters
grid_model = GridSearchCV(estimator=reg, param_grid=param_grid, cv=5)
# Use scaler to rescale X and assign it to rescaledX
# NOTE(review): the scaler is re-fit on the FULL dataset here and the search
# then cross-validates on it, so per-fold scaling statistics leak across
# folds — acceptable for a demo, but a Pipeline would avoid it; confirm.
rescaledX = scaler.fit_transform(X)
# Fit data to grid_model
grid_model_result = grid_model.fit(rescaledX, y)
# Summarize results
best_score, best_params = grid_model_result.best_score_, grid_model_result.best_params_
print("Best: %f using %s" % (best_score, best_params))
# -
| Credit/credit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
# Dataset is fetched over HTTP; requires network access.
data = pd.read_csv('http://bit.ly/autompg-csv')
# NOTE(review): assumes the CSV exposes a column literally named 'hp'.
hp = np.array(data['hp'])
# -
print('Mean: %s' % hp.mean())
# Apply seaborn's default styling to the histogram below.
sns.set()
plt.hist(hp)
plt.title('Horse Power of Cars');
plt.xlabel('horse power');
plt.ylabel('number');
# Boolean masking demo: random 3x4 integers in [0, 10), then filter values < 5.
grid = np.random.randint(10, size=(3, 4))
print(grid)
below_five = grid < 5
print(below_five)
print(grid[below_five])
# +
# Draw 100 random 2-D points, then highlight a random sample of 20 of them.
X=np.random.randint(10,size=(100,2))
# choice with replace=False picks 20 distinct row indices.
ind=np.random.choice(X.shape[0],20,replace=False)
Xmark=X[ind]
plt.scatter(X[:,0],X[:,1])
# Hollow red circles drawn over the sampled points.
plt.scatter(Xmark[:,0],Xmark[:,1],facecolor="none",edgecolors="red",s=200);
# -
| PDSH/NumPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CS 455 - Module 3 Demonstration (Titanic Dataset)
#
# This example makes use of the [Kaggle](https://www.kaggle.com/) [Titanic Challenge's data set](https://www.kaggle.com/c/titanic/). Its purpose is to introduce you to some basic ML coding concepts including data exploration, classification, and verification and validation. To download the data set, you will need to log into [Kaggle.com](https://www.kaggle.com) and follow this [link to download all data files](https://www.kaggle.com/account/login?ReturnUrl=%2Fc%2F3136%2Fdownload-all) associated with the challenge, and finally save the files to a subfolder in datasets called titanic (i.e. create "datasets/titanic/" and copy the files there).
# ## Loading Data
#
# This snippet will load the data into a [Pandas Dataframe](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) to [read from the CVS files](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html#pandas.read_csv).
# +
import pandas as pd
import os
# Set path to data set and specify data set file names
# Expected layout on disk: datasets/titanic/{train,test,gender_submission}.csv
data_path = os.path.join("datasets","titanic")
train_filename = "train.csv"
test_filename = "test.csv"
gender_filename = "gender_submission.csv"
def read_csv(data_path, filename):
    """Load data_path/filename into a pandas DataFrame."""
    return pd.read_csv(os.path.join(data_path, filename))
# Read CSV file into Pandas Dataframes
train_df = read_csv(data_path, train_filename)
test_df = read_csv(data_path, test_filename)
# presumably Kaggle's sample-submission file (gender-based baseline) — verify.
gender_df = read_csv(data_path, gender_filename)
# -
# ## Explore Data Sets
#
#
# | Variable | Definition | Key |
# |----------|--------------------------------------------|-------------------------------------------------|
# | survival | Survival | 0 = No, 1 = Yes |
# | pclass | Ticket class | 1 = 1st, 2 = 2nd, 3 = 3rd |
# | sex | Sex | |
# | Age | Age in years | |
# | sibsp | # of siblings / spouses aboard the Titanic | |
# | parch | # of parents / children aboard the Titanic | |
# | ticket | Ticket number | |
# | fare | Passenger fare | |
# | cabin | Cabin number | |
# | embarked | Port of Embarkation | C = Cherbourg, Q = Queenstown, S = Southampton |
train_df.head()
train_df.info()
test_df.info()
train_df.describe()
test_df.head()
# NOTE(review): this repeats the test_df.info() call a few lines above.
test_df.info()
test_df.describe()
# ## Queries on DataFrames
# DataFrame.query filters rows using a boolean expression over column names.
survivors = train_df.query('Survived == 1')
survivors.head()
# Locate the record for the "Unsinkable Molly Brown" ne Margaret Tobin
# NOTE(review): "<NAME>" is an anonymization placeholder; substitute the real
# passenger name for this lookup to match anything.
molly = train_df[train_df['Name'].str.contains("<NAME>")]
molly.head()
# ## Visualizing Data using Matplotlib
# +
# %matplotlib inline
import matplotlib.pyplot as plt
# One histogram per numeric column of the training frame.
train_df.hist(figsize=(15,20))
# -
train_df["Age"].hist(bins=100)
train_df.plot(kind="scatter",x="Pclass",y="Fare")
# Class distribution among survivors only.
survivors_df = train_df.query('Survived == 1')
survivors_df['Pclass'].value_counts().plot(kind="bar")
# +
from pandas.plotting import scatter_matrix
# Pairwise scatter plots (histograms on the diagonal) for the numeric features.
attributes = ["Age", "SibSp","Parch", "Fare"]
scatter_matrix(train_df[attributes], figsize=(15, 20))
# -
# ## Data Preparation
#
# In data preparation, we must convert the data that we have received into numerical or categorical forms that our classifier can use.
#
# This process will involve using pipelines and transforms from Scikit-Learn. This process will include the conversion of numerical data into numpy arrays. Next, categorical data will be converted into numpy arrays for each category. The two arrays are merged to create a training set. Finally, we copy over our training data's set of labels, which we will use for training and testing.
# +
# Numeric Pipeline Includes
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import OneHotEncoder
import numpy as np
# +
# Create a DataFrameSelector, which will return all of the data associated with the tags we pass into it.
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline transformer that selects a fixed list of DataFrame columns."""
    def __init__(self, attributes):
        # attributes: list of column names to keep.
        self.attributes = attributes
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn.
        return self
    def transform(self, X):
        # Return the sub-frame containing only the selected columns.
        return X[self.attributes]
# +
# Extract Numeric Data:
#
# Builds a pipeline of transforms. First, the columns are extracted for desired data frame columns.
# Next, it reads the numeric values and replaces NaN cells with the median of the overall column.
# Numeric branch: select the four numeric columns, then median-impute NaNs.
numeric_pipe = Pipeline([
    ("Select", DataFrameSelector(["Age", "Fare", "SibSp", "Parch"])), # Selects Fields from dataframe
    ("Imputer", SimpleImputer(strategy="median")), # Fills in NaN w/ median value for its column
])
# Test the Pipeline
numeric_pipe.fit_transform(train_df)
# -
# Handle Missing Category Cells:
#
# This imputer reads the data elements passed into it and generates a count for each of the
# categories for each column. During the transform, it will replace NaN with the most frequently used
# for that column
#
class MostFrequentImputer(BaseEstimator, TransformerMixin):
    """Imputer that fills NaNs in each column with that column's most frequent value."""
    def fit(self, X, y=None):
        # value_counts() sorts by frequency, so index[0] is each column's mode.
        self.most_frequent = pd.Series([X[c].value_counts().index[0] for c in X],
                                       index = X.columns)
        return self
    def transform(self, X):
        # Column-wise fill using the per-column modes learned in fit().
        return X.fillna(self.most_frequent)
# +
# Extract Categorical Data
#
# Creates an pipeline to first reads and then transform the data with the MostFrequentImputer.
# Next, it does OneHot encoding, which converts each category into a binary numeric representation
# where each bit represents one category.
# Categorical branch: select, mode-impute, then one-hot encode.
# NOTE(review): scikit-learn >= 1.2 renamed OneHotEncoder's `sparse` parameter
# to `sparse_output`; update this call if running on a newer sklearn.
categories_pipe = Pipeline([
    ("Select", DataFrameSelector(["Pclass", "Sex", "Embarked"])), # Selects Fields from dataframe
    ("MostFreqImp", MostFrequentImputer()), # Fill in NaN with most frequent
    ("OneHot", OneHotEncoder(sparse=False)), # Onehot encode
])
categories_pipe.fit_transform(train_df)
# Produces np array with 3 bits for Pclass, 2 bits for sex, and three bits for embarked.
train_df
# +
# Assemble the Training Data
#
# We will use the two previously built pipelines to process the data and then will join the data together.
# Join the numeric and categorical branches into one feature matrix.
preprocessing_pipe = FeatureUnion(transformer_list = [
    ("numeric pipeline", numeric_pipe),
    ("categories pipeline", categories_pipe)
])
train_X_data = preprocessing_pipe.fit_transform(train_df)
# BUG FIX: the test set must be transformed with the pipeline FIT ON TRAIN.
# Calling fit_transform on test_df re-learned medians/modes/one-hot categories
# from the test data, which both leaks test information and can produce a
# feature matrix whose columns do not line up with the training matrix.
test_X_data = preprocessing_pipe.transform(test_df)
# Preview row of training data (one passenger's data)
train_X_data[3]
# -
train_y_data = train_df["Survived"]
# ## Building a Classifier, Training, and Predicting
# +
# K-Nearest Neighbors Classifier
from sklearn.neighbors import KNeighborsClassifier
k=10
classifier = KNeighborsClassifier(n_neighbors=k)
classifier.fit(train_X_data, train_y_data) #Train on our Kaggle's full training data set
# NOTE(review): this rebinds `survivors`, which earlier held a DataFrame of
# surviving passengers; here it becomes the predicted label array.
survivors = classifier.predict(test_X_data) #Run Prediction on Kaggle's test data set
survivors
# -
#
# For the Kaggle competition, you could format the output of survivors to the required format and submit it for assessment.
#
# However, since we do not know the truth data (i.e. survival label) for the test set. So, we need to use the training set to do some cross-validation of our work.
#
# ## Cross Validation Score Examples
from sklearn.model_selection import cross_val_score
# +
# KNN Classifier 10-fold Validation
k=10
knn_classifier = KNeighborsClassifier(n_neighbors=k)
knn_scores = cross_val_score(knn_classifier, train_X_data, train_y_data, cv=10) #cross-validate using 10-fold test
print(knn_scores)
knn_scores.mean() # mean score across all 10 runs
# +
# SVC Classifier 10-fold Validation
from sklearn.svm import SVC
# gamma="auto" pins the kernel coefficient to 1/n_features.
svc_classifier = SVC(gamma="auto")
svc_scores = cross_val_score(svc_classifier, train_X_data, train_y_data, cv=10) #cross-validate using 10-fold test
svc_scores.mean() # mean score across all 10 runs
# +
# Decision Tree 10-fold Validation
from sklearn.tree import DecisionTreeClassifier
# No random_state is set, so tree tie-breaking (and scores) can vary per run.
dt_classifier = DecisionTreeClassifier()
dt_scores = cross_val_score(dt_classifier, train_X_data, train_y_data, cv=10) #cross-validate using 10-fold test
dt_scores.mean() # mean score across all 10 runs
# -
# **NOTE:** This following snippet of text is directly from Geron's notebooks listed in the references.
#
# Instead of just looking at the mean accuracy across the 10 cross-validation folds, let's plot all 10 scores for each model, along with a box plot highlighting the lower and upper quartiles, and "whiskers" showing the extent of the scores (thanks to <NAME> for suggesting this visualization). Note that the `boxplot()` function detects outliers (called "fliers") and does not include them within the whiskers. Specifically, if the lower quartile is $Q_1$ and the upper quartile is $Q_3$, then the interquartile range $IQR = Q_3 - Q_1$ (this is the box's height), and any score lower than $Q_1 - 1.5 \times IQR$ is a flier, and so is any score greater than $Q3 + 1.5 \times IQR$.
# +
# Modified from Geron's coding example to include different classifiers
plt.figure(figsize=(8, 4))
# Overlay the 10 per-fold scores as dots at x = 1, 2, 3 for each model.
plt.plot([1]*10, knn_scores, ".")
plt.plot([2]*10, svc_scores, ".")
plt.plot([3]*10, dt_scores, ".")
# NOTE(review): matplotlib 3.9 renamed boxplot's `labels` kwarg to
# `tick_labels`; adjust if running on a newer matplotlib.
plt.boxplot([knn_scores, svc_scores, dt_scores], labels=("KNN", "SVC","Decision Tree"))
plt.ylabel("Accuracy", fontsize=14)
plt.show()
# -
# ## Confusion Matrix with Cross Validation
#
# The following examples with use the 10-fold cross-validation technique to make predictions and then display the confusion matrix.
from sklearn.metrics import confusion_matrix, precision_score, accuracy_score
from sklearn.metrics import recall_score, f1_score, precision_recall_curve, roc_curve
from sklearn.model_selection import cross_val_predict
# +
def plot_precision_recall_curve(y, y_score):
    """
    Prints a precision vs. recall curve.

    y: true binary labels.
    y_score: classifier scores/probabilities for the positive class.
    """
    precisions, recalls, thresholds = precision_recall_curve(y, y_score)
    plt.figure(figsize=(8, 6))
    plt.title("Precision-Recall Curve")
    plt.plot(recalls, precisions, "b-", linewidth=2)
    plt.xlabel("Recall", fontsize=16)
    plt.ylabel("Precision", fontsize=16)
    # Clamp both axes to [0, 1] since precision and recall are proportions.
    plt.axis([0, 1, 0, 1])
    plt.show()
# +
def plot_roc(y, y_score):
    """
    Prints a Receiver Operating Characteristic (ROC) Curve

    y: true binary labels.
    y_score: classifier scores/probabilities for the positive class.
    """
    fpr, tpr, thresholds = roc_curve(y, y_score)
    plt.figure(figsize=(8, 6))
    plt.title("ROC Curve")
    plt.plot(fpr, tpr, linewidth=2)
    # Dashed diagonal: performance of a random classifier, for reference.
    plt.plot([0, 1], [0, 1], 'k--')
    plt.axis([0,1,0,1])
    plt.xlabel("False Positive Rate (FPR)")
    plt.ylabel("True Positive Rate (TPR)")
    plt.show()
# -
def evaluate_classifier(y, y_pred):
    """
    Print the confusion matrix, precision, recall, and F1 score for a set of
    true labels `y` and predicted labels `y_pred`.
    """
    print("Confusion Matrix:")
    print(confusion_matrix(y, y_pred))
    # FIX: corrected the misspelled "Pecision" label in the printed output.
    print("Precision Score = " + str(precision_score(y, y_pred)))
    print("Recall Score = " + str(recall_score(y, y_pred)))
    print("F1 Score = " + str(f1_score(y, y_pred)))
# ### KNN Classifier Performance Metrics
# +
# KNN Performance
knn_y_pred = cross_val_predict(knn_classifier, train_X_data, train_y_data, cv=10)
knn_y_probas = cross_val_predict(knn_classifier, train_X_data, train_y_data, cv=10, method="predict_proba")
# Column 1 of predict_proba is the probability of the positive (survived) class.
knn_y_scores = knn_y_probas[:,1]
plot_precision_recall_curve(train_y_data, knn_y_scores)
plot_roc(train_y_data, knn_y_scores)
evaluate_classifier(train_y_data, knn_y_pred)
# -
# ### SVC Classifier Performance Metrics
# +
# SVC Performance
svc_y_pred = cross_val_predict(svc_classifier, train_X_data, train_y_data, cv=10)
# SVC exposes decision_function rather than probabilities here; its signed
# margin serves as the score for the PR and ROC curves.
svc_y_scores = cross_val_predict(svc_classifier, train_X_data, train_y_data, cv=10, method="decision_function")
plot_precision_recall_curve(train_y_data, svc_y_scores)
plot_roc(train_y_data, svc_y_scores)
evaluate_classifier(train_y_data, svc_y_pred)
# -
# ### Decision Tree Classifier Performance
# +
# Decision Tree Performance
dt_y_pred = cross_val_predict(dt_classifier, train_X_data, train_y_data, cv=10)
dt_y_probas = cross_val_predict(dt_classifier, train_X_data, train_y_data, cv=10, method="predict_proba")
dt_y_scores = dt_y_probas[:,1]
plot_precision_recall_curve(train_y_data, dt_y_scores)
plot_roc(train_y_data, dt_y_scores)
evaluate_classifier(train_y_data, dt_y_pred)
# -
# ## Building a KNN
#
# Below, an implementation of the K-Nearest Neighbors classifier is implemented. It is followed by a simple confusion matrix calculator. Finally, it is demonstrated.
# ### Defining the KNN
class KNN_Demo:
    """
    Minimal K-Nearest Neighbors classifier built on NumPy, for demonstration.

    Call fit() with the training points and their class labels, then
    predict() with an array of unknown points.
    """
    def dist(self, A, B):
        """
        Return the Euclidean distance between points A and B.
        """
        return np.sqrt(np.sum((A-B)**2)) #Euclidean Distance
    def kclosest(self, unknown, neighbors, k=1):
        """
        Return the indices of the k neighbors closest to `unknown`.
        """
        dtype = [('index', int), ('score', float)] # (neighbor index, distance)
        # BUG FIX: the original range started at 1, which silently excluded
        # the first training sample (index 0) from every neighbor search.
        distances = np.array([(idx, self.dist(unknown,neighbors[idx]))
                              for idx in range(neighbors.shape[0])], dtype=dtype)
        # Sort distances in ascending order
        distances = distances[distances["score"].argsort()]
        # Return a list of indices of the k closest neighbors
        return distances[:k]['index']
    def knn_class(self, unknown, neighbors, neighbor_classes, k=1):
        """
        Classify `unknown` by majority vote among its k nearest neighbors.
        Ties are broken in favor of the class encountered first.
        """
        # Find indices of the k-closest neighbors
        knearest = self.kclosest(unknown, neighbors, k)
        votes = {}
        # Count votes for classes
        for neighbor_idx in knearest:
            neighbor_class = neighbor_classes[neighbor_idx]
            votes[neighbor_class] = votes.get(neighbor_class, 0) + 1
        # return class with highest votes
        return max(votes, key=votes.get)
    def fit(self, neighbors, classes):
        """
        Receives the training data set of neighbors and their classes
        """
        self.neighbors = neighbors
        self.classes = classes
    def predict(self, inputs, k=1):
        """
        Predicts the category using the knn_class method for each unknown input
        """
        return [self.knn_class(X, self.neighbors, self.classes, k) for X in inputs]
# ### Implementing a Confusion Matrix Builder
def confusion_demo(y_truth, y_pred):
    """
    Build a binary-classifier confusion matrix as [tn, fp, fn, tp].

    Label 0 is treated as the negative class and label 1 as the positive
    class; the return order matches sklearn's confusion_matrix flattened
    row by row.
    """
    tn = fp = fn = tp = 0
    truths = list(y_truth)
    for idx in range(len(truths)):
        pred, truth = y_pred[idx], truths[idx]
        if pred == 0 and truth == 0:
            tn += 1
        elif pred == 0 and truth == 1:
            fn += 1
        elif pred == 1 and truth == 0:
            fp += 1
        else:
            tp += 1
    return [tn, fp, fn, tp]
# ### Execute Demonstration of our KNN Classifier
# +
from sklearn.model_selection import train_test_split
# Helper function to split the training and test sets
# NOTE(review): no random_state is passed, so the split (and the metrics
# printed below) change on every run.
X_train, X_test, y_train, y_test = train_test_split(train_X_data, list(train_y_data), test_size=0.33)
# Build, Train, and Predict with KNN Classifier
knn = KNN_Demo()
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test, k=5)
# Determine confusion matrix from the result.
tn, fp, fn, tp = confusion_demo(y_test, y_pred)
# Derive our metrics for overall performance
# (each ratio would raise ZeroDivisionError if its denominator count is zero)
recall = tp / (tp + fn)
precision = tp / (tp + fp)
accuracy = (tp + tn) / (tp + tn + fp + fn)
f1 = 2 * precision * recall / (precision + recall)
print("Accuracy: " + str(accuracy))
print("Recall: " + str(recall))
print("Precision: " + str(precision))
print("F1: " + str(f1))
# -
#
# # References
#
# * <NAME>, "Machine Learning Notebooks," URL: [https://github.com/ageron/handson-ml] (last accessed 2019-02-02)
| TitanicDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="uCW_W0dQQ9j8"
# # Visualization of training and test curves with different optimizers
# This notebook is modified from https://github.com/Luolc/AdaBound/blob/master/demos/cifar10/visualization.ipynb.
# We compare the performace of AdaBelief optimizer and 8 other optimizers (SGDM, AdaBound, Yogi, Adam, MSVAG, RAdam, AdamW, Fromage).
# The training setting is the same as the official implementation of AdaBound: https://github.com/Luolc/AdaBound,
# hence we exactly reproduce the results of AdaBound.
# AdaBound is claimed to achieve "fast convergence and good generalization", and in this project we will show that AdaBelief outperforms AdaBound and other optimizers.
# + colab={"base_uri": "https://localhost:8080/"} id="rGQrTFSRRc4m" executionInfo={"status": "ok", "timestamp": 1621020706037, "user_tz": 420, "elapsed": 17764, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjKtdHGvfv8HTZcNCniDsjAV_QHzYzsOdg23rUc=s64", "userId": "02697943247150126715"}} outputId="10738f9b-95ac-422f-b303-44c20d6a2f6a"
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
FOLDERNAME = 'cs361_project/adala-optimizer/classification_cifar10'
assert FOLDERNAME is not None, "[!] Enter the foldername."
import sys
sys.path.append('/content/drive/My Drive/{}'.format(FOLDERNAME))
# %cd drive/My\ Drive/$FOLDERNAME
# + id="gaV-m0jtQ9kC" executionInfo={"status": "ok", "timestamp": 1621020709286, "user_tz": 420, "elapsed": 7788, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjKtdHGvfv8HTZcNCniDsjAV_QHzYzsOdg23rUc=s64", "userId": "02697943247150126715"}}
import os
# %matplotlib inline
import matplotlib.pyplot as plt
import torch
import numpy as np
# Larger default fonts for axis labels and titles on all figures below.
params = {'axes.labelsize': 20,  # axis-label font size (pt)
         'axes.titlesize': 20,  # title font size (pt)
         }
plt.rcParams.update(params)  # applies globally to subsequent plots
# + id="-puln9HcQ9kL" executionInfo={"status": "ok", "timestamp": 1621022075525, "user_tz": 420, "elapsed": 594, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjKtdHGvfv8HTZcNCniDsjAV_QHzYzsOdg23rUc=s64", "userId": "02697943247150126715"}}
def get_data(names):
    """Load each named training log from the ./curve folder via torch.load.

    Returns a dict mapping log-file name -> deserialized log object,
    preserving the order of `names`.
    """
    loaded = {}
    for name in names:
        loaded[name] = torch.load(os.path.join('./curve', name))
    return loaded
def plot(names, curve_type='train', labels = None, ylim=(80,101), loc = 'upper left'):
    """Plot accuracy-vs-epoch curves for several optimizer runs.

    Args:
        names: log-file names under ./curve (one per optimizer run).
        curve_type: 'Train' or 'Test' (now case-insensitive) — selects the
            '<type>_acc' series in each log; also used verbatim in the title.
        labels: legend entries, index-aligned with names. Defaults to the
            file names themselves (the original crashed on zip(None) here).
        ylim: y-axis limits.
        loc: legend location string for matplotlib.

    Fixes: the original compared curve_type against both 'test' and 'Test',
    so with the actual 'Test' calls the blank-line branch never fired.
    """
    if labels is None:
        labels = names
    ct = curve_type.lower()  # normalize once; both branches below use it
    plt.figure(figsize=(8, 6), dpi=80)
    plt.ylim(ylim)
    curve_data = get_data(names)
    if ct == 'test':
        print()  # blank line before the per-optimizer max-accuracy report
    for name, label in zip(curve_data.keys(), labels):
        acc = np.array(curve_data[name]['{}_acc'.format(ct)])
        if ct == 'test':
            print(label, 'acc:', max(acc))
        if label == 'AdaLA':
            plt.plot(acc, '-', label=label)  # solid line highlights AdaLA
        elif label == 'AdaBelief':
            # WARNING(review): this deliberately subtracts random noise from
            # the AdaBelief curve before plotting, artificially lowering it
            # relative to the logged results. Kept for output parity, but it
            # misrepresents the data — confirm this is intended.
            noise = np.random.rand(200)
            acc -= noise / 5
            plt.plot(acc, '--', label=label)
        else:
            plt.plot(acc, '--', label=label)
    plt.grid()
    plt.legend(fontsize=14, loc=loc)
    plt.title('{} accuracy ~ Training epoch'.format(curve_type))
    plt.xlabel('Training Epoch')
    plt.ylabel('Accuracy')
    plt.show()
# + [markdown] id="QWn7iG0xQ9kM"
# # ResNet
# Plot the training and test curves for all optimizers in one plot. "names" is a list containing the log files in "/curve" folder, "labels" is the corresponding legends for different optimizers. Note that "names" and "labels" must match (log for the i-th element in "labels" in the i-th element in "names")
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ZPzZWeDmQ9kN" executionInfo={"status": "ok", "timestamp": 1621022077585, "user_tz": 420, "elapsed": 2166, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjKtdHGvfv8HTZcNCniDsjAV_QHzYzsOdg23rUc=s64", "userId": "02697943247150126715"}} outputId="7c81614d-e621-4454-de1b-14cc85ef406e"
# Per-optimizer run settings; the log-file name is '<arch>-<settings>'.
_run_settings = [
    'adala-lr0.001-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'adabelief-lr0.001-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'sgd-lr0.1-momentum0.9-wdecay0.0005-run0-resetFalse',
    'adabound-lr0.001-betas0.9-0.999-final_lr0.1-gamma0.001-wdecay0.0005-run0-resetFalse',
    'yogi-lr0.001-betas0.9-0.999-eps0.001-wdecay0.0005-run0-resetFalse',
    'adam-lr0.001-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
    'msvag-lr0.1-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'radam-lr0.001-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
    'adamw-lr0.001-betas0.9-0.999-wdecay0.01-eps1e-08-run0-resetFalse',
    'fromage-lr0.01-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
]
names = ['resnet-' + s for s in _run_settings]
# Legend entries, index-aligned with `names`.
labels = 'AdaLA AdaBelief SGD AdaBound Yogi Adam MSVAG RAdam AdamW Fromage'.split()
plot(names, 'Train', labels)
plot(names, 'Test', labels, ylim=(88, 96))
# + [markdown] id="Vl5ilamLQ9kP"
# # DenseNet
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="2mzAX-2sQ9kT" executionInfo={"status": "ok", "timestamp": 1621022304286, "user_tz": 420, "elapsed": 2418, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjKtdHGvfv8HTZcNCniDsjAV_QHzYzsOdg23rUc=s64", "userId": "02697943247150126715"}} outputId="caed45c8-656e-47b1-dd7b-819134bdfbba"
# Same optimizer runs as the ResNet section, logged for DenseNet.
_run_settings = [
    'adala-lr0.001-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'adabelief-lr0.001-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'sgd-lr0.1-momentum0.9-wdecay0.0005-run0-resetFalse',
    'adabound-lr0.001-betas0.9-0.999-final_lr0.1-gamma0.001-wdecay0.0005-run0-resetFalse',
    'yogi-lr0.001-betas0.9-0.999-eps0.001-wdecay0.0005-run0-resetFalse',
    'adam-lr0.001-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
    'msvag-lr0.1-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'radam-lr0.001-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
    'adamw-lr0.001-betas0.9-0.999-wdecay0.01-eps1e-08-run0-resetFalse',
    'fromage-lr0.01-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
]
names = ['densenet-' + s for s in _run_settings]
# Legend entries, index-aligned with `names`.
labels = 'AdaLA AdaBelief SGD AdaBound Yogi Adam MSVAG RAdam AdamW Fromage'.split()
plot(names, 'Train', labels)
plot(names, 'Test', labels, ylim=(88, 96))
# + [markdown] id="koGm7wgfQ9kU"
# ## VGG Network
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="-iLUj_q2Q9kU" executionInfo={"status": "ok", "timestamp": 1621022323336, "user_tz": 420, "elapsed": 2312, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjKtdHGvfv8HTZcNCniDsjAV_QHzYzsOdg23rUc=s64", "userId": "02697943247150126715"}} outputId="821bcaf4-7217-491b-f24f-163a107d70b0"
# Same optimizer runs again, logged for the VGG network.
_run_settings = [
    'adala-lr0.001-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'adabelief-lr0.001-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'sgd-lr0.1-momentum0.9-wdecay0.0005-run0-resetFalse',
    'adabound-lr0.001-betas0.9-0.999-final_lr0.1-gamma0.001-wdecay0.0005-run0-resetFalse',
    'yogi-lr0.001-betas0.9-0.999-eps0.001-wdecay0.0005-run0-resetFalse',
    'adam-lr0.001-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
    'msvag-lr0.1-betas0.9-0.999-eps1e-08-wdecay0.0005-run0-resetFalse',
    'radam-lr0.001-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
    'adamw-lr0.001-betas0.9-0.999-wdecay0.01-eps1e-08-run0-resetFalse',
    'fromage-lr0.01-betas0.9-0.999-wdecay0.0005-eps1e-08-run0-resetFalse',
]
names = ['vgg-' + s for s in _run_settings]
# Legend entries, index-aligned with `names`.
labels = 'AdaLA AdaBelief SGD AdaBound Yogi Adam MSVAG RAdam AdamW Fromage'.split()
plot(names, 'Train', labels)
plot(names, 'Test', labels, ylim=(84, 92))
# + id="gaAprl-DYq4T"
| classification_cifar10/visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pulp import *
# +
# Demand and unit production cost per product for each of three periods.
demand = {'A':[0,0,0],'B':[8,7,6]}
costs = {'A':[20,17,18],'B':[15,16,15]}
time = [0,1,2]  # NOTE(review): shadows the stdlib `time` module in this namespace
prod = ['A','B']
# -
# Minimize total production cost over all products and periods.
model = LpProblem('Aggregate Production Planning', LpMinimize)
# One non-negative integer decision variable per (product, period): units produced.
X = LpVariable.dicts('prod',[(p,t) for p in prod for t in time], lowBound=0, cat='Integer')
model += lpSum([costs[p][t] * X[(p, t)] for p in prod for t in time])
# Production must cover demand in every period.
for p in prod:
    for t in time:
        model += X[(p,t)] >= demand[p][t]
# Linking constraint between products per period.
# NOTE(review): the notebook is titled "Common Constraint Mistake" — this may be
# deliberately written on the wrong side (3*B <= A); do not "fix" without confirming.
for t in time:
    model += 3* X[('B', t)] <= X[('A', t)]
model
# +
model.solve()
# Print the optimal value of every decision variable.
for v in model.variables():
    print(v.name, "=", v.varValue)
# The optimised objective function value is printed to the screen
print("The optimised objective function= ", value(model.objective))
| PULP/tutorial/.ipynb_checkpoints/3.1 Common Constraint Mistake-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Make your own Plots!
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# Two simple line plots; the trailing semicolon suppresses the notebook repr.
plt.plot([1, 2, 3, 4], [1, 4, 9, 16], linewidth=1);
plt.plot([20, 21, 23, 24, 25, 26], [2, 16, 23, 25, 7, 31], linewidth=2);
# +
# Bar chart: number of students enrolled in each course.
courses = ["MBA - BA", "Mtech - DS", "Other"]
y_pos = np.arange(len(courses))  # one bar slot per course
students = [23, 37,9]
plt.bar(y_pos, students, align='center', alpha=0.5, color="green")
plt.xticks(y_pos, courses)
plt.ylabel('Number of Studens')  # NOTE(review): "Studens" typo is in the rendered label; fixing it would change output
plt.title('Number of Students in Different Courses')
plt.show()
# -
y_pos  # echo the bar positions (notebook display)
# +
# Grouped bar chart: MBA vs. Mtech scores across four sessions.
n_groups = 4
mba = (90, 55, 40, 65)
mtech = (85, 62, 54, 20)
fig, ax = plt.subplots()
index = np.arange(n_groups)   # left edge of each session group
bar_width = 0.35
opacity = 0.8
# Two bar series, offset by one bar width so they sit side by side.
rects1 = plt.bar(index, mba, bar_width, alpha=opacity, color='blue', label='MBA')
rects2 = plt.bar(index + bar_width, mtech, bar_width, alpha=opacity, color='Yellow', label='Mtech')
plt.xlabel('Sessions')
plt.ylabel('Scores')
plt.title('Scores per Session')
plt.xticks(index + bar_width, ('Session 1', 'Session 2', 'Session 3', 'Session 4'))
plt.legend()
#plt.tight_layout()  # left disabled in the original
plt.show()
# +
# Pie chart of the same student counts; axis('equal') keeps the pie circular.
plt.pie(students, labels = courses, colors = ['Gold', 'MediumBlue', 'SpringGreen'])
plt.axis('equal');
# -
# Heatmap (matshow) of a small integer matrix, with a colorbar at its
# four distinct values, then print the raw matrix for comparison.
x = np.array([[2, 4, 6, 2],
[4, 8, 6, 2],
[4, 8, 2, 6],
[8, 2, 4, 6]])
plt.matshow(x)
plt.colorbar(ticks=[2, 4, 6, 8])
print(x)
| PythonBasics_1/Misc - Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="jZZyH7El-IT9" colab_type="text"
# In this example, we use the PredictIt odds of democrats winning the house, senate, and presidency to make a prediction on whether they will sweep all three.
#
# PredictIt is composed of markets, which in turn are composed of questions. Although a PredictIt market has a literal question as its name, for consistency with Metaculus and Foretold, ergo considers each option in a market to be a separate "question".
#
# For instance, in the PredictIt market "Which party will win the 2020 U.S. presidential election?", one such question would be the democrat option. This is a binary question that essentially asks "will democrats win the 2020 U.S. presidential election".
#
# One thing to keep in mind is that the 10% profit fee, 5% withdrawal fee, and $850 betting cap are known to decrease the efficiency of markets. Long shot bets tend to be inflated while safer bets with lower margins are deflated. They also inflate prices in markets with linked outcomes, resulting in situations where adding up the prices of each contract comes to over $1.
# + [markdown] id="I9sieYAtOvMz" colab_type="text"
# ## SETUP
# + id="lJlJ76CoKOTp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 660} outputId="5e3d6aca-7edf-46c9-8e21-41c71dc1d873"
# !pip install --progress-bar off --quiet poetry
# !pip install --progress-bar off --quiet git+https://github.com/oughtinc/ergo.git#egg=ergo[notebooks]
# + [markdown] id="MGHry-zC68jP" colab_type="text"
# # Code
#
#
# + [markdown] id="Fu_S10K-7tp8" colab_type="text"
# Import the required packages.
# + id="3nqWcuKa69BE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="175f3a20-0fdd-4207-ac19-a50634d01f62"
import ergo
from ergo.contrib.predictit.fuzzy_search import search_market, search_question
# + [markdown] id="03VbeFAu7wZz" colab_type="text"
# Create an instance of a PredictIt scraper.
# + id="eeejoSF77Vgh" colab_type="code" colab={}
pi = ergo.PredictIt()  # scraper client for PredictIt markets
# + [markdown] id="vOMtNFQV73zd" colab_type="text"
# Search for the markets.
# + id="_P-SvHLR7qX0" colab_type="code" colab={}
# Fuzzy-search the three chamber/presidency control markets by keyword.
m_senate = search_market(pi, "party control senate")
m_house = search_market(pi, "party control house")
m_pres = search_market(pi, "party win pres")
# + [markdown] id="PJZKregf8kSR" colab_type="text"
# Print the market names to ensure we found the right ones.
# + id="uY9M9WKG8BZE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="635af997-7138-4eee-ab7d-1beb448ff326"
# Show the resolved market names so a bad fuzzy match is easy to spot.
for chamber, market in (("Senate", m_senate), ("House", m_house), ("Pres", m_pres)):
    print(chamber + ": " + market.name)
# + [markdown] id="yy9N_Bwx9feQ" colab_type="text"
# Search for the democrat question.
# + id="TwfIDL859jJK" colab_type="code" colab={}
# Within each market, fuzzy-search for the Democratic-party option.
q_senate = search_question(m_senate, "dem")
q_house = search_question(m_house, "dem")
q_pres = search_question(m_pres, "dem")
# + [markdown] id="onwEAOOL91OY" colab_type="text"
# Print the contract names to ensure we found the right ones.
# + id="UsB9SAmc9rxB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="6c7a49be-796f-4c31-b844-86cb85f8d6df"
# Show the matched question names so a wrong fuzzy match is easy to spot.
for chamber, question in (("Senate", q_senate), ("House", q_house), ("Pres", q_pres)):
    print(chamber + ": " + question.name)
# + [markdown] id="-H4iCxMIAPLP" colab_type="text"
# Multiply the odds of the respective questions to predict the odds of a clean sweep.
# + id="p04YZelVAON5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="59dd30c4-ceb5-424c-fa45-b9ff1271290c"
# Naive product of the three last-trade prices — treats the races as
# independent, which they are not (see the comparison with the sweep market below).
clean_sweep = q_senate.lastTradePrice * q_house.lastTradePrice * q_pres.lastTradePrice
pct = round(clean_sweep * 100, 2)
print("Based on these three markets, the odds of a democratic clean sweep is " + str(pct) + "%")
# + [markdown] id="_E_Xt8hKA72_" colab_type="text"
# Interestingly enough, this is much lower than the odds of a sweep in the specific clean sweep market. This makes sense, as the three markets are highly correlated.
# + id="fyGnBfXVAiNB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="0b1e1431-898d-4cf9-89ad-40ad943f12a3"
# Compare against PredictIt's dedicated clean-sweep market.
m_sweep = search_market(pi, "dem clean sweep")
print(m_sweep.name)
# Since this market only contains a single question, we can find it by getting the first element from a list of all the questions.
q_sweep = list(m_sweep.questions)[0]
print(str(q_sweep.lastTradePrice * 100) + "%")
| notebooks/predictit_clean_sweep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="fdY8Zi9XlpcT" executionInfo={"status": "ok", "timestamp": 1630486070558, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgvY6o4MuHT6YUeaqCq-6tAM61bz0NJJ6eXkl6VmA=s64", "userId": "03115065192551383929"}}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# + [markdown] id="fbw0BNZ1lpcW"
# 1) Create Two numpy array of size 3 X 2 and 2 X 3
# 2) Randomly Initalize that array
# + id="B_paHCCglpcX" executionInfo={"status": "ok", "timestamp": 1630486075436, "user_tz": -330, "elapsed": 11, "user": {"displayName": "d<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgvY6o4MuHT6YUeaqCq-6tAM61bz0NJJ6eXkl6VmA=s64", "userId": "03115065192551383929"}}
# Two small integer matrices: 3x2 and 2x3.
nparray1 = np.array([(3, 5), (5, 5), (6, 2)])
nparray2 = np.array([(9, 9, 5), (8, 1, 4)])
# + [markdown] id="epbT2ezhlpcY"
# 3) Perform matrix multiplication
# +
# Matrix product via numpy (3x2 @ 2x3 -> 3x3).
result1 = np.matmul(nparray1, nparray2)
print(result1)
# + [markdown] id="oGd1a3wNlpcb"
# 4) Perform elementwise matrix multiplication
# +
# The same product computed "by hand" with explicit index loops
# (despite the heading, this is a full matrix product, not elementwise).
result2 = np.array([(0, 0, 0), (0, 0, 0), (0, 0, 0)])
for row in range(len(nparray1)):
    for col in range(len(nparray2[0])):
        for inner in range(len(nparray2)):
            result2[row, col] += nparray1[row, inner] * nparray2[inner, col]
print(result2)
# + [markdown] id="M9Ni-MWRlpcd"
# 5) Find mean of first matrix
# +
# Scalar mean over every element of the first matrix.
mean = np.mean(nparray1)
print(mean)
# + [markdown] id="WG22dHFalpcf"
# 6) Convert Numeric entries(columns) of mtcars.csv to Mean Centered Version
# + colab={"base_uri": "https://localhost:8080/"} id="iNK_Yjhrlpcg" executionInfo={"status": "ok", "timestamp": 1630486115616, "user_tz": -330, "elapsed": 345, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgvY6o4MuHT6YUeaqCq-6tAM61bz0NJJ6eXkl6VmA=s64", "userId": "03115065192551383929"}} outputId="5a243bc9-2ba0-4aee-e9e2-2dd527745480"
# Mean-center every numeric column of mtcars.csv, printing each centered array.
# Replaces eleven copy-pasted cells with one loop; the printed output and the
# final values of `arr1`/`nparrayCentered` (the 'carb' column) are unchanged.
data = pd.read_csv('mtcars.csv')
for _col in ['mpg', 'cyl', 'disp', 'hp', 'drat', 'wt',
             'qsec', 'vs', 'am', 'gear', 'carb']:
    arr1 = np.array(data[_col])
    nparrayCentered = arr1 - np.mean(arr1)  # subtract the column mean
    print(nparrayCentered)
| LAB-1/058_01_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env
# language: python
# name: env
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (12,8)  # large default figure size
import numpy as np
import tensorflow as tf
import keras
import pandas as pd
# Load every serialized example from the evaluation TFRecord into memory.
t = list(tf.python_io.tf_record_iterator('../Data/eval_laugh_speech_subset.tfrecord'))
# Parse the first record and display its video id (sanity check).
tf_example = tf.train.Example.FromString(t[0])
tf_example.features.feature['video_id'].bytes_list.value[0].decode()
len(t)  # number of evaluation examples
# +
from keras.preprocessing.sequence import pad_sequences
def data_generator(batch_size, tfrecord):
    """Yield (X, y) batches of audio-embedding sequences from a TFRecord, forever.

    X: float32 array of shape (batch_size, max_len, 128) — per-frame 128-d
       audio embeddings, zero-padded to max_len frames.
    y: list of bools — True when the example carries any label in
       laugh_labels (assumed to be the laughter-family class ids — TODO confirm
       against the label map used for this dataset).
    """
    max_len=10  # pad every clip to 10 frames
    records = list(tf.python_io.tf_record_iterator(tfrecord))
    rec_len = len(records)
    print(rec_len)
    shuffle = np.random.permutation(range(rec_len))
    num_batches = rec_len//batch_size  # partial final batch is dropped
    j = 0
    laugh_labels = [16, 17, 18, 19, 20, 21]
    while True:
        X = []
        y = []
        for idx in shuffle[j*batch_size:(j+1)*batch_size]:
            example = records[idx]
            tf_seq_example = tf.train.SequenceExample.FromString(example)
            example_label = list(np.asarray(tf_seq_example.context.feature['labels'].int64_list.value))
            # True iff the clip has at least one laughter-family label.
            laugh_bin = any((True for x in example_label if x in laugh_labels))
            y.append(laugh_bin)
            n_frames = len(tf_seq_example.feature_lists.feature_list['audio_embedding'].feature)
            audio_frame = []
            # Decode each frame's quantized uint8 embedding to float32.
            for i in range(n_frames):
                audio_frame.append(np.frombuffer(tf_seq_example.feature_lists.feature_list['audio_embedding'].
                                                 feature[i].bytes_list.value[0],np.uint8).astype(np.float32))
            # Zero-pad short clips up to max_len frames.
            # NOTE(review): if a clip has more than max_len frames the pad list is
            # empty and X becomes ragged — assumes every clip has <= 10 frames.
            pad = [np.zeros([128], np.float32) for i in range(max_len-n_frames)]
            audio_frame += pad
            X.append(audio_frame)
        j += 1
        if j >= num_batches:
            # Epoch finished: reshuffle the record order and start over.
            shuffle = np.random.permutation(range(rec_len))
            j = 0
        X = np.array(X)
        yield X, y
# -
import itertools
batch_size = 32
# Draw one pass (586 examples // 32 per batch) from the generator and stack
# it into a single evaluation set.
val_gen = data_generator(batch_size,'../Data/eval_laugh_speech_subset.tfrecord')
val = list(itertools.islice(val_gen,586//32))
X = [d[0] for d in val]
y = [d[1] for d in val]
X = np.concatenate(X)
y = np.concatenate(y)
from keras.models import load_model
# Score the same inputs with all three trained models.
m1 = load_model('../Models/LogisticRegression_100Epochs.h5')
p1 = m1.predict(X)
m2 = load_model('../Models/LSTM_SingleLayer_100Epochs.h5')
p2 = m2.predict(X)
m3 = load_model('../Models/LSTM_ThreeLayer_100Epochs.h5')
p3 = m3.predict(X)
# +
def get_top_class_display_name(output_vector):
    """Return the label of the class with the highest activation.

    The six laughter classes map to True and "none of the above"
    (the last index) maps to False.
    """
    labels = [True, True, True, True, True, True, False]
    # Index of the largest activation (last element of the ascending argsort,
    # which matches the original reversed-argsort[0] including tie handling).
    top = np.argsort(output_vector)[-1]
    return labels[top]
# NOTE(review): `vp5` is not defined anywhere in this notebook, so this line
# raises NameError — looks like a leftover from an earlier revision (p1/p2/p3?).
p5 = [get_top_class_display_name(p) for p in vp5]
# -
from sklearn.metrics import roc_curve
# ROC curves for each model over the shared evaluation set.
fpr, tpr, thresholds = roc_curve(y,p1)
fpr2, tpr2, thresholds2 = roc_curve(y,p2)
fpr3, tpr3, thresholds3 = roc_curve(y,p3)
# +
plt.plot(fpr,tpr, 'o-', label='log reg')
plt.plot(fpr2,tpr2, 'x-', label='lstm1')
plt.plot(fpr3,tpr3, 'o-', label='lstm3')
plt.plot([0,1],[1,0],'k:')  # anti-diagonal reference line
plt.xlabel('False Positive Rate', size=18)
plt.ylabel('True Positive Rate', size=18)
plt.legend()
# -
from sklearn.metrics import roc_auc_score
# Area under the ROC curve per model.
print(f'Logistic Regression: {roc_auc_score(y, p1):0.2f}')
print(f'LSTM 1: {roc_auc_score(y, p2):0.2f}')
print(f'LSTM 3: {roc_auc_score(y, p3):0.2f}')
from sklearn.metrics import accuracy_score
# Accuracy at a fixed 0.6 decision threshold.
print(f'Logistic Regression: {accuracy_score(y, p1>0.6):0.2f}')
print(f'LSTM 1: {accuracy_score(y, p2>0.6):0.2f}')
print(f'LSTM 3: {accuracy_score(y, p3>0.6):0.2f}')
# False-positive rate of the logistic model plotted against its ROC thresholds.
plt.plot(thresholds, fpr)
tpr2[8]  # spot-check one TPR value for the single-layer LSTM
# NOTE(review): removed a truncated leftover line (`from s`) that ended the
# original cell — it was an incomplete import and a SyntaxError.
| Notebooks/Model Evaluation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="ZFpKPmeOUPS5"
# # Scrapper de sites
#
# > Récupère le contenu de chacun des URL classé par typologie (variable ndf dans le code), chargé depuis le fichier Sites.json.
#
# > Stocke le résultat dans ndf qui conditionne la variable "fichier de sortie". Celle-ci peut être adaptée pour pointer sur une zone correcte de votre drive dans la zone "Personnalisation" dénotée ci-dessous. Les données des sites collectés sont stockées au format pickle dans le dossier /OUT/ContenusSites classé par typologie selon Sites.json.
#
# > La variable de sortie est de type dictionnaire pour conserver les url sources (flexibilité pour pouvoir corriger). Cf. commentaires infra.
#
#
#
# + id="v0A5eMDAyoOJ" colab={"base_uri": "https://localhost:8080/"} outputId="a0716c0c-b398-489d-90e9-392e7b43571c"
# Il faut initialiser le dossier à partir de GitHub
# cette action est à faire une seule fois lors de la première utilisation
# !git clone https://github.com/Patent2net/LexiComWebPartners.git
# + colab={"base_uri": "https://localhost:8080/"} id="e8pJlzgEbvLM" outputId="fd99116e-a4bd-4ebe-9a57-999f6cf03b0c"
# !git clone https://github.com/Patent2net/LexiComWeb.git
# + id="ra410KOWyoyR"
# Input/output roots inside the Colab VM (populated by the git clones above).
stockageEntree = "/content/LexiComWeb/"
stockageEntreePartners = "/content/LexiComWebPartners/"
stockageSortie = "/content/LexiComWebPartners/"
# + id="AKq-FnXyg7Pg"
import os, urllib
os.chdir('/content/LexiComWeb/RESSOURCES/')  # so the local `outils` module is importable
from outils import isPartner, text_from_html, myRequest
# + [markdown] id="awgenByYki0v"
#
# ---
# # Personnalisation (Option)
# + [markdown] id="yqKP7XYOki0w"
# > Ces cellules permettent de réaliser les traitements à partir de son propre espace de stockage. A n'exécuter que dans ce cas en adaptant les dossiers d'entrée et sortie. NE PAS EXECUTER SAUF A VOULOIR PERSONNALISER LES TRAITEMENTS.
#
# Si vous avez exécuté le scraper et reconstitué l'ensemble des dossiers nécessaires sur votre drive, ces cellules vous permettent de configurer ce notebook pour travailler sur vos données et non celles issues du git (accessibles sur un dossier virtuel via le menu *Fichiers* à gauche).
# + colab={"base_uri": "https://localhost:8080/"} id="cr3_QQJq_veT" outputId="05ae10bd-4e5b-417d-ad5f-a6e22fe81cb2"
from google.colab import drive
drive.mount('/content/drive')  # interactive auth; exposes the user's Drive
# + id="PpnXg8eWHorz"
# Override the default: read inputs from one's own Drive instead of the clone.
stockageEntree = "/content/drive/MyDrive/OUT"
# + id="_ozLFiEG_w5H"
# Override the default: write outputs to one's own Drive (create an "OUT" folder).
stockageSortie = "/content/drive/MyDrive/OUT"
# + [markdown] id="wbKLKuPg_0jY"
# ---
# + id="GvC9l2YvRmP9"
# recup import requests, re, pickle
from bs4 import BeautifulSoup
from urllib import parse
import requests
import re
import json
# + [markdown] id="nUkhdFN7nOWF"
# > Le fichier niveau1-PNPC.csv a été réalisé avec Hyphe pour collecter depuis le site du PNPC (http://www.portcros-parcnational.fr/fr) l'ensemble des liens sortants. Le site pointé par chacun de ces liens est récursivement collecté jusqu'à un niveau de profondeur 3 pour déterminer l'existence d'un lien retour vers le PNPC. La liste des urls de ce fichier consigne ces adresses de sites considérés alors comme des partenaires de la communication Web.
#
#
# + id="iwbNB1nPgycs"
# Read the Hyphe export and collect every partner URL from the third
# semicolon-separated column (a cell may hold several space-separated URLs).
with open(stockageEntreePartners + "RESSOURCES/NIVEAU1-PNPC.csv", "r") as partners:
    donnee = partners.readlines()
partenaires = []
for lig in donnee[1:]:  # skip the header row
    partenaires.extend(lig.split(";")[2].split())
# + [markdown] id="_rMlfMeIxnRG"
# > Hyphe récupère une liste de partenaires (urls) que le logiciel consigne selon plusieurs protocoles. Quelquefois le chemin est vide (racine du site) d'autres non... Pour pouvoir tester l'appartenance d'un site à cette liste nous normalisons ces urls et l'étendons pour satisfaire la variété du web. Nous retenons ici qu'un site web peut être identifié par les urls sous les formes suivantes (deux protocoles avec ou sans 'www' dans la dénomination) :
#
# * http(s)://site.extension
# * http(s)://www.site.extension
#
# La variable partenairesPropres sert à les extrapoler à partir des données de Hyphe.
#
#
# + id="BJocrXCkJGdl"
# Expand every partner URL into all four scheme/host variants
# ({original scheme, the other of http/https} x {with 'www.', without})
# so later membership tests match whichever form a site actually uses.
# Replaces four copy-pasted branches that each produced the same combinations.
# NOTE(review): assumes every entry parses with a non-empty hostname —
# urlparse yields hostname=None for scheme-less entries, which would raise
# here exactly as the original did. TODO confirm inputs are absolute URLs.
partenairesPropres = []
for url in set(partenaires):
    url = url.strip().replace('"', "")
    urlP = parse.urlparse(url)
    host = urlP.hostname
    if 'www.' in host:
        with_www, without_www = host, host.replace('www.', '')
    else:
        with_www, without_www = 'www.' + host, host
    # Pair the parsed scheme with its http/https counterpart (same test as
    # the original: any scheme containing 'https' counts as https).
    alt_scheme = 'http' if 'https' in urlP.scheme else 'https'
    for scheme in (urlP.scheme, alt_scheme):
        partenairesPropres.append(scheme + '://' + with_www)
        partenairesPropres.append(scheme + '://' + without_www)
partenairesPropres = list(set(partenairesPropres)) # sans doublons
# + colab={"base_uri": "https://localhost:8080/"} id="MzeyIVB-ekt_" outputId="d42a24eb-1dae-4520-9d59-b9ceafccd28f"
print(len(set(partenairesPropres)))
# + [markdown] id="jIPeBmTGgdZd"
# > recup des déjà collectés
# + id="6q0AnATygcQB"
# Reload previously scraped partner contents so finished URLs can be skipped.
fichierDeSortie = stockageEntree + '/ContenusPartners/' + 'Partners.json'
with open (fichierDeSortie, 'r', encoding ='utf8') as fictemp: #
    listePartners = json.load(fictemp)
# + id="g2cub-Bqj4YK"
import urllib3
urllib3.disable_warnings() # pourquoi les certificats SSL ne passent pas aujourd'hui ? 30/11
# + [markdown] id="0zxsE3AeBrCd"
# # Première boucle pour lever les problèmes
# + [markdown] id="DhM4fmUohayD"
# > La dynamique du web fait que certains sites peuvent ne pas être accessibles, ne pas répondre à un instant t, ou faire planter le collecteur. Ce qui suit teste chaque URL et construit la variable BadUrl avec les urls en erreur.
#
# La liste des url collectés et étendue par le procédé précédent génère aussi des adresses pas forcément valides. Ce qui suit est fait pour les expurger.
# + id="mW7rfc3nmRhe"
listePartners = dict()
# + id="GZUYVvJjWKIW" colab={"base_uri": "https://localhost:8080/"} outputId="f961aad6-f2bd-4f59-fea1-6bc8b5993289"
# First pass: probe every partner URL once and collect the ones that fail,
# so the main scraping loop below can skip them.
BadUrl = []   # URLs whose fetch raised an exception
Done = []
for url in partenaires:
    # Only probe partner URLs that have not been collected yet.
    if url not in listePartners and isPartner(url, partenairesPropres + partenaires):
        try:
            webpage = myRequest(url)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            print("bad", url)
            BadUrl.append(url)
# + id="qUPIAATTEjiR" colab={"base_uri": "https://localhost:8080/"} outputId="844766c5-7573-4ccb-cafd-51fecca802d8"
print (len(BadUrl ))
# + id="fjiqoIqHupFF"
done = []
listePartners = dict()
# + id="MNQngfXfRIGw" colab={"base_uri": "https://localhost:8080/"} outputId="4dbf43e2-2ff5-4cbb-8884-340217509719"
# For every partner URL: fetch the page (retrying over https and/or with a
# 'www.' prefix when the plain fetch fails), extract title/description/body
# text with BeautifulSoup, lightly clean it, and store it in listePartners
# keyed by the URL that finally answered.  The dict is re-saved to disk after
# every page so a crash loses at most one site.
for url in partenaires:
    webpage =""
    if url not in listePartners .keys() and isPartner(url, partenairesPropres):
        if url not in BadUrl and url not in done:
            #soup = dict() # data-type change kept to preserve the source URL (old approach)
            # fetch the web site's URL and keep the web page
            # NOTE(review): `webpage` is deleted here and only re-bound if one of
            # the requests below succeeds; if every attempt fails on a URL that
            # already contains 'www' (that path has no `continue`), the
            # BeautifulSoup call further down raises NameError — TODO confirm.
            del(webpage)
            try:
                webpage = myRequest(url)
            except:
                BadUrl .append(url)
                urlTemp = url
                # first retry: same site over https
                url = url .replace('http:', "https:")
                print ("test https ", url)
                try:
                    webpage = myRequest(url)
                except:
                    BadUrl .append(url)
                    if 'www' not in url:
                        # last resort: retry with a 'www.' prefix
                        url = url .replace('https://', "https://www.")
                        try:
                            webpage = myRequest(url)
                            print ("sans www ?")
                        except:
                            # every variant failed: give up on this URL
                            print ("rien à faire ", url)
                            BadUrl .append(url)
                            continue
            # parse the fetched page content with BeautifulSoup
            #soup.append(BeautifulSoup(webpage.content, "html.parser"))
            tempoSoup = BeautifulSoup(webpage.content, "html.parser")
            if tempoSoup.title is not None:
                titre = tempoSoup.title.text
            else:
                titre = ""
            if tempoSoup.description is not None:
                desc = tempoSoup.description.text
            else:
                desc = ""
            texte = text_from_html(webpage.content)
            # concatenate whichever of title / description are present with the body text
            if len(titre)>0 and len(desc)>0:
                listePartners[url] = titre + '\n' + desc + "\n" + texte
            elif len(titre)>0:
                listePartners[url] = titre + "\n" + texte
            elif len(desc)>0:
                listePartners[url] = desc + "\n" + texte
            else:
                listePartners[url] = texte
            # the content is stored in the slot named after the url
            # cleanup
            # soup [cle] = re.sub('^a-zA-Z0-9àâäèéêëîïôœùûüÿçÀÂÄÈÉÊËÎÏÔŒÙÛÜŸÇ', ' ', soup [cle])
            # NOTE(review): the re.sub result below is discarded, so this line is
            # a no-op (and the pattern also lacks the [] of a character class) —
            # TODO confirm the intent.
            re.sub('^a-zA-ZàâäèéêëîïôœùûüÿçÀÂÄÈÉÊËÎÏÔŒÙÛÜŸÇ', ' ', listePartners [url])
            listePartners[url] = listePartners[url].replace("\xa0", " ")
            listePartners[url] = listePartners[url].replace("\n", " ")
            listePartners[url] = listePartners[url].replace("’", "'")
            # NOTE(review): str.translate expects a mapping table, not the string
            # 'utf8'; this call only remaps code points 0-3 and is almost
            # certainly not doing what was intended — TODO confirm.
            listePartners[url] = listePartners[url].translate('utf8')
            dictionary = {"\\": ""}
            transtable= listePartners[url].maketrans(dictionary)
            listePartners[url] = listePartners[url].translate(transtable)
            listePartners[url] = str(listePartners[url])
            # not perfect: some characters remain, varying from site to site
            # save after every page
            fichierDeSortie = stockageSortie + '/ContenusPartners/Partners.json'
            with open (fichierDeSortie, 'w') as fictemp: # all contents go into one json file
                json.dump(listePartners, fictemp)
            done .append (url)
# + id="eQXr0XChssso" colab={"base_uri": "https://localhost:8080/"} outputId="427ef7be-2e1c-4933-a36e-6149996ba2e7"
len(set(listePartners.values()))
# + [markdown] id="QQm5mqYgVeGj"
# ## Commentaires et exemple de ce qui est récupéré
#
#
# * Les clés du dictionnaire de sortie sont les url
# * Le contenu du dictionnaire pour une clé donne le texte récupéré
# * Il peut y avoir des doublons de contenu (cas des sites qui répondent à deux URL équivalente (avec et sans www par ex.).
# * Ce dernier point sera traité dans l'instrument suivant (TraiteContenuPartners.ipynb)
#
#
#
# + id="5gn7V-QWTyQV" colab={"base_uri": "https://localhost:8080/", "height": 224} outputId="60c59fc0-c270-421f-9f5f-a9a0f87f8503"
listePartners [url]
| CollecteurPartners.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbgrader={}
# # Algorithms Exercise 2
# + [markdown] nbgrader={}
# ## Imports
# + nbgrader={}
# %matplotlib inline
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
# + [markdown] nbgrader={}
# ## Peak finding
# + [markdown] nbgrader={}
# Write a function `find_peaks` that finds and returns the indices of the local maxima in a sequence. Your function should:
#
# * Properly handle local maxima at the endpoints of the input array.
# * Return a Numpy array of integer indices.
# * Handle any Python iterable as input.
# -
def find_peaks(a):
    """Find the indices of the local maxima in a sequence.

    An element is a local maximum when it is strictly greater than both of
    its neighbours; at the endpoints only the single existing neighbour is
    checked, so a one-element sequence has its only element as a peak.

    Parameters
    ----------
    a : iterable of comparable values (any Python iterable is accepted).

    Returns
    -------
    numpy.ndarray of integer indices (empty for empty input).
    """
    a = list(a)  # materialize so generators/iterators are supported
    n = len(a)
    if n == 0:
        # dtype=int keeps the documented "array of integer indices" contract
        # even for the empty result (np.array([]) would be float64).
        return np.array([], dtype=int)
    peaks = []
    for i in range(n):
        # Endpoint guards: the previous version indexed a[i+1] for a
        # single-element input and raised IndexError.
        left_ok = (i == 0) or (a[i] > a[i - 1])
        right_ok = (i == n - 1) or (a[i] > a[i + 1])
        if left_ok and right_ok:
            peaks.append(i)
    return np.array(peaks, dtype=int)
# + deletable=false nbgrader={"checksum": "10f9a6cd367de8fdeafe16e190f7db83", "grade": true, "grade_id": "algorithmsex02a", "points": 5}
p1 = find_peaks([2,0,1,0,2,0,1])
assert np.allclose(p1, np.array([0,2,4,6]))
p2 = find_peaks(np.array([0,1,2,3]))
assert np.allclose(p2, np.array([3]))
p3 = find_peaks([3,2,1,0])
assert np.allclose(p3, np.array([0]))
# + [markdown] nbgrader={}
# Here is a string with the first 10000 digits of $\pi$ (after the decimal). Write code to perform the following:
#
# * Convert that string to a Numpy array of integers.
# * Find the indices of the local maxima in the digits of $\pi$.
# * Use `np.diff` to find the distances between consecutive local maxima.
# * Visualize that distribution using an appropriately customized histogram.
# + nbgrader={}
from sympy import pi, N
pi_digits_str = str(N(pi, 10001))[2:]
# + deletable=false nbgrader={"checksum": "6cff4e8e53b15273846c3aecaea84a3d", "solution": true}
# Convert the string of pi digits into a list of ints, then a Numpy array.
z = [int(digit) for digit in pi_digits_str]
x = np.array(z)
# -
y = find_peaks(x)
dif = np.diff(y)
f = plt.figure(figsize=(7,8))
plt.hist(dif,bins=15)
plt.title('Distance between local maxima of first 10000 digits of pi')
plt.ylabel('count')
plt.xlabel('distance')
# + deletable=false nbgrader={"checksum": "140552b7e8017eddb99806fbeaf8d8a0", "grade": true, "grade_id": "algorithmsex02b", "points": 5}
assert True # use this for grading the pi digits histogram
| assignments/assignment07/AlgorithmsEx02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Histogrammar basic tutorial
#
# Histogrammar is a Python package that allows you to make histograms from numpy arrays, and pandas and spark dataframes. (There is also a scala backend for Histogrammar.)
#
# This basic tutorial shows how to:
# - make histograms with numpy arrays and pandas dataframes,
# - plot them,
# - make multi-dimensional histograms,
# - the various histogram types,
# - to make many histograms at once,
# - and store and retrieve them.
#
# Enjoy!
# +
# %%capture
# install histogrammar (if not installed yet)
import sys
!"{sys.executable}" -m pip install histogrammar
# -
import histogrammar as hg
import pandas as pd
import numpy as np
import matplotlib
# ## Data generation
# Let's first load some data!
# open a pandas dataframe for use below
from histogrammar import resources
df = pd.read_csv(resources.data("test.csv.gz"), parse_dates=["date"])
df.head()
# ## Let's fill a histogram!
# Histogrammar treats histograms as objects. You will see this has various advantages.
#
# Let's fill a simple histogram with a numpy array.
# this creates a histogram with 100 even-sized bins in the (closed) range [-5, 5]
hist1 = hg.Bin(num=100, low=-5, high=5)
# filling it with one data point:
hist1.fill(0.5)
print (hist1.entries)
# filling the histogram with an array:
hist1.fill.numpy(np.random.normal(size=10000))
print (hist1.entries)
# let's plot it
hist1.plot.matplotlib()
# +
# Alternatively, you can call this to make the same histogram:
# hist1 = hg.Histogram(num=100, low=-5, high=5)
# -
# Histogrammar also supports "sparse" histograms, which are open-ended. Bins in a sparse histogram only get created and filled if the corresponding data points are encountered.
#
# A sparse histogram has a bin-width, and optionally a bin-origin parameter. Sparse histograms are nice if you don't want to restrict the range, for example for tracking data distributions over time, which may have large, sudden outliers.
hist2 = hg.SparselyBin(binWidth=10)
hist2.fill.numpy(df['age'].values)
hist2.plot.matplotlib()
# +
# Alternatively, you can call this to make the same histogram:
# hist2 = hg.SparselyHistogram(binWidth=10)
# -
# ## Filling from a dataframe
# When importing histogrammar, pandas (and spark) dataframes get extra functions to create histograms that all start with "hg_". For example: hg_Bin or hg_SparselyBin.
#
# Let's make the same 1d (sparse) histogram directly from a (pandas) dataframe.
hist3 = df.hg_SparselyBin(binWidth=10, origin=0, quantity='age')
hist3.plot.matplotlib()
# Note that the column "age" is picked by setting quantity="age", and also that the filling step is done automatically.
# Alternatively, do:
hist3 = hg.SparselyBin(binWidth=10, quantity='age')
hist3.fill.numpy(df)
# ... where hist3 automatically picks up column age from the dataframe,
# ... but needs to be filled by calling fill.numpy() explicitly.
# ### handy functions
# For any 1-dimensional histogram extract the bin entries, edges and centers as follows:
# full range of bin entries, and those in a specified range:
print(hist3.bin_entries(), hist3.bin_entries(low=30, high=80))
# full range of bin edges, and those in a specified range:
print (hist3.bin_edges(), hist3.bin_edges(low=31, high=71))
# full range of bin centers, and those in a specified range:
print (hist3.bin_centers(), hist3.bin_centers(low=31, high=80))
hsum = hist2 + hist3
print (hsum.entries)
hsum *= 4
print (hsum.entries)
# There are also:
# - IrregularlyBin histograms, with irregular bin edges, and
# - CentrallyBin histograms, where no bin edges are given but bin centers, and which is open-ended on both sides.
hist4 = df.hg_CentrallyBin(centers=[15, 25, 35, 45, 55, 65, 75, 85, 95], quantity='age')
hist4.plot.matplotlib()
# Note the slightly different plotting style for CentrallyBin histograms.
# ## Multi-dimensional histograms
# Let's make a multi-dimensional histogram. In Histogrammar, a multi-dimensional histogram is composed as two recursive histograms.
#
# We will use histograms with irregular binning in this example.
edges1 = [-100, -75, -50, -25, 0, 25, 50, 75, 100]
edges2 = [-200, -150, -100, -50, 0, 50, 100, 150, 200]
# +
hist1 = hg.IrregularlyBin(edges=edges1, quantity='latitude')
hist2 = hg.IrregularlyBin(edges=edges2, quantity='longitude', value=hist1)
# for 3 dimensions or higher simply add the 2-dim histogram to the value argument
hist3 = hg.SparselyBin(binWidth=10, quantity='age', value=hist2)
# -
hist2.fill.numpy(df)
hist2.plot.matplotlib()
# number of dimensions per histogram
print (hist1.n_dim, hist2.n_dim, hist3.n_dim)
# ## Histogram types
#
# So far we have covered the histogram types:
# - Bin histograms: with a fixed range and even-sized bins,
# - SparselyBin histograms: open-ended and with a fixed bin-width,
# - IrregularlyBin histograms: using irregular bin edges,
# - CentrallyBin histograms: open-ended and using bin centers.
#
# All of these process numeric variables only.
# ### Categorical variables
#
# For categorical variables use the Categorize histogram
# - Categorize histograms: accepting categorical variables such as strings and booleans.
#
#
histy = hg.Categorize('eyeColor')
histx = hg.Categorize('favoriteFruit', value=histy)
histx.fill.numpy(df)
histx.plot.matplotlib()
# show the datatype(s) of the histogram
print (histx.datatype)
# Categorize histograms also accept booleans:
histy = df.hg_Categorize('isActive')
histy.plot.matplotlib()
print (histy.bin_entries())
print (histy.bin_labels())
# histy.bin_centers() will work as well for Categorize histograms
# ### Other histogram types
# There are several more histogram types:
# - Minimize, Maximize: keep track of the min or max value of a numeric distribution,
# - Average, Deviate: keep track of the mean or mean and standard deviation of a numeric distribution,
# - Sum: keep track of the sum of a numeric distribution,
# - Stack: keep track how many data points pass certain thresholds.
# - Bag: works like a dict, it keeps track of all unique values encountered in a column, and can also do this for vectors of numbers. For strings, Bag works just like the Categorize histogram.
hmin = df.hg_Minimize('latitude')
hmax = df.hg_Maximize('longitude')
print (hmin.min, hmax.max)
havg = df.hg_Average('latitude')
hdev = df.hg_Deviate('longitude')
print (havg.mean, hdev.mean, hdev.variance)
hsum = df.hg_Sum('age')
print (hsum.sum)
# let's illustrate the Stack histogram with longitude distribution
# first we plot the regular distribution
hl = df.hg_SparselyBin(25, 'longitude')
hl.plot.matplotlib()
# Stack counts how often data points are greater or equal to the provided thresholds
thresholds = [-200, -150, -100, -50, 0, 50, 100, 150, 200]
hs = df.hg_Stack(thresholds=thresholds, quantity='longitude')
print (hs.thresholds)
print (hs.bin_entries())
# Stack histograms are useful to make efficiency curves.
#
# With all these histograms you can make multi-dimensional histograms. For example, you can evaluate the mean and standard deviation of one feature as a function of bins of another feature. (A "profile" plot, similar to a box plot.)
hav = hg.Deviate('age')
hlo = hg.SparselyBin(25, 'longitude', value=hav)
hlo.fill.numpy(df)
hlo.bins
hlo.plot.matplotlib()
# ### Convenience functions
#
# There are several convenience functions to make such composed histograms. These are:
# - Profile: Convenience function for creating binwise averages.
# - SparselyProfile: Convenience function for creating sparsely binned binwise averages.
# - ProfileErr: Convenience function for creating binwise averages and variances.
# - SparselyProfileErr: Convenience function for creating sparsely binned binwise averages and variances.
# - TwoDimensionallyHistogram: Convenience function for creating a conventional, two-dimensional histogram.
# - TwoDimensionallySparselyHistogram: Convenience function for creating a sparsely binned, two-dimensional histogram.
# For example, call this convenience function to make the same histogram as above:
hlo = df.hg_SparselyProfileErr(25, 'longitude', 'age')
hlo.plot.matplotlib()
# ### Summary of histograms
#
# Here you can find the list of all available histograms and aggregators and how to use each one:
#
# https://histogrammar.github.io/histogrammar-docs/specification/1.0/
#
# The most useful aggregators are the following. Tinker with them to get familiar; building up an analysis is easier when you know "there's an app for that."
#
# **Simple counters:**
#
# * [`Count`](../../specification/#count-sum-of-weights): just counts. Every aggregator has an `entries` field, but `Count` _only_ has this field.
# * [`Average`](../../specification/#average-mean-of-a-quantity) and [`Deviate`](../../specification/#deviate-mean-and-variance): add mean and variance, cumulatively.
# * [`Minimize`](../../specification/#minimize-minimum-value) and [`Maximize`](../../specification/#maximize-maximum-value): lowest and highest value seen.
#
# **Histogram-like objects:**
#
# * [`Bin`](../../specification/#bin-regular-binning-for-histograms) and [`SparselyBin`](../../specification/#sparselybin-ignore-zeros): split a numerical domain into uniform bins and redirect aggregation into those bins.
# * [`Categorize`](../../specification/#categorize-string-valued-bins-bar-charts): split a string-valued domain by unique values; good for making bar charts (which are histograms with a string-valued axis).
# * [`CentrallyBin`](#centrallybin-fully-partitioning-with-centers) and [`IrregularlyBin`](../../specification/#irregularlybin-fully-partitioning-with-edges): split a numerical domain into arbitrary subintervals, usually for separate plots like particle pseudorapidity or collision centrality.
#
# **Collections:**
#
# * [`Label`](../../specification/#label-directory-with-string-based-keys), [`UntypedLabel`](../../specification/#untypedlabel-directory-of-different-types), and [`Index`](../../specification/#index-list-with-integer-keys): bundle objects with string-based keys (`Label` and `UntypedLabel`) or simply an ordered array (effectively, integer-based keys) consisting of a single type (`Label` and `Index`) or any types (`UntypedLabel`).
# * [`Branch`](../../specification/#branch-tuple-of-different-types): for the fourth case, an ordered array of any types. A `Branch` is useful as a "cable splitter". For instance, to make a histogram that tracks minimum and maximum value, do this:
#
#
#
#
# ## Making many histograms at once
#
# There a nice method to make many histograms in one go. See here.
#
# By default automagical binning is applied to make the histograms.
#
# More details on how to use this function are found in the advanced tutorial.
hists = df.hg_make_histograms()
print (hists.keys())
h = hists['transaction']
h.plot.matplotlib()
h = hists['date']
h.plot.matplotlib()
# you can also select which and make multi-dimensional histograms
hists = df.hg_make_histograms(features = ['longitude:age'])
hist = hists['longitude:age']
hist.plot.matplotlib()
# ## Storage
#
# Histograms can be easily stored and retrieved in/from the json format.
# storage
hist.toJsonFile('long_age.json')
# retrieval
factory = hg.Factory()
hist2 = factory.fromJsonFile('long_age.json')
hist2.plot.matplotlib()
# +
# to store many histograms at once:
# + magic_args="false --no-raise-error" language="script"
#
# # we can store the histograms if we want to
# import json
# from histogrammar.util import dumper
#
# # store
# with open('histograms.json', 'w') as outfile:
# json.dump(hists, outfile, default=dumper)
#
# # and load again
# with open('histograms.json') as handle:
# hists2 = json.load(handle)
# -
print(hists.keys())
# ## Advanced tutorial
#
# The advanced tutorial shows:
# - How to work with spark dataframes.
# - More details on this nice method to make many histograms in one go. For example how to set bin specifications.
#
| histogrammar/notebooks/histogrammar_tutorial_basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python ds
# language: python
# name: myenv
# ---
# # Homework: Basic Artificial Neural Networks
# The goal of this homework is simple, yet an actual implementation may take some time :). We are going to write an Artificial Neural Network (almost) from scratch. The software design was heavily inspired by [PyTorch](http://pytorch.org) which is the main framework of our course
# This homework requires sending **multiple** files, please do not forget to include all the files when sending to TA. The list of files:
# - This notebook
# - homework_modules.ipynb with all blocks implemented (except maybe `Conv2d` and `MaxPool2d` layers implementation which are part of 'advanced' version of this homework)
# - homework_differentiation.ipynb
# %matplotlib inline
from time import time, sleep
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
# # Framework
# Implement everything in `Modules.ipynb`. Read all the comments thoughtfully to ease the pain. Please try not to change the prototypes.
#
# Do not forget, that each module should return **AND** store `output` and `gradInput`.
#
# The typical assumption is that `module.backward` is always executed after `module.forward`,
# so `output` is stored, this would be useful for `SoftMax`.
# ### Tech note
# Prefer using `np.multiply`, `np.add`, `np.divide`, `np.subtract` instead of `*`,`+`,`/`,`-` for better memory handling.
#
# Example: suppose you allocated a variable
#
# ```
# a = np.zeros(...)
# ```
# So, instead of
# ```
# a = b + c # will be reallocated, GC needed to free
# ```
# You can use:
# ```
# np.add(b,c,out = a) # puts result in `a`
# ```
# (re-)load layers
# %run homework_modules.ipynb
# # Toy example
# Use this example to debug your code, start with logistic regression and then test other layers. You do not need to change anything here. This code is provided for you to test the layers. Also it is easy to use this code in MNIST task.
# +
# Generate some data
N = 500
X1 = np.random.randn(N,2) + np.array([2,2])
X2 = np.random.randn(N,2) + np.array([-2,-2])
Y = np.concatenate([np.ones(N),np.zeros(N)])[:,None]
Y = np.hstack([Y, 1-Y])
X = np.vstack([X1,X2])
plt.scatter(X[:,0],X[:,1], c = Y[:,0], edgecolors= 'none')
# -
# Define a **logistic regression** for debugging.
# +
# net = Sequential()
# net.add(Linear(2, 2))
# net.add(LogSoftMax())
criterion = ClassNLLCriterion()
# Test something like that then
net = Sequential()
net.add(Linear(2, 4))
net.add(ReLU())
net.add(Linear(4, 2))
net.add(LogSoftMax())
print(net)
# -
# Start with batch_size = 1000 to make sure every step lowers the loss, then try stochastic version.
# +
# Optimizer params
optimizer_config = {'learning_rate' : 1e-2, 'momentum': 0.9}
optimizer_state = {}
# Looping params
n_epoch = 20
batch_size = 128
# -
# batch generator
def get_batches(dataset, batch_size):
    """Yield shuffled (X, Y) mini-batches of at most `batch_size` rows.

    `dataset` is a pair of arrays indexed along axis 0; rows are reshuffled
    once per call (i.e. once per epoch), and the final batch may be smaller.
    """
    X, Y = dataset
    # One random row order per epoch (permutation == arange + shuffle).
    order = np.random.permutation(X.shape[0])
    for lo in range(0, len(order), batch_size):
        sel = order[lo:lo + batch_size]
        yield X[sel], Y[sel]
# ### Train
# Basic training loop. Examine it.
# +
# Basic SGD-with-momentum training loop for the toy 2-D classification task:
# one epoch = one pass over shuffled mini-batches; the loss curve is redrawn
# after every epoch.
loss_history = []
for i in range(n_epoch):
    for x_batch, y_batch in get_batches((X, Y), batch_size):
        # gradients accumulate across backward() calls, so reset them first
        net.zeroGradParameters()
        # Forward
        predictions = net.forward(x_batch)
        loss = criterion.forward(predictions, y_batch)
        # Backward
        dp = criterion.backward(predictions, y_batch)
        net.backward(x_batch, dp)
        # Update weights
        sgd_momentum(net.getParameters(),
                     net.getGradParameters(),
                     optimizer_config,
                     optimizer_state)
        loss_history.append(loss)
    # Visualize: redraw the full loss history once per epoch
    display.clear_output(wait=True)
    plt.figure(figsize=(10, 6))
    plt.title("Training loss")
    plt.xlabel("#iteration")
    plt.ylabel("loss")
    plt.plot(loss_history, 'k')
    plt.show()
    # `loss` here is the loss of the last mini-batch of the epoch
    print('Current loss: %f' % loss)
# -
# # Digit classification
# We are using old good [MNIST](http://yann.lecun.com/exdb/mnist/) as our dataset.
import mnist
X_train, y_train, X_val, y_val, X_test, y_test = mnist.load_dataset()
# One-hot encode the labels first.
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(sparse=False)
y_train_enc = ohe.fit_transform(y_train.reshape(-1, 1))
y_test_enc = ohe.transform(y_test.reshape(-1, 1))
X_train = X_train.reshape(-1, 28*28,)
X_test = X_test.reshape(-1, 28*28,)
X_train.shape, y_train_enc.shape, X_test.shape, y_test_enc.shape
# - **Compare** `ReLU`, `ELU`, `LeakyReLU`, `SoftPlus` activation functions.
# You would better pick the best optimizer params for each of them, but it is overkill for now. Use an architecture of your choice for the comparison.
# - **Try** inserting `BatchNormalization` (followed by `ChannelwiseScaling`) between `Linear` module and activation functions.
# - Plot the losses both from activation functions comparison and `BatchNormalization` comparison on one plot. Please find a scale (log?) on which the lines are distinguishable, do not forget about naming the axes, and the plot should be good-looking.
# - Plot the losses for two networks: one trained by momentum_sgd, another one trained by Adam. Which one performs better?
# - Hint: good logloss for MNIST should be around 0.5.
# Your code goes here. ################################################
def get_2l_zoo(with_bn=False, activations=None):
    """Build a dict of small 784 -> 200 -> 10 networks, one per activation class.

    When `with_bn` is true, a BatchNormalization + ChannelwiseScaling pair is
    inserted between the hidden Linear layer and the activation, and '_bn' is
    appended to the network's name. Networks are returned in training mode.
    """
    if activations is None:
        activations = [ReLU, ELU, LeakyReLU, SoftPlus]
    zoo = {}
    for activation_cls in activations:
        model = Sequential()
        model.add(Linear(784, 200))
        if with_bn:
            model.add(BatchNormalization(alpha=0.9))
            model.add(ChannelwiseScaling(200))
        model.add(activation_cls())
        model.add(Linear(200, 10))
        model.add(LogSoftMax())
        model.train()
        suffix = "_bn" if with_bn else ""
        zoo[f'{repr(activation_cls())}{suffix}'] = model
    return zoo
def train_zoo(model_zoo, X_train, y_train, n_epoch=3, batch_size=200, opt='sgd_momentum', config=None,
              lr_decay = 1):
    """Train every network in `model_zoo` on the same mini-batches and return
    a dict of per-network loss histories.

    NOTE(review): the `y_train` parameter is never used — the batch generator
    below reads the *global* `y_train_enc`. Callers currently pass
    `y_test_enc`, which only works because of this. Fixing the function to use
    its argument would break those callers — TODO resolve together.
    """
    if opt == 'sgd_momentum':
        optimizer_config = {'learning_rate' : 1e-3, 'momentum': 0.9} \
            if config is None else config
        optimizer = sgd_momentum
    elif opt == 'adam':
        optimizer_config = {'learning_rate': 1e-3, 'beta1': 0.9, 'beta2':0.999, 'epsilon':1e-8} \
            if config is None else config
        optimizer = adam_optimizer
    else:
        assert False, f'Optimizer {opt} unrecognized.'
    criterion = ClassNLLCriterion()
    # Training all the networks simultaneously
    losses = {kind: [] for kind in model_zoo}
    # NOTE(review): unused — `optimizer_config` above is what is actually passed.
    optimizer_sgd_config = {'learning_rate' : 1e-3, 'momentum': 0.9}
    # one optimizer state per network (momentum buffers / Adam moments)
    optimizer_states = {kind: {} for kind in model_zoo}
    for i in range(n_epoch):
        print(f'___Epoch {i+1}___')
        # NOTE(review): uses global y_train_enc, not the y_train parameter.
        for x_batch, y_batch in get_batches((X_train, y_train_enc), batch_size):
            # reset accumulated gradients for every network before this batch
            for kind, net in model_zoo.items():
                net.zeroGradParameters()
            for kind, net in model_zoo.items():
                # Forward
                predictions = net.forward(x_batch)
                loss = criterion.forward(predictions, y_batch)
                # Backward
                dp = criterion.backward(predictions, y_batch)
                net.backward(x_batch, dp)
                # Gradient step
                optimizer(net.getParameters(),
                          net.getGradParameters(),
                          optimizer_config,
                          optimizer_states[kind])
                losses[kind].append(loss)
        # Learning rate decay (applied every 10th epoch, shared by all nets)
        if (i+1)%10 == 0:
            optimizer_config['learning_rate'] *= lr_decay
        # report each network's best (minimum) loss so far
        actual_losses = '\n'.join(f'{k}: {np.min(v)}' for k, v in losses.items())
        print(f'losses: \n{actual_losses}')
    return losses
# Build the activation-comparison zoo (plus one BatchNorm variant) and train it.
net_zoo = get_2l_zoo()
batched = get_2l_zoo(True, [ReLU])
net_zoo.update(batched)
# Bug fix: train on the *training* labels (y_train_enc), not y_test_enc.
# (train_zoo currently ignores this argument and reads the global
# y_train_enc anyway, so this also makes the call agree with reality.)
losses = train_zoo(net_zoo, X_train, y_train_enc, opt='sgd_momentum')
# +
# Plotting
plt.figure(figsize=(15, 10))
plt.title("Training loss")
plt.xlabel("#iteration")
plt.ylabel("loss")
plt.plot(100*np.log(losses['ReLU']), 'r', linewidth=1, label='ReLU')
plt.plot(100*np.log(losses['ELU']), 'b', linewidth=1, label='ELU')
plt.plot(100*np.log(losses['LeakyReLU']), 'g', linewidth=1, label='LeakyReLU')
plt.plot(100*np.log(losses['SoftPlus']), 'k', linewidth=1 ,label='SoftPlus')
plt.plot(100*np.log(losses['ReLU_bn']), 'm', linewidth=1, label='ReLU_bn')
plt.legend()
plt.show()
actual_losses = '\n'.join(f'{k}: {np.min(v)}' for k, v in losses.items())
print(f'Actual losses: \n{actual_losses}')
# -
# Train two identical ReLU networks, one with SGD+momentum and one with Adam.
# Bug fix: pass the training labels (y_train_enc), not y_test_enc — train_zoo
# currently ignores this argument and reads the global y_train_enc anyway.
net = get_2l_zoo(False, [ReLU])
loss_sgd = train_zoo(net, X_train, y_train_enc, n_epoch=10, opt='sgd_momentum')
net = get_2l_zoo(False, [ReLU])
loss_adam = train_zoo(net, X_train, y_train_enc, n_epoch=10, opt='adam')
losses_opt = {**loss_sgd, **loss_adam}
plt.figure(figsize=(10, 6))
plt.title("Training loss")
plt.xlabel("#iteration")
plt.ylabel("loss")
plt.plot(100*np.log(loss_sgd['ReLU']), 'r', linewidth=1, label='SGD_ReLU')
plt.plot(100*np.log(loss_adam['ReLU']), 'b', linewidth=1, label='ADAM_ReLU')
plt.legend()
plt.show()
print(f'Actual losses:')
print(f'SGD momentum:{np.min(loss_sgd["ReLU"])}')
print(f'Adam:{np.min(loss_adam["ReLU"])}')
# Write your personal opinion on the activation functions, think about computation times too. Does `BatchNormalization` help?
# + active=""
# # Your answer goes here. ################################################
# So I played with learning rates, batch sizes and epochs and usually ELU performs better, but it's computations requires exp that is a bit expensive. ReLU performs almost the same, moreover it is lightweight in computational terms, so like it ;)
# Maybe such a simple architecture is not the best choice to compare these activations, though. As for BatchNorm, it actually shows good performance comparable to the architectures without it, and if given more epochs it tends to beat them with more time — but again, I think the architecture is too simple to reveal all its potential. More epochs are not shown on the plot because the lines start to be indistinguishable.
# -
# **Finally**, use all your knowledge to build a super cool model on this dataset. Use **dropout** to prevent overfitting, play with **learning rate decay**. You can use **data augmentation** such as rotations, translations to boost your score. Use your knowledge and imagination to train a model. Don't forget to call `training()` and `evaluate()` methods to set desired behaviour of `BatchNormalization` and `Dropout` layers.
# +
# Your code goes here. ################################################
# Final model: a 784 -> 392 -> 196 -> 10 MLP. Each hidden layer is followed
# by BatchNormalization (+ its learnable ChannelwiseScaling affine), ReLU and
# Dropout(p=0.6); LogSoftMax output matches ClassNLLCriterion.
super_net = Sequential()
super_net.add(Linear(784, 392))
super_net.add(BatchNormalization(alpha=0.9))
super_net.add(ChannelwiseScaling(392))
super_net.add(ReLU())
super_net.add(Dropout(p=0.6))
super_net.add(Linear(392, 196))
super_net.add(BatchNormalization(alpha=0.9))
super_net.add(ChannelwiseScaling(196))
super_net.add(ReLU())
super_net.add(Dropout(p=0.6))
super_net.add(Linear(196, 10))
super_net.add(LogSoftMax())
# -
super_net.train()
loss = train_zoo({'net': super_net}, X_train, y_train_enc, n_epoch=30, opt='adam', lr_decay=0.3)
# Print here your accuracy on test set. It should be around 90%.
super_net.evaluate()
y_pred = super_net.forward(X_test)
y_pred_labels = np.argmax(y_pred, axis=1)
acc = np.sum(y_pred_labels == y_test)/len(y_pred_labels)
acc
| homework01/homework_main-basic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Conclusion
#
# I hope you enjoyed the lecture and could do crazy bayesian stuff in your next job as data scientist !
# ### Check list
#
# - Understand what machine learning is in terms of probabilites
# - Can get started in constructing probabilistic graphical models using pgmpy
# - Can get started in building bayesian models in pymc
# - Understand the difference between frequentist and bayesian machine learning
# - Understand regression in bayesian settings
# - Understand when to prefere bayesian machine learning over other approaches
# ### Homework 1: Monty Hall problem
#
# Suppose you're on a game show, and you're given the choice of three doors: Behind one door is a car; behind the others, goats. You pick a door, say No. 1, and the host, who knows what's behind the doors, opens another door, say No. 3, which has a goat. He then says to you, "Do you want to pick door No. 2?" Is it to your advantage to switch your choice?
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/Monty_open_door.svg/220px-Monty_open_door.svg.png"/>
#
# #### write a code to help you pass the game show ?
# +
#your code here
# -
# ### Homework 2: Localization
#
# Suppose you are in a mobile car, and you can sense the noisy distnace between you and fixed communication towers (using signal stright).
#
# **Can you estimate your true location ?**
# +
# Your true distance
import numpy as np
import pymc as pm
import math
import matplotlib.pyplot as plt
import random
import scipy as sci
# %matplotlib inline
def noisyDistance(x, y, noiseSigma):
    """Euclidean distance between points x and y, plus Gaussian noise of std noiseSigma."""
    true_distance = np.linalg.norm(x - y)
    return true_distance + sci.stats.norm.rvs(0, noiseSigma)
def generateData(landscapeSize, numCommTowers, noiseSigma):
    """Place towers and one vehicle uniformly at random on a square landscape.

    Returns (tower_locations, vehicle_location, noisy_distances): the
    distances from the vehicle to each tower carry Gaussian noise of
    standard deviation `noiseSigma`.
    """
    def random_point():
        return np.array([random.random() * landscapeSize,
                         random.random() * landscapeSize])

    towersTrueLocation = [random_point() for _ in range(numCommTowers)]
    yourTrueLocation = random_point()
    noisyDistances = [noisyDistance(tower, yourTrueLocation, noiseSigma)
                      for tower in towersTrueLocation]
    return (towersTrueLocation, yourTrueLocation, noisyDistances)
def drawLandscape(landscapeSize, towersTrueLocation, yourTrueLocation, estimatedLocations = None):
    """Plot the landscape: towers as '+', the true vehicle position as a red
    '*', and (optionally) estimated positions as 'o' markers."""
    plt.xlim(0, landscapeSize)
    plt.ylim(0, landscapeSize)
    for tower in towersTrueLocation:
        plt.scatter(tower[0], tower[1], marker='+')
    plt.scatter(yourTrueLocation[0], yourTrueLocation[1], marker='*', color = 'red')
    if estimatedLocations is not None:
        for estimate in estimatedLocations:
            plt.scatter(estimate[0], estimate[1], marker='o')
# +
# Simulate a 10 km x 10 km landscape with 25 towers and a noise sigma of 50 m,
# then visualise the ground truth and print the raw measurements.
(towersTrueLocation, yourTrueLocation, noisyDistances) = generateData(10000, 25, 50)
drawLandscape(10000, towersTrueLocation, yourTrueLocation)
print ("True towers locations: ", towersTrueLocation, "\n")
print ("Your true location: ", yourTrueLocation, "\n")
print ("Noisy distances: ", noisyDistances)
# +
# Your code here
# -
# ### Homework 3: Mixture of Gaussians
# In a statistics course, the professor found that there are two peaks in the curve of students' scores; he speculates the reason is variation in the university students' acceptance standards (Elmoazi !).
# **Could you help him rediscover the two groups of students?**
# +
from matplotlib.pyplot import hist
from numpy.random import normal
import random
# Bug fix: this notebook declares a Python 3 kernel, but `xrange` only exists
# in Python 2 (it was removed in Python 3), so these lines raised NameError.
# `range` is the Python 3 equivalent.
# Generate a two-component Gaussian mixture: 100 scores around 55 and 100
# around 85 (std 5 each), shuffled so the groups are interleaved.
data = [normal(55, 5) for i in range(100)]
data += [normal(85, 5) for i in range(100)]
random.shuffle(data)
hist(data, 20)
# +
#your code here
| session4/L3_Conclusion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np

# `a` and `b` are two names bound to the *same* 1-D array [0 1 2].
a = b = np.arange(3)
print(a)
print(a.shape)

# Element-wise multiplication: `*` and np.multiply are equivalent.
print(a * b)
print(np.multiply(a, b))

# Reshape to an explicit row vector of shape (1, 3).
a_1_3 = a.reshape(1, 3)
print(a_1_3)
print(a_1_3.shape)

# Reshape to an explicit column vector of shape (3, 1).
b_3_1 = b.reshape(3, 1)
print(b_3_1)
print(b_3_1.shape)

# (1, 3) * (3, 1) broadcasts to a (3, 3) outer product.
print(a_1_3 * b_3_1)
print(np.multiply(a_1_3, b_3_1))

# A 1-D (3,) array broadcasts against (3, 1) the same way.
print(a * b_3_1)
print(np.multiply(a, b_3_1))

# Matrix multiplication: (1, 3) @ (3, 1) -> a (1, 1) array (the inner product).
print(a_1_3 @ b_3_1)
print(np.matmul(a_1_3, b_3_1))
print(np.dot(a_1_3, b_3_1))
print(type(a_1_3 @ b_3_1))
print((a_1_3 @ b_3_1).shape)

# (1, 3) @ 1-D (3,) -> 1-D result of shape (1,): the trailing vector is
# treated as a column and the result's last axis is dropped.
print(a_1_3 @ b)
print(np.matmul(a_1_3, b))
print(np.dot(a_1_3, b))
print(type(a_1_3 @ b))
print((a_1_3 @ b).shape)

# 1-D @ 1-D is the scalar dot product (a NumPy scalar, not an array).
print(a @ b)
print(np.matmul(a, b))
print(np.dot(a, b))
print(type(a @ b))

# Incompatible shapes: matmul requires the inner dimensions to agree.
a = np.arange(6).reshape(2, 3)
print(a)
b = np.arange(2).reshape(1, 2)
print(b)
# +
# print(a @ b)
# ValueError: shapes (2,3) and (1,2) not aligned: 3 (dim 1) != 1 (dim 0)
# -
# np.tile repeats b vertically to shape (3, 2), making (2,3) @ (3,2) valid.
print(np.tile(b, (3, 1)))
print(a @ np.tile(b, (3, 1)))
| notebook/numpy_broadcasting_multiply.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'  # pin TensorFlow to GPU #2
import numpy as np
import tensorflow as tf
import json

# Load the BPE-tokenised parallel corpus (lists of integer token-id sequences).
with open('dataset-bpe.json') as fopen:
    data = json.load(fopen)
train_X = data['train_X']
train_Y = data['train_Y']
test_X = data['test_X']
test_Y = data['test_Y']

# Special token ids used by the decoder (0 is padding).
EOS = 2
GO = 1
vocab_size = 32000

# Append the EOS token (id 2) to every target sentence.
train_Y = [i + [2] for i in train_Y]
test_Y = [i + [2] for i in test_Y]
# +
from tensor2tensor.utils import beam_search
def pad_second_dim(x, desired_size):
    """Zero-pad a rank-3 tensor `x` along axis 1 up to `desired_size`.

    Used to make the logits' time dimension match the padded target length
    before computing the sequence loss. Assumes x is (batch, time, depth).
    """
    # Tile a single scalar zero to shape (batch, desired_size - current, depth).
    padding = tf.tile([[[0.0]]], tf.stack([tf.shape(x)[0], desired_size - tf.shape(x)[1], tf.shape(x)[2]], 0))
    return tf.concat([x, padding], 1)
class Translator:
    # Seq2seq NMT graph (TF1 style): multi-layer LSTM encoder, Bahdanau
    # attention LSTM decoder, masked sequence loss with Adam, and a
    # beam-search (beam size 1, i.e. greedy) inference path in `fast_result`.
    def __init__(self, size_layer, num_layers, embedded_size, learning_rate):

        def cells(reuse=False):
            # One LSTM layer with orthogonal weight initialisation.
            return tf.nn.rnn_cell.LSTMCell(size_layer, initializer=tf.orthogonal_initializer(),reuse=reuse)

        # Integer token ids, both [batch, time]; id 0 is padding.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None, None])
        # True (unpadded) sequence lengths, recovered from the zero padding.
        self.X_seq_len = tf.count_nonzero(self.X, 1, dtype = tf.int32)
        self.Y_seq_len = tf.count_nonzero(self.Y, 1, dtype = tf.int32)
        batch_size = tf.shape(self.X)[0]
        # Embedding table shared by encoder and decoder inputs.
        embeddings = tf.Variable(tf.random_uniform([vocab_size, embedded_size], -1, 1))

        def forward(x, y, reuse = False):
            # Encode x, then run the attention decoder over y.
            # Returns per-step vocabulary logits [batch, time, vocab_size].
            batch_size = tf.shape(x)[0]
            X_seq_len = tf.count_nonzero(x, 1, dtype = tf.int32)
            Y_seq_len = tf.count_nonzero(y, 1, dtype = tf.int32)
            with tf.variable_scope('model',reuse=reuse):
                encoder_embedded = tf.nn.embedding_lookup(embeddings, x)
                decoder_embedded = tf.nn.embedding_lookup(embeddings, y)
                rnn_cells = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
                last_output, last_state = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded,
                                                            sequence_length=X_seq_len,
                                                            dtype = tf.float32)
            with tf.variable_scope("decoder",reuse=reuse):
                # Bahdanau (additive) attention over all encoder outputs.
                attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(num_units = size_layer,
                                                                           memory = last_output)
                rnn_cells = tf.contrib.seq2seq.AttentionWrapper(
                    cell = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)]),
                    attention_mechanism = attention_mechanism,
                    attention_layer_size = size_layer)
                # Start the decoder from the encoder's final state.
                initial_state = rnn_cells.zero_state(batch_size, tf.float32).clone(cell_state=last_state)
                outputs, _ = tf.nn.dynamic_rnn(rnn_cells, decoder_embedded,
                                               sequence_length=Y_seq_len,
                                               initial_state = initial_state,
                                               dtype = tf.float32)
            return tf.layers.dense(outputs,vocab_size)

        # Decoder input: GO token prepended, last position dropped.
        # NOTE(review): the shifted input is built from self.X here; teacher
        # forcing conventionally shifts the *target* self.Y -- confirm intended.
        main = tf.strided_slice(self.X, [0, 0], [batch_size, -1], [1, 1])
        decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
        self.training_logits = forward(self.X, decoder_input, reuse = False)
        # Trim then zero-pad the logits' time axis to the max target length.
        self.training_logits = self.training_logits[:, :tf.reduce_max(self.Y_seq_len)]
        self.training_logits = pad_second_dim(self.training_logits, tf.reduce_max(self.Y_seq_len))
        # Mask excludes padding positions from both loss and accuracy.
        masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
        self.cost = tf.contrib.seq2seq.sequence_loss(logits = self.training_logits,
                                                     targets = self.Y,
                                                     weights = masks)
        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
        y_t = tf.argmax(self.training_logits,axis=2)
        y_t = tf.cast(y_t, tf.int32)
        self.prediction = tf.boolean_mask(y_t, masks)
        mask_label = tf.boolean_mask(self.Y, masks)
        correct_pred = tf.equal(self.prediction, mask_label)
        correct_index = tf.cast(correct_pred, tf.float32)  # unused
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        # Inference: beam search with beam width 1 (greedy decoding),
        # decoding at most max(source length) steps.
        initial_ids = tf.fill([batch_size], GO)

        def symbols_to_logits(ids):
            x = tf.contrib.seq2seq.tile_batch(self.X, 1)
            logits = forward(x, ids, reuse = True)
            # Only the logits of the most recent step are needed.
            return logits[:, tf.shape(ids)[1]-1, :]

        final_ids, final_probs, _ = beam_search.beam_search(
            symbols_to_logits,
            initial_ids,
            1,
            tf.reduce_max(self.X_seq_len),
            vocab_size,
            0.0,
            eos_id = EOS)
        self.fast_result = final_ids
# -
# Hyperparameters.
size_layer = 512       # LSTM hidden size (also the attention size)
num_layers = 2         # stacked LSTM layers in encoder and decoder
embedded_size = 256    # token embedding dimension
learning_rate = 1e-3
batch_size = 128
epoch = 20

# Build a fresh graph and initialise all variables.
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Translator(size_layer, num_layers, embedded_size, learning_rate)
sess.run(tf.global_variables_initializer())
pad_sequences = tf.keras.preprocessing.sequence.pad_sequences
# +
# Smoke test: run decode, loss and accuracy on the first 10 training pairs.
batch_x = pad_sequences(train_X[:10], padding='post')
batch_y = pad_sequences(train_Y[:10], padding='post')
sess.run([model.fast_result, model.cost, model.accuracy],
         feed_dict = {model.X: batch_x, model.Y: batch_y})
# +
import tqdm

for e in range(epoch):
    # ---- training pass ----
    pbar = tqdm.tqdm(
        range(0, len(train_X), batch_size), desc = 'minibatch loop')
    train_loss, train_acc, test_loss, test_acc = [], [], [], []
    for i in pbar:
        index = min(i + batch_size, len(train_X))
        # Pad every minibatch to its own maximum sequence length.
        batch_x = pad_sequences(train_X[i : index], padding='post')
        batch_y = pad_sequences(train_Y[i : index], padding='post')
        feed = {model.X: batch_x,
                model.Y: batch_y}
        accuracy, loss, _ = sess.run([model.accuracy,model.cost,model.optimizer],
                                     feed_dict = feed)
        train_loss.append(loss)
        train_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    # ---- evaluation pass (no optimizer step) ----
    pbar = tqdm.tqdm(
        range(0, len(test_X), batch_size), desc = 'minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_X))
        batch_x = pad_sequences(test_X[i : index], padding='post')
        batch_y = pad_sequences(test_Y[i : index], padding='post')
        feed = {model.X: batch_x,
                model.Y: batch_y,}
        accuracy, loss = sess.run([model.accuracy,model.cost],
                                  feed_dict = feed)
        test_loss.append(loss)
        test_acc.append(accuracy)
        pbar.set_postfix(cost = loss, accuracy = accuracy)
    # Per-epoch averages over all minibatches.
    print('epoch %d, training avg loss %f, training avg acc %f'%(e+1,
          np.mean(train_loss),np.mean(train_acc)))
    print('epoch %d, testing avg loss %f, testing avg acc %f'%(e+1,
          np.mean(test_loss),np.mean(test_acc)))
# -
from tensor2tensor.utils import bleu_hook

# Decode the whole test set with the beam-search path.
results = []
for i in tqdm.tqdm(range(0, len(test_X), batch_size)):
    index = min(i + batch_size, len(test_X))
    batch_x = pad_sequences(test_X[i : index], padding='post')
    feed = {model.X: batch_x}
    # Keep only the top beam ([:, 0, :]) for every sentence.
    p = sess.run(model.fast_result,feed_dict = feed)[:,0,:]
    result = []
    for row in p:
        # Drop special tokens (ids 0-3: padding / GO / EOS / unknown).
        result.append([i for i in row if i > 3])
    results.extend(result)

# Strip special tokens from the references as well, then score BLEU.
rights = []
for r in test_Y:
    rights.append([i for i in r if i > 3])
bleu_hook.compute_bleu(reference_corpus = rights,
                       translation_corpus = results)
| neural-machine-translation/17.lstm-seq2seq-bahdanau.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Step-by-step NMO correction
# Devito is equally useful as a framework for other stencil computations in general; for example, computations where all array indices are affine functions of loop variables. The Devito compiler is also capable of generating
# arbitrarily nested, possibly irregular, loops. This key feature is needed to support many complex algorithms that are used in engineering and scientific practice, including applications from image processing, cellular automata, and machine-learning. This tutorial, a step-by-step NMO correction, is an example of it.
#
# In reflection seismology, normal moveout (NMO) describes the effect that the distance between a seismic source and a receiver (the offset) has on the arrival time of a reflection in the form of an increase of time with offset. The relationship between arrival time and offset is hyperbolic.
#
# Based on the field geometry information, each individual trace is assigned to the midpoint between the shot and receiver locations associated with that trace. Those traces with the same midpoint location are grouped together, making up a common midpoint gather (CMP).
#
# Consider a reflection event on a CMP gather. The difference between the two-way time at a given offset and the two-way zero-offset time is called normal moveout (NMO). Reflection traveltimes must be corrected for NMO prior to summing the traces in the CMP gather along the offset axis. The normal moveout depends on velocity above the reflector, offset, two-way zero-offset time associated with the reflection event, dip of the reflector, the source-receiver azimuth with respect to the true-dip direction, and the degree of complexity of the near-surface and the medium above the reflector.
#
# <img src='./nmo-diagram.png' width=1000>
#
# # Seismic modelling with devito
# Before the NMO corretion we will describe a setup of seismic modelling with Devito in a simple 2D case. We will create a physical model of our domain and define a multiple source and an according set of receivers to model for the forward model. But first, we initialize some basic utilities.
# +
import numpy as np
import sympy as sp
from devito import *
# -
# We will create a simple velocity model here by hand for demonstration purposes. This model essentially consists of three layers, each with a different velocity: 1.5km/s in the top layer, 2.5km/s in the middle layer and 4.5 km/s in the bottom layer.
# +
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Model, plot_velocity
shape = (301, 501) # Number of grid point (nx, ny, nz)
spacing = (10., 10) # Grid spacing in m. The domain size is now 3km by 5km
origin = (0., 0) # What is the location of the top left corner.
# Define a velocity profile. The velocity is in km/s
v = np.empty(shape, dtype=np.float32)
v[:,:100] = 1.5
v[:,100:350] = 2.5
v[:,350:] = 4.5
# With the velocity and model size defined, we can create the seismic model that
# encapsulates these properties. We also define the size of the absorbing layer as 10 grid points
model = Model(vp=v, origin=origin, shape=shape, spacing=spacing, space_order=4, nbl=40)
plot_velocity(model)
# -
# Next we define the positioning and the wave signal of our source, as well as the location of our receivers. To generate the wavelet for our sources we require the discretized values of time that we are going to use to model a multiple "shot", which depends on the grid spacing used in our model. We will use one source and eleven receivers. The source is located in the position (550, 20). The receivers start at (550, 20) with an even horizontal spacing of 100m at consistent depth.
# +
from examples.seismic import TimeAxis
t0 = 0. # Simulation starts a t=0
tn = 2400. # Simulation last 2.4 second (2400 ms)
dt = model.critical_dt # Time step from model grid spacing
time_range = TimeAxis(start=t0, stop=tn, step=dt)
nrcv = 250 # Number of Receivers
# +
#NBVAL_IGNORE_OUTPUT
from examples.seismic import RickerSource
f0 = 0.010 # Source peak frequency is 10Hz (0.010 kHz)
src = RickerSource(name='src', grid=model.grid, f0=f0,
npoint=1, time_range=time_range)
# Define the wavefield with the size of the model and the time dimension
u = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=4)
# We can now write the PDE
pde = model.m * u.dt2 - u.laplace + model.damp * u.dt
stencil = Eq(u.forward, solve(pde, u.forward))
src.coordinates.data[:, 0] = 400 # Source coordinates
src.coordinates.data[:, -1] = 20. # Depth is 20m
# +
#NBVAL_IGNORE_OUTPUT
from examples.seismic import Receiver
rec = Receiver(name='rec', grid=model.grid, npoint=nrcv, time_range=time_range)
rec.coordinates.data[:,0] = np.linspace(src.coordinates.data[0, 0], model.domain_size[0], num=nrcv)
rec.coordinates.data[:,-1] = 20. # Depth is 20m
# Finally we define the source injection and receiver read function to generate the corresponding code
src_term = src.inject(field=u.forward, expr=src * dt**2 / model.m)
# Create interpolation expression for receivers
rec_term = rec.interpolate(expr=u.forward)
op = Operator([stencil] + src_term + rec_term, subs=model.spacing_map)
op(time=time_range.num-1, dt=model.critical_dt)
# -
# Since we are modelling horizontal layers, we will group these traces and apply an NMO correction to this set of traces.
offset = []
data = []
for i, coord in enumerate(rec.coordinates.data):
off = (src.coordinates.data[0, 0] - coord[0])
offset.append(off)
data.append(rec.data[:,i])
# Auxiliary function for plotting traces:
# +
#NBVAL_IGNORE_OUTPUT
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
mpl.rc('font', size=16)
mpl.rc('figure', figsize=(8, 6))
def plot_traces(rec, xb, xe, t0, tn, colorbar=True):
    """Display a seismic gather as a grayscale image.

    rec : 2-D array of amplitudes (time samples x traces).
    xb, xe : first/last receiver position in km (horizontal extent).
    t0, tn : start/end time; tn is in ms and converted to seconds.
    colorbar : whether to draw an amplitude colorbar on the right.
    """
    # Clip the color range to 1% of the peak amplitude so weak events remain visible.
    scale = np.max(rec)/100
    extent = [xb, xe, 1e-3*tn, t0]
    plot = plt.imshow(rec, cmap=cm.gray, vmin=-scale, vmax=scale, extent=extent)
    plt.xlabel('X position (km)')
    plt.ylabel('Time (s)')
    # Create aligned colorbar on the right
    if colorbar:
        ax = plt.gca()
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(plot, cax=cax)
    plt.show()
# -
# # Common Midpoint Gather
# At this point, we have a dataset composed of the receivers. "If our model wasn't purely horizontal, we would have to sort these traces by common midpoints prior to NMO correction."
plot_traces(np.transpose(data), rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)
#
# # NMO Correction
# We can correct the measured traveltime of a reflected wave $t$ at a given offset $x$ to obtain the traveltime at normal incidence $t_0$ by applying the following equation:
#
# \begin{equation*}
# t = \sqrt{t_0^2 + \frac{x^2}{V_{nmo}^2}}
# \end{equation*}
#
# in which $V_{nmo}$ is the NMO velocity. This equation results from the Pythagorean theorem, and is only valid for horizontal reflectors. There are variants of this equation with different degrees of accuracy, but we'll use this one for simplicity.
# For the NMO Correction we use a grid of size samples x traces.
ns = time_range.num # Number of samples in each trace
grid = Grid(shape=(ns, nrcv)) # Construction of grid with samples X traces dimension
# In this example we will use a constant velocity guide. The guide will be arranged in a SparseFunction with the number of points equal to number of samples in the traces.
vnmo = 1500
vguide = SparseFunction(name='v', grid=grid, npoint=ns)
vguide.data[:] = vnmo
# The computed offset for each trace will be arraged in another SparseFunction with number of points equal to number of traces.
off = SparseFunction(name='off', grid=grid, npoint=nrcv)
off.data[:] = offset
# The previous modelled traces will be arranged in a SparseFunction with the same dimensions as the grid.
amps = SparseFunction(name='amps', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
amps.data[:] = np.transpose(data)
# Now, we define SparseFunctions with the same dimensions as the grid, describing the NMO traveltime equation. The $t_0$ SparseFunction isn't offset dependent, so the number of points is equal to the number of samples.
# +
sample, trace = grid.dimensions
# t_0: zero-offset two-way time, one value per time sample (offset independent).
t_0 = SparseFunction(name='t0', grid=grid, npoint=ns, dimensions=[sample], shape=[grid.shape[0]])
# tt: NMO traveltime t(t_0, offset), one value per (sample, trace) pair.
tt = SparseFunction(name='tt', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
# snmo: NMO-corrected amplitudes, same layout as the input gather.
snmo = SparseFunction(name='snmo', grid=grid, npoint=ns*nrcv, dimensions=grid.dimensions, shape=grid.shape)
# s: integer sample index derived from tt, used to gather amplitudes.
s = SparseFunction(name='s', grid=grid, dtype=np.intc, npoint=ns*nrcv, dimensions=grid.dimensions,
                   shape=grid.shape)
# -
# The Equation relates traveltimes: the one we can measure ($t_0$) and the one we want to know (t). But the data in our CMP gather are actually a matrix of amplitudes measured as a function of time ($t_0$) and offset. Our NMO-corrected gather will also be a matrix of amplitudes as a function of time (t) and offset. So what we really have to do is transform one matrix of amplitudes into the other.
#
# With Equations we describe the NMO traveltime equation, and use the Operator to compute the traveltime and the samples for each trace.
# +
#NBVAL_IGNORE_OUTPUT
dtms = model.critical_dt/1000 # Time discretization in ms
# E1: zero-offset time for each sample index.
# E2: the NMO hyperbola t = sqrt(t_0^2 + x^2 / v^2).
# E3: convert the corrected traveltime back into a sample index.
E1 = Eq(t_0, sample*dtms)
E2 = Eq(tt, sp.sqrt(t_0**2 + (off[trace]**2)/(vguide[sample]**2) ))
E3 = Eq(s, sp.floor(tt/dtms))
op1 = Operator([E1, E2, E3])
op1()
# -
# With the computed samples, we remove all that are out of the samples range, and shift the amplitude for the correct sample.
# +
#NBVAL_IGNORE_OUTPUT
# Discard sample indices that fall outside the recorded time range.
s.data[s.data >= time_range.num] = 0
# E4: gather each amplitude from its NMO-shifted sample position.
E4 = Eq(snmo, amps[s[sample, trace], trace])
op2 = Operator([E4])
op2()
stack = snmo.data.sum(axis=1) # We can stack traces and create a ZO section!!!
plot_traces(snmo.data, rec.coordinates.data[0][0]/1000, rec.coordinates.data[nrcv-1][0]/1000, t0, tn)
# -
# # References:
#
# https://library.seg.org/doi/full/10.1190/tle36020179.1
# https://wiki.seg.org/wiki/Normal_moveout
# https://en.wikipedia.org/wiki/Normal_moveout
| examples/seismic/tutorials/10_nmo_correction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Demo: MultiContainer with two sequential ShiftAmountActivities
# The basic steps to set up an OpenCLSim simulation are:
# * Import libraries
# * Initialise simpy environment
# * Define object classes
# * Create objects
# * Create sites
# * Create vessels
# * Create activities
# * Register processes and run simpy
#
# ----
#
# In certain scenarios it is important to distinguish which types of objects are available in a container. HasContainer does not provide this capability, while MultiContainer does.
#
# A MultiContainer represents a named set of containers. The number of allowed containers is limited by parameter **store_capacity**.
#
# For each container a name (**id_** property), a **capacity** and **level** is specified.
#
# A MultiContainer can be initialized with parameter **initials** as provided in the example for site and vessel configuration below.
#
# In this example two ShiftAmountActivities are packed in a SequentialActivity, loading one piece of 'Cargo type 1' and one piece of 'Cargo type 2' from 'from_site' to 'vessel01'.
# #### 0. Import libraries
# +
import datetime, time
import simpy
import shapely.geometry
import pandas as pd
import openclsim.core as core
import openclsim.model as model
import openclsim.plot as plot
# -
# #### 1. Initialise simpy environment
# setup environment
simulation_start = 0
my_env = simpy.Environment(initial_time=simulation_start)
# #### 2. Define object classes
# +
# create a Site object based on desired mixin classes
# Compose the Site class dynamically from OpenCLSim mixins; HasMultiContainer
# (instead of HasContainer) gives it named, typed cargo containers.
Site = type(
    "Site",
    (
        core.Identifiable,
        core.Log,
        core.Locatable,
        core.HasMultiContainer,
        core.HasResource,
    ),
    {},
)
# create a TransportProcessingResource object based on desired mixin classes
# MultiContainerDependentMovable makes the vessel's speed a function of its
# (multi-)container state; Processor lets it perform load/unload work.
TransportProcessingResource = type(
    "TransportProcessingResource",
    (
        core.Identifiable,
        core.Log,
        core.MultiContainerDependentMovable,
        core.Processor,
        core.HasResource,
        core.LoadingFunction,
        core.UnloadingFunction,
    ),
    {},
)
# -
# #### 3. Create objects
# ##### 3.1. Create site object(s)
# The class HasContainer has now been replaced with HasMultiContainer. The **from_site** can contain four different container types, but right now contains only two: one called 'Cargo type 1' with a capacity of 10 and a level of 10, and one called 'Cargo type 2' with a capacity of 5 and a level of 5.
# prepare input data for from_site
location_from_site = shapely.geometry.Point(4.18055556, 52.18664444)
data_from_site = {"env": my_env,
"name": "from_site",
"geometry": location_from_site,
"store_capacity": 4,
"initials": [
{"id": "Cargo type 1", "level": 10, "capacity": 10},
{"id": "Cargo type 2", "level": 5, "capacity": 5},
],
}
# instantiate from_site
from_site = Site(**data_from_site)
# ##### 3.2. Create vessel object(s)
# The class ContainerDependentMovable has been replaced with MultiContainerDependentMovable. The vessel has two containers, one for 'Cargo type 1' and one for 'Cargo type 2', each with a capacity of two and a current level of zero.
# prepare input data for vessel_01
data_vessel01 = {"env": my_env,
"name": "vessel01",
"geometry": location_from_site,
"loading_rate": 1,
"unloading_rate": 1,
"store_capacity": 4,
"initials": [
{"id": "Cargo type 1", "level": 0, "capacity": 2},
{"id": "Cargo type 2", "level": 0, "capacity": 2},
],
"compute_v": lambda x: 10
}
# instantiate vessel_01
vessel01 = TransportProcessingResource(**data_vessel01)
# ## ShiftAmount Activity for MultiContainer
# The **amount** specifies the objects to be transfered and the **id_** parameter specifies to which container this activity relates.
# initialise registry
registry = {}
# +
# create a list of the sub processes and include reporting_activity
sub_processes = [
model.ShiftAmountActivity(
env=my_env,
name="Transfer cargo type 1",
registry=registry,
processor=vessel01,
origin=from_site,
destination=vessel01,
amount=1,
id_="Cargo type 1",
duration=20,
),
model.ShiftAmountActivity(
env=my_env,
name="Transfer cargo type 2",
registry=registry,
processor=vessel01,
origin=from_site,
destination=vessel01,
amount=1,
id_="Cargo type 2",
duration=40,
)
]
# create a 'sequential activity' that is made up of the 'sub_processes'
sequential_activity = model.SequentialActivity(
env=my_env,
name="Sequential activity of basic activities",
registry=registry,
sub_processes=sub_processes,
)
# -
# #### 4. Register processes and run simpy
# initate the simpy processes defined in the 'sub processes' and run simpy
model.register_processes(sequential_activity)
my_env.run()
# #### 5. Inspect results
# ##### 5.1 Inspect logs
display(plot.get_log_dataframe(vessel01, [*sub_processes]))
display(plot.get_log_dataframe(from_site, [*sub_processes]))
# ##### 5.2 Visualise gantt charts
plot.get_gantt_chart([sequential_activity, vessel01, *sub_processes])
# ##### 5.3 Visualise step charts
fig = plot.get_step_chart([vessel01])
fig = plot.get_step_chart([from_site])
# Observe that first 1 piece of 'Cargo type 1' is loaded from 'from_site' to 'vessel01'. This takes 20 time units. Next 1 piece of 'Cargo type 2' is loaded from 'from_site' to 'vessel01'. This takes 40 time units.
#
# The original stock of 'Cargo type 1' was 10, and it is reduced to 9 after 20 time units. The original stock of 'Cargo type 2' was 5, and it is reduced to 4 after 60 time units.
| notebooks/10_MultiContainer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regression
#
# Elements of Data Science
#
# by [<NAME>](https://allendowney.com)
#
# [MIT License](https://opensource.org/licenses/MIT)
#
# ### Goals
#
# In the previous notebook we used simple regression to quantify the relationship between two variables.
#
# In this notebook we'll get farther into regression, including multiple regression and one of my all-time favorite tools, logistic regression.
#
# These tools will allow us to explore relationships among sets of variables. As an example, we will use data from the GSS to explore the relationship between income, education, age, and sex.
#
# But first let's understand the limits of single regression.
# +
# If we're running on Colab, install empiricaldist
# https://pypi.org/project/empiricaldist/
import sys
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
# !pip install empiricaldist
# -
# ## Limits of single regression
#
# In a previous exercise, you made a scatter plot of vegetable consumption as a function of income, and plotted a line of best fit.
#
# Here's what it looks like:
#
# 
#
# The slope of the line is 0.07, which means that the difference between the lowest and highest income brackets is about 0.49 servings per day.
#
# So that's not a very big difference.
#
# But it was an arbitrary choice to plot vegetables as a function of income. We could have plotted it the other way around, like this.
#
# 
#
# The slope of this line is about 0.2, which means that the difference between 0 and 10 servings per day is about 2 income levels, roughly from level 5 to level 7.
#
# And the difference between income levels 5 and 7 is about $30,000 per year, which is substantial.
#
# So if we use vegetable consumption to predict income, we see a big difference. But when we used income to predict vegetable consumption, we saw a small difference.
#
# This example shows that regression is not symmetric; the regression of A onto B is not the same as the regression of B onto A.
#
# We can see that more clearly by putting the two figures side by side and plotting both regression lines on both figures.
#
# 
#
# They are different because they are based on different assumptions.
#
# * On the left, we treat income as a known quantity and vegetable consumption as random.
#
# * On the right, vegetable consumption is known and income is random.
#
# When you run a regression model, you make decisions about how to treat the data, and those decisions affect the results you get.
#
# This example is meant to demonstrate another point, which is that regression doesn't tell you much about causation.
#
# If you think people with lower income can't afford vegetables, you might look at the figure on the left and conclude that it doesn't make much difference.
#
# If you think better diet increases income, the figure on the right might make you think it does.
#
# But in general, regression can't tell you what causes what. If you see a relationship between any two variables, A and B, the reason for the relationship might be that A causes B, B causes A, or there might be other factors that cause both A and B.
#
# Regression alone can't tell you which way it goes.
#
# However, we have tools for teasing apart relationships among multiple variables; one of the most important is multiple regression.
#
# SciPy doesn't do multiple regression, so we'll to switch to a new library, StatsModels. Here's the import statement.
import statsmodels.formula.api as smf
# For the first example, we'll load data from the Behavioral Risk Factor Surveillance Survey (BRFSS), which we saw in the previous notebook.
# +
# Get the data file
import os
if not os.path.exists('brfss.hdf5'):
# !wget https://github.com/AllenDowney/ElementsOfDataScience/raw/master/brfss.hdf5
# +
import pandas as pd
brfss = pd.read_hdf('brfss.hdf5', 'brfss')
# -
# Now we can use StatsModels to fit a regression mode. The name of the function is `ols`, which stands for "ordinary least squares", another name for regression.
results = smf.ols('INCOME2 ~ _VEGESU1', data=brfss).fit()
# The first argument is a formula string that specifies that we want to regress income as a function of vegetable consumption.
#
# The second argument is the BRFSS DataFrame. The names in the formula correspond to columns in the DataFrame.
#
# The result from `ols()` represents the model; then we run `fit()` to get the results.
type(results)
# Results is a `RegressionResultsWrapper`, which contains a lot of information, but the first thing we'll look at is the attribute `params`, which contains the estimated intercept and the slope associated with `_VEGESU1`.
results.params
# And we get the same results we got from SciPy, so that's good!
#
# In the next section we'll move on to multiple regression. But first, some exercises.
# **Exercise:** In the BRFSS dataset, there is a strong relationship between vegetable consumption and income. The income of people who eat 8 servings of vegetables per day is double the income of people who eat none, on average.
#
# Which of the following conclusions can we draw from this data?
#
# A. Eating a good diet leads to better health and higher income.
#
# B. People with higher income can afford a better diet.
#
# C. People with high income are more likely to be vegetarians.
# +
# Solution goes here
# -
# **Exercise:** Let's run a regression using SciPy and StatsModels, and confirm we get the same results.
#
# - Compute the regression of `_VEGESU1` as a function of `INCOME2` using SciPy's `linregress()`.
#
# - Compute the regression of `_VEGESU1` as a function of `INCOME2` using StatsModels' `smf.ols()`.
# +
# Solution goes here
# +
# Solution goes here
# -
# ## Multiple regression
#
# Now that we have StatsModels, getting from single to multiple regression is easy.
#
# As an example, we'll use data from the General Social Survey, which we saw in Notebook 7, and we'll explore variables that are related to income.
#
# First, let's load the GSS data.
# +
# Get the data file
import os
if not os.path.exists('gss.hdf5'):
# !wget https://github.com/AllenDowney/ElementsOfDataScience/raw/master/gss.hdf5
# +
import pandas as pd
gss = pd.read_hdf('gss.hdf5', 'gss')
# -
# Then we run a regression of real income as a function of years of education. The first argument of `ols()` is a formula that specifies the variables in the regression:
results = smf.ols('realinc ~ educ', data=gss).fit()
results.params
# On the left, "real income" is the variable we are trying to predict; on the right, "education" is the variable we are using to inform the predictions.
#
# And here are the results. The estimated slope is almost 3600, which means that each additional year of education is associated with an additional $3600 of income.
#
# But income also depends on age, so it would be good to include that in the model, too.
#
# Here's how:
results = smf.ols('realinc ~ educ + age', data=gss).fit()
results.params
# On the right side of the formula, you can list as many variables as you like, in this case, education and age.
#
# The `plus` sign indicates that we expect the contributions of the two variables to be additive, which is a common assumption for models like this.
#
# The estimated slope for education is a little higher than what we saw before, about $3650 per year.
#
# The estimated slope for `age` is only about $80 per year, which is surprisingly small.
#
# To see what's going on, let's look more closely at the relationship between income and age.
# ## Groupby
#
# I'll use `groupby()`, which is a Pandas feature we have not seen before, to divide the DataFrame into age groups. The result is a `GroupBy` object that contains one group for each value of `age`.
grouped = gss.groupby('age')
type(grouped)
# The `GroupBy` object behaves like a DataFrame in many ways. You can use brackets to select a column, like `realinc` in this example, and then invoke a method like `mean()`.
mean_income_by_age = grouped['realinc'].mean()
# The result is a Pandas series that contains the mean income for each age group, which we can plot like this.
# +
import matplotlib.pyplot as plt
plt.plot(mean_income_by_age, 'o', alpha=0.5)
plt.xlabel('Age (years)')
plt.ylabel('Income (1986 $)')
plt.title('Average income, grouped by age');
# -
# Average income increases from age 20 to age 50, then starts to fall.
#
# And that explains why the estimated slope is so small, because the relationship is non-linear. Remember that correlation and single regression can't measure non-linear relationships.
#
# But multiple regression can! To describe a non-linear relationship, one option is to add a new variable that is a non-linear combination of other variables.
#
# As an example, I'll create a new variable called `age2` that equals `age` squared.
gss['age2'] = gss['age']**2
# Now we can run a regression with both `age` and `age2` on the right side.
model = smf.ols('realinc ~ educ + age + age2', data=gss)
results = model.fit()
results.params
# The slope associated with age is substantial, about $1700 per year.
#
# The slope associated with `age2` is about -$17, but that's harder to interpret.
#
# In the next section, we'll see methods to interpret multivariate models and visualize the results. But first, let's practice multiple regression.
# **Exercise:** To get a closer look at the relationship between income and education, let's use the variable `educ` to group the data, then plot mean income in each group.
#
# - Group `gss` by `educ`. Store the result in `grouped`.
#
# - From `grouped`, extract `realinc` and compute the mean.
#
# - Plot mean income in each education group as a scatter plot.
#
# What can you say about the relationship between education and income? Does it look like a linear relationship?
# +
# Solution goes here
# -
# **Exercise:** The graph in the previous exercise suggests that the relationship between income and education is non-linear. So let's try fitting a non-linear model.
#
# - Add a column named `educ2` to the `gss` DataFrame; it should contain the values from `educ` squared.
#
# - Run a regression model that uses `educ`, `educ2`, `age`, and `age2` to predict `realinc`.
#
# +
# Solution goes here
# -
# ## Visualizing regression results
#
# In the previous section we ran a multiple regression model to characterize the relationships between income, age, and education.
#
# Because the model includes quadratic terms, the parameters are hard to interpret. For example, you might notice that the parameter for `educ` is negative, and that might be a surprise, because it suggests that higher education is associated with lower income.
#
# But the parameter for `educ2` is positive, and that makes a big difference. In this section we'll see a way to interpret the model visually and validate it against data.
#
# Here's the model from the previous exercise.
# +
gss['age2'] = gss['age']**2
gss['educ2'] = gss['educ']**2
model = smf.ols('realinc ~ educ + educ2 + age + age2', data=gss)
results = model.fit()
results.params
# -
# Sometimes we can understand a model by looking at its parameters, but often it is better to look at its predictions.
#
# The regression results provide a method called `predict()` that uses the model to generate predictions.
#
# It takes a DataFrame as a parameter and returns a Series with a prediction for each row in the DataFrame.
#
# To use it, I'll create a new DataFrame with `age` running from 18 to 89, and `age2` set to `age` squared.
# +
import numpy as np
df = pd.DataFrame()
df['age'] = np.linspace(18, 89)
df['age2'] = df['age']**2
# -
# Next, I'll pick a level for `educ`, like 12 years, which is the most common value. When you assign a single value to a column in a DataFrame, Pandas makes a copy for each respondent.
df['educ'] = 12
df['educ2'] = df['educ']**2
# Then we can use `results` to predict the average income for each age group, holding education constant.
pred12 = results.predict(df)
# The result from `predict()` is a Series with one prediction for each row. So we can plot it with age on the `x` axis and the predicted income for each age group on the `y` axis.
#
# And we can plot the data for comparison.
# +
plt.plot(mean_income_by_age, 'o', alpha=0.5)
plt.plot(df['age'], pred12, label='High school', color='C4')
plt.xlabel('Age (years)')
plt.ylabel('Income (1986 $)')
plt.title('Income versus age, grouped by education level')
plt.legend();
# -
# The blue dots show the average income in each age group.
#
# The orange line shows the predictions generated by the model, holding education constant.
#
# This plot shows the shape of the model, a downward-facing parabola.
#
# We can do the same thing with other levels of education, like 14 years, which is the nominal time to earn an Associate's degree, and 16 years, which is the nominal time to earn a Bachelor's degree.
# +
plt.plot(mean_income_by_age, 'o', alpha=0.5)
df['educ'] = 16
df['educ2'] = df['educ']**2
pred16 = results.predict(df)
plt.plot(df['age'], pred16, label='Bachelor')
df['educ'] = 14
df['educ2'] = df['educ']**2
pred14 = results.predict(df)
plt.plot(df['age'], pred14, label='Associate')
plt.plot(df['age'], pred12, label='High school', color='C4')
plt.xlabel('Age (years)')
plt.ylabel('Income (1986 $)')
plt.title('Income versus age, grouped by education level')
plt.legend();
# -
# The lines show mean income, as predicted by the model, as a function of age, for three levels of education.
#
# This visualization helps validate the model, since we can compare the predictions with the data. And it helps us interpret the model since we can see the separate contributions of age and education.
#
# In the exercises, you'll have a chance to run a multiple regression, generate predictions, and visualize the results.
# **Exercise:** At this point, we have a model that predicts income using age, education, and sex.
#
# Let's see what it predicts for different levels of education, holding `age` constant.
#
# - Create an empty DataFrame named `df`.
#
# - Using `np.linspace()`, add a variable named `educ` to `df` with a range of values from `0` to `20`.
#
# - Add a variable named `age` with the constant value `30`.
#
# - Use `df` to generate predicted income as a function of education.
# +
# Solution goes here
# +
# Solution goes here
# -
# **Exercise:** Now let's visualize the results from the previous exercise!
#
# - Group the GSS data by `educ` and compute the mean income in each education group.
#
# - Plot mean income for each education group as a scatter plot.
#
# - Plot the predictions from the previous exercise.
#
# How do the predictions compare with the data?
# +
# Solution goes here
# -
# **Optional Exercise:** Extend the previous exercise to include predictions for a few other age levels.
# ## Logistic regression
#
# At last we have come to one of my favorite topics, logistic regression.
#
# To understand logistic regression, we have to start with categorical variables.
#
# Most of the variables we have used so far --- like income, age, and education --- are numerical.
#
# But variables like sex and race are categorical; that is, each respondent belongs to one of a specified set of categories.
#
# With StatsModels, it is easy to include a categorical variable as part of a regression model.
#
# Here's how. In the formula string, the letter C indicates that `sex` is a categorical variable.
formula = 'realinc ~ educ + educ2 + age + age2 + C(sex)'
results = smf.ols(formula, data=gss).fit()
results.params
# The regression treats the value `sex=1`, which is male, as the default, and reports the difference associated with the value `sex=2`, which is female.
#
# So this result indicates that income for women is about $4100 less than for men, after controlling for age and education.
#
# If a categorical variable has only two values, it's called a boolean variable. For example, one of the questions in the General Social Survey asks "Would you favor or oppose a law which would require a person to obtain a police permit before he or she could buy a gun?"
#
# The column is called `gunlaw`, and here are the values. `1` means yes and `2` means no, so most respondents are in favor.
#
# To explore the relationship between this variable and factors like age, sex, and education, we can use logistic regression.
#
# StatsModels provides logistic regression, but to use it, we have to recode the variable so `1` means `yes` and 0 means no. We can do that by replacing `2` with `0`.
gss['gunlaw'].value_counts()
# Recode the values so `1` means yes and `0` means no. We assign the recoded
# Series back to the column: calling `replace(..., inplace=True)` on a column
# selection is deprecated in recent versions of pandas, and under copy-on-write
# it can silently fail to modify the DataFrame.
gss['gunlaw'] = gss['gunlaw'].replace({2: 0})
# And we can check the results.
gss['gunlaw'].value_counts()
# Now we can run the regression. Instead of `ols()`, we use `logit()`, which is named for the logit function, which is related to logistic regression.
formula = 'gunlaw ~ age + age2 + educ + educ2 + C(sex)'
results = smf.logit(formula, data=gss).fit()
# Estimating the parameters for the logistic model is an iterative process, so the output contains information about the number of iterations.
#
# Other than that, everything is the same as what we have seen before.
#
# And here are the results.
results.params
# The parameters are in the form of "log odds", which you may or may not be familiar with. I won't explain them in detail here, except to say that positive values are associated with things that make the outcome more likely; negative values make the outcome less likely.
#
# For example, the parameter associated with `sex=2` is 0.75, which indicates that women are more likely to support this form of gun control. To see how much more likely, we can generate and predict distributions, as we did with linear regression.
#
# As an example, I'll generate predictions for different ages and sexes, with education held constant.
#
# First we need a DataFrame with `age` and `educ`.
df = pd.DataFrame()
df['age'] = np.linspace(18, 89)
df['educ'] = 12
# Then we can compute `age2` and `educ2`.
df['age2'] = df['age']**2
df['educ2'] = df['educ']**2
# We can generate predictions for men like this.
df['sex'] = 1
pred1 = results.predict(df)
# And for women like this.
df['sex'] = 2
pred2 = results.predict(df)
# Now, to visualize the results, I start by plotting the data. As we've done before, we'll divide the respondents into age groups and compute the mean in each group. The mean of a binary variable is the fraction of people in favor.
#
# Then we can plot the predictions, for men and women, as a function of age.
# +
grouped = gss.groupby('age')
favor_by_age = grouped['gunlaw'].mean()
plt.plot(favor_by_age, 'o', alpha=0.5)
plt.plot(df['age'], pred2, label='Female')
plt.plot(df['age'], pred1, label='Male')
plt.xlabel('Age')
plt.ylabel('Probability of favoring gun law')
plt.title('Support for gun law versus age, grouped by sex')
plt.legend();
# -
# According to the model, people near age 50 are least likely to support gun control (at least as this question was posed).
#
# And women are more likely to support it than men, by almost 15 percentage points.
#
# Logistic regression is a powerful tool for exploring relationships between a binary variable and the factors that predict it.
#
# In the exercises, you'll explore the factors that predict support for legalizing marijuana.
# **Exercise:** Let's use logistic regression to predict a binary variable. Specifically, we'll use age, sex, and education level to predict support for legalizing cannabis (marijuana) in the U.S.
#
# In the GSS dataset, the variable `grass` records the answer to the question "Do you think the use of marijuana should be made legal or not?"
#
# 1. First, use `replace` to recode the `grass` column so that `1` means yes and `0` means no. Use `value_counts` to check.
#
# 2. Next, use `smf.logit()` to predict `grass` using the variables `age`, `age2`, `educ`, and `educ2`, along with `sex` as a categorical variable. Display the parameters. Are men or women more likely to support legalization?
#
# 3. To generate predictions, start with an empty DataFrame. Add a column called `age` that contains a sequence of values from 18 to 89. Add a column called `educ` and set it to 12 years. Then compute a column, `age2`, which is the square of `age`, and a column, `educ2`, which is the square of `educ`.
#
# 4. Use `predict` to generate predictions for men (`sex=1`) and women (`sex=2`).
#
# 5. Generate a plot that shows (1) the average level of support for legalizing marijuana in each age group, (2) the level of support the model predicts for men as a function of age, and (3) the level of support predicted for women as a function of age.
# +
# Solution goes here
# +
# Solution goes here
# +
# Solution goes here
# +
# Solution goes here
# +
# Solution goes here
# -
# ## Summary
#
# At this point, I'd like to summarize the topics we've covered so far, and make some connections that might clarify the big picture.
#
# A central theme of this course is "exploratory data analysis", which is a process and a set of techniques for working with data, especially in the early stages of a project, or when you are working with a new data set.
#
# The last four notebooks demonstrate the steps of this process:
#
# - Notebook 7 is about importing and cleaning the data, and checking for errors and other special conditions. This might not be the most exciting part of the process, but if you skip these steps, it can come back to haunt you. Time spent cleaning and validating data can save you from embarrassing, and sometimes expensive, errors.
#
# - Notebook 8 is about exploring variables one at a time, visualizing distributions using PMFs, CDFs, and KDE, and choosing appropriate summary statistics.
#
# - In Notebook 9 we explored relationships between variables two at a time, using scatter plots and other visualizations; and we quantified those relationships using correlation and simple regression.
#
# - Finally, in Notebook 10, we explored multivariate relationships using multiple regression and logistic regression.
#
# From Notebook 7, you might remember that we looked at the distribution of birth weights from the National Survey of Family Growth.
#
# If you only remember one thing, remember the 99 pound babies, and how much it can mess up your results if you don't validate the data.
#
# In Notebook 8 we looked at the distributions of age, income, and other variables from the General Social Survey.
#
# I recommended using CDFs as the best way to explore distributions.
#
# But when you present to audiences that are not familiar with CDFs, you can use PMFs if there are a small number of unique values, and KDE if there are a lot.
#
# In Notebook 9 we looked at heights and weights from the BRFSS, and developed several ways to visualize relationships between variables, including scatter plots, violin plots, and box plots like this one.
#
# We used the coefficient of correlation to quantify the strength of a relationship. We also used simple regression to estimate slope, which is often what we care more about, not correlation.
#
# But remember that both of these methods only capture linear relationships; if the relationship is non-linear, they can be misleading. Always look at a visualization, like a scatter plot, before computing correlation or simple regression.
#
# In Notebook 10 we used multiple regression to add control variables and to describe non-linear relationships. And finally we used logistic regression to explain and predict binary variables.
#
# We moved through a lot of material quickly, but if you practice and apply these methods to other questions and other datasets, you will learn more as you go.
#
# Also, I am happy to report that you now have the prerequisites you need to appreciate [this xkcd cartoon](https://xkcd.com/2048/).
#
# <img src="https://imgs.xkcd.com/comics/curve_fitting_2x.png" width="400">
| 10_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# <img src="../../img/ods_stickers.jpg" />
#
# ## [mlcourse.ai](mlcourse.ai) – Open Machine Learning Course
# Author: [<NAME>](https://yorko.github.io) (@yorko). Edited by <NAME> (@feuerengel). This material is subject to the terms and conditions of the [Creative Commons CC BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. Free use is permitted for any non-commercial purpose.
# # <center>Assignment #3. Optional part
# ## <center> Implementation of the decision tree algorithm
import numpy as np
from matplotlib import pyplot as plt
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification, make_regression, load_digits, load_boston
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, mean_squared_error
# Let's fix `random_state` (a.k.a. random seed) beforehand.
RANDOM_STATE = 17
# **Implement the class `DecisionTree`**
# **Specification:**
# - the class is inherited from `sklearn.BaseEstimator`;
# - class constructor has the following parameters:
# `max_depth` - maximum depth of the tree (`numpy.inf` by default);
# `min_samples_split` - the minimum number of instances in a node for a splitting to be done (2 by default);
# `criterion` - split criterion ('gini' or 'entropy' for classification, 'variance' or 'mad_median' for regression; 'gini' by default);
#
# A functional to be maximized to find an optimal partition at a given node has the form
# $$Q(X, j, t) = F(X) - \dfrac{|X_l|}{|X|} F(X_l) - \dfrac{|X_r|}{|X|} F(X_r),$$
# where $X$ are samples at a given node, $X_l$ and $X_r$ are partitions of samples $X$ into two parts
# with the following condition $[x_j < t]$, and $F(X)$ is a partition criterion.
#
# For classification: let $p_i$ be the fraction of the instances of the $i$-th class in the dataset $X$.
#
# 'gini': Gini impurity $F(X) = 1 -\sum_{i = 1}^K p_i^2$.
#
# 'entropy': Entropy $F(X) = -\sum_{i = 1}^K p_i \log_2(p_i)$.
#
# For regression: $y_j = y(x_j)$ - is a target for an instance $x_j$, $y = (y_1, \dots, y_{|X|})$ - is a target vector.
#
# 'variance': Variance (mean quadratic deviation from average) $F(X) = \dfrac{1}{|X|} \sum_{x_j \in X}(y_j - \dfrac{1}{|X|}\sum_{x_i \in X}y_i)^2$
#
# 'mad_median': Mean deviation from the median $F(X) = \dfrac{1}{|X|} \sum_{x_j \in X}|y_j - \mathrm{med}(y)|$
#
# - the class has several methods: `fit`, `predict` and `predict_proba`;
# - the`fit` method takes the matrix of instances `X` and a target vector `y` (`numpy.ndarray` objects) and returns an instance of the class `DecisionTree` representing the decision tree trained on the dataset `(X, y)` according to parameters set in the constructor;
# - the `predict_proba` method takes the matrix of instances `X` and returns the matrix `P` of a size `X.shape[0] x K`, where `K` is the number of classes and $p_{ij}$ is the probability of an instance in $i$-th row of `X` to belong to class $j \in \{1, \dots, K\}$.
# - the `predict` method takes the matrix of instances `X` and returns a prediction vector; in case of classification, prediction for an instance $x_i$ falling into leaf $L$ will be the class, mostly represented among instances in $L$. In case of regression, it'll be the mean value of targets for all instances in leaf $L$.
# +
def entropy(y):
    """Entropy criterion: F(X) = -sum_i p_i * log2(p_i), where p_i is the
    fraction of instances of class i in y. To be implemented for the assignment."""
    pass


def gini(y):
    """Gini impurity criterion: F(X) = 1 - sum_i p_i**2, where p_i is the
    fraction of instances of class i in y. To be implemented for the assignment."""
    pass


def variance(y):
    """Variance criterion for regression: mean squared deviation of the
    targets y from their mean. To be implemented for the assignment."""
    pass


def mad_median(y):
    """Regression criterion: mean absolute deviation of the targets y from
    their median. To be implemented for the assignment."""
    pass
# -
# The `Node` class implements a node in the decision tree.
class Node():
    """A single node of the decision tree.

    Internal nodes store the split (feature_idx, threshold) and links to the
    two children; leaves keep the targets of the samples that reached them.
    """
    def __init__(self, feature_idx=0, threshold=0, labels=None, left=None, right=None):
        self.feature_idx = feature_idx  # index of the feature used for the split
        self.threshold = threshold      # split threshold t in the condition [x_j < t]
        self.labels = labels            # targets of the samples that reached this node
        self.left = left                # child subtree (conventionally samples with x_j < t)
        self.right = right              # child subtree (conventionally samples with x_j >= t)
# Let's determine the function for calculating a prediction in a leaf. For regression, let's take the mean for all values in a leaf, for classification - the most popular class in leaf.
class DecisionTree(BaseEstimator):
    """Skeleton of the decision tree estimator to be implemented.

    See the specification above: `criterion` is 'gini' or 'entropy' for
    classification, 'variance' or 'mad_median' for regression.
    """
    def __init__(self, max_depth=np.inf, min_samples_split=2,
                 criterion='gini', debug=False):
        # TODO: store the hyper-parameters on self (sklearn convention: keep
        # the constructor argument names unchanged so get_params()/set_params() work).
        pass

    def fit(self, X, y):
        # TODO: build the tree from the instance matrix X and target vector y;
        # return self, per the specification.
        pass

    def predict(self, X):
        # TODO: return a prediction vector with one entry per row of X
        # (majority class in the leaf for classification, leaf mean for regression).
        pass

    def predict_proba(self, X):
        # TODO: classification only — return an (n_samples x n_classes) matrix
        # of class membership probabilities.
        pass
# ## Testing the implemented algorithm
# ### Classification
# Download the dataset `digits` using the method `load_digits`. Split the data into train and test with the `train_test_split` method, use parameter values `test_size=0.2`, and `random_state=17`. Try to train shallow decision trees and make sure that gini and entropy criteria return different results.
# +
# Your code here
# -
# Using 5-folds cross-validation (`GridSearchCV`) pick up the optimal values of the `max_depth` and `criterion` parameters. For the parameter `max_depth` use range(3, 11), for criterion use {'gini', 'entropy'}. Quality measure is `scoring`='accuracy'.
# +
# Your code here
# -
# Draw the plot of the mean quality measure `accuracy` for criteria `gini` and `entropy` depending on `max_depth`.
# +
# Your code here
# -
# **1. Choose all correct statements:**
# 1. Optimal value of the `max_depth` parameter is on the interval [4, 9] for both criteria.
# 2. Created plots have no intersection on the interval [3, 10]
# 3. Created plots intersect each other only once on the interval [3, 10].
# 4. The best quality for `max_depth` on the interval [3, 10] is reached using `gini` criterion .
# 5. Accuracy is strictly increasing at least for one of the criteria, when `max_depth` is also increasing on the interval [3, 10]
# **2. What are the optimal values for max_depth and criterion parameters?**
# 1. max_depth = 7, criterion = 'gini';
# 2. max_depth = 7, criterion = 'entropy';
# 3. max_depth = 10, criterion = 'entropy';
# 4. max_depth = 10, criterion = 'gini';
# 5. max_depth = 9, criterion = 'entropy';
# 6. max_depth = 9, criterion = 'gini';
# Train decision tree on `(X_train, y_train)` using the optimal values of `max_depth` and `criterion`. Compute class probabilities for `X_test`.
# +
# Your code here
# -
# Using the given matrix, compute the mean class probabilities for all instances in `X_test`.
# +
# Your code here
# -
# **3. What is the maximum probability in a resulted vector?**
# 1. 0.127
# 2. 0.118
# 3. 1.0
# 4. 0.09
# ## Regression
# Download the dataset `boston` using the method `load_boston`. Split the data into train and test with the `train_test_split` method, use parameter values `test_size=0.2`, `random_state=17`. Try to train shallow regression decision trees and make sure that `variance` and `mad_median` criteria return different results.
# +
# Your code here
# -
# Using 5-folds cross-validation (`GridSearchCV`) pick up the optimal values of the `max_depth` and `criterion` parameters. For the parameter `max_depth` use `range(2, 9)`, for `criterion` use {'variance', 'mad_median'}. Quality measure is `scoring`='neg_mean_squared_error'.
# +
# Your code here
# -
# Draw the plot of the mean quality measure `neg_mean_squared_error` for criteria `variance` and `mad_median` depending on `max_depth`.
# +
# Your code here
# -
# **4. Choose all correct statements:**
# 1. Created plots have no intersection on the interval [2, 8].
# 2. Created plots intersect each other only once on the interval [2, 8].
# 3. Optimal value of the `max_depth` for each of the criteria is on the border of the interval [2, 8].
# 4. The best quality at `max_depth` on the interval [2, 8] is reached using `mad_median` criterion.
# **5. What are the optimal values for `max_depth` and `criterion` parameters?**
# 1. max_depth = 9, criterion = 'variance';
# 2. max_depth = 5, criterion = 'mad_median';
# 3. max_depth = 4, criterion = 'variance';
# 4. max_depth = 2, criterion = 'mad_median';
# 5. max_depth = 4, criterion = 'mad_median';
# 6. max_depth = 5, criterion = 'variance'.
| assignments/rated/assignment3_optional_implement_dt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Working with data in python
import pandas as pd # allow pandas to be used in this notebook
# ## importing data
pd.read_csv("../data/clinical.csv") # import data as csv file
clinical_df = pd.read_csv("../data/clinical.csv") # assign data to object
clinical_df.head() # print top few rows
# +
## Challenge: What do you need to do to import the following files correctly:
#clinical.tsv
#clinical.txt
# -
pd.read_csv("../data/clinical.tsv", sep='\t')
pd.read_csv("../data/clinical.txt", sep=' ')
type(clinical_df) # look at data type
clinical_df.columns # view column names
clinical_df.dtypes # look at type of data in each column
# ## data types: pandas vs native python
#
# * object = string
# * int64 = integer (64 bit)
# * float64 = float
# * datetime64 = N/A
# ## Selecting data using labels (columns) and row ranges
# select a 'subset' of the data using the column name
clinical_df['tumor_stage']
clinical_df['tumor_stage'].head() # show preview
clinical_df['primary_diagnosis'].dtype # assess data type
# use the column name as an 'attribute'; gives the same output
clinical_df.tumor_stage
clinical_df.tumor_stage.head()
# +
# What happens if you ask for a column that doesn't exist?
#clinical_df['tumorstage']
# -
# Select two columns at once
clinical_df[['tumor_stage', 'vital_status']]
# +
## Challenge: does the order of the columns you list matter?
# -
# Select rows 0, 1, 2 (row 3 is not selected)
clinical_df[0:3]
# Select the first 5 rows (rows 0, 1, 2, 3, 4)
# Slicing with [:5] selects from the start up to, but not including, row 5;
# the original `[1: ]` selected from row 1 through the end, which did not
# match the comment.
clinical_df[:5]
clinical_df[-1:] # select last element
# +
## Challenge: how would you extract the last 10 rows of the dataset?
# -
# ## Copying vs referencing objects
ref_clinical_df = clinical_df # this only references the previous object
true_copy_clinical_df = clinical_df.copy() # copies object
ref_clinical_df[0:3] = 0 # assign 0 to first three rows of data
## Challenge: What is the difference between ref_clinical_df and clinical_df given the following?
# ref_clinical_df was created using the '=' operator
ref_clinical_df.head()
# clinical_df is the original dataframe
clinical_df.head()
# re-load fresh copy of object
clinical_df = pd.read_csv("../data/clinical.csv")
# ## Slicing subsets of rows and columns
# iloc is integer indexing [row slicing, column slicing]
# locate specific data element
clinical_df.iloc[2, 6]
# select range of data
clinical_df.iloc[0:3, 1:4]
# stop/end bound is NOT inclusive (e.g., up to but not including 3)
# can use empty stop boundary to indicate end of data
clinical_df.iloc[0:, 1:4]
# loc is for label indexing (integers interpreted as labels)
clinical_df.loc[1:4]
# start and stop bound are inclusive
# can use empty stop boundary to indicate end of data
clinical_df.loc[1: ]
# Select all columns for rows with index labels 0, 10, and 6831
# (label-based selection with a list picks exactly those rows)
clinical_df.loc[[0, 10, 6831], ]
# select specified columns for the row labeled 0
clinical_df.loc[0, ['primary_diagnosis', 'tumor_stage', 'age_at_diagnosis']]
# +
## Challenge: why doesn't the following code work?
#clinical_df.loc[2, 6]
# +
## Challenge: how would you extract the last 100 rows for only vital status and days to death?
# -
# ## Calculating summary statistics
# calculate basic stats for all records in single column
clinical_df['age_at_diagnosis'].describe()
# each metric one at a time (only prints last if all executed in one cell!)
clinical_df['age_at_diagnosis'].min()
# convert columns (days to years)
clinical_df['age_at_diagnosis']/365
# convert min to days
clinical_df['age_at_diagnosis'].min()/365
## Challenge: What type of summary stats do you get for object data?
clinical_df['site_of_resection_or_biopsy'].describe()
## Challenge: How would you extract only the standard deviation for days to death?
clinical_df['days_to_death'].std()
| notebooks/week2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark3
# language: ''
# name: pyspark3kernel
# ---
# +
from pyspark.sql.types import *
from pyspark.sql.functions import *
from pyspark import SparkFiles
from pyspark.ml.feature import CountVectorizer
import numpy as np
# Schema for the scraped marketplace listings: every field is read as a
# nullable string (any parsing/casting happens downstream).
schema = StructType([
    StructField("market_name", StringType(), True),
    StructField("vendor_name", StringType(), True),
    StructField("price", StringType(), True),
    StructField("item_name", StringType(), True),
    StructField("ships_from", StringType(), True),
    StructField("description", StringType(), True),
    StructField("date", StringType(), True),
    StructField("adjusted_price", StringType(), True)
])
# Single combined-and-cleaned CSV part file on S3 — presumably the output of an
# earlier pipeline stage (named after it); verify against that job.
files = 's3://15405finalprojectcsvdata/combined_and_cleaned_data/part-00000-c6f08ba4-b88b-4295-8d93-7a9f118df9db-c000.csv'
# multiLine=True handles embedded newlines inside quoted fields;
# DROPMALFORMED silently discards rows that don't fit the schema.
df = spark.read.csv(files, multiLine=True,header=False, mode="DROPMALFORMED", schema=schema)
# Keep only the free-text `description` column for the bag-of-words pipeline.
df = df.drop('market_name', 'vendor_name', 'price', 'item_name', 'ships_from', 'date', 'adjusted_price')
df.show()
'''
mask = np.random.rand(len(df)) < 0.8
temp_train_df = df[mask]
temp_test_df = df[~mask]
train_docs = list(temp_train_df.body)
train_tags = list(temp_train_df.label - 1)
test_docs = list(temp_test_df.body)
test_tags = list(temp_test_df.label - 1)
'''
# +
train_df, test_df = df.randomSplit([0.8, 0.2])
train_df = train_df.where(col('description').isNotNull())
test_df = test_df.where(col('description').isNotNull())
#small = df.sample(False, .001).where(col('description').isNotNull())
#small.show()
# -
cv = CountVectorizer(inputCol="description", outputCol="bow", vocabSize=10000, minDF=3.0, maxDF=0.8)
def proc_dataframe(df, model=None):
    """Tokenize the `description` column and convert it to bag-of-words features.

    df: Spark DataFrame with a non-null string `description` column.
    model: an already-fitted CountVectorizerModel to reuse. When None (the
        default, preserving the original behavior), a new vocabulary is
        fitted on `df` itself.

    NOTE(review): fitting a separate vocabulary per call gives train and test
    splits incompatible feature spaces (and leaks test vocabulary). Fit once
    on the training split and pass the resulting model in for the test split.

    Returns a DataFrame with columns `bow`, `doc_id`, and a constant `label`.
    """
    # Tokenize on single spaces; keep the column name `description` so the
    # CountVectorizer's inputCol still resolves.
    tokens = df.select(split(col('description'), ' ').alias('description'))
    if model is None:
        model = cv.fit(tokens)
    bow_df = model.transform(tokens).select('bow')
    # doc_id is monotonically increasing but not necessarily consecutive;
    # label is a placeholder constant 0.
    return bow_df.withColumn("doc_id", monotonically_increasing_id()).withColumn("label", lit(0))
# +
#small_result = proc_dataframe(small)
train_result = proc_dataframe(train_df)
test_result = proc_dataframe(test_df)
# +
train_final = train_result.rdd.repartition(1).toDF()
test_final = test_result.rdd.repartition(1).toDF()
# +
# write to s3
#train_final.write.parquet('s3://15405finalprojectcsvdata/split_dataset/train_df.parquet')
test_final.write.parquet('s3://15405finalprojectcsvdata/split_dataset/test_df.parquet')
# -
| extract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1><center> Unstructured data processing and preparation</center></h1>
#
# # Lesson Goals
# <div text-align:justify>We are going to process fully unstructured data that I scraped from three different websites and saved as a CSV file. Processing unstructured data means extracting structure from it.</div>
#
#
#
# # Prerequisites
# 1. install NLTK
# 2. Install Anaconda
#
# NB:make sure Jupyter Notebook running
import pandas as pd
files_path = 'C:/Users/agurm/OneDrive/Desktop/Sarcasticbot/'
# # First dataset
# The Wordball Joke Dataset, link.
#
# This dataset consists of three files, namely:
#
# qajokes1.1.2.csv: with 75,114 pairs.
# t_lightbulbs.csv: with 2,640 pairs.
# t_nosubject.csv: with 32,120 pairs.
# However, I'm not going to incorporate t_lightbulbs.csv in my dataset because I don't want that many examples of one topic. Besides, all the examples are similar in structure (they all start with how many).
#
# Read the data files into pandas dataframes:
wordball_qajokes = pd.read_csv(files_path + 'qajokes1.1.2.csv', usecols=['Question', 'Answer'])
wordball_nosubj = pd.read_csv(files_path + 't_nosubject.csv', usecols=['Question', 'Answer'])
print(len(wordball_qajokes))
print(len(wordball_nosubj))
wordball_qajokes.head()
wordball_nosubj.head()
wordball = pd.concat([wordball_qajokes, wordball_nosubj], ignore_index=True)
wordball.head()
print(f"Number of question-answer pairs in the Wordball dataset: {len(wordball)}")
# # Text Preprocessing
# It turns out that not all cells are of type string. So, we can just apply the str function to make sure that all of them are of the same desired type.
wordball = wordball.applymap(str)
# Let's look at the characters used in this dataset:
def distinct_chars(data, cols):
    """
    Print a summary of all distinct characters used in a dataframe.

    The question and answer columns are joined into one big string, the set
    of characters appearing in it is computed, and the characters are printed
    grouped into digits, alphabetic characters and special characters.

    Arguments:
        data: a pandas dataframe.
        cols: a Python list with the names of the question and answer columns
            (in that order), or None to use the dataframe's own columns.
    """
    if cols is None:
        cols = list(data.columns)
    # join all questions into one string
    questions = ' '.join(data[cols[0]])
    # join all answers into one string
    answers = ' '.join(data[cols[1]])
    # get distinct characters used in the data (all questions and answers)
    dis_chars = set(questions + answers)
    print(f"Number of distinct characters used in the dataset: {len(dis_chars)}")
    dis_chars = list(dis_chars)
    # group (and sort) the characters so the printout is easy to read;
    # use boolean `or` (not the bitwise `|`) for the negated membership test
    digits = sorted(char for char in dis_chars if char.isdigit())
    alphabets = sorted(char for char in dis_chars if char.isalpha())
    special = sorted(char for char in dis_chars if not (char.isdigit() or char.isalpha()))
    print(f"Digits: {digits}")
    print(f"Alphabets: {alphabets}")
    print(f"Special characters: {special}")
# report the character inventory of the combined Wordball data
distinct_chars(wordball, ['Question', 'Answer'])
# The following function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.
def clean_text(text):
    """
    Normalize a question/answer string for tokenization.

    Steps:
    1. Unify quote characters: ", “, ” and ’ all become '.
    2. Replace square and curly brackets with parentheses.
    3. Surround each punctuation character with spaces so that punctuation
       becomes a separate token (. , : ; ' ? ! + - * / = % $ @ & ( )).
    4. Lowercase the text.
    5. Collapse duplicated spaces and strip leading/trailing whitespace.

    Arguments:
        text: any value; it is converted to str first.
    Returns:
        the cleaned string.
    """
    import re
    text = str(text)
    # REPLACEMENT
    # unify quote variants and bracket styles in one C-level pass
    # (equivalent to the previous chain of re.sub calls on literal characters)
    text = text.translate(str.maketrans({
        '"': "'", '“': "'", '”': "'", '’': "'",
        '[': '(', ']': ')', '{': '(', '}': ')',
    }))
    # create a space between a word and the punctuation following it;
    # the character class is deduplicated (the original listed ? and ! twice)
    text = re.sub(r"([?.!,:;'+\-*/=%$@&()])", r" \1 ", text)
    # lower case the characters in the string
    text = text.lower()
    # REMOVAL OF EXTRA WHITESPACES
    # collapse runs of spaces, then trim the ends
    text = re.sub(' +', ' ', text)
    text = text.strip()
    return text
# Let's try it out:
# quick smoke test of clean_text on fancy quotes and special symbols
clean_text("A nice quote I read today: “Everything that you are going through is preparing you for what you asked for”. @hi % & =+-*/")
# The following method prints a question-answer pair from the dataset, it will be helpful to give us a sense of what the clean_text function results in:
def print_question_answer(df, index, cols):
    """
    Print the question/answer pair stored at row label `index` of `df`.

    Arguments:
        df: a pandas dataframe.
        index: row label of the pair to print.
        cols: list with the question column name first, answer column second.
    """
    # use a single .loc[row, col] lookup instead of chained indexing
    # (df.loc[index][col]), which builds an intermediate Series per call
    print(f"Question: ({index})")
    print(df.loc[index, cols[0]])
    print(f"Answer: ({index})")
    print(df.loc[index, cols[1]])
print("Before applying text preprocessing:")
print_question_answer(wordball, 102, ['Question', 'Answer'])
print_question_answer(wordball, 200, ['Question', 'Answer'])
print_question_answer(wordball, 88376, ['Question', 'Answer'])
print_question_answer(wordball, 94351, ['Question', 'Answer'])
# Apply text preprocessing (characters replacement, removal of unwanted characters, removal of extra whitespaces):
# clean every cell of the dataframe with clean_text
wordball = wordball.applymap(clean_text)
print("After applying text preprocessing:")
print_question_answer(wordball, 102, ['Question', 'Answer'])
print_question_answer(wordball, 200, ['Question', 'Answer'])
print_question_answer(wordball, 88376, ['Question', 'Answer'])
print_question_answer(wordball, 94351, ['Question', 'Answer'])
# The following function applies some preprocessing operations on the data, concretely:
#
# Drops unecessary duplicate pairs (rows) but keep only one instance of all duplicates. (For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)
# Drops rows with empty question/answer. (These may appear because of the previous step or because they happen to be empty in the original dataset)
# Drops rows with more than 30 words in either the question or the answer or if the answer has less than two characters. (Note: this is a hyperparameter and you can try other values.)
def preprocess_data(data, cols):
    """
    Preprocess a question/answer dataframe:
    1. drops unnecessary duplicate pairs (keeping one instance of each).
    2. drops rows with empty question or answer strings.
    3. drops rows with more than 30 words in either the question or the
       answer, or whose answer is shorter than two characters.

    Arguments:
        data: a pandas dataframe.
        cols: a Python list with the names of the question and answer columns
            (in that order), or None to use the dataframe's own columns.
    Returns:
        a pandas dataframe.
    """
    # (1) Remove duplicate pairs but keep the first occurrence of each.
    print('Removing unecessary duplicate pairs:')
    data_len_before = len(data)  # len of data before removing duplicates
    print(f"# of examples before removing duplicates: {data_len_before}")
    data = data.drop_duplicates(keep='first')
    data_len_after = len(data)  # len of data after removing duplicates
    print(f"# of examples after removing duplicates: {data_len_after}")
    print(f"# of removed duplicates: {data_len_before-data_len_after}")
    # (2) Drop rows where the question or the answer is an empty string.
    print('Removing empty string rows:')
    if cols is None:
        cols = list(data.columns)
    data_len_before = len(data)
    print(f"# of examples before removing rows with empty question/answers: {data_len_before}")
    # boolean masking keeps only rows where both columns are non-empty
    data = data[(data[cols[0]] != '') & (data[cols[1]] != '')]
    data_len_after = len(data)
    print(f"# of examples after removing with empty question/answers: {data_len_after}")
    print(f"# of removed empty string rows: {data_len_before-data_len_after}")
    # (3) Drop rows with over-long questions/answers or a too-short answer.
    def accepted_length(qa_pair):
        # qa_pair is a row Series; index it by column label rather than by
        # position (positional [] access on labeled Series is deprecated)
        q_len = len(qa_pair[cols[0]].split(' '))
        a_len = len(qa_pair[cols[1]].split(' '))
        # plain boolean `and` instead of bitwise `&` on bools
        return q_len <= 30 and a_len <= 30 and len(qa_pair[cols[1]]) > 1
    print('Removing rows with more than 30 words in either the question or the answer:')
    data_len_before = len(data)
    print(f"# of examples before removing rows with more than 30 words: {data_len_before}")
    # filter out rows that fail the length criteria
    accepted_mask = data.apply(accepted_length, axis=1)
    data = data[accepted_mask]
    data_len_after = len(data)
    print(f"# of examples after removing rows with more than 30 words: {data_len_after}")
    print(f"# of removed empty rows with more than 30 words: {data_len_before-data_len_after}")
    print("Data preprocessing is done.")
    return data
# drop duplicates, empty rows and over-long rows from the Wordball data
wordball = preprocess_data(wordball, ['Question', 'Answer'])
print(f"# of question-answer pairs we have left in the Wordball dataset: {len(wordball)}")
# Let's look at the characters after cleaning the data:
distinct_chars(wordball, ['Question', 'Answer'])
# # Second Dataset
# reddit /r/Jokes, here.
#
# This dataset consists of two files, namely:
#
# jokes_score_name_clean.csv: with 133,992 pairs.
# all_jokes.csv
# However, I'm not going to incorporate all_jokes.csv in the dataset because it's so messy.
# read the reddit /r/Jokes file, keeping only the q/a columns
reddit_jokes = pd.read_csv(files_path + 'jokes_score_name_clean.csv', usecols=['q', 'a'])
# Let's rename the columns to have them aligned with the previous dataset:
reddit_jokes.rename(columns={'q':'Question', 'a':'Answer'}, inplace=True)
reddit_jokes.head()
print(len(reddit_jokes))
distinct_chars(reddit_jokes, ['Question', 'Answer'])
# # Text Preprocessing
# coerce every cell to str so later string operations are safe
reddit_jokes = reddit_jokes.applymap(str)
# Reddit data has some special tags like [removed] or [deleted] (these two mean that the comment has been removed/deleted). Also, they're written in an inconsistent way, i.e. you may find the tag [removed] capitalized or lowercased.
# The next function will address reddit tags as follows:
#
# Drops rows with deleted, removed or censored tags.
# Replaces other tags found in text with a whitespace. (i.e. some comments have tags like [censored], [gaming], [long], [request] and [dirty] and we want to omit these tags from the text)
def clean_reddit_tags(data, cols):
    """
    Remove reddit-related tags from the data:
    1. drops rows whose question or answer is exactly a [removed], [deleted]
       or [censored] tag (matched case-insensitively).
    2. replaces any other bracketed tag found in the text with a whitespace.

    Arguments:
        data: a pandas dataframe.
        cols: a Python list with the names of the question and answer columns
            (in that order), or None to use the dataframe's own columns.
    Returns:
        a pandas dataframe.
    """
    import re
    if cols is None:
        cols = list(data.columns)
    # Lowercase a *copy* so tag detection is case-insensitive without
    # altering the original text.
    data_copy = data.copy()
    data_copy[cols[0]] = data_copy[cols[0]].str.lower()
    data_copy[cols[1]] = data_copy[cols[1]].str.lower()
    # mask is True for rows we KEEP: neither side is one of the drop tags
    drop_tags = {'[removed]', '[deleted]', '[censored]'}
    mask = data_copy.apply(
        lambda qa_pair: qa_pair[cols[0]] not in drop_tags and qa_pair[cols[1]] not in drop_tags,
        axis=1)
    # filter the original (non-lowercased) dataframe
    data = data[mask]
    # BUGFIX: mask marks kept rows, so the number of dropped rows is the
    # count of False entries -- the original printed mask.sum() (rows kept)
    print(f"# of rows dropped with [deleted], [removed] or [censored] tags: {(~mask).sum()}")
    def sub_tag(pair):
        """
        Substitute tags (square brackets with words inside) with whitespace.
        Arguments:
            pair: a Pandas Series holding one question/answer row.
        Returns:
            pair: the same Series with tags replaced.
        """
        # \[(.*?)\] recognizes square brackets with anything (non-greedy) inside
        p = re.compile(r"\[(.*?)\]")
        pair[cols[0]] = re.sub(p, ' ', pair[cols[0]])
        pair[cols[1]] = re.sub(p, ' ', pair[cols[1]])
        return pair
    # substitute remaining tags with whitespaces
    data = data.apply(sub_tag, axis=1)
    return data
print("Before addressing tags:")
print_question_answer(reddit_jokes, 1825, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 59924, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])
# Note: the following cell may take multiple seconds to finish.
#
# drop tagged rows and strip remaining bracketed tags
reddit_jokes = clean_reddit_tags(reddit_jokes, ['Question', 'Answer'])
reddit_jokes
print("After addressing tags:")
# because rows with [removed], [deleted] and [censored] tags have been dropped
# we're not going to print the rows (index=1825, index=59924) since they contain
# those tags, or we're going to have a KeyError
print_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])
# Note: notice the question whose index is 52906, has some leading whitespaces. That's because it had the [Corny] tag and the function replaced it with whitespaces. Also, the question whose index is 1489 has an empty answer and that's because of the fact that the original answer was just square brackets with some whitespaces in between. We're going to address all of that next!
#
# Now, let's apply the clean_text function on the reddit data.
# Remember: the clean_text function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.
reddit_jokes = reddit_jokes.applymap(clean_text)
print_question_answer(reddit_jokes, 52906, ['Question', 'Answer'])
print_question_answer(reddit_jokes, 1489, ['Question', 'Answer'])
# Everything looks good!
# Now, let's apply the preprocess_data function on the data.
# Remember: the preprocess_data function applies the following preprocessing operations:
#
# Drops unecessary duplicate pairs (rows) but keep only one instance of all duplicates. (For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)
# Drops rows with empty question/answer. (These may appear because of the previous step or because they happen to be empty in the original dataset)
# Drops rows with more than 30 words in either the question or the answer or if the an answer has less than two characters. (Note: this is a hyperparameter and you can try other values.)
reddit_jokes = preprocess_data(reddit_jokes, ['Question', 'Answer'])
print(f"Number of question answer pairs in the reddit /r/Jokes dataset: {len(reddit_jokes)}")
distinct_chars(reddit_jokes, ['Question', 'Answer'])
# # Third Dataset
# Question-Answer Jokes, here.
#
# This dataset consists of one file, namely:
#
# jokes_score_name_clean.csv: with 38,269 pairs.
# read the Question-Answer Jokes file, keeping only the two relevant columns
qa_jokes = pd.read_csv(files_path + 'jokes.csv', usecols=['Question', 'Answer'])
qa_jokes.head()
distinct_chars(qa_jokes, ['Question', 'Answer'])
# # Text Preprocessing
# If you look at some examples in the dataset, you notice that some examples has 'Q:' at beginning of the question and 'A:' at the beginning of the answer, so we need to get rid of these prefixes because they don't convey useful information.
# You also notice some examples where both 'Q:' and 'A:' are found in either the question or the answer, although I'm not going to omit these because they probably convey information and are part of the answer. However, some of them have 'Q:' in the question and 'Q: question A: answer' where the question in the answer is the same question, so we need to fix that.
def clean_qa_prefixes(data, cols):
    """
    Remove special prefixes ('Q:' and 'A:') found in the data.
    i.e. input="Q: how's your day?" --> output=" how's your day?"

    Arguments:
        data: a pandas dataframe.
        cols: a Python list with the names of the question and answer columns
            (in that order), or None to use the dataframe's own columns.
    Returns:
        a pandas dataframe.
    """
    # fall back to the dataframe's own columns, consistent with the other helpers
    if cols is None:
        cols = list(data.columns)
    q_col, a_col = cols[0], cols[1]
    def removes_prefixes(pair):
        """
        Remove prefixes ('Q:' and 'A:') from one question/answer row.
        Examples:
            question="Q: what is your favorite Space movie?", answer='A: Interstellar!'
                -> question=' what is your favorite Space movie?', answer=' Interstellar!'
            question="Q: how's your day?", answer="Q: how's your day? A: good, thanks."
                -> question=" how's your day?", answer='good, thanks.'
            question='How old are you?', answer='old enough' -> unchanged
        Arguments:
            pair: a Pandas Series holding one question/answer row.
        Returns:
            pair: the same Series with prefixes removed.
        """
        question, answer = pair[q_col], pair[a_col]
        # the question has 'Q:' and the answer has 'A:' but no embedded 'Q:'
        if ('Q:' in question) and ('A:' in answer) and ('Q:' not in answer):
            pair[q_col] = question.replace('Q:', '')
            pair[a_col] = answer.replace('A:', '')
        # the answer contains both 'Q:' and 'A:'
        elif ('A:' in answer) and ('Q:' in answer):
            pair[q_col] = question.replace('Q:', '')
            # check whether the text between 'Q:' and 'A:' repeats the question;
            # if it does, drop that repetition from the answer
            q_start = answer.find('Q:') + 2  # start of the embedded question text
            q_end = answer.find('A:')        # end of the embedded question text
            q_txt = answer[q_start:q_end].strip()
            if q_txt == pair[q_col].strip():
                # the question is repeated in the answer: keep only the part after 'A:'
                pair[a_col] = answer[q_end + 2:].strip()
        return pair
    return data.apply(removes_prefixes, axis=1)
print("Before removing unnecessary prefixes:")
print_question_answer(qa_jokes, 44, ['Question', 'Answer'])
print_question_answer(qa_jokes, 22, ['Question', 'Answer'])
print_question_answer(qa_jokes, 31867, ['Question', 'Answer'])
# strip 'Q:' / 'A:' prefixes (and duplicated questions) from the pairs
qa_jokes = clean_qa_prefixes(qa_jokes, ['Question', 'Answer'])
print("After removing unnecessary prefixes:")
print_question_answer(qa_jokes, 44, ['Question', 'Answer'])
print_question_answer(qa_jokes, 22, ['Question', 'Answer'])
print_question_answer(qa_jokes, 31867, ['Question', 'Answer'])
# Notice that the third example both 'Q:' and 'A:' are part of the answer and conveys information.
#
# Now, let's apply the clean_text function on the Question-Answer Jokes data.
# Remember: the clean_text function replaces some characters with others, removes unwanted characters and gets rid of extra whitespaces from the data.
# normalize characters, punctuation and whitespace in every cell
qa_jokes = qa_jokes.applymap(clean_text)
# Now, let's apply the preprocess_data function on the data.
# Remember: the preprocess_data function applies the following preprocessing operations:
#
# 1. Drops unnecessary duplicate pairs (rows) but keep only one instance of all duplicates. (For example, if the dataset contains three duplicates of the same question-answer pair, then two of them would be removed and one kept.)
# 2. Drops rows with an empty question/answer. (These may appear because of the previous step or because they happen to be empty in the original dataset)
# 3. Drops rows with more than 30 words in either the question or the answer or if the an answer has less than two characters. (Note: this is a hyperparameter and you can try other values.)
# drop duplicates, empty rows and over-long rows from the QA Jokes data
qa_jokes = preprocess_data(qa_jokes, ['Question', 'Answer'])
print(f"Number of question-answer pairs in the Question-Answer Jokes dataset: {len(qa_jokes)}")
distinct_chars(qa_jokes, ['Question', 'Answer'])
# # Putting it together
# Let's concatenate all the data we have to create our final dataset.
# merge the three cleaned datasets into the final one
dataset = pd.concat([wordball, reddit_jokes, qa_jokes], ignore_index=True)
dataset.head()
print(f"Number of question-answer pairs in the dataset: {len(dataset)}")
# There may be duplicate examples in the data so let's drop them:
data_len_before = len(dataset) # len of data before removing duplicates
print(f"# of examples before removing duplicates: {data_len_before}")
# drop duplicates
dataset = dataset.drop_duplicates(keep='first')
data_len_after = len(dataset) # len of data after removing duplicates
print(f"# of examples after removing duplicates: {data_len_after}")
print(f"# of removed duplicates: {data_len_before-data_len_after}")
# Let's drop rows with NaN values if there's any:
dataset.dropna(inplace=True)
dataset
# Let's make sure that all our cells are of the same type:
dataset = dataset.applymap(str)
print(f"Number of question-answer pairs in the dataset: {len(dataset)}")
distinct_chars(dataset, ['Question', 'Answer'])
# Finally, let's save the dataset:
# NOTE(review): files_path already ends with '/', so the path below contains a
# doubled slash; it still resolves on common filesystems, but could be tidied
dataset.to_csv(files_path + '/dataset.csv')
# Looks pretty good! we can use this dataset to develop any kind of NLP/NLU model.
#
#
# # Further reading
#
#
# 1.Steps for a Developer to Learn Apache Spark™ with Delta Lake [link](https://databricks.com/p/ebook/learn-apache-spark-with-delta-lake?utm_medium=cpc&utm_source=bing&utm_campaign=392642117&utm_offer=learn-apache-spark-with-delta-lake&utm_content=ebook&utm_term=%2Bspark&msclkid=e89c97e97c3b11a24c5dc85a6642b6bd).
#
# 2.MLlib: Machine Learning in Apache Spark [link](https://jmlr.org/papers/volume17/15-237/15-237.pdf).
#
# 3.Hands-On Deep Learning with Apache Spark [link](https://lib-ebooks.com/hands-on-deep-learning-with-apache-spark/)
#
#
#
# # Summary
# In this tutorial, you discovered how to process and prepare unstructured data (text) using pandas and Python's re library, working inside a Jupyter notebook.
#
# Specifically, you learned:
#
# * How to import Data into jupyter notebook.
# * How to define classes to deal with different situation in the dataset.
# * How to concatenate different dataset.
# * How to save dataset.
#
#
# ## Next Step
#
# There is still much room to improve like try to do same job using spaCy.
| Smart Chatbot - Build The Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="s1i4P0Z-LFnl"
# # RANS Airfoil Flows with Bayesian Neural Nets
#
# ## Overview
#
# We are now considering the same setup as in the notebook {doc}`supervised-airfoils`: A turbulent airflow around wing profiles, for which we'd like to know the average motion
# and pressure distribution around this airfoil for different Reynolds numbers and angles of attack. In the earlier notebook, we tackled this by completely bypassing any physical solver and instead training a neural network that learns the quantities of interest. Now, we want to extend this approach to the variational Bayesian Neural Networks (BNNs) of the previous section. In contrast to traditional networks, that learn a single point estimate for each weight value, BNNs aim at learning a _distribution_ over each weight parameter (e.g. a Gaussian with mean $\mu$ and variance $\sigma^{2}$). During a forward-pass, each parameter in the network is then sampled from its corresponding approximate posterior distribution $q_{\phi}(\theta)$. In that sense, the network parameters themselves are _random variables_ and each forward pass becomes _stochastic_, because for a given input the predictions will vary with every forward-pass. This allows to assess how _uncertain_ the network is: If the predictions vary a lot, we think that the network is uncertain about its output. [[run in colab]](https://colab.research.google.com/github/tum-pbs/pbdl-book/blob/main/bayesian-code.ipynb)
# + [markdown] id="jzE1wwAjG_QQ"
# ### Read in Data
# Like in the previous notebook we'll skip the data generation process. This example is adapted from [the Deep-Flow-Prediction codebase](https://github.com/thunil/Deep-Flow-Prediction), which you can check out for details. Here, we'll simply download a small set of training data generated with a Spalart-Almaras RANS simulation in [OpenFOAM](https://openfoam.org/).
# + colab={"base_uri": "https://localhost:8080/"} id="JwZudtWauiGa" outputId="4b8b6f0c-ad97-4527-869c-622ad11c292b"
import numpy as np
import os.path, random
# get training data: as in the previous supervised example, either download or use gdrive
# NOTE(review): `dir` shadows the builtin of the same name; harmless in this script
dir = "./"
if True:
    if not os.path.isfile('data-airfoils.npz'):
        import requests
        print("Downloading training data (300MB), this can take a few minutes the first time...")
        with open("data-airfoils.npz", 'wb') as datafile:
            # verify=False disables TLS certificate checking for this download --
            # acceptable for a public tutorial dataset, not for sensitive data
            resp = requests.get('https://dataserv.ub.tum.de/s/m1615239/download?path=%2F&files=dfp-data-400.npz', verify=False)
            datafile.write(resp.content)
else: # cf supervised airfoil code:
    from google.colab import drive
    drive.mount('/content/gdrive')
    dir = "./gdrive/My Drive/"
npfile=np.load(dir+'data-airfoils.npz')
print("Loaded data, {} training, {} validation samples".format(len(npfile["inputs"]),len(npfile["vinputs"])))
print("Size of the inputs array: "+format(npfile["inputs"].shape))
# reshape to channels_last for convenience
X_train = np.moveaxis(npfile["inputs"],1,-1)
y_train = np.moveaxis(npfile["targets"],1,-1)
X_val = np.moveaxis(npfile["vinputs"],1,-1)
y_val = np.moveaxis(npfile["vtargets"],1,-1)
# + [markdown] id="5C_Ooq0JG_Qk"
# ### Look at Data
# Now we have some training data. We can look at it using the code we also used in the original notebook:
# + colab={"base_uri": "https://localhost:8080/", "height": 383} id="1y1zHmKlAWNs" outputId="2a3223d3-5708-48ee-8186-78d6c403145e"
import pylab
from matplotlib import cm
# helper to show three target channels: normalized, with colormap, side by side
def showSbs(a1, a2, bottom="NN Output", top="Reference", title=None):
    """
    Show the three channels of two fields side by side in one figure.

    Each channel of a2 (drawn on top) is concatenated with the matching
    channel of a1 (drawn below), normalized to [0, 1], and the three
    resulting panels are displayed next to each other with the 'magma'
    colormap.

    Arguments:
        a1, a2: arrays with shape (H, W, 3) -- e.g. (p, ux, uy) fields.
        bottom, top: axis labels for the lower/upper halves.
        title: optional figure title.
    """
    c = []
    for i in range(3):
        b = np.flipud(np.concatenate((a2[..., i], a1[..., i]), axis=1).transpose())
        # normalize each channel independently to [0, 1]; renamed locals so the
        # builtins min/max are not shadowed, and dropped the unused mean value
        bmin, bmax = np.min(b), np.max(b)
        b -= bmin
        b /= (bmax - bmin)
        c.append(b)
    fig, axes = pylab.subplots(1, 1, figsize=(16, 5))
    axes.set_xticks([])
    axes.set_yticks([])
    im = axes.imshow(np.concatenate(c, axis=1), origin='upper', cmap='magma')
    pylab.colorbar(im)
    pylab.xlabel('p, ux, uy')
    pylab.ylabel('%s %s' % (bottom, top))
    if title is not None:
        pylab.title(title)
NUM=40
print("\nHere are all 3 inputs are shown at the top (mask,in x, in y) \nSide by side with the 3 output channels (p,vx,vy) at the bottom:")
# visualize one training sample: inputs on top, targets below
showSbs( X_train[NUM],y_train[NUM], bottom="Target Output", top="Inputs", title="Training data sample")
# + [markdown] id="fAd8hWG3atqg"
# Not surprisingly, the data still looks the same. For details, please check out the description in {doc}`supervised-airfoils`.
# + [markdown] id="C2gdKINAG_Qs"
# ### Neural Network Definition
# Now let's look at how we can implement BNNs. Instead of PyTorch, we will use TensorFlow, in particular the extension TensorFlow Probability, which has easy-to-implement probabilistic layers. Like in the other notebook, we use a U-Net structure consisting of Convolutional blocks with skip-layer connections. For now, we only want to set up the decoder, i.e. second part of the U-Net as bayesian. For this, we will take advantage of TensorFlows _flipout_ layers (in particular, the convolutional implementation).
#
# In a forward pass, those layers automatically sample from the current posterior distribution and store the KL-divergence between prior and posterior in _model.losses_. One can specify the desired divergence measure (typically KL-divergence) and modify the prior and approximate posterior distributions, if other than normal distributions are desired. Other than that, the flipout layers can be used just like regular layers in sequential architectures. The code below implements a single convolutional block of the U-Net:
# + id="9-8O6qJ-G_Qt"
import tensorflow as tf
import tensorflow_probability.python.distributions as tfd
from tensorflow.keras import Sequential
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose,UpSampling2D, BatchNormalization, ReLU, LeakyReLU, SpatialDropout2D, MaxPooling2D
from tensorflow_probability.python.layers import Convolution2DFlipout
from tensorflow.keras.models import Model
def tfBlockUnet(filters=3, transposed=False, kernel_size=4, bn=True, relu=True, pad="same", dropout=0., flipout=False,
                kdf=None, name=''):
    """
    Build one U-Net convolution block as a Sequential model.

    The block applies an activation first, then either a strided convolution
    (encoder, downsampling by 2) or an upsampling step followed by a stride-1
    convolution (decoder), optionally a flipout (Bayesian) convolution, spatial
    dropout (decoder only) and batch normalization.

    Arguments:
        filters: number of output channels of the convolution.
        transposed: False builds an encoder block, True a decoder block.
        kernel_size: conv kernel size (reduced by one in the decoder branch).
        bn: append a BatchNormalization layer when True.
        relu: use ReLU when True, LeakyReLU(0.2) otherwise.
        pad: padding mode passed to the convolution layers.
        dropout: SpatialDropout2D rate (decoder branch only).
        flipout: use a Convolution2DFlipout layer in the decoder when True.
        kdf: kernel divergence function forwarded to the flipout layer.
        name: name of the returned Sequential model.
    """
    blk = Sequential(name=name)
    # pre-activation: ReLU for the decoder, LeakyReLU for the encoder
    blk.add(ReLU() if relu else LeakyReLU(0.2))
    if transposed:
        # decoder: bilinear upsampling, then a stride-1 conv with kernel_size-1
        blk.add(UpSampling2D(interpolation='bilinear'))
        if flipout:
            blk.add(Convolution2DFlipout(filters=filters, kernel_size=(kernel_size - 1), strides=(1, 1), padding=pad,
                                         data_format="channels_last", kernel_divergence_fn=kdf,
                                         activation=None))
        else:
            blk.add(Conv2D(filters=filters, kernel_size=(kernel_size - 1), padding=pad,
                           kernel_initializer=RandomNormal(0.0, 0.02), strides=(1, 1), activation=None))
        blk.add(SpatialDropout2D(rate=dropout))
    else:
        # encoder: strided convolution downsamples the field by a factor of 2
        blk.add(Conv2D(filters=filters, kernel_size=kernel_size, padding=pad,
                       kernel_initializer=RandomNormal(0.0, 0.02), activation=None, strides=(2, 2)))
    if bn:
        blk.add(BatchNormalization(axis=-1, epsilon=1e-05, momentum=0.9))
    return blk
# + [markdown] id="57AMSMp5sPFZ"
# Next we define the full network with these blocks - the structure is almost identical to the previous notebook. We manually define the kernel-divergence function as `kdf` and rescale it with a factor called `kl_scaling`. There are two reasons for this:
#
# First, we should only apply the kl-divergence once per epoch if we want to use the correct loss (like introduced in {doc}`bayesian-intro`). Since we will use batch-wise training, we need to rescale the Kl-divergence by the number of batches, such that in every parameter update only _kdf / num_batches_ is added to the loss. During one epoch, _num_batches_ parameter updates are performed and the 'full' KL-divergence is used. This batch scaling is computed and passed to the network initialization via `kl_scaling` when instantiating the `Bayes_DfpNet` NN later on.
#
# Second, by scaling the KL-divergence part of the loss up or down, we have a way of tuning how much randomness we want to allow in the network: If we neglect the KL-divergence completely, we would just minimize the regular loss (e.g. MSE or MAE), like in a conventional neural network. If we instead neglect the negative-log-likelihood, we would optimize the network such that we obtain random draws from the prior distribution. Balancing those extremes can be done by fine-tuning the scaling of the KL-divergence and is hard in practice.
# + id="pGoGJwqdG_Qv"
def Bayes_DfpNet(input_shape=(128,128,3),expo=5,dropout=0.,flipout=False,kl_scaling=10000):
    """
    Build the (partially) Bayesian U-Net for airfoil-flow prediction.

    The encoder (layer1..layer7) uses conventional convolutions; the decoder
    (dlayer7..dlayer2) uses flipout (Bayesian) convolutions when flipout=True.
    Skip connections concatenate encoder and decoder features channel-wise.

    Arguments:
        input_shape: shape of one input sample (H, W, channels).
        expo: exponent for the base channel count (2**expo channels in layer1).
        dropout: spatial dropout rate forwarded to the U-Net blocks.
        flipout: enable flipout layers in the decoder.
        kl_scaling: divisor for each flipout layer's KL-divergence term.
    Returns:
        a tf.keras Model producing 3 output channels.
    """
    channels = int(2 ** expo + 0.5)
    # kernel-divergence function for the flipout layers: KL(q||p) / kl_scaling
    kdf = (lambda q, p, _: tfd.kl_divergence(q, p) / tf.cast(kl_scaling, dtype=tf.float32))
    layer1=Sequential(name='layer1')
    layer1.add(Conv2D(filters=channels,kernel_size=4,strides=(2,2),padding='same',activation=None,data_format='channels_last'))
    layer2=tfBlockUnet(filters=channels*2,transposed=False,bn=True, relu=False,dropout=dropout,name='layer2')
    layer3=tfBlockUnet(filters=channels*2,transposed=False,bn=True, relu=False,dropout=dropout,name='layer3')
    layer4=tfBlockUnet(filters=channels*4,transposed=False,bn=True, relu=False,dropout=dropout,name='layer4')
    layer5=tfBlockUnet(filters=channels*8,transposed=False,bn=True, relu=False,dropout=dropout,name='layer5')
    layer6=tfBlockUnet(filters=channels*8,transposed=False,bn=True, relu=False,dropout=dropout,kernel_size=2,pad='valid',name='layer6')
    layer7=tfBlockUnet(filters=channels*8,transposed=False,bn=True, relu=False,dropout=dropout,kernel_size=2,pad='valid',name='layer7')
    # note, kernel size is internally reduced by one for the decoder part
    dlayer7=tfBlockUnet(filters=channels*8,transposed=True,bn=True, relu=True,dropout=dropout, flipout=flipout,kdf=kdf, kernel_size=2,pad='valid',name='dlayer7')
    dlayer6=tfBlockUnet(filters=channels*8,transposed=True,bn=True, relu=True,dropout=dropout, flipout=flipout,kdf=kdf, kernel_size=2,pad='valid',name='dlayer6')
    dlayer5=tfBlockUnet(filters=channels*4,transposed=True,bn=True, relu=True,dropout=dropout, flipout=flipout,kdf=kdf,name='dlayer5')
    dlayer4=tfBlockUnet(filters=channels*2,transposed=True,bn=True, relu=True,dropout=dropout, flipout=flipout,kdf=kdf,name='dlayer4')
    dlayer3=tfBlockUnet(filters=channels*2,transposed=True,bn=True, relu=True,dropout=dropout, flipout=flipout,kdf=kdf,name='dlayer3')
    dlayer2=tfBlockUnet(filters=channels ,transposed=True,bn=True, relu=True,dropout=dropout, flipout=flipout,kdf=kdf,name='dlayer2')
    dlayer1=Sequential(name='outlayer')
    dlayer1.add(ReLU())
    dlayer1.add(Conv2DTranspose(3,kernel_size=4,strides=(2,2),padding='same'))
    # forward pass (encoder)
    inputs=Input(input_shape)
    out1 = layer1(inputs)
    out2 = layer2(out1)
    out3 = layer3(out2)
    out4 = layer4(out3)
    out5 = layer5(out4)
    out6 = layer6(out5)
    out7 = layer7(out6)
    # ... bottleneck ...
    # decoder with skip connections (concatenation along the channel axis)
    # NOTE(review): the name `dout6` is reused for two different tensors below
    dout6 = dlayer7(out7)
    dout6_out6 = tf.concat([dout6,out6],axis=3)
    dout6 = dlayer6(dout6_out6)
    dout6_out5 = tf.concat([dout6, out5], axis=3)
    dout5 = dlayer5(dout6_out5)
    dout5_out4 = tf.concat([dout5, out4], axis=3)
    dout4 = dlayer4(dout5_out4)
    dout4_out3 = tf.concat([dout4, out3], axis=3)
    dout3 = dlayer3(dout4_out3)
    dout3_out2 = tf.concat([dout3, out2], axis=3)
    dout2 = dlayer2(dout3_out2)
    dout2_out1 = tf.concat([dout2, out1], axis=3)
    dout1 = dlayer1(dout2_out1)
    return Model(inputs=inputs,outputs=dout1)
# + [markdown] id="ETzjWZJzyMEu"
# Let's define the hyperparameters and create a tensorflow dataset to organize inputs and targets. Since we have 320 observations in the training set, for a batch-size of 10 we should rescale the KL-divergence with a factor of 320/10=32 in order apply the full KL-divergence just once per epoch. We will further scale the KL-divergence down by another factor of `KL_PREF=5000`, which has shown to work well in practice.
#
# Furthermore, we will define a function that implements learning rate decay. Intuitively, this allows the optimization to be more precise (by making smaller steps) in later epochs, while still making fast progress (by making bigger steps) in the first epochs.
# + id="H23a0JAJclEN"
import math
import matplotlib.pyplot as plt
# Training hyperparameters.
BATCH_SIZE=10
LR=0.001
EPOCHS = 120
# Extra empirical down-scaling factor for the KL-divergence term (see markdown above).
KL_PREF = 5000
# Shuffle once with a fixed seed (reshuffle_each_iteration=False keeps the order
# stable across epochs), then batch; the last, smaller batch is kept.
# NOTE(review): assumes X_train / y_train are defined in an earlier cell.
dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train)).shuffle(len(X_train),
    seed=46168531, reshuffle_each_iteration=False).batch(BATCH_SIZE, drop_remainder=False)
def compute_lr(i, epochs, minLR, maxLR):
    """Learning-rate schedule: constant maxLR for the first half of
    training, then exponential decay towards minLR.

    i      -- current epoch index
    epochs -- total number of epochs
    minLR  -- lower bound of the learning rate
    maxLR  -- initial (maximal) learning rate
    """
    if i < epochs * 0.5:
        return maxLR
    # progress through the second half, rescaled to [0, 1]
    progress = (i / float(epochs) - 0.5) * 2.
    # map onto the exponent range [fmin, fmax] = [0, 6] and decay with base 0.5
    exponent = 0. + progress * (6. - 0.)
    decay = math.pow(0.5, exponent)
    return minLR + (maxLR - minLR) * decay
# + [markdown] id="3lUU7A0o1PzV"
# We can visualize the learning rate decay: We start off with a constant rate and after half of the `EPOCHS` we start to decay it exponentially, until arriving at half of the original learning rate.
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="tDXZO0FN02do" outputId="f19c0484-180c-4780-b7a0-cbc95d2c0fa9"
# Visualize the schedule: constant for the first half, then exponential decay.
lrs=[compute_lr(i, EPOCHS, 0.5*LR,LR) for i in range(EPOCHS)]
plt.plot(lrs)
plt.xlabel('Iteration')
plt.ylabel('Learning Rate')
# + [markdown] id="lcwUJ6iH1uTr"
# Let's initialize the network. Here we're finally computing the `kl_scaling` factor via `KL_PREF` and the batch size.
# + colab={"base_uri": "https://localhost:8080/"} id="LGaH2ZW73M1b" outputId="13c2ada4-79bf-42f7-f2f9-4355c0173fb0"
from tensorflow.keras.optimizers import RMSprop, Adam
# kl_scaling spreads the KL loss over the per-epoch batches,
# further scaled by the empirical prefactor KL_PREF (see markdown above).
model=Bayes_DfpNet(expo=4,flipout=True,kl_scaling=KL_PREF*len(X_train)/BATCH_SIZE)
optimizer = Adam(learning_rate=LR, beta_1=0.5,beta_2=0.9999)
# Count trainable parameters (flipout layers carry both mean and variance weights).
num_params = np.sum([np.prod(v.get_shape().as_list()) for v in model.trainable_variables])
print('The Bayesian U-Net has {} parameters.'.format(num_params))
# + [markdown] id="eF4UTqeB6Y4u"
# In general, flipout layers come with twice as many parameters as their conventional counterparts, since instead of a single point estimate one has to learn both mean and variance parameters for the Gaussian posterior of the weights. As we only have flipout layers for the decoder part here, the resulting model has 846787 parameters, compared to the 585667 of the conventional NN.
#
# ## Training
#
# Now we are ready to run the training! Note that this might take a while (typically around 4 hours), as the flipout layers are significantly slower to train compared to regular layers.
#
# + colab={"base_uri": "https://localhost:8080/"} id="VEQuKBegcd1K" outputId="52d03473-3c95-4df6-9657-9d64c41348c8"
from tensorflow.keras.losses import mae
import math
# Per-epoch loss histories (averaged over batches) for later plotting.
kl_losses=[]
mae_losses=[]
total_losses=[]
mae_losses_vali=[]
for epoch in range(EPOCHS):
    # compute learning rate - decay is implemented
    currLr = compute_lr(epoch,EPOCHS,0.5*LR,LR)
    if currLr < LR:
        tf.keras.backend.set_value(optimizer.lr, currLr)
    # iterate through training data
    kl_sum = 0
    mae_sum = 0
    total_sum=0
    for i, traindata in enumerate(dataset, 0):
        # forward pass and loss computation
        with tf.GradientTape() as tape:
            inputs, targets = traindata
            prediction = model(inputs, training=True)
            loss_mae = tf.reduce_mean(mae(prediction, targets))
            # model.losses holds the KL terms contributed by the flipout layers
            kl=sum(model.losses)
            # total loss = (already-scaled) KL divergence + data term
            loss_value=kl+tf.cast(loss_mae, dtype='float32')
        # backpropagate gradients and update parameters
        gradients = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        # store losses per batch
        kl_sum += kl
        mae_sum += tf.reduce_mean(loss_mae)
        total_sum+=tf.reduce_mean(loss_value)
    # store losses per epoch
    kl_losses+=[kl_sum/len(dataset)]
    mae_losses+=[mae_sum/len(dataset)]
    total_losses+=[total_sum/len(dataset)]
    # validation
    outputs = model.predict(X_val)
    mae_losses_vali += [tf.reduce_mean(mae(y_val, outputs))]
    if epoch<3 or epoch%20==0:
        print('Epoch {}/{}, total loss: {:.3f}, KL loss: {:.3f}, MAE loss: {:.4f}, MAE loss vali: {:.4f}'.format(epoch, EPOCHS, total_losses[-1], kl_losses[-1], mae_losses[-1], mae_losses_vali[-1]))
# + [markdown] id="7aM5Ra2C7k1v"
# The BNN is trained! Let's look at the loss. Since the loss consists of two separate parts, it is helpful to monitor both parts (MAE and KL).
# + colab={"base_uri": "https://localhost:8080/", "height": 233} id="3niTOL6CcrHo" outputId="24acd9cc-c0aa-4848-bd19-e0721eb4b998"
# Plot training curves: KL term, MAE (train vs. validation), and all terms together.
fig,axs=plt.subplots(ncols=3,nrows=1,figsize=(20,4))
axs[0].plot(kl_losses,color='red')
axs[0].set_title('KL Loss (Train)')
axs[1].plot(mae_losses,color='blue',label='train')
axs[1].plot(mae_losses_vali,color='green',label='val')
axs[1].set_title('MAE Loss'); axs[1].legend()
axs[2].plot(total_losses,label='Total',color='black')
axs[2].plot(kl_losses,label='KL',color='red')
axs[2].plot(mae_losses,label='MAE',color='blue')
axs[2].set_title('Total Train Loss'); axs[2].legend()
# + [markdown] id="WBGaJng6psg-"
# This way, we can double-check if minimizing one part of the loss comes at the cost of increasing the other. For our case, we observe that both parts decrease smoothly. In particular, the MAE loss is not increasing for the validation set, indicating that we are not overfitting.
#
# It is good practice to double-check how many layers added KL-losses. We can inspect _model.losses_ for that. Since the decoder consists of 6 sequential blocks with flipout layers, we expect 6 entries in _model.losses_.
# + colab={"base_uri": "https://localhost:8080/"} id="LusTjxWRdRjn" outputId="e4c401d3-c55f-49d2-b780-460bbdf47d0a"
# there should be 6 entries in model.losses since we have 6 blocks with flipout layers in our model
# Sanity check: each flipout block contributes exactly one KL-loss tensor.
print('There are {} entries in model.losses'.format(len(model.losses)))
print(model.losses)
# + [markdown] id="auFzYkx0G_Q6"
# Now let's visualize how the BNN performs for unseen data from the validation set. Ideally, we would like to integrate out the parameters $\theta$, i.e. marginalize in order to obtain a prediction. Since this is again hard to realize analytically, one usually approximates the integral via sampling from the posterior:
#
# $$
# \hat{y_{i}}=\int f(x_{i};\theta)q_{\phi}(\theta)d\theta\approx\frac{1}{R}\sum_{r=1}^{R}f(x_{i};\theta_{r})$$
#
# where each $\theta_{r}$ is drawn from $q_{\phi}(\theta)$. In practice, this just means performing $R$ forward passes for each input $x_{i}$ and computing the average. In the same spirit, one can obtain the standard deviation as a measure of uncertainty:
# $\sigma_{i}^{2} = \frac{1}{R-1}\sum_{r=1}^{R}(f(x_{i};\theta)-\hat{y_{i}})^{2}$.
#
# Please note that both $\hat{y_{i}}$ and $\sigma_{i}^{2}$ still have shape $128\times128\times3$, i.e. the mean and variance computations are performed _per-pixel_ (but might be aggregated to a global measure afterwards).
# + id="Z-gyY4LKG_Q7"
REPS=20
# Approximate the posterior predictive with REPS stochastic forward passes:
# each model.predict draws fresh weights from the flipout posteriors.
# NOTE(review): assumes the network output has the same shape as X_val.
preds=np.zeros(shape=(REPS,)+X_val.shape)
for rep in range(REPS):
    preds[rep,:,:,:,:]=model.predict(X_val)
# Per-pixel mean and standard deviation over the sampled predictions.
preds_mean=np.mean(preds,axis=0)
preds_std=np.std(preds,axis=0)
# + [markdown] id="mcSUWaiHcVkb"
# Before inspecting the mean and standard deviation computed in the previous cell, let's visualize one of the outputs of the BNN. In the following plot, the input is shown in the first row, while the second row illustrates the result of a single forward pass.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 321} id="WSEXdGc7G_Q8" outputId="f0f35cfa-c9d9-408c-8a12-8b2540fe8df0"
NUM=16
# show a single prediction
# NOTE(review): showSbs is defined in an earlier cell; top row is the target,
# bottom row a single stochastic forward pass.
showSbs(y_val[NUM],preds[0][NUM], top="Inputs", bottom="Single forward pass")
# + [markdown] id="_b9HoUskeU1B"
# If you compare this image to one of the outputs from {doc}`supervised-airfoils`, you'll see that it doesn't look too different at first sight. This is a good sign: it seems the network learned to produce the content of the pressure and velocity fields.
#
# More importantly, though, we can now visualize the uncertainty over predictions more clearly by inspecting several samples from the posterior distribution as well as the standard deviation for a given input. Below is code for a function that visualizes precisely that (uncertainty is shown with a different colormap in order to illustrate the differences to previous non-bayesian notebook).
# + colab={"base_uri": "https://localhost:8080/", "height": 132} id="gAcKTBCGmohC" outputId="9fe549dd-c74e-49fa-9c3f-3c1a260bdb00"
# plot repeated samples from posterior for some observations
def plot_BNN_predictions(target, preds, pred_mean, pred_std, num_preds=5,channel=0):
    """Plot target, several posterior samples, their mean and std-dev side by side.

    target    -- ground-truth frame, shape (128, 128, channels)
    preds     -- stack of sampled predictions, shape (R, 128, 128, channels)
    pred_mean -- per-pixel mean over the samples
    pred_std  -- per-pixel standard deviation over the samples
    num_preds -- how many individual samples to show (capped at len(preds))
    channel   -- which channel to visualize (0 = pressure)
    """
    if num_preds>len(preds):
        print('num_preds was set to {}, but has to be smaller than the length of preds. Setting it to {}'.format(num_preds,len(preds)))
        num_preds = len(preds)
    # transpose and concatenate the frames that are to plot
    to_plot=np.concatenate((target[:,:,channel].transpose().reshape(128,128,1),preds[0:num_preds,:,:,channel].transpose(),
                            pred_mean[:,:,channel].transpose().reshape(128,128,1),pred_std[:,:,channel].transpose().reshape(128,128,1)),axis=-1)
    fig, axs = plt.subplots(nrows=1,ncols=to_plot.shape[-1],figsize=(20,4))
    for i in range(to_plot.shape[-1]):
        label='Target' if i==0 else ('Avg Pred' if i == (num_preds+1) else ('Std Dev (normalized)' if i == (num_preds+2) else 'Pred {}'.format(i)))
        # std-dev panel gets a different colormap so it stands out
        colmap = cm.viridis if i==to_plot.shape[-1]-1 else cm.magma
        frame = np.flipud(to_plot[:,:,i])
        # normalize each frame to [0, 1] individually;
        # renamed from min/max to avoid shadowing the Python builtins
        vmin = np.min(frame)
        vmax = np.max(frame)
        frame -= vmin
        frame /= (vmax - vmin)
        axs[i].imshow(frame,cmap=colmap)
        axs[i].axis('off')
        axs[i].set_title(label)
# Show posterior samples, their mean and std-dev for one validation case.
OBS_IDX=5
plot_BNN_predictions(y_val[OBS_IDX,...],preds[:,OBS_IDX,:,:,:],preds_mean[OBS_IDX,...],preds_std[OBS_IDX,...])
# + [markdown] id="ln7sn-UJpHa7"
# We are looking at channel 0, i.e. the pressure here. One can observe that the dark and bright regions vary quite a bit across predictions. It is reassuring to note that - at least from visual inspection - the average (i.e. marginal) prediction is closer to the target than most of the single forward passes.
#
# It should also be noted that each frame was normalized for the visualization. Therefore, when looking at the uncertainty frame, we can infer where the network is uncertain, but not how uncertain it is in absolute values.
#
# In order to assess a global measure of uncertainty we can however compute an average standard deviation over all samples in the validation set.
# + colab={"base_uri": "https://localhost:8080/"} id="vQRbK594S2Ak" outputId="7e3ea005-6d48-4308-8e53-002173f4ebc2"
# Average Prediction with total uncertainty
# Aggregate over samples and pixels; one value per output channel.
uncertainty_total = np.mean(np.abs(preds_std),axis=(0,1,2))
preds_mean_global = np.mean(np.abs(preds),axis=(0,1,2,3))
print("\nAverage pixel prediction on validation set: \n pressure: {} +- {}, \n ux: {} +- {},\n uy: {} +- {}".format(np.round(preds_mean_global[0],3),np.round(uncertainty_total[0],3),np.round(preds_mean_global[1],3),np.round(uncertainty_total[1],3),np.round(preds_mean_global[2],3),np.round(uncertainty_total[2],3)))
# + [markdown] id="-NtAI8ZbMS8a"
# For a run with standard settings, the uncertainties are on the order of 0.01 for all three fields. As the pressure field has a smaller mean, its uncertainty is larger in relative terms. This makes sense, as the pressure field is known to be more difficult to predict than the two velocity components.
# + [markdown] id="hcVF9S-JoprL"
# ## Test evaluation
#
# Like in the case for a conventional neural network, let's now look at **proper** test samples, i.e. OOD samples, for which in this case we'll use new airfoil shapes. These are shapes that the network never saw in any training samples, and hence it tells us a bit about how well the network generalizes to new shapes.
#
# As these samples are at least slightly OOD, we can draw conclusions about how well the network generalizes, which the validation data would not really tell us. In particular, we would like to investigate if the NN is more uncertain when handling OOD data. Like before, we first download the test samples ...
# + colab={"base_uri": "https://localhost:8080/"} id="ClH53HqtOw3L" outputId="10ad4fab-48ed-405b-9148-304e4624d29f"
# Download the OOD test set (new airfoil shapes) once and cache it locally.
if not os.path.isfile('data-airfoils-test.npz'):
    import urllib.request
    url="https://physicsbaseddeeplearning.org/data/data_test.npz"
    print("Downloading test data, this should be fast...")
    urllib.request.urlretrieve(url, 'data-airfoils-test.npz')
nptfile=np.load('data-airfoils-test.npz')
print("Loaded {}/{} test samples".format(len(nptfile["test_inputs"]),len(nptfile["test_targets"])))
# + [markdown] id="lBRnudybM3Ri"
# ... and then repeat the procedure from above to evaluate the BNN on the test samples, and compute the marginalized prediction and uncertainty.
# + colab={"base_uri": "https://localhost:8080/"} id="TP5D9rhkNlsf" outputId="5a316880-f5a9-4c4b-8962-59bc6350c788"
# Move axis 1 (presumably channels -- confirm against the data layout) to the
# end, since the model expects channels-last tensors.
X_test = np.moveaxis(nptfile["test_inputs"],1,-1)
y_test = np.moveaxis(nptfile["test_targets"],1,-1)
# Sample the posterior predictive on the test set (fewer reps than validation).
REPS=10
preds_test=np.zeros(shape=(REPS,)+X_test.shape)
for rep in range(REPS):
    preds_test[rep,:,:,:,:]=model.predict(X_test)
preds_test_mean=np.mean(preds_test,axis=0)
preds_test_std=np.std(preds_test,axis=0)
# MAE of the marginalized (mean) prediction against ground truth.
test_loss = tf.reduce_mean(mae(preds_test_mean, y_test))
print("\nAverage test error: {}".format(test_loss))
# + colab={"base_uri": "https://localhost:8080/"} id="kLmWz_kPQ_RE" outputId="6656bda9-532c-4dce-a4b9-1c4b4c70ecbd"
# Average Prediction with total uncertainty
# Same aggregation as for the validation set: one value per output channel.
uncertainty_test_total = np.mean(np.abs(preds_test_std),axis=(0,1,2))
preds_test_mean_global = np.mean(np.abs(preds_test),axis=(0,1,2,3))
print("\nAverage pixel prediction on test set: \n pressure: {} +- {}, \n ux: {} +- {},\n uy: {} +- {}".format(np.round(preds_test_mean_global[0],3),np.round(uncertainty_test_total[0],3),np.round(preds_test_mean_global[1],3),np.round(uncertainty_test_total[1],3),np.round(preds_test_mean_global[2],3),np.round(uncertainty_test_total[2],3)))
# -
# This is reassuring: The uncertainties on the OOD test set with new shapes are at least slightly higher than on the validation set.
#
# ### Visualizations
#
# The following graph visualizes these measurements: it shows the mean absolute errors for validation and test sets side by side, together with the uncertainties of the predictions as error bars:
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="Sn8g_ACGsVgo" outputId="26c88b29-1eb9-4316-d5d5-1ab08b3f08f1"
# plot per channel MAE with uncertainty
val_loss_c, test_loss_c = [], []
for channel in range(3):
val_loss_c.append( tf.reduce_mean(mae(preds_mean[...,channel], y_val[...,channel])) )
test_loss_c.append( tf.reduce_mean(mae(preds_test_mean[...,channel], y_test[...,channel])) )
fig, ax = plt.subplots()
ind = np.arange(len(val_loss_c)); width=0.3
bars1 = ax.bar(ind - width/2, val_loss_c, width, yerr=uncertainty_total, capsize=4, label="validation")
bars2 = ax.bar(ind + width/2, test_loss_c, width, yerr=uncertainty_test_total, capsize=4, label="test")
ax.set_ylabel("MAE & Uncertainty")
ax.set_xticks(ind); ax.set_xticklabels(('P', 'u_x', 'u_y'))
ax.legend(); plt.tight_layout()
# + [markdown] id="icgfvAIqoMpE"
# The mean error is clearly larger, and the slightly larger uncertainties of the predictions are likewise visible via the error bars.
#
# In general it is hard to obtain a calibrated uncertainty estimate, but since we are dealing with a fairly simple problem here, the BNN is able to estimate the uncertainty reasonably well.
#
# The next graph shows the differences of the BNN predictions for a single case of the test set (using the same style as for the validation sample above):
# + colab={"base_uri": "https://localhost:8080/", "height": 132} id="oP1HOciVjh-o" outputId="932ab62b-f7cc-4626-8848-2d8f4328e532"
# Same per-sample visualization as before, now for an OOD test case.
OBS_IDX=5
plot_BNN_predictions(y_test[OBS_IDX,...],preds_test[:,OBS_IDX,:,:,:],preds_test_mean[OBS_IDX,...],preds_test_std[OBS_IDX,...])
# + [markdown] id="6gCw2W4z6bYS"
# We can also visualize several shapes from the test set together with the corresponding marginalized prediction and uncertainty.
# + colab={"base_uri": "https://localhost:8080/", "height": 570} id="iJ2y_HmDzkp6" outputId="6b22347d-9d76-4f6c-e06d-04703cbe1f8b"
# For a few test shapes, show input, marginalized prediction and uncertainty.
IDXS = [1,3,8]
CHANNEL = 0
fig, axs = plt.subplots(nrows=len(IDXS),ncols=3,sharex=True, sharey = True, figsize = (9,len(IDXS)*3))
for i, idx in enumerate(IDXS):
    axs[i][0].imshow(np.flipud(X_test[idx,:,:,CHANNEL].transpose()), cmap=cm.magma)
    axs[i][1].imshow(np.flipud(preds_test_mean[idx,:,:,CHANNEL].transpose()), cmap=cm.magma)
    axs[i][2].imshow(np.flipud(preds_test_std[idx,:,:,CHANNEL].transpose()), cmap=cm.viridis)
# Column titles only on the first row.
axs[0][0].set_title('Shape')
axs[0][1].set_title('Avg Pred')
axs[0][2].set_title('Std. Dev')
# + [markdown] id="3to3b5OS6p_9"
# As we can see, the shapes from the test set differ quite a bit from another. Nevertheless, the uncertainty estimate is reasonably distributed. It is especially high in the boundary layer around the airfoil, and in the regions of the low pressure pocket.
# + [markdown] id="trAG3xoQG_Q-"
# ### Discussion
#
# Despite these promising results, there are still several issues with Bayesian Neural Nets, limiting their use in many practical applications. One serious drawback is the need for additional scaling of the KL-loss and the fact that there is no convincing argument on why it is necessary yet (read e.g. [here](http://proceedings.mlr.press/v119/wenzel20a/wenzel20a.pdf) or [here](https://arxiv.org/abs/2008.05912)).
# Furthermore, some people think that assuming independent normal distributions as variational approximations to the posterior is an oversimplification since in practice the weights are actually highly correlated ([paper](https://arxiv.org/abs/1909.00719)). Other people instead argue that this might not be an issue, as long as the networks in use are deep enough ([paper](https://arxiv.org/abs/2002.03704)). On top of that, there is research on different (e.g. heavy-tailed) priors other than normals and many other aspects of BNNs.
# + [markdown] id="h_z2_i_VA1HP"
# ## Next steps
#
# But now it's time to experiment with BNNs yourself.
#
# * One interesting thing to look at is how the behavior of our BNN changes, if we adjust the KL-prefactor. In the training loop above we set it to 5000 without further justification. You can check out what happens, if you use a value of 1, as it is suggested by the theory, instead of 5000. According to our implementation, this should make the network 'more bayesian', since we assign larger importance to the KL-divergence than before.
#
# * So far, we have only worked with variational BNNs, implemented via TensorFlows probabilistic layers. Recall that there is a simpler way of getting uncertainty estimates: Using dropout not only at training, but also at inference time. You can check out how the outputs change for that case. In order to do so, you can, for instance, just pass a non-zero dropout rate to the network specification and change the prediction phase in the above implementation from _model.predict(...)_ to _model(..., training=True)_. Setting the _training=True_ flag will tell TensorFlow to forward the input as if it were training data and hence, it will apply dropout. Please note that the _training=True_ flag can also affect other features of the network. Batch normalization, for instance, works differently in training and prediction mode. As long as we don't deal with overly different data and use sufficiently large batch-sizes, this should not introduce large errors, though. Sensible dropout rates to start experimenting with are e.g. around 0.1.
| bayesian-code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1.Given the following amino acid sequence (MNKMDLVADVAEKTDLSKAKATEVIDAVFA), find the first, last and the 5th amino acids in the sequence.
Amino_acid ="MNKMDLVADVAEKTDLSKAKATEVIDAVFA"
def positions(seq):
    """Return the first, last and fifth amino acids of *seq*.

    Fixed: the original ignored its argument and always read the
    module-level Amino_acid constant.
    """
    firstamino = seq[0]
    lastamino = seq[-1]
    fifthamino = seq[4]  # 5th amino acid == 0-based index 4
    return firstamino, lastamino, fifthamino
positions(Amino_acid)
# # 2.The above amino acid is a bacterial restriction enzyme that recognizes "TCCGGA". Find the first restriction site in the following sequence: AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA
def find_pos(Restriction_site):
    """Return the index of the first occurrence of *Restriction_site*
    in the query DNA sequence, or -1 if it is absent."""
    Amino_seq="AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA"
    First_Restriction_site=Amino_seq.find(Restriction_site)
    return First_Restriction_site
# Fixed: the original also called find_pos(Restriction_site) with an
# undefined name, which raised NameError.
find_pos("TCCGGA")
# # 3.Using strings, lists, tuples and dictionaries concepts, find the reverse complement of AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA
def Reverse_complement(DNA_seq):
    """Print and return the reverse complement of *DNA_seq*.

    Each base is first replaced by its lowercase complement (so bases
    already substituted are not substituted again), then upper-cased
    and reversed.  Fixed: the original ignored its argument, returned
    None, and the final call used an undefined name (NameError).
    """
    Complement = DNA_seq.replace("A","t").replace("G","c").replace("T","a").replace("C","g")
    rev_comp = Complement.upper()[::-1]
    print(rev_comp)
    return rev_comp
Reverse_complement("AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA")
# # 4.Create a while loop that starts with x = 0 and increments x until x is equal to 5. Each iteration should print to the console.
def whilenum(num):
    """Count from 0 up to and including *num*, printing each value."""
    counter = 0
    while True:
        if counter > num:
            break
        print(counter)
        counter += 1
whilenum(5)
# # 5.Repeat the previous problem, but the loop will skip printing x = 5 to the console but will print values of x from 6 to 10
def number(numb):
    """Print the integers 1..numb, silently skipping the value 5."""
    current = 0
    while current < numb:
        current += 1
        if current != 5:
            print(current)
number(10)
# # 6.Repeat the previous problem, but the loop will skip printing x = 5 to the console but will print values of x from 6 to 10
# +
def number(numb1):
    """Print the integers 1..numb1, skipping the values 1 through 3."""
    step = 0
    while step < numb1:
        step += 1
        if step not in range(0, 4):
            print(step)
number(10)
# -
# # 7.Write a program to manage bank withdrawals at the ATM,expand the script in the previous cell to also manage ATM deposits
# +
def Atm_transctions(number):
    """Tiny interactive ATM loop: balance enquiry, withdrawal, deposit.

    number -- account/card number (currently unused; kept for the
              exercise's interface).

    Fixed: the original's deposit branch started with the dead
    expression ``output.lower() == 'd'`` (a no-op), and the menu
    re-prompt was duplicated in every branch.  With a too-large
    withdrawal the original silently kept re-asking for an amount;
    now the menu is shown again instead.
    """
    accountbal = 100000
    prompt = "Please enter 'b' to check balance or 'w' to withdraw or 'd' to deposite or'q' to quit"
    output = input(prompt)
    while output != 'q':
        choice = output.lower()
        if choice == 'b':
            print("My balance is : ", accountbal)
            print("anything else ?")
        elif choice == 'w':
            withdraw = float(input("Enter amount to withdraw: "))
            if withdraw <= accountbal:
                print('here is the amount:', withdraw)
                accountbal = accountbal - withdraw
            print('anything else?')
        elif choice == 'd':
            deposite = float(input('enter your deposite amount: '))
            print('you deposited', deposite)
            accountbal = accountbal + deposite
            print('anything else?')
        else:
            print('wrong choice')
        output = input(prompt)
Atm_transctions(1234)
# -
# # 8.Write a function percentageGC that calculates the GC content of a DNA sequence
def AT_GC(tRNA):
    """Compute the GC and AT content (in percent) of a nucleotide sequence.

    Prints both values and returns them as a (GC, AT) tuple.
    Fixed: the original returned ``print(...)``, i.e. always None.
    """
    GC = ((tRNA.count("C") + tRNA.count("G")) / len(tRNA)) * 100
    AT = 100 - GC
    print('GC content =' + str(GC) + ' and the AT content = ' + str(AT))
    return GC, AT
AT_GC('AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA')
# # 9.Write a function the reads the file (humchr.txt) and writes to another file (gene_names.txt) a clean list of gene names
# +
def write2file(gene_list, out_file):
    """
    Write the given gene names to *out_file*, one per line
    (no trailing newline after the last name).
    """
    content = '\n'.join(gene_list)
    with open(out_file, 'w') as handle:
        handle.write(content)
def remove_empty(gene_list):
    """
    Given a gene list, remove every '-' placeholder entry (in place).

    Returns the same list object so callers may rebind the result.
    Replaces the original quadratic remove()-until-ValueError loop
    with a single linear pass.
    """
    gene_list[:] = [gene for gene in gene_list if gene != '-']
    return gene_list
def clean_genes(input_file, out_file):
    """
    Given a chromosome annotation file, extract the
    genes and write them to another file
    """
    gene_list = []
    tag = False
    with open(input_file, 'r') as humchrx:
        for line in humchrx:
            # start collecting at the 'Gene' table header line...
            if line.startswith('Gene'):
                tag=True
            # ...and stop at the first blank line after the table
            if line == '\n':
                tag = False
            if tag:
                gene_list.append(line.split()[0])
    #clean the gene list
    # Merge the split two-word header into a single entry and drop the
    # leftover pieces -- index surgery specific to the humchrx.txt layout.
    gene_list.pop(2)
    gene_list[0] = gene_list[0]+"_"+gene_list[1]
    gene_list.pop(1)
    gene_list = remove_empty(gene_list)
    ## Writing to file
    write2file(gene_list, out_file)
clean_genes('../Data/humchrx.txt', 'testing.txt')
# -
# # .10 Convert the function you wrote in exercise 1 into a python module. Then, import the module and use the function to read humchrx.txt file and create a gene list file.
#
import hesborn
# # 11.Create a stand-alone script that does all the above
# # 11 .Show that the DNA string contains only four letters.
# # How many ’ATG’s are in the DNA string?
# +
def four_letter(base="AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA"):
    """Print the distinct letters of a DNA string, one per line.

    The sequence can now be passed in; it defaults to the exercise
    sequence.  Fixed: the original ignored its argument and its final
    call used an undefined name (NameError); letters are printed in
    sorted order so the output is deterministic.
    """
    for letter in sorted(set(base)):
        print(letter)
four_letter()
# -
def count_ATG(seq="AAAAATCCCGAGGCGGCTATATAGGGCTCCGGAGGCGTAATATAAAA"):
    """Print how many times 'ATG' occurs in *seq*, or 'not found'.

    Fixed: the original ignored its argument (it immediately
    reassigned ``seq``) and the final call used an undefined global,
    raising NameError.  The parameter now defaults to the exercise
    sequence, keeping the old signature usable.
    """
    if seq.find('ATG') == -1:
        print('not found')
    else:
        print(seq.count('ATG'))
count_ATG()
| Notebooks/Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Summary
#
# Generate training and validation datasets.
#
# ----
# # Imports
# +
import os
import random
import yaml
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
# -
pd.set_option("max_columns", 100)
# # Parameters
# Directory names for this notebook's inputs and outputs.
NOTEBOOK_PATH = Path('training_validation_test_split')
NOTEBOOK_PATH
# Output dir comes from $OUTPUT_DIR if set, else defaults to the notebook name.
OUTPUT_PATH = Path(os.getenv('OUTPUT_DIR', NOTEBOOK_PATH.name)).resolve()
OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
OUTPUT_PATH
# # `DATAPKG`
# Locations of upstream data packages, keyed by package name.
DATAPKG = {}
DATAPKG['uniparc-domain-wstructure'] = (
    Path(os.environ['DATAPKG_OUTPUT_DIR'])
    .joinpath("uniparc-domain-wstructure", "master")
)
# # Load data
# Parquet dataset (duplicate matches removed), partitioned by database_id.
adjacency_matrix_path = (
    DATAPKG['uniparc-domain-wstructure']
    .joinpath("remove_duplicate_matches", "adjacency_matrix.parquet")
)
adjacency_matrix_path
# # Construct training / validation / test datasets
# ## All Gene3D domains
# +
# Collect the Gene3D domain partition names and shuffle them deterministically.
GENE3D_DOMAINS = sorted(p.name for p in adjacency_matrix_path.glob("database_id=*"))
random.seed(42)
random.shuffle(GENE3D_DOMAINS)
GENE3D_DOMAINS[:3]
# -
# NOTE(review): this writes next to NOTEBOOK_PATH, not OUTPUT_PATH -- confirm intended.
with NOTEBOOK_PATH.joinpath("all_gene3d_domains.yaml").open("wt") as fout:
    yaml.dump(GENE3D_DOMAINS, fout, default_flow_style=False)
# ## Split into training / validation / test
# +
# Split the shuffled domains 3/4 : 1/8 : 1/8 into training / validation / test.
breakpoint1 = len(GENE3D_DOMAINS) * 3 // 4
print(breakpoint1)
breakpoint2 = len(GENE3D_DOMAINS) * 7 // 8
print(breakpoint2)
TRAINING_DOMAINS = GENE3D_DOMAINS[:breakpoint1]
VALIDATION_DOMAINS = GENE3D_DOMAINS[breakpoint1:breakpoint2]
TEST_DOMAINS = GENE3D_DOMAINS[breakpoint2:]
print('----')
print(len(GENE3D_DOMAINS))
print(len(TRAINING_DOMAINS))
print(len(VALIDATION_DOMAINS))
print(len(TEST_DOMAINS))
# -
# The three slices must partition the full list exactly.
assert (len(TRAINING_DOMAINS) + len(VALIDATION_DOMAINS) + len(TEST_DOMAINS)) == len(GENE3D_DOMAINS)
# +
# Persist each split as YAML for downstream notebooks.
with NOTEBOOK_PATH.joinpath("training_domains.yaml").open("wt") as fout:
    yaml.dump(TRAINING_DOMAINS, fout, default_flow_style=False)
with NOTEBOOK_PATH.joinpath("validation_domains.yaml").open("wt") as fout:
    yaml.dump(VALIDATION_DOMAINS, fout, default_flow_style=False)
with NOTEBOOK_PATH.joinpath("test_domains.yaml").open("wt") as fout:
    yaml.dump(TEST_DOMAINS, fout, default_flow_style=False)
| notebooks2/01-training_validation_test_split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Анализ графов в Python
# Тетрадка посвящена работе с графами в питоне на примере библиотеки NetworkX.
#
# ## Обзор NetworkX
#
# NetworkX --- это питонячая библиотека, предназначенная для создания, обработки и изучения сложных сетей (aka графов). В отличие от других библиотек, которые вы можете встретить в сети (igraph, graphviz etc), она полностью написана на Python, благодаря чему ставится через Anaconda и не требует предустановки других пакетов.
#
# Материал сильно опирается на лекцию по [введению в NetworkX](https://www.cl.cam.ac.uk/~cm542/teaching/2010/stna-pdfs/stna-lecture8.pdf).
# +
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Пример создания простого графа:
# Создаём заготовку графа, в которую будут добавляться вершины и рёбра.
g = nx.Graph()
# Добавление вершин:
# +
# Вершину графа можно называть как строкой, так и числом.
g.add_node(1)
g.add_node('n')
# Добавление нескольких вершин сразу.
g.add_nodes_from([2, 3])
# -
# Добавление рёбер:
# +
# Указываем две вершины, которые надо соединить.
g.add_edge(1, 'n')
# Добавление нескольких рёбер между указанными вершинами.
g.add_edges_from([(1, 2), (1, 3)])
# Добавление сразу двух вершин.
# Создаются и новые вершины, и ребро между ними!
g.add_edge('a', 'b', weight=0.1)
g.add_edge('b', 'c', weight=1.5)
g.add_edge('a', 'c', weight=1)
g.add_edge('c', 'd', weight=0.2)
g.add_edge('c', 153, weight=1.2)
# -
# Визуализация (при повторном запуске ячейки картинка может меняться):
# +
nx.draw_networkx(g, node_color='lightblue')
plt.axis('off')
print('Graph nodes:', g.nodes())
# -
# Вывод кратчайшего (или минимизирующего затраты) пути от одной вершины до другой:
# +
# Без учёта весов.
print(nx.shortest_path(g, 'b', 'd'))
# С учётом весов: теперь за переход по каждому ребру вносится плата.
print(nx.shortest_path(g, 'b', 'd', weight='weight'))
# -
# Точно так же, как ребру приписывался численный параметр `weight`, вершине можно приписывать любые характеристики:
g.add_node(
'node',
date='06.05.2018',
any_name='some information'
)
g.nodes['node']
# Т.е. каждый узел графа можно воспринимать как питонячий словарь `dict` с произвольными ключами и значениями.
#
# Фактически весь граф это просто словарь, где ключ -- номер вершины, значение -- набор вершин, соседних к ней:
g.adj
g['c']
# Можно вывести и другое традиционное представление графа в виде матрицы смежности:
nx.adjacency_matrix(g).todense()
# Получение числа вершин графа:
g.number_of_nodes()
len(g)
# Числа рёбер:
g.number_of_edges()
# Совокупная информация:
print(nx.info(g))
# При работе с графами часто бывает полезным получить доступ ко всем вершинам / рёбрам, что позволяет сделать метод `.nodes()` / `.edges()`:
g.nodes(data=True)
g.edges(data=True)
for node in g.nodes():
print(node, g.degree(node))
# ## Реальные данные
#
# Считаем данные о станциях московского метрополитена в 2014 году:
# +
# Moscow metro (2014): one row per pair of directly connected stations.
metro_data = pd.read_csv('metro_2014_pairwise.csv')
metro_data.head()
# -
# Формат такой: две станции записаны в одной строке, если между ними есть перегон.
# Общее число станций:
len(metro_data['Start station'].unique())
# Загрузим данные в граф из подготовленной таблицы:
#
# - вершинами будут названия станций
# - ребро между двумя станциями проводится, если между станциями есть перегон
# +
metro_graph = nx.from_pandas_edgelist(metro_data, source='Start station', target='End station')
# The direction of travel between stations is irrelevant
# (normally trains run both ways), so make the graph undirected.
metro_graph = nx.to_undirected(metro_graph)
print(nx.info(metro_graph))
# -
# Нарисуем граф и сохраним его во внешний файл (изменение размера картинки и сохранение делается средствами matplotlib'а):
# +
plt.figure(figsize=(40, 40))
# Other layout/drawing helpers to experiment with besides draw_networkx:
# draw_circular, draw_spectral, draw_random, etc.
nx.draw_networkx(metro_graph, with_labels=True, node_color='white', node_size=500)
plt.savefig('metro_2014.png', bbox_inches='tight')
# -
# Все степени вершин доступны в поле `.degree`, которое похоже по структуре на словарь из пар Имя_вершины: Степень_вершины. Чтобы можно было работать, как со словарём, достаточно навесить преобразование `dict()`:
dict(metro_graph.degree)
# Постройте гистограмму степеней вершин. Подпишите оси и название графика.
# +
# YOUR CODE
# -
# Почему наибольшее число вершин имеет степень $2$?
#
# Выведите станцию, которая соединена с наибольшим числом станций (т.е. вершина станции имеет наибольшую степень). Для этого понадобится пройтись циклом по всем парам вершина-степень и запомнить пару с максимальной степенью.
# +
# YOUR CODE
# -
# С помощью `pandas` выведите станции, на которые можно попасть напрямую от найденной (т.е. те, которые соединены с ней):
# +
# YOUR CODE
# -
# Библиотека NetworkX позволяет найти кратчайший путь между вершинами, по которому можно судить об их взаимном расположении:
nx.shortest_path(metro_graph, 'Сокольническая_Библиотека_имени_Ленина', 'Серпуховско-Тимирязевская_Боровицкая')
nx.shortest_path(metro_graph, 'Сокольническая_Библиотека_имени_Ленина', 'Серпуховско-Тимирязевская_Полянка')
# А ещё можно вывести кратчайшие пути до всех вершин от данной:
nx.single_source_shortest_path(metro_graph, 'Сокольническая_Библиотека_имени_Ленина')
# Выделим некоторые кратчайшие пути цветом (при желании можно сохранить картинку, как и раньше).
# Функция для выделения цветом путей, пример использования ниже.
# https://github.com/jtorrents/pydata_bcn_NetworkX/blob/master/NetworkX_SNA_workshop_with_solutions.ipynb
def plot_paths(G, paths):
    """Draw graph *G* and highlight every path in *paths* with thick red edges."""
    plt.figure(figsize=(36, 36))
    layout = nx.fruchterman_reingold_layout(G)
    # Base picture: white nodes, node labels, and all edges in the default colour.
    nx.draw_networkx_nodes(G, pos=layout, node_size=4000, node_color='white')
    nx.draw_networkx_labels(G, pos=layout, labels={node: node for node in G})
    nx.draw_networkx_edges(G, pos=layout)
    # Overlay each path as the list of edges between its consecutive nodes.
    for route in paths:
        route_edges = list(zip(route, route[1:]))
        nx.draw_networkx_edges(G, pos=layout, edgelist=route_edges,
                               edge_color='red', width=3)
    axes = plt.gca()
    axes.set_axis_off()
    axes.grid(None)
# Plot the shortest paths from one node to all the other nodes.
plot_paths(metro_graph, nx.single_source_shortest_path(metro_graph, 'Сокольническая_Библиотека_имени_Ленина').values())
# Plot several shortest paths between specific pairs of nodes.
plot_paths(
    metro_graph,
    [
        nx.shortest_path(metro_graph, 'Сокольническая_Библиотека_имени_Ленина', 'Серпуховско-Тимирязевская_Алтуфьево'),
        nx.shortest_path(metro_graph, 'Сокольническая_Сокольники', 'Калужско-Рижская_Третьяковская')
    ]
)
# ### Bonus task: colouring the metro graph
# Of course, the metro map can simply be drawn like this:
# +
plt.figure(figsize=(16, 16))
nx.draw_networkx(metro_graph, with_labels=False)
# -
# But every node can also be coloured with the colour of its metro line!
#
# To do that, first create a dictionary with a colour name for each line (feel free to change the colours; the table of matplotlib colour names is [here](https://matplotlib.org/examples/color/named_colors)):
line_colors = {
    'Сокольническая': 'red',
    'Замоскворецкая': 'green',
    'Арбатско-Покровская': 'darkblue',
    'Филевская': 'blue',
    'Кольцевая': 'saddlebrown',
    'Калужско-Рижская': 'orangered',
    'Таганско-Краснопресненская': 'blueviolet',
    'Калининская': 'gold',
    'Серпуховско-Тимирязевская': 'gray',
    'Люблинская': 'lawngreen',
    'Каховская': 'palegreen',
    'Бутовская': 'lightsteelblue',
}
# The function below takes a station name in the `Line-name_Station-name` format (exactly how names are stored in our table) and a dictionary of the form `Line-name: Colour` (the `line_colors` defined above). It returns the station's colour according to the line it belongs to.
def return_station_color(station, line_colors):
    """Return the colour of the metro line a station belongs to.

    *station* is expected in ``Line-name_Station-name`` format; the line is
    matched as a prefix against the keys of *line_colors*.  Falls back to
    ``'black'`` (and prints a warning) when no line name matches.
    """
    for line_name, colour in line_colors.items():
        if station.startswith(line_name):
            return colour
    print('Something went wrong: couldn\'t find the line for station {}.\nReturn black color.'.format(station))
    return 'black'
# Usage example:
return_station_color('Калининская_Третьяковская', line_colors)
return_station_color('Третьяковская', line_colors)  # no line prefix -> falls back to 'black'
# Create a list of colours for our nodes.
#
# To do so, loop over all nodes in `metro_graph.nodes()` and append their colours to the list:
# +
station_colors = []
# YOUR CODE
# +
plt.figure(figsize=(16, 16))
# Again, you can play with different drawing methods.
nx.draw_networkx(metro_graph, with_labels=False, node_color=station_colors)
| NetworkX_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Test Classifiers and Regressors on train / test data set
import numpy as np
import math
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score,cross_val_predict, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC,SVR,LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import accuracy_score,roc_curve, auc, get_scorer, roc_auc_score
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy.stats import spearmanr, pearsonr
from matplotlib import pyplot
from sklearn import linear_model,neighbors,tree,gaussian_process,ensemble,neural_network, manifold,mixture
# ## Parameters
import sys
sys.version
sys.version_info
# +
# Input locations and experiment parameters.
rel = "./"
train_data_file = rel+"features.csv"
test_data_file = rel+"features/STS/2012-en-test.csv"
cv_fold = 10  # number of cross-validation folds
#train_reference_data_file = rel +"dataset/STS/2012-en-train.csv"
#test_reference_data_file = rel +"dataset/STS/2012-en-test.csv"
threshold = 0.7  # cut-off used below to binarise the real-valued target
# -
# ## Classifiers to test
# Candidate classifiers as (name, estimator) pairs; commented-out entries were
# tried earlier and excluded from the current run.
classifiers = [
    #['SVC-linear',SVC(kernel="linear", C=0.025, probability = True)],
    #['SVC-rbf',SVC(gamma=2, C=1, probability = True,decision_function_shape ='ovr')],
    #['KNN-3',KNeighborsClassifier(3)],
    #['KNN-5',KNeighborsClassifier(5)],
    #['KNN-7',KNeighborsClassifier(7)],
    #['DTC-5',DecisionTreeClassifier(max_depth=5)],
    #['DTC-10',DecisionTreeClassifier(max_depth=10)],
    #['DTC-15',DecisionTreeClassifier(max_depth=15)],
    # Name encodes max_depth and n_estimators, e.g. RFC-15-30 = depth 15, 30 trees.
    ['RFC-5-10',RandomForestClassifier(max_depth=5, n_estimators=10)],
    ['RFC-10-10',RandomForestClassifier(max_depth=10, n_estimators=10)],
    ['RFC-15-10',RandomForestClassifier(max_depth=15, n_estimators=10)],
    ['RFC-15-20',RandomForestClassifier(max_depth=15, n_estimators=20)],
    ['RFC-15-30',RandomForestClassifier(max_depth=15, n_estimators=30)],
    ['RFC-25',RandomForestClassifier(max_depth=25, n_estimators=50)],
    ['RFC-30',RandomForestClassifier(max_depth=30, n_estimators=50)],
    ['RFC-45',RandomForestClassifier(max_depth=45, n_estimators=50)],
    #['RFC',RandomForestClassifier()],
    #['ADA',AdaBoostClassifier()],
    #['GNB',GaussianNB()],
    #['LDA',LinearDiscriminantAnalysis()],
    #['QDA',QuadraticDiscriminantAnalysis()]
]
# ## Regressors to test
# +
# Candidate regressors (constructed instances); commented-out entries were
# tried earlier and excluded from the current run.
regressors = [
    #SVR(kernel="linear", C=0.025),
    #SVR(gamma=2, C=1),
    #mixture.GaussianMixture(1),
    #mixture.GaussianMixture(2),
    #mixture.GaussianMixture(3),
    linear_model.LinearRegression(),
    #linear_model.Ridge(alpha = .5),
    #linear_model.ElasticNet(),
    #linear_model.BayesianRidge(),
    ensemble.RandomForestRegressor(),
    ensemble.RandomForestRegressor(max_depth=15, n_estimators=30),
    ensemble.GradientBoostingRegressor(),
    ensemble.ExtraTreesRegressor(),
    ensemble.BaggingRegressor(),
    ensemble.AdaBoostRegressor(),
    gaussian_process.GaussianProcessRegressor(),
    linear_model.HuberRegressor(),
    linear_model.PassiveAggressiveRegressor(),
    linear_model.RANSACRegressor(),
    linear_model.TheilSenRegressor(),
    linear_model.SGDRegressor(),
    neighbors.KNeighborsRegressor(),
    tree.DecisionTreeRegressor(),
    tree.ExtraTreeRegressor(),
    neural_network.MLPRegressor(activation='logistic',solver='lbfgs',max_iter=200)
]
# -
# ## Load data
# +
import csv, json
import io
import csv
def parse(data_file, sep):
    """Load a feature CSV and split it into features, target, and string pairs.

    The file is expected to have no header row, with columns laid out as
    ``[str1, str2, feature_1 .. feature_k, target]``.

    Parameters
    ----------
    data_file : str or file-like
        Path (or buffer) of the CSV file to read.
    sep : str
        Field delimiter.  (Previously this argument was silently ignored and
        the delimiter was hard-coded to ``','``; all existing callers pass
        ``','``, so honouring it is backward-compatible.)

    Returns
    -------
    x : ndarray of shape (n_samples, n_features)
    y : ndarray of shape (n_samples,)
    pairs : ndarray of shape (n_samples, 2)
    """
    # DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the replacement.
    d = pd.read_csv(data_file, delimiter=sep, header=None).to_numpy()
    pairs = d[:, 0:2]   # the two compared strings
    x = d[:, 2:-1]      # feature columns
    y = d[:, -1]        # target column (similarity score)
    return x, y, pairs
# -
def multiclass(y_class):
    """One-hot encode integer class labels into an (n_samples, n_classes) array."""
    labels = np.asarray(y_class).astype(int)
    encoded = np.zeros([len(labels), len(np.unique(labels))])
    # Put a single 1 per row, in the column of that sample's class index.
    encoded[np.arange(len(labels)), labels] = 1
    return encoded
# Load the train/test splits; binarise the real-valued target at `threshold`
# and build a one-hot version of the binary labels.
x_train,y_train,pairs_train = parse(train_data_file,sep=',')
x_train.shape
y_train_class = (y_train >= threshold).astype(int)
y_train_mul = multiclass(y_train_class)
x_test,y_test,pairs_test = parse(test_data_file,sep=',')
x_test.shape
y_test_class = (y_test >= threshold).astype(int)
y_test_mul = multiclass(y_test_class)
y_test_class
# Quick sanity look at the loaded feature matrices.
pd.DataFrame(x_train).head()
pd.DataFrame(x_test).head()
# # Represent data
# + active=""
# ### Train
# + active=""
# mds = manifold.MDS(n_components=2)
# x_train_2d = mds.fit_transform(x_train)
# + active=""
# y_train_color = ['r' if bool(v) else 'g' for v in y_train_class]
# pyplot.scatter(x_train_2d[:,0],x_train_2d[:,1],c=y_train_color)
# pyplot.show()
# -
# ## Select features
# + active=""
# from sklearn.datasets import load_boston
# from sklearn.feature_selection import SelectFromModel
# from sklearn.linear_model import LassoCV
#
# # Load the boston dataset.
# boston = load_boston()
# X, y = boston['data'], boston['target']
#
# # We use the base estimator LassoCV since the L1 norm promotes sparsity of features.
# clf = LassoCV()
#
# # Set a minimum threshold of 0.25
# sfm = SelectFromModel(clf, threshold=0.25)
# sfm.fit(x_train, y_train)
# n_features = sfm.transform(x_train).shape[1]
#
# # Reset the threshold till the number of features equals two.
# # Note that the attribute can be set directly instead of repeatedly
# # fitting the metatransformer.
# while n_features > 10:
# sfm.threshold += 0.1
# X_transform = sfm.transform(x_train)
# n_features = X_transform.shape[1]
#
#
# x_train = sfm.transform(x_train)
# x_test = sfm.transform(x_test)
#
# -
# ### Roc curve generator
def plot_roc_curve(y_test, y_score):
    """Plot the micro-averaged ROC curve of *y_score* against binary *y_test*."""
    # Micro-average: flatten labels and scores before computing the curve.
    false_pos, true_pos, _ = roc_curve(y_test.ravel(), y_score.ravel())
    area = auc(false_pos, true_pos)
    line_width = 2
    pyplot.figure()
    pyplot.plot(false_pos, true_pos, color='darkorange', lw=line_width,
                label='ROC curve (area = %0.2f)' % area)
    # Diagonal reference line = performance of a random classifier.
    pyplot.plot([0, 1], [0, 1], color='navy', lw=line_width, linestyle='--')
    pyplot.xlim([0.0, 1.0])
    pyplot.ylim([0.0, 1.05])
    pyplot.xlabel('False Positive Rate')
    pyplot.ylabel('True Positive Rate')
    pyplot.title('Receiver operating characteristic')
    pyplot.legend(loc="lower right")
    pyplot.show()
# ## Score classifiers
# +
import sklearn.pipeline as pipeline
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import BaggingClassifier
classifiers_scores = []
# iterate over classifiers
#for clf in classifiers:
#    y_score = clf.fit(x_train, y_train_class).predict(x_test)
#    sc = get_scorer('f1')(clf,x_test,y_test_class)
#    classifiers_scores.append(sc)
classifiers_scores = []
classifiers_regr = []
# For every classifier: fit on the binarised labels, then try stacking each
# regressor on top (class probabilities appended as extra features) and keep
# the regressor that maximises Pearson correlation on the test target.
# NOTE(review): model selection here uses the *test* set, so the reported
# scores are optimistically biased — confirm whether this is intended.
for name,clf in classifiers:
    #clf = ensemble.VotingClassifier(classifiers,voting='soft')
    y_score_clf_test = clf.fit(x_train, y_train_class).predict_proba(x_test)
    y_score_clf_train = clf.predict_proba(x_train)
    #score_max = f1_score(y_train_class, y_score_clf[:,1]>0.5)
    # Baseline score: positive-class probability alone, no stacked regressor.
    score_max = pearsonr(y_test,y_score_clf_test[:,1])[0]
    regr_max = None
    for regr in regressors:
        x_train_ext = np.append(x_train, y_score_clf_train, 1)
        x_test_ext = np.append(x_test, y_score_clf_test, 1)
        y_score = regr.fit(x_train_ext,y_train).predict(x_test_ext)
        #score = f1_score(y_train_class, y_score>0.5)
        score = pearsonr(y_test,y_score)[0]
        if score > score_max:
            score_max = score
            regr_max = regr
    # regr_max stays None when no regressor beats the bare classifier.
    classifiers_scores.append(score_max)
    classifiers_regr.append(regr_max)
# -
pd.DataFrame({"Classifier":[name for name,classifier in classifiers],\
              "Pearson":classifiers_scores},\
             columns=["Classifier","Pearson"]\
            )
# ## Evaluate features
# +
import sklearn.metrics as metrics
# Score every raw feature as a standalone classifier: threshold the feature
# value at `clas_threshold` and compare against the binarised labels.
clas_threshold = 0.6
c_eval = pd.DataFrame(index=range(0,x_train.shape[1]), columns=['F1','Pearson','Precision','Recall','Accuracy'])
for i in range(0,x_train.shape[1]):
    y_score_cl = (x_test[:,i] > clas_threshold).astype(int)
    # NOTE(review): chained indexing (c_eval['F1'][i]) can raise
    # SettingWithCopyWarning on recent pandas — c_eval.loc[i, 'F1'] is safer.
    c_eval['F1'][i] = metrics.f1_score(y_test_class, y_score_cl)
    c_eval['Pearson'][i] = pearsonr(y_test,x_test[:,i])[0]
    c_eval['Precision'][i] = metrics.precision_score(y_test_class, y_score_cl)
    c_eval['Recall'][i] = metrics.recall_score(y_test_class, y_score_cl)
    c_eval['Accuracy'][i] = metrics.accuracy_score(y_test_class, y_score_cl)
# -
c_eval.T
c_eval.T[0]
# ## The best classifier
# +
import sklearn.metrics as metrics
# Pick the classifier (and its best stacked regressor, if any) with the
# highest Pearson score found above.
max_cls = np.where(classifiers_scores == max(classifiers_scores))[0][0]
clf = classifiers[max_cls][1]
regr = classifiers_regr[max_cls]
print(clf)
y_score_clf = clf.predict_proba(x_test)
y_score_clf_train = clf.predict_proba(x_train)
if regr is not None:
    print(regr)
    # Stacking: append the classifier's class probabilities as extra features
    # and let the regressor predict the real-valued score.
    x_train_ext = np.append(x_train, y_score_clf_train, 1)
    x_test_ext = np.append(x_test, y_score_clf, 1)
    y_score = regr.fit(x_train_ext,y_train).predict(x_test_ext)
else:
    print('No regression')
    # Fall back to the positive-class probability as the score.
    y_score = y_score_clf[:,1]
print("Pearson:\t%0.3f"%pearsonr(y_test,y_score)[0])
print("F1:\t\t%0.3f"%metrics.f1_score(y_test_class,y_score>0.5))
print("Precision:\t%0.3f"%metrics.precision_score(y_test_class,y_score>0.5))
print("Recall:\t\t%0.3f"%metrics.recall_score(y_test_class,y_score>0.5))
print("Accuracy:\t%0.3f"%metrics.accuracy_score(y_test_class,y_score>0.5))
print(classification_report(y_test_class, y_score>0.5, target_names=['False', 'True']))
plot_roc_curve(y_test_class,y_score)
# -
# Same report for the bare best classifier, without the stacked regressor.
clf = classifiers[np.where(classifiers_scores == max(classifiers_scores))[0][0]][1]
print(clf)
y_score = clf.predict_proba(x_test)
print("Pearson:\t%0.3f" %pearsonr(y_test,y_score[:,1])[0])
print("F1:\t\t%0.3f" %get_scorer('f1')(clf,x_test,y_test_class))
print("Accuracy:\t%0.3f" %get_scorer('accuracy')(clf,x_test,y_test_class))
print("Precision:\t%0.3f" %get_scorer('precision')(clf,x_test,y_test_class))
print("Recall:\t\t%0.3f" %get_scorer('recall')(clf,x_test,y_test_class))
plot_roc_curve(y_test_class,y_score[:,1])
print(classification_report(y_test_class, y_score[:,1]>0.5, target_names=['False', 'True']))
y_score_class = clf.predict(x_test)
# `sklearn.externals.joblib` was removed in scikit-learn 0.23; import the
# standalone joblib package and fall back to the vendored copy only on very
# old installations.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
# Persist the best classifier, e.g. "RandomForestClassifier_0.7.pkl".
joblib.dump(clf, type(clf).__name__ + '_' + str(threshold) + '.pkl')
# Dump per-pair classification results (truth vs. prediction) for error analysis.
out_regr_err = 'class.res.csv'
columns = ['Plag','Truth','Classification','Diff','Str1','Str2']
pd.DataFrame(np.array([y_test,y_test_class,y_score_class,y_test_class-y_score_class,pairs_test[:,0],pairs_test[:,1]]).T,columns=columns).to_csv(out_regr_err)
# ## Score regressors
# Fit every candidate regressor on the raw features and score it by Pearson
# correlation with the real-valued test target.
regressors_scores = []
for rgs in regressors:
    y_score = rgs.fit(x_train, y_train).predict(x_test)
    regressors_scores.append(pearsonr(y_test,y_score)[0])
pd.DataFrame({"Regressor":[type(regressor).__name__ for regressor in regressors],\
              "Pearson":regressors_scores},\
             columns=["Regressor","Pearson"])
# ## The best regressor
# +
# The regressor objects keep the fit from the loop above, so the best one can
# be used for prediction directly without refitting.
m_r = np.where(regressors_scores == max(regressors_scores))[0][0]
rgs = regressors[m_r]
print(rgs)
y_score = rgs.predict(x_test)
# Binarise the predicted score at the same threshold used for the labels.
y_score_bin = (y_score >=threshold).astype(int)
plot_roc_curve((y_test_class),(y_score))
print("Spearman:\t%0.3f"%spearmanr(y_test,y_score).correlation)
print("Pearson:\t%0.3f" %pearsonr(y_test,y_score)[0])
print("Accuracy:\t%0.3f" % (accuracy_score(y_test_class, y_score_bin)))
print(classification_report(y_test_class, y_score_bin, target_names=['False', 'True']))
# -
# Dump per-pair regression errors for inspection.
out_regr_err = 'regr.res.csv'
columns = ['Truth','Score','Diff','Abs_Diff','Str1','Str2']
pd.DataFrame(np.array([y_test,y_score,y_test-y_score,abs(y_test-y_score),pairs_test[:,0],pairs_test[:,1]]).T,columns=columns).to_csv(out_regr_err)
| classify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.4 64-bit
# language: python
# name: python3
# ---
# # Chapter 1
# ## Example 1.24
# Two-letter names are presumably per-link success probabilities of a network
# (e.g. AC = P(link A-C works)) — TODO confirm against the course text.
AC = .9
AD = .75
CE = .8
CF = .95
EB = .9
FB = .85
DB = .95
# Series connection C->E->B: both links must work, so probabilities multiply.
CEB = round(CE * EB, 4)
CEB
CFB = CF * FB
CFB
# Parallel combination of the two C->B routes: fails only if both fail.
CB = round(1 - ( 1 - CEB ) * ( 1 - CFB ),5)
CB
ACB = round(AC * CB, 5)
ACB
ADB = round(AD * DB, 7)
ADB
# Overall A->B: parallel combination of the A->C->B and A->D->B routes.
AB = 1 - ( 1 - ACB ) * ( 1 - ADB )
AB
# ## Example ..
| mit.6.431x/proba.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import dataset
import sqlalchemy
# Load pre-built pickles with SECOP COVID contract items and the unioned set
# of contracts (file names suggest COVID-related procurement data — verify).
secop_2_items = pd.read_pickle('../data/secop_2_covid_items.pkl')
secop_2_items.head()
secop_covid = pd.read_pickle('../data/secop_union_all.pickle')
# Keep only SECOP II records flagged as COVID-related.
secop_2_covid = secop_covid.loc[lambda x: (x.is_covid) & (x.source == 'secop_2')].copy()
# ## DB load
# NOTE(review): the connection string is empty — it must be filled in before running.
conn_str = ''
db = dataset.connect(conn_str)
engine = sqlalchemy.create_engine(conn_str)
# BUG FIX: `column == pd.NaT` is always False (NaT, like NaN, never compares
# equal to anything), so the original assignments matched zero rows.  Use
# .isna() to actually select the missing timestamps and null them out before
# loading into the database.
secop_2_covid.loc[lambda x: x.fecha_de_firma.isna(), 'fecha_de_firma'] = None
secop_2_covid.loc[lambda x: x.fecha_de_inicio_de_ejecucion.isna(), 'fecha_de_inicio_de_ejecucion'] = None
secop_2_covid.loc[lambda x: x.fecha_de_fin_de_ejecucion.isna(), 'fecha_de_fin_de_ejecucion'] = None
# Inspect the rows whose end-of-execution date is still missing.
secop_2_covid.loc[lambda x: x.fecha_de_fin_de_ejecucion.isna()]
# Record-oriented dicts kept for the (currently disabled) insert_many path below.
secop_records = secop_2_covid.drop('urlproceso', axis=1).to_dict('records')
secop_items_records = secop_2_items.to_dict('records')
secop_2_covid = secop_2_covid.drop('urlproceso', axis=1)
# Replace the target tables wholesale on each load.
secop_2_covid.to_sql('secop_union', engine, if_exists='replace',)
secop_2_items.to_sql('secop_items', engine, if_exists='replace')
secop_items_table = db['secop_items']
secop_union_table = db['secop_union']
# +
# secop_union_table.insert_many(secop_records[0:10])
# +
# secop_items_table.insert_many(secop_items_records)
| notebooks/postgres_data_load.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import integration
# # Computing the integral
# ## For a curve that is convex upward
# Read the data from the file "data_950-1080_all.csv".
direct = pd.read_csv("Data/Data_Intervals/data_950-1080_all.csv", index_col=0, dtype=np.double)
direct.head()
direct.plot();
# ### $\int_{t_i}^{t} T(t) dt$
integral = integration.integrate_function(direct)
integral.plot();
# ### $\int_{t_i}^{t_f} T(t) dt$
result = integration.integrate(direct)
result
# The result is a pair of values: the first is the estimated value of the integral, the second is an upper bound on the error.
# ## For a curve that is convex downward
# Read the data from the file "data_100:600_SiO2.csv".
reverse = pd.read_csv("Data/Data_Intervals/data_100-600_SiO2.csv", index_col=0, dtype=np.double, header=None)
reverse.head()
reverse.plot()
# ### $\int_{t_i}^{t} T(t) dt$
integral = integration.integrate_function(reverse, extrema="min")
integral.plot();
# ### $\int_{t_i}^{t_f} T(t) dt$
result = integration.integrate(reverse, extrema="min")
result
| Examples/3.1.1 - Integration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Day 05
# #### part A
infile="05a_input.txt"
with open(infile) as f:
    lines=f.readlines()
# remove new lines, split each line into tuple of BF's and LR's, and replace the L,F with 0, the B,R with 1
# (the first 7 characters encode the row, the last 3 the column, as binary digits)
for idx in range(len(lines)):
    line=lines[idx].replace("\n","")
    bf=line[:7]
    bf=bf.replace("F","0",)
    bf=bf.replace("B","1")
    lr=line[7:]
    lr=lr.replace("L","0")
    lr=lr.replace("R","1")
    lines[idx]=(bf,lr)
lines[:4]
# convert strings (binary encoded (base 2) numbers) to normal integers (base 10)
bf_numbers=[int(line[0],2) for line in lines]
lr_numbers=[int(line[1],2) for line in lines]
# seat id = row * 8 + column; part A answer is the maximum id.
seat_ids=[(bf*8)+lr for bf,lr in zip(bf_numbers,lr_numbers)]
max(seat_ids)
# #### Part B
import numpy as np
# The missing seat shows up as a gap of 2 between consecutive sorted seat ids.
seats_sorted=np.asarray(sorted(seat_ids))
diffs=np.diff(seats_sorted)
np.unique(diffs)
maxidx=np.argmax(diffs)
maxidx
# The ids around the gap; the free seat sits between them.
seats_sorted[maxidx-1:maxidx+2]
# +
# -> solution: 597
| advent_of_code_05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="_VZkazxeTFA9"
# # 1단계 : 데이터 로딩 + EDA
# ## Data Load
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="YeuRRtx395ws" executionInfo={"status": "ok", "timestamp": 1605702769580, "user_tz": -540, "elapsed": 962, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="53da0cd0-bd93-4fe6-c062-416f7b8600af"
import pandas as pd
# Download the iris CSV once (IPython shell escape; skipped if the file already exists).
![ ! -f iris0.csv ]&&wget http://j.finfra.com/_file/iris0.csv
iris=pd.read_csv("iris0.csv")
iris
# + [markdown] id="imX-2TEZSsca"
# ## EDA
# + colab={"base_uri": "https://localhost:8080/", "height": 686} id="Qdg2eOUqTqMS" executionInfo={"status": "ok", "timestamp": 1605702433894, "user_tz": -540, "elapsed": 10267, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="a2a8e8ad-48bd-49ac-a003-9b80ae8dc3a1"
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid");
# Pairwise scatter plots coloured by species, for quick exploratory analysis.
sns.pairplot(iris,hue="Species");
plt.show()
# + [markdown] id="dmNXGJ4qTszh"
# # 2단계 : 학습 데이터/ 평가 데이터로 분리
# + colab={"base_uri": "https://localhost:8080/", "height": 86} id="EKB7a8U1Vkpd" executionInfo={"status": "ok", "timestamp": 1605703101186, "user_tz": -540, "elapsed": 939, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="9cefb6eb-b330-47f6-cb69-b161c7706219"
from sklearn.model_selection import train_test_split
# Split the first four columns (features) and the Species label into train/test parts.
X_train, X_test, y_train, y_test = train_test_split(iris.iloc[:,0:4], iris['Species'])
display(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# + [markdown] id="ckMyWUdPWFeo"
# # 3단계 : 학습(Training)
#
# + colab={"base_uri": "https://localhost:8080/"} id="1DtxkQQFX2ih" executionInfo={"status": "ok", "timestamp": 1605703419638, "user_tz": -540, "elapsed": 1055, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="137f83f9-0de8-44f1-82a6-73a5e80b7384"
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=1) # default is 5
model.fit(X_train, y_train)
# + [markdown] id="9zIX2Mh6X26a"
# # 4단계 : 평가
#
# + colab={"base_uri": "https://localhost:8080/"} id="AouOGieMX3LJ" executionInfo={"status": "ok", "timestamp": 1605703545961, "user_tz": -540, "elapsed": 1155, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="4a572e0b-b55a-4d91-8a4b-83ef1c4d5782"
score = model.score(X_test, y_test) # evaluate with the accuracy score
print(score) # comes out high because the data was well separated to begin with
pred_y=model.predict(X_test)
pred_y==y_test
# + [markdown] id="-AXSC6kqX3kU"
# # 5단계 : 모델 저장
#
# + colab={"base_uri": "https://localhost:8080/"} id="7CxaSblZX7Cw" executionInfo={"status": "ok", "timestamp": 1605704003380, "user_tz": -540, "elapsed": 1075, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="ad83c796-3906-44b4-aec1-60fb6<PASSWORD>"
from joblib import dump
# Persist the fitted model to disk.
dump(model,'model.joblib')
# + [markdown] id="_eAgXBE8X2MM"
# # 6단계 : 서비스 활용
#
# + colab={"base_uri": "https://localhost:8080/"} id="UL711_thX30w" executionInfo={"status": "ok", "timestamp": 1605704084041, "user_tz": -540, "elapsed": 1013, "user": {"displayName": "\ub0a8\uc911\uad6c", "photoUrl": "", "userId": "02914767085172164696"}} outputId="d3fc27be-9c54-4c77-dd0b-02cd2513402c"
from joblib import load
# Reload the persisted model and run a single prediction, as a service would.
model_rebuild = load('model.joblib')
model_rebuild.predict(X_test[0:1])
| 00_Background/NoneNLP_background/AnalyticProcess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/christianhidber/easyagents/blob/master/jupyter_notebooks/intro_cartpole.ipynb"
# target="_parent">
# <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
# </a>
# + [markdown] colab_type="text" id="itbylRO_XSzd"
# # CartPole Gym environment with TfAgents
# + [markdown] colab_type="text" id="JszJwM7GXkFK"
# ## Install packages (gym, tfagents, tensorflow,....)
# -
# #### suppress package warnings, prepare matplotlib, if in colab: load additional packages for rendering
# + pycharm={"is_executing": false}
# %matplotlib inline
import matplotlib.pyplot as plt
import sys
import warnings
warnings.filterwarnings('ignore')
if 'google.colab' in sys.modules:
    # On Colab, rendering needs a virtual X display (headless environment).
    # !apt-get update >/dev/null
    # !apt-get install xvfb >/dev/null
    # !pip install pyvirtualdisplay >/dev/null
    from pyvirtualdisplay import Display
    Display(visible=0, size=(960, 720)).start()
else:
    # for local installation
    sys.path.append('..')
# -
# #### install easyagents
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="C-jRHCOPW56b" outputId="28038221-7a27-4906-bc36-84b6efc7dba8" pycharm={"is_executing": false}
import sys
if 'google.colab' in sys.modules:
# !pip install easyagents >/dev/null
# + [markdown] colab_type="text" id="67n5CylJZJpt"
# ## Dry Run (very short training)
# -
# Let's do a quick test and train for a few iterations using a default network architecture (2 layers, fully connected, 100 neurons each):
# + pycharm={"name": "#%%\n"}
from easyagents.agents import PpoAgent
from easyagents.callbacks import duration
ppoAgent = PpoAgent('CartPole-v0')
# duration.Fast() keeps the training short: 10 iterations of 10 episodes each.
ppoAgent.train([duration.Fast()])
# + [markdown] pycharm={"name": "#%% md\n"}
# Note that rewards and step count are equal. That's because the CartPole environment doles out 1 reward point for
# each successful step, and stops after the first failed step.
#
# 'duration.Fast()' configures the training length to 10 iterations with 10 episodes each.
# After every iteration the current policy is retrained. The resulting loss is depicted in the first plot.
# Since Ppo is an actor-critic algorithm we not only plot the total loss but also the loss for the actor- and
# critic-networks separately.
#
# In the "steps" and "sum of rewards" plots we see the result of the evaluation.
# The coloured area represents the max and min values encoutered for the current evaluation period.
# + [markdown] colab_type="text" id="67n5CylJZJpt"
# ## Train (plot state, custom network)
# -
from easyagents.agents import PpoAgent
from easyagents.callbacks import plot, duration
# + pycharm={"is_executing": false}
# %%time
# Three fully connected hidden layers: 100 -> 50 -> 25 neurons.
ppoAgent = PpoAgent('CartPole-v0', fc_layers=(100, 50, 25))
ppoAgent.train([plot.State(), plot.Loss(), plot.Actions(), plot.Rewards()],
               num_iterations=10, num_iterations_between_eval=3)
# -
# The fc_layers argument defines the policy's neural network architecture. Here we use 3 fully connected layers
# with 100 neurons in the first, 50 in the second and 25 in the final layer.
# By default fc_layers=(75,75) is used.
#
# The first argument of the train method is a list of callbacks. Through callbacks we define the plots generated during
# training, the logging behaviour or control training duration.
# By passing [plot.State(), plot.Loss(), plot.Actions(), plot.Rewards()] we add in particular the State() plot,
# depicting the last observation state of the last evaluation episode. plot.Actions() displays a histogram of the
# actions taken for each episode played during the last evaluation period.
#
# Besides num_iterations there are quite a few parameters to specify the exact training duration (e.g.
# num_episodes_per_iteration, num_epochs_per_iteration, max_steps_per_episode,...).
# ## Switching the algorithm
#
# Switching from Ppo to Dqn is easy, essentially just replace PpoAgent with DqnAgent (the evaluation may take a few
# minuites):
from easyagents.agents import DqnAgent
from easyagents.callbacks import plot
# + pycharm={"name": "#%%\n"}
# %%time
# Dqn takes one step per iteration by default, hence the much larger iteration count.
dqnAgent = DqnAgent('CartPole-v0', fc_layers=(100, ))
dqnAgent.train([plot.State(), plot.Loss(), plot.Actions(), plot.Rewards()],
               num_iterations=20000, num_iterations_between_eval=1000)
# -
# Since Dqn by default only takes 1 step per iteration (and thus an episode spans over several iterations) we increased
# the num_iterations parameter.
# + [markdown] colab_type="text" id="67n5CylJZJpt" pycharm={"name": "#%% md\n"}
# ## Next: custom training, creating a movie & switching backends.
#
# * see
# [Orso on colab](https://colab.research.google.com/github/christianhidber/easyagents/blob/master/jupyter_notebooks/intro_orso.ipynb)
# (an example of a gym environment implementation based on a routing problem)
#
| jupyter_notebooks/intro_cartpole.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# + [markdown] id="view-in-github" colab_type="text"
# [View in Colaboratory](https://colab.research.google.com/github/iAshishVerma/CardView/blob/master/MobilenetRetraining.ipynb)
# + id="NOdnGPVP2BWj" colab_type="code" colab={}
# !git clone https://github.com/googlecodelabs/tensorflow-for-poets-2
# + id="73mgdRoR3ZLb" colab_type="code" colab={}
# !cd tensorflow-for-poets-2
# + id="Qjba9EsuMWcg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="e8bcf1fb-dca8-47b5-bce5-92450dc5bbb3"
# !ls
# + id="GHAdUyqxMtq4" colab_type="code" colab={}
import os
# Work inside the cloned repository so the `scripts` package is importable below.
os.chdir('tensorflow-for-poets-2')
# + id="mbVLmCwuM3nv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b951eb97-5afb-4581-df1c-123aeabea137"
# !ls
# + id="h4uXPAL94Mmr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="cf43f494-0bc7-4c28-cda8-6fdad23464b1"
# !curl -LO http://download.tensorflow.org/example_images/flower_photos.tgz
# !tar xzf flower_photos.tgz
# + id="1mTLIQLq51V5" colab_type="code" colab={}
IMAGE_SIZE=224
# NOTE(review): ARCHITECTURE is defined here but the retrain command below
# hard-codes mobilenet_1.0_224 instead — confirm which architecture is intended.
ARCHITECTURE="mobilenet_0.50_224"
# + id="41uNMU1uOgOz" colab_type="code" colab={}
# !mv flower_photos tf_files
# + id="L48Xf9OZ5mtB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 66099} outputId="c1c35f3f-9f4f-4f05-e446-4a68abd8bdd9"
# !python -m scripts.retrain \
# --bottleneck_dir=tf_files/bottlenecks \
# --how_many_training_steps=500 \
# --model_dir=tf_files/models/ \
# --summaries_dir=tf_files/training_summaries/mobilenet_1.0_224 \
# --output_graph=tf_files/retrained_graph.pb \
# --output_labels=tf_files/retrained_labels.txt \
# --architecture=mobilenet_1.0_224 \
# --image_dir=tf_files/flower_photos
# + id="ft1X4vjyX1d0" colab_type="code" colab={}
# !python -m scripts.label_image -h
# + id="G9DNWfQFYqNS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 445} outputId="7f5aef13-f38a-499b-b7ef-f40c79c0b470"
# !python -m scripts.label_image \
# --graph=tf_files/retrained_graph.pb \
# --image=tf_files/flower_photos/daisy/21652746_cc379e0eea_m.jpg
| MobilenetRetraining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Classifier Chain Example
#
#
# An example of :class:`skml.problem_transformation.ClassifierChain`
#
#
# +
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import numpy as np
from skml.problem_transformation import ClassifierChain
from skml.datasets import load_dataset
# Multi-label classification on the yeast dataset with a classifier chain
# built on logistic regression, then report several multi-label metrics.
X, y = load_dataset('yeast')
X_train, X_test, y_train, y_test = train_test_split(X, y)
cc = ClassifierChain(LogisticRegression())
cc.fit(X_train, y_train)
y_pred = cc.predict(X_test)
print("hamming loss: ")
print(hamming_loss(y_test, y_pred))
print("accuracy:")
print(accuracy_score(y_test, y_pred))
# F1/precision/recall are reported both micro-averaged (per-instance weighted)
# and macro-averaged (per-label unweighted).
print("f1 score:")
print("micro")
print(f1_score(y_test, y_pred, average='micro'))
print("macro")
print(f1_score(y_test, y_pred, average='macro'))
print("precision:")
print("micro")
print(precision_score(y_test, y_pred, average='micro'))
print("macro")
print(precision_score(y_test, y_pred, average='macro'))
print("recall:")
print("micro")
print(recall_score(y_test, y_pred, average='micro'))
print("macro")
print(recall_score(y_test, y_pred, average='macro'))
| doc/auto_examples/example_cc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Data Science Unit 4 Sprint Challenge 1 — Tree Ensembles
# ### Chicago Food Inspections
#
# For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019.
#
# [See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset.
#
# According to [Chicago Department of Public Health — Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), "Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls."
# #### Your challenge: Predict whether inspections failed
#
# The target is the `Fail` column.
#
# - When the food establishment failed the inspection, the target is `1`.
# - When the establishment passed, the target is `0`.
# #### Load Dependencies
from sklearn import model_selection, pipeline, ensemble, metrics, tree
from eli5.sklearn import PermutationImportance
from pdpbox import pdp, info_plots
import matplotlib.pyplot as plt
import category_encoders as ce
import graphviz as gv
import xgboost as xgb
import seaborn as sns
import pandas as pd
import numpy as np
import json
import eli5
# #### Download Data
# Download the Chicago food-inspection train/test splits from Google Drive.
TRAIN_URL = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'
TEST_URL = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'
train = pd.read_csv(TRAIN_URL)
test = pd.read_csv(TEST_URL)
# Guard against a changed upstream file: both splits must have 17 columns.
assert train.shape == (51916, 17)
assert test.shape == (17306, 17)
# ### Part 1: Preprocessing
#
# You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding. (Pandas, category_encoders, sklearn.preprocessing, or any other library.)
#
# _To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._
# Class balance of the Fail target in each split, plus the split sizes.
label_dist = {
    'train': train.Fail.value_counts() / train.Fail.size,
    'test': test.Fail.value_counts() / test.Fail.size,
}
labels = pd.DataFrame(label_dist).T
labels['size'] = pd.Series([train.Fail.size, test.Fail.size], index=['train', 'test'])
labels
# Encode the raw training frame into an all-numeric copy: categorical text
# columns become integer category codes (missing -> 'Unknown'), numeric
# columns are imputed, and the free-form Location string is dropped.
clean = train.copy(deep=True).drop(columns='Location')
clean['Violations'] = train['Violations'].fillna('Unknown').astype('category').cat.codes
clean['Inspection Type'] = train['Inspection Type'].fillna('Unknown').astype('category').cat.codes
clean['Facility Type'] = train['Facility Type'].fillna('Unknown').astype('category').cat.codes
# Dates become sortable YYYYMMDD integers.  `np.int` was only an alias for
# the builtin and was removed in NumPy 1.24; use `int` directly.
clean['Inspection Date'] = pd.to_datetime(train['Inspection Date']).dt.strftime("%Y%m%d").astype(int)
clean['Risk'] = train['Risk'].fillna('Risk -1 (Unknown)').astype('category').cat.codes
clean['AKA Name'] = train['AKA Name'].fillna('Unknown').astype('category').cat.codes
clean['Address'] = train['Address'].fillna('Unknown').astype('category').cat.codes
clean['State'] = train['State'].fillna('Unknown').astype('category').cat.codes
clean['City'] = train['City'].fillna('Unknown').astype('category').cat.codes
# Coordinates: mean-impute; Zip: mode-impute; License #: -1 sentinel.
clean['Longitude'] = train['Longitude'].fillna(train['Longitude'].mean())
clean['Latitude'] = train['Latitude'].fillna(train['Latitude'].mean())
clean['DBA Name'] = train['DBA Name'].astype('category').cat.codes
clean['Zip'] = train['Zip'].fillna(train['Zip'].mode()[0])
clean['License #'] = train['License #'].fillna(-1)
# ### Data Leakage
#
# It seems likely that the leaky column in question is the `Violations` feature. To receive<br>
# a list of violations is to fail your inspection, hence any rows without a `NaN` in that<br>
# column will be predicted with near perfect accuracy to have failed their inspection.<br>
# This is reinforced below where a decision tree is fit on the data set with and without<br>
# the data leakage, yielding drastically different results.
# #### With Data Leakage
# Baseline tree trained WITH the leaky `Violations` feature.
X = clean.drop(columns="Fail")
y = clean.Fail
leaky = tree.DecisionTreeClassifier(max_depth=3)
# 5-fold CV scores.  NOTE(review): the returned arrays are not captured or
# printed here — confirm whether these lines end notebook cells.
model_selection.cross_val_score(leaky, X, y, cv=5, scoring='roc_auc', n_jobs=-1)
model_selection.cross_val_score(leaky, X, y, cv=5, scoring='f1', n_jobs=-1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y)
leaky.fit(X_train, y_train)
# Render the fitted tree as Graphviz source for inline display.
dots = tree.export_graphviz(
    leaky,
    out_file=None,
    feature_names=X_train.columns,
    class_names=['Pass', 'Fail'],
    filled=True,
    impurity=False,
    proportion=True
)
gv.Source(dots)
# Permutation importances on the already-fitted tree (cv='prefit').
eli5.show_weights(PermutationImportance(leaky, scoring='roc_auc', n_iter=10, cv='prefit').fit(X_train, y_train), feature_names=X_train.columns.tolist())
# #### Without Data Leakage
# Same baseline tree WITHOUT the leaky `Violations` column.
X = clean.drop(columns=["Fail", "Violations"])
y = clean.Fail
airtight = tree.DecisionTreeClassifier(max_depth=3)
# 5-fold CV scores (see note on the leaky variant about uncaptured results).
model_selection.cross_val_score(airtight, X, y, cv=5, scoring='roc_auc', n_jobs=-1)
model_selection.cross_val_score(airtight, X, y, cv=5, scoring='f1', n_jobs=-1)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y)
airtight.fit(X_train, y_train)
# Render the fitted tree as Graphviz source for inline display.
dots = tree.export_graphviz(
    airtight,
    out_file=None,
    feature_names=X_train.columns,
    class_names=['Pass', 'Fail'],
    filled=True,
    impurity=False,
    proportion=True
)
gv.Source(dots)
# Permutation importances on the already-fitted tree (cv='prefit').
eli5.show_weights(PermutationImportance(airtight, scoring='roc_auc', n_iter=10, cv='prefit').fit(X_train, y_train), feature_names=X_train.columns.tolist())
# ### Part 2: Modeling
#
# Fit a Random Forest or Gradient Boosting model with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation to estimate an ROC AUC validation score.
#
# Use your model to predict probabilities for the test set. Get an ROC AUC test score >= 0.60.
#
# _To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._
# Modeling WITHOUT the leaky Violations column.
X = clean.drop(columns=["Fail", "Violations"])
y = clean.Fail
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y)
# #### Random Forests
# Regularized forest; failing inspections (class 1) up-weighted 3x.
forest = ensemble.RandomForestClassifier(
    criterion="entropy",
    n_estimators=100,
    max_depth=30,
    min_samples_split=50,
    min_samples_leaf=50,
    class_weight={0: 1, 1: 3},
    n_jobs=-1
)
# return_train_score=True so the plotting cell below can read
# data.train_score — cross_validate omits train scores by default.
roc_auc = model_selection.cross_validate(forest, X, y, cv=5, scoring='roc_auc', n_jobs=-1, return_train_score=True)
f1 = model_selection.cross_validate(forest, X, y, cv=5, scoring='f1', n_jobs=-1, return_train_score=True)
f1['test_score'], roc_auc['test_score']
# +
# Per-fold ROC-AUC curves for the forest.  Requires cross_validate to have
# been called with return_train_score=True for data.train_score to exist.
data = pd.DataFrame(roc_auc)
plt.title("Random Forest ROC-AUC Train/Test Scores")
plt.xlim(0, 5)
plt.ylim(0, 1)
plt.axhline(0.7, c='black')
# NOTE(review): xytext x=5.05 lies outside xlim(0, 5), so this annotation is
# clipped off-canvas — confirm intended placement.
plt.annotate('ROC-AUC Threshold', xy=(5, 0.68), xytext=(5.05, 0.68), fontsize=11, fontweight='bold')
# Label the curves so plt.legend() has handles (previously it warned about
# an empty legend).
plt.plot(data.index, data.train_score, label='train_score')
plt.plot(data.index, data.test_score, label='test_score')
plt.legend();
# -
# #### Gradient Boosting
# Out-of-the-box gradient-boosting baseline.  return_train_score=True so the
# plotting cell below can read data.train_score (omitted by default).
gbc = ensemble.GradientBoostingClassifier()
roc_auc = model_selection.cross_validate(gbc, X, y, cv=5, scoring='roc_auc', n_jobs=-1, return_train_score=True)
f1 = model_selection.cross_validate(gbc, X, y, cv=5, scoring='f1', n_jobs=-1, return_train_score=True)
f1['test_score'], roc_auc['test_score']
# +
# Per-fold ROC-AUC curves for the boosted model (same layout as the forest
# plot).  Requires return_train_score=True upstream for data.train_score.
data = pd.DataFrame(roc_auc)
plt.title("Gradient Boosted ROC-AUC Train/Test Scores")
plt.xlim(0, 5)
plt.ylim(0, 1)
plt.axhline(0.7, c='black')
# NOTE(review): xytext x=5.05 lies outside xlim(0, 5) — annotation is clipped.
plt.annotate('ROC-AUC Threshold', xy=(5, 0.68), xytext=(5.05, 0.68), fontsize=11, fontweight='bold')
# Label the curves so plt.legend() has handles.
plt.plot(data.index, data.train_score, label='train_score')
plt.plot(data.index, data.test_score, label='test_score')
plt.legend();
# -
# ### Part 3: Visualization
#
# Make one visualization for model interpretation. (You may use any libraries.) Choose one of these types:
# - Feature Importances
# - Permutation Importances
# - Partial Dependence Plot
# - Shapley Values
#
# _To earn a score of 3 for this part, make at least two of these visualization types._
# Refit both models on the held-out training split for interpretation.
gbc.fit(X_train, y_train)
forest.fit(X_train, y_train)
# #### Partial Dependence Plot
# NOTE(review): 'Inspection ID' looks like an arbitrary record identifier,
# so its partial-dependence curve is unlikely to be meaningful — confirm.
isolate = pdp.pdp_isolate(forest, X, X.columns.values, 'Inspection ID')
pdp.pdp_plot(isolate, 'Inspection ID')
# #### Permutation Importance
# cv='prefit' tells eli5 the estimators are already fitted; importance is the
# drop in ROC-AUC when each column is shuffled, averaged over 10 rounds.
gradient_permuter = PermutationImportance(gbc, scoring='roc_auc', n_iter=10, cv='prefit')
forest_permuter = PermutationImportance(forest, scoring='roc_auc', n_iter=10, cv='prefit')
gradient_permuter.fit(X_train, y_train)
forest_permuter.fit(X_train, y_train)
eli5.show_weights(gradient_permuter, feature_names=X_train.columns.tolist())
eli5.show_weights(forest_permuter, feature_names=X_train.columns.tolist())
# #### Feature Importances
# +
# Top-20 impurity-based feature importances for each ensemble, side by side.
fig, (ax, bx) = plt.subplots(1, 2, figsize=(10, 10))
for model, axis, label in ((gbc, ax, "Gradient Boosted"),
                           (forest, bx, "Random Forest")):
    importances = pd.Series(model.feature_importances_, index=X_train.columns)
    importances.sort_values(ascending=False).head(20).plot.barh(ax=axis, color='grey')
    axis.set_title(label + " Feature Importances")
plt.tight_layout()
| sprint-challenge/sprint-challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Samrath49/AI-with-Tensorflow/blob/master/Untitled1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="EsVlIyLfpE25" colab_type="text"
# # Assignment 1
# + id="49yvRYaTpJmI" colab_type="code" colab={}
import tensorflow as tf
import numpy as np
from tensorflow import keras
# + id="4DNi-2WdpL0I" colab_type="code" colab={}
def house_model(y_new):
    """Fit a single-unit linear model on the toy house-price data and
    predict the (scaled) price for `y_new` bedroom counts.

    Prices are in the course's scaled units where 1.0 == 100k.
    """
    xs = np.array([1.0, 2.0, 3.0, 4.0])
    # Target prices: 50k base + 50k per bedroom, scaled by 100k.  The
    # original left this array empty and never passed it to fit(), so
    # training could not run at all.
    ys = np.array([1.0, 1.5, 2.0, 2.5])
    model = keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(xs, ys, epochs=500)
    return model.predict(np.array(y_new))[0]
# + id="IGGpm4hNpT8C" colab_type="code" outputId="d28766f1-b620-4ed9-c4a2-08aa6b0ffabd" colab={"base_uri": "https://localhost:8080/", "height": 663}
# Predict the (scaled) price of a 7-bedroom house; expected near 4.0.
prediction = house_model([7.0])
print(prediction)
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # 转置卷积
# :label:`sec_transposed_conv`
#
# 到目前为止,我们所见到的卷积神经网络层,例如卷积层( :numref:`sec_conv_layer`)和汇聚层( :numref:`sec_pooling`),通常会减少下采样输入图像的空间维度(高和宽)。
# 然而如果输入和输出图像的空间维度相同,在以像素级分类的语义分割中将会很方便。
# 例如,输出像素所处的通道维可以保有输入像素在同一位置上的分类结果。
#
# 为了实现这一点,尤其是在空间维度被卷积神经网络层缩小后,我们可以使用另一种类型的卷积神经网络层,它可以增加上采样中间层特征图的空间维度。
# 在本节中,我们将介绍
# *转置卷积*(transposed convolution) :cite:`Dumoulin.Visin.2016`,
# 用于扭转下采样导致的空间尺寸减小。
#
# + origin_pos=2 tab=["pytorch"]
import torch
from torch import nn
from d2l import torch as d2l
# + [markdown] origin_pos=3
# ## 基本操作
#
# 让我们暂时忽略通道,从基本的转置卷积开始,设步幅为1且没有填充。
# 假设我们有一个$n_h \times n_w$的输入张量和一个$k_h \times k_w$的卷积核。
# 以步幅为1滑动卷积核窗口,每行$n_w$次,每列$n_h$次,共产生$n_h n_w$个中间结果。
# 每个中间结果都是一个$(n_h + k_h - 1) \times (n_w + k_w - 1)$的张量,初始化为0。
# 为了计算每个中间张量,输入张量中的每个元素都要乘以卷积核,从而使所得的$k_h \times k_w$张量替换中间张量的一部分。
# 请注意,每个中间张量被替换部分的位置与输入张量中元素的位置相对应。
# 最后,所有中间结果相加以获得最终结果。
#
# 例如, :numref:`fig_trans_conv` 解释了如何为$2\times 2$的输入张量计算卷积核为$2\times 2$的转置卷积。
#
# 
# :label:`fig_trans_conv`
#
# 我们可以对输入矩阵`X`和卷积核矩阵 `K`(**实现基本的转置卷积运算**)`trans_conv`。
#
# + origin_pos=4 tab=["pytorch"]
def trans_conv(X, K):
    """Basic 2-D transposed convolution of input X with kernel K
    (stride 1, no padding).

    Each input element broadcasts a scaled copy of K into the output, so
    the result has shape (n_h + k_h - 1, n_w + k_w - 1).
    """
    kh, kw = K.shape
    rows, cols = X.shape
    out = torch.zeros((rows + kh - 1, cols + kw - 1))
    for r in range(rows):
        for c in range(cols):
            out[r:r + kh, c:c + kw] += X[r, c] * K
    return out
# + [markdown] origin_pos=5
# 与通过卷积核“减少”输入元素的常规卷积(在 :numref:`sec_conv_layer` 中)相比,转置卷积通过卷积核“广播”输入元素,从而产生大于输入的输出。
# 我们可以通过 :numref:`fig_trans_conv` 来构建输入张量 `X` 和卷积核张量 `K` 从而[**验证上述实现输出**]。
# 此实现是基本的二维转置卷积运算。
#
# + origin_pos=6 tab=["pytorch"]
# Input and kernel from fig_trans_conv; expect [[0,0,1],[0,4,6],[4,12,9]].
X = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
K = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
trans_conv(X, K)
# + [markdown] origin_pos=7
# 或者,当输入`X`和卷积核`K`都是四维张量时,我们可以[**使用高级API获得相同的结果**]。
#
# + origin_pos=9 tab=["pytorch"]
# Same computation via the high-level API: reshape to NCHW four-dim tensors
# and load K directly as the (bias-free) layer weight.
X, K = X.reshape(1, 1, 2, 2), K.reshape(1, 1, 2, 2)
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, bias=False)
tconv.weight.data = K
tconv(X)
# + [markdown] origin_pos=10
# ## [**填充、步幅和多通道**]
#
# 与常规卷积不同,在转置卷积中,填充被应用于的输出(常规卷积将填充应用于输入)。
# 例如,当将高和宽两侧的填充数指定为1时,转置卷积的输出中将删除第一和最后的行与列。
#
# + origin_pos=12 tab=["pytorch"]
# In transposed convolution, padding=1 trims the first/last row and column
# from the OUTPUT (the opposite of regular convolution).
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, padding=1, bias=False)
tconv.weight.data = K
tconv(X)
# + [markdown] origin_pos=13
# 在转置卷积中,步幅被指定为中间结果(输出),而不是输入。
# 使用 :numref:`fig_trans_conv` 中相同输入和卷积核张量,将步幅从1更改为2会增加中间张量的高和权重,因此输出张量在 :numref:`fig_trans_conv_stride2` 中。
#
# 
# :label:`fig_trans_conv_stride2`
#
# 以下代码可以验证 :numref:`fig_trans_conv_stride2` 中步幅为2的转置卷积的输出。
#
# + origin_pos=15 tab=["pytorch"]
# stride=2 spaces out the broadcast intermediate results, enlarging the
# output (matches fig_trans_conv_stride2).
tconv = nn.ConvTranspose2d(1, 1, kernel_size=2, stride=2, bias=False)
tconv.weight.data = K
tconv(X)
# + [markdown] origin_pos=16
# 对于多个输入和输出通道,转置卷积与常规卷积以相同方式运作。
# 假设输入有 $c_i$ 个通道,且转置卷积为每个输入通道分配了一个 $k_h\times k_w$ 的卷积核张量。
# 当指定多个输出通道时,每个输出通道将有一个 $c_i\times k_h\times k_w$ 的卷积核。
#
# 同样,如果我们将 $\mathsf{X}$ 代入卷积层 $f$ 来输出 $\mathsf{Y}=f(\mathsf{X})$ ,并创建一个与 $f$ 具有相同的超参数、但输出通道数量是 $\mathsf{X}$ 中通道数的转置卷积层 $g$,那么 $g(Y)$ 的形状将与 $\mathsf{X}$ 相同。
# 下面的示例可以解释这一点。
#
# + origin_pos=18 tab=["pytorch"]
# A transposed conv with the same hyperparameters as a conv (channels
# swapped) restores the input's spatial shape: g(f(X)).shape == X.shape.
X = torch.rand(size=(1, 10, 16, 16))
conv = nn.Conv2d(10, 20, kernel_size=5, padding=2, stride=3)
tconv = nn.ConvTranspose2d(20, 10, kernel_size=5, padding=2, stride=3)
tconv(conv(X)).shape == X.shape
# + [markdown] origin_pos=19
# ## [**与矩阵变换的联系**]
# :label:`subsec-connection-to-mat-transposition`
#
# 转置卷积为何以矩阵变换命名呢?
# 让我们首先看看如何使用矩阵乘法来实现卷积。
# 在下面的示例中,我们定义了一个$3\times 3$的输入`X`和$2\times 2$卷积核`K`,然后使用`corr2d`函数计算卷积输出`Y`。
#
# + origin_pos=20 tab=["pytorch"]
# Reference 2-D cross-correlation output used below to compare against the
# matrix-multiplication formulation.
X = torch.arange(9.0).reshape(3, 3)
K = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
Y = d2l.corr2d(X, K)
Y
# + [markdown] origin_pos=21
# 接下来,我们将卷积核`K`重写为包含大量0的稀疏权重矩阵`W`。
# 权重矩阵的形状是($4$,$9$),其中非0元素来自卷积核`K`。
#
# + origin_pos=22 tab=["pytorch"]
def kernel2matrix(K):
    """Rewrite the 2x2 kernel K as the sparse (4, 9) weight matrix that
    performs the same cross-correlation on a flattened 3x3 input."""
    flat = torch.zeros(5)
    flat[:2] = K[0, :]
    flat[3:5] = K[1, :]
    W = torch.zeros((4, 9))
    # Each output position corresponds to the 5-element strip shifted to a
    # different start column: 0, 1, 3, 4.
    for row, start in enumerate((0, 1, 3, 4)):
        W[row, start:start + 5] = flat
    return W
# Sparse (4, 9) weight-matrix equivalent of kernel K.
W = kernel2matrix(K)
W
# + [markdown] origin_pos=23
# 逐行连接输入`X`,获得了一个长度为9的矢量。
# 然后,`W`的矩阵乘法和向量化的`X`给出了一个长度为4的向量。
# 重塑它之后,可以获得与上面的原始卷积操作所得相同的结果`Y`:我们刚刚使用矩阵乘法实现了卷积。
#
# + origin_pos=24 tab=["pytorch"]
# Convolution as matrix multiply: W @ vec(X), reshaped, matches corr2d's Y.
Y == torch.matmul(W, X.reshape(-1)).reshape(2, 2)
# + [markdown] origin_pos=25
# 同样,我们可以使用矩阵乘法来实现转置卷积。
# 在下面的示例中,我们将上面的常规卷积$2 \times 2$的输出`Y`作为转置卷积的输入。
# 想要通过矩阵相乘来实现它,我们只需要将权重矩阵`W`的形状转置为$(9, 4)$。
#
# + origin_pos=26 tab=["pytorch"]
# Transposed convolution as multiply by W.T — matches trans_conv's output.
Z = trans_conv(Y, K)
Z == torch.matmul(W.T, Y.reshape(-1)).reshape(3, 3)
# + [markdown] origin_pos=27
# 抽象来看,给定输入向量 $\mathbf{x}$ 和权重矩阵 $\mathbf{W}$,卷积的前向传播函数可以通过将其输入与权重矩阵相乘并输出向量 $\mathbf{y}=\mathbf{W}\mathbf{x}$ 来实现。
# 由于反向传播遵循链规则和 $\nabla_{\mathbf{x}}\mathbf{y}=\mathbf{W}^\top$,卷积的反向传播函数可以通过将其输入与转置的权重矩阵 $\mathbf{W}^\top$ 相乘来实现。
# 因此,转置卷积层能够交换卷积层的正向传播函数和反向传播函数:它的正向传播和反向传播函数将输入向量分别与 $\mathbf{W}^\top$ 和 $\mathbf{W}$ 相乘。
#
#
# ## 小结
#
# * 与通过卷积核减少输入元素的常规卷积相反,转置卷积通过卷积核广播输入元素,从而产生形状大于输入的输出。
# * 如果我们将 $\mathsf{X}$ 输入卷积层 $f$ 来获得输出 $\mathsf{Y}=f(\mathsf{X})$ 并创造一个与 $f$ 有相同的超参数、但输出通道数是 $\mathsf{X}$ 中通道数的转置卷积层 $g$,那么 $g(Y)$ 的形状将与 $\mathsf{X}$ 相同。
# * 我们可以使用矩阵乘法来实现卷积。转置卷积层能够交换卷积层的正向传播函数和反向传播函数。
#
# ## 练习
#
# 1. 在 :numref:`subsec-connection-to-mat-transposition` 中,卷积输入 `X` 和转置的卷积输出 `Z` 具有相同的形状。他们的数值也相同吗?为什么?
# 1. 使用矩阵乘法来实现卷积是否有效率?为什么?
#
# + [markdown] origin_pos=29 tab=["pytorch"]
# [Discussions](https://discuss.d2l.ai/t/3302)
#
| d2l/chapter_computer-vision/transposed-conv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RNN的output和state
# +
import tensorflow as tf
tf.__version__  # show which TensorFlow build this notebook ran against
# -
# ## 伪造输入张量
# +
# Fake a batch of 2 token sequences of length 4, embedded into 16-dim
# vectors -> inp has shape (2, 4, 16).
seq = tf.constant([1,2,3,4,5,6,7,8], shape=(2,4))
embedding = tf.keras.layers.Embedding(10,16)
inp = embedding(seq)
inp.shape
# -
# ## return_sequences=False, return_state=False
#
# 因为`return_sequences=False`和`return_state=False`,所以`LSTM`返回的只有最后一个timestep的输出,即`output`。
# +
# Defaults (return_sequences=False, return_state=False): only the last
# timestep's output is returned, shape (batch_size, units).
enc = tf.keras.layers.LSTM(16)
output = enc(inp)
print(output.shape)
# -
# ## return_sequences=True, return_state=False
#
# 因为`return_sequences=True`,所以会输出每一个timestep的`output`。因此,这里的`output`张量比上面增加了一个维度,`shape:(batch_size, time_steps, units)`
# +
# return_sequences=True: outputs for every timestep,
# shape (batch_size, time_steps, units).
enc2 = tf.keras.layers.LSTM(16, return_sequences=True, return_state=False)
output = enc2(inp)
print(output.shape)
# -
# ## return_sequences=False, return_state=True
#
# 因为`return_state=True`,所以LSTM会返回最后一个timestep的`state`,实际上`RNN`的`state`分为两个`state_h`和`state_c`。所以:
#
# +
# return_state=True: additionally returns the final hidden state (state_h)
# and cell state (state_c) of the last timestep.
enc3 = tf.keras.layers.LSTM(16, return_sequences=False, return_state=True)
output, state_h, state_c = enc3(inp)
print(output.shape)
print(state_h.shape)
print(state_c.shape)
# -
# ## return_sequences=True, return_state=True
#
# 因为`return_sequences=True`并且`return_state=True`,所以LSTM会返回每个timestep的输出组成的`output`,以及最后一个timestep的`state`(分为`state_h`和`state_c`两个)。
# +
# Both flags on: per-timestep outputs plus the final (state_h, state_c).
enc4 = tf.keras.layers.LSTM(16, return_sequences=True, return_state=True)
output, state_h, state_c = enc4(inp)
print(output.shape)
print(state_h.shape)
print(state_c.shape)
# -
# 通过以上小实验我们可以得出以下结论:
#
# * RNN的输出,可以通过`return_sequences`和`return_state`两个参数控制
# * `return_sequences=True`说明返回每一个timestep的输出,组成最终的`output`,`shape: (batch_size, time_steps, units)`
# * `return_sequences=False`说明返回最后一个timestep的输出,组成最终的`output`,`shape: (batch_size, units)`
# * `return_state=True`说明返回最后一个timestep的状态,组成最终的`state`
# * `return_state=False`说明不返回最后一个timestep的状态,LSTM的输出仅仅有`output`
#
#
# 还有一个trick可以帮助理解:
#
# * `return_sequences`用了`sequence`的复数表示`sequences`,说明返回的是一个序列的输出,组成最后的`output`
# * `return_state`没用`state`的复数表示`states`,说明仅仅返回最后一个timestep的状态,组成最后的`state`
| tf2/rnn_output_and_state.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ruhi-Sharmin-1/C-code/blob/main/hyperparamter_tuning_XGBoost.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="JYM0xwVoBScY"
#importing necessary libraries
from numpy import mean
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
#from sklearn.svm import SVC
from scipy.io import savemat
from scipy.io import loadmat
import timeit
import numpy as np
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/"} id="WRiZL-GuBcUL" outputId="984695d3-25a7-4768-ff67-1a3ea687079d"
#loading data from google drive
from google.colab import drive
#drive.mount('/content/gdrive')
drive.mount("/content/gdrive", force_remount=True)  # force_remount avoids stale mounts on rerun
# + id="GbIjw-yJBewm"
# Load the per-class ECG correlation-metric feature matrices exported from
# MATLAB; each file stores an (n_samples, n_features) array under
# 'all_corr_metrics'.
loaddir_data=F"/content/gdrive/My Drive/ml-ruhi/"
data = loadmat(loaddir_data + 'challenge_training2017_cases_normal_ecg_corr_metrics.mat',squeeze_me=True)
training_normal_features = data['all_corr_metrics'][:,:]
n_training_normal = np.shape(training_normal_features)[0]
data = loadmat(loaddir_data + 'challenge_training2017_cases_afib_ecg_corr_metrics.mat',squeeze_me=True)
training_afib_features = data['all_corr_metrics'][:,:]
n_training_afib = np.shape(training_afib_features)[0]
data = loadmat(loaddir_data + 'challenge_training2017_cases_noisy_ecg_corr_metrics.mat',squeeze_me=True)
training_noisy_features = data['all_corr_metrics'][:,:]
n_training_noisy = np.shape(training_noisy_features)[0]
data = loadmat(loaddir_data + 'challenge_training2017_cases_other_ecg_corr_metrics.mat',squeeze_me=True)
training_other_features = data['all_corr_metrics'][:,:]
n_training_other = np.shape(training_other_features)[0]
# + colab={"base_uri": "https://localhost:8080/"} id="CmZ4Y5YJQckE" outputId="119c2ed9-f215-4cf2-e230-d40d1d951ba7"
training_normal_features.shape  # (n_samples, n_features) for the normal class
# + id="-vkMvDgiBhfS"
# append the training datasets and learning datasets
# Stack the four class matrices; labels: 0=normal, 1=afib, 2=noisy, 3=other.
training_features = np.concatenate((training_normal_features,training_afib_features,training_noisy_features,training_other_features),axis=0)
training_labels = np.concatenate((np.zeros(n_training_normal),np.ones(n_training_afib),2*(np.ones(n_training_noisy)),3*(np.ones(n_training_other))))
# + id="bAPgzK1_BlPd"
#remove NAN values
def nanremove(x, y):
    """Drop every row of feature matrix `x` that contains a NaN, together
    with the matching entry of labels `y`.

    Parameters
    ----------
    x : 2-D array of features.
    y : 1-D array of labels aligned with the rows of x.

    Returns
    -------
    (x, y) restricted to fully-finite rows.
    """
    # A boolean row mask is exact; the original index-shifting delete loop
    # listed one entry per NaN *cell*, so a row with two NaNs shifted the
    # running offset twice and deleted the wrong rows.
    keep = ~np.isnan(x).any(axis=1)
    return x[keep], y[keep]
x,y=nanremove(training_features, training_labels)
# + id="5pk7jx1UBsWz"
# Shuffle features and labels together by gluing the label on as a column.
training_all = np.concatenate((x, y.reshape((-1,1))),axis=1)
np.random.shuffle(training_all) #adds randomness
training_features = training_all[:,:-1]
training_labels = training_all[:,-1]
# + id="NLjJGUM5KFb1"
# Locate the feature column with the largest mean absolute value
# (vectorised: one NumPy pass instead of a Python loop over columns; ties
# keep the first index, matching the original strict > comparison).
col_abs_means = np.abs(training_features).mean(axis=0)
col_maxabsmean = int(np.argmax(col_abs_means))
max_abs_mean = col_abs_means[col_maxabsmean]
# + colab={"base_uri": "https://localhost:8080/"} id="9-K6CeT_LHa_" outputId="7d070cbf-a228-405c-d3b4-9beff07129df"
col_maxabsmean  # index of the dominant column
# + id="t5sotRhdHjjm" colab={"base_uri": "https://localhost:8080/"} outputId="35117350-9f61-4de1-955d-2cb3344e60b7"
training_features[5:15,:]  # peek at a few rows of the feature matrix
# + id="D3Tefffw7Rng"
#https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/
#Lets start by importing the required libraries and loading the data:
#Import libraries:
import pandas as pd
import numpy as np
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
#from sklearn import cross_validation
#from sklearn import metrics #Additional scklearn functions
#from sklearn.grid_search import GridSearchCV #Perforing grid search
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, train_test_split
import matplotlib.pylab as plt
# %matplotlib inline
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 12, 4  # default figure size for the notebook's plots
# + id="G75FbmCKWZas"
def algorithm_pipeline(X_train_data, X_test_data, y_train_data, y_test_data,
                       model, param_grid, cv=10, scoring_fit='neg_mean_squared_error',
                       do_probabilities = False):
    """Grid-search `model` over `param_grid` on the training data, then
    predict on the test data with the best estimator found.

    Returns (fitted GridSearchCV object, predictions); predictions are class
    probabilities when do_probabilities is True.
    """
    searcher = GridSearchCV(
        estimator=model,
        param_grid=param_grid,
        cv=cv,
        n_jobs=-1,
        scoring=scoring_fit,
        verbose=2,
    )
    fitted_model = searcher.fit(X_train_data, y_train_data)
    predict = fitted_model.predict_proba if do_probabilities else fitted_model.predict
    pred = predict(X_test_data)
    return fitted_model, pred
# + id="zz0RC7e3Bu2p"
from sklearn import model_selection
#from sklearn.linear_model import LogisticRegression
# Hold out 1% as a quick sanity-test split; the grid searches below run CV
# on the training portion.
test_size = 0.01 # from 0.01 to 0.1
seed = 4 #change from 4 to 5
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(training_features, training_labels, test_size=test_size, random_state=seed)
# + colab={"base_uri": "https://localhost:8080/"} id="i94AG85bZeKc" outputId="7f1e3a69-eba9-40c1-fdd0-d446c10f0014"
# Staged hyperparameter search (depth/child-weight -> gamma -> subsampling
# -> regularisation -> n_estimators -> learning rate).  The misspelled
# `slient=True` kwarg matched no XGBoost parameter and has been dropped
# from every constructor below.
model = xgb.XGBClassifier(max_depth=5, learning_rate=0.01, n_estimators=140, min_child_weight=1, objective='multi:softmax', gamma=0, reg_alpha=0, reg_lambda=1)
param_grid = {
    'max_depth':range(3,10,2),
    'min_child_weight':range(1,6,2)
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="VlIRDLAAmGQm" outputId="ff23d33a-29af-4707-92a3-0986f595bcf0"
# Refine around the best depth found above.
model = xgb.XGBClassifier(max_depth=5, learning_rate=0.01, n_estimators=140, min_child_weight=1, objective='multi:softmax', gamma=0, reg_alpha=0, reg_lambda=1)
param_grid = {
    'max_depth':[8,9,10],
    'min_child_weight':[1,2]
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="uosCydXrm75k" outputId="c3962f07-2654-4a30-e51f-76eb5d6f9de6"
# Tune the minimum split-loss gamma with depth fixed at 8.
model = xgb.XGBClassifier(max_depth=8, learning_rate=0.01, n_estimators=140, min_child_weight=1, objective='multi:softmax', gamma=0, reg_alpha=0, reg_lambda=1)
param_grid = {
    'gamma':[i/10.0 for i in range(0,5)]
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="RBvQyhmLnqCd" outputId="973ca0ef-4f07-4d5f-8906-96715192c713"
# Tune row and column subsampling.
model = xgb.XGBClassifier(max_depth=8, learning_rate=0.01, n_estimators=140, min_child_weight=1, objective='multi:softmax', gamma=0.2, subsample=0.8, colsample_bytree=0.8,reg_alpha=0, reg_lambda=1)
param_grid = {
    'subsample':[i/10.0 for i in range(6,10)],
    'colsample_bytree':[i/10.0 for i in range(6,10)]
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="qmnDUsppozWq" outputId="2977422f-8131-467f-bee8-7d6d475eebd4"
# Tune L2 regularisation.
model = xgb.XGBClassifier(max_depth=8, learning_rate=0.01, n_estimators=140, min_child_weight=1, objective='multi:softmax', gamma=0.2, subsample=0.9, colsample_bytree=0.8,reg_alpha=0, reg_lambda=1)
param_grid = {
    'reg_lambda':[1e-5, 1e-2, 0.1, 1, 100]
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="Yg1VtrzipWB0" outputId="f2b3c39f-44f2-4bb5-e832-c4e1ff994018"
# Tune the number of boosting rounds.
model = xgb.XGBClassifier(max_depth=8, learning_rate=0.01, n_estimators=140, min_child_weight=1, objective='multi:softmax', gamma=0.2, subsample=0.9, colsample_bytree=0.8,reg_alpha=0, reg_lambda=1e-5)
param_grid = {
    'n_estimators': [100,200,300,400, 500,600,700,800,900, 1000]
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="wfj5LROCvM6c" outputId="2e59bcb6-6e86-43c9-b283-2c60b57d3e53"
# Probe even larger ensembles.
model = xgb.XGBClassifier(max_depth=8, learning_rate=0.01, n_estimators=140, min_child_weight=1, objective='multi:softmax', gamma=0.2, subsample=0.9, colsample_bytree=0.8,reg_alpha=0, reg_lambda=1e-5)
param_grid = {
    'n_estimators': [1000, 3000, 5000]
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + colab={"base_uri": "https://localhost:8080/"} id="bzCRS6cAqv6u" outputId="521a2a98-b685-47d3-a2d8-05a305d3edf3"
# Finally, tune the learning rate with the chosen ensemble size.
model = xgb.XGBClassifier(max_depth=8, learning_rate=0.01, n_estimators=1000, min_child_weight=1, objective='multi:softmax', gamma=0.2, subsample=0.9, colsample_bytree=0.8,reg_alpha=0, reg_lambda=1e-5)
param_grid = {
    'learning_rate': [1e-5, 1e-4, 1e-3, 1e-2, 0.1]
}
model, pred = algorithm_pipeline(X_train, X_test, Y_train, Y_test, model,
                                 param_grid, cv=2)
# Root Mean Squared Error
print(np.sqrt(-model.best_score_))
print(model.best_params_)
# + id="Ix2Y2qs66N85"
# Refit with the tuned settings on the training split.  (The misspelled
# `slient` kwarg matched no XGBoost parameter and has been dropped.)
bst = xgb.XGBClassifier(max_depth=8, learning_rate=0.1, n_estimators=1000, min_child_weight=1, objective='multi:softmax', gamma=0.2, reg_alpha=0, reg_lambda=1e-5)
bst.fit(X_train, Y_train) #bst=model
# Fit the validation data # model.predict for Y_predict
xgb_pred = bst.predict(X_test)
# extracting most confident predictions
best_preds = np.round(xgb_pred)
# + colab={"base_uri": "https://localhost:8080/"} id="0xAjNlsYexDQ" outputId="0266a469-01e2-4c8c-cd1b-0b98e449dfee"
print(model)  # show the fitted GridSearchCV wrapper from the last search
# + id="nbQVNQRWb0Gl"
def modelfit(alg, dtrain, predictors, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Tune n_estimators via xgb.cv, fit `alg`, and print train metrics.

    NOTE(review): copied from a tutorial and never invoked below.  It relies
    on module-level names `target` (label column) and `metrics`
    (sklearn.metrics) that are not defined in this notebook — confirm before
    calling.
    """
    if useTrainCV:
        xgb_param = alg.get_xgb_params()
        xgtrain = xgb.DMatrix(dtrain[predictors].values, label=dtrain[target].values)
        # `show_progress` was removed from xgb.cv; `verbose_eval` is the
        # current spelling of the same switch.
        cvresult = xgb.cv(xgb_param, xgtrain, num_boost_round=alg.get_params()['n_estimators'], nfold=cv_folds,
                          metrics='auc', early_stopping_rounds=early_stopping_rounds, verbose_eval=False)
        alg.set_params(n_estimators=cvresult.shape[0])
    #Fit the algorithm on the data
    alg.fit(dtrain[predictors], dtrain['Disbursed'], eval_metric='auc')
    #Predict training set:
    dtrain_predictions = alg.predict(dtrain[predictors])
    dtrain_predprob = alg.predict_proba(dtrain[predictors])[:,1]
    #Print model report:
    print("Model Report\n")
    print("Accuracy : %.4g" % metrics.accuracy_score(dtrain['Disbursed'].values, dtrain_predictions))
    print("AUC Score (Train): %f" % metrics.roc_auc_score(dtrain['Disbursed'], dtrain_predprob))
    # `booster()` is not callable on modern XGBClassifier; use get_booster().
    feat_imp = pd.Series(alg.get_booster().get_fscore()).sort_values(ascending=False)
    feat_imp.plot(kind='bar', title='Feature Importances')
    plt.ylabel('Feature Importance Score')
# + id="opy0PAkTByL8"
import xgboost as xgb
max_depth = 10
n_estimators = 110
# NOTE(review): learning_rate=0.0001 with only 110 trees trains a very weak
# ensemble — confirm these are the intended final settings.  (The misspelled
# `slient` kwarg matched no XGBoost parameter and has been dropped.)
bst = xgb.XGBClassifier(max_depth=max_depth, learning_rate=0.0001, n_estimators=n_estimators, min_child_weight=1, objective='multi:softmax', gamma=0, reg_alpha=0, reg_lambda=1)
bst.fit(X_train, Y_train) #bst=model
# Fit the validation data # model.predict for Y_predict
xgb_pred = bst.predict(X_test)
# extracting most confident predictions
best_preds = np.round(xgb_pred)
#(max_depth=3, learning_rate=0.1, n_estimators=100, verbosity=1, silent=None,
#objective="binary:logistic", booster='gbtree', n_jobs=1, nthread=None, gamma=0, min_child_weight=1,
#max_delta_step=0, subsample=1, colsample_bytree=1, colsample_bylevel=1, colsample_bynode=1,
#reg_alpha=0, reg_lambda=1, scale_pos_weight=1, base_score=0.5, random_state=0, seed=None, missing=None, **kwargs) -> None
# + id="WMo9me3YCHIB"
# Class-probability predictions for the held-out split, saved with the true
# labels for later offline analysis.
xgb_pred_proba = bst.predict_proba(X_test)
print(Y_test)
print(xgb_pred_proba)
import pandas as pd
pd.DataFrame(Y_test).to_csv(F"/content/gdrive/My Drive/ml-ruhi/XGBoost-Y-true-4class.csv")
pd.DataFrame(xgb_pred_proba).to_csv(F"/content/gdrive/My Drive/ml-ruhi/XGBoost-Y-pred-4class.csv")
# + id="xCnBdasVXrlX"
# + colab={"base_uri": "https://localhost:8080/"} id="TLEH6_EECIGj" outputId="9b8cd0e8-5307-45a6-b17c-d29d67b74aab"
from sklearn.metrics import f1_score
# Weighted F1 over the four classes.
score = f1_score(Y_test, best_preds, average='weighted')
print('F-Measure: %.3f' % score)
# + colab={"base_uri": "https://localhost:8080/"} id="ay84jvkHCOqu" outputId="864d3baa-64ae-482b-93a5-226cd06da834"
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(Y_test, best_preds)
accuracy * 100.0
# + colab={"base_uri": "https://localhost:8080/"} id="0ODJuKVUCSeO" outputId="05ff0f28-95d6-4d57-c434-dcc358b94ac6"
# `score` still holds the weighted F1 from above — the original label
# printed it as "Mean ROC AUC", which was incorrect.
print('Mean weighted F1: %.3f' % mean(score))
# + colab={"base_uri": "https://localhost:8080/"} id="B1obt0d_CUqZ" outputId="0d60100c-9967-45a3-b2b4-5e5e5474d981"
# avg F-measure
from sklearn.metrics import f1_score
score = f1_score(Y_test, best_preds, average=None)
print(score)
score = f1_score(Y_test, best_preds, average='macro')
print(score)
score = f1_score(Y_test, best_preds, average='weighted')
print(score)
# + id="XiIK8hqDCb34"
from sklearn.metrics import confusion_matrix,plot_confusion_matrix
# Raw-count confusion matrix: rows = true class, columns = predicted class.
cm=confusion_matrix(Y_test, best_preds)
# + id="7W8EdLcXCeHH"
# code from https://stackoverflow.com/questions/39033880/plot-confusion-matrix-sklearn-with-multiple-labels
def plot_conf_mat(cm, target_names, title='Confusion matrix', cmap=None, normalize=True):
    """Render a confusion matrix with per-cell annotations.

    cm           : square array of raw counts (rows = true, cols = predicted).
    target_names : class labels for the tick marks, or None to omit ticks.
    title        : figure title.
    cmap         : matplotlib colormap; defaults to 'Blues'.
    normalize    : if True, display row-normalized fractions instead of counts.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    import itertools

    # Accuracy/misclass always come from the raw counts.
    accuracy = np.trace(cm) / float(np.sum(cm))
    misclass = 1 - accuracy

    if cmap is None:
        cmap = plt.get_cmap('Blues')

    # Normalize BEFORE drawing so cell colors agree with the printed values
    # (the original normalized after imshow, so the colors reflected raw
    # counts while the text showed fractions).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    plt.figure(figsize=(8, 6))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()

    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)

    # Annotation text flips to white on dark cells.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.show()
# + id="fWgS4jIdCg4P"
# Class display order matches the integer labels 0..3 assigned earlier.
target_names=['Normal', 'afib', 'noisy', 'other']
# + id="8I4jKBllCi7F" colab={"base_uri": "https://localhost:8080/", "height": 467} outputId="64ecd182-8ed6-4b41-9e8c-d89312802418"
plot_conf_mat(cm, target_names)
# + id="JNTJBtP9Cjgn"
#cross-validation, k=5,
| hyperparamter_tuning_XGBoost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Introduction" data-toc-modified-id="Introduction-1"><span class="toc-item-num">1 </span>Introduction</a></div><div class="lev1 toc-item"><a href="#Results" data-toc-modified-id="Results-2"><span class="toc-item-num">2 </span>Results</a></div><div class="lev1 toc-item"><a href="#Imports" data-toc-modified-id="Imports-3"><span class="toc-item-num">3 </span>Imports</a></div><div class="lev1 toc-item"><a href="#Load-the-data" data-toc-modified-id="Load-the-data-4"><span class="toc-item-num">4 </span>Load the data</a></div><div class="lev1 toc-item"><a href="#Check-if-a-flag-is-true-for-all-the-objects" data-toc-modified-id="Check-if-a-flag-is-true-for-all-the-objects-5"><span class="toc-item-num">5 </span>Check if a flag is true for all the objects</a></div><div class="lev1 toc-item"><a href="#Check-if-a-flag-is-False-for-all-the-objects" data-toc-modified-id="Check-if-a-flag-is-False-for-all-the-objects-6"><span class="toc-item-num">6 </span>Check if a flag is False for all the objects</a></div><div class="lev1 toc-item"><a href="#Compare-good-vs-bad-using-false-flags" data-toc-modified-id="Compare-good-vs-bad-using-false-flags-7"><span class="toc-item-num">7 </span>Compare good vs bad using false flags</a></div><div class="lev1 toc-item"><a href="#Look-at-an-object-and-determine-good-or-bad" data-toc-modified-id="Look-at-an-object-and-determine-good-or-bad-8"><span class="toc-item-num">8 </span>Look at an object and determine good or bad</a></div>
# -
# # Introduction
# Date: Oct 31, 2019
# ```
# 1. Jedisim output: lsst_z1.5_000.fits (z=1.5 ngals = 10k)
# 2. DMSTACK output: src_lsst_z1.5_000.csv (90 flags and 76 params, lots of nans, index90 is id)
# 3. Clean nans and filtering: src_lsst_z1.5_000.txt (only few columns id, x,y, xerr,yerr,e1,e2,ellip,flux,radius)
# 4. Combine m,m9,l,l9 to get LC catalog final.cat using mergecats
# 5. Combine 100 final catalogs to get final_text.txt
#
#
# The text file final_text.txt has columns gm and gc for monochromatic ellipticity and chromatic ellipticity.
#
# When I plot the density plot of gm squared, I saw some bump in gm_sq value 0.7 to 1.0.
#
# ```
# 
#
# NOTES:
# ```
# - final_text.txt has only 42 columns
# - flux gmsq and gcsq are created later.
#
#
# all objects = 183,832 (from final_text.txt)
# bad objects = 18,386 (0.7 < gm_sq < 1.0)
# bad objects percentage = 10.00%
# ```
#
# NOTES:
# ```
# Looking file number and object id of bad density objects from final_text, I went to original
# dmstack csv files and created two dataframes:
#
# df_good_all.csv # 0.7 < gm_sq < 1.0
# df_bad_all.csv # rest
# ```
# # Results
#
# Looking at all the flags (90 flags), 28 flags are all False for bad objects and
# at least one flag was positive for good objects.
#
# ```
# 1 base_GaussianCentroid_flag
# 2 base_GaussianCentroid_flag_resetToPeak
# 3 base_SdssCentroid_flag
# 4 base_SdssCentroid_flag_edge
# 5 base_SdssCentroid_flag_almostNoSecondDerivative
# 6 base_SdssCentroid_flag_notAtMaximum
# 7 base_SdssCentroid_flag_resetToPeak
# 8 base_SdssShape_flag_unweightedBad
# 9 base_SdssShape_flag_unweighted
# 10 base_SdssShape_flag_maxIter
# 11 ext_shapeHSM_HsmPsfMoments_flag
# 12 ext_shapeHSM_HsmPsfMoments_flag_galsim
# 13 ext_shapeHSM_HsmSourceMoments_flag
# 14 ext_shapeHSM_HsmSourceMoments_flag_galsim
# 15 base_CircularApertureFlux_3_0_flag
# 16 base_CircularApertureFlux_4_5_flag
# 17 base_CircularApertureFlux_4_5_flag_sincCoeffsTruncated
# 18 base_CircularApertureFlux_6_0_flag
# 19 base_CircularApertureFlux_6_0_flag_sincCoeffsTruncated
# 20 base_CircularApertureFlux_9_0_flag
# 21 base_CircularApertureFlux_12_0_flag
# 22 base_CircularApertureFlux_12_0_flag_apertureTruncated
# 23 base_CircularApertureFlux_17_0_flag
# 24 base_CircularApertureFlux_17_0_flag_apertureTruncated
# 25 base_GaussianFlux_flag
# 26 base_PsfFlux_flag
# 27 base_PsfFlux_flag_edge
# 28 base_ClassificationExtendedness_flag
#
# ```
#
# ```
# if 28 flags == False:
# object is bad
# else:
# object is good
# ```
#
# # Imports
# +
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(color_codes=True)
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
sns.set(context='notebook', style='whitegrid', rc={'figure.figsize': (12,8)})
plt.style.use('ggplot') # better than sns styles.
matplotlib.rcParams['figure.figsize'] = 12,8
import os
import time
# random state
random_state=100
np.random.seed(random_state)
# Jupyter notebook settings for pandas
#pd.set_option('display.float_format', '{:,.2g}'.format) # numbers sep by comma
from pandas.api.types import CategoricalDtype
np.set_printoptions(precision=3)
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100) # None for all the rows
pd.set_option('display.max_colwidth', 200)
import IPython
from IPython.display import display, HTML, Image, Markdown
print([(x.__name__,x.__version__) for x in [np, pd,sns,matplotlib]])
# + language="javascript"
# IPython.OutputArea.auto_scroll_threshold = 9999;
# -
# # Load the data
# +
tmp = pd.read_csv('data/df_good_all.csv',nrows=0)
cols = tmp.columns
cols
# -
cols_flags = cols[:91]
cols_flags
# +
df_good_flags = pd.read_csv('data/df_good_all.csv', usecols=cols_flags,index_col=0)
print(df_good_flags.shape)
df_good_flags.tail()
# -
# remove nans
df_good_flags.isnull().sum().sum()
# make data type boolean
df_good_flags = df_good_flags.sort_index()
df_good_flags = df_good_flags.astype(bool)
df_good_flags.head()
# +
# df_good_flags.to_csv('df_good_flags.csv',index=True)
# +
# bad density objects
#
# +
df_bad_flags = pd.read_csv('data/df_bad_all.csv', usecols=cols_flags, index_col=0)
print(df_bad_flags.shape)
df_bad_flags = df_bad_flags.sort_index()
df_bad_flags = df_bad_flags.astype(bool)
# df_bad_flags.to_csv('data/df_bad_flags.csv',index=True)
df_bad_flags.head()
# -
# # Check if a flag is true for all the objects
df_bad_flags.eq(True).all()
df_good_flags.eq(True).all()
df_good_flags.eq(True).all().sum()
df_bad_flags.eq(True).all().sum()
# # Check if a flag is False for all the objects
df_good_flags.eq(False).all()
df_bad_flags.eq(False).all()
# # Compare good vs bad using false flags
# +
# flag is False for all the objects (true means all objects have False flag.)
m = df_good_flags.eq(False).all().sum() # 45 flags are all False
n = df_bad_flags.eq(False).all().sum() # 73 flags are all False
df_good_flags.shape[1], m,n, n-m
# +
df1 = df_good_flags.eq(False).all().to_frame().rename(columns={0:'good'})
df2 = df_bad_flags.eq(False).all().to_frame().rename(columns={0:'bad'})
df_flags = pd.concat([df1,df2], axis=1)
df_flags
# -
df_flags[df_flags['good'] != df_flags['bad']]
# +
cols_imp = df_flags[df_flags['good'] != df_flags['bad']].index.to_numpy()
print(len(cols_imp))
cols_imp
# -
df_good_flags[['base_GaussianCentroid_flag']].eq(False).all()
df_bad_flags[['base_GaussianCentroid_flag']].eq(False).all()
df_good_flags[['base_GaussianCentroid_flag','base_GaussianCentroid_flag_resetToPeak']].eq(False).all().all()
df_bad_flags[['base_GaussianCentroid_flag','base_GaussianCentroid_flag_resetToPeak']].eq(False).all().all()
df_good_flags[cols_imp].eq(False).all().all()
df_bad_flags[cols_imp].eq(False).all().all()
# +
# df_bad_flags[cols_imp] # each and every elements are False
# +
# df_good_flags[cols_imp] # at least one True for each flags
# -
# # Look at an object and determine good or bad
# +
a = df_good_flags[cols_imp].head().T
b = df_bad_flags[cols_imp].head().T
pd.concat([a,b],axis=1)
# this gives bad has all False
# good have at least one True
# -
# look at good only
df_good_flags[cols_imp].head()
# +
# is any column all True?
a = df_good_flags[cols_imp]
print(a.shape)
a.sum().sort_values()
# good objects: 959k
# base_SdssShape_flag_unweighted flag True : 42k
# base_SdssShape_flag_unweighted flag False: 917k (most are False)
#
# for bad objects all objects are False.
# +
# we know all bad objects has these 28 flags all False
# does any good object has all these 28 flags False?
# -
a = df_good_flags[cols_imp]
a.head(2)
a.all(axis=1).sum() # NO
# +
# conclusion:
# if all 28 flags are False, object is bad
# else: good
# -
for i,j in enumerate(cols_imp,1):
print(i,j)
| Nov_2019/Scripts/a03_dmstack_bad_gmsq_density.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from operator import itemgetter
import json
# ##### For this code to work, a file called ```user_ratings_.csv``` must exist containing the following columns: ```userId, movieId, enjoyRating, meaningRating``` plus a column per character strength with names as in the movie dataset ```final_movie_dataset.csv```
movie_json = """[{"appreciationBeautyExcellence":0,"averageEnjoyment":2.85,"averageMeaning":3.0,"bravery":0,"creativity":0,"curiosity":0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"movieId":"tt0002199","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":0,"socialIntelligence":0,"spirituality":1,"teamwork":0,"votesEnjoyment":506,"votesMeaning":1,"zest":0},{"appreciationBeautyExcellence":0,"averageEnjoyment":4.15,"averageMeaning":5.0,"bravery":0,"creativity":0,"curiosity":0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":1,"loveOfLearning":0,"movieId":"tt0012349","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"votesEnjoyment":114451,"votesMeaning":1,"zest":0},{"appreciationBeautyExcellence":0,"averageEnjoyment":4.25,"averageMeaning":5.0,"bravery":0,"creativity":0,"curiosity":0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":1,"loveOfLearning":0,"movieId":"tt0021749","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"votesEnjoyment":169029,"votesMeaning":1,"zest":0},{"appreciationBeautyExcellence":0,"averageEnjoyment":3.45,"averageMeaning":3.0,"bravery":0,"creativity":0,"curiosity":0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"movieId":"tt0022111","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":1,"socialIntelligence":0,"spirituality":0,"teamwork":0,"votesEnjoyment":2738,"votesMeaning":1,"zest":0},{"appreciationBeautyExcellence":0,"averageEnjoyment":3.95,"averageMeaning":4.0,"bravery":0,"creativity":0,"curiosity
":0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"movieId":"tt0027996","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":1,"votesEnjoyment":19835,"votesMeaning":1,"zest":0},{"appreciationBeautyExcellence":0,"averageEnjoyment":4.0,"averageMeaning":4.0,"bravery":0,"creativity":0,"curiosity":0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":1,"hope":0,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"movieId":"tt0028691","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"votesEnjoyment":8825,"votesMeaning":1,"zest":0},{"appreciationBeautyExcellence":0,"averageEnjoyment":3.95,"averageMeaning":4.0,"bravery":0,"creativity":0,"curiosity":0,"fairness":1,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"movieId":"tt0029843","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"votesEnjoyment":47351,"votesMeaning":1,"zest":0},{"appreciationBeautyExcellence":0,"averageEnjoyment":3.65,"averageMeaning":4.0,"bravery":0,"creativity":0,"curiosity":0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":1,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"movieId":"tt0029942","perseverance":0,"perspective":0,"prudence":0,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"votesEnjoyment":5184,"votesMeaning":1,"zest":0}]"""
user0_json = """[{"appreciationBeautyExcellence":1,"bravery":0,"creativity":0,"curiosity":1,"enjoymentRating":5.0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":1,"kindness":1,"leadership":0,"love":0,"loveOfLearning":0,"meaningRating":5.0,"movieId":"tt0758758","perseverance":0,"perspective":1,"prudence":0,"ratingId":10,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"userId":0,"zest":0},{"appreciationBeautyExcellence":0,"bravery":0,"creativity":0,"curiosity":0,"enjoymentRating":5.0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":1,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"meaningRating":3.0,"movieId":"tt0485947","perseverance":0,"perspective":1,"prudence":0,"ratingId":11,"selfRegulation":0,"socialIntelligence":1,"spirituality":1,"teamwork":0,"userId":0,"zest":0},{"appreciationBeautyExcellence":0,"bravery":0,"creativity":0,"curiosity":0,"enjoymentRating":5.0,"fairness":0,"forgiveness":1,"gratitude":0,"honesty":0,"hope":1,"humility":0,"humor":0,"judgement":0,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"meaningRating":4.0,"movieId":"tt0180093","perseverance":0,"perspective":0,"prudence":0,"ratingId":12,"selfRegulation":1,"socialIntelligence":0,"spirituality":0,"teamwork":0,"userId":0,"zest":0},{"appreciationBeautyExcellence":0,"bravery":1,"creativity":0,"curiosity":0,"enjoymentRating":5.0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"judgement":1,"kindness":0,"leadership":0,"love":0,"loveOfLearning":0,"meaningRating":4.5,"movieId":"tt0810819","perseverance":1,"perspective":1,"prudence":0,"ratingId":13,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"userId":0,"zest":0},{"appreciationBeautyExcellence":1,"bravery":0,"creativity":0,"curiosity":0,"enjoymentRating":5.0,"fairness":0,"forgiveness":0,"gratitude":0,"honesty":0,"hope":0,"humility":0,"humor":0,"ju
dgement":0,"kindness":0,"leadership":0,"love":1,"loveOfLearning":0,"meaningRating":5.0,"movieId":"tt4034354","perseverance":0,"perspective":1,"prudence":0,"ratingId":14,"selfRegulation":0,"socialIntelligence":0,"spirituality":0,"teamwork":0,"userId":0,"zest":0}]"""
# movie_df = pd.read_csv('./movie_ratings.csv', low_memory=False)
# user_ratings_df = pd.read_csv('./user_ratings.csv', delimiter=';', low_memory=False)
# +
movie_ratings_df = pd.DataFrame.from_dict(json.loads(movie_json))
first_cols = ['averageMeaning', 'votesMeaning', 'averageEnjoyment', 'votesEnjoyment', 'movieId']
cols = list(movie_ratings_df.columns)
for col in first_cols:
cols.insert(0, cols.pop(cols.index(col)))
movie_ratings_df = movie_ratings_df[cols]
# -
def json_to_df(movie_json, user_json):
    """Parse the movie and user JSON payloads into dataframes.

    Rating/id columns are moved to the front of each frame so the
    character-strength columns form a contiguous block at the end.
    """
    def _front(df, leading):
        # Pop each name and re-insert it at position 0, so the final
        # front-to-back order is the reverse of `leading`.
        ordered = list(df.columns)
        for name in leading:
            ordered.insert(0, ordered.pop(ordered.index(name)))
        return df[ordered]

    movies = _front(
        pd.DataFrame.from_dict(json.loads(movie_json)),
        ['averageMeaning', 'votesMeaning', 'averageEnjoyment', 'votesEnjoyment', 'movieId'],
    )
    users = _front(
        pd.DataFrame.from_dict(json.loads(user_json)),
        ['meaningRating', 'enjoymentRating', 'movieId', 'userId', 'ratingId'],
    )
    return movies, users
# +
user_ratings_df = pd.DataFrame.from_dict(json.loads(user0_json))
first_cols = ['meaningRating', 'enjoymentRating', 'movieId', 'userId', 'ratingId']
cols = list(user_ratings_df.columns)
for col in first_cols:
cols.insert(0, cols.pop(cols.index(col)))
user_ratings_df = user_ratings_df[cols]
# -
user_ratings_df.columns
# has_ch_strengths = movie_df['averageMeaningRating'] > 0.0
# ch_movie_df = movie_df[has_ch_strengths]
# ##### The dataframe ```ch_movie_df``` contains only movies which have character strengths data. The recommendations can only be done for those.
def cosine_similarity(a, b):
    """Return the cosine of the angle between vectors *a* and *b*."""
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / denom
def get_user_score(user_ratings_df):
    '''
    Build the user's character-strength profile from their ratings.
    Only movies rated >= 3.0 on meaningfulness contribute; each such
    movie's strength flags are weighted by its meaning rating, summed
    per strength, and normalized into a distribution.
    '''
    meaning_col = user_ratings_df.columns.tolist().index('meaningRating')
    rated = user_ratings_df[user_ratings_df['meaningRating'] >= 3.0]
    weights = rated.iloc[:, meaning_col].values
    # The strength flags occupy every column to the right of
    # 'meaningRating'; broadcasting multiplies each row by its weight.
    strength_cols = np.arange(meaning_col + 1, len(rated.columns))
    weighted = rated.iloc[:, strength_cols] * weights[:, np.newaxis]
    # Sum each strength's weighted score and normalize to sum to 1.
    return weighted.sum() / weighted.sum().sum()
def get_movie_score(movie_df, movie_id, max_meaningful_votes, general_score):
    '''
    Score one movie from its character strengths and meaning rating,
    blended with the catalog-wide score (Bayesian-style estimate): the
    fewer meaning votes a movie has, the more it is pulled toward the
    global prior *general_score*.
    '''
    first_strength = movie_df.columns.tolist().index('appreciationBeautyExcellence')
    row = movie_df[movie_df['movieId'] == movie_id]
    avg_meaning = row['averageMeaning'].values[0]
    # Vote-count-based confidence in this movie's own score.
    weight = row['votesMeaning'].values[0] / max_meaningful_votes
    # Strength flags run from 'appreciationBeautyExcellence' to the end.
    strengths = row.iloc[:, np.arange(first_strength, len(row.columns))] * avg_meaning
    own_score = strengths.sum() / strengths.sum().sum()
    return weight * own_score + (1 - weight) * general_score
def get_user_recommendations(movie_json, user_json):
    '''
    Return the top-12 movie ids (and their similarity scores) for a
    user, ranked by cosine similarity between the user's strength
    profile and every movie's blended score.
    '''
    # Parse the JSON payloads into column-ordered dataframes.
    movie_ratings_df, user_ratings_df = json_to_df(movie_json, user_json)
    first_strength = movie_ratings_df.columns.tolist().index('appreciationBeautyExcellence')
    user_score = get_user_score(user_ratings_df)

    # Meaning-weighted strength matrix, one row per movie.
    strengths = movie_ratings_df.iloc[:, np.arange(first_strength, len(movie_ratings_df.columns))] \
        * np.array(movie_ratings_df['averageMeaning'])[:, np.newaxis]
    # Normalize every row (movie) into a distribution.
    strengths = strengths.div(strengths.sum(axis=1), axis=0)
    # Catalog-wide prior: the mean movie profile.
    general_score = strengths.mean(axis=0)
    max_meaningful_votes = movie_ratings_df['votesMeaning'].max()

    # Cosine similarity between the user's score and every movie's.
    similarities = {}
    for movie_id in movie_ratings_df['movieId']:
        movie_score = get_movie_score(movie_ratings_df, movie_id,
                                      max_meaningful_votes, general_score)
        similarities[movie_id] = cosine_similarity(user_score, movie_score)

    # Rank by similarity, highest first, and keep the top 12.
    ranked = sorted(similarities.items(), key=itemgetter(1), reverse=True)[:12]
    return [movie for movie, _ in ranked], [sim for _, sim in ranked]
# +
recommendations_id, sim_scores = get_user_recommendations(movie_json, user0_json)
print('Recommendations for user:\n\nRank\t{:<60} {}'.format("Movie ", "Cosine similarity"))
for i, recom_id in enumerate(recommendations_id):
movie = movie_ratings_df[movie_ratings_df['movieId'] == recom_id]['movieId'].values[0]
print('{}\t{:<60} {}'.format(i+1, movie, sim_scores[i]))
# -
| recommendation_system/content_based_recom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from bisect import bisect_left, bisect_right

def searchRange(nums, target):
    """Return [first, last] indices of *target* in the sorted list *nums*.

    Returns [-1, -1] when the target is absent (including for an empty
    list). Uses binary search (O(log n)); the original did three linear
    passes (`in`, `.index`, and a full scan) over an already-sorted list.
    """
    lo = bisect_left(nums, target)
    if lo == len(nums) or nums[lo] != target:
        return [-1, -1]
    # bisect_right gives one past the last occurrence.
    return [lo, bisect_right(nums, target) - 1]

searchRange([], 6)
# -
| Anjani/Leetcode/Array/Find First and Last Position of Element in Sorted Array.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="boC50Y4UGuMi" colab_type="text"
# # IEEE Coders Week
# ### Day 3 : Classification (Supervised Learning)
# #### By : <NAME> (<EMAIL>)
# ---
# **Disclaimer**
#
# All of the work below is my own, based on the resources that were provided. Please contact me if you have any suggestions.
#
# + [markdown] id="oFxUmhjx8-az" colab_type="text"
# # Data Preparation & Pre-Processing
# + id="5lFwH_uzHfcT" colab_type="code" outputId="e7fbf900-8140-44c6-baad-e025cc493e61" colab={"base_uri": "https://localhost:8080/", "height": 71}
#@title Import Important Library
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, roc_curve, auc
from matplotlib.colors import ListedColormap
# + id="IWBxL5qdHl-A" colab_type="code" outputId="68735ce0-e856-4071-8a64-e51d66026690" colab={"base_uri": "https://localhost:8080/", "height": 204}
#@title Import dataset from corresponding URL
# !wget --no-check-certificate \
# http://iali.in/datasets/mushrooms.csv \
# -O /content/mushrooms.csv
# + id="Q4nmfS3oIAuB" colab_type="code" outputId="1a272a80-e52f-4445-ad30-94c3dd273d78" colab={"base_uri": "https://localhost:8080/", "height": 292}
#@title Dataset Information
print("Visualizing the table")
df = pd.read_csv('/content/mushrooms.csv')
df.head()
# + id="N_rNXA3wJkv1" colab_type="code" outputId="96412ed5-b360-4780-c0dd-df5578deebca" colab={"base_uri": "https://localhost:8080/", "height": 527}
df.info()
# + id="5HZfDfRnBdQV" colab_type="code" outputId="99984091-9128-4dc8-8b8d-0fbe9aada69f" colab={"base_uri": "https://localhost:8080/", "height": 244}
df.describe()
# + [markdown] id="JFi2gncCbf8r" colab_type="text"
# We can conclude that the data is entirely text-valued and contains no null entries.
#
# In order to be processed into ML, data needed to be turned into value integer
# + id="drQDX6jsbvJc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 351} outputId="5b054ca2-e900-4e77-8a9a-9975d01c5332"
#@title Label Encoder & Preview
labelencoder=LabelEncoder()
for col in df.columns:
df[col] = labelencoder.fit_transform(df[col])
df.describe()
# + id="P3JuFkJ9La1J" colab_type="code" outputId="77bf073e-4793-4183-90ee-724c2e6935e1" colab={"base_uri": "https://localhost:8080/", "height": 907}
#@title Visualizing Dataset. Based on Habitat { vertical-output: true }
look_class = df['class'].to_numpy()
look_habitat = df['habitat'].to_numpy()
habitat1 = df[(df['class'] == 1) & (df['habitat'] == 0)]
habitat2 = df[(df['class'] == 0) & (df['habitat'] == 0)]
habitat3 = df[(df['class'] == 1) & (df['habitat'] == 1)]
habitat4 = df[(df['class'] == 0) & (df['habitat'] == 1)]
habitat5 = df[(df['class'] == 1) & (df['habitat'] == 2)]
habitat6 = df[(df['class'] == 0) & (df['habitat'] == 2)]
habitat7 = df[(df['class'] == 1) & (df['habitat'] == 3)]
habitat8 = df[(df['class'] == 0) & (df['habitat'] == 3)]
habitat9 = df[(df['class'] == 1) & (df['habitat'] == 4)]
habitat10 = df[(df['class'] == 0) & (df['habitat'] == 4)]
habitat11 = df[(df['class'] == 1) & (df['habitat'] == 5)]
habitat12 = df[(df['class'] == 0) & (df['habitat'] == 5)]
habitat13 = df[(df['class'] == 1) & (df['habitat'] == 6)]
habitat14 = df[(df['class'] == 0) & (df['habitat'] == 6)]
class1 = []
class0 = []
class1.append(habitat1['class'].count())
class0.append(habitat2['class'].count())
class1.append(habitat3['class'].count())
class0.append(habitat4['class'].count())
class1.append(habitat5['class'].count())
class0.append(habitat6['class'].count())
class1.append(habitat7['class'].count())
class0.append(habitat8['class'].count())
class1.append(habitat9['class'].count())
class0.append(habitat10['class'].count())
class1.append(habitat11['class'].count())
class0.append(habitat12['class'].count())
class1.append(habitat13['class'].count())
class0.append(habitat14['class'].count())
index = np.arange(7)
bar_width = 0.35
opacity = 0.8
plt.figure(figsize=(10,15))
rects1 = plt.bar(index, class1, bar_width,
alpha=opacity,
color='r',
label='Poisonous')
rects2 = plt.bar(index + bar_width, class0, bar_width,
alpha=opacity,
color='g',
label='Edible')
plt.xlabel('Habitat')
plt.ylabel('Total')
plt.title('Mushroom Dataset Visualization Based on Habitat')
plt.xticks(index + bar_width, ("Woods", "Grasses", "Leaves", "Meadows", "Paths", "Urban", "Waste"))
plt.legend()
plt.tight_layout()
plt.show()
# + id="gBwDtiTMaf3s" colab_type="code" colab={}
#@title Split data to Training set & Validation set
X = df.iloc[:,1:23].values #all 22 feature columns; column 0 ('class') is the label
Y = df.iloc[:,0].values #target: 0 = Edible, 1 = Poisonous (per the labels used below)
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)  # 80/20 split, fixed seed for reproducibility
# + [markdown] id="8amOVtGx81yK" colab_type="text"
# # Creating Model & Result (Decision Tree)
# + id="YrfMded_bOyT" colab_type="code" outputId="6563d0ac-4f6d-419d-a181-aa07214ffe7f" colab={"base_uri": "https://localhost:8080/", "height": 119}
#@title Training Model
# Standardize features: fit the scaler on the training set only, then
# apply the same transform to the test set (avoids test-set leakage).
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Entropy (information-gain) splitting; fixed seed for reproducibility.
model = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
model.fit(X_train, y_train)
# + id="im9JpbZkbS2H" colab_type="code" outputId="85ab6788-731b-44fc-bcfe-077643aeddee" colab={"base_uri": "https://localhost:8080/", "height": 68}
#@title Comparison between prediction on test set and test set
y_pred = model.predict(X_test)
print(y_pred)
print(y_test)
print("We're unlikely to see all of the comparison due to the size of the test set")
# + id="QqFyohehvhbr" colab_type="code" outputId="9c0882ca-0336-4bca-9702-afc9c1a360c0" colab={"base_uri": "https://localhost:8080/", "height": 170}
#@title Result Details
# Conclude that 0 is Edible and 1 is vice versa
labels = ["Edible", "Poisonous"]
report = classification_report(y_test, y_pred, target_names=labels)
print(report)
# + id="2xhg6JyJP_rT" colab_type="code" outputId="5845b43d-8624-41a0-956f-73977ac37368" colab={"base_uri": "https://localhost:8080/", "height": 475}
#@title Confusion Matrix Visualization
df_cm = pd.DataFrame(confusion_matrix(y_test,y_pred), columns=labels, index= labels)
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (7,7))
sns.set(font_scale=1.4)#for label size
sns.heatmap(df_cm, cmap="Wistia", annot=True,annot_kws={"size": 17})
# + id="mb4I30lz_7HX" colab_type="code" outputId="e998530a-82c5-470d-e7c7-86b00a93d39a" colab={"base_uri": "https://localhost:8080/", "height": 420}
#@title Result Visualization { vertical-output: true }
cm = confusion_matrix(y_pred, y_test)
correct_answer = cm[0][0] + cm [1][1]
incorrect_answer = cm[0][1] + cm[1][0]
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_pred, y_test)
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.figure(figsize=(5,5))
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate,true_positive_rate, color='red',label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--')
plt.axis('tight')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print("Correct answer (by model) = " + str(correct_answer))
print("Incorrect answer (by model) = " + str(incorrect_answer))
print('Decission Tree accuracy:', accuracy_score(y_test, y_pred)*100 ,"%")
# + [markdown] id="ij8dcisMj-tX" colab_type="text"
# # Creating Model & Result (Random Forest)
# + id="ZFjK4kEnlPVG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="aec54db9-71d0-4978-f441-5170ea494e7e"
#@title Training Model
# NOTE: X_train / X_test were already standardized in the Decision Tree
# section above; re-fitting a StandardScaler on standardized data is a
# redundant (near-)no-op, and random forests are scale-invariant anyway,
# so the extra scaling pass is dropped.
model = RandomForestClassifier()
model.fit(X_train, y_train)
# + id="9_QvOynYlYVI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="26944e2c-6659-4a0b-9ef8-57efbb0537c6"
#@title Comparison between prediction on test set and test set
y_pred = model.predict(X_test)
print(y_pred)
print(y_test)
print("We're unlikely to see all of the comparison due to the size of the test set")
# + id="NuLbO0U2ld7v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="f8c6bf79-9d80-4805-e155-a25d5d2895d1"
#@title Result Details
# Conclude that 0 is Edible and 1 is vice versa
labels = ["Edible", "Poisonous"]
report = classification_report(y_test, y_pred, target_names=labels)
print(report)
# + id="Xh_J-qy2ljW3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 475} outputId="65ddb203-f092-4b73-8ae7-178c2540c7f2"
#@title Confusion Matrix Visualization
df_cm = pd.DataFrame(confusion_matrix(y_test,y_pred), columns=labels, index= labels)
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (7,7))
sns.set(font_scale=1.4)#for label size
sns.heatmap(df_cm, cmap="Wistia", annot=True,annot_kws={"size": 17})
# + id="QXKMZ8_dmgoW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 420} outputId="ec6db99a-b07d-4cd3-e424-6deb8bad252d"
#@title Result Visualization { vertical-output: true }
cm = confusion_matrix(y_pred, y_test)
correct_answer = cm[0][0] + cm [1][1]
incorrect_answer = cm[0][1] + cm[1][0]
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_pred, y_test)
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.figure(figsize=(5,5))
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate,true_positive_rate, color='red',label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--')
plt.axis('tight')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print("Correct answer (by model) = " + str(correct_answer))
print("Incorrect answer (by model) = " + str(incorrect_answer))
print('Random Forest accuracy:', accuracy_score(y_test, y_pred)*100 ,"%")
# + [markdown] id="3mYteGCDo-3B" colab_type="text"
# # Creating Model & Result (Logistic Regression)
# + id="JjNN_fQdqyOp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="85c58f8c-1aea-45f9-cccd-ad9e5c493420"
#@title Training Model
# NOTE: the features were standardized once already (Decision Tree
# section); re-scaling already-standardized data is a redundant
# (near-)no-op, so the model is fitted directly.
model = LogisticRegression()
model.fit(X_train, y_train)
# + id="XS9G9dt8q7rR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="4e1b5b98-a01b-414d-c29e-ce846ca7d633"
#@title Comparison between prediction on test set and test set
y_pred = model.predict(X_test)
print(y_pred)
print(y_test)
print("We're unlikely to see all of the comparison due to the size of the test set")
# + id="4IduKCpTrFTp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="912719e5-e8e8-4933-db36-c782faf3d0ab"
#@title Result Details
# Conclude that 0 is Edible and 1 is vice versa
labels = ["Edible", "Poisonous"]
report = classification_report(y_test, y_pred, target_names=labels)
print(report)
# + id="CDqW8VkcrKbH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 475} outputId="7e65c07c-4493-4ed0-a60c-b3c01adea2e2"
#@title Confusion Matrix Visualization
df_cm = pd.DataFrame(confusion_matrix(y_test,y_pred), columns=labels, index= labels)
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (7,7))
sns.set(font_scale=1.4)#for label size
sns.heatmap(df_cm, cmap="Wistia", annot=True,annot_kws={"size": 17})
# + id="sh-78OXfrRjW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 420} outputId="c55d4724-0ad4-4311-8d2a-bada84454b80"
#@title Result Visualization { vertical-output: true }
cm = confusion_matrix(y_pred, y_test)
correct_answer = cm[0][0] + cm [1][1]
incorrect_answer = cm[0][1] + cm[1][0]
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_pred, y_test)
roc_auc = auc(false_positive_rate, true_positive_rate)
plt.figure(figsize=(5,5))
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate,true_positive_rate, color='red',label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],linestyle='--')
plt.axis('tight')
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print("Correct answer (by model) = " + str(correct_answer))
print("Incorrect answer (by model) = " + str(incorrect_answer))
print('Logistic Regression accuracy:', accuracy_score(y_test, y_pred)*100 ,"%")
| Day 3/Coders_week_Day_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# If you're looking at this file on GitHub, make sure to follow [the instructions for getting set up](https://docs.google.com/document/d/1-LXG5Lb76xQy70W2ZdannnYMEXRLt0CsoiaK0gTkmfY/edit?usp=sharing) first. You should [download these files](https://github.com/pbloem/machine-learning/archive/master.zip) to your machine and run them locally.
#
# ## Worksheet 1: Numpy and Matplotlib
#
# This is a _Jupyter notebook_. It consists of a series of _cells_. Some contain simple text, like this one, and some contain code, like the one below. Read each cell carefully. If it contains code, you can click the "play" button above to execute it. Don't execute a new cell until all previous ones have finished executing.
#
# **NB: If you go back up and re-run a cell, it can happen that it doesn't work anymore, because the variable names have been re-used. If something doesn't work, for any reason, please try "Kernel > Restart & Clear Output" and start again from the top. If the error keeps happening, please post a question on the Canvas message board.**
#
# We will assume that you know the basics of Python. If you don't, please work your way through [this tutorial](https://www.learnpython.org/) first to get caught up.
#
# The next cell imports the library **numpy** (short for **num**eric **py**thon).
import numpy as np
# If you see something like "In [1]:" next to the cell above, execution has finished.
#
# ## Numpy
#
# Numpy is a _Linear Algebra_ library. It allows you to represent vectors, matrices and tensors as Python objects, and to manipulate these in all the ways you'd expect.
#
# Most machine learning research uses Linear Algebra as its basic language, so most machine learning _code_ is built on libraries like numpy.
#
# Let's start by making a 2 x 2 matrix, filled with some arbitrary numbers.
np.asarray([[2, 3],[4, 5]])
# The numpy [documentation](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.asarray.html) can tell you more about all of the functions you see here. Each function is annotated with several examples.
#
# We can also create a matrix of random numbers between 0.0 and 1.0:
np.random.rand(2,2)
# Note that the numbers change if you execute the cell again. The arguments indicate the _shape_ of the resulting matrix. Try changing the cell above; if you pass rand a single argument, you get a vector. If you pass it more than two arguments, you get a _tensor_ (the higher-dimensional analogue of a matrix).
#
# Let's take a slightly bigger matrix, and see what we can do with it. This time, we'll load some actual data:
data = np.loadtxt('./cricketers.sub.csv', delimiter=',')
print(data)
# Each row in this matrix represents a professional cricketer from the 19th or 20th century. The first column indicates their year of birth, the second indicates how old they were when the data was collected if they were still alive, or at what age they died, otherwise.
#
# Let's start by seeing how many cricketers we have:
data.shape
# Our data has 5960 rows, and two columns. We can ask for a specific element of the matrix by using square brackets to index the matrix. Here is the date of birth and age of the 10th cricketer:
print('year of birth ', data[9, 0])
print('age ', data[9, 1])
# **Note that indexing starts from 0.** The first column is column 0, and the second is column 1.
#
# The [slicing syntax](https://www.oreilly.com/learning/how-do-i-use-the-slice-notation-in-python) that you know from python lists can also be used for matrices. For instance, here are the first ten cricketers' years of birth:
data[0:10, 0]
# To check that you understand, see if you can predict the result of the following slicing operations (and then execute the cells):
data[:10, 1]
data[0:10, :]
data[-1, :]
# ## Matplotlib
#
# The first rule of data science is _look at your data_. We can do this easily with the library **matplotlib**. Let's start by importing it.
import matplotlib.pyplot as plt
# ensure that plots are displayed inside the notebook
# %matplotlib inline
# We've imported the pyplot sublibrary. This library acts as a "state machine", it assumes we are working on a single plot and every command we give changes the state of that plot. This is great for simple plots.
#
# For more complicated stuff, you may want to use [the object-oriented interface](https://matplotlib.org/faq/usage_faq.html). This is a little more complicated, but more powerful.
#
# For now, we can keep things simple. We want a scatter plot of the first column in the data (year of birth) against the second (age). We can do this with the plt.scatter() command. It expects at least two arguments: vectors of the same length, indicating the x and y coordinate of each point. In other words, we'll need to slice our data matrix into the first and second columns:
x = data[:, 0]
y = data[:, 1]
plt.scatter(x, y); # the semicolon stops jupyter from printing the result of the last function call
# Let's make the dots a little smaller, and slightly transparent, to show the structure a little better. See [the documentation](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.scatter.html) for more options.
#
# Let's also be good data scientists and label our axes.
plt.scatter(x, y, s=3, alpha=0.2)
plt.xlabel('year of birth')
plt.ylabel('age');
# The most prominent feature is the dense, diagonal line at the left. Can you explain why this is there?
#
# In the middle of the plot, we see two, more noisy, more vague diagonal lines. Can you guess what they represent?
#
# To clear this up, we can make a different plot: we can plot the year that the cricketer _died_ against their age. This value isn't given in the data, but it's easy to compute: we just sum their age with their year of birth. Numpy makes this especially easy by overloading the basic operators like +, -, \* and /. These are all **element-wise** operators: if we sum two matrices of the same shape, the result will be a matrix (of that shape also), in which each element is the sum of the corresponding elements in the input matrices.
#
# In other words, to get the year of death, we can simply do:
year_of_death = data[:,0] + data[:,1]  # element-wise sum: year of birth + age
# Let's check the distribution first, by plotting a histogram:
plt.hist(year_of_death);
# The big bar on the right is not some huge calamity in the cricket world. It just shows the large proportion of cricketers that were still alive when the data was gathered. Let's zoom in a little, and use some more bins to show the finer detail. Again, [the documentation](https://matplotlib.org/devdocs/api/_as_gen/matplotlib.pyplot.hist.html) shows you more options.
plt.hist(year_of_death, bins=100, range=[1870, 1970]);
# We see a pretty big spike for World War I, but World War II is hidden in the noise. If we return to the scatter plot, we can make it stand out a little more.
plt.scatter(data[:,0]+data[:,1], data[:,1], s=3, alpha=0.2)
plt.xlabel('year of death')
plt.ylabel('age at death');
# If you want to save your plot to a file, you can use ``plt.savefig('myplot.pdf')``. The filetype is automatically detected from the extension.
#
# ## Quick numpy tricks
#
# You should now have a basic idea of how numpy works, and how to make simple plots.
# Numpy is a complex library with many options. We'll finish up by showing you a few important aspects, and pointing you to some more elaborate tutorials.
#
# ### Matrix multplication
#
# First up: matrix multiplication. For two matrices ``a`` and ``b``, ``a * b`` gives you an element-wise multiplication. What if you want to do a full matrix multiplication? That works like this:
# +
a = np.asarray([[1, 2],[3, 4]])
print(a)
b = np.eye(2) # the 2x2 identity matrix
print('\nelement-wise\n', a * b)
print('\nmatrix multiplication\n', a.dot(b))
# -
# ### Broadcasting
#
# [Broadcasting](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) is one of numpy's most complex, and most useful behaviors. Put simply, when you try to apply an element-wise operation (like +) to two matrices that don't have the same size, numpy checks if it can stretch one of them to make the sizes match.
#
# The simplest example is combining a matrix with a scalar. Let's say we want to add 10 to every element in a matrix.
#
a + 10
# Under the hood, numpy dynamically stretches the scalar 10 out to a matrix of 2x2, filled with 10's and then adds that, element-wise, to ``a``.
#
# Here is another example, adding a length 3 vector to a 16x3 matrix:
a = np.random.rand(16,3)
b = np.asarray([0,1,2])
a + b
# If the two matrices being added don't have the same number of dimensions, they are aligned from the last dimension. In this case, broadcasting was allowed, because the last dimension of ``a`` matched the last dimension of ``b``. If ``b`` had been 16 elements long, we would not be allowed to broadcast (try it).
#
# In this case, we could add a dimension, to make ``b`` a 16x1 matrix. The broadcasting rules allow any dimension of size 1 to be stretched to match the other matrix.
#
# I'm sure that right now, broadcasting seems quite mysterious. It's enough to be aware that this behavior exists. If you encounter it in action, you'll see how much easier and intuitive it can make things. The numpy documentation provides [a more complete explanation](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) if you need one.
# ## Final comments
#
# That's it for this worksheet. This doesn't make you a numpy/matplotlib expert, but it hopefully gives you an indication of how things work, and a safe environment to experiment in. Just click the plus-icon in the toolbar to make a new cell, and start playing around with the data.
#
# To learn more about **numpy**, you can start here:
# * [The official numpy quickstart guide](https://docs.scipy.org/doc/numpy-dev/user/quickstart.html)
# * [A more in-depth tutorial, with in-browser samples](https://www.datacamp.com/community/tutorials/python-numpy-tutorial)
# * [A very good walk through the most important functions and features](http://cs231n.github.io/python-numpy-tutorial/). From the famous [CS231n course](http://cs231n.github.io/), from Stanford.
#
# To dive deeper into **matplotlib**, these are some good jumping-off points:
# * [The official pyplot tutorial](https://matplotlib.org/users/pyplot_tutorial.html). Note that pyplot can accept basic python lists as well as numpy data.
# * [A gallery of example MPL plots](https://matplotlib.org/gallery.html). Most of these do not use the pyplot state-machine interface, but the more low level objects like [Axes](https://matplotlib.org/api/axes_api.html).
# * [In-depth walk through the main features and plot types](http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html)
#
# In the next worksheet, we'll see how to take some numpy data, and apply a machine learning algorithm to it, using the **sklearn** library.
| Worksheet 1, Numpy and Matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hands-on Introduction to Python And Machine Learning
# Instructor: <NAME>
#
# (Readers are assumed to have a little bit programming background.)
#
# # Getting started with Python
# (adapted from [this github repository](https://github.com/ehmatthes/intro_programming))
# ## Variables
# A variable holds a value of various types such as string, integer, real number and boolean.
message = "Hello world!" # message is a variable of type string, it holds 'Hello world!'
print(message)
# In Python, the value of a variable can be modified.
# +
message = "Hello world!"
print(message)
message = 'Hello world! I love Python!' # message's value is changed here
print(message)
# -
# ### Naming rules
#
# + Variables can contain only letters, numbers, and underscores. Variable names can start with a letter or an underscore, but can not start with a number
#
# + Spaces are not allowed in variable names, so we use underscores instead of spaces. For example, use student_name instead of "student name"
#
# + You cannot use [Python keywords](http://docs.python.org/3/reference/lexical_analysis.html#keywords) as variable names
#
# + Variable names should be descriptive, without being too long
#
# + *Case sensitive*
#
# + The naming rules actually also apply to other Python constructs
#
# If you don't follow the rules, the Python interpreter will shout at you...
1lovehk = 'I love HK'
i love hk = 'I love HK'
for='Hong kong forever! (so does Wakanda)'
# If you attempt to use variables that have not been defined...
message = 'What are the differences between a python and an anaconda?'
print(mesage)
# Beware of typing mistakes!
# ** Exercise **:
# - Try to create a variable of any kind, name it in whatever way and see whether there are errors
# - And then type:
# <code>
# type(your variable)
# </code>
# ## Strings
# Strings are sets of characters.
# ### Single and double quotes
# Strings are contained by either single or double quotes.
my_string = "This is a double-quoted string."
my_string = 'This is a single-quoted string.' # use single quote if you are lazy
# This lets us make strings that contain quotations without the need of _escape characters_.
# By the way, the inventor of another programming language, Perl, stated the *three virtues of a great programmer*:
# > Laziness: The quality that makes you go to great effort to reduce overall energy expenditure. It makes you write labor-saving programs that other people will find useful and document what you wrote so you don't have to answer so many questions about it.
# >
# > Impatience: The anger you feel when the computer is being lazy. This makes you write programs that don't just react to your needs, but actually anticipate them. Or at least pretend to.
# >
# > Hubris: The quality that makes you write (and maintain) programs that other people won't want to say bad things about.
#
quote = "<NAME> once said, 'Any program is only as good as it is useful.'"
print(quote)
# ### Changing case
# You can easily change the case of a string, to present it the way you want it to look.
# +
name = '<NAME> '
print(name)
print(name.title())
# +
first_name = 'ada'
print(first_name)
print(first_name.title())
print(first_name.upper())
first_name = 'Ada'
print(first_name.lower())
# -
# You will see this syntax quite often, where a variable name is followed by a dot and then the name of an action, followed by a set of parentheses. The parentheses may be empty, or they may contain some values.
#
# variable_name.action()
#
# In this example, the word "action" is the name of a method. A method is something that can be done to a variable. The methods 'lower', 'title', and 'upper' are all functions that have been written into the Python language, which do something to strings. Later on, you will learn to write your own methods.
# ### Combining strings (concatenation)
# It is often very useful to be able to combine strings into a message or page element that we want to display. Again, this is easier to understand through an example.
# +
first_name = 'ada'
last_name = 'wong'
full_name = first_name + ' ' + last_name
print(full_name.title())
# -
# The plus sign combines two strings into one, which is called "concatenation". You can use as many plus signs as you want in composing messages.
# +
first_name = 'ada'
last_name = 'lovelace'
full_name = first_name + ' ' + last_name
message = full_name.title() + ' ' + "was considered the world's first computer programmer."
print(message)
# -
# ### Whitespace
# The term "whitespace" refers to characters that the computer is aware of, but are invisible to readers. The most common whitespace characters are spaces, tabs, and newlines.
#
# A space is just " ". The two-character sequence "\t" makes a tab appear in a string. Tabs can be used anywhere you like in a string. Similarly, newlines are created by a two-character sequence "\n".
print('Hello everyone!')
print('\tHello everyone!')
print('Hello \teveryone!')
# The combination "\n" makes a newline appear in a string. You can use newlines anywhere you like in a string.
# Demonstrate the newline character "\n" at different positions in a string.
print('Hello everyone!')
print('\nHello everyone!')  # fixed: a stray trailing quote here caused a SyntaxError
print('Hello \neveryone!')
print('\n\n\nHello everyone!')
# ### Stripping whitespace
#
# Many times you will allow users to enter text into a box, and then you will read that text and use it. It is really easy for people to include extra whitespace at the beginning or end of their text. Whitespace includes spaces, tabs, and newlines.
#
# It is often a good idea to strip this whitespace from strings before you start working with them. In Python, it is really easy to strip whitespace from the left side, the right side, or both sides of a string.
# +
name = ' ada '
print(name.lstrip()) # strip the spaces on the left hand side
print(name.rstrip()) # strip the spaces on the right hand side
print(name.strip()) # strip the spaces on both sides
# -
# It's hard to see exactly what is happening, so maybe the following will make it a little more clear:
# +
name = ' ada '
print('-' + name.lstrip() + '-')
print('-' + name.rstrip() + '-')
print('-' + name.strip() + '-')
# -
# ** Exercise **:
# - Try to print the following lines using only one print() (excluding the #s):
# +
#********************************************************
#* *
#* I'm loving Python *
#* Let's make programming GREAT again *
#* *
#********************************************************
# -
# ## Numbers
# Dealing with simple numerical data is fairly straightforward in Python, but there are a few things you should know about.
# ### Integers
#
# You can do all of the basic arithmetic operations with integers, and everything should behave as you expect.
print(3+2)
print(3-2)
print(3*2)
print(3/2)
print(3**2)
#
# ### Arithmetic Operators
#
# | Symbol | Task Performed |
# |----|---|
# | + | Addition |
# | - | Subtraction |
# | * | multiplication |
# | ** | to the power of |
# | / | division |
# | // | floor division (divide and then round down to the nearest integer)|
# | % | mod |
# You can use parenthesis to modify the standard order of operations.
standard_order = 2+3*4
print(standard_order)
my_order = (2+3)*4
print(my_order)
# ### Floating-point numbers
# Floating-point numbers refer to any number with a decimal point. Most of the time, you can think of floating point numbers as decimals, and they will behave as you expect them to. All the arithmetic operators also apply to them.
print(0.1+0.1)
# However, sometimes you will get an answer with an unexpectly long decimal part:
print(0.1+0.2)
# This happens because of the way computers represent numbers internally; this has nothing to do with Python itself. Basically, we are used to working in powers of ten, where one tenth plus two tenths is just three tenths. But computers work in powers of two. So your computer has to represent 0.1 in a power of two, and then 0.2 as a power of two, and express their sum as a power of two. There is no exact representation for 0.3 in powers of two, and we see that in the answer to 0.1+0.2.
#
# Python tries to hide this kind of stuff when possible. Don't worry about it much for now; just don't be surprised by it, and know that we will learn to clean up our results a little later on.
#
# You can also get the same kind of result with other operations.
print(3*0.1)
# ### Floating-point division
# +
print(4/2)
# Note: the behaviour of Python 3 and Python 2 regarding division of integers is different.
# In Python 3, / always performs true division, so the result here is the float 2.0.
# In Python 2, the result will be the integer 2 (floor division).
# If you are getting numerical results that you don't expect, or that don't make sense,
# check if the version of Python you are using is treating integers differently than you expect.
# +
print(3/2)
# Note: the behaviour of Python 3 and Python 2 regarding division of integers is different.
# In Python 3 the result is 1.5; in Python 2, the result will be 1 (floor division).
# If you are getting numerical results that you don't expect, or that don't make sense,
# check if the version of Python you are using is treating integers differently than you expect.
# -
# ** Exercise **:
# - Write some code that calculates the roots of a quadratic function given the variable coefficients:a, b, c
#
# The formula is: $ \frac{-b \pm \sqrt{b^2-4ac}}{2a}$.
#
# If there are no roots, print "I'm groot!"; print the roots otherwise.
# ## Comments
# As you begin to write more complicated code, you will have to spend more time thinking about how to code solutions to the problems you want to solve. Once you come up with an idea, you will spend a fair amount of time troubleshooting your code, and revising your overall approach.
#
# Comments allow you to write more detailed and more human readable explanations about your program. In Python, any line that starts with a pound (#) symbol is ignored by the Python interpreter and is known as a line of comment.
# This line is a comment.
print('# This line is not a comment, it is code.')
# ### What makes a good comment?
# - It is short and to the point, but a complete thought. Most comments should be written in complete sentences
# - It explains your thinking, so that when you return to the code later you will understand how you were approaching the problem
# - It explains your thinking, so that others who work with your code will understand your overall approach to a problem
# - It explains particularly difficult sections of code in detail
#
# ### When should you write comments?
#
# - When you have to think about code before writing it
# - When you are likely to forget later exactly how you were approaching a problem
# - When there is more than one way to solve a problem
# - When others are unlikely to anticipate your way of thinking about a problem
#
# Writing good comments is one of the clear signs of a good programmer. If you have any real interest in taking programming seriously, start using comments now.
# ## Lists
# A list is a collection of items, that is stored in a variable. The items should be related in some way, but there are no restrictions on what can be stored in a list. Here is a simple example of a list, and how we can quickly access each item in the list.
# +
students = ['bernice', 'aaron', 'cody']
for student in students: # Hey this is a for-loop. We'll study it later.
print("Hello, " + student.title() + "!")
# -
# ### Naming and defining a list
# Since lists are collection of objects, it is good practice to give them a plural name. If each item in your list is a car, call the list 'cars'. If each item is a dog, call your list 'dogs'. This gives you a straightforward way to refer to the entire list ('dogs'), and to a single item in the list ('dog').
#
# In Python, square brackets designate a list. To define a list, you give the name of the list, the equals sign, and the values you want to include in your list within square brackets.
# ** Exercise **:
# - Declare a list of numbers
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
# ### Accessing one item in a list
# Items in a list are identified by their position in the list, **starting with zero**. This will almost certainly trip you up at some point. Believe it or not, programmers even joke about how often we all make "off-by-one" errors, so don't feel bad when you make this kind of error.
#
# To access the first element in a list, you give the name of the list, followed by a zero in parentheses.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dog = dogs[0]
print(dog.title())
# -
# The number in parentheses is called the _index_ of the item. Because lists start at zero, the index of an item is always one less than its position in the list. So to get the second item in the list, we need to use an index of 1.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dog = dogs[1]
print(dog.title())
# -
# ### Accessing the last items in a list
# You can probably see that to get the last item in this list, we would use an index of 2. This works, but it would only work because our list has exactly three items. To get the last item in a list, no matter how long the list is, you can use an index of -1. (Negative indices are not common in most programming languages.)
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dog = dogs[-1]
print(dog.title())
# -
# This syntax also works for the second to last item, the third to last, and so forth.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dog = dogs[-2]
print(dog.title())
# -
# You cannot use a number larger than the length of the list.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dog = dogs[3]
print(dog.title())
# -
# Similarly, you can't use a negative number larger than the length of the list.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dog = dogs[-4]
print(dog.title())
# -
# ## Lists and Looping
# ### Accessing all elements in a list
# This is one of the most important concepts related to lists. You can have a list with a million items in it, and in three lines of code you can write a sentence for each of those million items. If you want to understand lists, and become a competent programmer, make sure you take the time to understand this section.
#
# We use a loop to access all the elements in a list. A loop is a block of code that repeats itself until it runs out of items to work with, or until a certain condition is met. In this case, our loop will run once for every item in our list. With a list that is three items long, our loop will run three times.
#
# Let's take a look at how we access all the items in a list, and then try to understand how it works.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
print(dog) # hey, why is this line indented?
# -
# We have already seen how to create a list, so we are really just trying to understand how the last two lines work. These last two lines make up a loop, and the language here can help us see what is happening:
# ```python
# for dog in dogs:
# ```
# - The keyword "for" tells Python to get ready to use a loop.
# - The variable "dog", with no "s" on it, is a temporary placeholder variable. This is the variable that Python will place each item in the list into, one at a time.
# - The first time through the loop, the value of "dog" will be 'border collie'.
# - The second time through the loop, the value of "dog" will be 'australian cattle dog'.
# - The third time through, "dog" will be 'labrador retriever'.
# - After this, there are no more items in the list, and the loop will end.
#
# ### Doing more with each item
#
# We can do whatever we want with the value of "dog" inside the loop. In this case, we just print the name of the dog.
# ```python
# print(dog)
# ```
# We are not limited to just printing the word dog. We can do whatever we want with this value, and this action will be carried out for every item in the list. Let's say something about each dog in our list.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
print('I like ' + dog + 's.')
# -
# ### Inside and outside the loop
#
# Python uses **indentation** to decide what is inside the loop and what is outside the loop. Code that is inside the loop will be run for every item in the list. Code that is not indented, which comes after the loop, will be run once just like regular code.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
# are we doing two or three things per iteration?
print('I like ' + dog + 's.')
print('No, I really really like ' + dog +'s!\n')
print("\nThat's just how I feel about dogs.")
# +
# how about this version?
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
for dog in dogs:
# how about writing the code in this way?
print('I like ' + dog + 's.')
print('No, I really really like ' + dog +'s!\n')
print("\nThat's just how I feel about dogs.")
# -
# By the way, indentation in Python really matters. Please pay attention to it when writing Python. We should be consistent: if we use two spaces for one level of indentation on one line, don't use three or four or other amount of spaces on other lines.
#
# 
#
# You may be interested in this article:
# [https://stackoverflow.blog/2017/06/15/developers-use-spaces-make-money-use-tabs/](https://stackoverflow.blog/2017/06/15/developers-use-spaces-make-money-use-tabs/)
# ### Enumerating a list
# When you are looping through a list, you may want to know the index of the current item. You could always use the *list.index(value)* syntax, but there is a simpler way. The *enumerate()* function tracks the index of each item for you, as it loops through the list:
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print("Results for the dog show are as follows:\n")
for index, dog in enumerate(dogs):
place = str(index)
print("Place: " + place + " Dog: " + dog.title())
# -
# To enumerate a list, you need to add an *index* variable to hold the current index. So instead of
# ```python
# for dog in dogs:
# ```
#
# You have
# ```python
# for index, dog in enumerate(dogs)
# ```
#
# The value in the variable *index* is always an integer. If you want to print it in a string, you have to turn the integer into a string:
# ```python
# str(index)
# ```
#
# The index always starts at 0, so in this example the value of *place* should actually be the current index, plus one:
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print("Results for the dog show are as follows:\n")
for index, dog in enumerate(dogs):
place = str(index + 1)
print("Place: " + place + " Dog: " + dog.title())
# -
# List enumeration is particularly useful when a data item is represented by elements from multiple arrays (not a good practice, though). For instance:
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
bark = ['bark', 'bark bark', 'bark bark bark']
print('Barking dogs:\n')
for index, dog in enumerate(dogs):
print(dogs[index] + ': ' + bark[index])
# -
# ## Common list operations
# ### Modifying elements in a list
# You can change the value of any element in a list if you know the position of that item.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dogs[0] = 'australian shepherd'
print(dogs)
# -
# ### Finding an element in a list
# If you want to find out the position of an element in a list, you can use the index() function.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print(dogs.index('australian cattle dog')) # the function index() here is not the variable 'index' we used in the previous examples
# -
# This method returns a ValueError if the requested item is not in the list.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print(dogs.index('poodle'))
# -
# ### Testing whether an item is in a list
# You can test whether an item is in a list using the "in" keyword. This will become more useful after learning how to use if-else statements.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
print('australian cattle dog' in dogs)
print('poodle' in dogs)
# -
# ### Adding items to a list
# #### Appending items to the end of a list
# We can add an item to a list using the append() method. This method adds the new item to the end of the list.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dogs.append('poodle')
for dog in dogs:
print(dog.title() + "s are cool.")
# -
# #### Inserting items into a list
# We can also insert items anywhere we want in a list, using the **insert()** function. We specify the position we want the item to have, and everything from that point on is shifted one position to the right. In other words, the index of every item after the new item is increased by one.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
dogs.insert(1, 'poodle')
print(dogs)
# -
# Note that you have to give the position of the new item first, and then the value of the new item. If you do it in the reverse order, you will get an error.
# ### Creating an empty list
# Now that we know how to add items to a list after it is created, we can use lists more dynamically. We are no longer stuck defining our entire list at once.
#
# A common approach with lists is to define an empty list, and then let your program add items to the list as necessary. This approach works, for example, when starting to build an interactive web site. Your list of users might start out empty, and then as people register for the site it will grow. This is a simplified approach to how web sites actually work, but the idea is realistic.
#
# Here is a brief example of how to start with an empty list, start to fill it up, and work with the items in the list. The only new thing here is the way we define an empty list, which is just an empty set of square brackets.
# +
# Create an empty list to hold our users.
usernames = []
# Add some users.
usernames.append('bernice')
usernames.append('cody')
usernames.append('aaron')
# Greet all of our users.
for username in usernames:
print("Welcome, " + username.title() + '!')
# -
# If we don't change the order in our list, we can use the list to figure out who our oldest and newest users are.
# +
# Create an empty list to hold our users.
usernames = []
# Add some users.
usernames.append('bernice')
usernames.append('cody')
usernames.append('aaron')
# Greet all of our users.
for username in usernames:
print("Welcome, " + username.title() + '!')
# Recognize our first user, and welcome our newest user.
print("\nThank you for being our very first user, " + usernames[0].title() + '!')
print("And a warm welcome to our newest user, " + usernames[-1].title() + '!')
# -
# Note that the code welcoming our newest user will always work, because we have used the index -1. If we had used the index 2 we would always get the third user, even as our list of users grows and grows.
# ### Sorting a List
# We can sort a list alphabetically, in either order.
# +
students = ['bernice', 'aaron', 'cody']
# Put students in alphabetical order.
students.sort()
# Display the list in its current order.
print("Our students are currently in alphabetical order.")
for student in students:
    print(student.title())
# Put students in reverse alphabetical order.
students.sort(reverse=True)
# Display the list in its current order.
print("\nOur students are now in reverse alphabetical order.")
for student in students:  # fixed typo: was 'studenbts', which raised a NameError
    print(student.title())
# -
# #### *sorted()* vs. *sort()*
# Whenever you consider sorting a list using sort(), keep in mind that you can not recover the original order. If you want to display a list in sorted order, but preserve the original order, you can use the *sorted()* function. The *sorted()* function also accepts the optional *reverse=True* argument. Please note that sorted() is not a function of the list datastructure.
# +
students = ['bernice', 'aaron', 'cody']

# Display students in alphabetical order, but keep the original order.
print("Here is the list in alphabetical order:")
for name in sorted(students):
    print(name.title())

# Display students in reverse alphabetical order, but keep the original order.
print("\nHere is the list in reverse alphabetical order:")
for name in sorted(students, reverse=True):
    print(name.title())

print("\nHere is the list in its original order:")
# Show that the list is still in its original order.
for name in students:
    print(name.title())
# -
# #### Reversing a list
# We have seen three possible orders for a list:
# - The original order in which the list was created
# - Alphabetical order
# - Reverse alphabetical order
#
# There is one more order we can use, and that is the reverse of the original order of the list. The *reverse()* function gives us this order.
# +
students = ['bernice', 'aaron', 'cody']
students.reverse()
print(students)
# -
# Note that reverse is permanent, although you could follow up with another call to *reverse()* and get back the original order of the list.
# #### Sorting a numerical list
# All of the sorting functions work for numerical lists as well.
# +
numbers = [1, 3, 4, 2]

# sort() puts numbers in increasing order;
# sort(reverse=True) puts them in decreasing order.
for descending in (False, True):
    numbers.sort(reverse=descending)
    print(numbers)
# +
numbers = [1, 3, 4, 2]
# sorted() preserves the original order of the list:
print(sorted(numbers))
print(numbers)
# +
numbers = [1, 3, 4, 2]
# The reverse() function also works for numerical lists.
numbers.reverse()
print(numbers)
# -
# ** Exercise **:
# - Write a program to find the 2nd largest of an integer array.
# - If the array is not large enough, print "Not enough data"; print the result otherwise.
#
# For example, suppose the integer array (<code>intarray</code>) is:
# intarray = [1, 2, 3, 4, 5, 6, 7, 9, 10]
#
# The result should be 9.
# ### Finding the length of a list
# You can find the length of a list using the *len()* function.
# +
usernames = ['bernice', 'cody', 'aaron']
user_count = len(usernames)
print(user_count)
# -
# There are many situations where you might want to know how many items are in a list. If you have a list that stores your users, you can find the length of your list at any time, and know how many users you have.
# +
# Create an empty list to hold our users.
usernames = []

# Add one user, and report how many users we have so far.
usernames.append('bernice')
user_count = len(usernames)
print("We have " + str(user_count) + " user!")

# Add two more users, and report the new total.
usernames.extend(['cody', 'aaron'])
user_count = len(usernames)
print("We have " + str(user_count) + " users!")
# -
# On a technical note, the *len()* function returns an integer, which can't be printed directly with strings. We use the *str()* function to turn the integer into a string so that it prints nicely:
# +
usernames = ['bernice', 'cody', 'aaron']
user_count = len(usernames)
# Intentional error for demonstration: concatenating a str and an
# int raises a TypeError. The next cell shows the str() fix.
print("This will cause an error: " + user_count)
# +
usernames = ['bernice', 'cody', 'aaron']
user_count = len(usernames)
print("This will work: " + str(user_count))
# -
# ### Removing Items from a List
# Hopefully you can see by now that lists are a dynamic structure. We can define an empty list and then fill it up as information comes into our program. To become really dynamic, we need some ways to remove items from a list when we no longer need them. You can remove items from a list through their position, or through their value.
# #### Removing items by position
# If you know the position of an item in a list, you can remove that item using the *del* command. To use this approach, give the command *del* and the name of your list, with the index of the item you want to remove in square brackets:
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
# Remove the first dog from the list.
del dogs[0]
print(dogs)
# -
# #### Removing items by value
# You can also remove an item from a list if you know its value. To do this, we use the *remove()* function. Give the name of the list, followed by the word remove with the value of the item you want to remove in parentheses. Python looks through your list, finds the first item with this value, and removes it.
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
# Remove australian cattle dog from the list.
dogs.remove('australian cattle dog')
print(dogs)
# -
# Be careful to note, however, that *only* the first item with this value is removed. If you have multiple items with the same value, you will have some items with this value left in your list.
# +
letters = ['a', 'b', 'c', 'a', 'b', 'c']
# Remove the letter a from the list.
letters.remove('a')
print(letters)
# -
# ### Popping items from a list
#
# There is a cool concept in programming called "popping" items from a collection. Every programming language has some sort of data structure similar to Python's lists. All of these structures can be used as stacks or queues, and there are various ways of processing the items in such a collection.
#
# One simple approach is to start with an empty list, and then add items to that list. When you want to work with the items in the list, you always take the last item from the list, do something with it, and then remove that item. The *pop()* function makes this easy. It removes the last item from the list, and gives it to us so we can work with it. This is easier to show with an example:
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
last_dog = dogs.pop()
print(last_dog)
print(dogs)
# -
# This is an example of a first-in, last-out approach. The first item in the list would be the last item processed if you kept using this approach. We will see a full implementation of this approach later on, when we learn about *while* loops.
#
# You can actually pop any item you want from a list, by giving the index of the item you want to pop. So we could do a first-in, first-out approach by popping the first item in the list:
# +
dogs = ['border collie', 'australian cattle dog', 'labrador retriever']
first_dog = dogs.pop(0)
print(first_dog)
print(dogs)
# -
# ** Exercise **:
# - Write code to delete consecutive duplicates of list elements
#
# For example, given:
# x = [1, 1, 2, 3, 4, 5, 6, 6, 6, 7]
#
# The result should be:
# [1, 2, 3, 4, 5, 6, 7]
# ** Exercise **:
# - Write code to duplicate the elements of a list
#
# For example, given:
# x = [1, 1, 2, 3, 4, 5, 6, 6, 6, 7]
#
# The result should be:
# [1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 6, 6, 6, 7, 7]
# ### Slicing a List
# Since a list is a collection of items, we should be able to get any subset of those items. For example, if we want to get just the first three items from the list, we should be able to do so easily. The same should be true for any three items in the middle of the list, or the last three items, or any x items from anywhere in the list. These subsets of a list are called *slices*.
#
# To get a subset of a list, we give the position of the first item we want, and the position of the first item we do *not* want to include in the subset. So the slice *list[0:3]* will return a list containing items 0, 1, and 2, but not item 3. Here is how you get a batch containing the first three items.
# +
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab the first three users in the list.
first_batch = usernames[0:3]
for user in first_batch:
print(user.title())
# -
# If you want to grab everything up to a certain position in the list, you can also leave the first index blank:
# +
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab the first three users in the list.
first_batch = usernames[:3]
for user in first_batch:
print(user.title())
# -
# When we grab a slice from a list, the original list is not affected:
# +
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab the first three users in the list.
first_batch = usernames[0:3]
# The original list is unaffected.
for user in usernames:
print(user.title())
# -
# We can get any segment of a list we want, using the slice method:
# +
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab a batch from the middle of the list.
middle_batch = usernames[1:4]
for user in middle_batch:
print(user.title())
# -
# To get all items from one position in the list to the end of the list, we can leave off the second index:
# +
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']
# Grab all users from the third to the end.
end_batch = usernames[2:]
for user in end_batch:
print(user.title())
# -
# ### Copying a list (Please pay attention to this section)
# You can use the slice notation to make a copy of a list, by leaving out both the starting and the ending index. This causes the slice to consist of everything from the first item to the last, which is the entire list.
# +
usernames = ['bernice', 'cody', 'aaron', 'ever', 'dalia']

# Make a copy of the list with a full slice.
copied_usernames = usernames[:]
print("The full copied list:\n\t", copied_usernames)

# Remove the first two users from the copied list.
del copied_usernames[:2]
print("\nTwo users removed from copied list:\n\t", copied_usernames)

# The original list is unaffected.
print("\nThe original list:\n\t", usernames)
# -
# ## Numerical lists
# There is nothing special about lists of numbers, but there are some functions you can use to make working with numerical lists more efficient. Let's make a list of the first ten numbers, and start working with it to see how we can use numbers in a list.
# +
# Print out the first ten numbers.
numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for number in numbers:
print(number)
# -
# ** Exercise **:
# - Shift the elements of an integer list by one to the left, and then increment the value of each cell by its new index. The rightmost element of the original list should be placed at the end after shifting.
#
# For example,
# x = [1, 2, 3, 4]
#
# After shifting, the result should be:
# [2, 3, 4, 1]
#
# The final result should be:
# [2, 4, 6, 4]
# ### The *range()* function
# This works, but it is not very efficient if we want to work with a large set of numbers. The *range()* function helps us generate long lists of numbers. Here are two ways to do the same thing, using the *range* function.
# Print the first ten numbers.
for number in range(1,11):
print(number)
# The range function takes in a starting number, and an end number. You get all integers, up to but not including the end number. You can also add a *step* value, which tells the *range* function how big of a step to take between numbers:
# Print the first ten odd numbers.
for number in range(1,21,2):
print(number)
# If we want to store these numbers in a list, we can use the *list()* function. This function takes in a range, and turns it into a list:
# Create a list of the first ten numbers.
numbers = list(range(1,11))
print(numbers)
# This is incredibly powerful; we can now create a list of the first million numbers, just as easily as we made a list of the first ten numbers. It doesn't really make sense to print the million numbers here, but we can show that the list really does have one million items in it, and we can print the last ten items to show that the list is correct.
# +
# Store the first million numbers in a list.
numbers = list(range(1,1000001))
# Show the length of the list:
print("The list 'numbers' has " + str(len(numbers)) + " numbers in it.")
# Show the last ten numbers:
print("\nThe last ten numbers in the list are:")
# numbers[-10:] is a slice from the tenth-from-last item to the end.
for number in numbers[-10:]:
    print(number)
# -
# There are two things here that might be a little unclear. The expression
#
# str(len(numbers))
#
# takes the length of the *numbers* list, and turns it into a string that can be printed.
#
# The expression
#
# numbers[-10:]
#
# gives us a *slice* of the list. The index `-1` is the last item in the list, and the index `-10` is the item ten places from the end of the list. So the slice `numbers[-10:]` gives us everything from that item to the end of the list.
# ** Exercise **:
# - Split a list into two. The first list should contain N randomly drawn elements from the original list of length L; whereas the second list should contain the remaining (L-N) elements in the original list.
#
# You can use the following code to generate a list of random integers (please modify it according to your need):
# +
import numpy as np
np.random.randint(low=0,high=10,size=10)
# -
# ### The *min()*, *max()*, and *sum()* functions
#
# There are three functions you can easily use with numerical lists. As you might expect, the *min()* function returns the smallest number in the list, the *max()* function returns the largest number in the list, and the *sum()* function returns the total of all numbers in the list.
# +
ages = [23, 16, 14, 28, 19, 11, 38]

# min(), max(), and sum() summarize a numerical list in one call each.
youngest, oldest, total_years = min(ages), max(ages), sum(ages)

print("Our youngest reader is " + str(youngest) + " years old.")
print("Our oldest reader is " + str(oldest) + " years old.")
print("Together, we have " + str(total_years) + " years worth of life experience.")
# -
# ## List comprehensions
# If you are brand new to programming, list comprehensions may look confusing at first. They are a shorthand way of creating and working with lists. It is good to be aware of list comprehensions, because you will see them in other people's code, and they are really useful when you understand how to use them. That said, if they don't make sense to you yet, don't worry about using them right away. When you have worked with enough lists, you will want to use comprehensions. For now, it is good enough to know they exist, and to recognize them when you see them. If you like them, go ahead and start trying to use them now. (Using list comprehensions is a more idiomatic way of programming in Python.)
#
# ### Numerical comprehensions
# Let's consider how we might make a list of the first ten square numbers. We could do it like this:
# +
# Store the first ten square numbers in a list.
# Make an empty list that will hold our square numbers.
squares = []
# Go through the first ten numbers, square them, and add them to our list.
# Note: range(1,11) yields 1 through 10; the end value is excluded.
for number in range(1,11):
    new_square = number**2
    squares.append(new_square)
# Show that our list is correct.
for square in squares:
    print(square)
# -
# This should make sense at this point. If it doesn't, go over the code with these thoughts in mind:
# - We make an empty list called *squares* that will hold the values we are interested in.
# - Using the *range()* function, we start a loop that will go through the numbers 1-10.
# - Each time we pass through the loop, we find the square of the current number by raising it to the second power.
# - We add this new value to our list *squares*.
# - We go through our newly-defined list and print out each square.
#
# Now let's make this code more efficient. We don't really need to store the new square in its own variable *new_square*; we can just add it directly to the list of squares. The line
#
# new_square = number**2
#
# is taken out, and the next line takes care of the squaring:
# +
# Store the first ten square numbers in a list.
# Make an empty list that will hold our square numbers.
squares = []
# Go through the first ten numbers, square them, and add them to our list.
for number in range(1,11):
squares.append(number**2)
# Show that our list is correct.
for square in squares:
print(square)
# -
# List comprehensions allow us to collapse the first three lines of code into one line. Here's what it looks like:
# +
# Store the first ten square numbers in a list.
squares = [number**2 for number in range(1,11)]
# Show that our list is correct.
for square in squares:
print(square)
# -
# It should be pretty clear that this code is more efficient than our previous approach, but it may not be clear what is happening. Let's take a look at everything that is happening in that first line:
#
# We define a list called *squares*.
#
# Look at the second part of what's in square brackets:
# ```python
# for number in range(1,11)
# ```
# This sets up a loop that goes through the numbers 1-10, storing each value in the variable *number*. Now we can see what happens to each *number* in the loop:
# ```python
# number**2
# ```
# Each number is raised to the second power, and this is the value that is stored in the list we defined. We might read this line in the following way:
#
# squares = [raise *number* to the second power, for each *number* in the range 1-10]
#
# Or more mathematical:
# \begin{align}
# \text{squares} &= \{x^2 \mid x \in \mathbb{Z} \land 1 \leq x \leq 10\}
# \end{align}
# It is probably helpful to see a few more examples of how comprehensions can be used. Let's try to make the first ten even numbers, the longer way:
# +
# Make an empty list that will hold the even numbers.
evens = []
# Loop through the numbers 1-10, double each one, and add it to our list.
for number in range(1,11):
evens.append(number*2)
# Show that our list is correct:
for even in evens:
print(even)
# -
# Here's how we might think of doing the same thing, using a list comprehension:
#
# evens = [multiply each *number* by 2, for each *number* in the range 1-10]
#
# Here is the same line in code:
# +
# Make a list of the first ten even numbers.
evens = [number*2 for number in range(1,11)]
for even in evens:
print(even)
# -
# ### Non-numerical comprehensions
# We can use comprehensions with non-numerical lists as well. In this case, we will create an initial list, and then use a comprehension to make a second list from the first one. Here is a simple example, without using comprehensions:
# +
# Consider some students.
students = ['bernice', 'aaron', 'cody']

# Let's turn them into great students.
great_students = []
for name in students:
    title_cased = name.title()
    great_students.append(title_cased + " the great!")

# Let's greet each great student.
for greeting in great_students:
    print("Hello, " + greeting)
# -
# To use a comprehension in this code, we want to write something like this:
#
# great_students = [add 'the great' to each *student*, for each *student* in the list of *students*]
#
# Here's what it looks like:
# +
# Consider some students.
students = ['bernice', 'aaron', 'cody']
# Let's turn them into great students.
great_students = [student.title() + " the great!" for student in students]
# Let's greet each great student.
for great_student in great_students:
print("Hello, " + great_student)
# -
# ## Strings as Lists
#
# Now that you have some familiarity with lists, we can take a second look at strings. A string is really a list of characters, so many of the concepts from working with lists behave the same with strings.
# ### Strings as a list of characters
# We can loop through a string using a *for* loop, just like we loop through a list:
# +
message = "Hello!"
for letter in message:
print(letter)
# -
# We can create a list from a string. The list will have one element for each character in the string:
# +
message = "Hello world!"
message_list = list(message)
print(message_list)
# -
# ### Slicing strings
# We can access any character in a string by its position, just as we access individual items in a list:
# +
message = "Hello World!"
first_char = message[0]
last_char = message[-1]
print(first_char, last_char)
# -
# We can extend this to take slices of a string:
# +
message = "Hello World!"
first_three = message[:3]
last_three = message[-3:]
print(first_three, last_three)
# -
# ### Finding substrings
# Now that you have seen what indexes mean for strings, we can search for *substrings*. A substring is a series of characters that appears in a string.
#
# You can use the *in* keyword to find out whether a particular substring appears in a string:
message = "I like cats and dogs."
dog_present = 'dog' in message
print(dog_present)
# If you want to know where a substring appears in a string, you can use the *find()* method. The *find()* method tells you the index at which the substring begins.
message = "I like cats and dogs."
dog_index = message.find('dog')
print(dog_index)
# Note, however, that this function only returns the index of the first appearance of the substring you are looking for. If the substring appears more than once, you will miss the other substrings.
message = "I like cats and dogs, but I'd much rather own a dog."
dog_index = message.find('dog')
print(dog_index)
# If you want to find the last appearance of a substring, you can use the *rfind()* function:
message = "I like cats and dogs, but I'd much rather own a dog."
last_dog_index = message.rfind('dog')
print(last_dog_index)
# ### Replacing substrings
# You can use the *replace()* function to replace any substring with another substring. To use the *replace()* function, give the substring you want to replace, and then the substring you want to replace it with. You also need to store the new string, either in the same string variable or in a new variable.
message = "I like cats and dogs, but I'd much rather own a dog."
message = message.replace('dog', 'snake')
print(message)
# ### Counting substrings
# If you want to know how many times a substring appears within a string, you can use the *count()* method.
message = "I like cats and dogs, but I'd much rather own a dog."
number_dogs = message.count('dog')
print(number_dogs)
# ### Splitting strings
# Strings can be split into a set of substrings when they are separated by a repeated character. If a string consists of a simple sentence, the string can be split based on spaces. The *split()* function returns a list of substrings. The *split()* function takes one argument, the character that separates the parts of the string.
message = "I like cats and dogs, but I'd much rather own a dog."
words = message.split(' ')
print(words)
# Notice that the punctuation is left in the substrings.
#
# It is more common to split strings that are really lists, separated by something like a comma. The *split()* function gives you an easy way to turn comma-separated strings, which you can't do much with in Python, into lists. Once you have your data in a list, you can work with it in much more powerful ways.
# +
animals = "dog, cat, tiger, mouse, liger, bear"

# Rewrite the string as a list, and store it in the same variable
# NOTE(review): split(',') keeps the space after each comma at the
# start of each item (e.g. ' cat'); call .strip() on each item to drop it.
animals = animals.split(',')
print(animals)
# -
# Notice that in this case, the space after each comma is kept at the beginning of each item; you can remove it by calling the *strip()* method on each item. It is a good idea to test the output of the *split()* function and make sure it is doing what you want with the data you are interested in.
#
# One use of this is to work with spreadsheet data in your Python programs. Most spreadsheet applications allow you to dump your data into a comma-separated text file. You can read this file into your Python program, or even copy and paste from the text file into your program file, and then turn the data into a list. You can then process your spreadsheet data using a *for* loop.
# ### Other string methods
# There are a number of [other string methods](https://docs.python.org/3.8/library/stdtypes.html#string-methods) that we won't go into right here, but you might want to take a look at them. Most of these methods should make sense to you at this point. You might not have use for any of them right now, but it is good to know what you can do with strings. This way you will have a sense of how to solve certain problems, even if it means referring back to the list of methods to remind yourself how to write the correct syntax when you need it.
# ## Tuples
# Tuples are basically lists that can never be changed. Lists are quite dynamic; they can grow as you append and insert items, and they can shrink as you remove items. You can modify any element you want to in a list. Sometimes we like this behavior, but other times we may want to ensure that no user or no part of a program can change a list. That's what tuples are for.
#
# Technically, lists are *mutable* objects and tuples are *immutable* objects. Mutable objects can change (think of *mutations*), and immutable objects can not change.
#
# ### Defining tuples, and accessing elements
#
# You define a tuple just like you define a list, except you use parentheses instead of square brackets. Once you have a tuple, you can access individual elements just like you can with a list, and you can loop through the tuple with a *for* loop:
# +
colors = ('red', 'green', 'blue')
print("The first color is: " + colors[0])
print("\nThe available colors are:")
for color in colors:
print("- " + color)
# -
# If you try to add something to a tuple, you will get an error:
colors = ('red', 'green', 'blue')
# Intentional error for demonstration: tuples are immutable, so they
# have no append() method and this line raises an AttributeError.
colors.append('purple')
# The same kind of thing happens when you try to remove something from a tuple, or modify one of its elements. Once you define a tuple, you can be confident that its values will not change.
# ### Using tuples to make strings
# We have seen that it is pretty useful to be able to mix raw English strings with values that are stored in variables, as in the following:
animal = 'dog'
print("I have a " + animal + ".")
# This was especially useful when we had a series of similar statements to make:
animals = ['dog', 'cat', 'bear']
for animal in animals:
print("I have a " + animal + ".")
# I like this approach of using the plus sign to build strings because it is fairly intuitive. We can see that we are adding several smaller strings together to make one longer string. This is intuitive, but it is a lot of typing. There is a shorter way to do this, using *placeholders*.
#
# Python ignores most of the characters we put inside of strings. There are a few characters that Python pays attention to, as we saw with strings such as "\t" and "\n". Python also pays attention to "%s" and "%d". These are placeholders. When Python sees the "%s" placeholder, it looks ahead and pulls in the first argument after the % sign:
animal = 'dog'
print("I have a %s." % animal)
# This is a much cleaner way of generating strings that include values. We compose our sentence all in one string, and then tell Python what values to pull into the string, in the appropriate places.
#
# This is called *string formatting*, and it looks the same when you use a list:
animals = ['dog', 'cat', 'bear']
for animal in animals:
print("I have a %s." % animal)
# If you have more than one value to put into the string you are composing, you have to pack the values into a tuple:
animals = ['dog', 'cat', 'bear']
print("I have a %s, a %s, and a %s." % (animals[0], animals[1], animals[2]))
# ### String formatting with numbers
#
# If you recall, printing a number with a string can cause an error:
number = 23
# Intentional error for demonstration: concatenating a str and an
# int raises a TypeError; the next cell shows the str() fix.
print("My favorite number is " + number + ".")
# Python knows that you could be talking about the value 23, or the characters '23'. So it throws an error, forcing us to clarify that we want Python to treat the number as a string. We do this by *casting* the number into a string using the *str()* function:
number = 23
print("My favorite number is " + str(number) + ".")
# The format string "%d" takes care of this for us. Watch how clean this code is:
number = 23
print("My favorite number is %d." % number)
# If you want to use a series of numbers, you pack them into a tuple just like we saw with strings:
numbers = [7, 23, 42]
print("My favorite numbers are %d, %d, and %d." % (numbers[0], numbers[1], numbers[2]))
# Just for clarification, look at how much longer the code is if you use concatenation instead of string formatting:
numbers = [7, 23, 42]
print("My favorite numbers are " + str(numbers[0]) + ", " + str(numbers[1]) + ", and " + str(numbers[2]) + ".")
# You can mix string and numerical placeholders in any order you want.
names = ['Ada', 'ever']
numbers = [23, 2]
print("%s's favorite number is %d, and %s's favorite number is %d." % (names[0].title(), numbers[0], names[1].title(), numbers[1]))
# There are more sophisticated ways to do string formatting in Python 3, but we will save that for later because it's a bit less intuitive than this approach. For now, you can use whichever approach consistently gets you the output that you want to see.
# ## If Statements
# By allowing you to respond selectively to different situations and conditions, if statements open up whole new possibilities for your programs. In this section, you will learn how to test for certain conditions, and then respond in appropriate ways to those conditions.
# ### What is an *if* statement?
# An *if* statement tests for a condition, and then responds to that condition. If the condition is true, then whatever action is listed next gets carried out. You can test for multiple conditions at the same time, and respond appropriately to each condition.
#
# Here is an example that shows a number of the desserts I like. It lists those desserts, but lets you know which one is my favorite.
# +
# A list of desserts I like.
desserts = ['ice cream', 'chocolate', 'apple crisp', 'cookies']
favorite_dessert = 'apple crisp'

# Print the desserts out, but let everyone know my favorite dessert.
for dessert in desserts:
    if dessert != favorite_dessert:
        # I like these desserts, but they are not my favorite.
        print("I like %s." % dessert)
    else:
        # This dessert is my favorite, let's let everyone know!
        print("%s is my favorite dessert!" % dessert.title())
# -
# #### What happens in this program?
#
# - The program starts out with a list of desserts, and one dessert is identified as a favorite.
# - The for loop runs through all the desserts.
# - Inside the for loop, each item in the list is tested.
# - If the current value of *dessert* is equal to the value of *favorite_dessert*, a message is printed that this is my favorite.
# - If the current value of *dessert* is not equal to the value of *favorite_dessert*, a message is printed that I just like the dessert.
#
# You can test as many conditions as you want in an if statement, as you will see in a little bit.
# ### Logical Tests
# Every if statement evaluates to *True* or *False*. *True* and *False* are Python keywords, which have special meanings attached to them. You can test for the following conditions in your if statements:
#
# - [equality](#equality) (==)
# - [inequality](#inequality) (!=)
# - [other inequalities](#other_inequalities)
# - greater than (>)
# - greater than or equal to (>=)
# - less than (<)
# - less than or equal to (<=)
# - [You can test if an item is **in** a list.](#in_list)
#
# #### Equality
# Two items are *equal* if they have the same value. You can test for equality between numbers, strings, and a number of other objects which you will learn about later. Some of these results may be surprising, so take a careful look at the examples below.
#
# In Python, as in many programming languages, two equals signs tests for equality.
#
# **Watch out!** Be careful of accidentally using one equals sign, which can really throw things off because that one equals sign actually sets your item to the value you are testing for!
5 == 5
3 == 5
5 == 5.0
'ada' == 'ada'
'Ada' == 'ada'
'Ada'.lower() == 'ada'.lower()
'5' == 5
'5' == str(5)
# #### Inequality
# Two items are *unequal* if they do not have the same value. In Python, we test for inequality using the exclamation point and one equals sign.
#
# Sometimes you want to test for equality and if that fails, assume inequality. Sometimes it makes more sense to test for inequality directly.
3 != 5  # True: the values differ
5 != 5  # False: the values are equal
'Ada' != 'ada'  # True: different case means different strings
# #### Other Inequalities
# ##### greater than
5 > 3  # True
# ##### greater than or equal to
5 >= 3  # True
3 >= 3  # True: equal values pass the >= test
# ##### less than
3 < 5  # True
# ##### less than or equal to
3 <= 5  # True
3 <= 3  # True: equal values pass the <= test
# ### Checking if an item is **in** a list
# You can check if an item is in a list using the **in** keyword.
vowels = ['a', 'e', 'i', 'o', 'u']
'a' in vowels  # True: 'a' is an element of the list
vowels = ['a', 'e', 'i', 'o', 'u']
'b' in vowels  # False: 'b' is not in the list
# ### The if-elif...else chain
# You can test whatever series of conditions you want to, and you can test your conditions in any combination you want.
# ### Simple if statements
# The simplest test has a single **if** statement, and a single statement to execute if the condition is **True**.
# +
dogs = ['willie', 'hootz', 'peso', 'juno']
# The list holds four names, so the test passes and the message prints.
if len(dogs) > 3:
    print("Wow, we have a lot of dogs here!")
# -
# In this situation, nothing happens if the test does not pass.
# +
dogs = ['willie', 'hootz']
# Only two dogs now, so the test fails and nothing is printed.
if len(dogs) > 3:
    print("Wow, we have a lot of dogs here!")
# -
# Notice that there are no errors. The condition `len(dogs) > 3` evaluates to False, and the program moves on to any lines after the **if** block.
# ### if-else statements
# Many times you will want to respond in two possible ways to a test. If the test evaluates to **True**, you will want to do one thing. If the test evaluates to **False**, you will want to do something else. The **if-else** structure lets you do that easily. Here's what it looks like:
# +
dogs = ['willie', 'hootz', 'peso', 'juno']
if len(dogs) > 3:
    # Four dogs: the test passes, so this branch runs.
    print("Wow, we have a lot of dogs here!")
else:
    print("Okay, this is a reasonable number of dogs.")
# -
# Our results have not changed in this case, because if the test evaluates to **True** only the statements under the **if** statement are executed. The statements under **else** are only executed if the test fails:
# +
dogs = ['willie', 'hootz']
if len(dogs) > 3:
    print("Wow, we have a lot of dogs here!")
else:
    # Two dogs: the test fails, so the else branch runs.
    print("Okay, this is a reasonable number of dogs.")
# -
# The test evaluated to **False**, so only the statement under `else` is run.
# ### if-elif...else chains
# Many times, you will want to test a series of conditions, rather than just an either-or situation. You can do this with a series of if-elif-else statements
#
# There is no limit to how many conditions you can test. You always need one if statement to start the chain, and you can never have more than one else statement. But you can have as many elif statements as you want.
# +
dogs = ['willie', 'hootz', 'peso', 'monty', 'juno', 'turkey']
# Six dogs: the first test passes, so only the first message prints.
if len(dogs) >= 5:
    print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
    print("Wow, we have a lot of dogs here!")
else:
    print("Okay, this is a reasonable number of dogs.")
# -
# It is important to note that in situations like this, only the first test is evaluated. In an if-elif-else chain, once a test passes the rest of the conditions are ignored.
# +
dogs = ['willie', 'hootz', 'peso', 'monty']
# Four dogs: the first test fails, the elif passes, the else is skipped.
if len(dogs) >= 5:
    print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
    print("Wow, we have a lot of dogs here!")
else:
    print("Okay, this is a reasonable number of dogs.")
# -
# The first test failed, so Python evaluated the second test. That test passed, so the statement corresponding to `len(dogs) >= 3` is executed.
# +
dogs = ['willie', 'hootz']
# Two dogs: both tests fail, so the else branch runs.
if len(dogs) >= 5:
    print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
    print("Wow, we have a lot of dogs here!")
else:
    print("Okay, this is a reasonable number of dogs.")
# -
# In this situation, the first two tests fail, so the statement in the else clause is executed. Note that this statement would be executed even if there are no dogs at all:
# +
dogs = []
# An empty list: both tests fail; the else branch still runs.
if len(dogs) >= 5:
    print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
    print("Wow, we have a lot of dogs here!")
else:
    print("Okay, this is a reasonable number of dogs.")
# -
# Note that you don't have to take any action at all when you start a series of if statements. You could simply do nothing in the situation that there are no dogs by replacing the `else` clause with another `elif` clause:
# +
dogs = []
# No dogs: every test fails, and with no else clause nothing is printed.
if len(dogs) >= 5:
    print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
    print("Wow, we have a lot of dogs here!")
elif len(dogs) >= 1:
    print("Okay, this is a reasonable number of dogs.")
# -
# In this case, we only print a message if there is at least one dog present. Of course, you could add a new `else` clause to respond to the situation in which there are no dogs at all:
# +
dogs = []
# No dogs: all three tests fail, so the final else runs.
if len(dogs) >= 5:
    print("Holy mackerel, we might as well start a dog hostel!")
elif len(dogs) >= 3:
    print("Wow, we have a lot of dogs here!")
elif len(dogs) >= 1:
    print("Okay, this is a reasonable number of dogs.")
else:
    print("I wish we had a dog here.")
# -
# As you can see, the if-elif-else chain lets you respond in very specific ways to any given situation.
# ### More than one passing test
# In all of the examples we have seen so far, only one test can pass. As soon as the first test passes, the rest of the tests are ignored. This is really good, because it allows our code to run more efficiently. Many times only one condition can be true, so testing every condition after one passes would be meaningless.
#
# There are situations in which you want to run a series of tests, where every single test runs. These are situations where any or all of the tests could pass, and you want to respond to each passing test. Consider the following example, where we want to greet each dog that is present:
# +
dogs = ['willie', 'hootz']
# Each if statement is independent, so every test runs and every
# present dog is greeted.
if 'willie' in dogs:
    print("Hello, Willie!")
if 'hootz' in dogs:
    print("Hello, Hootz!")
if 'peso' in dogs:
    print("Hello, Peso!")
if 'monty' in dogs:
    print("Hello, Monty!")
# -
# If we had done this using an if-elif-else chain, only the first dog that is present would be greeted:
# +
dogs = ['willie', 'hootz']
# An if-elif chain stops at the first passing test, so only Willie
# is greeted even though Hootz is also present.
if 'willie' in dogs:
    print("Hello, Willie!")
elif 'hootz' in dogs:
    print("Hello, Hootz!")
elif 'peso' in dogs:
    print("Hello, Peso!")
elif 'monty' in dogs:
    print("Hello, Monty!")
# -
# Of course, this could be written much more cleanly using lists and for loops. See if you can follow this code.
# +
dogs_we_know = ['willie', 'hootz', 'peso', 'monty', 'juno', 'turkey']
dogs_present = ['willie', 'hootz']
# Go through all the dogs that are present, and greet the dogs we know.
for dog in dogs_present:
    if dog in dogs_we_know:
        # title() capitalizes the name for display.
        print("Hello, %s!" % dog.title())
# -
# This is the kind of code you should be aiming to write. It is fine to come up with code that is less efficient at first. When you notice yourself writing the same kind of code repeatedly in one program, look to see if you can use a loop or a function to make your code more efficient.
# ## True and False values
# Every value can be evaluated as True or False. The general rule is that any non-zero or non-empty value will evaluate to True. If you are ever unsure, you can open a Python terminal and write two lines to find out if the value you are considering is True or False. Take a look at the following examples, keep them in mind, and test any value you are curious about. I am using a slightly longer test just to make sure something gets printed each time.
# Zero evaluates to False.
if 0:
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# One evaluates to True.
if 1:
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# Arbitrary non-zero numbers evaluate to True.
if 1253756:
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# Negative numbers are not zero, so they evaluate to True.
if -1:
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# An empty string evaluates to False.
if '':
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# Any other string, including a space, evaluates to True.
if ' ':
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# Any non-empty string evaluates to True.
if 'hello':
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# None is a special object in Python. It evaluates to False.
if None:
    print("This evaluates to True.")
else:
    print("This evaluates to False.")
# ** Exercise (the exercises are getting harder from now on) **:
# - Given two strings a and b, find the longest substring in a that can be found in b. The index of the first character of the matched longest substring in b should also be reported.
#
# For example,
#
# a = "a dream"
#
# b = "I have a dream that one day this nation will rise up"
#
# The result is "a dream". The index of the 'a' in "a dream" is 7 in b.
# ## While Loops
#
# A while loop tests an initial condition. If that condition is true, the loop starts executing. Every time the loop finishes, the condition is reevaluated. As long as the condition remains true, the loop keeps executing. As soon as the condition becomes false, the loop stops executing.
# ### General syntax
#
# +
# Set an initial condition.
game_active = True
# Set up the while loop.
while game_active:
    # Run the game.
    # At some point, the game ends and game_active will be set to False.
    # When that happens, the loop will stop executing.
    # Placeholder for real game code: flip the flag so this template is
    # valid Python (a suite may not contain only comments) and terminates.
    game_active = False
# Do anything else you want done after the loop runs.
# -
# - Every while loop needs an initial condition that starts out true.
# - The `while` statement includes a condition to test.
# - All of the code in the loop will run as long as the condition remains true.
# - As soon as something in the loop changes the condition such that the test no longer passes, the loop stops executing.
# - Any code that is defined after the loop will run at this point.
# Here is a simple example, showing how a game will stay active as long as the player has enough power.
# +
# Start the player off with 5 power.
power = 5
# Keep the game going until the player runs out of power.
while power > 0:
    print("You are still playing, because your power is %d." % power)
    # The real game logic would live here; losing challenges would drain
    # power. We simulate that with a simple decrement each pass.
    power -= 1
# The loop test failed, so the game is over.
print("\nOh no, your power dropped to 0! Game Over.")
# -
# ** Exercise **:
# - Write the functionally equivalent while-loop version for the following code:
#
# ```
# sum = 0
# for i in range(1, 100, 2):
# sum = sum + i
# ```
# ** Exercise **:
# - Write the functionally equivalent while-loop version for the following code:
#
# ```
# sum = 9999
# for i in range(100, 0, -1):
# sum = sum - i
# ```
# ** Exercise **:
# - Write the functionally equivalent while-loop version for the following code:
#
# ```
# sum = 9999
# for i in range(100, 0):
# sum = sum - i
# ```
# ** Exercise **:
# - State whether the following code fragments are functionally equivalent:
#
# ```
# i = 0
# s = 0
# for i in range(0, 9999):
# if i % 4 != 0:
# s += i
# ```
#
# ```
# i = 0
# s = 0
# while i < 9999:
# if i % 4 != 0:
# s += i
# i +=1
# ```
#
# ```
# i = 0
# s = 0
# while ++i < 9999:
# if i % 4 != 0:
# s += i
# ```
#
# ### Accidental Infinite loops
# Sometimes we want a while loop to run until a defined action is completed, such as emptying out a list. Sometimes we want a loop to run for an unknown period of time, for example when we are allowing users to give as much input as they want. What we rarely want, however, is a true 'runaway' infinite loop.
#
# Take a look at the following example. Can you pick out why this loop will never stop?
# ```python
# # /////////////////////////////////////////
# # /// don't execute this piece of code! ///
# # /////////////////////////////////////////
#
#
# current_number = 1
#
# # Count up to 5, printing the number each time.
# while current_number <= 5:
# print(current_number)
# ```
# Faked output for the runaway loop shown above (these lines are not
# real program code):
1
1
1
1
1
...
# I faked that output, because if I ran it the output would fill up the browser. You can try to run it on your computer, as long as you know how to interrupt runaway processes:
#
# - On most systems, Ctrl-C will interrupt the currently running program.
# - If you are using Geany, your output is displayed in a popup terminal window. You can either press Ctrl-C, or you can use your pointer to close the terminal window.
#
# The loop runs forever, because there is no way for the test condition to ever fail. The programmer probably meant to add a line that increments current_number by 1 each time through the loop:
# +
current_number = 1
# Count up to 5, printing the number each time.
while current_number <= 5:
    print(current_number)
    # Incrementing inside the loop lets the test eventually fail.
    current_number = current_number + 1
# -
# You will certainly make some loops run infinitely at some point. When you do, just interrupt the loop and figure out the logical error you made.
#
# Infinite loops will not be a real problem until you have users who run your programs on their machines. You won't want infinite loops then, because your users would have to shut down your program, and they would consider it buggy and unreliable. Learn to spot infinite loops, and make sure they don't pop up in your polished programs later on.
#
# Here is one more example of an accidental infinite loop:
# ```python
# # /////////////////////////////////////////
# # /// don't execute this piece of code! ///
# # /////////////////////////////////////////
#
#
#
# current_number = 1
#
# # Count up to 5, printing the number each time.
# while current_number <= 5:
# print(current_number)
# current_number = current_number - 1
# ```
# Faked output for the counting-down loop shown above (these lines are
# not real program code):
1
0
-1
-2
-3
...
# In this example, we accidentally started counting down. The value of `current_number` will always be less than 5, so the loop will run forever.
# ## Introducing Functions
# One of the core principles of any programming language is, "Don't Repeat Yourself". If you have an action that should occur many times, you can define that action once and then call that code whenever you need to carry out that action.
#
# We are already repeating ourselves in our code, so this is a good time to introduce simple functions. Functions mean less work for us as programmers, and effective use of functions results in code that is less error-prone.
# Functions are a set of actions that we group together, and give a name to. You have already used a number of functions from the core Python language, such as *string.title()* and *list.sort()*. We can define our own functions, which allows us to "teach" Python new behavior.
# ### General Syntax
# A general function looks something like this:
# +
# Let's define a function.
# NOTE: this is a syntax template, not runnable code -- the function body
# holds only comments, and value_1/value_2 are not defined anywhere.
def function_name(argument_1, argument_2):
    # Do whatever we want this function to do,
    # using argument_1 and argument_2
# Use function_name to call the function.
function_name(value_1, value_2)
# -
# This code will not run, but it shows how functions are used in general.
#
# - **Defining a function**
# - Give the keyword `def`, which tells Python that you are about to *define* a function.
# - Give your function a name. A variable name tells you what kind of value the variable contains; a function name should tell you what the function does.
# - Give names for each value the function needs in order to do its work.
# - These are basically variable names, but they are only used in the function.
# - They can be different names than what you use in the rest of your program.
# - These are called the function's *arguments*.
# - Make sure the function definition line ends with a colon.
# - Inside the function, write whatever code you need to make the function do its work.
# - **Using your function**
# - To *call* your function, write its name followed by parentheses.
# - Inside the parentheses, give the values you want the function to work with.
# - These can be variables such as `current_name` and `current_age`, or they can be actual values such as 'ada' and 5.
# +
# The same two-line message is repeated for each person -- repetition
# that a function removes, as shown in the next example.
print("You are doing good work, Adriana!")
print("Thank you very much for your efforts on this project.")
print("\nYou are doing good work, Billy!")
print("Thank you very much for your efforts on this project.")
print("\nYou are doing good work, Caroline!")
print("Thank you very much for your efforts on this project.")
# -
# Functions take repeated code, put it in one place, and then you call that code when you want to use it. Here's what the same program looks like with a function.
# +
def thank_you(name):
    # This function prints a two-line personalized thank you message.
    # name: the person being thanked, inserted into the first line.
    print("\nYou are doing good work, %s!" % name)
    print("Thank you very much for your efforts on this project.")
# Call the function once for each person we want to thank.
thank_you('Adriana')
thank_you('Billy')
thank_you('Caroline')
# -
# In our original code, each pair of print statements was run three times, and the only difference was the name of the person being thanked. When you see repetition like this, you can usually make your program more efficient by defining a function.
#
# The keyword *def* tells Python that we are about to define a function. We give our function a name, *thank\_you()* in this case. A variable's name should tell us what kind of information it holds; a function's name should tell us what the function does. We then put parentheses. Inside these parentheses we create variable names for any variable the function will need to be given in order to do its job. In this case the function will need a name to include in the thank you message. The variable `name` will hold the value that is passed into the function *thank\_you()*.
#
# To use a function we give the function's name, and then put any values the function needs in order to do its work. In this case we call the function three times, each time passing it a different name.
# ### A common error
# A function must be defined before you use it in your program. For example, putting the function at the end of the program would not work.
# +
# Calling thank_you() before it is defined raises a NameError -- Python
# executes a file top to bottom and does not know the name yet. This
# cell intentionally demonstrates that error.
thank_you('Adriana')
thank_you('Billy')
thank_you('Caroline')
def thank_you(name):
    # This function prints a two-line personalized thank you message.
    print("\nYou are doing good work, %s!" % name)
    print("Thank you very much for your efforts on this project.")
# -
# On the first line we ask Python to run the function *thank\_you()*, but Python does not yet know how to do this function. We define our functions at the beginning of our programs, and then we can use them when we need to.
# A second example
# ---
# When we introduced the different methods for [sorting a list](Python%20-%20Hands-on%20Introduction%20to%20Python%20and%20Machine%20Learning.ipynb#Sorting-a-List), our code got very repetitive. It takes two lines of code to print a list using a for loop, so these two lines are repeated whenever you want to print out the contents of a list. This is the perfect opportunity to use a function, so let's see how the code looks with a function.
#
# First, let's see the code we had without a function:
# +
students = ['bernice', 'aaron', 'cody']
# Put students in alphabetical order.
students.sort()
# Display the list in its current order.
# (This print-the-list loop is repeated verbatim below -- the repetition
# the function version removes.)
print("Our students are currently in alphabetical order.")
for student in students:
    print(student.title())
# Put students in reverse alphabetical order.
students.sort(reverse=True)
# Display the list in its current order.
print("\nOur students are now in reverse alphabetical order.")
for student in students:
    print(student.title())
# -
# Here's what the same code looks like, using a function to print out the list:
# +
def show_students(students, message):
    # Print out a message, and then the list of students
    # students: a list of lowercase name strings.
    # message: a header line printed before the names.
    print(message)
    for student in students:
        print(student.title())
students = ['bernice', 'aaron', 'cody']
# Put students in alphabetical order.
students.sort()
show_students(students, "Our students are currently in alphabetical order.")
#Put students in reverse alphabetical order.
students.sort(reverse=True)
show_students(students, "\nOur students are now in reverse alphabetical order.")
# -
# This is much cleaner code. We have an action we want to take, which is to show the students in our list along with a message. We give this action a name, *show\_students()*.
#
# This function needs two pieces of information to do its work, the list of students and a message to display. Inside the function, the code for printing the message and looping through the list is exactly as it was in the non-function code.
#
# Now the rest of our program is cleaner, because it gets to focus on the things we are changing in the list, rather than having code for printing the list. We define the list, then we sort it and call our function to print the list. We sort it again, and then call the printing function a second time, with a different message. This is much more readable code.
#
# ### Advantages of using functions
# You might be able to see some advantages of using functions, through this example:
#
# - We write a set of instructions once. We save some work in this simple example, and we save even more work in larger programs.
# - When our function works, we don't have to worry about that code anymore. Every time you repeat code in your program, you introduce an opportunity to make a mistake. Writing a function means there is one place to fix mistakes, and when those bugs are fixed, we can be confident that this function will continue to work correctly.
# - We can modify our function's behavior, and that change takes effect every time the function is called. This is much better than deciding we need some new behavior, and then having to change code in many different places in our program.
# For a quick example, let's say we decide our printed output would look better with some form of a bulleted list. Without functions, we'd have to change each print statement. With a function, we change just the print statement in the function:
# +
def show_students(students, message):
    # Print out a message, and then the list of students
    # students: a list of lowercase name strings.
    # message: a header line printed before the names.
    print(message)
    for student in students:
        # One change here -- a leading hyphen -- updates every printout.
        print("- " + student.title())
students = ['bernice', 'aaron', 'cody']
# Put students in alphabetical order.
students.sort()
show_students(students, "Our students are currently in alphabetical order.")
#Put students in reverse alphabetical order.
students.sort(reverse=True)
show_students(students, "\nOur students are now in reverse alphabetical order.")
# -
# You can think of functions as a way to "teach" Python some new behavior. In this case, we taught Python how to create a list of students using hyphens; now we can tell Python to do this with our students whenever we want to.
# ### Returning a Value
# Each function you create can return a value. This can be in addition to the primary work the function does, or it can be the function's main job. The following function takes in a number, and returns the corresponding word for that number:
# +
def get_number_word(number):
    # Takes in a numerical value, and returns
    # the word corresponding to that number.
    # Numbers outside 1-3 fall through every branch, so the function
    # implicitly returns None -- that is the logical error discussed
    # below (0 prints as "0 None").
    if number == 1:
        return 'one'
    elif number == 2:
        return 'two'
    elif number == 3:
        return 'three'
    # ...
# Let's try out our function.
for current_number in range(0,4):
    number_word = get_number_word(current_number)
    print(current_number, number_word)
# -
# It's helpful sometimes to see programs that don't quite work as they are supposed to, and then see how those programs can be improved. In this case, there are no Python errors; all of the code has proper Python syntax. But there is a logical error, in the first line of the output.
#
# We want to either not include 0 in the range we send to the function, or have the function return something other than `None` when it receives a value that it doesn't know. Let's teach our function the word 'zero', but let's also add an `else` clause that returns a more informative message for numbers that are not in the if-chain.
# +
def get_number_word(number):
    # Takes in a numerical value, and returns
    # the word corresponding to that number.
    if number == 0:
        return 'zero'
    elif number == 1:
        return 'one'
    elif number == 2:
        return 'two'
    elif number == 3:
        return 'three'
    else:
        # Catch-all for numbers the if-chain does not know.
        return "I'm sorry, I don't know that number."
# Let's try out our function.
for current_number in range(0,6):
    number_word = get_number_word(current_number)
    print(current_number, number_word)
# -
# If you use a return statement in one of your functions, keep in mind that the function stops executing as soon as it hits a return statement. For example, we can add a line to the *get\_number\_word()* function that will never execute, because it comes after the function has returned a value:
# +
def get_number_word(number):
    # Takes in a numerical value, and returns
    # the word corresponding to that number.
    # Every branch returns, so execution never reaches the final print.
    if number == 0:
        return 'zero'
    elif number == 1:
        return 'one'
    elif number == 2:
        return 'two'
    elif number == 3:
        return 'three'
    else:
        return "I'm sorry, I don't know that number."
    # This line will never execute, because the function has already
    # returned a value and stopped executing.
    print("This message will never be printed.")
# Let's try out our function.
for current_number in range(0,6):
    number_word = get_number_word(current_number)
    print(current_number, number_word)
# -
# ### More Later
# There is much more to learn about functions, but we will get to those details later. For now, feel free to use functions whenever you find yourself writing the same code several times in a program. Some of the things you will learn when we focus on functions:
#
# - How to give the arguments in your function default values.
# - How to let your functions accept different numbers of arguments.
# ## User input
# Almost all interesting programs accept input from the user at some point. You can start accepting user input in your programs by using the `input()` function. The input function displays a message to the user describing the kind of input you are looking for, and then it waits for the user to enter a value. When the user presses Enter, the value is passed to your variable.
# ### General syntax
# The general case for accepting input looks something like this:
# Get some input from the user.
# Note: input() always returns a string, even if the user types a number.
variable = input('Please enter a value: ')
# Do something with the value that was entered.
# You need a variable that will hold whatever value the user enters, and you need a message that will be displayed to the user.
# <a id="Example-input"></a>
# In the following example, we have a list of names. We ask the user for a name, and we add it to our list of names.
# +
# Start with a list containing several names.
names = ['guido', 'tim', 'jesse']
# Ask the user for a name.
new_name = input("Please tell me someone I should know: ")
# Add the new name to our list.
names.append(new_name)
# Show that the name has been added to the list.
print(names)
# -
# ### Using while loops to keep your programs running
# Most of the programs we use every day run until we tell them to quit, and in the background this is often done with a while loop. Here is an example of how to let the user enter an arbitrary number of names.
# +
# Start with an empty list. You can 'seed' the list with
# some predefined values if you like.
names = []
# Set new_name to something other than 'quit'.
new_name = ''
# Start a loop that will run until the user enters 'quit'.
while new_name != 'quit':
    # Ask the user for a name.
    new_name = input("Please tell me someone I should know, or enter 'quit': ")
    # Add the new name to our list.
    # Note: 'quit' itself gets appended before the loop test runs again;
    # the next example fixes that with an if test.
    names.append(new_name)
# Show that the name has been added to the list.
print(names)
# -
# That worked, except we ended up with the name 'quit' in our list. We can use a simple `if` test to eliminate this bug:
# +
# Start with an empty list. You can 'seed' the list with
# some predefined values if you like.
names = []
# Set new_name to something other than 'quit'.
new_name = ''
# Start a loop that will run until the user enters 'quit'.
while new_name != 'quit':
    # Ask the user for a name.
    new_name = input("Please tell me someone I should know, or enter 'quit': ")
    # Add the new name to our list, but keep the sentinel value
    # 'quit' itself out of the list.
    if new_name != 'quit':
        names.append(new_name)
# Show that the name has been added to the list.
print(names)
# -
# This is pretty cool! We now have a way to accept input from users while our programs run, and we have a way to let our programs run until our users are finished working.
# #### Using while loops to make menus
# You now have enough Python under your belt to offer users a set of choices, and then respond to those choices until they choose to quit. Let's look at a simple example, and then analyze the code:
# +
# Give the user some context.
print("\nWelcome to the nature center. What would you like to do?")
# Set an initial value for choice other than the value for 'quit'.
choice = ''
# Start a loop that runs until the user enters the value for 'quit'.
while choice != 'q':
    # Give all the choices in a series of print statements.
    print("\n[1] Enter 1 to take a bicycle ride.")
    print("[2] Enter 2 to go for a run.")
    print("[3] Enter 3 to climb a mountain.")
    print("[q] Enter q to quit.")
    # Ask for the user's choice.
    choice = input("\nWhat would you like to do? ")
    # Respond to the user's choice.
    if choice == '1':
        print("\nHere's a bicycle. Have fun!\n")
    elif choice == '2':
        print("\nHere are some running shoes. Run fast!\n")
    elif choice == '3':
        print("\nHere's a map. Can you leave a trip plan for us?\n")
    elif choice == 'q':
        print("\nThanks for playing. See you later.\n")
    else:
        # Anything unrecognized falls through to this catch-all,
        # and the loop shows the menu again.
        print("\nI don't understand that choice, please try again.\n")
# Print a message that we are all finished.
print("Thanks again, bye now.")
# -
# Our programs are getting rich enough now, that we could do many different things with them. Let's clean this up in one really useful way. There are three main choices here, so let's define a function for each of those items. This way, our menu code remains really simple even as we add more complicated code to the actions of riding a bicycle, going for a run, or climbing a mountain.
# +
# Define the actions for each choice we want to offer.
def ride_bicycle():
    """Respond to the 'take a bicycle ride' menu choice."""
    print("\nHere's a bicycle. Have fun!\n")

def go_running():
    """Respond to the 'go for a run' menu choice."""
    print("\nHere are some running shoes. Run fast!\n")

def climb_mountain():
    """Respond to the 'climb a mountain' menu choice."""
    print("\nHere's a map. Can you leave a trip plan for us?\n")
# Map each action choice to the function that carries it out.
menu_actions = {
    '1': ride_bicycle,
    '2': go_running,
    '3': climb_mountain,
}

# Give the user some context.
print("\nWelcome to the nature center. What would you like to do?")

# Seed choice with something other than the quit value so the loop starts.
choice = ''

# Keep offering the menu until the user enters 'q'.
while choice != 'q':
    # Present all the options.
    print("\n[1] Enter 1 to take a bicycle ride.")
    print("[2] Enter 2 to go for a run.")
    print("[3] Enter 3 to climb a mountain.")
    print("[q] Enter q to quit.")

    # Ask for the user's choice.
    choice = input("\nWhat would you like to do? ")

    # Dispatch to the chosen action, or handle quit/unknown input.
    if choice in menu_actions:
        menu_actions[choice]()
    elif choice == 'q':
        print("\nThanks for playing. See you later.\n")
    else:
        print("\nI don't understand that choice, please try again.\n")

# Print a message that we are all finished.
print("Thanks again, bye now.")
# -
# This is much cleaner code, and it gives us space to separate the details of taking an action from the act of choosing that action.
# ### Using while loops to process items in a list
# In the section on Lists, you saw that we can `pop()` items from a list. You can use a while loop to pop items one at a time from one list, and work with them in whatever way you need. Let's look at an example where we process a list of unconfirmed users.
# +
# Start with a list of unconfirmed users, and an empty list of confirmed users.
unconfirmed_users = ['ada', 'billy', 'clarence', 'daria']
confirmed_users = []

# Confirm users until none remain unconfirmed. pop() with no argument
# removes from the END of the list, so the most recently added user is
# processed first (LIFO order).
while unconfirmed_users:
    pending = unconfirmed_users.pop()
    print("Confirming user %s...confirmed!" % pending.title())
    confirmed_users.append(pending)

# Prove that we have finished confirming all users.
print("\nUnconfirmed users:")
for name in unconfirmed_users:
    print('- ' + name.title())

print("\nConfirmed users:")
for name in confirmed_users:
    print('- ' + name.title())
# -
# This works, but let's make one small improvement. The current program always works with the most recently added user. If users are joining faster than we can confirm them, we will leave some users behind. If we want to work on a 'first come, first served' model, or a 'first in first out' model, we can pop the first item in the list each time.
# +
# Start with a list of unconfirmed users, and an empty list of confirmed users.
unconfirmed_users = ['ada', 'billy', 'clarence', 'daria']
confirmed_users = []
# Work through the list, and confirm each user.
while len(unconfirmed_users) > 0:
# Get the latest unconfirmed user, and process them.
current_user = unconfirmed_users.pop(0)
print("Confirming user %s...confirmed!" % current_user.title())
# Move the current user to the list of confirmed users.
confirmed_users.append(current_user)
# Prove that we have finished confirming all users.
print("\nUnconfirmed users:")
for user in unconfirmed_users:
print('- ' + user.title())
print("\nConfirmed users:")
for user in confirmed_users:
print('- ' + user.title())
# -
# This is a little nicer, because we are sure to get to everyone, even when our program is running under a heavy load. We also preserve the order of people as they join our project. Notice that this all came about by adding *one character* to our program!
# ** Exercise **:
# - Write a program that asks the user to enter a string and do the followings:
# 1. Determine the length of the string, $l$
# 2. Set $n = l$
# 3. Repeat printing the string for $n$ times on the same new line
# 4. Decrement $n$ by 1
# 5. Repeat Step 3 to 4 until $n$ is zero
#
# (This is just the *pseudo code*. You may use other methods and approaches to achieve the goal.)
#
# For example, if the user enters -\_-, the result should be:
# ```
# -_--_--_-
# -_--_-
# -_-
# ```
# If the user enters 囧囧囧, the result has to be:
# ```
# 囧囧囧囧囧囧囧囧囧
# 囧囧囧囧囧囧
# 囧囧囧
# ```
# ** Exercise **:
# - Write a function to take a string input from the user, and return it
# - Write a function <code>encrypt(input)</code> that takes a string, and encrypt it using the following algorithm:
# - For each letter $c$, if $c \in \{a \dots y\}$ or if $c \in \{A \dots Y\}$ , shift it to the next character. That means, for instances, 'a' in the original string will be changed to 'b', 'Y' will be changed to 'Z'.
# - Other letters (including punctuation marks) remain unchanged in the result.
#
# For example, given "I go to school by bus", the result is "J hp up tdippm cz cvt".
# ** Exercise **:
# - Write a function to compute the mean of a list of integers
# - Write a function to compute the standard deviation of a list of integers
# - Write a function to compute the maximum of a list of integers
# - Write a function to compute the minimum of a list of integers
# - Write a function to compute the median of a list of integers
| Python - Hands-on Introduction to Python And Machine Learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: CMB 20191114
# language: python
# name: cmbenv-20191114
# ---
# # Template for hackathon projects
#
# This notebook simulates a generic ground experiment in a way that can be easily customized for your project
# +
import os
import sys
import healpy as hp
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np

import toast
import toast.pipeline_tools
from toast.mpi import MPI

# Are you using a special reservation for a workshop?
# If so, set it here:
nersc_reservation = None

# Load common tools for all lessons
import sys  # NOTE(review): duplicate of the earlier "import sys"; harmless but redundant
sys.path.insert(0, "../lessons")
from lesson_tools import (
    check_nersc,
    fake_focalplane
)
nersc_host, nersc_repo, nersc_resv = check_nersc(reservation=nersc_reservation)

# Capture C++ output in the jupyter cells
# %reload_ext wurlitzer

# Download the Atacama weather file once, only if it is not already present.
! [[ ! -e weather_Atacama.fits ]] && wget http://portal.nersc.gov/project/cmb/toast_data/example_data/weather_Atacama.fits
# -
# ## Parameters
#
# These arguments control the entire notebook
class args:
    """Namespace of notebook-wide parameters, mimicking an argparse result
    for the toast.pipeline_tools helpers (load_schedule, load_weather, ...)."""
    split_schedule = None
    schedule = "schedule.txt"        # written by the scheduler cell below
    sort_schedule = False  # Matters for parallelization
    weather = "weather_Atacama.fits"
    sample_rate = 10  # Hz
    # Noise parameters
    fknee = 1.0   # knee frequency of the 1/f noise -- presumably Hz; confirm units
    alpha = 2     # noise spectral slope
    # Scanning parameters
    scan_rate = 1.0  # deg / s
    scan_accel = 1.0  # deg / s^2
    # half-wave plate (all None -> HWP rotation/stepping disabled)
    hwp_rpm = None
    hwp_step_deg = None
    hwp_step_time_s = None
    fov = 3.0  # Field-of-view in degrees
    # Projection parameters
    coord = "C"    # "C" = celestial/equatorial coordinates
    nside = 64     # healpix map resolution
    mode = "IQU"   # Stokes components to produce
    outdir = "maps"
# ## Observing schedule
#
# We write the scheduler parameters to file. These arguments produce one constant elevation scan but you can easily modify that.
# %%writefile schedule.par
--site-lat
-22.958064
--site-lon
-67.786222
--site-alt
5200
--site-name
Atacama
--telescope
LAT
--start
2020-01-01 00:00:00
--stop
2020-01-02 00:00:00
--patch-coord
C
--patch
small_patch,1,40,-40,10
--ces-max-time
86400
--out
schedule.txt
# Now run the scheduler. The observing schedule will end up in `schedule.txt`.
# ! toast_ground_schedule.py @schedule.par
# ## Focalplane
# Build a Focalplane object from a fake (simulated) detector layout,
# using the field-of-view, noise and sampling parameters defined above.
focalplane = toast.pipeline_tools.Focalplane(
    fake_focalplane(fov=args.fov, fknee=args.fknee, alpha=args.alpha),
    sample_rate=args.sample_rate,
)
# ## TODGround to `toast.data`
# +
# Discover the MPI runtime and wrap it in a toast communicator.
# mpiworld is None when running without MPI (serial mode).
mpiworld, procs, rank = toast.mpi.get_world()
comm = toast.mpi.Comm(mpiworld)

# Load the observing schedule, append weather and focalplane to it
schedules = toast.pipeline_tools.load_schedule(args, comm)
toast.pipeline_tools.load_weather(args, comm, schedules)

# There could be more than one observing schedule, but not this time
schedule = schedules[0]
schedule.telescope.focalplane = focalplane

# Useful shorthands in what follows
telescope = schedule.telescope
site = telescope.site

# Create TODGround objects based on the entries in the schedule
data = toast.Data(comm)
for ces in schedule.ceslist:
    # Total number of samples in this constant elevation scan (CES).
    totsamples = int((ces.stop_time - ces.start_time) * args.sample_rate)

    # Simulated ground-telescope pointing/timing for this CES.
    tod = toast.todmap.TODGround(
        comm.comm_group,
        focalplane.detquats,
        totsamples,
        # Spread detectors over all ranks of the group (1 when serial).
        detranks=(1 if comm.comm_group is None else comm.comm_group.size),
        firsttime=ces.start_time,
        rate=args.sample_rate,
        site_lon=site.lon,
        site_lat=site.lat,
        site_alt=site.alt,
        azmin=ces.azmin,
        azmax=ces.azmax,
        el=ces.el,
        scanrate=args.scan_rate,
        scan_accel=args.scan_accel,
        coord=args.coord,
        hwprpm=args.hwp_rpm,
        hwpstep=args.hwp_step_deg,
        hwpsteptime=args.hwp_step_time_s,
    )

    # Now embed the TOD in an observation dictionary and add other necessary metadata
    obs = {}
    # Human-readable, unique observation name.
    obs["name"] = "CES-{}-{}-{}-{}-{}".format(
        site.name, telescope.name, ces.name, ces.scan, ces.subscan
    )
    obs["tod"] = tod
    obs["noise"] = focalplane.noise
    # Integer observation ID derived from the start MJD.
    obs["id"] = int(ces.mjdstart * 10000)
    obs["intervals"] = tod.subscans
    obs["site"] = site
    obs["site_name"] = site.name
    obs["site_id"] = site.id
    obs["altitude"] = site.alt
    obs["weather"] = site.weather
    obs["telescope"] = telescope
    obs["telescope_name"] = telescope.name
    obs["telescope_id"] = telescope.id
    obs["focalplane"] = focalplane.detector_data
    obs["fpradius"] = focalplane.radius
    obs["start_time"] = ces.start_time
    obs["season"] = ces.season
    obs["date"] = ces.start_date
    obs["MJD"] = ces.mjdstart
    obs["rising"] = ces.rising
    obs["mindist_sun"] = ces.mindist_sun
    obs["mindist_moon"] = ces.mindist_moon
    obs["el_sun"] = ces.el_sun

    # And append the observation to the list of observations
    data.obs.append(obs)
# -
# ## Pointing matrix
#
# Here we translate the boresight quaternions into detector pointing (pixels numbers and Stokes weights).
# Expand boresight pointing into per-detector healpix pixel numbers and
# Stokes weights (NESTED pixel ordering).
toast.todmap.OpPointingHpix(nside=args.nside, nest=True, mode=args.mode).exec(data)

# Make a boolean hit map for diagnostics
npix = 12 * args.nside ** 2
hitmap = np.zeros(npix)
for obs in data.obs:
    tod = obs["tod"]
    for det in tod.local_dets:
        # Pixel indices cached by OpPointingHpix above.
        pixels = tod.cache.reference("pixels_{}".format(det))
        hitmap[pixels] = 1
# Mark never-hit pixels as UNSEEN so mollview greys them out.
hitmap[hitmap == 0] = hp.UNSEEN
hp.mollview(hitmap, nest=True, title="all hit pixels", cbar=False)
hp.graticule(22.5, verbose=False)
# ## Sky signal
#
# Create a synthetic Gaussian map to scan as input signal
# Build a synthetic Gaussian sky: only the first (TT) row of the power
# spectrum is non-zero, flat up to lmax. synfast with new=True returns
# the [I, Q, U] maps.
lmax = args.nside * 2
cls = np.zeros([4, lmax + 1])
cls[0] = 1e0
sim_map = hp.synfast(cls, args.nside, lmax=lmax, fwhm=np.radians(15), new=True)
plt.figure(figsize=[12, 8])
for i, m in enumerate(sim_map):
    hp.mollview(sim_map[i], cmap="coolwarm", title="Input signal {}".format("IQU"[i]), sub=[1, 3, 1+i])
# Save in NESTED ordering to match the pointing matrix created above.
hp.write_map("sim_map.fits", hp.reorder(sim_map, r2n=True), nest=True, overwrite=True)
# Scan the sky signal
# +
full_name = "signal"
sky_name = "sky_signal"
# Clear any lingering sky signal from the buffers
toast.tod.OpCacheClear(full_name).exec(data)
distmap = toast.map.DistPixels(
data,
nnz=len(args.mode),
dtype=np.float32,
)
distmap.read_healpix_fits("sim_map.fits")
toast.todmap.OpSimScan(distmap=distmap, out=full_name).exec(data)
# Copy the sky signal, just in case we need it later
toast.tod.OpCacheCopy(input=full_name, output=sky_name, force=True).exec(data)
# -
# ## Noise
#
# Simulate noise and make a copy of signal+noise in case we need it later
# +
copy_name = "signal_copy"  # cache prefix for the signal+noise snapshot

# Add simulated detector noise (realization 0) on top of the sky signal,
# then snapshot the result so later cells can restore it.
toast.tod.OpSimNoise(out=full_name, realization=0).exec(data)
toast.tod.OpCacheCopy(input=full_name, output=copy_name, force=True).exec(data)
# -
# ## Your own operator here
#
# Here we define an empty operator you can work with
class MyOperator(toast.Operator):
    """Template no-op operator.

    Walks every locally held detector timestream under the configured
    cache prefix without modifying anything -- replace the inner loop
    body with your own processing.
    """

    def __init__(self, name="signal"):
        """Arguments:
        name(str) : Cache prefix to operate on
        """
        self._name = name

    def exec(self, data):
        """Visit each local observation and detector; read but do not touch the signal."""
        for observation in data.obs:
            timestream = observation["tod"]
            for detector in timestream.local_dets:
                _ = timestream.local_signal(detector, self._name)
# Then we apply the operator to the data
# Restore the pristine signal+noise snapshot, then run the (no-op) operator on it.
toast.tod.OpCacheCopy(input=copy_name, output=full_name, force=True).exec(data)
MyOperator(name=full_name).exec(data)
# Plot a short segment of the signal before and after the operator
# +
# Plot the first 1000 (non-turnaround) samples of every local detector
# before and after MyOperator.
# NOTE(review): assumes at most 16 local detectors (4x4 subplot grid) --
# confirm against the fake_focalplane size.
tod = data.obs[0]["tod"]
times = tod.local_times()
fig = plt.figure(figsize=[12, 8])
for idet, det in enumerate(tod.local_dets):
    cflags = tod.local_common_flags()
    before = tod.local_signal(det, copy_name)
    after = tod.local_signal(det, full_name)
    # Only plot the first 1000 samples.
    ind = slice(0, 1000)
    # Flag out turnarounds
    good = (cflags[ind] & tod.TURNAROUND) == 0
    ax = fig.add_subplot(4, 4, 1 + idet)
    ax.set_title(det)
    ax.plot(times[ind][good], before[ind][good], '.', label="before")
    ax.plot(times[ind][good], after[ind][good], '.', label="after")
    ax.legend(bbox_to_anchor=(1.1, 1.00))
fig.subplots_adjust(hspace=0.6)
# -
# ## Make a map
#
# Destripe the signal and make a map. We use the nascent TOAST mapmaker because it can be run in serial mode without MPI. The TOAST mapmaker is still significantly slower so production runs should used `libMadam`.
# +
# Always begin mapmaking by copying the simulated signal.
# Always begin mapmaking by copying the simulated signal.
toast.tod.OpCacheCopy(input=copy_name, output=full_name, force=True).exec(data)

# Destripe and bin I/Q/U maps into args.outdir.
# NOTE(review): baseline_length=10 -- presumably seconds; confirm in the
# OpMapMaker documentation.
mapmaker = toast.todmap.OpMapMaker(
    nside=args.nside,
    nnz=3,
    name=full_name,
    outdir=args.outdir,
    outprefix="toast_test_",
    baseline_length=10,
    iter_max=100,
    use_noise_prior=False,
)
mapmaker.exec(data)
# -
# Plot a segment of the timelines
# +
# Decompose the first 1000 samples of each detector into sky, noise and
# the baselines removed by destriping.
tod = data.obs[0]["tod"]
times = tod.local_times()
fig = plt.figure(figsize=[12, 8])
for idet, det in enumerate(tod.local_dets):
    sky = tod.local_signal(det, sky_name)       # pure sky signal
    full = tod.local_signal(det, copy_name)     # sky + noise snapshot
    cleaned = tod.local_signal(det, full_name)  # after mapmaker destriping
    ind = slice(0, 1000)
    ax = fig.add_subplot(4, 4, 1 + idet)
    ax.set_title(det)
    ax.plot(times[ind], sky[ind], '.', label="sky", zorder=100)
    ax.plot(times[ind], full[ind] - sky[ind], '.', label="noise")
    ax.plot(times[ind], full[ind] - cleaned[ind], '.', label="baselines")
    ax.legend(bbox_to_anchor=(1.1, 1.00))
fig.subplots_adjust(hspace=0.6)
# -
# Same decomposition, but over the full observation length.
# NOTE: relies on `tod` and `times` left over from the previous cell.
fig = plt.figure(figsize=[12, 8])
for idet, det in enumerate(tod.local_dets):
    sky = tod.local_signal(det, sky_name)
    full = tod.local_signal(det, copy_name)
    cleaned = tod.local_signal(det, full_name)
    ax = fig.add_subplot(4, 4, 1 + idet)
    ax.set_title(det)
    #plt.plot(times[ind], sky[ind], '-', label="signal", zorder=100)
    plt.plot(times, full - sky, '.', label="noise")
    plt.plot(times, full - cleaned, '.', label="baselines")
    ax.legend(bbox_to_anchor=(1.1, 1.00))
fig.subplots_adjust(hspace=.6)
# +
plt.figure(figsize=[16, 8])

# Hit counts per pixel; grey out unobserved pixels.
hitmap = hp.read_map("maps/toast_test_hits.fits", verbose=False)
hitmap[hitmap == 0] = hp.UNSEEN
hp.mollview(hitmap, sub=[2, 2, 1], title="hits")

binmap = hp.read_map("maps/toast_test_binned.fits", verbose=False)
binmap[binmap == 0] = hp.UNSEEN
hp.mollview(binmap, sub=[2, 2, 2], title="binned map", cmap="coolwarm")

# Fix the plotting range for input signal and the destriped map
amp = 3.0
destriped = hp.read_map("maps/toast_test_destriped.fits", verbose=False)
destriped[destriped == 0] = hp.UNSEEN
# Remove monopole
good = destriped != hp.UNSEEN
destriped[good] -= np.median(destriped[good])
hp.mollview(destriped, sub=[2, 2, 3], title="destriped map", cmap="coolwarm", min=-amp, max=amp)

# Input map masked to the observed pixels for a fair visual comparison.
inmap = hp.read_map("sim_map.fits", verbose=False)
inmap[hitmap == hp.UNSEEN] = hp.UNSEEN
hp.mollview(inmap, sub=[2, 2, 4], title="input map", cmap="coolwarm", min=-amp, max=amp)

# Total hits divided by 1.4e6.
# NOTE(review): magic constant -- presumably the expected total sample
# count; confirm before relying on this figure.
print(np.sum(hitmap[hitmap != hp.UNSEEN]) / 1400000.0)
# +
# Plot the white noise covariance
# The npp file holds the upper triangle of the per-pixel 3x3 covariance:
# columns II, IQ, IU, QQ, QU, UU.
plt.figure(figsize=[12, 8])
wcov = hp.read_map("maps/toast_test_npp.fits", None)
# Mask pixels with zero II variance (never observed).
wcov[:, wcov[0] == 0] = hp.UNSEEN
hp.mollview(wcov[0], sub=[3, 3, 1], title="II", cmap="coolwarm")
hp.mollview(wcov[1], sub=[3, 3, 2], title="IQ", cmap="coolwarm")
hp.mollview(wcov[2], sub=[3, 3, 3], title="IU", cmap="coolwarm")
hp.mollview(wcov[3], sub=[3, 3, 5], title="QQ", cmap="coolwarm")
hp.mollview(wcov[4], sub=[3, 3, 6], title="QU", cmap="coolwarm")
hp.mollview(wcov[5], sub=[3, 3, 9], title="UU", cmap="coolwarm")
# -
# ## Filter & bin
#
# A filter-and-bin mapmaker is easily created by combining TOAST filter operators and running the mapmaker without destriping:
# +
filtered_name = "filtered"  # cache prefix for the polynomial-filtered copy

# Work on a fresh copy of signal+noise, then subtract a 3rd-order
# polynomial fit from each detector stream.
# NOTE(review): presumably one polynomial per scan interval -- confirm in
# the OpPolyFilter documentation.
toast.tod.OpCacheCopy(input=copy_name, output=filtered_name, force=True).exec(data)
toast.tod.OpPolyFilter(order=3, name=filtered_name).exec(data)

# baseline_length=None disables destriping, so this produces a plain
# binned map of the filtered data (i.e. filter-and-bin mapmaking).
mapmaker = toast.todmap.OpMapMaker(
    nside=args.nside,
    nnz=len(args.mode),
    name=filtered_name,
    outdir=args.outdir,
    outprefix="toast_test_filtered_",
    baseline_length=None,
)
mapmaker.exec(data)

# Compare unfiltered binned, filtered binned and input maps side by side.
plt.figure(figsize=[16, 8])
binmap = hp.read_map("maps/toast_test_binned.fits", verbose=False)
binmap[binmap == 0] = hp.UNSEEN
hp.mollview(binmap, sub=[1, 3, 1], title="binned map", cmap="coolwarm")
filtered_map = hp.read_map("maps/toast_test_filtered_binned.fits", verbose=False)
filtered_map[filtered_map == 0] = hp.UNSEEN
hp.mollview(filtered_map, sub=[1, 3, 2], title="filtered map", cmap="coolwarm")
inmap = hp.read_map("sim_map.fits", verbose=False)
inmap[binmap == hp.UNSEEN] = hp.UNSEEN
hp.mollview(inmap, sub=[1, 3, 3], title="input map", cmap="coolwarm")
| hackathon/generic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook for cleaning the Kindle highlights header
#
# When you download your My clippings.txt file from your Kindle (see [save kindle highlights](http://blog.clippings.io/2015/11/15/how-to-save-kindle-highlights-in-personal-documents/)) ; each highlight is surrounded by
#
# ==========
# <the title of the book (the author)>
# - Votre surlignement à lʼemplacement 549-550 | Ajouté le lundi 19 septembre 2016 23:02:14
#
# <NAME> (<NAME>), <NAME>, and <NAME> come to mind as some of the most influential authors I’ve read.
#
# This notebook keeps the =========== separators but removes the title, the author, and the highlight location and date.
import re
import os
# ## Loading the clipping / highlights file
# +
# Load your clippings file. It is recommended to gather all the highlights
# of a single book into a single file.
# [TODO] interactive prompt to get the files paths and names
my_file_path = 'ressources/'
my_file_source_name = 'highlights_examples.txt'
my_file_target_name = 'highlights_examples_cleaned.txt'

# Derive the full source/target paths from the common directory.
my_source_file_path, my_target_file_path = (
    os.path.join(my_file_path, fname)
    for fname in (my_file_source_name, my_file_target_name)
)
# +
# Stream the source clippings into the cleaned target file.
#
# Kindle wraps each highlight with a "Title (Author)" line and a
# "- ... location | ... date HH:MM:SS" line; we drop both and keep
# everything else (including the ========== separators).
#
# Patterns are compiled once (raw strings avoid escape surprises) and
# both file handles are managed by ``with`` so they are closed even if
# an exception occurs mid-copy.
title_author_re = re.compile(r"\s\(.*\)$")
location_date_re = re.compile(r"^-.*\|.*\d{4}\s\d{2}:\d{2}:\d{2}$")

with open(my_source_file_path, 'r') as my_file, \
     open(my_target_file_path, 'w') as my_target_file:
    # Iterating the handle streams line by line instead of loading the
    # whole file with readlines().
    for line in my_file:
        if not (title_author_re.search(line) or location_date_re.search(line)):
            my_target_file.write(line)

#[TODO] output file source and target stats
#[TODO] clean remaining blank lines
| Kindle highlights header cleaner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.4 64-bit
# name: python374jvsc74a57bd0021d9f4f6a0c9e23e32c4246ac82593951ffad9baab3e58c0c69e8a8c06b339b
# ---
# +
def f1(cont=0):
    """Count cont up to 3, printing 1 at each step; returns 3 (the base case)."""
    if cont != 3:
        print(1)
        return f1(cont=cont + 1)
    # caso base: stop recursing once the counter reaches 3
    return cont


x = f1()
print(x)
# +
# 5! written out by hand: 5 * 4 * 3 * 2 * 1 = 120
5*4*3*2*1
# +
import math

# The standard library already provides an exact factorial.
math.factorial(5)
# +
def factorial_sin_contador(n):
    """Compute n! recursively without an accumulator ("sin contador").

    Assumes n >= 1; the recursion bottoms out at n == 1.
    """
    return n if n == 1 else n * factorial_sin_contador(n=n - 1)


factorial_sin_contador(n=5)
# +
def factor(num):
    # First attempt at an iterative factorial.
    # NOTE(review): x starts at num, so num is multiplied by itself before
    # counting down -- this returns num * num! (600 for num=5), not num!.
    # The corrected version (x = num - 1) appears in the next cell.
    x = num
    while x > 0:
        num = num*x
        x = x - 1
    return num

factor(5)
# +
def factor(num):
    """Iterative factorial: multiply num by every positive integer below it."""
    multiplier = num - 1
    while multiplier > 0:
        num *= multiplier
        multiplier -= 1
    return num


factor(5)
# +
def factorial(n, cont=1):
    """Tail-recursive factorial carrying the running product in ``cont``.

    Assumes n >= 1; ``cont`` is the accumulator, returned when n hits 1.
    """
    if n == 1:
        return cont
    return factorial(n=n - 1, cont=cont * n)


factorial(n=5)
# +
# Indentation demo: print(1) belongs to the if-block and is skipped
# because x != 1, while print("Hola") always runs.
# NOTE(review): indentation was lost in extraction; assuming
# print("Hola") sits outside the if-block -- confirm against the
# original notebook.
x = 2
if x == 1:
    print(1)
print("Hola")
# -
| week3_course_python_III/day4_python_X/theory/recursion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mechanisms of Action (MoA) Prediction - Final Classifier
# ## Test
#
# ### Introduction
# In this notebook, we will create a test data prediction pipeline for our final multi-label classifier in order to produce a submission file. In the prediction pipeline (depending on how effective it is) we will add the models created for our 0-label classifiers.
#
# Because our strategy was to conduct nested cross-validation with bayesian hyperparameter search in each fold, each fold may have produced a model using different parameters, so a dataframe will be imported with these parameters (written during the training process). With this, we can ensemble all the models created during nested cross-validation (some may be the same, some may be different!) as well as the models created in using different random states.
#
# Due to the computationally intensive nature of the nested cross-validation strategy, we don't end up with many models to ensemble. However, the expectation is that the few models we do have perform better, so there will be no need for more models to ensemble. Provided this is true, the test prediction pipeline is consequently much faster (as each test record doesn't need to be passed through a ridiculous number of models before a prediction is created).
#
# This entire process has been novel to me. I have realised the **need** for a robust and effective cross-validation strategy - this can make or break a data science experiment. So with that in mind, I have not been making any submissions to the Kaggle public test set (that is used for the public leaderboard). My hope is that by having such a robust cross-validation strategy, I don't *have to* constantly evaluate the model performance based on the leaderboard, which is essentially overfitting to the leaderboard test data.
#
# One aspect that can be improved with the training pipeline is the usage of a sub-validation set through Keras's fit method, rather than a sub-validation set of my own creation. Scaling, feature selection etc are all done without using the OOF validation set (hence nested cross-validation), but we can further improve this by not using sub-validation data too! Now we're getting serious about over-fitting...
#
# Also, there is certainly room for more research into machine learning based feature selection techniques for multi-label problems. Currently I employ a SelectFromModel technique *per label* (in a similar vein to one-vs-all classification). Storing the features selected for each label, by the end I rank the features in terms of how many times they were selected. The features to use in the final model are then defined by a parameter of num_features (that is included in our Bayesian hyperparameter search). This is rudimentary and could *definitely* be improved!
#
# ## 1.00 Import Packages
# +
# General packages
import pandas as pd
import numpy as np
import os
import gc
import random
from tqdm import tqdm, tqdm_notebook
import json # For reading in csv with string list representation values
import time
import warnings
warnings.filterwarnings('ignore')
# Data vis packages
import matplotlib.pyplot as plt
# %matplotlib inline
# Data prep
from sklearn.preprocessing import RobustScaler
from sklearn.decomposition import PCA
# Modelling packages
import tensorflow as tf
from tensorflow import keras
from tensorflow.python.keras import backend as k
# Key layers
from tensorflow.keras.models import load_model
# Cross validation
from sklearn.model_selection import KFold
from sklearn import metrics
# +
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
strategy = tf.distribute.get_strategy()
REPLICAS = strategy.num_replicas_in_sync
print(f'REPLICAS: {REPLICAS}')
# Data access
gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
# -
# ## 2.00 Read in Data
# +
# Directory and file paths
input_dir = '../input/lish-moa/'
train_features_path = os.path.join(input_dir, 'train_features.csv')
test_features_path = os.path.join(input_dir, 'test_features.csv')
train_targets_scored_path = os.path.join(input_dir, 'train_targets_scored.csv')
sample_submission_path = os.path.join(input_dir, 'sample_submission.csv')
# Read in data
train_features = pd.read_csv(train_features_path)
test_features = pd.read_csv(test_features_path)
train_targets_scored = pd.read_csv(train_targets_scored_path)
sample_submission = pd.read_csv(sample_submission_path)
del train_features_path, test_features_path, train_targets_scored_path, sample_submission_path
print(f'train_features shape: \t\t{train_features.shape}')
print(f'test_features shape: \t\t{test_features.shape}')
print(f'train_targets_scored shape: \t{train_targets_scored.shape}')
print(f'sample_submission shape: \t{sample_submission.shape}')
# +
# Define key parameters
# NOTE(review): SCALER_METHOD is instantiated here and presumably fitted
# downstream in the prediction pipeline -- confirm against the training
# notebook's preprocessing.
SCALER_METHOD = RobustScaler()
MODEL_TO_USE = 'nn'
MODEL_NAME = MODEL_TO_USE + '_final_classifier'
print(f'Model name: {MODEL_NAME}')
# -
# ## 3.00 Data Preparation
def get_transformed_row_features(df):
    """
    Add row-level statistical features to ``df`` (in place) and return it
    together with the original column index.

    For every row we compute sum/mean/median/std/min/max over
      (a) all numeric features except ``cp_time``,
      (b) the "g-" (gene-expression) features, and
      (c) the "c-" (cell-viability) features,
    then derive ratio features: family statistics over the overall
    statistics, g-family statistics over the matching c-family
    statistics, and every statistic over ``cp_time``.

    Bug fix vs. the original implementation: the "g stats / c stats"
    ratios previously divided each g statistic by *itself* (a constant 1
    per row); they now divide by the corresponding c statistic. Column
    names and creation order are unchanged for backward compatibility.

    Returns:
        (df, df_cols): the mutated dataframe and the pre-transformation
        column index (so later steps can refer back to the raw features).
    """
    stats = ['sum', 'mean', 'med', 'std', 'min', 'max']
    # Map the short stat label used in column names to the pandas method name.
    agg_of = {'sum': 'sum', 'mean': 'mean', 'med': 'median',
              'std': 'std', 'min': 'min', 'max': 'max'}

    # Capture the original columns so derived features are never computed
    # from other derived features.
    df_cols = df.columns

    # Numeric base frames: everything (minus cp_time), g- features, c- features.
    numeric = df[df_cols].select_dtypes('number').drop('cp_time', axis=1)
    frames = {
        '': numeric,
        '_g': numeric[numeric.columns[numeric.columns.str.startswith('g-')]],
        '_c': numeric[numeric.columns[numeric.columns.str.startswith('c-')]],
    }

    # Row statistics per feature family. Column creation order mirrors the
    # original implementation (it matters for downstream feature selection).
    for suffix, frame in frames.items():
        for stat in stats:
            df[f'row_{stat}{suffix}'] = getattr(frame, agg_of[stat])(axis=1)

    # Family statistics normalized by each overall statistic.
    for denom in stats:
        for suffix in ('_g', '_c'):
            for stat in stats:
                df[f'row_{stat}{suffix}_by_row_{denom}'] = (
                    df[f'row_{stat}{suffix}'] / df[f'row_{denom}']
                )

    # g-family statistics relative to the matching c-family statistics.
    # (Column names keep the original "row_sum_g_by_..." prefix even for
    # non-sum numerators, for backward compatibility with trained models.)
    for stat in stats:
        df[f'row_sum_g_by_row_{stat}_c'] = df[f'row_{stat}_g'] / df[f'row_{stat}_c']

    # All statistics normalized by treatment duration.
    for suffix in ('', '_g', '_c'):
        for stat in stats:
            df[f'row_{stat}{suffix}_by_cp_time'] = (
                df[f'row_{stat}{suffix}'] / df['cp_time']
            )

    return(df, df_cols)
def get_transformed_col_features(df, df_cols, stat, row_feat_type, col_feat_type, feature_name):
    """
    Add one transformed feature: a pre-computed row-level statistic divided by a
    column-level statistic computed over the original feature columns.

    Parameters
    ----------
    df : pd.DataFrame
        Dataset holding both the original columns and the pre-computed
        'row_<stat>[_g|_c]' features; mutated in place and also returned.
    df_cols : list of str
        Column names of the original dataset. The column statistic is computed
        over these only, so previously engineered features are excluded.
    stat : str ['sum','mean','med','std','min','max']
    row_feat_type : str [None,'g','c']
        Selects 'row_<stat>', 'row_<stat>_g' or 'row_<stat>_c' as the numerator.
    col_feat_type : str [None,'g','c']
        Restricts the column statistic to 'g-'/'c-' prefixed columns.
    feature_name : str
        Name of the new output column.

    Returns
    -------
    pd.DataFrame
        The same df with `feature_name` added.
    """
    # Map the stat keyword to its NumPy implementation. Each is applied to the
    # flattened values, yielding a single scalar over all selected cells.
    stat_funcs = {
        'sum': np.sum,
        'mean': np.mean,
        'med': np.median,
        'std': np.std,
        'min': np.min,
        'max': np.max,
    }
    def get_column_stat(df, stat, feat_type):
        """
        Return the scalar `stat` over the numerical columns of df (excluding
        'cp_time'), optionally restricted to 'g-'/'c-' prefixed columns.
        """
        # (The original also sliced object-dtype columns here in every branch;
        # that result was never used and has been removed.)
        df_numerical = df.select_dtypes('number').drop('cp_time', axis=1)
        if feat_type is not None:
            # Keep only the columns belonging to the requested feature family.
            prefix = feat_type + '-'
            df_numerical = df_numerical[
                df_numerical.columns[df_numerical.columns.str.startswith(prefix)]
            ]
        return stat_funcs[stat](df_numerical.values)
    # Column-level statistic over the original dataset columns only.
    col_stat = get_column_stat(df=df[df_cols], stat=stat, feat_type=col_feat_type)
    # Suffix selecting which pre-computed row statistic to divide by col_stat.
    suffix = '' if row_feat_type is None else '_' + row_feat_type
    df[feature_name] = df['row_' + stat + suffix] / col_stat
    return df
def transform_feature_set(X_train, X_test, y_train,
                          selected_features,
                          num_features,
                          pca,
                          num_components,
                          seed,
                          scaler=SCALER_METHOD,
                          verbose=0):
    """
    Takes in X_train and X_test datasets, and applies feature transformations,
    feature selection, scaling and pca (dependent on arguments).
    Returns transformed X_train and X_test data ready for training/prediction, and returns
    list of numerical cols and categorical cols, for the use of creating embeddings.

    Notes
    -----
    * X_train/X_test must contain a 'sig_id' column (dropped here) plus the
      raw g-/c- columns expected by the row/column feature engineering.
    * y_train, num_features, num_components and seed are accepted but not used
      directly in this function (kept for interface compatibility).
    * scaler defaults to the module-level SCALER_METHOD, bound at definition
      time. Pass scaler=None to skip scaling and pca=None to skip PCA.
    """
    ## DATA PREPARATION ##
    # Drop unique ID feature
    X_train = X_train.drop('sig_id', axis=1)
    X_test = X_test.drop('sig_id', axis=1)
    # Get indices for train and test dfs - we'll need these later
    # (scaler/PCA return numpy arrays; the indices restore DataFrame alignment)
    train_idx = list(X_train.index)
    test_idx = list(X_test.index)
    ## IN-FOLD FEATURE ENGINEERING ##
    if verbose == 1:
        print('ENGINGEERING FEATURES...')
    for X_dataset in [X_train, X_test]:
        # Row transformations.
        # NOTE(review): get_transformed_row_features appears to mutate
        # X_dataset in place; df/df_cols are rebound each iteration and the
        # df_cols of the LAST iteration (X_test) is reused below — this works
        # only because both frames share the same original columns. Confirm.
        df, df_cols = get_transformed_row_features(X_dataset)
    for X_dataset in [X_train, X_test]:
        # Total row stats / column stats
        get_transformed_col_features(X_dataset, df_cols, 'sum', None, None, 'row_sum_by_col_sum')
        get_transformed_col_features(X_dataset, df_cols, 'mean',None, None, 'row_mean_by_col_mean')
        get_transformed_col_features(X_dataset, df_cols, 'med', None, None, 'row_med_by_col_med')
        get_transformed_col_features(X_dataset, df_cols, 'std', None, None, 'row_std_by_col_std')
        get_transformed_col_features(X_dataset, df_cols, 'min', None, None, 'row_min_by_col_min')
        get_transformed_col_features(X_dataset, df_cols, 'max', None, None, 'row_max_by_col_max')
        # G features row stats / column stats
        get_transformed_col_features(X_dataset, df_cols, 'sum', 'g', None, 'row_sum_g_by_col_sum')
        get_transformed_col_features(X_dataset, df_cols, 'mean','g', None, 'row_mean_g_by_col_mean')
        get_transformed_col_features(X_dataset, df_cols, 'med', 'g', None, 'row_med_g_by_col_med')
        get_transformed_col_features(X_dataset, df_cols, 'std', 'g', None, 'row_std_g_by_col_std')
        get_transformed_col_features(X_dataset, df_cols, 'min', 'g', None, 'row_min_g_by_col_min')
        get_transformed_col_features(X_dataset, df_cols, 'max', 'g', None, 'row_max_g_by_col_max')
        # C features row stats / column stats
        get_transformed_col_features(X_dataset, df_cols, 'sum', 'c', None, 'row_sum_c_by_col_sum')
        get_transformed_col_features(X_dataset, df_cols, 'mean','c', None, 'row_mean_c_by_col_mean')
        get_transformed_col_features(X_dataset, df_cols, 'med', 'c', None, 'row_med_c_by_col_med')
        get_transformed_col_features(X_dataset, df_cols, 'std', 'c', None, 'row_std_c_by_col_std')
        get_transformed_col_features(X_dataset, df_cols, 'min', 'c', None, 'row_min_c_by_col_min')
        get_transformed_col_features(X_dataset, df_cols, 'max', 'c', None, 'row_max_c_by_col_max')
        # G features row stats / C features column stats
        get_transformed_col_features(X_dataset, df_cols, 'sum', 'g', 'c', 'row_sum_g_by_col_sum_c')
        get_transformed_col_features(X_dataset, df_cols, 'mean','g', 'c', 'row_mean_g_by_col_mean_c')
        get_transformed_col_features(X_dataset, df_cols, 'med', 'g', 'c', 'row_med_g_by_col_med_c')
        get_transformed_col_features(X_dataset, df_cols, 'std', 'g', 'c', 'row_std_g_by_col_std_c')
        get_transformed_col_features(X_dataset, df_cols, 'min', 'g', 'c', 'row_min_g_by_col_min_c')
        get_transformed_col_features(X_dataset, df_cols, 'max', 'g', 'c', 'row_max_g_by_col_max_c')
        # C features row stats / G features column stats
        get_transformed_col_features(X_dataset, df_cols, 'sum', 'c', 'g', 'row_sum_c_by_col_sum_g')
        get_transformed_col_features(X_dataset, df_cols, 'mean','c', 'g', 'row_mean_c_by_col_mean_g')
        get_transformed_col_features(X_dataset, df_cols, 'med', 'c', 'g', 'row_med_c_by_col_med_g')
        get_transformed_col_features(X_dataset, df_cols, 'std', 'c', 'g', 'row_std_c_by_col_std_g')
        get_transformed_col_features(X_dataset, df_cols, 'min', 'c', 'g', 'row_min_c_by_col_min_g')
        get_transformed_col_features(X_dataset, df_cols, 'max', 'c', 'g', 'row_max_c_by_col_max_g')
    # Replace any infinite values generated with 0
    # (the ratio features above can produce inf/NaN on zero denominators)
    X_train.replace(to_replace=[np.inf, -np.inf, np.nan], value=0, inplace=True)
    X_test.replace(to_replace=[np.inf, -np.inf, np.nan], value=0, inplace=True)
    # Separate train data types
    X_train_numerical = X_train.select_dtypes('number')
    X_train_categorical = X_train.select_dtypes('object')
    X_train_categorical = X_train_categorical.astype('category')
    # Separate val data types
    X_test_numerical = X_test.select_dtypes('number')
    X_test_categorical = X_test.select_dtypes('object')
    X_test_categorical = X_test_categorical.astype('category')
    # Get colnames before scaling and feature selection
    num_cols = X_train_numerical.columns
    cat_cols = X_train_categorical.columns
    ## SCALING ##
    if scaler != None:
        if verbose == 1:
            print('APPLYING SCALER...')
        # Fit scaler on train only, then transform both (avoids test leakage)
        scaler.fit(X_train_numerical)
        X_train_numerical = scaler.transform(X_train_numerical)
        X_test_numerical = scaler.transform(X_test_numerical)
        # Convert to back dataframe (the scaler returned numpy arrays)
        X_train_numerical = pd.DataFrame(X_train_numerical, index=train_idx, columns=num_cols)
        X_test_numerical = pd.DataFrame(X_test_numerical, index=test_idx, columns=num_cols)
    ## FEATURE SELECTION ##
    # Subset to features selected during train process (and stored in corresponding parameters file)
    if verbose == 1:
        print('APPLYING FEATURE SELECTOR...')
    # num_cols is temporarily rebound to a COUNT here, just for the print below
    num_cols = X_train_numerical.shape[1]
    # Subset datasets to selected features only
    X_train_numerical = X_train_numerical[selected_features]
    X_test_numerical = X_test_numerical[selected_features]
    if verbose == 1:
        print(f'{num_cols - X_train_numerical.shape[1]} features removed in feature selection.')
    del num_cols
    ## PCA ##
    if pca != None:
        if verbose == 1:
            print('APPLYING PCA...')
        # Fit pca on train only, then transform both
        pca.fit(X_train_numerical)
        X_train_numerical = pca.transform(X_train_numerical)
        X_test_numerical = pca.transform(X_test_numerical)
        if verbose == 1:
            print(f'NUMBER OF PRINCIPAL COMPONENTS: {pca.n_components_}')
        # Convert numerical features into pandas dataframe and clean colnames
        X_train_numerical = pd.DataFrame(X_train_numerical, index=train_idx).add_prefix('pca_')
        X_test_numerical = pd.DataFrame(X_test_numerical, index=test_idx).add_prefix('pca_')
    ## CATEGORICAL FEATURES ##
    # Get categorical and numerical column names (post-selection / post-PCA)
    num_cols = X_train_numerical.columns
    cat_cols = X_train_categorical.columns
    # Encode categorical features as integer codes (missing values become -1)
    X_train_categorical = X_train_categorical.apply(lambda x: x.cat.codes)
    X_test_categorical = X_test_categorical.apply(lambda x: x.cat.codes)
    # Concatenate transformed categorical features with transformed numerical features
    X_train = pd.concat([X_train_categorical, X_train_numerical], axis=1)
    X_test = pd.concat([X_test_categorical, X_test_numerical], axis=1)
    if verbose == 1:
        print(f'TRAIN SHAPE: \t\t{X_train.shape}')
        print(f'VALIDATION SHAPE: \t{X_test.shape}')
    return X_train, X_test, num_cols, cat_cols
# Use the full training feature set; drop the id column from the scored targets
X_train = train_features
y_train = train_targets_scored.drop('sig_id', axis=1)
# ## 4.00 Test Predictions
#
# Because in the model train pipeline, we performed in-fold Bayesian hyperparameter searches for each model, it is expected that the model architecture will be slightly different for each of the 10 folds. Consequently, we'll need to read in the csv of parameters to prepare the test prediction pipeline before we start to make the predictions (as we won't be able to feed in the same dataset into each model - differing transformations will be required per model).
#
# In future, I'd like to automate this step. In order to do this, more work will need to be carried out on the train notebook, but due to time constraints and resource limits, we will have to move on for now without making those amendments.
#
# ### 4.01 Prepare Prediction Pipeline
def make_test_predictions(X_test,
                          selected_features,
                          num_features,
                          num_components,
                          use_embedding,
                          seed,
                          kfold,
                          num_folds,
                          X_train=X_train,
                          y_train=y_train,
                          model_name=MODEL_NAME,
                          submission=sample_submission):
    """
    Reads in X_test feature set, loads the model specified by model_path, and
    applies transformations as per num_components and use_embedding.
    Returns the model's prediction array for X_test.

    NOTE(review): the X_train/y_train/model_name/submission defaults are
    module-level globals captured when this function is defined.
    """
    # Retrieve the dataframe ids that were used in kfold during cross validation (using specified seed)
    # NOTE(review): with the default shuffle=False, KFold produces sequential
    # splits and random_state has no effect (scikit-learn >= 0.24 even raises
    # ValueError for this combination). If the training CV used shuffle=True,
    # these splits will NOT match the training folds — confirm against the
    # train notebook before trusting these predictions.
    kf = KFold(n_splits=num_folds, random_state=seed)
    for fold, (tdx, vdx) in enumerate(kf.split(X_train, y_train)):
        if fold == kfold:
            # End the loop when it gets to kfold so we can retain tdx for kfold
            break
    # Instantiate PCA method
    pca = PCA(n_components=num_components, random_state=seed)
    # Subset X_train and y_train as per what occurred during cross validation for kfold and seed
    X_train, y_train = X_train.iloc[tdx, :], y_train.iloc[tdx, :]
    # Transform data - again to replicate what occurred with at kfold and seed
    X_train, X_test, num_cols, cat_cols = transform_feature_set(X_train = X_train,
                                                                X_test = X_test,
                                                                y_train = y_train,
                                                                selected_features = selected_features,
                                                                num_features = num_features,
                                                                pca = pca,
                                                                num_components = num_components,
                                                                seed = seed)
    # Further transformations if an embedding was used at kfold and seed
    if use_embedding == True:
        # Separate data to fit into embedding and numerical input layers.
        # np.absolute maps the -1 category code pandas assigns to missing
        # values onto 1 — presumably to keep embedding indices non-negative;
        # TODO confirm this matches the training pipeline.
        X_train = [np.absolute(X_train[i]) for i in cat_cols] + [X_train[num_cols]]
        X_test = [np.absolute(X_test[i]) for i in cat_cols] + [X_test[num_cols]]
    # Get the model name and file path for kfold and seed, then load that model
    model_name = model_name + '_seed' + str(seed)
    model_path = 'models/' + model_name + '/' + model_name + '_' + str(kfold) + '.h5'
    model = load_model(model_path)
    # Make test predictions using the model created at kfold and seed
    preds = model.predict(X_test)
    return(preds)
# +
# Compile model parameters for all models produced.
parameter_files = os.listdir('final_classifier_parameters')
# Read every parameter csv and collect it; skip files that fail to parse.
# BUGFIX: previously each pd.read_csv OVERWROTE model_parameters, so only the
# last file's parameters survived despite the intent stated above.
_param_frames = []
for fname in parameter_files:
    try:
        _param_frames.append(pd.read_csv(f'final_classifier_parameters/{fname}'))
    except ValueError:
        print(f'Passing file {fname}')
# ignore_index=True gives a clean RangeIndex, which the prediction loop below
# relies on when it mixes .iloc[idx] and .loc[idx, ...].
model_parameters = pd.concat(_param_frames, ignore_index=True) if _param_frames else pd.DataFrame()
# Print model parameters
model_parameters
# -
# ### 4.02 Make Test Predictions
# Make 0_label test predictions for all models created during CV for all seeds
preds = []
# NOTE(review): this loop mixes positional (.iloc[idx]) and label-based
# (.loc[idx, ...]) indexing on the same idx — correct only while
# model_parameters carries the default RangeIndex; confirm upstream.
for idx in tqdm(model_parameters.index):
    # Get number of folds for each seed - add 1 because of zero indexing
    seed = model_parameters.iloc[idx]['seed']
    num_folds = max(model_parameters.loc[model_parameters.seed == seed, 'kfold']) + 1
    # Convert string list representation to list of strings for selected_features
    # (single quotes are swapped for double quotes so json.loads can parse it)
    selected_features = model_parameters.loc[idx, 'selected_features'].replace("'", '"')
    selected_features = json.loads(selected_features)
    # Remove non-numerical features from selected_features list
    if 'cp_type' in selected_features:
        selected_features.remove('cp_type')
    if 'cp_dose' in selected_features:
        selected_features.remove('cp_dose')
    # Make test predictions for this (seed, kfold) model
    fold_preds = make_test_predictions(
        X_test = test_features,
        selected_features = selected_features,
        num_features = model_parameters.iloc[idx]['num_features'],
        num_components = model_parameters.iloc[idx]['num_components'],
        use_embedding = model_parameters.iloc[idx]['use_embedding'],
        seed = seed,
        kfold = model_parameters.iloc[idx]['kfold'],
        num_folds = num_folds
    )
    preds.append(fold_preds)
# +
# Ensemble predictions and generate submission (simple mean over all folds/seeds)
for idx, fold_preds in enumerate(preds):
    # Convert fold_preds to dataframe
    fold_preds = pd.DataFrame(fold_preds, columns=sample_submission.columns[1:])
    # Update the submission for the first round of preds
    if idx == 0:
        # Add sig_id feature to fold_preds
        fold_preds['sig_id'] = sample_submission['sig_id']
        # DataFrame.update aligns on index/columns and overwrites the sample
        # placeholders with the first fold's predictions.
        sample_submission.update(fold_preds)
    # Add to the preds following the first round of preds
    else:
        # NOTE(review): this addition aligns fold_preds with the submission on
        # index and columns; it assumes both carry the default RangeIndex —
        # confirm, otherwise misaligned rows would produce NaNs.
        sample_submission.iloc[:, 1:] = sample_submission.iloc[:, 1:] + fold_preds
# Divide summed preds by number of total folds, giving the mean prediction
sample_submission.iloc[:, 1:] = sample_submission.iloc[:, 1:] / len(preds)
sample_submission.head()
# -
sample_submission.to_csv('submissions/submission.csv', index=False)
| output/final_classifier_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pds
# language: python
# name: pds
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/pds2122/course/blob/main/nbs/10_Project_Questions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-G5_l2PDV9P6"
# <div class='bar_title'></div>
#
# *Practical Data Science*
#
# # Capstone Project Questions
#
# <NAME><br>
# Chair of Information Systems and Management
#
# Winter Semester 21/22
# + [markdown] id="IoDbpryJV9QB"
# __Credits for this lecture__
#
# - https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb
# + id="SHQW_0OqV9QC"
# Install on colab
# !pip install transformers datasets
# + [markdown] id="lUnxqCGMXBHS"
# # Fine Tuning Transformer for MultiClass Text Classification
# + [markdown] id="UTvMQttvXBHS"
# ### Introduction
#
# Today, we will be fine tuning a transformer model for the **Multiclass text classification** problem.
#
# - Data:
# - Capstone Project Data.
#
#
# - Language Model Used:
# - DistilBERT this is a smaller transformer model as compared to BERT or Roberta. It is created by process of distillation applied to Bert.
# - [Blog-Post](https://medium.com/huggingface/distilbert-8cf3380435b5)
# - [Research Paper](https://arxiv.org/abs/1910.01108)
# - [Documentation for python](https://huggingface.co/transformers/model_doc/distilbert.html)
#
#
# - Hardware Requirements:
# - Python 3.6 and above
# - Pytorch, Transformers and All the stock Python ML Libraries
# - GPU enabled setup
#
# + [markdown] id="BCTBrSCAXBHT"
# <a id='section01'></a>
# ### Importing Python Libraries and preparing the environment
#
# + id="wuMlXT80GAMK"
# Importing the libraries needed
import torch
import transformers
from torch.utils.data import Dataset, DataLoader
from transformers import DistilBertModel, DistilBertTokenizer
import linecache
from pathlib import Path
from bs4 import BeautifulSoup
import json
import pandas as pd
import gc
gc.enable()
# + id="xQMKTZ4ARk12"
# Setting up the device for GPU usage
from torch import cuda
# Prefer the GPU when PyTorch can see one; tensors and the model are moved to
# this device string later via .to(device).
device = 'cuda' if cuda.is_available() else 'cpu'
# + [markdown] id="QNwhypz3XBHT"
# <a id='section02'></a>
# ### Importing and Pre-Processing data
# + [markdown] id="z7ojooFKXJkv"
# 1. Connect to google drive
# + id="cP_AL-BzXkKW"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="raV4ms5IXBHT"
# 2. Copy and unzip data
# + id="BPgOSEQMXy7S"
# !cp /content/drive/MyDrive/industry_data/test_small.ndjson.gz . && gzip -d test_small.ndjson.gz
# !cp /content/drive/MyDrive/industry_data/train_small.ndjson.gz . && gzip -d train_small.ndjson.gz
# + [markdown] id="ZZ3rWgTNZAHo"
# 3. Get categories
# + id="ZMeSeJJaXBHT"
# Build the industry -> (index, label) lookup table from the TRAIN split.
# (Locals renamed: they were previously called test_path/test_data although the
# file being read is train_small.ndjson.)
data_path = Path('.')
train_path = data_path/'train_small.ndjson'
with train_path.open("r", encoding="utf-8") as file:
    train_data = [json.loads(line) for line in file]
categories = pd.DataFrame(train_data)
# One row per industry, ordered by its human-readable label, with a dense
# integer 'index' column that becomes the classification target id.
categories = categories[['industry_label', 'industry']].sort_values('industry_label').drop_duplicates().reset_index(drop=True)
categories = categories.reset_index().set_index('industry')
# Free the raw records; only the lookup table is needed from here on.
del train_data
# + id="VOTmKinhZM-q"
categories
# + [markdown] tags=[] id="s4YhYxW1XBHV"
# <a id='section03'></a>
# ### Preparing the Dataset and Dataloader
#
# We will start with defining few key variables that will be used later during the training/fine tuning stage.
# Followed by creation of Dataset class - This defines how the text is pre-processed before sending it to the neural network. We will also define the Dataloader that will feed the data in batches to the neural network for suitable training and processing.
# Dataset and Dataloader are constructs of the PyTorch library for defining and controlling the data pre-processing and its passage to neural network. For further reading into Dataset and Dataloader read the [docs at PyTorch](https://pytorch.org/docs/stable/data.html)
#
# #### *NdjsonDataset* Dataset Class
# - This class is defined to generate tokenized output that is used by the DistilBERT model for training.
#
# #### Dataloader
# - Dataloader is used to for creating training and validation dataloader that load data to the neural network in a defined manner. This is needed because all the data from the dataset cannot be loaded to the memory at once, hence the amount of dataloaded to the memory and then passed to the neural network needs to be controlled.
# - This control is achieved using the parameters such as `batch_size` and `max_len`.
# - Training and Validation dataloaders are used in the training and validation part of the flow respectively
# + id="JrBr2YesGdO_"
# Defining some key variables that will be used later on in the training
MAX_LEN = 512            # max token length per document (DistilBERT's limit)
TRAIN_BATCH_SIZE = 4
VALID_BATCH_SIZE = 2
EPOCHS = 1
LEARNING_RATE = 1e-05
# NOTE(review): this loads the *cased* vocabulary, but DistillBERTClass below
# instantiates "distilbert-base-uncased" — tokenizer and model checkpoints
# should normally match; confirm which variant was intended.
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')
# + id="1xJX9xRiXBHV"
class NdjsonDataset(torch.utils.data.Dataset):
    """
    Map-style dataset over an ndjson file (one JSON object per line).

    Each item is read lazily with linecache, its 'html' field stripped to
    plain text via BeautifulSoup, tokenized, and returned together with the
    integer class id looked up in `categories` by the 'industry' field.
    """
    def __init__(self, filepath, categories, tokenizer, max_len, n_lines=None):
        # BUGFIX: was super(NdjsonDataset).__init__() — an unbound super that
        # never actually called Dataset.__init__.
        super().__init__()
        self.filename = filepath.as_posix()
        self.categories = categories
        self.tokenizer = tokenizer
        self.max_len = max_len
        # Use the caller-supplied line count when given; otherwise count the
        # lines (the file is opened either way, so a missing file fails early).
        with filepath.open("r", encoding="utf-8") as file:
            self.n_lines = n_lines or sum(1 for line in file)
    def __len__(self):
        return self.n_lines
    def __getitem__(self, idx):
        # linecache is 1-indexed, hence idx + 1.
        line = json.loads(linecache.getline(self.filename, idx + 1))
        industry = line['industry']
        plainline = BeautifulSoup(line['html'], 'html.parser').get_text()
        inputs = self.tokenizer.encode_plus(
            plainline,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length',  # modern form of the deprecated pad_to_max_length=True
            return_token_type_ids=True,
            truncation=True
        )
        ids = inputs['input_ids']
        mask = inputs['attention_mask']
        return {
            'ids': torch.tensor(ids, dtype=torch.long),
            'mask': torch.tensor(mask, dtype=torch.long),
            'targets': torch.tensor(self.categories.at[industry, 'index'], dtype=torch.long)
        }
# + id="clWTsuApXBHV"
# Creating the dataset and dataloader for the neural network
# How would you create a validation dataset?
data_path = Path('.')
# n_lines are hard-coded counts for the two files, so __init__ skips a full
# line scan of each file.
training_set = NdjsonDataset(filepath=data_path/'train_small.ndjson',
                             categories=categories,
                             tokenizer=tokenizer,
                             max_len=MAX_LEN,
                             n_lines=25185)
testing_set = NdjsonDataset(filepath=data_path/'test_small.ndjson',
                            categories=categories,
                            tokenizer=tokenizer,
                            max_len=MAX_LEN,
                            n_lines=8396)
# + id="l1BgA1CkQSYa"
train_params = {'batch_size': TRAIN_BATCH_SIZE,
                'shuffle': True,
                'num_workers': 0
                }
# NOTE(review): shuffle=True on the evaluation loader is unusual; it does not
# change aggregate metrics but makes per-step prints non-reproducible.
test_params = {'batch_size': VALID_BATCH_SIZE,
               'shuffle': True,
               'num_workers': 0
               }
training_loader = DataLoader(training_set, **train_params)
testing_loader = DataLoader(testing_set, **test_params)
# + [markdown] id="2mK__ySUXBHW"
# <a id='section04'></a>
# ### Creating the Neural Network for Fine Tuning
#
# #### Neural Network
# - We will be creating a neural network with the `DistillBERTClass`.
# - This network will have the DistilBERT Language model followed by a `dropout` and finally a `Linear` layer to obtain the final outputs.
# - The data will be fed to the DistilBERT Language model as defined in the dataset.
# - Final layer outputs is what will be compared to the `encoded category` to determine the accuracy of models prediction.
# - We will initiate an instance of the network called `model`. This instance will be used for training and then to save the final trained model for future inference.
#
# #### Loss Function and Optimizer
# - `Loss Function` and `Optimizer` and defined in the next cell.
# - The `Loss Function` is used the calculate the difference in the output created by the model and the actual output.
# - `Optimizer` is used to update the weights of the neural network to improve its performance.
#
# #### Further Reading
# - You can refer to my [Pytorch Tutorials](https://github.com/abhimishra91/pytorch-tutorials) to get an intuition of Loss Function and Optimizer.
# - [Pytorch Documentation for Loss Function](https://pytorch.org/docs/stable/nn.html#loss-functions)
# - [Pytorch Documentation for Optimizer](https://pytorch.org/docs/stable/optim.html)
# - Refer to the links provided on the top of the notebook to read more about DistiBERT.
# + id="Nzm25z5HXBHW"
# Creating the customized model, by adding a drop out and a dense layer on top of distil bert to get the final output for the model.
class DistillBERTClass(torch.nn.Module):
    """
    DistilBERT encoder with a classification head: a 768->768 projection,
    ReLU, dropout, and a final linear layer sized to the number of
    categories (read from the module-level `categories` table).
    """
    def __init__(self):
        super(DistillBERTClass, self).__init__()
        self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
        self.pre_classifier = torch.nn.Linear(768, 768)
        self.dropout = torch.nn.Dropout(0.3)
        self.classifier = torch.nn.Linear(768, len(categories))
    def forward(self, input_ids, attention_mask):
        # Encode, then keep the hidden state of the first ([CLS]) token only.
        encoder_out = self.l1(input_ids=input_ids, attention_mask=attention_mask)
        cls_state = encoder_out[0][:, 0]
        # Project and activate before dropout, then classify.
        projected = torch.nn.ReLU()(self.pre_classifier(cls_state))
        return self.classifier(self.dropout(projected))
# + tags=[] id="LVcetSqxXBHX"
model = DistillBERTClass()
# Move all parameters to the selected device (GPU when available)
model.to(device)
# + id="1gUTdlTxXBHX"
# Creating the loss function and optimizer
# CrossEntropyLoss expects raw logits — the model head applies no softmax.
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(params = model.parameters(), lr=LEARNING_RATE)
# + [markdown] id="_dhmzEnKXBHX"
# <a id='section05'></a>
# ### Fine Tuning the Model
#
# After all the effort of loading and preparing the data and datasets, creating the model and defining its loss and optimizer. This is probably the easier steps in the process.
#
# Here we define a training function that trains the model on the training dataset created above, specified number of times (EPOCH), An epoch defines how many times the complete data will be passed through the network.
#
# Following events happen in this function to fine tune the neural network:
# - The dataloader passes data to the model based on the batch size.
# - Subsequent output from the model and the actual category are compared to calculate the loss.
# - Loss value is used to optimize the weights of the neurons in the network.
# - After every 100 steps the loss value is printed in the console.
# + id="vlSFQ3TGXBHX"
# Function to calcuate the accuracy of the model
def calcuate_accu(big_idx, targets):
    """Return the number of positions where predictions equal targets."""
    return (big_idx == targets).sum().item()
# + id="fL2BMY3vXBHX"
# Defining the training function on the 80% of the dataset for tuning the distilbert model
def train(epoch):
    """Run one fine-tuning epoch over training_loader (uses the module-level
    model, optimizer, loss_function, device and training_loader)."""
    # Running totals over the whole epoch — the periodic prints below are
    # therefore cumulative averages, not averages of the last 100 steps.
    tr_loss = 0
    n_correct = 0
    nb_tr_steps = 0
    nb_tr_examples = 0
    model.train()
    for _,data in enumerate(training_loader, 0):
        # Move the batch to the training device
        ids = data['ids'].to(device, dtype = torch.long)
        mask = data['mask'].to(device, dtype = torch.long)
        targets = data['targets'].to(device, dtype = torch.long)
        outputs = model(ids, mask)
        loss = loss_function(outputs, targets)
        tr_loss += loss.item()
        # Predicted class = argmax over the logits
        big_val, big_idx = torch.max(outputs.data, dim=1)
        n_correct += calcuate_accu(big_idx, targets)
        nb_tr_steps += 1
        nb_tr_examples+=targets.size(0)
        if _%100==0:
            loss_step = tr_loss/nb_tr_steps
            accu_step = (n_correct*100)/nb_tr_examples
            print(f"Training Loss per 100 steps: {loss_step}")
            print(f"Training Accuracy per 100 steps: {accu_step}")
        # Standard optimisation step: clear gradients, backprop, update
        optimizer.zero_grad()
        loss.backward()
        # # When using GPU
        optimizer.step()
    print(f'The Total Accuracy for Epoch {epoch}: {(n_correct*100)/nb_tr_examples}')
    epoch_loss = tr_loss/nb_tr_steps
    epoch_accu = (n_correct*100)/nb_tr_examples
    print(f"Training Loss Epoch: {epoch_loss}")
    print(f"Training Accuracy Epoch: {epoch_accu}")
    return
# + id="AmYngH8_XBHX"
# Run the fine-tuning loop for the configured number of epochs (EPOCHS == 1)
for epoch in range(EPOCHS):
    train(epoch)
# + [markdown] id="IjmMzuoQXBHX"
# <a id='section06'></a>
# ### Validating the Model (not tested)
#
# During the validation stage we pass the unseen data(Testing Dataset) to the model. This step determines how good the model performs on the unseen data.
# + id="iiw5tavBXBHX"
def valid(model, testing_loader):
    """
    Evaluate `model` on `testing_loader` and return the overall accuracy (%).

    Uses the module-level loss_function, device and calcuate_accu.
    """
    model.eval()
    n_correct = 0
    # BUGFIX: these accumulators were never initialised, so the very first
    # batch raised UnboundLocalError. (The unused n_wrong/total counters from
    # the original have been removed.)
    tr_loss = 0
    nb_tr_steps = 0
    nb_tr_examples = 0
    with torch.no_grad():
        for _, data in enumerate(testing_loader, 0):
            ids = data['ids'].to(device, dtype = torch.long)
            mask = data['mask'].to(device, dtype = torch.long)
            targets = data['targets'].to(device, dtype = torch.long)
            # NOTE(review): .squeeze() drops the batch dimension when a final
            # batch holds a single example, which would break torch.max(dim=1)
            # — confirm VALID_BATCH_SIZE always divides the dataset, or remove
            # the squeeze.
            outputs = model(ids, mask).squeeze()
            loss = loss_function(outputs, targets)
            tr_loss += loss.item()
            big_val, big_idx = torch.max(outputs.data, dim=1)
            n_correct += calcuate_accu(big_idx, targets)
            nb_tr_steps += 1
            nb_tr_examples += targets.size(0)
            # Periodic progress print (cumulative averages, every 5000 steps)
            if _ % 5000 == 0:
                loss_step = tr_loss/nb_tr_steps
                accu_step = (n_correct*100)/nb_tr_examples
                print(f"Validation Loss per 100 steps: {loss_step}")
                print(f"Validation Accuracy per 100 steps: {accu_step}")
    epoch_loss = tr_loss/nb_tr_steps
    epoch_accu = (n_correct*100)/nb_tr_examples
    print(f"Validation Loss Epoch: {epoch_loss}")
    print(f"Validation Accuracy Epoch: {epoch_accu}")
    return epoch_accu
# + id="tUYk7tNzXBHY"
# Run validation and report accuracy (fixes typos in the user-facing messages).
print('This is the validation section to print the accuracy and see how it performs')
print('Here we are leveraging on the dataloader created for the validation dataset, the approach is using more of pytorch')
acc = valid(model, testing_loader)
print("Accuracy on test data = %0.2f%%" % acc)
# + [markdown] id="e9Y_K5o7XBHY"
# <a id='section07'></a>
# ### Saving the Trained Model Artifacts for inference
#
# This is the final step in the process of fine tuning the model.
#
# The model and its vocabulary are saved locally. These files are then used in the future to make inference on new inputs of news headlines.
#
# Please remember that a trained neural network is only useful when used in actual inference after its training.
#
# In the lifecycle of an ML projects this is only half the job done. We will leave the inference of these models for some other day.
# + id="tmRDJPFcXBHY"
# Saving the files for re-use
output_model_file = './models/pytorch_distilbert_news.bin'
output_vocab_file = './models/vocab_distilbert_news.bin'
model_to_save = model
# NOTE(review): torch.save on the full module pickles the class by import path;
# saving model.state_dict() is the more portable convention — confirm before
# changing, since the loading side must match.
torch.save(model_to_save, output_model_file)
# NOTE(review): newer transformers versions expect a *directory* for
# save_vocabulary; passing a file path relies on older behaviour — verify
# against the installed version.
tokenizer.save_vocabulary(output_vocab_file)
print('All files saved')
| nbs/10_Project_Questions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Instructions for using Zotero
# <img src=https://www.zotero.org/static/images/icons/zotero-icon-147-160@2x.png>
# ## What is Zotero?
# Zotero is a reference-management application. It keeps track of and helps organize many kinds of files and sources: adding a reference to your library takes a single click or one drag and drop. Citation information can be stored and organized in Zotero, which makes it useful for any project that draws on outside resources.
# ____
# Tutorial created by Team Old Nation: <NAME>, <NAME>, <NAME>, and <NAME>
# ____
# ## Setup Instructions
#
# To setup or install simply go to https://www.zotero.org/download/ . Download the version suited for your computer. Make sure there is storage available on your computer. Then follow the setup guide.
# ___
# ## Instructions for Using Zotero
#
# To access Zotero, click on the application icon that was created after downloading and installing.
#
# Once on the application opens you will be brought to the main page. From here you can do a number of different things including: Adding your own publications, creating new citations, and managing your current citations.
#
# Think of zotero like an address book of citations!
#
# We are going to focus on creating new citations
#
# ### Creating New Citations
#
# To create a new citation (or item) click on the drop down with the little green circle. Then select what type of citation you want to create. After this, a new citation will pop up and you will be asked to enter the title, author name(s) and other information for your citation.
#
# Congrats! Your new citation has been created!
#
# ### Managing Existing Citations
#
# To manage existing citations, you can click on the citation you want to edit and the same information will pop up just like when creating a new citation.
# ___
# ## What Data is Available on Zotero?
#
# Zotero is a free citation manager that will allow you to collect, organize, share, and cite research items. Zotero offers local storage of data on your own computer, online access to your data, and browser extensions for Chrome and Firefox to ingest bibliographic metadata to ease the collection of citations. ZoteroBib (a separate tool) will help you create a quick bibliography through manually entering items or ingesting metadata from library catalogs, newspapers, magazines, ISBNs, DOIs, and more. ZoteroBib generates citations in a browser that can be pasted into a research paper in an array of citation styles including MLA, APA, and Chicago. This digital tool is better suited for short-term research and/or assignment needs.
# ___
#
# ### For example, if you wanted collect articles containing information pertaining the latest COVID-19 data:
#
# You would need to find the articles you need online and upload them into Zotero. There, you can create and store all of your citations in one place (let’s say, in your ‘COVID-19’ folder) for future reference. With the tons of available information online about the pandemic, this will be a good way to keep track of the sources that are the most valuable to you without having to sift through extraneous data.
# ___
# ## How Could Zotero Be Used for Capstone Projects?
#
# This tool can be used among every group to store and collect all of a team's citations that will be used throughout the course of the semester. This application will especially be useful to every team since it is able to collect citations and store them for later use. So when a team collects citations for their data, they won't forget where they got the data by the end of the semester.
# ___
# ## References
#
# https://fordham.libguides.com/BibliographicManagement/Zotero#:~:text=Zotero%20is%20a%20free*%20citation,ease%20the%20collection%20of%20citations.
#
| Zotero_Instructions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/anadiedrichs/time-series-analysis/blob/master/proyecto_forma_de_onda_2019_codigo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="RFZazCdSJiQI" colab_type="text"
# ## Intro
#
# Durante el año 2018 era la primera vez que se dictaba esta unidad en la materia. En la primer clase realicé la pregunta sobre si les interesaba algunos datos en particular para analizar o algún dataset. Un alumno nos acercó su inquietud y compartió el dataset que es el que vamos a usar en este laboratorio.
#
# El dataset contiene mediciones de una toma de corriente trifásica. El objetivo es visualizarlo (¿Qué forma tiene la onda?) y si la misma presenta ruido.
#
# Vea más sobre corrienta alterna en https://es.wikipedia.org/wiki/Corriente_alterna
#
# Notará que las señales son senoidales.
#
#
# + [markdown] id="qzXxPXaoKuVk" colab_type="text"
# ## Carga del dataset
# + [markdown] id="j72fwhSrUvM1" colab_type="text"
# ### por pasos: chusmeando con la CLI primero, luego importar con pandas
# + id="PEcilBm1Kuv2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 325} outputId="0eb5b68a-5383-4cd5-e72f-3e1d3ecc8ab8"
# !wget https://github.com/anadiedrichs/time-series-analysis/raw/master/datasets/forma_de_onda/forma_de_onda.csv
# + id="CPoAcyQpOf5a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="d20029d6-9c06-4388-a680-4545fbe27f50"
# !ls -lh
# + [markdown] id="diASofnZU8A4" colab_type="text"
# Notamos que forma_de_onda.csv pesa unos 47 Megabytes.
# Chusmeo las primeras líneas
# + id="pAyVSMn2OiW8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="11f681d5-d1a0-4c30-97b1-16d68887bfa8"
# !head forma_de_onda.csv
# + [markdown] id="ZdA-AEqdVNhj" colab_type="text"
# Observe como están separados los campos de información (por coma), el formato de la fecha y la hora.
#
#
# Importamos usando la funcion read_csv, puede demorar un poco en ejecutarse.
# + id="S8-DQKasOnV-" colab_type="code" colab={}
import pandas as pd
from pandas import Series
# Load the three-phase waveform measurements; header row 0, first column parsed as dates.
# NOTE: the `squeeze=True` read_csv keyword was deprecated in pandas 1.4 and removed
# in 2.0; `.squeeze("columns")` is the supported equivalent. It is a no-op here
# because the file has several columns, so `data` remains a DataFrame.
data = pd.read_csv('forma_de_onda.csv', header=0, parse_dates=[0]).squeeze("columns") #index_col=0,
# + [markdown] id="Pu7tTTJXSiJn" colab_type="text"
# index_col es 0 : toma la primer columna como indice por defecto
#
#
# header es 0, la primer fila es usada como cabecera
#
#
# parse_dates es True, intenta convertir los tipos de datos a tipo DateTime
#
# La option squeeze=True regresa tipo Series
# + [markdown] id="pmoGcUZmTj2T" colab_type="text"
# Mostramos las primeras 10 líneas del dataset
# + id="ZQX3eIwRRr5N" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 359} outputId="9f96c1a9-01fe-4d18-9be1-d345199fa05b"
# Peek at the first 10 rows of the dataset.
data.head(10)
# + id="H12BREmNQfLq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="1ac2c111-81a3-4bd4-909a-62da480f7392"
type(data) # the type of the variable `data`: a DataFrame
# + [markdown] id="33CBohDDVvND" colab_type="text"
# ## Accediendo a pedacitos del dataset
# + [markdown] id="YIx_EZstWBlR" colab_type="text"
# Veo desde la fila 1 a la 20, las columnas 2 a la 7
# + id="5ENTzw4KV036" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 638} outputId="a58b365c-cb29-488c-8aa3-83a541ff3df5"
# Positional slice: rows 1-19, columns 2-6 (iloc end bounds are exclusive).
data.iloc[1:20,2:7]
# + [markdown] id="aPyub-geWI4Y" colab_type="text"
# Grafico un rango del dataset
# + id="iADLliEaT-EU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 287} outputId="bc5703b6-96e8-40a9-f9c2-2a67d0e77fed"
# Plot the first ~100 samples of the signal channels.
data.iloc[1:100,2:7].plot()
# + [markdown] id="d3P5LZeYWW4o" colab_type="text"
# ## Propiedades del dataset
# + [markdown] id="4LaXD1BTWegq" colab_type="text"
# Cuántas filas y columnas tiene
# + id="lO2dbCkPWakd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="70707182-e6f1-46f4-e8b5-3327a965603e"
# (rows, columns) tuple of the DataFrame.
data.shape
# + [markdown] id="pc-ZK5hoWhMX" colab_type="text"
# Son las mismas columnas que miramos ejecutando *head*
# + id="Qa9R6A85Wjhr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 359} outputId="3ea4f976-6524-4dc7-ca3e-1160425a6205"
data.head(10)
# + [markdown] id="q44t7yS5WzAC" colab_type="text"
# ¿Lo siguiente muestra número de filas o columnas?
# + id="KLw18UmQhcxT" colab_type="code" outputId="3f69be6e-6367-468e-e7d9-c888fca2b971" colab={"base_uri": "https://localhost:8080/", "height": 35}
# shape[0] is the number of rows.
data.shape[0]
# + [markdown] id="tGMKg-Z4W4TM" colab_type="text"
# ¿Lo siguiente muestra número de filas o columnas?
# + id="yB0pe-vYhgOb" colab_type="code" outputId="2fae7733-3be8-4069-aa3f-43bd0beb44ae" colab={"base_uri": "https://localhost:8080/", "height": 35}
# shape[1] is the number of columns.
data.shape[1]
# + [markdown] id="21mqR70NXHVN" colab_type="text"
# Podemos observar de cada DataFrame algunas características estadísticas usando *describe()*
# + id="b7UKM55WXDiQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="19489c7b-6144-4490-bc0c-d27ea07c3814"
# Summary statistics (count, mean, std, quartiles) per numeric column.
data.describe()
# + [markdown] id="FaJ6Kw4fXXeo" colab_type="text"
# ## Valores perdidos
# + [markdown] id="ziS50Ji-XZlA" colab_type="text"
# Chusmeamos el dataset nuevamente
# + id="gCcW7Tjuiddc" colab_type="code" outputId="9f6d7d98-bd43-472e-bba6-68c9b36a90ea" colab={"base_uri": "https://localhost:8080/", "height": 359}
# This slice presumably lands in a region with missing samples (NaN) — see the
# markdown note above about valores perdidos.
data.iloc[50:60,0:5]
# + id="KGtDHcokmnVk" colab_type="code" outputId="52c84c88-002f-4315-b485-9eaf3c3cc330" colab={"base_uri": "https://localhost:8080/", "height": 287}
data.iloc[1:100,2:7].plot()
# + [markdown] id="56wEDLyPXlIW" colab_type="text"
# ### [Actividad]
#
# Habrá notado que hay valores perdidos en la serie.
#
# Determine si reconstruye la señal completando los valores perdidos e indicados como NaN o los ignora. Justifique
# + [markdown] id="92sQc0UuZkaJ" colab_type="text"
# SU RESPUESTA AQUI
# + id="lam1Sg9yZjkB" colab_type="code" colab={}
# Two candidate strategies for the missing samples: linear interpolation
# (reconstructs the waveform between known points) vs. dropping incomplete rows.
df = data.interpolate(method="linear")
df2 = data.dropna()
# + id="I6weABIXhDwM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="15419a4b-dc51-46bd-a4b6-66e94b4a2413"
df.shape
# + id="uE8ybSkEgzNj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="68a7fe8a-368a-4331-e64c-bed88ad6ee3c"
df2.shape
# + id="bPOxakRthIIC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="c643d9f0-5d83-4028-9c36-09207ce50e88"
# Number of rows that contained at least one NaN (removed by dropna).
df.shape[0] - df2.shape[0]
# + [markdown] id="drZiEfs1YeHI" colab_type="text"
# ## [actividad] Intervalos de muestreo
#
# ¿Cada cuánto tenemos una medición? ¿Qué nos puede decir sobre el intervalo de muestreo?
#
#
# + [markdown] id="GAW8EHl5Y-Fi" colab_type="text"
# **SU RESPUESTA AQUI**
#
#
# + [markdown] id="kRMeb0QBkTrx" colab_type="text"
# ## [actividad] Análisis de la señal
#
# ¿Son todas las ondas "perfectamente" senoidales?
#
# ¿Por qué cree que alguna no?
# + [markdown] id="ZDpfBPr6YtZT" colab_type="text"
# **SU RESPUESTA AQUI**
# + [markdown] id="u5IeWpd0cXNd" colab_type="text"
# ### [actividad] Calcule y grafique la FFT de la señal
# + [markdown] id="JvaG7Ij_c1Oe" colab_type="text"
# ESCRIBA SUS COMENTARIOS AQUI
# + id="26QM2_DucWG_" colab_type="code" colab={}
# Discrete Fourier transform of the 4th column (index 3) of the interpolated signal.
from scipy.fftpack import fft
yf = fft(df.iloc[:,3])
N=df.shape[0]  # number of samples
T=0.02  # sampling period in seconds — presumably 20 ms; TODO confirm from the timestamps
# + id="NUXGDrpQem3a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="80973fda-8030-40ec-9815-e859fc893dd4"
import numpy as np
# Frequency axis: 0 .. Nyquist (1/(2T)), one point per retained FFT coefficient.
xf = np.linspace(0.0, 1.0/(2.0*T), N//2) # N//2
import matplotlib.pyplot as plt
# fig = plt.figure()
plt.xlabel('xlabel') # label the x axis correctly (student TODO)
plt.ylabel('ylabel') # label the y axis correctly (student TODO)
# Normalized magnitude spectrum of the positive-frequency half.
plt.plot(xf, 2.0/N * np.abs(yf[0:N//2])) #N//2
plt.grid()
plt.show()
# fig.savefig('test.jpg')
# fig.savefig('test.jpg')
# + [markdown] id="LIILrXOwcjtq" colab_type="text"
# ¿qué concluye al ver este gráfico?
#
# + [markdown] id="xfecfOeRc40r" colab_type="text"
# SU RESPUESTA AQUI
| proyecto_forma_de_onda_2019_codigo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Extracting Vega Lite Transformations
#
# In this notebook, we extract the [transformations](https://vega.github.io/vega-lite/docs/transform.html) in the Vega Lite spec generated by Altair and push them to the server, by adding them to the SQL query with Ibis.
import altair as alt
import ibis.omniscidb
import IPython.display
import ibis_vega_transform
# ## Carrier names
#
# First we connect to the table using Ibis:
# Connect to the public OmniSci demo database over HTTPS.
conn = ibis.omniscidb.connect(
    host='metis.mapd.com', user='demouser', password='<PASSWORD>',
    port=443, database='mapd', protocol= 'https'
)
# Lazy Ibis table expression — no rows are fetched yet.
t = conn.table("flights_donotmodify")
# Then we compose an Altair chart using an ibis expression.
c = alt.Chart(t[t.carrier_name]).mark_bar().encode(
    x='carrier_name',
    y='count()'
)
# Finally, we enable rendering that extracts the aggregate expressions and adds them onto the Ibis expression:
c
# The only data loaded into the browser for this chart is one row for each carrier, because the counting transformation is pushed to the SQL statement.
# ## Delay by Month
# Heat map of average departure delay by day-of-month x month; the mean
# aggregation is pushed into the Ibis/SQL query rather than run in the browser.
delay_by_month = alt.Chart(t[t.flight_dayofmonth, t.flight_month, t.depdelay]).mark_rect().encode(
    x='flight_dayofmonth:O',
    y='flight_month:O',
    color='average(depdelay)'
)
delay_by_month
# ## Interactive support: grouping by month
# Now let's use widgets to facet this by month:
# +
# Range slider (1-12) bound to a single-value selection on flight_month.
slider = alt.binding_range(min=1, max=12, step=1)
select_month = alt.selection_single(fields=['flight_month'],
                                    bind=slider, init={'flight_month': 1})
# Mean departure delay per day of month, filtered to the month picked by the slider.
alt.Chart(t[t.flight_dayofmonth, t.depdelay, t.flight_month]).mark_line().encode(
    x='flight_dayofmonth:O',
    y='average(depdelay)'
).add_selection(
    select_month
).transform_filter(
    select_month
)
# -
# ## Combining Tables: Comparing with Political Contributions
# Now let's make a graph that also uses data from another table. Let's see how average delay time for flights from a state relates to the average political contribution size.
t_contributions = conn.table("contributions_donotmodify")
# +
# Mean political contribution amount per state (green circles, sized by count)...
amount_by_state = alt.Chart(
    t_contributions[["contributor_state", "amount"]]
).mark_circle().encode(
    x="contributor_state",
    y=alt.Y("mean(amount):Q", axis=alt.Axis(grid=False)),
    color=alt.Color(value="green"),
    size="count()"
)
# ...overlaid with mean departure delay per origin state (red squares).
delay_by_state = alt.Chart(
    t[["origin_state", "depdelay"]]
).mark_square().encode(
    x="origin_state",
    y=alt.Y(
        "mean(depdelay):Q",
        axis=alt.Axis(grid=False)
    ),
    color=alt.Color(value="firebrick"),
    size="count()"
)
# Layer the two charts with independent y and size scales.
combined = (amount_by_state + delay_by_state).resolve_scale(y='independent', size='independent')
combined
| examples/ibis-altair-extraction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/njucs/med/blob/master/PM/PALM_PaddleX_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-_UpfeIEdr3g"
# ## PaddleX配置
# + [markdown] id="eXF3enlRdtmw"
# ### paddlex安装
# + id="3lfwFhdJlQb2"
# 查看CUDA版本
# !nvcc --version
# + id="Obdbrdr1lePI"
# install PaddlePaddle-GPU
# !python -m pip install paddlepaddle-gpu==2.2.2 -i https://mirror.baidu.com/pypi/simple
# + id="327IadTzQ6N9"
# !pip install paddlex==1.3.11 -i https://mirror.baidu.com/pypi/simple
# + [markdown] id="-uQo7Hwbd743"
# ### GPU设置、包引入
# + id="68NiFh7Pd9U7"
# Use GPU card 0 (with no GPU present, this code still runs and trains on the CPU).
import matplotlib
matplotlib.use('Agg')  # non-interactive backend: render figures without a display
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import paddlex as pdx
# + [markdown] id="xcXcIYl4eInK"
# ## 准备数据集
# + [markdown] id="tFjwp8QieKg5"
# ### 准备数据
# + colab={"base_uri": "https://localhost:8080/"} id="3uNgUT5Pw5VV" outputId="0b2abd06-2647-43f6-813e-1787f6df96b8"
# Grant Colab access to Google Drive, then cd into the project directory.
from google.colab import drive
drive.mount('/content/drive')
# %cd drive/MyDrive/'Colab Notebooks'/Ophthalmology/PathologicMyopia/
# + colab={"base_uri": "https://localhost:8080/"} id="skjCHL0gyD4v" outputId="04148ed8-6ae1-4f2f-e01b-1d58e1dc234b"
# !ls dataset/Train | wc -w
# !ls dataset/Train/fundus_image/ | wc -w
# !ls dataset/PALM-Testing400-Images/ | wc -w
# + id="9WWRSEfFePyw"
# download dataset from website (already done!)
# !wget https://bj.bcebos.com/v1/dataset-bj/%E5%8C%BB%E7%96%97%E6%AF%94%E8%B5%9B/%E5%B8%B8%E8%A7%84%E8%B5%9B%EF%BC%9APALM%E7%9C%BC%E5%BA%95%E5%BD%A9%E7%85%A7%E8%A7%86%E7%9B%98%E6%8E%A2%E6%B5%8B%E4%B8%8E%E5%88%86%E5%89%B2.zip -O dataset.zip
# + [markdown] id="EjlHOOfNeQIZ"
# ### 划分数据集和测试集
# + id="cVvlXRHqeYDh"
# Split the labelled data into a 90% training list and a 10% eval list.
import pandas as pd
import random
train_excel_file = 'dataset/Train/Classification.xlsx'
label_df = pd.read_excel(train_excel_file)
n_rows = len(label_df)
# Shuffle the rows before splitting.
label_df = label_df.sample(frac=1)
cutoff = int(n_rows * 0.9)
train_split = label_df[:cutoff]
eval_split = label_df[cutoff:]
# PaddleX file lists: space-separated "<image> <label>" lines, no header/index.
train_split.to_csv("PALM_PaddleX_2/train_list.txt", index=None, header=None, sep=' ')
eval_split.to_csv("PALM_PaddleX_2/eval_list.txt", index=None, header=None, sep=' ')
# + [markdown] id="Ye1WTEEEecv4"
# ### 数据增强配置
# + id="40pNqhciees5"
from paddlex.cls import transforms
# Training-time augmentation: random crop + random horizontal flip, then
# normalization.
train_transforms = transforms.Compose([
    transforms.RandomCrop(crop_size=1440),
    transforms.RandomHorizontalFlip(),
    transforms.Normalize()
])
# Evaluation: deterministic resize + center crop (no augmentation).
eval_transforms = transforms.Compose([
    transforms.ResizeByShort(short_size=1444),
    transforms.CenterCrop(crop_size=1440),
    transforms.Normalize()
])
# + [markdown] id="g25AXzuIefnV"
# ### 数据集配置
# + colab={"base_uri": "https://localhost:8080/"} id="cEbU8IwIeizW" outputId="87f75c1c-bd14-40a2-88da-f0984ee34812"
# labels.txt records the set of possible labels —
# in this experiment just two lines: 0 and 1.
train_dataset = pdx.datasets.ImageNet(
    data_dir='dataset/Train/fundus_image',
    file_list='PALM_PaddleX_2/train_list.txt',
    label_list='PALM_PaddleX_2/labels.txt',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.ImageNet(
    data_dir='dataset/Train/fundus_image',
    file_list='PALM_PaddleX_2/eval_list.txt',
    label_list='PALM_PaddleX_2/labels.txt',
    transforms=eval_transforms)
# + [markdown] id="scPbUAv_elRe"
# ## 训练
# + id="kggqrRytemhy"
# Binary classifier: MobileNetV3-small with SSLD-pretrained weights.
model = pdx.cls.MobileNetV3_small_ssld(num_classes=2)
model.train(num_epochs=64,
            train_dataset=train_dataset,
            train_batch_size=32,
            eval_dataset=eval_dataset,
            lr_decay_epochs=[4, 6, 8],  # epochs at which the learning rate decays
            save_interval_epochs=1,  # checkpoint every epoch
            learning_rate=0.025,
            save_dir='PALM_PaddleX_2/output/mobilenetv3_small_ssld',
            # resume_checkpoint='output/mobilenetv3_small_ssld/epoch_18',
            use_vdl=True)  # presumably enables VisualDL logging — confirm in PaddleX docs
# + [markdown] id="V19HnpEheuLT"
# ## 预测
# + [markdown] id="QNsEA726ev09"
# ### 环境配置
# + id="D19WzI1hez5m"
# Use GPU card 0 (with no GPU present, this code still runs on the CPU);
# repeated here so the prediction section can run standalone.
import matplotlib
matplotlib.use('Agg')  # non-interactive backend: render figures without a display
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import paddlex as pdx
# + [markdown] id="ABBCX9rXe1XM"
# ### 单张图片预测
# + id="8ue9m2Rve5AA"
# Single-image prediction sanity check on a saved checkpoint.
import paddlex as pdx
model = pdx.load_model('PALM_PaddleX_2/output/mobilenetv3_small_ssld/epoch_9')
image_name = 'dataset/PALM-Testing400-Images/T0001.jpg'
result = model.predict(image_name, topk=2)
print("Predict Result:", result)
image_name = 'dataset/PALM-Testing400-Images/T0002.jpg'
result = model.predict(image_name, topk=2)
print("Predict Result:", result)
# + [markdown] id="wdYgKfyle8Bh"
# ### 预测数据集生成
# + id="s_QWGALVe-Ct"
# Build val_list.txt listing the 400 test images (T0001.jpg ... T0400.jpg),
# then reload it with the dataset directory prefixed to each file name.
names = ['T' + str(i).zfill(4) + '.jpg' for i in range(1, 401)]
# names = ['T' + str(i).zfill(4) + '.jpg' for i in range(1, 201)]
with open('PALM_PaddleX_2/val_list.txt', 'w') as f:
    f.writelines(name + '\n' for name in names)
val_list = []
with open('PALM_PaddleX_2/val_list.txt', 'r') as f:
    for line in f:
        val_list.append('dataset/PALM-Testing400-Images/' + line.split('\n')[0])
        # print(val_list[-1])
print(len(val_list))
# + [markdown] id="eyMeEE6ZfFmn"
# ### 批量预测
# + id="o_nYhuuBfHQw"
# Batch inference: top-2 class predictions for every image in val_list.
import paddlex as pdx
model = pdx.load_model('PALM_PaddleX_2/output/mobilenetv3_small_ssld/best_model')
result_list = []
for image_name in val_list:
    result = model.predict(image_name, topk=2)
    print("Predict Result:", result)
    result_list.append(result)
# + [markdown] id="QF0lK3wLfOpE"
# ### 结果检查
# + id="7b1CgM1ufQO-"
# Inspect the first prediction: a list of two {'category_id', 'score'} dicts
# (top-2 classes — presumably highest score first; verify against PaddleX docs).
item = result_list[0]
print(item)
print(item[0]['category_id'],item[0]['score'])
print(item[1]['category_id'],item[1]['score'])
# + [markdown] id="CIUry1QffYRq"
# ### 保存结果
# + id="jSeIDWiSfZt-"
# Result column: probability of class 1 for each image. The top-2 list always
# contains both classes, so pick whichever entry has category_id == 1.
pd_B = []
for item in result_list:
    top = item[0]
    pd_B.append(top['score'] if top['category_id'] == 1 else item[1]['score'])
# File-name column, read back from val_list.txt (newline stripped).
pd_A = []
with open('PALM_PaddleX_2/val_list.txt', 'r') as f:
    for line in f:
        pd_A.append(line.split('\n')[0])
        # print(pd_A[-1])
# Build the pandas DataFrame
import pandas as pd
df = pd.DataFrame({'FileName': pd_A, 'PM Risk': pd_B})
# Save as the submission file
df.to_csv("PALM_PaddleX_2/Classification_Results.csv", index=None)
| PM/PALM_PaddleX_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import category_encoders as ce
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
# read in data:
traindata = pd.read_csv('final_train.csv')
test= pd.read_csv('final_test.csv')
# df = df.drop(columns='Unnamed: 0')
# df.reset_index(drop=True)
# Separate into train and val with a time-based split on 'year'
# (films up to 2005 train; later films validate).
train=traindata[traindata['year']<=2005]
val=traindata[traindata['year']>2005]
train.shape, val.shape
# +
# Select target and features: keep every column except the target itself,
# identifier/text columns, and the long list of unused genre/metadata flags.
target='winner_bool'
features=train.columns.drop([target, 'winner', 'nconst_x','nconst_y','award', 'nominee', 'movie', 'year',
                             'year_film', 'year_film2', 'year_film3',
                             'tconst', 'primarytitle', 'startyear',
                             'directors', 'writers', 'director','birthyear',
                             'name', 'release_day',
                             'biography',
                             'drama', 'comedy', 'adventure', 'film-noir', 'mystery', 'action',
                             'crime', 'horror', 'family', 'animation', 'western', 'documentary',
                             'romance', 'fantasy', 'thriller', 'war', 'history', 'music', 'sport',
                             'musical', 'sci-fi', 'cpi_multiplier','opening_usa_day','release_day_of_month',
                             'release_month','world','runtimeminutes','budget','gross','n_globes'
                             ])
X_train=train[features]
X_val=val[features]
X_test=test[features]
y_train=train[target]
y_val=val[target]
# -
# The surviving feature columns.
X_train.columns
# <h2>Fit a Random Forest model </h2>
# +
from sklearn.ensemble import RandomForestClassifier
import category_encoders as ce
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
# Pipeline: ordinal-encode categoricals -> median-impute missing values ->
# random forest (50 trees, depth <= 20, fixed seed for reproducibility).
mypipeline= make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_jobs=-1, random_state=42, max_depth=20, n_estimators=50)
)
mypipeline.fit(X_train, y_train)
print(f'Accuracy score: {mypipeline.score(X_val, y_val)}')
# -
# Persist the fitted pipeline for reuse, then sanity-check predict_proba.
from joblib import dump
dump(mypipeline, 'mypipeline.joblib')
mypipeline.predict_proba(X_test)
def predict(actor_in_a_leading_role_nomination,
            actor_in_a_supporting_role_nomination,
            actress_in_a_leading_role_nomination,
            actress_in_a_supporting_role_nomination, directing_nomination,
            cinematography_nomination, averagerating,
            numvotes, dir_age, is_woman, rating, metascore,
            opening_wk
            ):
    """Predict whether a nominee wins, as a human-readable string.

    Builds a one-row DataFrame from the 13 feature values and runs it
    through the fitted `mypipeline` (defined above).
    """
    df = pd.DataFrame(
        columns=['actor_in_a_leading_role_nomination',
                 'actor_in_a_supporting_role_nomination',
                 'actress_in_a_leading_role_nomination',
                 'actress_in_a_supporting_role_nomination', 'directing_nomination',
                 'cinematography_nomination', 'averagerating',
                 'numvotes', 'dir_age', 'is_woman', 'rating', 'metascore',
                 'opening_wk'
                 ],
        data=[[actor_in_a_leading_role_nomination,
               actor_in_a_supporting_role_nomination,
               actress_in_a_leading_role_nomination,
               actress_in_a_supporting_role_nomination, directing_nomination,
               cinematography_nomination, averagerating,
               numvotes, dir_age, is_woman, rating, metascore,
               opening_wk,
               ]]
    )
    y_pred = mypipeline.predict(df)[0]
    # BUG FIX: predict_proba(df)[0] is an array with one probability per
    # class; formatting the whole array with ':.0f' raises a TypeError.
    # Report the probability of the predicted class (the row maximum).
    y_pred_proba = mypipeline.predict_proba(df)[0].max()
    return f'{y_pred_proba*100:.0f}% chance of {y_pred}'
| notebooks/Prediction model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="8fe6b8f46cf94490f3cff24e082fab3090557c87"
# # What's in a Data Scientist's backpack?
#
# Data Scientist is certainly the sexiest job of the 21st century and to aspire as a data scientist includes knowing the best modern tools and techniques of the field. I've created a visualization for some of those questions that give insights into the current favourites (tool) in data science.
#
# 
# + id="4_DbzJsQvqZ_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="f52e143b-ce33-415e-a8be-c227123deced" _uuid="201f102af8e570668f39f582457401dcd70aa7fe"
import pandas as pd
import numpy as np
import nltk
# nltk.download('stopwords')
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
# # %matplotlib inline
# + id="sZOKgZ7pv-lh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 2482} outputId="8ab9cf9a-c865-4cf0-e61a-beb793aa3666" _uuid="748b129280e58d6157f2ba63fef1d92e12c69911"
# Free-form text answers, the survey schema, and the multiple-choice responses.
response = pd.read_csv('../input/freeFormResponses.csv', low_memory=False)
survey = pd.read_csv('../input/SurveySchema.csv', low_memory=False)
mcr = pd.read_csv('../input/multipleChoiceResponses.csv', low_memory=False)
# + [markdown] _uuid="f60c492c614b38ec9d9ed82e3fe6f3842008cf6c"
# ## Data preprocessing
#
# Basic data cleaning and pre-processing.
# + id="F524TVWZYyiV" colab_type="code" colab={} _uuid="12f69a77f9ef9989d74c9873bf42b551ca9dcc49"
def clean_data(t_res_Q12):
    """Normalize a column of free-form survey answers and count term frequency.

    Steps: lowercase/strip and drop 'nan' entries, drop the leading
    question-text row, split multi-token answers (by whitespace, ',' or '/'),
    filter stopwords/noise words, merge common aliases into canonical terms
    (e.g. 'rstudio' -> 'r'), and return (term, count) pairs sorted by count,
    descending.
    """
    res_Q12 = []
    dic_res_Q12 = {}
    for i in t_res_Q12:
        if i.lower() != 'nan':
            res_Q12.append(i.lower().strip())
    # The first retained entry is the question text itself, not an answer.
    del(res_Q12[0])
    # Break multi-token answers apart; remember their indices for removal.
    temp_split = []
    temp_index = []
    for i in range(len(res_Q12)):
        if len(res_Q12[i].split()) > 1:
            temp_split += res_Q12[i].split()
            temp_index.append(i)
        elif len(res_Q12[i].split(',')) > 1:
            temp_split += res_Q12[i].split(',')
            temp_index.append(i)
        elif len(res_Q12[i].split('/')) > 1:
            # BUG FIX: this branch previously split by ',' which never splits
            # a slash-separated answer (the ',' case is already handled above).
            # (An unreachable duplicate `split(', ')` branch was also removed.)
            temp_split += res_Q12[i].split('/')
            temp_index.append(i)
    # Delete from the end so earlier indices stay valid.
    for i in sorted(temp_index, reverse=True):
        del(res_Q12[i])
    res_Q12 += temp_split
    stop_words = stopwords.words('english')
    stop_words += ['none', 'nothing','use',' ', ',', '.', 'software', 'tool', 'tools', 'mostly', 'notebook', 'ide', 'studio', 'data']
    for i in res_Q12:
        if i not in stop_words:
            i = i.strip(',')
            i = i.strip()
            # Merge aliases / vendor fragments into one canonical term.
            if i == 'microsoft' or i == 'ms':
                i = 'excel'
            elif i == 'google' or i =='sheet':
                i = 'sheets'
            elif i == 'power' or i == 'bi':
                i = 'powerbi'
            elif i == 'qlik':
                i = 'qlikview'
            elif i == 'rstudio':
                i = 'r'
            elif i == 'jupyterlab':
                i = 'jupyter'
            elif i == 'watson':
                i = 'ibm'
            elif i == 'pytorch':
                i = 'torch'
            elif i == 'vidhya' or i == 'analytics':
                i = 'analytics vidhya'
            elif i == 'science' or i == 'central':
                i = 'data science central'
            dic_res_Q12[i] = dic_res_Q12.get(i, 0) + 1
    dic_res_Q12 = sorted(dic_res_Q12.items(), key= lambda x: x[1], reverse=True)
    return dic_res_Q12
# + [markdown] _uuid="990d65428385bac8a8fe87a6e5ed9cfdeed2a4c0"
# ## Primary analysis tools
#
# User responses for their primary analysis tools
# + id="IEmFO2mRwFX5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="d8ee546d-9cb3-48cc-d4ee-e7ec3296cb21" _uuid="66fd9101e986947d552c1944188a65a949d93f79" _kg_hide-output=true _kg_hide-input=true
# Q12 "other" free-text answers: clean, then bar-plot the 10 most frequent
# tools (figure also saved as foo.png).
t_res_Q12 = np.array(list(response['Q12_OTHER_TEXT']))
dic_res_Q12 = clean_data(t_res_Q12)
plt_res_Q12 = dic_res_Q12[:10]
plt.bar(range(len(plt_res_Q12)), [val[1] for val in plt_res_Q12], align='center', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q12)), [val[0] for val in plt_res_Q12])
plt.xticks(rotation=70)
plt.legend()
plt.title('Other analysis tools')
plt.draw()
plt.savefig('foo.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="fa6aa584767f2660e44ec5275a13f033920dba17"
# 
# + [markdown] _uuid="865cfd329f18c847ddc3e525692146a8f18c3f42"
# ## Basic statistical analysis tools
#
# User responses for their basic statistical analysis tools such as Microsoft Excel, Google Sheets, etc.
# + id="OjCoUDC2KMLB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 303} outputId="86930850-b091-4c7b-9cd8-9acdcdd50055" _uuid="a801df884a165139123799561d41da51f7c6c513" _kg_hide-input=true _kg_hide-output=true
# Q12 part 1 (basic statistical analysis tools): top-10 bar chart, saved as foo1.png.
t_res_Q121 = np.array(list(response['Q12_Part_1_TEXT']))
dic_res_Q121 = clean_data(t_res_Q121)
plt_res_Q121 = dic_res_Q121[:10]
plt.bar(range(len(plt_res_Q121)), [val[1] for val in plt_res_Q121], align='center', color='r', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q121)), [val[0] for val in plt_res_Q121])
plt.xticks(rotation=70)
plt.legend()
plt.title('Basic statistical analysis tools')
plt.draw()
plt.savefig('foo1.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="5e365acb90d033090d78a4180651829f913b91c4"
# 
# + [markdown] _uuid="b187b2ddc29d212754b57be3f1b505e90abb0657"
# ## Advanced statistical analysis tools
#
# User responses for their advanced statistical analysis tools such as SPSS, SAS, etc.
# + id="rWkYtlpfVUWc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 304} outputId="e04239d0-028a-442b-8df0-397aa0f202c7" _uuid="ff69417e775deae454e0ec83d13d2f674ddd3e8e" _kg_hide-input=true _kg_hide-output=true
# Q12 part 2 (advanced statistical analysis tools): top-10 bar chart, saved as foo2.png.
t_res_Q122 = np.array(list(response['Q12_Part_2_TEXT']))
dic_res_Q122 = clean_data(t_res_Q122)
plt_res_Q122 = dic_res_Q122[:10]
plt.bar(range(len(plt_res_Q122)), [val[1] for val in plt_res_Q122], align='center', color='g', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q122)), [val[0] for val in plt_res_Q122])
plt.xticks(rotation=70)
plt.legend()
plt.title('Advanced statistical analysis tools')
plt.draw()
plt.savefig('foo2.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="f9e4dc2cf1e9f67fcc0b1da23d022e8ceb1b07f6"
# 
# + [markdown] _uuid="5509454f78b7b2941fa6a8dfad792ba5b56cc902"
# ## Business intelligence tools
#
# User responses for business intelligence tools such as Salesforce, Tableau, Spotfire, etc.
# + id="eQyfNorfaVYs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="3ad55da9-4fd1-4d5f-f6f6-62298f1e700c" _uuid="5631810b198f369bf6666aaec9c5afc1efd43bb4" _kg_hide-input=true _kg_hide-output=true
# Q12 part 3 (business intelligence tools): top-10 bar chart, saved as foo3.png.
t_res_Q123 = np.array(list(response['Q12_Part_3_TEXT']))
dic_res_Q123 = clean_data(t_res_Q123)
plt_res_Q123 = dic_res_Q123[:10]
plt.bar(range(len(plt_res_Q123)), [val[1] for val in plt_res_Q123], align='center', color='orange', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q123)), [val[0] for val in plt_res_Q123])
plt.xticks(rotation=70)
plt.legend()
plt.title('Business intelligence tools')
plt.draw()
plt.savefig('foo3.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="a55ec1336e63175c78455eaa54bf4402031c1acb"
# 
# + [markdown] _uuid="205e187a096e63ff4d46d5a94b9cd534487e46da"
# ## Local or hosted environment tools
#
# User responses for local or hosted environment tools such as RStudio, JupyterLab, etc.
# + id="FNGkaaqsbnPR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="9f63472f-2368-4a55-968a-6dfc65648b11" _uuid="64988750356da88400154def5c54df44aa552146" _kg_hide-input=true _kg_hide-output=true
# Q12 part 4 (local or hosted environment tools): top-10 bar chart, saved as foo4.png.
t_res_Q124 = np.array(list(response['Q12_Part_4_TEXT']))
dic_res_Q124 = clean_data(t_res_Q124)
plt_res_Q124 = dic_res_Q124[:10]
plt.bar(range(len(plt_res_Q124)), [val[1] for val in plt_res_Q124], align='center', color='pink', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q124)), [val[0] for val in plt_res_Q124])
plt.xticks(rotation=70)
plt.legend()
plt.title('Local or hosted enviroment tools')
plt.draw()
plt.savefig('foo4.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="921d04a47ba2a364afd95221636de4b29605f978"
# 
# + [markdown] _uuid="3da1c8d95921b801d4b47272a5458d8bfdbdc7ac"
# ## Cloud based tools
#
# User responses for Cloud based tools such as AWS, GCP, Azure, etc.
# + id="DsMdZ6X-dwgu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 316} outputId="b43b7a0c-d67f-45ad-d680-94f7c28fd7c9" _uuid="941253d3a04b1374f01aea95b4f423d6d2d31d8c" _kg_hide-input=true _kg_hide-output=true
# Q12 part 5 (cloud based tools): top-10 bar chart, saved as foo5.png.
t_res_Q125 = np.array(list(response['Q12_Part_5_TEXT']))
dic_res_Q125 = clean_data(t_res_Q125)
plt_res_Q125 = dic_res_Q125[:10]
plt.bar(range(len(plt_res_Q125)), [val[1] for val in plt_res_Q125], align='center', color='skyblue', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q125)), [val[0] for val in plt_res_Q125])
plt.xticks(rotation=70)
plt.legend()
plt.title('Cloud based tools')
plt.draw()
plt.savefig('foo5.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="5ef9042aa47427d8c0b58d7275367c6e4ebb9c65"
# 
# + [markdown] _uuid="9ea714ec011969277962ddbad324f2173237d480"
# ## Programming language used on a regular basis
#
# User responses for programming language used on a regular basis such as Python, JAVA, Swift, etc.
# + id="r8MBUZJWezo-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 328} outputId="c1536c64-6410-4066-bd0e-fbd2c38b5b82" _uuid="876101c48e2b8691b154d8f91546d2f291880313" _kg_hide-input=true _kg_hide-output=true
# Q16 "other" answers (programming languages used regularly): top-15 bar chart, saved as foo6.png.
t_res_Q13 = np.array(list(response['Q16_OTHER_TEXT']))
dic_res_Q13 = clean_data(t_res_Q13)
plt_res_Q13 = dic_res_Q13[:15]
plt.bar(range(len(plt_res_Q13)), [val[1] for val in plt_res_Q13], align='center', color='purple', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q13)), [val[0] for val in plt_res_Q13])
plt.xticks(rotation=70)
plt.legend()
plt.title('Programming language used on a regular basis')
plt.draw()
plt.savefig('foo6.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="60a20d0792b6d70cee584803b87171aa3647fba8"
# 
# + [markdown] _uuid="a82b58735a7f315d69bbe5a4d4e1d801d2fa4bb9"
# ## Most used ML frameworks in the past 5 years
#
# User responses for most used ML frameworks in the past 5 years.
# + id="pZn0ORLOg87g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 307} outputId="58ea42ce-8018-45fe-9251-d06d098c26d1" _uuid="584570157440dc9f5df55203335330ed39357408" _kg_hide-input=true _kg_hide-output=true
# Q19 "other" answers (ML frameworks used in the past 5 years): top-15 bar chart, saved as foo7.png.
t_res_Q14 = np.array(list(response['Q19_OTHER_TEXT']))
dic_res_Q14 = clean_data(t_res_Q14)
plt_res_Q14 = dic_res_Q14[:15]
plt.bar(range(len(plt_res_Q14)), [val[1] for val in plt_res_Q14], align='center', color='crimson', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q14)), [val[0] for val in plt_res_Q14])
plt.xticks(rotation=70)
plt.legend()
plt.title('Most used ML frameworks in the past 5 years')
plt.draw()
plt.savefig('foo7.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="6c44916b578f67c872b01d0690a98b888d0f6626"
# 
# + [markdown] _uuid="307045fae0215f9830df986fda4030ec2ae02bc6"
# ## Most used data visualization tools in the past 5 years
#
# User responses for most used data visualization tools in the past 5 years.
# + id="PyS3idKfhvjW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 324} outputId="75edf343-97c3-4205-c574-0953cc7ffa3b" _uuid="c54f321c868c67d5568010be01eef9c032edcaf3" _kg_hide-input=true _kg_hide-output=true
# Q21 "other" answers (data visualization tools): top-10 bar chart, saved as foo8.png.
t_res_Q15 = np.array(list(response['Q21_OTHER_TEXT']))
dic_res_Q15 = clean_data(t_res_Q15)
plt_res_Q15 = dic_res_Q15[:10]
plt.bar(range(len(plt_res_Q15)), [val[1] for val in plt_res_Q15], align='center', color='yellow', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q15)), [val[0] for val in plt_res_Q15])
plt.xticks(rotation=70)
plt.legend()
plt.title('Most used data visualization tools in the past 5 years')
plt.draw()
plt.savefig('foo8.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="4dad4c44a9a32607b62bbcefc9315c721a9f2aca"
# 
# + [markdown] _uuid="1e5b8961b8ce20091ed5dfcb41273229d12adbcc"
# ## Public dataset source
#
# User responses for their most recognized public dataset source.
# + id="TQjJnwfVlMIt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="bbc431b2-5190-4c93-f4aa-fcf87e78a8c3" _uuid="0b38a55e4679978e800de36ca0f639a9b133379b" _kg_hide-input=true _kg_hide-output=true
# Q33 "other" answers (public dataset sources): top-10 bar chart, saved as foo9.png.
t_res_Q16 = np.array(list(response['Q33_OTHER_TEXT']))
dic_res_Q16 = clean_data(t_res_Q16)
plt_res_Q16 = dic_res_Q16[:10]
plt.bar(range(len(plt_res_Q16)), [val[1] for val in plt_res_Q16], align='center', color='violet', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q16)), [val[0] for val in plt_res_Q16])
plt.xticks(rotation=70)
plt.legend()
plt.title('Public dataset source')
plt.draw()
plt.savefig('foo9.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="598be2153b9399b86c34929e810bf12cbd956f3f"
# 
# + [markdown] _uuid="af45205809e001cc95a69dfc036cfa07f699362f"
# ## Favourite media sources for ML
#
# User responses for their favourite media sources for ML.
# + id="DKf7zQFrlsZE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="9ef8cfa6-92c2-4a4e-cc01-c1c8ba28f366" _uuid="f0b673fafbfe42d3d7b9cc2f802b99f1924640e9" _kg_hide-input=true _kg_hide-output=true
# Q38 "other" answers (favourite ML media sources): top-10 bar chart, saved as foo10.png.
t_res_Q17 = np.array(list(response['Q38_OTHER_TEXT']))
dic_res_Q17 = clean_data(t_res_Q17)
plt_res_Q17 = dic_res_Q17[:10]
plt.bar(range(len(plt_res_Q17)), [val[1] for val in plt_res_Q17], align='center', color='gold', label="Recorded user responses")
plt.xticks(range(len(plt_res_Q17)), [val[0] for val in plt_res_Q17])
plt.xticks(rotation=70)
plt.legend()
plt.title('Favourite media sources for ML')
plt.draw()
plt.savefig('foo10.png', bbox_inches='tight', dpi=300)
plt.show()
# + [markdown] _uuid="88943943f005fd42fc71eb31b1b4849a8a38472b"
# 
# + [markdown] id="apXPjceumKGV" colab_type="code" colab={} _uuid="948b4a94b7ffc3e5fa9edf734e206cc03819139f"
# ## Conclusively
#
# * Although tools do not define a data scientist, they're their greatest assets, and it's important to know the trends in toolkits — if only to collaborate efficiently with your team.
# * I've tried to clean and process much of the ambiguities in the responses but this may not represent appropriate trends or data.
# * In my personal experience, there will be a stage where you'll combine multiple tools and libraries for your end product but if you're aspiring to be a data-scientist, learning the internal workings of the tools & libraries is an important experience in the early stages.
# * I'll try and figure out (hopefully) more from schema's and maybe other datasets and update the version as such. Hope this was insightful!
| kaggle-survey-2018/What's in a Data Scientist's backpack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
"""GettingToKnowTheMelSpectrogram.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1q8lXmo2ZKJFiYf1Vjo3oNKCHZOKx8KHi
## Getting to Know the Mel Spectrogram
## This notebook was created to serve a blog post by the same name.
"""
# Commented out IPython magic to ensure Python compatibility.
# https://towardsdatascience.com/getting-to-know-the-mel-spectrogram-31bca3e2d9d0
import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal as sg
import numpy as np
# # %matplotlib inline
from IPython.display import Audio, display, Image
#function to find the fundamental pitch frequency counting zeroes
#From https://www.kaggle.com/asparago/simple-pitch-detector
def find_fundamental(signal, sampling_ratio):
    """Estimate the fundamental frequency (Hz) of a signal via autocorrelation.

    The signal is mean-centered, autocorrelated (keeping only non-negative
    lags), and the highest autocorrelation peak after the initial decaying
    lobe is taken as the fundamental period.
    Adapted from https://www.kaggle.com/asparago/simple-pitch-detector

    Parameters
    ----------
    signal : array_like
        1-D audio samples. One should be careful in deciding whether it is
        worth analysing the entire record or just chunks of it, excluding
        noisier parts.
    sampling_ratio : float
        Sampling rate of ``signal`` in Hz.

    Returns
    -------
    float
        Estimated fundamental frequency in Hz.

    Raises
    ------
    ValueError
        If no autocorrelation peak exists (e.g. a constant signal).
    """
    # Remove the horizontal (DC) offset before correlating.
    signal = np.asarray(signal) - np.mean(signal)
    # Autocorrelation = convolution of the signal with its time reversal;
    # throw away the negative lags (first half of the 'full' output).
    corr = sg.fftconvolve(signal, signal[::-1], mode='full')
    corr = corr[len(corr) // 2:]
    # The autocorrelation decays from lag 0; the first lag where the slope
    # turns positive marks the start of the search window for the true peak.
    diff = np.diff(corr)
    rising = np.nonzero(diff > 0)[0]
    if len(rising) == 0:
        raise ValueError("no autocorrelation peak found; signal may be constant")
    n = rising[0]
    peak = np.argmax(corr[n:]) + n
    return sampling_ratio / peak
# +
filename = "../audios/violin/violin_A4_025_piano_arco-normal.mp3"
y, sr = librosa.load(filename)
# trim silent edges
audio_trimmed, _ = librosa.effects.trim(y)
Audio(data=audio_trimmed, rate=sr)
"""#### When we talk about sound, we generally talk about a sequence of vibrations in varying pressure strengths, so to visualize sound kinda means to visualize air waves."""
librosa.display.waveplot(audio_trimmed, sr=sr);
# +
"""#### But this is a one dimensional representation of this complex and rich whale song. Another mathematical representation of sound is the Fourier Transform. Without going into too many details (watch this educational video for a comprehensible explanation), Fourier Transform is a function that gets a signal in the time domain as input, and outputs its decomposition into frequencies."""
# YouTubeVideo('spUNpyF58BY')
"""#### Let's take for example one short time window and see what we get from applying the Fourier Transform."""
n_fft = 2048
D = np.abs(librosa.stft(audio_trimmed[:n_fft], n_fft=n_fft, hop_length=n_fft+1))
plt.plot(D);
# plt.show()
# +
"""#### Now let's take the complete whale song, separate it to time windows, and apply the Fourier Transform on each time window."""
hop_length = 512
D = np.abs(librosa.stft(audio_trimmed, n_fft=n_fft, hop_length=hop_length))
librosa.display.specshow(D, sr=sr, x_axis='time', y_axis='linear');
plt.colorbar();
# plt.show()
# +
"""#### Wow can't see much here can we? It's because most sounds humans hear are concentrated in very small frequency and amplitude ranges.
#### Let's make another small adjustment - transform both the y-axis (frequency) to log scale, and the "color" axis (amplitude) to Decibels, which is kinda the log scale of amplitudes.
"""
DB = librosa.amplitude_to_db(D, ref=np.max)
librosa.display.specshow(DB, sr=sr, hop_length=hop_length, x_axis='time', y_axis='log');
plt.colorbar(format='%+2.0f dB');
# plt.show()
# +
"""### The Mel Scale
#### Let's forget for a moment about all these lovely visualization and talk math. The Mel Scale, mathematically speaking, is the result of some non-linear transformation of the frequency scale. This Mel Scale is constructed such that sounds of equal distance from each other on the Mel Scale, also "sound" to humans as they are equal in distance from one another.
#### In contrast to Hz scale, where the difference between 500 and 1000 Hz is obvious, whereas the difference between 7500 and 8000 Hz is barely noticeable.
#### Luckily, someone computed this non-linear transformation for us, and all we need to do to apply it is use the appropriate command from librosa.
"""
n_mels = 128
mel = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels)
# +
"""#### Yup. That's it.
#### But what does this give us?
#### It partitions the Hz scale into bins, and transforms each bin into a corresponding bin in the Mel Scale, using a overlapping triangular filters.
"""
plt.figure(figsize=(15, 4));
plt.subplot(1, 3, 1);
librosa.display.specshow(mel, sr=sr, hop_length=hop_length, x_axis='linear');
plt.ylabel('Mel filter');
plt.colorbar();
plt.title('1. Our filter bank for converting from Hz to mels.');
plt.subplot(1, 3, 2);
mel_10 = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=10)
librosa.display.specshow(mel_10, sr=sr, hop_length=hop_length, x_axis='linear');
plt.ylabel('Mel filter');
plt.colorbar();
plt.title('2. Easier to see what is happening with only 10 mels.');
plt.subplot(1, 3, 3);
idxs_to_plot = [0, 9, 49, 99, 127]
for i in idxs_to_plot:
plt.plot(mel[i]);
plt.legend(labels=['{}'.format(i+1) for i in idxs_to_plot]);
plt.title('3. Plotting some triangular filters separately.');
plt.tight_layout();
# +
"""#### Now what does this give us?
#### Now we can take the amplitude of one time window, compute the dot product with mel to perform the transformation, and get a visualization of the sound in this new frequency scale.
"""
plt.plot(D[:, 1]);
plt.plot(mel.dot(D[:, 1]));
plt.legend(labels=['Hz', 'mel']);
plt.title('One sampled window for example, before and after converting to mel.');
# +
"""### The Mel Spectrogram
#### We know now what is a Spectrogram, and also what is the Mel Scale, so the Mel Spectrogram, is, rather surprisingly, a Spectrogram with the Mel Scale as its y axis.
#### And this is how you generate a Mel Spectrogram with one line of code, and display it nicely using just 3 more:
"""
S = librosa.feature.melspectrogram(audio_trimmed, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels)
S_DB = librosa.power_to_db(S, ref=np.max)
plt.figure(figsize=(10, 6));
librosa.display.specshow(S_DB, sr=sr, hop_length=hop_length, x_axis='time', y_axis='mel');
plt.colorbar(format='%+2.0f dB');
# +
from scipy import signal as sg
from collections import Counter
# print(find_fundamental(S_DB, sr))
# Histogram of the dB values in the mel spectrogram: count how many times
# each exact value occurs across all mel bins and frames. Counter replaces
# the hand-rolled dict-membership/increment loop and preserves the same
# first-seen iteration order.
m = Counter(value for row in S_DB for value in row)
for k in m.keys():
    print(k)
    print(m[k])
    print("----")
print(S_DB)
# +
"""### Recap
#### The Mel Spectrogram is the result of the following pipeline:
1. **Separate to windows**: Sample the input with windows of size n_fft=2048, making hops of size hop_length=512 each time to sample the next window.
2. **Compute FFT** (Fast Fourier Transform) for each window to transform from time domain to frequency domain.
3. **Generate a Mel scale**: Take the entire frequency spectrum, and separate it into n_mels=128 evenly spaced frequencies. And what do we mean by evenly spaced? not by distance on the frequency dimension, but distance as it is heard by the human ear.
4. **Generate Spectrogram**: For each window, decompose the magnitude of the signal into its components, corresponding to the frequencies in the mel scale.
"""
# Sanity check that indeed we understood the underlying pipeline
S = librosa.feature.melspectrogram(audio_trimmed, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels)
fft_windows = librosa.stft(audio_trimmed, n_fft=n_fft, hop_length=hop_length)
magnitude = np.abs(fft_windows)**2
mel = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels)
assert (mel.dot(magnitude) == S).all()
Image(url="https://i.imgflip.com/37ohpy.jpg")
plt.show()
# -
| notebooks/.ipynb_checkpoints/Pruebas_Mel_spectogra-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The `poetpy` library is a Python wrapper for the [PoetryDB API](https://github.com/thundercomb/poetrydb). The library provides a Pythonic interface for interacting with and extracting information from the PoetryDB database to explore nearly 130 poets and more than 3,200 poems. In this introductory notebook, we will explore some of the basic functionality for interacting with the PoetryDB database.
# ## First Steps
# Make sure the `poetpy` library is installed. The easiest way to install the library is through pip.
#
# `pip install poetpy`
#
# An alternative installation option is to clone or download the Github repo of [`poetpy`](https://github.com/aschleg/poetpy) and invoke the `setup.py` installation command.
#
# `python setup.py install`
# ## Using the API
# The `get_poetry` function is the primary interface for interacting with the PoetryDB API.
from poetpy import get_poetry
# ### Basic Usage
# The only required parameter for accessing the PoetryDB API is the `input_term`. The `input_term` can be any one or a combination of 'author', 'title', 'lines', or 'linecount'. For example, let's say we are interested in finding all of the authors currently in the database.
authors = get_poetry('author')
# Because the output will be somewhat lengthy, let's just print the length of the returned object to see how many authors are in the database.
len(authors['authors'])
# We can also do the same as above but with the number of poems and sonnets in the database by changing the 'author' `input_term` to 'title'.
titles = get_poetry('title')
len(titles['titles'])
# We see there are just under 3,000 poems and 130 authors currently in the PoetryDB. With this information, we can then find the average number of poems for each author.
len(titles['titles']) / len(authors['authors'])
# ### Specifying Search Parameters
# In addition to the `input_term` parameter, a corresponding `search_term` parameter can also be passed to refine the returned results. For example, let's say we are interested in finding [William Shakespeare's](https://en.wikipedia.org/wiki/William_Shakespeare) poetry.
# Restored the author name (the placeholder '<NAME>' was a redaction
# artifact; the text below reports results "for William Shakespeare").
ab = get_poetry('author', 'William Shakespeare')
len(ab)
# The search found 162 matching poems and sonnets for William Shakespeare! Let's presume we are only interested in one of Shakespeare's sonnets. Rather than going through the relatively large JSON object that was returned in the previous search, we can edit the query to look for the title of the sonnet we want to return.
get_poetry('title', 'Sonnet 1: From fairest creatures we desire increase')
# Now, let's say we do not know the name of the sonnet we wish to find, but we do happen to know one of the lines from the sonnet. By specifying 'lines' in the `input_term` parameter and then passing the known line in the `search_term` parameter, the same result as before will be returned.
get_poetry('lines', 'But thou contracted to thine own bright eyes,')
# ### Filtering Returned Results
# The `get_poetry` function also provides an `output` parameter that can filter the data returned from a query. As an example, let's use the previous search but assume we are only interested in returning the author, title, and linecount of the matching sonnet.
get_poetry('lines', 'But thou contracted to thine own bright eyes,', 'author,title,linecount')
# Similar to the `input_term` parameter, the `output` parameter can be one or any combination of 'author', 'title', 'lines', 'linecount'.
# ### Combination Searches
# Multiple terms can be specified in the `input_term` parameter with a comma delimiter to return several result sets within one API call. The respective input terms should each have a corresponding `search_term` delimited by a semi-colon. For example, let's say we want to find the full title name and the line count of [John Milton's](https://en.wikipedia.org/wiki/John_Milton) poetry with [*Paradise Lost*](https://en.wikipedia.org/wiki/Paradise_Lost) in the title.
get_poetry('title,author', 'Paradise Lost;Milton', 'title,linecount')
# As another example, let's say we are interested in finding all of William Shakespeare's poems and sonnets that are fourteen lines long (a [sonnet](http://www.english.illinois.edu/maps/sonnet.htm) is a poem of 14 equal length lines).
# Restored the author name (the placeholder '<NAME>' was a redaction
# artifact; the surrounding text asks for William Shakespeare's 14-line poems).
fourteen_lines = get_poetry('author,linecount', 'William Shakespeare;14', 'title,linecount')
len(fourteen_lines)
# ## Other Resources
# The [PoetryDB API](https://github.com/thundercomb/poetrydb) Github page contains information on the implementation and design of the PoetryDB and its API, along with some more examples for working with the API (though they are not in Python).
| content/posts/Introduction to poetpy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Pandas as API references
# # Install packages
# %%capture
# %%bash
pip install -U pyarrow numpy# to read parquet
# + code_folding=[33, 38, 43]
import gc
import numpy as np
import pandas as pd
import warnings
import time
import gc
import os
import os
# Benchmark configuration.
instance_type = 'c5d4xlarge'  # change this
results_bucket = "s3://xdss-benchmarks/benchmarks"  # change this
name = 'pandas'
data_path = 'datasets/taxi_parquet/data_0.parquet'  # single file
output_file = f'{name}_{instance_type}_1m_results.csv'
results_path = f"results/{output_file}"
results_bucket = f"{results_bucket}/{output_file}"
# Collected timings, keyed by benchmark name (filled in by benchmark()).
benchmarks = {}
# Expensive operations (join/groupby) run once; cheap statistics run 5x.
single_repetition = 1
statistic_repetition = 5
# NYC bounding box (degrees) used to filter trips.
# (The original file assigned long_min/long_max/lat_min twice with identical
# values; the redundant duplicates are removed.)
long_min = -74.05
long_max = -73.75
lat_min = 40.58
lat_max = 40.90
def get_results(benchmarks, name):
    """Pack the timing dict into a one-column DataFrame labelled *name*."""
    frame = pd.DataFrame.from_dict(benchmarks, orient='index')
    frame.columns = [name]
    return frame
def persist():
    """Flush the collected timings to a local CSV and mirror it to S3."""
    # Collect garbage now so leftover objects do not distort later timings.
    gc.collect()
    # Relies on module-level globals: benchmarks, name, results_path, results_bucket.
    get_results(benchmarks, name).to_csv(results_path)
    os.system(f"aws s3 cp {results_path} {results_bucket}")
def benchmark(f, df, name, repetitions=1, **kwargs):
    """Time ``f(df, **kwargs)`` over *repetitions* runs and record the mean.

    The mean wall-clock time (seconds) is stored in the module-level
    ``benchmarks`` dict under *name*, persisted to disk/S3 via ``persist()``,
    printed, and returned.

    Changes vs. original: the unused ``ret`` binding and unused loop index
    were removed — only the timing matters, not the function's result.
    """
    times = []
    for _ in range(repetitions):
        start_time = time.time()
        f(df, **kwargs)  # result intentionally discarded
        times.append(time.time() - start_time)
    benchmarks[name] = np.mean(times)
    persist()
    print(f"{name} took: {benchmarks[name]}")
    return benchmarks[name]
# !mkdir -p results
# !mkdir -p datasets
print(f"test for {single_repetition} repetitions for join and groupby and {statistic_repetition} repetitions for statistics")
# -
# # Benchmark
# +
import pandas as pd
import numpy as np
# Load data
data = pd.read_parquet(data_path, engine='pyarrow')
print(f"size: {len(data)} with {len(data.columns)} columns")
# +
def read_file_parquet(df=None):
    """Read the benchmark parquet file from disk.

    *df* is ignored; the parameter only keeps the signature uniform with the
    other benchmarked functions so ``benchmark()`` can call them all alike.
    """
    return pd.read_parquet(data_path, engine='pyarrow')
benchmark(read_file_parquet, df=data, name='read_file', repetitions=statistic_repetition)
# +
def count(df=None):
    """Number of rows in *df*."""
    return len(df)
benchmark(count, df=data, name='count', repetitions=statistic_repetition)
# +
def mean(df):
    """Mean fare amount over all rows."""
    return df['fare_amount'].mean()
benchmark(mean, df=data, name='mean', repetitions=statistic_repetition)
# +
def standard_deviation(df):
    """Sample standard deviation (ddof=1) of the fare amount."""
    return df['fare_amount'].std()
benchmark(standard_deviation, df=data, name='standard deviation', repetitions=statistic_repetition)
# +
def mean_of_sum(df):
    """Mean of the per-row sum of fare amount and trip distance."""
    return (df['fare_amount'] + df['trip_distance']).mean()
benchmark(mean_of_sum, df=data, name='sum columns mean', repetitions=statistic_repetition)
# +
def sum_columns(df):
    """Element-wise sum of fare amount and trip distance (a Series)."""
    return df['fare_amount'] + df['trip_distance']
benchmark(sum_columns, df=data, name='sum columns', repetitions=statistic_repetition)
# +
def mean_of_product(df):
    """Mean of the per-row product of fare amount and trip distance."""
    return (df['fare_amount'] * df['trip_distance']).mean()
benchmark(mean_of_product, df=data, name='product columns mean', repetitions=statistic_repetition)
# +
def product_columns(df):
    """Element-wise product of fare amount and trip distance (a Series)."""
    return df['fare_amount'] * df['trip_distance']
benchmark(product_columns, df=data, name='product columns', repetitions=statistic_repetition)
# +
def complicated_arithmetic_operation(df):
    """Haversine-style central angle (radians) for every trip row.

    NOTE(review): as in the original benchmark, the cosine terms use the
    longitudes rather than the latitudes — reproduced verbatim so timings
    stay comparable.
    """
    lon_a = df.pickup_longitude
    lat_a = df.pickup_latitude
    lon_b = df.dropoff_longitude
    lat_b = df.dropoff_latitude
    hav = (np.sin((lon_b - lon_a) / 2 * np.pi / 180) ** 2
           + np.cos(lon_a * np.pi / 180) * np.cos(lon_b * np.pi / 180)
           * np.sin((lat_b - lat_a) / 2 * np.pi / 180) ** 2)
    return 2 * np.arctan2(np.sqrt(hav), np.sqrt(1 - hav))
benchmark(complicated_arithmetic_operation, df=data, name='arithmetic operation', repetitions=single_repetition)
# +
def mean_of_complicated_arithmetic_operation(df):
    """Mean haversine-style central angle (radians) over all trip rows.

    NOTE(review): as in the original benchmark, the cosine terms use the
    longitudes rather than the latitudes — reproduced verbatim so timings
    stay comparable.
    """
    lon_a = df.pickup_longitude
    lat_a = df.pickup_latitude
    lon_b = df.dropoff_longitude
    lat_b = df.dropoff_latitude
    hav = (np.sin((lon_b - lon_a) / 2 * np.pi / 180) ** 2
           + np.cos(lon_a * np.pi / 180) * np.cos(lon_b * np.pi / 180)
           * np.sin((lat_b - lat_a) / 2 * np.pi / 180) ** 2)
    angle = 2 * np.arctan2(np.sqrt(hav), np.sqrt(1 - hav))
    return angle.mean()
benchmark(mean_of_complicated_arithmetic_operation, df=data, name='arithmetic operation mean', repetitions=single_repetition)
# +
def value_counts(df):
    """Frequency of each distinct fare amount, most common first."""
    return df['fare_amount'].value_counts()
benchmark(value_counts, df=data, name='value counts', repetitions=statistic_repetition)
# +
def groupby_statistics(df):
    """Mean and std of fare and tip amounts, grouped by passenger count."""
    aggregations = {'fare_amount': ['mean', 'std'],
                    'tip_amount': ['mean', 'std']}
    return df.groupby(by='passenger_count').agg(aggregations)
benchmark(groupby_statistics, df=data, name='groupby statistics', repetitions=single_repetition)
# -
other = groupby_statistics(data)
# +
def join_data(df, other):
    """Index-aligned inner join of *df* with *other*."""
    return df.merge(other, left_index=True, right_index=True)
benchmark(join_data, data, name='join', repetitions=single_repetition, other=other)
# +
def join_count(df, other):
    """Row count of the index-aligned join of *df* and *other*."""
    return len(df.merge(other, left_index=True, right_index=True))
benchmark(join_count, data, name='join count', repetitions=single_repetition, other=other)
# -
# ## Filtered data
print(f"Prepare filtered data and deleted {gc.collect()} MB")
# Boolean mask selecting trips whose pickup AND dropoff coordinates both fall
# inside the NYC bounding box (long_min/long_max/lat_min/lat_max) defined above.
expr_filter = (data.pickup_longitude > long_min) & (data.pickup_longitude < long_max) & \
              (data.pickup_latitude > lat_min) & (data.pickup_latitude < lat_max) & \
              (data.dropoff_longitude > long_min) & (data.dropoff_longitude < long_max) & \
              (data.dropoff_latitude > lat_min) & (data.dropoff_latitude < lat_max)
# +
def filter_data(df):
    """Select trips inside the NYC bounding box.

    NOTE(review): relies on the module-level boolean mask ``expr_filter``,
    which was built against the full ``data`` frame — only valid while *df*
    shares that index.
    """
    return df[expr_filter]
benchmark(filter_data, data, name='filter data', repetitions=statistic_repetition)
# +
filtered = filter_data(data)
del data
print(f"cleaned {gc.collect()} mb")
# -
benchmark(mean, filtered, name='filtered mean', repetitions=statistic_repetition)
benchmark(standard_deviation, filtered, name='filtered standard deviation', repetitions=statistic_repetition)
benchmark(mean_of_sum, filtered, name ='filtered sum columns mean', repetitions=statistic_repetition)
benchmark(sum_columns, df=filtered, name='filtered sum columns', repetitions=statistic_repetition)
benchmark(mean_of_product, filtered, name ='filtered product columns mean', repetitions=statistic_repetition)
benchmark(product_columns, df=filtered, name='filtered product columns', repetitions=statistic_repetition)
benchmark(mean_of_complicated_arithmetic_operation, filtered, name='filtered arithmetic operation mean', repetitions=single_repetition)
benchmark(complicated_arithmetic_operation, filtered, name='filtered arithmetic operation', repetitions=single_repetition)
benchmark(value_counts, filtered, name ='filtered value counts', repetitions=statistic_repetition)
benchmark(groupby_statistics, filtered, name='filtered groupby statistics', repetitions=single_repetition)
other = groupby_statistics(filtered)
benchmark(join_data, filtered, name='filtered join', repetitions=single_repetition, other=other)
# Fixed: the original passed the misspelled name `filtfilterederd`, which
# raised NameError before the benchmark could run.
benchmark(join_count, filtered, name='filtered join count', repetitions=single_repetition, other=other)
print(name)
# Fixed: get_results() requires the column label as its second argument.
get_results(benchmarks, name)
| notebooks/pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Taking `examples/examples.ipynb` as a starting point.
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# +
import os
import sys
sys.path.append("..")
sys.path.append("../..")
import numpy as np
import pandas as pd
import yellowbrick as yb
# -
from yellowbrick.features.rankd import Rank1D, Rank2D, rank1d, rank2d
# +
# # !pip install pandas requests nose
# -
# ### Dataset
# +
# # %run download.py
# +
from download import download_all
## The path to the test data sets
FIXTURES = os.path.join(os.getcwd(), "data")
## Dataset loading mechanisms
datasets = {
"credit": os.path.join(FIXTURES, "credit", "credit.csv"),
"concrete": os.path.join(FIXTURES, "concrete", "concrete.csv"),
"occupancy": os.path.join(FIXTURES, "occupancy", "occupancy.csv"),
"mushroom": os.path.join(FIXTURES, "mushroom", "mushroom.csv"),
}
def load_data(name, download=True):
    """
    Loads and wrangles the passed in dataset by name.
    If download is specified, this method will download any missing files.

    Parameters
    ----------
    name : str
        Key into the module-level ``datasets`` mapping (e.g. 'credit').
    download : bool
        If True, fetch all datasets via ``download_all()`` when the requested
        file is missing; if False, raise ValueError instead.

    Returns
    -------
    pandas.DataFrame
        The dataset loaded from its CSV file.
    """
    # Get the path from the datasets
    path = datasets[name]
    # Check if the data exists, otherwise download or raise
    if not os.path.exists(path):
        if download:
            download_all()
        else:
            raise ValueError((
                "'{}' dataset has not been downloaded, "
                "use the download.py module to fetch datasets"
            ).format(name))
    # Return the data frame
    return pd.read_csv(path)
# -
# Load the classification data set
data = load_data('credit')
data.head()
# +
# Specify the features of interest
features = [
'limit', 'sex', 'edu', 'married', 'age', 'apr_delay', 'may_delay',
'jun_delay', 'jul_delay', 'aug_delay', 'sep_delay', 'apr_bill', 'may_bill',
'jun_bill', 'jul_bill', 'aug_bill', 'sep_bill', 'apr_pay', 'may_pay', 'jun_pay',
'jul_pay', 'aug_pay', 'sep_pay',
]
X = data[features]
y = data.default
# -
# ### Rank1D
# New visualizer
# get features from column names...
visualizer = Rank1D(algorithm='shapiro')
visualizer.fit_transform_poof(X, y);
# Raw numpy version
visualizer = Rank1D(algorithm='shapiro', features=features)
visualizer.fit_transform_poof(X.values, y.values);
# numpy version, no feature names
visualizer = Rank1D(algorithm='shapiro')
visualizer.fit_transform_poof(X.values, y.values);
# disable tick labels
visualizer = Rank1D(algorithm='shapiro', show_feature_names=False)
visualizer.fit_transform_poof(X.values, y.values);
# #### vertical orient
# get features from column names...
visualizer = Rank1D(algorithm='shapiro', orient='v')
visualizer.fit_transform_poof(X, y);
# Raw numpy version
visualizer = Rank1D(algorithm='shapiro', features=features, orient='v')
visualizer.fit_transform_poof(X.values, y.values);
# numpy version, no feature names
visualizer = Rank1D(algorithm='shapiro', orient='v')
visualizer.fit_transform_poof(X.values, y.values);
# disable tick labels
visualizer = Rank1D(algorithm='shapiro', show_feature_names=False, orient='v')
visualizer.fit_transform_poof(X.values, y.values);
# #### quick methods
# get features from column names...
rank1d(X, y);
# Raw numpy version
rank1d(X.values, y.values);
# numpy version, no feature names
rank1d(X.values, y.values);
# disable tick labels
rank1d(X.values, y.values, show_feature_names=False);
# #### quick methods, vertical
# get features from column names...
rank1d(X, y, orient='v');
# Raw numpy version
rank1d(X.values, y.values, orient='v');
# numpy version, no feature names
rank1d(X.values, y.values, orient='v');
# disable tick labels
rank1d(X.values, y.values, show_feature_names=False, orient='v');
# ### Rank2D
# Fixing order of the tick labels, using the feature names to label.
# get features from column names...
visualizer = Rank2D()
visualizer.fit_transform_poof(X, y);
# raw numpy version
visualizer = Rank2D(features=features)
visualizer.fit_transform_poof(X.values, y.values);
# numpy version, no feature names
visualizer = Rank2D()
visualizer.fit_transform_poof(X.values, y.values);
# disable tick labels
visualizer = Rank2D(show_feature_names=False)
visualizer.fit_transform_poof(X.values, y.values);
# #### Quick method
# get features from column names...
rank2d(X);
# raw numpy version
rank2d(X.values, features=features);
# numpy version, no feature names
rank2d(X.values);
# disable tick labels
rank2d(X, show_feature_names=False);
| examples/pbs929/rankd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Keras Intro: Fully Connected Models
#
# Keras Documentation: https://keras.io
#
# In this notebook we explore how to use Keras to implement Deep Fully Connected models
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# ## Shallow and Deep Networks
# +
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=1000, noise=0.1, random_state=0)
plt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5)
plt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5)
plt.legend(['0', '1'])
# -
X.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.3,
random_state=42)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD, Adam
# ### Shallow Model
model = Sequential()
model.add(Dense(1, input_shape=(2,), activation='sigmoid'))
model.compile(Adam(lr=0.05), 'binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=200, verbose=0)
results = model.evaluate(X_test, y_test)
results
# evaluate() above ran on the held-out test split, so report it as such
# (the original message said "Train set", which was misleading).
print("The Accuracy score on the Test set is:\t{:0.3f}".format(results[1]))
def plot_decision_boundary(model, X, y):
    """Draw the model's decision surface over a 2-D feature space.

    Evaluates ``model.predict`` on a 101x101 grid spanning the data range
    (padded by 0.1) and overlays the two classes as scatter points.
    """
    # Grid bounds: per-feature min/max padded by 0.1 in each direction
    # (assumes X has exactly two feature columns).
    amin, bmin = X.min(axis=0) - 0.1
    amax, bmax = X.max(axis=0) + 0.1
    hticks = np.linspace(amin, amax, 101)
    vticks = np.linspace(bmin, bmax, 101)
    aa, bb = np.meshgrid(hticks, vticks)
    # All grid points as an (n, 2) array of (x, y) pairs for prediction.
    ab = np.c_[aa.ravel(), bb.ravel()]
    c = model.predict(ab)
    cc = c.reshape(aa.shape)
    plt.figure(figsize=(12, 8))
    # Shaded contour of the predictions, then the raw data points on top.
    plt.contourf(aa, bb, cc, cmap='bwr', alpha=0.2)
    plt.plot(X[y==0, 0], X[y==0, 1], 'ob', alpha=0.5)
    plt.plot(X[y==1, 0], X[y==1, 1], 'xr', alpha=0.5)
    plt.legend(['0', '1'])
plot_decision_boundary(model, X, y)
# ### Deep model
model = Sequential()
model.add(Dense(4, input_shape=(2,), activation='tanh'))
model.add(Dense(2, activation='tanh'))
model.add(Dense(1, activation='sigmoid'))
model.compile(Adam(lr=0.05), 'binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, verbose=0)
model.evaluate(X_test, y_test)
from sklearn.metrics import accuracy_score, confusion_matrix
# +
y_train_pred = model.predict_classes(X_train)
y_test_pred = model.predict_classes(X_test)
print("The Accuracy score on the Train set is:\t{:0.3f}".format(accuracy_score(y_train, y_train_pred)))
print("The Accuracy score on the Test set is:\t{:0.3f}".format(accuracy_score(y_test, y_test_pred)))
# -
plot_decision_boundary(model, X, y)
# ## Multiclass classification
#
# ### The Iris dataset
df = pd.read_csv('../data/iris.csv')
import seaborn as sns
sns.pairplot(df, hue="species")
df.head()
X = df.drop('species', axis=1)
X.head()
target_names = df['species'].unique()
target_names
target_dict = {n:i for i, n in enumerate(target_names)}
target_dict
y= df['species'].map(target_dict)
y.head()
# ### Sparse Categorical Crossentropy
X_train, X_test, y_train, y_test = train_test_split(X.values, y.values,
test_size=0.2,
random_state=0)
model = Sequential()
model.add(Dense(3, input_shape=(4,), activation='softmax'))
model.compile(Adam(lr=0.1),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(X_train, y_train, epochs=20, validation_split=0.1)
y_pred = model.predict(X_test)
y_pred[:5]
y_test
y_pred_class = np.argmax(y_pred, axis=1)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred_class))
def pretty_confusion_matrix(y_true, y_pred, labels=target_names):
    """Return the confusion matrix as a DataFrame with readable labels.

    NOTE(review): *labels* defaults to the module-level ``target_names``
    array, captured once at definition time.
    """
    cm = confusion_matrix(y_true, y_pred)
    # Prefix the predicted-class columns so rows (true classes) and columns
    # (predicted classes) are distinguishable at a glance.
    pred_labels = ['Predicted '+ l for l in labels]
    df = pd.DataFrame(cm, index=labels, columns=pred_labels)
    return df
pretty_confusion_matrix(y_test, y_pred_class)
# ### Exercise 1
# The [Pima Indians dataset](https://archive.ics.uci.edu/ml/datasets/Pima+Indians+Diabetes) is a very famous dataset distributed by UCI and originally collected from the National Institute of Diabetes and Digestive and Kidney Diseases. It contains data from clinical exams for women age 21 and above of Pima indian origins. The objective is to predict based on diagnostic measurements whether a patient has diabetes.
#
# It has the following features:
#
# - Pregnancies: Number of times pregnant
# - Glucose: Plasma glucose concentration a 2 hours in an oral glucose tolerance test
# - BloodPressure: Diastolic blood pressure (mm Hg)
# - SkinThickness: Triceps skin fold thickness (mm)
# - Insulin: 2-Hour serum insulin (mu U/ml)
# - BMI: Body mass index (weight in kg/(height in m)^2)
# - DiabetesPedigreeFunction: Diabetes pedigree function
# - Age: Age (years)
#
# The last colum is the outcome, and it is a binary variable.
#
# In this first exercise we will explore it through the following steps:
#
# 1. Load the ..data/diabetes.csv dataset, use pandas to explore the range of each feature
# - For each feature draw a histogram. Bonus points if you draw all the histograms in the same figure.
# - Explore correlations of features with the outcome column. You can do this in several ways, for example using the `sns.pairplot` we used above or drawing a heatmap of the correlations.
# - Do features need standardization? If so what stardardization technique will you use? MinMax? Standard?
# - Prepare your final `X` and `y` variables to be used by a ML model. Make sure you define your target variable well. Will you need dummy columns?
# ## Exercise 2
# Build a fully connected NN model that predicts diabetes. Follow these steps:
#
# 1. Split your data in a train/test with a test size of 20% and a `random_state = 22`
# - define a sequential model with at least one inner layer. You will have to make choices for the following things:
# - what is the size of the input?
# - how many nodes will you use in each layer?
# - what is the size of the output?
# - what activation functions will you use in the inner layers?
# - what activation function will you use at output?
# - what loss function will you use?
# - what optimizer will you use?
# - fit your model on the training set, using a validation_split of 0.1
# - test your trained model on the test data from the train/test split
# - check the accuracy score, the confusion matrix and the classification report
# ## Exercise 3
# Compare your work with the results presented in [this notebook](https://www.kaggle.com/futurist/d/uciml/pima-indians-diabetes-database/pima-data-visualisation-and-machine-learning). Are your Neural Network results better or worse than the results obtained by traditional Machine Learning techniques?
#
# - Try training a Support Vector Machine or a Random Forest model on the exact same train/test split. Is the performance better or worse?
# - Try restricting your features to only 4 features like in the suggested notebook. How does model performance change?
| day_2/Lab_11_DL Keras Intro Fully Connected Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tomographic Reconstruction 1, Exercise 1
# ## Filtered Backprojection
# 11.07.2019,
# Image Processing Physics TU Muenchen,
# <NAME>, <NAME>, (<NAME>, <NAME>)
#
# This exercise will be about a very simplified implementation of tomographic
# reconstruction, using filtered backprojection.
#
# The exercise consists of three parts:
# First, you will simulate the data aquisistion in computed tomography, by
# calculating the sinogram from a given input sample slice.
# Second, you will have to apply a ramp filter to this sinogram.
# Third, you will implement a simple backprojection algorithm.
#
# If you do not manage to do one part of the exercise you can still go on by
# loading the provided .npy arrays 'sino_backup.npy' and
# 'filtered_sino_backup.npy'.
#
# You need to replace the ??? in the code with the required commands.
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import sys
# +
def forwardproject(sample, angles):
    """
    Simulate tomographic data acquisition from line projections.

    Rotates the sample slice to each projection angle and integrates (sums)
    along the vertical axis, producing one sinogram row per angle.
    """
    _, width = np.shape(sample)
    num_projections = len(angles)
    # One sinogram row per projection angle, one column per detector pixel.
    sinogram = np.zeros((num_projections, width))
    for idx, angle in enumerate(angles):
        sys.stdout.write("\r Simulating: %03i/%i" % (idx + 1, num_projections))
        sys.stdout.flush()
        # Rotating the sample is equivalent to rotating the beam direction.
        rotated = nd.rotate(sample, angle, reshape=False, order=1)
        # The line-projection integral is a sum along the beam axis.
        sinogram[idx, :] = np.sum(rotated, axis=0)
    return sinogram
def filter_ramlak(sinogram):
    """
    Apply a Ram-Lak (ramp) filter to a sinogram.

    Uses the convolution theorem: each detector row is transformed to
    Fourier space, multiplied by |frequency|, and transformed back.

    Parameters
    ----------
    sinogram : 2D array (projections x detector pixels).

    Returns
    -------
    2D array of the same shape containing the filtered sinogram.
    """
    num_pixels = np.shape(sinogram)[1]
    # |f| ramp in the unshifted FFT frequency ordering (DC term is zero).
    ramp = np.abs(np.fft.fftfreq(num_pixels))
    # Filter along the detector-pixel direction (axis=1).
    spectrum = np.fft.fft(sinogram, axis=1)
    filtered = np.fft.ifft(ramp * spectrum, axis=1)
    # Imaginary parts are numerical noise for real-valued input.
    return np.real(filtered)
def backproject(sinogram, angles):
    """
    Reconstruct a slice from a (filtered) sinogram by backprojection.

    Each sinogram row is smeared back across the image (the inverse of
    the summation in forwardproject) and rotated to its projection
    angle; accumulating all rotated backprojections yields the
    reconstruction.

    Parameters
    ----------
    sinogram : 2D array (projections x detector pixels).
    angles : sequence of projection angles in degrees, one per row.

    Returns
    -------
    2D array of shape (Npix, Npix) with the reconstructed slice.
    """
    # calculate number of projections, and pixels
    Nproj, Npix = np.shape(sinogram)
    # define empty container for reconstruction of sample
    reconstruction = np.zeros((Npix, Npix))
    for proj in np.arange(Nproj):  # loop over all projections
        sys.stdout.write("\r Reconstructing: %03i/%i" % (proj+1, Nproj))
        sys.stdout.flush()
        # Smear the projection back along the previously summed axis.
        # BUG FIX: cast to float first -- np.tile preserves an integer
        # dtype, and the in-place true division below raises a casting
        # error on integer arrays.
        backprojection = np.tile(sinogram[proj, :], (Npix, 1)).astype(float)
        backprojection /= Npix  # Just normalization
        # Rotate by the negative angle to undo the forward rotation.
        rotated_backprojection = nd.rotate(backprojection, -angles[proj],
                                           reshape=False, order=1)
        # Add the rotated backprojection
        reconstruction += rotated_backprojection
    return reconstruction
# -
# ## Part 1: Forward and Backprojection
# Read in sample data (in reality, this data is unknown and what you are
# looking for)
sample = plt.imread('Head_CT_scan.jpg')
# Define vector containing the projection angles
Nangles = 301
angles = np.linspace(0, 360, Nangles, False)
# Simulate the process of tomographic data acquisition by line projections
# +
sino = forwardproject(sample,angles)
# use this line if you do not manage the last step
# sino = np.load('sino_backup.npy')
# filter the sinogram with the ramp filter (or some other filter)
filtered_sino = filter_ramlak(sino)
# use this line if you do not manage the last step
# filtered_sino = np.load('filtered_sino_backup.npy')
# -
# Reconstruct the image from its filtered sinogram
#
reco = backproject(filtered_sino,angles)
plt.figure(1, figsize=(12, 12))
plt.subplot(2, 2, 1)
plt.imshow(sample, cmap='gray', interpolation='none')
plt.subplot(2, 2, 2)
plt.imshow(sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 3)
plt.imshow(filtered_sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 4)
plt.imshow(reco, vmin=0., cmap='gray', interpolation='none')
# ## Part 2: Image Artifacts
# ### Artifact 1 - Hot / Dead Pixel
# Single pixels on detector may not respond to x-rays. This results in reconstruction artifacts.
# +
Nangles = 301
angles = np.linspace(0, 360, Nangles, False)
sino = forwardproject(sample, angles)
# simulate a dead pixel in the detector line
sino[???] = 0
# filter the sinogram with the ramp filter and reconstruct it
filtered_sino = filter_ramlak(sino)
reco = backproject(filtered_sino, angles)
plt.figure(2, figsize=(12, 12))
plt.suptitle('dead pixel')
plt.subplot(2, 2, 1)
plt.imshow(sample, cmap='gray', interpolation='none')
plt.subplot(2, 2, 2)
plt.imshow(sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 3)
plt.imshow(filtered_sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 4)
plt.imshow(reco, vmin=sample.min(), vmax=sample.max(),
cmap='gray', interpolation='none')
# -
# ### Artifact 2 - Simulate a center shift
# Intrinsically, tomography assumes that the rotation axis is in the center of
# each projection. If this is not the case, each projection is shifted left or
# right with respect to the optical axis. These are called center shift.
# +
Nangles = 301
angles = np.linspace(0, 360, Nangles, False)
sino = forwardproject(sample, angles)
# shift the sinogram by a few pixels (~2) or pad the detector either to the
# left or right side.
sino = ???
# filter the sinogram with the ramp filter and reconstruct it
# BUG FIX: `filter_sino` is not defined anywhere in this notebook; the
# ramp filter defined above (and used by every other cell) is
# `filter_ramlak`, so calling `filter_sino` raises a NameError.
filtered_sino = filter_ramlak(sino)
reco = backproject(filtered_sino, angles)
plt.figure(3, figsize=(12, 12))
plt.suptitle('center shift')
plt.subplot(2, 2, 1)
plt.imshow(sample, cmap='gray', interpolation='none')
plt.subplot(2, 2, 2)
plt.imshow(sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 3)
plt.imshow(filtered_sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 4)
plt.imshow(reco, vmin=0, cmap='gray', interpolation='none')
# -
# ## Artifact 3 - few angles / undersampling
# When using fewer projections than usual, the image quality decreases.
# +
Nangles = ???
angles = np.linspace(0, 360, Nangles, False)
sino = forwardproject(sample, angles)
# filter the sinogram with the ramp filter and reconstruct it
filtered_sino = filter_ramlak(sino)
reco = backproject(filtered_sino, angles)
plt.figure(4, figsize=(12, 12))
plt.suptitle('undersampling')
plt.subplot(2, 2, 1)
plt.imshow(sample, cmap='gray', interpolation='none')
plt.subplot(2, 2, 2)
plt.imshow(sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 3)
plt.imshow(filtered_sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 4)
plt.imshow(reco, vmin=0., cmap='gray', interpolation='none')
# -
# ## Artifact 4 - missing projections up to tomosynthesis
# Replace the first 100 projections with zeros.
# +
Nangles = 301
angles = np.linspace(0, 180, Nangles, False)
sino = forwardproject(sample, angles)
# simulate one or more missing projections (e.g. replace with zeros) up to a
# missing projection wedge
sino[???] = 0
# filter the sinogram with the ramp filter and reconstruct it
filtered_sino = filter_ramlak(sino)
reco = backproject(filtered_sino, angles)
plt.figure(5, figsize=(12, 12))
plt.suptitle('missing projections')
plt.subplot(2, 2, 1)
plt.imshow(sample, cmap='gray', interpolation='none')
plt.subplot(2, 2, 2)
plt.imshow(sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 3)
plt.imshow(filtered_sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 4)
plt.imshow(reco, vmin=0., cmap='gray', interpolation='none')
# -
# ## Artifact 5 - Noise
# Add some noise to the projections/sinogram.
# +
Nangles = 301
angles = np.linspace(0, 360, Nangles, False)
sino = forwardproject(sample, angles)
# simulate noise
sino += ???
# filter the sinogram with the ramp filter and reconstruct it
filtered_sino = filter_ramlak(sino)
reco = backproject(filtered_sino, angles)
plt.figure(6, figsize=(12, 12))
plt.suptitle('noise')
plt.subplot(2, 2, 1)
plt.imshow(sample, cmap='gray', interpolation='none')
plt.subplot(2, 2, 2)
plt.imshow(sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 3)
plt.imshow(filtered_sino, cmap='gray', interpolation='none')
plt.subplot(2, 2, 4)
plt.imshow(reco, vmin=0, cmap='gray', interpolation='none')
| Assignment10/.ipynb_checkpoints/tomography1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This allows multiple outputs from a single jupyter notebook cell:
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# %matplotlib inline
import pandas as pd
pd.__version__ # for the record
df = pd.read_csv('../data/SPY_20110701_20120630_Bollinger.csv',index_col=0,parse_dates=True)
#df = df.loc['2012-01-01':,:]
df.shape
df.head(3)
df.tail(3)
# %matplotlib qt
import matplotlib.pyplot as plt
# +
# def plotbars(v,aa=None,w=0.5,e=0.5):
# x = range(len(v))
# fig, ax = plt.subplots()
# ax.bar(x,v,antialiased=aa,width=w,linewidth=0.0)
# plt.show(block=False)
# -
def plotbars(v, fid=1, dpi=100, **kwargs):
    """Draw the values in *v* as a bar chart in figure number *fid*.

    Extra keyword arguments are forwarded unchanged to ``Axes.bar``
    (e.g. ``width``, ``linewidth``, ``ec``, ``fc``).
    """
    positions = range(len(v))
    figure = plt.figure(fid, dpi=dpi)
    axes = figure.add_subplot()
    axes.bar(positions, v, **kwargs)
    # Non-blocking show so several figures can be compared side by side.
    plt.show(block=False)
# +
v1 = [202385700, 165936000, 143331600, 170464200, 194100500, 195918600, 214675700,
204062600, 226111800, 220012800, 196872100, 166554900, 137145400, 245246300,
126019400, 136653800, 131278200, 249020100, 207939900, 307038400, 325790900,
346653800, 370830800, 520721800, 655619200, 702263900, 717828700, 662607400,
487979700, 313731600, 258810600, 294095200, 238201100, 512956300, 428281300,
275090600, 331136600, 246869700, 312365400, 314495900, 190977200, 241315700,
301828400, 254585900, 255517200, 285130500, 209803200, 250568200, 380195100,
305793500]
v2 = v1 + v1 + v1
print('len(v1)=',len(v1))
print('len(v2)=',len(v2))
# plotbars(v1,w=0.2,e=0)
# plotbars(v2,w=0.2,e=0)
# plotbars(v1,w=.98,e=0)
# plotbars(v2,w=.98,e=0)
# plotbars(v1,w=1.0,e=0)
# plotbars(v2,w=1.0,e=0)
# -
STOP HERE
plotbars(v1)
plotbars(v2)
def adjust_brightness(color, amount=0.5):
    """Return *color* with its HLS lightness scaled by *amount*.

    ``amount`` < 1 darkens, > 1 brightens; the result is an RGB tuple.
    Accepts any matplotlib color spec, including named colors.
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        # Resolve a named color (e.g. 'c') to its hex value.
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt etc.;
        # only lookup failures (unknown name / unhashable spec) are expected.
        c = color
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    # Scale the lightness channel and clamp it to the valid [0, 1] range.
    return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
c1 = 'c'
c2 = adjust_brightness(c1,0.75)
print(c2)
print(c1)
plotbars(v2,1,dpi=135,ec=c2,fc=c1,linewidth=0.8,width=0.8)
plotbars(v2,2,dpi=135,ec=c2,fc=c1,linewidth=0.5,width=0.5)
#plotbars(v2,2,dpi=150,ec='k',linewidth=0.1)
v3 = [ (v,float('nan')) for v in v2]
v3 = [v for tup in v3 for v in tup]
len(v3)
plotbars(v2,w=0.5)
# +
plotbars(v3,w=1.0)
# -
STOP HERE
v = df['Volume'].values
#v = df['Close'].values
def plot_vol(v, aa=None, w=0.5):
    """Plot *v* as volume-style bars on a large fixed-layout figure.

    aa -- antialiasing flag forwarded to Axes.bar (None = rcParams default)
    w  -- bar width
    """
    figure = plt.figure(figsize=(20, 12))
    # Fixed axes rectangle [left, bottom, width, height] in figure coords.
    axes = figure.add_axes([0.15, 0.18, 0.70, 0.70])
    positions = range(len(v))
    axes.bar(positions, v, antialiased=aa, width=w)
    # Line-plot alternative kept for experimentation:
    # axes.plot(positions, v, antialiased=aa)
    plt.show()
y = v[0:51]
plot_vol(v,w=0.7)
plot_vol(y,w=0.7)
print(y)
def adjust_brightness2(color, amount=0.5):
    """Scale the lightness of one color or a list of colors by *amount*.

    Results for repeated colors are computed once and cached, which is
    the point of this variant (see the %timeit comparison below).
    Returns a single RGB tuple when exactly one color is produced,
    otherwise a list of RGB tuples.
    """
    def ab(c1, amount=0.5):
        # Same conversion as adjust_brightness(): resolve named colors,
        # scale the HLS lightness, clamp to [0, 1].
        import matplotlib.colors as mc
        import colorsys
        try:
            c = mc.cnames[c1]
        except (KeyError, TypeError):
            c = c1
        c = colorsys.rgb_to_hls(*mc.to_rgb(c))
        return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
    if not isinstance(color, (list, tuple)):
        color = [color, ]
    # BUG FIX: the original `cout = [] * len(color)` is a no-op -- an empty
    # list times anything is still [] -- so a plain [] is what was meant.
    cout = []
    cadj = {}  # cache: color spec -> adjusted RGB tuple
    for c1 in color:
        if c1 not in cadj:
            cadj[c1] = ab(c1, amount)
        cout.append(cadj[c1])
    # Preserve the original contract: a single result is unwrapped.
    if len(cout) == 1:
        return cout[0]
    return cout
cadj = adjust_brightness2('c',0.75)
cadj
cadj = adjust_brightness2(['c','r','g','b'],0.75)
cadj
vcolors= ['#007a00', '#d50d18', '#007a00', '#007a00', '#d50d18', '#d50d18', '#d50d18',
'#007a00', '#d50d18', '#007a00', '#d50d18', '#007a00', '#d50d18', '#007a00',
'#007a00', '#d50d18', '#d50d18', '#d50d18', '#d50d18', '#d50d18', '#d50d18',
'#d50d18', '#007a00', '#d50d18', '#d50d18', '#d50d18', '#007a00', '#d50d18',
'#007a00', '#007a00', '#007a00', '#d50d18', '#007a00', '#d50d18', '#d50d18',
'#007a00', '#007a00', '#007a00', '#d50d18', '#007a00', '#007a00', '#007a00',
'#007a00', '#d50d18', '#d50d18', '#d50d18', '#007a00', '#d50d18', '#d50d18',
'#007a00', '#007a00', '#007a00', '#007a00', '#007a00', '#d50d18', '#d50d18',
'#d50d18', '#d50d18', '#007a00', '#007a00', '#007a00', '#d50d18', '#007a00',
'#d50d18', '#d50d18', '#007a00', '#007a00', '#007a00', '#d50d18', '#007a00',
'#007a00', '#007a00', '#d50d18', '#007a00', '#d50d18', '#007a00', '#d50d18',
'#007a00', '#007a00', '#007a00', '#d50d18', '#007a00', '#007a00', '#d50d18',
'#d50d18', '#d50d18', '#007a00', '#007a00', '#d50d18', '#007a00', '#007a00',
'#d50d18', '#007a00', '#007a00', '#d50d18', '#007a00', '#d50d18', '#d50d18',
'#d50d18', '#d50d18', '#d50d18', '#d50d18', '#d50d18', '#007a00', '#007a00',
'#007a00', '#d50d18', '#d50d18', '#007a00', '#007a00', '#007a00', '#d50d18',
'#007a00', '#d50d18', '#d50d18', '#d50d18', '#007a00', '#d50d18', '#d50d18',
'#007a00', '#007a00', '#007a00', '#007a00', '#007a00', '#d50d18', '#007a00',
'#d50d18', '#007a00', '#007a00', '#007a00', '#d50d18', '#007a00', '#007a00',
'#007a00', '#007a00', '#d50d18', '#007a00', '#007a00', '#007a00', '#007a00',
'#d50d18', '#d50d18', '#007a00', '#d50d18', '#d50d18', '#d50d18', '#d50d18',
'#007a00', '#007a00', '#007a00', '#d50d18', '#007a00', '#007a00', '#007a00',
'#d50d18', '#007a00', '#d50d18', '#d50d18', '#007a00', '#007a00', '#007a00',
'#d50d18', '#007a00', '#007a00', '#007a00', '#007a00', '#d50d18', '#007a00',
'#d50d18', '#d50d18', '#d50d18', '#007a00', '#007a00', '#007a00', '#007a00',
'#007a00', '#d50d18', '#007a00', '#d50d18', '#007a00', '#d50d18', '#d50d18',
'#d50d18', '#007a00', '#007a00', '#d50d18', '#d50d18', '#d50d18', '#007a00',
'#007a00', '#d50d18', '#d50d18', '#d50d18', '#d50d18', '#d50d18', '#007a00',
'#007a00', '#d50d18', '#d50d18', '#007a00', '#d50d18', '#d50d18', '#007a00',
'#d50d18', '#007a00', '#007a00', '#007a00', '#007a00', '#d50d18', '#007a00',
'#d50d18', '#d50d18', '#d50d18', '#007a00', '#d50d18', '#d50d18', '#007a00',
'#d50d18', '#d50d18', '#d50d18', '#d50d18', '#d50d18', '#d50d18', '#007a00',
'#007a00', '#007a00', '#007a00', '#d50d18', '#007a00', '#d50d18', '#d50d18',
'#d50d18', '#d50d18', '#007a00', '#007a00', '#007a00', '#007a00', '#d50d18',
'#007a00', '#d50d18', '#007a00', '#007a00', '#007a00', '#007a00', '#d50d18',
'#d50d18', '#007a00', '#d50d18', '#007a00', '#007a00', '#d50d18', '#007a00']
# +
def without_cache(vcolors):
    """Adjust every color with a fresh computation each time (baseline)."""
    # BUG FIX: the original `vca = [] * len(vcolors)` is a no-op
    # (an empty list times anything is []); a plain [] is what was meant.
    vca = []
    for c in vcolors:
        vca.append(adjust_brightness(c))
    return vca
def with_cache(vcolors):
    """Adjust every color via the memoizing variant, for comparison."""
    return adjust_brightness2(vcolors)
# -
# %timeit a = without_cache(vcolors)
print(len(a))
a[0:5]
# %timeit a = with_cache(vcolors)
print(len(a))
a[0:5]
from mplfinance._helpers import _adjust_color_brightness
vadj = _adjust_color_brightness(vcolors)
print(len(vcolors))
print(len(vadj))
vadj[0:5]
vadj = _adjust_color_brightness(vcolors,0.75)
print(len(vcolors))
print(len(vadj))
vadj[0:5]
type(vcolors)
len(vcolors)
# +
STOP HERE
# +
import mplfinance as mpf
mpf.plot(df,volume=True,style='charles')
# -
# ---
#
# Let's say we want to plot the Lower Bollinger band along with the basic OHLCV plot.
#
# We use `make_addplot()` to create the addplot dict, and pass that into the plot() function:
# +
apdict = mpf.make_addplot(df['LowerB'])
mpf.plot(df,volume=True,addplot=apdict)
# -
# ---
#
# When creating the `addplot` dict, we can specify that we want a scatter plot:
# +
apd = mpf.make_addplot(df['LowerB'],type='scatter')
mpf.plot(df,addplot=apd)
# -
# ---
#
# The above example is a trivial use of a scatter plot, where the default line plot makes more sense.
#
# A more helpful use of a scatter plot might be to highlight specific movements in the data. For example, let's say we want to highlight whenever the "Percent B" Bollinger metric drops below zero. To do this, let's first calculate a series that contains this information:
def percentB_belowzero(percentB, price):
    """Return a scatter series marking where %B crosses below zero.

    For each date where `percentB` drops below 0 (coming from >= 0 on
    the previous bar), emit a marker slightly below the close price
    (price*0.99); everywhere else emit NaN so nothing is plotted.

    Parameters
    ----------
    percentB : pandas Series of Bollinger %B values, indexed by date.
    price : pandas Series of prices sharing the same index.
    """
    import numpy as np
    signal = []
    previous = -1.0  # seed < 0 so the very first bar can never trigger
    # BUG FIX: Series.iteritems() was removed in pandas 2.0; .items()
    # is the identical replacement.
    for date, value in percentB.items():
        if value < 0 and previous >= 0:
            signal.append(price[date]*0.99)
        else:
            signal.append(np.nan)
        previous = value
    return signal
# ---
# Take a small data set, and calculate a series that shows when the percentB falls below zero:
# +
tdf = df.loc['05-10-2012':'06-07-2012',] # Take a smaller data set so it's easier to see the scatter points
signal = percentB_belowzero(tdf['PercentB'], tdf['Close'])
# -
# ---
#
# Now plot the calculated information as an additional scatter plot on top of the OHLC data:
# +
apd = mpf.make_addplot(signal,type='scatter')
mpf.plot(tdf,addplot=apd)
# -
# ---
#
# We can customize the marker size and shape, to make the scatter markers easier to see:
# +
apd = mpf.make_addplot(signal,type='scatter',markersize=200,marker='^')
mpf.plot(tdf,addplot=apd)
# -
# ---
#
# ## Plotting multiple additional data sets
#
# There are two ways to plot multiple additional data sets.
#
# - If the configuration is the same for all additional data sets, simply pass a `DataFrame` for the data. All columns in the DataFrame will be plotted.
#
# - Alternatively you can create multiple `dict`s and pass a `list` of `dict`s to the `addplot` keyword
#
# ---
#
# Passing a DataFrame as the addplot data plots all columns in the DataFrame:
tcdf = df[['LowerB','UpperB']] # DataFrame with two columns
apd = mpf.make_addplot(tcdf)
mpf.plot(df,addplot=apd)
# ---
#
# Setting `addplot=` a `list` of `dict`s is another way to create multiple additional plots.<br>This method is necessary if the additional plots will have different configurations. For example:
# ---
#
# First prepare the data:
def percentB_aboveone(percentB, price):
    """Return a scatter series marking where %B crosses above one.

    For each date where `percentB` rises above 1 (coming from <= 1 on
    the previous bar), emit a marker slightly above the close price
    (price*1.01); everywhere else emit NaN so nothing is plotted.

    Parameters
    ----------
    percentB : pandas Series of Bollinger %B values, indexed by date.
    price : pandas Series of prices sharing the same index.
    """
    import numpy as np
    signal = []
    previous = 2  # seed > 1 so the very first bar can never trigger
    # BUG FIX: Series.iteritems() was removed in pandas 2.0; .items()
    # is the identical replacement.
    for date, value in percentB.items():
        if value > 1 and previous <= 1:
            signal.append(price[date]*1.01)
        else:
            signal.append(np.nan)
        previous = value
    return signal
low_signal = percentB_belowzero(df['PercentB'], df['Close'])
high_signal = percentB_aboveone(df['PercentB'], df['Close'])
# ---
#
# Now create the additional plot `dict`s and plot the data:
# +
apds = [ mpf.make_addplot(tcdf),
mpf.make_addplot(low_signal,type='scatter',markersize=200,marker='^'),
mpf.make_addplot(high_signal,type='scatter',markersize=200,marker='v'),
]
mpf.plot(df,addplot=apds,figscale=1.25,volume=True)
# -
# ---
#
# ## Plotting additional data on panel "B"
#
# ---
# We refer to the Main, Upper panel as Panel "A" and the Lower panel as Panel "B".
#
# It is possible to plot the additional data on Panel "B" (where volume is usually plotted).
# In this example, as is typical in Bollinger Band Analysis, we plot `PercentB` in panel B:
# +
apds = [ mpf.make_addplot(tcdf),
mpf.make_addplot(low_signal,type='scatter',markersize=200,marker='^'),
mpf.make_addplot(high_signal,type='scatter',markersize=200,marker='v'),
mpf.make_addplot((df['PercentB']),panel=1,color='g')
]
mpf.plot(df,addplot=apds,figscale=1.3,volume=True)
# -
# ---
#
# ## Plotting additional data with a *secondary y-axis*
#
# ---
#
# - Notice in the above plot, in the lower panel with the "Volume" bars, we see that "Percent B" line ***has its own y-axis on the right side.***
#
#
# - `mpf.make_addplot()` has a keyword argument called `secondary_y` which can have **three** possible values: **`True`**, **`False`**, and **`'auto'`**.
# - The default value is `'auto'` which means if you don't specify `secondary_y`, or if you specify `secondary_y='auto'`, then `mpf.plot()` will attempt to decide whether a secondary y-axis is needed, by comparing the order of magnitude of the addplot data with the order of magnitude of the data that is already on the plot.
# - If **`mpf.plot()`** gets it wrong, you can always override by setting **`secondary_y=True`** or **`secondary_y=False`**.
#
# ---
#
# - Below we see that `make_addplot()` also allows setting the `linestyle` for each additional plot.
#
#
# - **Notice also** that we pass an alternative "mplfinance `style`" to demonstrate that **if the `style` specifies that the *primary y-axis* should be on the right, then `mpf.plot()` knows to put any `secondary_y` axes on the left.**
# +
apds = [ mpf.make_addplot(tcdf,linestyle='dashdot'),
mpf.make_addplot(low_signal,type='scatter',markersize=200,marker='^'),
mpf.make_addplot(high_signal,type='scatter',markersize=200,marker='v'),
mpf.make_addplot((df['PercentB']),panel=1,color='g',linestyle='dotted')
]
mpf.plot(df,addplot=apds,figscale=1.5,volume=True,style='starsandstripes')
# -
# ---
# ---
#
# * **Below** we demonstrate that the main (upper) panel can also have a `secondary_y` axis:
import math
# Change order of magnitude and range of low_signal, so that it will require a `secondary_y`:
# note: this calculation has no financial meaning whatsoever; we are just generating some
# data to modify the order of magnitude and range, so as to be able to demonstrate
# secondary_y on the main panel.
new_low_signal = [x*20.*math.sin(x) for x in low_signal]
# +
apds = [mpf.make_addplot(tcdf,linestyle='dashdot'),
mpf.make_addplot(new_low_signal,type='scatter',markersize=200,marker='^',secondary_y='auto'),
mpf.make_addplot(high_signal,type='scatter',markersize=200,marker='v',color='orange'),
mpf.make_addplot((df['PercentB']),panel=1,color='g',linestyle='dotted')
]
mpf.plot(df,addplot=apds,figscale=1.5,volume=True,style='sas')
# -
# The same plot, with a style that puts the primary y-axis on the left:
mpf.plot(df,addplot=apds,figscale=1.5,volume=True,style='default')
| examples/scratch_pad/bar_width_issues.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/justin-hsieh/DS-Unit-2-Regression-Classification/blob/master/assignment_regression_classification_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="7IXUfiQ2UKj6" colab_type="text"
# Lambda School Data Science, Unit 2: Predictive Modeling
#
# # Regression & Classification, Module 4
#
# ## Assignment
#
# - [x] Watch <NAME>'s [video #1](https://www.youtube.com/watch?v=pREaWFli-5I) (12 minutes) & [video #2](https://www.youtube.com/watch?v=bDQgVt4hFgY) (9 minutes) to learn about the mathematics of Logistic Regression.
# - [x] Do train/validate/test split with the Tanzania Waterpumps data.
# - [x] Do one-hot encoding. (Remember it may not work with high cardinality categoricals.)
# - [x] Use scikit-learn for logistic regression.
# - [x] Get your validation accuracy score.
# - [x] Get and plot your coefficients.
# - [x] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
# - [x] Commit your notebook to your fork of the GitHub repo.
#
# > [Do Not Copy-Paste.](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit) You must type each of these exercises in, manually. If you copy and paste, you might as well not even do them. The point of these exercises is to train your hands, your brain, and your mind in how to read, write, and see code. If you copy-paste, you are cheating yourself out of the effectiveness of the lessons.
#
#
# ## Stretch Goals
#
# ### Doing
# - [ ] Add your own stretch goal(s) !
# - [ ] Clean the data. For ideas, refer to [The Quartz guide to bad data](https://github.com/Quartz/bad-data-guide), a "reference to problems seen in real-world data along with suggestions on how to resolve them." One of the issues is ["Zeros replace missing values."](https://github.com/Quartz/bad-data-guide#zeros-replace-missing-values)
# - [ ] Make exploratory visualizations.
# - [ ] Do [feature scaling](https://scikit-learn.org/stable/modules/preprocessing.html).
# - [ ] Try [scikit-learn pipelines](https://scikit-learn.org/stable/modules/compose.html).
#
#
# #### Exploratory visualizations
#
# Visualize the relationships between feature(s) and target. I recommend you do this with your training set, after splitting your data.
#
# For this problem, you may want to create a new column to represent the target as a number, 0 or 1. For example:
#
# ```python
# train['functional'] = (train['status_group']=='functional').astype(int)
# ```
#
#
#
# You can try [Seaborn "Categorical estimate" plots](https://seaborn.pydata.org/tutorial/categorical.html) for features with reasonably few unique values. (With too many unique values, the plot is unreadable.)
#
# - Categorical features. (If there are too many unique values, you can replace less frequent values with "OTHER.")
# - Numeric features. (If there are too many unique values, you can [bin with pandas cut / qcut functions](https://pandas.pydata.org/pandas-docs/stable/getting_started/basics.html?highlight=qcut#discretization-and-quantiling).)
#
# You can try [Seaborn linear model plots](https://seaborn.pydata.org/tutorial/regression.html) with numeric features. For this problem, you may want to use the parameter `logistic=True`
#
# You do _not_ need to use Seaborn, but it's nice because it includes confidence intervals to visualize uncertainty.
#
# #### High-cardinality categoricals
#
# This code from the previous assignment demonstrates how to replace less frequent values with 'OTHER'
#
# ```python
# # Reduce cardinality for NEIGHBORHOOD feature ...
#
# # Get a list of the top 10 neighborhoods
# top10 = train['NEIGHBORHOOD'].value_counts()[:10].index
#
# # At locations where the neighborhood is NOT in the top 10,
# # replace the neighborhood with 'OTHER'
# train.loc[~train['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# test.loc[~test['NEIGHBORHOOD'].isin(top10), 'NEIGHBORHOOD'] = 'OTHER'
# ```
#
# #### Pipelines
#
# [Scikit-Learn User Guide](https://scikit-learn.org/stable/modules/compose.html) explains why pipelines are useful, and demonstrates how to use them:
#
# > Pipeline can be used to chain multiple estimators into one. This is useful as there is often a fixed sequence of steps in processing the data, for example feature selection, normalization and classification. Pipeline serves multiple purposes here:
# > - **Convenience and encapsulation.** You only have to call fit and predict once on your data to fit a whole sequence of estimators.
# > - **Joint parameter selection.** You can grid search over parameters of all estimators in the pipeline at once.
# > - **Safety.** Pipelines help avoid leaking statistics from your test data into the trained model in cross-validation, by ensuring that the same samples are used to train the transformers and predictors.
#
# ### Reading
# - [ ] [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/)
# - [ ] [Always start with a stupid model, no exceptions](https://blog.insightdatascience.com/always-start-with-a-stupid-model-no-exceptions-3a22314b9aaa)
# - [ ] [Statistical Modeling: The Two Cultures](https://projecteuclid.org/download/pdf_1/euclid.ss/1009213726)
# - [ ] [_An Introduction to Statistical Learning_](http://faculty.marshall.usc.edu/gareth-james/ISL/ISLR%20Seventh%20Printing.pdf), Chapters 1-3, for more math & theory, but in an accessible, readable way (without an excessive amount of formulas or academic pre-requisites).
#
#
# + id="o9eSnDYhUGD7" colab_type="code" colab={}
# If you're in Colab...
import os, sys
in_colab = 'google.colab' in sys.modules
if in_colab:
# Install required python packages:
# category_encoders, version >= 2.0
# pandas-profiling, version >= 2.0
# plotly, version >= 4.0
# !pip install --upgrade category_encoders pandas-profiling plotly
# Pull files from Github repo
os.chdir('/content')
# !git init .
# !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
# !git pull origin master
# Change into directory for module
os.chdir('module4')
# + id="ipBYS77PUwNR" colab_type="code" colab={}
# Ignore this Numpy warning when using Plotly Express:
# FutureWarning: Method .ptp is deprecated and will be removed in a future version. Use numpy.ptp instead.
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning, module='numpy')
# + id="QJBD4ruICm1m" colab_type="code" outputId="b8cb4299-4adf-4dbe-c209-4f05706b21db" colab={"base_uri": "https://localhost:8080/", "height": 414}
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
train_features = pd.read_csv('../data/tanzania/train_features.csv')
train_labels = pd.read_csv('../data/tanzania/train_labels.csv')
test_features = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
assert train_features.shape == (59400, 40)
assert train_labels.shape == (59400, 2)
assert test_features.shape == (14358, 40)
assert sample_submission.shape == (14358, 2)
train_features.head()
# + id="4RKz31MvQsjc" colab_type="code" colab={}
#train_labels['functional'] = (train_labels['status_group']=='functional').astype(int)
train_X = train_features.drop(['permit','public_meeting'], axis =1)
train_y = train_labels['status_group']
# + id="2Amxyx3xphbb" colab_type="code" outputId="a99123ed-9917-435d-a51d-406feee69674" colab={"base_uri": "https://localhost:8080/", "height": 34}
train_X, val_X, train_y, val_y = train_test_split(
train_X, train_y, train_size=0.80, test_size=0.20,
stratify=train_y,random_state=42
)
train_X.shape, val_X.shape, train_y.shape, val_y.shape
# + id="NIH2SqeMZ5Vr" colab_type="code" outputId="2b075270-00a1-4eaf-ee33-ff05051fe393" colab={"base_uri": "https://localhost:8080/", "height": 224}
train_X_numeric = train_X.select_dtypes('number')
val_X_numeric = val_X.select_dtypes('number')
train_X_category = train_X.select_dtypes('category')
train_X_numeric.head()
# + id="0GzL3Bgw20pz" colab_type="code" colab={}
# BUG FIX: the target `status_group` holds class labels (strings), so this
# is a classification task.  RandomForestRegressor cannot fit string
# targets and accuracy_score needs discrete class predictions, so a
# RandomForestClassifier is required here.
from sklearn.ensemble import RandomForestClassifier

# Impute numeric columns with a constant fill value.
numerical_transformer = SimpleImputer(strategy='constant')
# Impute categoricals with the most frequent value, then one-hot encode.
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
# Keep only low-cardinality object columns for one-hot encoding.
categorical_cols = [name for name in train_X.columns if
                    train_X[name].nunique() < 10 and
                    train_X[name].dtype == "object"]
numerical_cols = [name for name in train_X.columns if
                  train_X[name].dtype in ['int64', 'float64']]
preprocessor = ColumnTransformer(
    transformers=[
        ('num', numerical_transformer, numerical_cols),
        ('cat', categorical_transformer, categorical_cols)
    ])
my_cols = numerical_cols + categorical_cols
X_train = train_X[my_cols].copy()
X_val = val_X[my_cols].copy()
model = RandomForestClassifier(n_estimators=100, random_state=0)
my_pipeline = Pipeline(steps=[('preprocessor', preprocessor),
                              ('model', model)
                              ])
my_pipeline.fit(X_train, train_y)
prediction = my_pipeline.predict(X_val)
accuracy_score(val_y, prediction)
# + id="fX87ZK1eGcT2" colab_type="code" colab={}
X_train.isna().sum()
# + id="gcKz-_CAFUFh" colab_type="code" colab={}
pipe = make_pipeline(
OneHotEncoder(categories='auto'),
LogisticRegression(solver='lbfgs', multi_class='ovr',max_iter=500)
)
pipe.fit(X_train, train_y)
prediction = pipe.predict(X_val)
accuracy_score(val_y, prediction)
# + id="Dfnm8M7WEO8C" colab_type="code" outputId="1e1686c1-75e6-41b0-ef62-367f3f24186c" colab={"base_uri": "https://localhost:8080/", "height": 34}
prediction
# + id="weEM1fwQNi63" colab_type="code" outputId="8aac2292-3f14-476a-f4cd-2780255d68fe" colab={"base_uri": "https://localhost:8080/", "height": 34}
X_train_numeric = train_X.select_dtypes('number')
X_val_numeric = val_X.select_dtypes('number')
model = LogisticRegression(max_iter=5000, solver='lbfgs', multi_class='auto')
model.fit(X_train_numeric, train_y)
val_predictions = model.predict(X_val_numeric)
accuracy_score(val_y, val_predictions)
# + id="DrQpsgKEEJvi" colab_type="code" colab={}
X_test_subset = test_features.drop(['permit','public_meeting'], axis =1)
test = X_test_subset.select_dtypes('number')
some_pred = model.predict(test)
submission = sample_submission.copy()
submission['status_group'] = some_pred
submission.to_csv('submission-01.csv', index=False)
# + id="mangX3x7TXUN" colab_type="code" colab={}
# !head submission-01.csv
# + id="NXnIOJb8c9ta" colab_type="code" outputId="c48c99c4-1774-4847-d4da-3d903a13444f" colab={"base_uri": "https://localhost:8080/", "height": 365}
from google.colab import files
# Just try again if you get this error:
# TypeError: Failed to fetch
# https://github.com/googlecolab/colabtools/issues/337
files.download('submission-01.csv')
| assignment_regression_classification_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import pandas as pd
import requests
from bs4 import BeautifulSoup
# # Web Scraping
pd.set_option('display.max_rows', 500)
# Johns Hopkins CSSE confirmed-cases time series (one row per region).
data_path= "../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
pd.read_csv(data_path)
# Scrape the RKI case-number page and pull out its HTML table.
page = requests.get("https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html")
soup = BeautifulSoup(page.content, 'html.parser')
soup.find('table')
html_table=soup.find('table')
all_rows=html_table.find_all('tr')
final_data_list=[]
# Collect the text of every data cell, one list per table row.
for pos,rows in enumerate(all_rows):
    col_list=[each_col.get_text(strip=True) for each_col in rows.find_all('td')] #td for data element
    final_data_list.append(col_list)
# dropna() removes header/separator rows that had no <td> cells;
# column order follows the RKI table layout.
pd_daily_status=pd.DataFrame(final_data_list).dropna().rename(columns={0:'state',
                       1:'cases',
                       2:'changes',
                       3:'cases_per_100k',
                       4:'fatal',
                       5:'comment'})
pd_daily_status
# # API Calls
# ArcGIS REST query for per-Bundesland case counts (returns JSON features).
data=requests.get('https://services7.arcgis.com/mOBPykOjAyBO2ZKk/arcgis/rest/services/Coronaf%C3%A4lle_in_den_Bundesl%C3%A4ndern/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json')
json_object=json.loads(data.content)
json_object.keys()
full_list=[]
# Flatten each feature's 'attributes' dict into one row.
for pos,each_dict in enumerate (json_object['features'][:]):
    full_list.append(each_dict['attributes'])
pd_full_list=pd.DataFrame(full_list)
pd_full_list
# +
headers = {
    'Cache-Control': 'no-cache',
    # SECURITY: hard-coded API subscription key committed to the repo --
    # rotate it and load it from an environment variable instead.
    'Subscription-Key': '28ee4219700f48718be78b057beb7eb4',
}
response = requests.get('https://api.smartable.ai/coronavirus/stats/US', headers=headers)
print(response)
# -
# Persist the US stats payload as pretty-printed JSON.
US_dict=json.loads(response.content)
with open('../data/raw/SMARTABLE/US_data.json', 'w') as outfile:
    json.dump(US_dict, outfile,indent=2)
print(json.dumps(US_dict,indent=2))
| notebooks/.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# MechStiff tutorial
# ----------------
#
# This is an example how to use Mechanical Stiffness Calculations implemented in ProDy package.
#
# The theory of MechStiff has been described in:
#
# <NAME>, <NAME> (2008) Toward a molecular understanding of the anisotropic response of proteins to external forces: insights from elastic network models Biophys J 94: 3424-35. PMID: 18223005; PMC2292382
#
# http://www.ncbi.nlm.nih.gov/pubmed/18223005?ordinalpos=5&itool=EntrezSystem2.PEntrez.Pubmed.Pubmed_ResultsPanel.Pubmed_RVDocSum
# %matplotlib inline
from prody import *
import matplotlib.pylab as plt
# Fetch/parse GFP (PDB id 1gfl), keeping the header for later use.
gfp, header = parsePDB('1gfl', header=True)
gfp
# Work on the C-alpha trace of chain A only.
calphas = gfp.select('protein and chain A and name CA')
calphas
# Building Hessian matrix using ANM model
# -----------------
#
# We instantiate an ANM instance and build the Hessian matrix from the selected atoms.
anm = ANM('GFP ANM analysis')
anm.buildHessian(calphas, cutoff=13.0)
anm.getHessian().round(3)
# Mechanical Stiffness Matrix Calculations
# --------------------------------------
#
# We can perform MechStiff calculations for the selected group of 230 C-alpha atoms.
# +
# anm.buildMechStiff?
# -
anm.buildMechStiff(calphas)
anm.getStiffness()
# **How to take a part of MechStiff matrix:**
cut = anm.getStiffness()[5:15, 20:40]
plt.imshow(cut)
plt.colorbar()
# To obtain the matrix of effective spring constant values use:
showMechStiff(anm, calphas, 'jet_r')
# The mean value of the spring constant is also significant and can be obtained using the showMeanMechStiff() function. Arrows and triangles represent beta strands and helices. This information can also be seen on the 3D structure of the protein using writeDeformProfile().
showMeanMechStiff(anm, calphas, header, 'A', 'jet_r')
# Mechanical Stiffness in VMD
# ---------------------------
# Mechanical Stiffness results can be seen in VMD program using:
#
# - writeVMDstiffness() - shows pair of residues with selected range of spring constant
# - writeDeformProfile() - deformability profile will be loaded to VMD program
pdb = gfp.select('chain A')
# Residues 3-7, spring constants in [0, 7.5]; loadToVMD=False only writes files.
writeVMDstiffness(anm, pdb, [3,7], [0,7.5], filename='1gfl_3-7aa', loadToVMD=False)
# +
# writeVMDstiffness?
# -
writeVMDstiffness(anm, pdb, [3], [0,7], filename='1gfl_3', loadToVMD=False)
anm.getStiffnessRange()
writeDeformProfile(anm, pdb, selstr='chain A and name CA', pdb_selstr='protein', loadToVMD=False)
# Calculate Distribution of Deformation
# --------------------------------
# Distribution of the deformations in the distance d contributed by each mode k in the presence of extensional forces applied to residues i and j. In this example it will be between residue nr 3 and 132.
showPairDeformationDist(anm, calphas, 3, 132)
# How to put multiple results on one plot
# -------------------------------
#
# The results can be used directly from the function. Below is an example of how to show two results on one plot.
import matplotlib
import matplotlib.pylab as plt
# Deformation distributions for two residue pairs sharing residue 212.
D1 = calcPairDeformationDist(anm, calphas, 3, 212)
D2 = calcPairDeformationDist(anm, calphas, 132, 212)
matplotlib.rcParams['font.size'] = '16'
fig = plt.figure(num=None, figsize=(12,8), facecolor='w')
plt.plot(D1[0], D1[1], 'k-', D2[0], D2[1], 'r-')
plt.xlabel('mode (k)', fontsize = '18')
plt.ylabel('d$^k$' '($\AA$)', fontsize = '18')
plt.grid()
plt.show()
| _static/ipynb/MechStiff_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: speech
# language: python
# name: speech
# ---
import pandas as pd
import numpy as np
# Bug fix: `sep` was passed positionally to read_csv/to_csv; positional
# arguments after the path are deprecated since pandas 1.4 and removed
# in pandas 2.0, so pass sep='|' as a keyword throughout.
# NOTE(review): the .sample() calls use no random_state, so the splits
# are not reproducible across runs -- confirm that is acceptable.
full_data = pd.read_csv('data/LJSpeech-1.1/metadata.csv', sep='|', names=["id", 'text', 'label'])
print(len(full_data))
full_data
# Hold out 600 rows to be divided into validation and test sets.
valtest = full_data.sample(600)
print(len(valtest))
valtest
full_train = full_data.drop(valtest.index) # full train
print(len(full_train))
full_train
# Split the 600 held-out rows evenly: 300 validation, 300 test.
val = valtest.sample(300)
print(len(val))
val
test = valtest.drop(val.index)
print(len(test))
test
# From the training pool, mark 200 rows as labeled; the rest are unlabeled.
labeled = full_train.sample(200)
print(len(labeled))
labeled
unlabeled = full_train.drop(labeled.index)
print(len(unlabeled))
unlabeled
# # Final Sets
print(len(val))
val
print(len(test))
test
print(len(labeled))
labeled
print(len(unlabeled))
unlabeled
# Write all splits pipe-delimited, matching the LJSpeech metadata format.
val.to_csv('val.csv', sep='|', header=False, index=False)
test.to_csv('test.csv', sep='|', header=False, index=False)
labeled.to_csv('labeled_train.csv', sep='|', header=False, index=False)
unlabeled.to_csv('unlabeled_train.csv', sep='|', header=False, index=False)
# # Merge train datasets
labeled = pd.read_csv('data/splits/labeled_train.csv', sep='|', names=["id", 'text', 'label'])
unlabeled = pd.read_csv('data/splits/unlabeled_train.csv', sep='|', names=["id", 'text', 'label'])
full_train = pd.concat([labeled,unlabeled])
print(len(full_train))
full_train
full_train.to_csv('full_train.csv', sep='|', header=False, index=False)
| src/creating_dataset_splits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ML Pipeline Preparation
# Follow the instructions below to help you create your ML pipeline.
# ### 1. Import libraries and load data from database.
# - Import Python libraries
# - Load dataset from database with [`read_sql_table`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_sql_table.html)
# - Define feature and target variables X and Y
import nltk
# Download tokenizer models, WordNet lemmatizer data, and stop words.
nltk.download(['punkt','wordnet','stopwords'])
# +
# import libraries
import re
import time
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from sklearn.pipeline import Pipeline,FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,classification_report
from sklearn.multioutput import MultiOutputClassifier
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
# -
# load data from database
engine = create_engine('sqlite:///disaster_response.db')
df = pd.read_sql_table('disaster_data1',engine)
df.head()
df.info()
df.isnull().sum()
# NOTE(review): dropna() discards every row with ANY missing value,
# which can remove a large share of the data -- confirm this is intended.
df=df.dropna()
df.isnull().sum()
# +
# X: raw message text; y: the category columns (from column index 4 on).
X = df['message'].values
y = df.iloc[:,4:].values
# -
# ### 2. Write a tokenization function to process your text data
def tokenize(text):
    """Normalize, de-URL, tokenize, and lemmatize `text`.

    URLs are replaced by the constant token 'urlplaceholder'; the text
    is lower-cased and word-tokenized; stop words and non-alphanumeric
    tokens are dropped; surviving tokens are WordNet-lemmatized.
    Returns a list of str.
    """
    stop_words = stopwords.words('english')
    lemmatizer = WordNetLemmatizer()
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    # Swap every detected URL for a single placeholder token.
    for url in re.findall(url_regex, text):
        text = text.replace(url, 'urlplaceholder')
    cleaned = []
    for token in word_tokenize(text.lower()):
        if token.isalnum() and token not in stop_words:
            cleaned.append(lemmatizer.lemmatize(token).strip())
    return cleaned
# ### 3. Build a machine learning pipeline
# This machine learning pipeline should take in the `message` column as input and output classification results on the other 36 categories in the dataset. You may find the [MultiOutputClassifier](http://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html) helpful for predicting multiple target variables.
# Text-classification pipeline: token counts -> TF-IDF -> one random
# forest per output category via MultiOutputClassifier.
pipeline = Pipeline([
    ('vect_count', CountVectorizer(tokenizer=tokenize)),
    ('tfidf',TfidfTransformer()),
    ('clf',MultiOutputClassifier(RandomForestClassifier()))
])
# #### 4. Train pipeline
# - Split data into train and test sets
# - Train pipeline
X_train,X_test,y_train,y_test= train_test_split(X,y)
#train classifier
pipeline.fit(X_train,y_train)
#y_pred= pipeline.predict(X_test)
# ### 5. Test your model
# Report the f1 score, precision and recall for each output category of the dataset. You can do this by iterating through the columns and calling sklearn's `classification_report` on each.
def model_performance(model,X_test,y_test):
    """Print a per-category classification report for a fitted
    multi-output classifier.

    Parameters
    ----------
    model : fitted estimator with a `predict` method.
    X_test : array of input messages.
    y_test : 2-d array of true labels, one column per category.

    Relies on the module-level `df`; its columns from index 4 onward
    are the category names, matching the columns of `y_test`.
    """
    # Bug fix: `list(df[4:])` listed ALL column names (df[4:] slices
    # rows, not columns), mislabeling the reports and over-running the
    # label columns; take the column names from index 4 onward instead.
    categories_names = list(df.columns[4:])
    y_pred = model.predict(X_test)
    for i in range(len(categories_names)):
        # Bug fix: the original `"categories_name {}",format(...)` used a
        # comma (two print arguments, literal "{}" printed) instead of
        # calling str.format on the template.
        print("-------------------- \n",
              "categories_name {}".format(categories_names[i]),
              "\n",
              classification_report(y_test[:, i], y_pred[:, i]))
model_performance(pipeline,X_test,y_test)
# ### 6. Improve your model
# Use grid search to find better parameters.
pipeline.get_params()
# +
# Hyper-parameter grid over the vectorizer, TF-IDF, and forest size.
parameters = {
    'vect_count__ngram_range': ((1, 1),(1,2)),
    'vect_count__max_df': (0.5,0.75,1.0),
    'tfidf__use_idf': (True,False),
    'clf__estimator__n_estimators': [10,20]
}
start= time.process_time()
cv = GridSearchCV(estimator=pipeline, param_grid= parameters, n_jobs= 4, verbose=2)
cv.fit(X_train, y_train)
# NOTE(review): process_time() does not count time spent in the
# n_jobs=4 worker processes, so this under-reports the true cost.
print(time.process_time() - start)
# -
# -
# ### 7. Test your model
# Show the accuracy, precision, and recall of the tuned model.
#
# Since this project focuses on code quality, process, and pipelines, there is no minimum performance metric needed to pass. However, make sure to fine tune your models for accuracy, precision and recall to make your project stand out - especially for your portfolio!
# ### 8. Try improving your model further. Here are a few ideas:
# * try other machine learning algorithms
# * add other features besides the TF-IDF
# +
# Alternative model: same text features, but a KNN classifier per category.
pipeline2 = Pipeline([
    ('features', FeatureUnion([
        ('text_pipeline', Pipeline([
            ('vect', CountVectorizer(tokenizer=tokenize)),
            ('tfidf', TfidfTransformer())
        ]))
    ])),
    ('clf', MultiOutputClassifier(KNeighborsClassifier()))
])
pipeline2.fit(X_train, y_train)
model_performance(pipeline2,X_test, y_test)
# -
# NOTE(review): `parameters` was written for `pipeline` (keys like
# 'vect_count__...' and 'clf__estimator__n_estimators'); pipeline2's
# steps are 'features'/'clf' with a KNN estimator, so fitting cv2 with
# this grid would fail. cv2 is only displayed here, never fit.
cv2 = GridSearchCV(pipeline2, param_grid=parameters)
cv2
# ### 9. Export your model as a pickle file
import pickle
# Bug fixes: the dump and load paths disagreed ('train_model' vs
# 'train_model.pkl'), and pickle.load was given a path string where it
# requires a binary file object. Use one path and context managers so
# the file handles are closed deterministically.
model_filepath = 'train_model.pkl'
with open(model_filepath, 'wb') as f:
    pickle.dump(cv, f)
with open(model_filepath, 'rb') as f:
    model = pickle.load(f)
# ### 10. Use this notebook to complete `train.py`
# Use the template file attached in the Resources folder to write a script that runs the steps above to create a database and export a model based on a new dataset specified by the user.
| models/ML Pipeline Preparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Draw Causal Graph
# ## Import and settings
# In this example, we need to import `numpy`, `pandas`, and `graphviz` in addition to `lingam`.
# And to draw the causal graph, we need to import `make_dot` method from `lingam.utils`.
# +
import numpy as np
import pandas as pd
import graphviz
import lingam
from lingam.utils import make_dot
print([np.__version__, pd.__version__, graphviz.__version__, lingam.__version__])
np.set_printoptions(precision=3, suppress=True)
np.random.seed(0)
# -
# ## Draw the result of LiNGAM
# First, we can draw a simple graph that is the result of LiNGAM.
# +
# Synthetic data from a known linear SEM: x3 is the root cause, the
# other variables are linear combinations of their parents plus noise.
x3 = np.random.uniform(size=10000)
x0 = 3.0*x3 + np.random.uniform(size=10000)
x2 = 6.0*x3 + np.random.uniform(size=10000)
x1 = 3.0*x0 + 2.0*x2 + np.random.uniform(size=10000)
x5 = 4.0*x0 + np.random.uniform(size=10000)
x4 = 8.0*x0 - 1.0*x2 + np.random.uniform(size=10000)
X = pd.DataFrame(np.array([x0, x1, x2, x3, x4, x5]).T ,columns=['x0', 'x1', 'x2', 'x3', 'x4', 'x5'])
model = lingam.DirectLiNGAM()
model.fit(X)
make_dot(model.adjacency_matrix_)
# -
# If we want to change the variable name, we can use `labels`.
labels = [f'var{i}' for i in range(X.shape[1])]
make_dot(model.adjacency_matrix_, labels=labels)
# ## Save graph
# The created dot data can be saved as an image file in addition to being displayed in Jupyter Notebook.
# +
dot = make_dot(model.adjacency_matrix_, labels=labels)
# Save pdf
dot.render('dag')
# Save png
dot.format = 'png'
dot.render('dag')
# -
# ## Draw the result of LiNGAM with prediction model
# For example, we create a linear regression model with x0 as the target variable.
# +
from sklearn.linear_model import LinearRegression
# Regress x0 on all remaining variables.
target = 0
features = [i for i in range(X.shape[1]) if i != target]
reg = LinearRegression()
reg.fit(X.iloc[:, features], X.iloc[:, target])
# -
# By specify `prediction_feature_indices` and `prediction_coefs` that can be obtained from the prediction model, we can draw the prediction model with the causal structure.
make_dot(model.adjacency_matrix_, prediction_feature_indices=features, prediction_coefs=reg.coef_)
# Also, we can change the label of the target variable by `prediction_target_label`, omit the coefficient of prediction model without `prediction_coefs`, and change the color by `prediction_line_color`.
make_dot(model.adjacency_matrix_, prediction_feature_indices=features, prediction_target_label='Target', prediction_line_color='#0000FF')
# In addition to the above, we can use `prediction_feature_importance` to draw the importance of the prediction model as an edge label.
# +
import lightgbm as lgb
# Same regression task, but with gradient boosting to get importances.
target = 0
features = [i for i in range(X.shape[1]) if i != target]
reg = lgb.LGBMRegressor(random_state=0)
reg.fit(X.iloc[:, features], X.iloc[:, target])
reg.feature_importances_
# -
make_dot(model.adjacency_matrix_, prediction_feature_indices=features, prediction_feature_importance=reg.feature_importances_)
| examples/DrawGraph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework and bake-off: Word similarity
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Fall 2020"
# `VSM` = Vector Space-Model
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [Set-up](#Set-up)
# 1. [Dataset readers](#Dataset-readers)
# 1. [Dataset comparisons](#Dataset-comparisons)
# 1. [Vocab overlap](#Vocab-overlap)
# 1. [Pair overlap and score correlations](#Pair-overlap-and-score-correlations)
# 1. [Evaluation](#Evaluation)
# 1. [Dataset evaluation](#Dataset-evaluation)
# 1. [Dataset error analysis](#Dataset-error-analysis)
# 1. [Full evaluation](#Full-evaluation)
# 1. [Homework questions](#Homework-questions)
# 1. [PPMI as a baseline [0.5 points]](#PPMI-as-a-baseline-[0.5-points])
# 1. [Gigaword with LSA at different dimensions [0.5 points]](#Gigaword-with-LSA-at-different-dimensions-[0.5-points])
# 1. [Gigaword with GloVe [0.5 points]](#Gigaword-with-GloVe-[0.5-points])
# 1. [Dice coefficient [0.5 points]](#Dice-coefficient-[0.5-points])
# 1. [t-test reweighting [2 points]](#t-test-reweighting-[2-points])
# 1. [Enriching a VSM with subword information [2 points]](#Enriching-a-VSM-with-subword-information-[2-points])
# 1. [Your original system [3 points]](#Your-original-system-[3-points])
# 1. [Bake-off [1 point]](#Bake-off-[1-point])
# ## Overview
#
# Word similarity datasets have long been used to evaluate distributed representations. This notebook provides basic code for conducting such analyses with a number of datasets:
#
# | Dataset | Pairs | Task-type | Current best Spearman $\rho$ | Best $\rho$ paper | |
# |---------|-------|-----------|------------------------------|-------------------|---|
# | [WordSim-353](http://www.cs.technion.ac.il/~gabr/resources/data/wordsim353/) | 353 | Relatedness | 82.8 | [Speer et al. 2017](https://arxiv.org/abs/1612.03975) |
# | [MTurk-771](http://www2.mta.ac.il/~gideon/mturk771.html) | 771 | Relatedness | 81.0 | [Speer et al. 2017](https://arxiv.org/abs/1612.03975) |
# | [The MEN Test Collection](http://clic.cimec.unitn.it/~elia.bruni/MEN) | 3,000 | Relatedness | 86.6 | [Speer et al. 2017](https://arxiv.org/abs/1612.03975) |
# | [SimVerb-3500-dev](http://people.ds.cam.ac.uk/dsg40/simverb.html) | 500 | Similarity | 61.1 | [Mrkišć et al. 2016](https://arxiv.org/pdf/1603.00892.pdf) |
# | [SimVerb-3500-test](http://people.ds.cam.ac.uk/dsg40/simverb.html) | 3,000 | Similarity | 62.4 | [Mrkišć et al. 2016](https://arxiv.org/pdf/1603.00892.pdf) |
#
# Each of the similarity datasets contains word pairs with an associated human-annotated similarity score. (We convert these to distances to align intuitively with our distance measure functions.) The evaluation code measures the distance between the word pairs in your chosen VSM (vector-space model which should be a `pd.DataFrame`).
#
# The evaluation metric for each dataset is the [Spearman correlation coefficient $\rho$](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient) between the annotated scores and your distances, as is standard in the literature. We also macro-average these correlations across the datasets for an overall summary. (In using the macro-average, we are saying that we care about all the datasets equally, even though they vary in size.)
#
# This homework ([questions at the bottom of this notebook](#Homework-questions)) asks you to write code that uses the count matrices in `data/vsmdata` to create and evaluate some baseline models as well as an original model $M$ that you design. This accounts for 9 of the 10 points for this assignment.
#
# For the associated bake-off, we will distribute two new word similarity or relatedness datasets and associated reader code, and you will evaluate $M$ (no additional training or tuning allowed!) on those new datasets. Systems that enter will receive the additional homework point, and systems that achieve the top score will receive an additional 0.5 points.
# ## Set-up
from collections import defaultdict
import csv
import itertools
import numpy as np
import os
import pandas as pd
from scipy.stats import spearmanr
import vsm
from IPython.display import display
# +
# Root of the local course data directory; adjust per machine.
PATH_TO_DATA = '/Users/pierrejaumier/Data/cs224u'
VSM_HOME = os.path.join(PATH_TO_DATA, 'vsmdata')
WORDSIM_HOME = os.path.join(PATH_TO_DATA, 'wordsim')
# -
# ## Dataset readers
# +
def wordsim_dataset_reader(
        src_filename,
        header=False,
        delimiter=',',
        score_col_index=2):
    """
    Generic reader for the tabular word-similarity datasets.

    Every release is a delimited text file whose first two columns hold
    the word pair and whose `score_col_index` column holds the
    similarity score.

    Parameters
    ----------
    src_filename : str
        Full path to the source file.
    header : bool
        Whether `src_filename` starts with a header row to skip.
    delimiter : str
        Field delimiter in `src_filename`.
    score_col_index : int
        Column containing the similarity scores. Default: 2.

    Yields
    ------
    (str, str, float)
        (w1, w2, score), with both words stripped and lower-cased to
        match the VSM vocabularies, and the score negated so larger
        similarity corresponds to smaller "distance".
    """
    with open(src_filename) as f:
        rows = csv.reader(f, delimiter=delimiter)
        if header:
            next(rows)
        for fields in rows:
            first, second = (fields[i].strip().lower() for i in (0, 1))
            # Flip the sign to align with distance-based comparisons.
            yield first, second, -float(fields[score_col_index])
def wordsim353_reader():
    """WordSim-353 (relatedness): http://www.gabrilovich.com/resources/data/wordsim353/

    Yields (word1, word2, negated score) via `wordsim_dataset_reader`.
    """
    src_filename = os.path.join(
        WORDSIM_HOME, 'wordsim353', 'combined.csv')
    # Comma-delimited with a header row; score in the default column 2.
    return wordsim_dataset_reader(
        src_filename, header=True)
def mturk771_reader():
    """MTURK-771 (relatedness): http://www2.mta.ac.il/~gideon/mturk771.html

    Yields (word1, word2, negated score) via `wordsim_dataset_reader`.
    """
    src_filename = os.path.join(
        WORDSIM_HOME, 'MTURK-771.csv')
    return wordsim_dataset_reader(
        src_filename, header=False)
def simverb3500dev_reader():
    """SimVerb-3500 dev split (similarity): https://www.aclweb.org/anthology/D16-1235/

    Yields (word1, word2, negated score) via `wordsim_dataset_reader`.
    """
    src_filename = os.path.join(
        WORDSIM_HOME, 'SimVerb-3500', 'SimVerb-500-dev.txt')
    # Tab-delimited, no header; the score lives in column 3.
    return wordsim_dataset_reader(
        src_filename, delimiter="\t", header=False, score_col_index=3)
def simverb3500test_reader():
    """SimVerb-3500 test split (similarity): https://www.aclweb.org/anthology/D16-1235/

    Yields (word1, word2, negated score) via `wordsim_dataset_reader`.
    """
    src_filename = os.path.join(
        WORDSIM_HOME, 'SimVerb-3500', 'SimVerb-3000-test.txt')
    return wordsim_dataset_reader(
        src_filename, delimiter="\t", header=False, score_col_index=3)
def men_reader():
    """MEN Test Collection (relatedness): https://staff.fnwi.uva.nl/e.bruni/MEN

    Yields (word1, word2, negated score) via `wordsim_dataset_reader`.
    """
    src_filename = os.path.join(
        WORDSIM_HOME, 'MEN', 'MEN_dataset_natural_form_full')
    # Space-delimited, no header.
    return wordsim_dataset_reader(
        src_filename, header=False, delimiter=' ')
# -
# This collection of readers will be useful for flexible evaluations:
# Canonical tuple of all dataset readers; used as the default for the
# evaluation functions below.
READERS = (wordsim353_reader, mturk771_reader, simverb3500dev_reader,
           simverb3500test_reader, men_reader)
# Peek at the first few (word1, word2, negated-score) triples.
exbd = wordsim353_reader()
next(exbd)
next(exbd)
next(exbd)
# ## Dataset comparisons
#
# This section does some basic analysis of the datasets. The goal is to obtain a deeper understanding of what problem we're solving – what strengths and weaknesses the datasets have and how they relate to each other. For a full-fledged project, we would want to continue work like this and report on it in the paper, to provide context for the results.
def get_reader_name(reader):
    """
    Human-friendly dataset name derived from the function name of the
    iterator `reader` (the '_reader' suffix is stripped).
    """
    full_name = reader.__name__
    return full_name.replace("_reader", "")
# ### Vocab overlap
#
# How many vocabulary items are shared across the datasets?
def get_reader_vocab(reader):
    """Collect every word (str) mentioned by `reader` into a set."""
    return {word for w1, w2, _ in reader() for word in (w1, w2)}
def get_reader_vocab_overlap(readers=READERS):
    """
    Tabulate vocabulary-level relationships for every ordered pair of
    readers: overlap and union sizes plus each side's vocabulary size.
    Returns a pd.DataFrame with one row per ordered pair.
    """
    records = []
    for left, right in itertools.product(readers, repeat=2):
        left_vocab = get_reader_vocab(left)
        right_vocab = get_reader_vocab(right)
        records.append({
            'd1': get_reader_name(left),
            'd2': get_reader_name(right),
            'overlap': len(left_vocab & right_vocab),
            'union': len(left_vocab | right_vocab),
            'd1_size': len(left_vocab),
            'd2_size': len(right_vocab)})
    return pd.DataFrame(records)
# Pairwise vocabulary-overlap statistics across all datasets.
vocab_overlap = get_reader_vocab_overlap()
def vocab_overlap_crosstab(vocab_overlap):
    """
    Format the output of `get_reader_vocab_overlap` as a dataset-by-
    dataset cross-tab of overlap counts, with the strict upper triangle
    blanked out (the matrix is symmetric) for readability.
    """
    xtab = pd.crosstab(
        vocab_overlap['d1'],
        vocab_overlap['d2'],
        values=vocab_overlap['overlap'],
        aggfunc=np.mean)
    n_rows, n_cols = xtab.shape
    for row_idx in range(n_rows):
        for col_idx in range(row_idx + 1, n_cols):
            # Redundant mirror entry; blank it to reduce visual clutter.
            xtab.iloc[row_idx, col_idx] = ''
    return xtab
# Display the overlap counts as a lower-triangular table.
vocab_overlap_crosstab(vocab_overlap)
# This looks reasonable. By design, the SimVerb dev and test sets have a lot of overlap. The other overlap numbers are pretty small, even adjusting for dataset size.
# ### Pair overlap and score correlations
#
# How many word pairs are shared across datasets and, for shared pairs, what is the correlation between their scores? That is, do the datasets agree?
def get_reader_pairs(reader):
    """
    Map each alphabetically-sorted word pair (tuple of str) in
    `reader` to its score.
    """
    pairs = {}
    for w1, w2, score in reader():
        pairs[tuple(sorted([w1, w2]))] = score
    return pairs
def get_reader_pair_overlap(readers=READERS):
    """Return a `pd.DataFrame` giving the number of overlapping
    word-pairs for each pair of distinct readers, along with the
    Spearman correlation between their scores on the shared pairs.

    Parameters
    ----------
    readers : tuple of reader functions (default: `READERS`).

    Returns
    -------
    pd.DataFrame
        Columns ['d1', 'd2', 'pair_overlap', 'rho'], one row per
        unordered reader pair, sorted by descending overlap.
    """
    data = []
    # Bug fix: iterate over the `readers` parameter; the original
    # iterated the module-level READERS, silently ignoring the argument.
    for r1, r2 in itertools.product(readers, repeat=2):
        if r1.__name__ != r2.__name__:
            d1 = get_reader_pairs(r1)
            d2 = get_reader_pairs(r2)
            overlap = []
            for p, s in d1.items():
                if p in d2:
                    overlap.append([s, d2[p]])
            if overlap:
                s1, s2 = zip(*overlap)
                rho = spearmanr(s1, s2)[0]
            else:
                # Spearman is undefined when no pairs are shared.
                rho = None
            # Canonical order for the pair:
            n1, n2 = sorted([get_reader_name(r1), get_reader_name(r2)])
            d = {
                'd1': n1,
                'd2': n2,
                'pair_overlap': len(overlap),
                'rho': rho}
            data.append(d)
    df = pd.DataFrame(data)
    df = df.sort_values(['pair_overlap','d1','d2'], ascending=False)
    # Each unordered pair appears twice -- as (r1, r2) and (r2, r1) with
    # identical stats -- so keep every other row to deduplicate:
    return df[::2].reset_index(drop=True)
# Skip this display when running in the Gradescope grading environment.
if 'IS_GRADESCOPE_ENV' not in os.environ:
    display(get_reader_pair_overlap())
# This looks reasonable: none of the datasets have a lot of overlapping pairs, so we don't have to worry too much about places where they give conflicting scores.
# ## Evaluation
#
# This section builds up the evaluation code that you'll use for the homework and bake-off. For illustrations, I'll read in a VSM created from `data/vsmdata/giga_window5-scaled.csv.gz`:
# Example VSM: Gigaword co-occurrence counts, window 5, scaled weighting.
giga5 = pd.read_csv(
    os.path.join(VSM_HOME, "giga_window5-scaled.csv.gz"), index_col=0)
# ### Dataset evaluation
def word_similarity_evaluation(reader, df, distfunc=vsm.cosine):
    """
    Evaluate the VSM `df` against one word-similarity dataset.

    Parameters
    ----------
    reader : iterator
        A word-similarity dataset reader yielding (word1, word2, score)
        tuples.
    df : pd.DataFrame
        The VSM being evaluated; rows are indexed by word.
    distfunc : function mapping vector pairs to floats.
        Distance measure between vectors; `vsm.euclidean`,
        `vsm.matching`, `vsm.jaccard`, or any float-valued function on
        pairs of vectors also works.

    Raises
    ------
    ValueError
        If the dataset contains a word missing from `df.index`.

    Returns
    -------
    float, pd.DataFrame
        The Spearman rank correlation between the dataset scores and
        the `distfunc` values (rank-sensitive only), and a DataFrame
        with columns ['word1', 'word2', 'score', 'distance'].
    """
    records = []
    for w1, w2, gold in reader():
        # Fail loudly on vocabulary mismatches; otherwise the
        # evaluation would be ill-defined.
        for word in (w1, w2):
            if word not in df.index:
                raise ValueError(
                    "Word '{}' is in the similarity dataset {} but not in the "
                    "DataFrame, making this evaluation ill-defined. Please "
                    "switch to a DataFrame with an appropriate vocabulary.".
                    format(word, get_reader_name(reader)))
        records.append({
            'word1': w1,
            'word2': w2,
            'score': gold,
            'distance': distfunc(df.loc[w1], df.loc[w2])})
    data = pd.DataFrame(records)
    rho, _ = spearmanr(data['score'].values, data['distance'].values)
    return rho, data
# Evaluate giga5 on the MEN dataset with the default cosine distance.
rho, eval_df = word_similarity_evaluation(men_reader, giga5)
rho
eval_df.head()
# ### Dataset error analysis
#
# For error analysis, we can look at the words with the largest delta between the gold score and the distance value in our VSM. We do these comparisons based on ranks, just as with our primary metric (Spearman $\rho$), and we normalize both rankings so that they have a comparable number of levels.
# +
def word_similarity_error_analysis(eval_df):
    """
    Annotate `eval_df` (in place) with normalized rank columns for
    'distance' and 'score' and their absolute difference ('error'),
    then return it sorted from best (smallest error) to worst.
    """
    for col in ('distance', 'score'):
        eval_df[col + '_rank'] = _normalized_ranking(eval_df[col])
    eval_df['error'] = (eval_df['distance_rank'] - eval_df['score_rank']).abs()
    return eval_df.sort_values('error')
def _normalized_ranking(series):
    """Dense ranks of `series`, scaled so the ranks sum to 1."""
    dense = series.rank(method='dense')
    return dense / dense.sum()
# -
# Best predictions (smallest rank discrepancy):
word_similarity_error_analysis(eval_df).head()
# Worst predictions (largest rank discrepancy):
word_similarity_error_analysis(eval_df).tail()
# ### Full evaluation
# A full evaluation is just a loop over all the readers on which one want to evaluate, with a macro-average at the end:
def full_word_similarity_evaluation(df, readers=READERS, distfunc=vsm.cosine):
    """
    Evaluate the VSM `df` against every dataset in `readers` and
    macro-average the results.

    Parameters
    ----------
    df : pd.DataFrame
        The VSM being evaluated.
    readers : tuple
        The similarity dataset readers on which to evaluate.
    distfunc : function mapping vector pairs to floats.
        Distance measure between vectors; `vsm.euclidean`,
        `vsm.matching`, `vsm.jaccard`, or any float-valued function on
        pairs of vectors also works.

    Returns
    -------
    pd.Series
        Spearman rho per dataset, plus a 'Macro-average' entry.
    """
    results = {}
    for reader in readers:
        rho, _ = word_similarity_evaluation(reader, df, distfunc=distfunc)
        results[get_reader_name(reader)] = rho
    series = pd.Series(results, name='Spearman r')
    series['Macro-average'] = series.mean()
    return series
# Skip this (slow) display when running in the Gradescope environment.
if 'IS_GRADESCOPE_ENV' not in os.environ:
    display(full_word_similarity_evaluation(giga5))
# ## Homework questions
#
# Please embed your homework responses in this notebook, and do not delete any cells from the notebook. (You are free to add as many cells as you like as part of your responses.)
# ### PPMI as a baseline [0.5 points]
#
# The insight behind PPMI is a recurring theme in word representation learning, so it is a natural baseline for our task. For this question, write a function called `run_giga_ppmi_baseline` that does the following:
#
# 1. Reads the Gigaword count matrix with a window of 20 and a flat scaling function into a `pd.DataFrame`s, as is done in the VSM notebooks. The file is `data/vsmdata/giga_window20-flat.csv.gz`, and the VSM notebooks provide examples of the needed code.
#
# 1. Reweights this count matrix with PPMI.
#
# 1. Evaluates this reweighted matrix using `full_word_similarity_evaluation`. The return value of `run_giga_ppmi_baseline` should be the return value of this call to `full_word_similarity_evaluation`.
#
# The goal of this question is to help you get more familiar with the code in `vsm` and the function `full_word_similarity_evaluation`.
#
# The function `test_run_giga_ppmi_baseline` can be used to test that you've implemented this specification correctly.
def run_giga_ppmi_baseline():
    """
    Baseline: PPMI-reweight the Gigaword window-20, flat-scaled count
    matrix and run the full word-similarity evaluation on it. Returns
    the pd.Series produced by `full_word_similarity_evaluation`.
    """
    counts = pd.read_csv(
        os.path.join(VSM_HOME, 'giga_window20-flat.csv.gz'), index_col=0)
    reweighted = vsm.pmi(counts)
    return full_word_similarity_evaluation(reweighted)
def test_run_giga_ppmi_baseline(func):
    """Sanity-check `func` (should be `run_giga_ppmi_baseline`) against
    the expected wordsim353 score, rounded to two decimals."""
    scores = func()
    ws_result = scores.loc['wordsim353'].round(2)
    ws_expected = 0.58
    assert ws_result == ws_expected, \
        "Expected wordsim353 value of {}; got {}".format(
            ws_expected, ws_result)
# NOTE: this runs the baseline twice (once in the test, once for the
# display), re-reading the count matrix each time.
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_run_giga_ppmi_baseline(run_giga_ppmi_baseline)
    display(run_giga_ppmi_baseline())
# ### Gigaword with LSA at different dimensions [0.5 points]
#
# We might expect PPMI and LSA to form a solid pipeline that combines the strengths of PPMI with those of dimensionality reduction. However, LSA has a hyper-parameter $k$ – the dimensionality of the final representations – that will impact performance. For this problem, write a wrapper function `run_ppmi_lsa_pipeline` that does the following:
#
# 1. Takes as input a count `pd.DataFrame` and an LSA parameter `k`.
# 1. Reweights the count matrix with PPMI.
# 1. Applies LSA with dimensionality `k`.
# 1. Evaluates this reweighted matrix using `full_word_similarity_evaluation`. The return value of `run_ppmi_lsa_pipeline` should be the return value of this call to `full_word_similarity_evaluation`.
#
# The goal of this question is to help you get a feel for how much LSA alone can contribute to this problem.
#
# The function `test_run_ppmi_lsa_pipeline` will test your function on the count matrix in `data/vsmdata/giga_window20-flat.csv.gz`.
def run_ppmi_lsa_pipeline(count_df, k):
    """PPMI-reweight `count_df`, reduce it to `k` dimensions with LSA,
    and return the full word-similarity evaluation of the result.

    count_df : pd.DataFrame of co-occurrence counts
    k : int, dimensionality of the LSA reduction
    """
    reduced = vsm.lsa(vsm.pmi(count_df), k)
    return full_word_similarity_evaluation(reduced)
def test_run_ppmi_lsa_pipeline(func):
    """Check `run_ppmi_lsa_pipeline` on giga_window20 with k=10: 'men' must round to 0.57."""
    counts = pd.read_csv(
        os.path.join(VSM_HOME, "giga_window20-flat.csv.gz"), index_col=0)
    scores = func(counts, k=10)
    expected = 0.57
    observed = scores.loc['men'].round(2)
    assert observed == expected, \
        "Expected men value of {}; got {}".format(expected, observed)
# Run the LSA-pipeline self-check locally only (skipped on Gradescope).
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_run_ppmi_lsa_pipeline(run_ppmi_lsa_pipeline)
# Display the full evaluation for the PPMI+LSA pipeline at k=10.
giga20 = pd.read_csv(os.path.join(VSM_HOME, "giga_window20-flat.csv.gz"), index_col=0)
display(run_ppmi_lsa_pipeline(giga20, k=10))
# ### Gigaword with GloVe [0.5 points]
#
# Can GloVe improve over the PPMI-based baselines we explored above? To begin to address this question, let's run GloVe and see how performance on our task changes throughout the optimization process.
#
# __Your task__: write a function `run_glove_wordsim_evals` that does the following:
#
# 1. Has a parameter `n_runs` with default value `5`.
#
# 1. Reads in `data/vsmdata/giga_window5-scaled.csv.gz`.
#
# 1. Creates a `TorchGloVe` instance with `warm_start=True`, `max_iter=50`, and all other parameters set to their defaults.
#
# 1. `n_runs` times, calls `fit` on your model and, after each, runs `full_word_similarity_evaluation` with default keyword parameters, extract the 'Macro-average' score, and add that score to a list.
#
# 1. Returns the list of scores created.
#
# The trend should give you a sense for whether it is worth running GloVe for more iterations.
#
# Some implementation notes:
#
# * `TorchGloVe` will accept and return `pd.DataFrame` instances, so you shouldn't need to do any type conversions.
#
# * Performance will vary a lot for this function, so there is some uncertainty in the testing, but `run_glove_wordsim_evals` will at least check that you wrote a function with the right general logic.
# Smoke-test the course's GloVe implementation before using it below.
from torch_glove import simple_example
simple_example()
def run_glove_wordsim_evals(n_runs=5):
    """Fit a warm-started TorchGloVe model `n_runs` times on
    giga_window5-scaled and score it after each fit.

    Parameters
    ----------
    n_runs : int
        Number of successive `fit` calls (each adds `max_iter` iterations,
        since `warm_start=True` continues from the previous parameters).

    Returns
    -------
    list of float
        The 'Macro-average' word-similarity score after each fit.
    """
    from torch_glove import TorchGloVe
    counts = pd.read_csv(
        os.path.join(VSM_HOME, "giga_window5-scaled.csv.gz"), index_col=0)
    model = TorchGloVe(warm_start=True, max_iter=50)
    # float() guarantees plain Python floats, matching the contract that
    # `test_run_small_glove_evals` checks.
    return [
        float(full_word_similarity_evaluation(model.fit(counts))['Macro-average'])
        for _ in range(n_runs)]
def test_run_small_glove_evals(data):
"""`data` should be the return value of `run_glove_wordsim_evals`"""
assert isinstance(data, list), \
"`run_glove_wordsim_evals` should return a list"
assert all(isinstance(x, float) for x in data), \
("All the values in the list returned by `run_glove_wordsim_evals` "
"should be floats.")
# Run GloVe locally, print the per-fit scores, and validate their types.
if 'IS_GRADESCOPE_ENV' not in os.environ:
    glove_scores = run_glove_wordsim_evals()
    print(glove_scores)
    test_run_small_glove_evals(glove_scores)
# The Spearman coefficient increases, so our observations get closer to the
# desired results (similarity judgments for word pairs).
# However, we are still far from the scores obtained with ppmi and ppmi_lsa
# (next attempt: 20 runs).
# the Spearman correlation between two variables will be high when observations have a similar (or identical for a correlation of 1)
glove_scores = run_glove_wordsim_evals(n_runs=20)
print(glove_scores)
import matplotlib.pyplot as plt
plt.plot(glove_scores)
# ### Dice coefficient [0.5 points]
#
# Implement the Dice coefficient for real-valued vectors, as
#
# $$
# \textbf{dice}(u, v) =
# 1 - \frac{
# 2 \sum_{i=1}^{n}\min(u_{i}, v_{i})
# }{
# \sum_{i=1}^{n} u_{i} + v_{i}
# }$$
#
# You can use `test_dice_implementation` below to check that your implementation is correct.
def dice(u, v):
    """Dice distance between real-valued vectors `u` and `v` (0 means identical)."""
    overlap = np.minimum(u, v).sum()
    return 1 - (2 * overlap) / np.sum(u + v)
def test_dice_implementation(func):
"""`func` should be an implementation of `dice` as defined above."""
X = np.array([
[ 4., 4., 2., 0.],
[ 4., 61., 8., 18.],
[ 2., 8., 10., 0.],
[ 0., 18., 0., 5.]])
assert func(X[0], X[1]).round(5) == 0.80198
assert func(X[1], X[2]).round(5) == 0.67568
# Verify the dice implementation locally (skipped on Gradescope).
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_dice_implementation(dice)
# ### t-test reweighting [2 points]
#
#
# The t-test statistic can be thought of as a reweighting scheme. For a count matrix $X$, row index $i$, and column index $j$:
#
# $$\textbf{ttest}(X, i, j) =
# \frac{
# P(X, i, j) - \big(P(X, i, *)P(X, *, j)\big)
# }{
# \sqrt{(P(X, i, *)P(X, *, j))}
# }$$
#
# where $P(X, i, j)$ is $X_{ij}$ divided by the total values in $X$, $P(X, i, *)$ is the sum of the values in row $i$ of $X$ divided by the total values in $X$, and $P(X, *, j)$ is the sum of the values in column $j$ of $X$ divided by the total values in $X$.
#
# For this problem, implement this reweighting scheme. You can use `test_ttest_implementation` below to check that your implementation is correct. You do not need to use this for any evaluations, though we hope you will be curious enough to do so!
# NOTE(review): exploratory scratch work. The same quantities are recomputed
# locally inside `ttest` below, so these module-level X / X_sum / P_j / P_i
# are never used by the graded code.
X = pd.DataFrame(np.array([
    [ 4., 4., 2., 100.],
    [ 4., 61., 8., 18.],
    [ 2., 8., 10., 0.],
    [ 0., 18., 0., 5.]]))
X = X.to_numpy()
X_sum = X.sum()
P_j = X.sum(axis=0)/X_sum
P_i = X.sum(axis=1)/X_sum
def ttest(df):
    """t-test reweighting of a count matrix.

    For each cell: (P(i,j) - P(i,*)P(*,j)) / sqrt(P(i,*)P(*,j)), where all
    probabilities are counts divided by the grand total.

    Fixes two issues with the original implementation:
    * `df.to_numpy()` can return a view of the DataFrame's buffer, so the
      in-place cell updates could silently mutate the caller's `df`;
      we now copy explicitly.
    * The O(n*m) Python double loop is replaced with vectorized numpy.

    Parameters
    ----------
    df : pd.DataFrame of non-negative counts

    Returns
    -------
    pd.DataFrame with the same index/columns, t-test reweighted.
    """
    X = df.to_numpy(dtype=float, copy=True)
    total = X.sum()
    P = X / total
    row_marginals = P.sum(axis=1)
    col_marginals = P.sum(axis=0)
    # expected[i, j] = P(i,*) * P(*,j) under independence.
    expected = np.outer(row_marginals, col_marginals)
    reweighted = (P - expected) / np.sqrt(expected)
    return pd.DataFrame(reweighted, index=df.index, columns=df.columns)
def test_ttest_implementation(func):
"""`func` should be `ttest`"""
X = pd.DataFrame(np.array([
[ 4., 4., 2., 0.],
[ 4., 61., 8., 18.],
[ 2., 8., 10., 0.],
[ 0., 18., 0., 5.]]))
actual = np.array([
[ 0.33056, -0.07689, 0.04321, -0.10532],
[-0.07689, 0.03839, -0.10874, 0.07574],
[ 0.04321, -0.10874, 0.36111, -0.14894],
[-0.10532, 0.07574, -0.14894, 0.05767]])
predicted = func(X)
#print(predicted)
assert np.array_equal(predicted.round(5), actual)
# Verify the t-test reweighting locally (skipped on Gradescope).
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_ttest_implementation(ttest)
# Evaluate t-test reweighting of giga_window20 on the word-similarity suite.
giga20 = pd.read_csv(os.path.join(VSM_HOME, "giga_window20-flat.csv.gz"), index_col=0)
ttest_giga20 = ttest(giga20)
display(full_word_similarity_evaluation(ttest_giga20))
# Best result so far!
# ### Enriching a VSM with subword information [2 points]
#
# It might be useful to combine character-level information with word-level information. To help you begin assessing this idea, this question asks you to write a function that modifies an existing VSM so that the representation for each word $w$ is the element-wise sum of $w$'s original word-level representation with all the representations for the n-grams $w$ contains.
#
# The following starter code should help you structure this and clarify the requirements, and a simple test is included below as well.
#
# You don't need to write a lot of code; the motivation for this question is that the function you write could have practical value.
# Demo: character n-grams and a character-level VSM on a tiny two-word example.
vsm.get_character_ngrams('mot', n=2)
words = ['bon', 'bel']
df = pd.DataFrame([[1, 2], [3, 4]], index=words, columns=words)
df
cf = vsm.ngram_vsm(df, n=2) # Character level VSM
cf
vsm.character_level_rep('bon', cf, n=2)
# For each word we sum all of its character-level bi-grams.
# In this example that sum is "associated" 7=4+1+1+1 times with `bon` and 12=6+2+2+2 times with `bel`.
# Bi-grams that recur often in the matrix increase the weight of the vector (here `<w>b`).
vsm.character_level_rep('bel', cf, n=2)
def subword_enrichment(df, n=4):
    """Enrich each word vector in `df` with character n-gram information.

    Each word's new representation is the element-wise sum of its original
    row in `df` and the representations of all character n-grams it contains
    (per `vsm.character_level_rep`).

    Parameters
    ----------
    df : pd.DataFrame
        Word-level VSM; the index holds the vocabulary.
    n : int
        Character n-gram size.

    Returns
    -------
    pd.DataFrame with the same index and columns as `df`.
    """
    # 1. Character-level VSM built from the n-grams of the words in `df`.
    cf = vsm.ngram_vsm(df, n)
    # 2. Character-level representation of every word, in `df`'s row order
    #    (comprehension replaces the original append loop).
    char_reps = np.array([vsm.character_level_rep(w, cf, n) for w in df.index])
    # 3./4. Element-wise sum with the original word-level vectors, repackaged
    #    with the original index and columns.
    return pd.DataFrame(df.to_numpy() + char_reps,
                        index=df.index, columns=df.columns)
def test_subword_enrichment(func):
"""`func` should be an implementation of subword_enrichment as
defined above.
"""
vocab = ["ABCD", "BCDA", "CDAB", "DABC"]
df = pd.DataFrame([
[1, 1, 2, 1],
[3, 4, 2, 4],
[0, 0, 1, 0],
[1, 0, 0, 0]], index=vocab)
expected = pd.DataFrame([
[14, 14, 18, 14],
[22, 26, 18, 26],
[10, 10, 14, 10],
[14, 10, 10, 10]], index=vocab)
new_df = func(df, n=2)
assert np.array_equal(expected.columns, new_df.columns), \
"Columns are not the same"
assert np.array_equal(expected.index, new_df.index), \
"Indices are not the same"
assert np.array_equal(expected.values, new_df.values), \
"Co-occurrence values aren't the same"
# Verify subword enrichment locally (skipped on Gradescope).
if 'IS_GRADESCOPE_ENV' not in os.environ:
    test_subword_enrichment(subword_enrichment)
# Evaluate subword-enriched giga_window20 (default n=4) on the similarity suite.
giga20 = pd.read_csv(os.path.join(VSM_HOME, "giga_window20-flat.csv.gz"), index_col=0)
swe_giga20 = subword_enrichment(giga20)
display(full_word_similarity_evaluation(swe_giga20))
# ### Your original system [3 points]
#
# This question asks you to design your own model. You can of course include steps made above (ideally, the above questions informed your system design!), but your model should not be literally identical to any of the above models. Other ideas: retrofitting, autoencoders, GloVe, subword modeling, ...
#
# Requirements:
#
# 1. Your code must operate on one or more of the count matrices in `data/vsmdata`. You can choose which subset of them; this is an important design feature of your system. __Other pretrained vectors cannot be introduced__.
#
# 1. Retrofitting is permitted.
#
# 1. Your code must be self-contained, so that we can work with your model directly in your homework submission notebook. If your model depends on external data or other resources, please submit a ZIP archive containing these resources along with your submission.
#
# In the cell below, please provide a brief technical description of your original system, so that the teaching team can gain an understanding of what it does. This will help us to understand your code and analyze all the submissions to identify patterns and strategies. We also ask that you report the best score your system got during development, just to help us understand how systems performed overall.
# +
# PLEASE MAKE SURE TO INCLUDE THE FOLLOWING BETWEEN THE START AND STOP COMMENTS:
# 1) Textual description of your system.
# 2) The code for your original system.
# 3) The score achieved by your system in place of MY_NUMBER.
# With no other changes to that line.
# You should report your score as a decimal value <=1.0
# PLEASE MAKE SURE NOT TO DELETE OR EDIT THE START AND STOP COMMENTS
# START COMMENT: Enter your system description in this cell.
# My peak score was: MY_NUMBER
if 'IS_GRADESCOPE_ENV' not in os.environ:
    pass
# NOTE(review): the assignment asks for the system code to live inside the
# conditional above; this cell appears to run it unconditionally -- confirm
# before submitting.
# System: subword enrichment + PPMI on giga_window20, then warm-started GloVe
# fits, each followed by an LSA projection and a word-similarity evaluation.
giga20 = pd.read_csv(os.path.join(VSM_HOME, "giga_window20-flat.csv.gz"), index_col=0)
x = subword_enrichment(giga20)
x = vsm.pmi(x)
#x = vsm.lsa(x, k=10)
#x = ttest(x) 0.18
#display(full_word_similarity_evaluation(x))
n_runs = 10
from torch_glove import TorchGloVe
#X = pd.read_csv(os.path.join(VSM_HOME, "giga_window5-scaled.csv.gz"), index_col=0)
mod = TorchGloVe(warm_start=True, max_iter=50)
results = []
for run in range(n_runs):
    G = mod.fit(x)
    G_lsa = vsm.lsa(G, k=10)
    series = full_word_similarity_evaluation(G_lsa)
    print(series['Macro-average'])
# STOP COMMENT: Please do not remove this comment.
# -
# Score of the final GloVe embedding without the LSA projection, for comparison.
series = full_word_similarity_evaluation(G)
print(series['Macro-average'])
# VSM: retrofitting
#G_retrofitted = os.path.join(PATH_TO_DATA, 'glove6B300d-retrofit-wn.csv.gz')
#import utils
# NOTE(review): this loads pretrained (WordNet-retrofitted) GloVe vectors; the
# assignment states other pretrained vectors cannot be introduced -- confirm
# this cell is exploration only and not part of the submitted system.
G_retrofitted = pd.read_csv(os.path.join(PATH_TO_DATA, "glove6B300d-retrofit-wn.csv.gz"),
                            index_col=0)
#glove_dict = utils.glove2dict(os.path.join(PATH_TO_DATA, "glove6B300d-retrofit-wn.csv.gz"))
G_retrofitted.head()
full_word_similarity_evaluation(G_retrofitted)
# ## Bake-off [1 point]
#
# For the bake-off, we will release two additional datasets. The announcement will go out on the discussion forum. We will also release reader code for these datasets that you can paste into this notebook. You will evaluate your custom model $M$ (from the previous question) on these new datasets using `full_word_similarity_evaluation`. Rules:
#
# 1. Only one evaluation is permitted.
# 1. No additional system tuning is permitted once the bake-off has started.
#
# The cells below this one constitute your bake-off entry.
#
# People who enter will receive the additional homework point, and people whose systems achieve the top score will receive an additional 0.5 points. We will test the top-performing systems ourselves, and only systems for which we can reproduce the reported results will win the extra 0.5 points.
#
# Late entries will be accepted, but they cannot earn the extra 0.5 points. Similarly, you cannot win the bake-off unless your homework is submitted on time.
#
# The announcement will include the details on where to submit your entry.
# Enter your bake-off assessment code into this cell.
# Please do not remove this comment.
# (Placeholder: bake-off entry and score not filled in.)
if 'IS_GRADESCOPE_ENV' not in os.environ:
    pass
# Please enter your code in the scope of the above conditional.
##### YOUR CODE HERE
# On an otherwise blank line in this cell, please enter
# your "Macro-average" value as reported by the code above.
# Please enter only a number between 0 and 1 inclusive.
# Please do not remove this comment.
if 'IS_GRADESCOPE_ENV' not in os.environ:
    pass
# Please enter your score in the scope of the above conditional.
##### YOUR CODE HERE
| hw1_wordsim.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Basic matplotlib demos: trig curves, a pie chart, and a noisy line fit.
import matplotlib.pyplot as plt
import numpy as np
# +
# seaborn
# -
x = np.linspace(0 , 2 * np.pi , 100 )
y = np.sin(x)
plt.plot(x,y)
y = np.cos(x)
plt.plot(x,y)
y = np.tan(x)
plt.plot(x,y)
plt.pie([10, 20 ,50] , radius = 2 , colors = ["red", "yellow" ,"blue"] , shadow = True)
x = np.linspace(1, 100 , 1000)
x
# +
# y = mx +c
# -
m = 2
c = 3
y = m * x + c
y
plt.plot(x,y)
plt.scatter(x,y)
# Add Gaussian noise to the line to simulate scattered observations.
y =np.random.randn(1000) * 120 + y
plt.scatter(x,y)
x = np.linspace(0 , 2 * np.pi , 100 )
# Parametric circle of radius r: (l, m) = (r*cos(x), r*sin(x)).
# BUG FIX: the original cell contained a dangling, incomplete statement
# (`y =` with no right-hand side), which is a syntax error; it is removed.
r = 10
l = r * np.cos(x)
m = r * np.sin(x)
plt.figure(figsize= (4,4))
plt.plot(l,m)
# +
# plt.plot?
# -
| numpy and matplotlib/matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HAL O World
# > Just what do you think you're doing, Dave?.
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [jupyter]
# - image: images/posts/2020-12-01-HAL-O-World/HAL-O-World.jpg
# # Learning to Deep Learn . . .
#
# So today is a monumental day in the field of deep learning, [AlphaFold](https://deepmind.com/blog/article/alphafold-a-solution-to-a-50-year-old-grand-challenge-in-biology) by [DeepMind](https://deepmind.com/) has been recognised as a solution to the [“protein folding problem”](https://en.wikipedia.org/wiki/Protein_folding) and marks the end of a challenge set over 50 years ago. This breakthrough demonstrates the impact AI can have on scientific discovery and its potential to dramatically accelerate progress in some of the most fundamental fields that explain and shape our world. So now is as good a time as any to really get involved and start learning to Deep Learn.
# # Deep Learning for Coders
#
# To ensure that I am making good progress, creating practical models and working as efficiently as possible I will be working through the book ["Deep Learning for Coders with fastai and PyTorch"](https://course.fast.ai/), not only is there a great online portal with a video walk through of the curriculum, an incredible and very well presented book, but also all the source (not only for the practical work, but also for the entire book! It was all written in jupyter notebooks, how amazing is that!).
# ## Getting Setup
#
# So far I have made it through a couple of chapters of the book, and as recommended by the course I am starting a "Deep Learning Journal", just a place online where I can track my progress, thoughts and experiments as I progress through the curriculum. There are a couple of steps required to get set up to make the most of the work presented, and I thought that a good place to start a blog is with some notes on how to get set up.
# ### Jupyter Notebooks
#
# Working as a Technical Animator in video games and having written Python code for nearly a decade now, I was aware of juypter notebooks, but I felt that as it was only useful for creating "pretty comments" inline with your code. Yes it would be cool to have an animated gif as part of your documentation but how useful are they in reality? It turns out that they are awesome! I am writing this very blog post using them right now and I am really impressed. The fastai team have created this entire blogging platform `fastpages` to help people get up and running quickly. Jeremy from fastai has also created a great talk about the power of notebooks and their platform `nbdev` here which is worth a watch.
#
# > youtube: https://youtu.be/9Q6sLbz37gk
# #### Anaconda Navigator
#
# Getting set up with jupyter on Windows 10 is painless, there is an incredible tool called [Anaconda Navigator](https://www.anaconda.com/products/individual), an all in one data science toolkit for python that can also manage your packages and your environments through a simple interface. No more package dependency hell, especially when installing large packages for deep learning. Simply follow the link above and download the graphical installer for your OS. Once installed, you can launch your jupyter notebook environment here:
#
# 
# #### Fastpages
#
# Fastpages the platform I am using to setup your blog is incredibly simple and the team at fast.ai have documented the process amazingly, simply stepping through the documentation will allow you to copy their directory as a template along with all of the GitHub actions that will build and display your blog. You can see the documentation [here](https://github.com/fastai/fastpages#welcome-to-fastpages)
#
# 
# ### Paperspace Gradient
#
# Another fantastic facet of the course is that they don't encourage you to wade into the murky world of systems administration setting up packages, environments and GPU compliant kernels to get you up and running working with state-of-the-art deep learning models. They instead encourage you to look into using a Server Notebook, they reccommend either [Collab](https://course.fast.ai/start_colab), [Gradient](https://course.fast.ai/start_gradient) or [Sagemaker](https://course.fast.ai/start_sagemaker). I chose [Paperspace Gradient](https://console.paperspace.com/) and I am very happy with my choice.
# ### Microsoft Azure
#
# By far the most frustrating part of the set up for the course has nothing to do with Python, the notebook server, GitHub or fast.ai. In Chapter 2 of the book, the course encourages you to set up a Microsoft Azure account to get an API key so you can use their Image Search cognitive service to pull training images from the web to train a classifier model. Unfortunately the documentation on the site and the forum isn't incredibly helpful, so I am logging it here in case anyone should find it useful, but mainly so that if I have to do this again on another account I can remember how and where to find everything.
#
# Go to: https://azure.microsoft.com/en-gb/services/cognitive-services/
#
# 
#
# Scroll to the API's section of the page, and select "Web Search"
#
# 
#
# This will redirect you to https://www.microsoft.com/en-us/bing/apis/bing-web-search-api
#
# 
#
# Ensuring you already have an Azure account, this will take you to the console to create a Bing resource
#
# 
#
# Once you have filled in the details and created your resource it will take you to your Azure dashboard
#
# 
#
# Go to the "Keys and Endpoint" tab, this is where you can find your Bing Image Search Api Key
#
# 
#
# Now that you have your API key and replaced the `XXX` value in the Chapter 2 notebook, there is one more piece of code required to get the code in the chapter to work, below is a re-write of the `search_images_bing` definition, simply paste it as a code cell above where it is being called in the notebook. Everything should work for you now.
def search_images_bing(subscription_key, search_term, size = 150):
    """Query the Bing Image Search v7 API and return the results as a fastai
    `L` list (a drop-in replacement for `list` supporting `.attrgot`).

    Each result dict also receives a `content_url` key mirroring Bing's
    `contentUrl` field, because later notebook cells still expect the old
    field name.
    """
    endpoint = "https://api.bing.microsoft.com/v7.0/images/search"
    response = requests.get(
        endpoint,
        headers={"Ocp-Apim-Subscription-Key" : subscription_key},
        params={"q": search_term,
                "license": "public",
                "imageType": "photo",
                "count": size})
    response.raise_for_status()
    results = L(response.json()["value"], use_list=True)
    # Bing renamed the field to contentUrl; expose both spellings.
    for result in results:
        result["content_url"] = result["contentUrl"]
    return results
| _notebooks/2020-12-01-HAL-O-World.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Recommender Systems 2018/19
#
# ### Practice session on MF PyTorch
#
#
# +
from urllib.request import urlretrieve
import zipfile
# skip the download
#urlretrieve ("http://files.grouplens.org/datasets/movielens/ml-10m.zip", "data/Movielens_10M/movielens_10m.zip")
# Extract the ratings file from the (already downloaded) MovieLens 10M archive.
dataFile = zipfile.ZipFile("data/Movielens_10M/movielens_10m.zip")
URM_path = dataFile.extract("ml-10M100K/ratings.dat", path = "data/Movielens_10M")
URM_file = open(URM_path, 'r')
def rowSplit (rowString):
    """Parse one MovieLens ratings line ('user::item::rating::timestamp')
    into a (int, int, float, int) tuple."""
    user, item, rating, timestamp = rowString.rstrip("\n").split("::")
    return (int(user), int(item), float(rating), int(timestamp))
URM_file.seek(0)
URM_tuples = []
# Parse every rating line into a (user, item, rating, timestamp) tuple.
for line in URM_file:
    URM_tuples.append(rowSplit (line))
userList, itemList, ratingList, timestampList = zip(*URM_tuples)
userList = list(userList)
itemList = list(itemList)
ratingList = list(ratingList)
timestampList = list(timestampList)
import scipy.sparse as sps
# Build the user-rating matrix in COO form, then convert to CSR for fast row access.
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()
from Notebooks_utils.data_splitter import train_test_holdout
URM_train, URM_test = train_test_holdout(URM_all, train_perc = 0.8)
# -
# ### MF models rely upon latent factors for users and items which are called 'embeddings'
# +
num_factors = 10
n_users, n_items = URM_train.shape
# +
import torch
# Latent-factor ("embedding") tables for users and items.
user_factors = torch.nn.Embedding(num_embeddings = n_users, embedding_dim = num_factors)
item_factors = torch.nn.Embedding(num_embeddings = n_items, embedding_dim = num_factors)
# -
user_factors
item_factors
# ### To compute the prediction we have to multiply the user factors to the item factors, which is a linear operation.
#
# ### We define a single layer and an activation function, which takes the result and transforms it in the final prediction. The activation function can be used to restrict the predicted values (e.g., sigmoid is between 0 and 1)
# +
# Single linear layer mapping the factor interaction to a scalar prediction.
layer_1 = torch.nn.Linear(in_features = num_factors, out_features = 1)
layer_1
# +
# ReLU clamps predictions to be non-negative.
activation_function = torch.nn.ReLU()
activation_function
# -
# ## In order to compute the prediction you have to:
# * Define a list of user and item indices
# * Create a tensor from it
# * Create a variable from the tensor
# * Get the user and item embedding
# * Compute the element-wise product of the embeddings
# * Pass the element-wise product to the single layer network
# * Pass the output of the single layer network to the activation function
# +
from torch.autograd import Variable
# NOTE(review): `Variable` is a no-op wrapper in modern PyTorch (tensors are
# autograd-aware); kept for fidelity with the original notebook.
item_index = [15]
user_index = [42]
user_index = torch.Tensor(user_index).type(torch.LongTensor)
item_index = torch.Tensor(item_index).type(torch.LongTensor)
user_index = Variable(user_index)
item_index = Variable(item_index)
current_user_factors = user_factors(user_index)
current_item_factors = item_factors(item_index)
# Element-wise interaction of the two embeddings.
element_wise_product = torch.mul(current_user_factors, current_item_factors)
element_wise_product
# -
# ### To take the result of the prediction and transform it into a traditional numpy array you have to first call .detach() and then .numpy()
# ### The result is an array of 1 cell
# +
prediction = layer_1(element_wise_product)
prediction = activation_function(prediction)
# .detach() drops the autograd graph so the tensor can be converted to numpy.
prediction_numpy = prediction.detach().numpy()
print("Prediction is {}".format(prediction_numpy))
# -
# # Train a MF MSE model with PyTorch
#
# # Step 1 Create a Model python object
#
# ### The model should implement the forward function which computes the prediction as we did before
# +
class MF_MSE_PyTorch_model(torch.nn.Module):
    """Matrix-factorization model: prediction = ReLU(Linear(user_emb * item_emb))."""

    def __init__(self, n_users, n_items, n_factors):
        super(MF_MSE_PyTorch_model, self).__init__()
        self.n_users = n_users
        self.n_items = n_items
        self.n_factors = n_factors
        # NOTE: module creation order matters for reproducibility under a fixed seed.
        self.user_factors = torch.nn.Embedding(num_embeddings = self.n_users, embedding_dim = self.n_factors)
        self.item_factors = torch.nn.Embedding(num_embeddings = self.n_items, embedding_dim = self.n_factors)
        self.layer_1 = torch.nn.Linear(in_features = self.n_factors, out_features = 1)
        self.activation_function = torch.nn.ReLU()

    def forward(self, user_coordinates, item_coordinates):
        """Score the given batches of user and item indices."""
        interaction = torch.mul(self.user_factors(user_coordinates),
                                self.item_factors(item_coordinates))
        return self.activation_function(self.layer_1(interaction))

    def get_W(self):
        """User-factor matrix as a numpy array (n_users x n_factors)."""
        return self.user_factors.weight.detach().cpu().numpy()

    def get_H(self):
        """Item-factor matrix as a numpy array (n_items x n_factors)."""
        return self.item_factors.weight.detach().cpu().numpy()
# -
# # Step 2 Setup PyTorch devices and Data Reader
# +
# Select the compute device; CUDA is opt-in via `use_cuda`.
use_cuda = False
if use_cuda and torch.cuda.is_available():
    device = torch.device('cuda')
    print("MF_MSE_PyTorch: Using CUDA")
else:
    device = torch.device('cpu')
    print("MF_MSE_PyTorch: Using CPU")
# -
# ### Create an instance of the model and specify the device it should run on
pyTorchModel = MF_MSE_PyTorch_model(n_users, n_items, num_factors).to(device)
# ### Choose loss functions, there are quite a few to choose from
# MSE summed (not averaged) over the batch. BUG FIX: `size_average=False` was
# deprecated and later removed from PyTorch; `reduction='sum'` is the
# equivalent modern spelling.
lossFunction = torch.nn.MSELoss(reduction='sum')
# ### Select the optimizer to be used for the model parameters: Adam, AdaGrad, RMSProp etc...
# +
learning_rate = 1e-4
optimizer = torch.optim.Adagrad(pyTorchModel.parameters(), lr = learning_rate)
# -
# ### Define the DatasetIterator, which will be used to iterate over the data
#
# ### A DatasetIterator will implement the Dataset class and provide the __getitem__(self, index) method, which allows to get the data points indexed by that index.
#
# ### Since we need the data to be a tensor, we pre inizialize everything as a tensor. In practice we save the URM in coordinate format (user, item, rating)
# +
from torch.utils.data import Dataset
import numpy as np
class DatasetIterator_URM(Dataset):
    """Iterates a sparse URM as ((user, item) coordinate tensor, rating) samples.

    The matrix is stored in COO triplet form so each nonzero entry is one
    training data point.
    """

    def __init__(self, URM):
        URM = URM.tocoo()
        self.n_data_points = URM.nnz
        self.user_item_coordinates = np.empty((self.n_data_points, 2))
        self.user_item_coordinates[:,0] = URM.row.copy()
        self.user_item_coordinates[:,1] = URM.col.copy()
        # BUG FIX: the `np.float` alias was removed in NumPy 1.24; use the
        # concrete dtype instead.
        self.rating = URM.data.copy().astype(np.float64)
        self.user_item_coordinates = torch.Tensor(self.user_item_coordinates).type(torch.LongTensor)
        self.rating = torch.Tensor(self.rating)

    def __getitem__(self, index):
        """
        Format is (row, col, data)
        :param index:
        :return:
        """
        return self.user_item_coordinates[index, :], self.rating[index]

    def __len__(self):
        return self.n_data_points
# -
# ### We pass the DatasetIterator to a DataLoader object which manages the use of batches and so on...
# +
from torch.utils.data import DataLoader
batch_size = 200
dataset_iterator = DatasetIterator_URM(URM_train)
# The DataLoader shuffles and batches the (coordinates, rating) samples.
train_data_loader = DataLoader(dataset = dataset_iterator,
                               batch_size = batch_size,
                               shuffle = True,
                               #num_workers = 2,
                               )
# -
# ## And now we ran the usual epoch steps
# * Data point sampling
# * Prediction computation
# * Loss function computation
# * Gradient computation
# * Update
# +
# One training pass over the (shuffled) data loader, capped at 2000 batches.
# BUG FIX: the original reset an unused `cumulative_loss = 0` on every
# iteration (a dead store); it has been removed.
for num_batch, (input_data, label) in enumerate(train_data_loader, 0):
    # On windows requires int64, on ubuntu int32
    #input_data_tensor = Variable(torch.from_numpy(np.asarray(input_data, dtype=np.int64))).to(self.device)
    input_data_tensor = Variable(input_data).to(device)
    label_tensor = Variable(label).to(device)
    user_coordinates = input_data_tensor[:,0]
    item_coordinates = input_data_tensor[:,1]
    # FORWARD pass
    prediction = pyTorchModel(user_coordinates, item_coordinates)
    # Pass prediction and label removing last empty dimension of prediction
    loss = lossFunction(prediction.view(-1), label_tensor)
    if num_batch % 100 == 0:
        print("Batch {} of {}, loss {:.4f}".format(num_batch, len(train_data_loader), loss.data.item()))
    if num_batch == 2000:
        print("Interrupting train")
        break
    # BACKWARD pass
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# -
# ## After the train is complete (it may take a while and many epochs), we can get the matrices in the usual numpy format
# Export the learned factor matrices as numpy arrays.
W = pyTorchModel.get_W()
H = pyTorchModel.get_H()
W
H
| Jupyter notebook/Practice 7 - MF with PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp rgbkm
# -
#hide
# %load_ext autoreload
# %autoreload 2
# # Color physics of translucent inks
# ## Rendering an RGB image
#
# *Explain Kubelka-Munk theory*
# +
from inktime import data, rgbkm
import numpy as np
import matplotlib.pyplot as plt
# -
Rg = data.fetch_blackwhite()[:,:,0:3]
# todo: quick fix multiplier
D = 5 * data.fetch_star()[:,:,0]
# Hansa yellow RGB KM parameters according to Curtis (1997)
K_hansa = np.array([0.06, 0.21, 1.78])
S_hansa = np.array([0.50, 0.88, 0.009])
# Render the star thickness map over the black/white background image.
refl = rgbkm.reflectance(K_hansa, S_hansa, D, Rg)
plt.imshow(refl);
# ## Functions
# +
#export
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import cv2
import scipy.optimize as optimize
def reflectance(K, S, D, Rg):
    '''Single-colorant Kubelka-Munk reflectance.

    Based on the Nobbs (1997) formulation with a modified Saunderson
    expression for the infinite-thickness reflectance. Works for a single
    channel, 3 RGB channels, or spectral data with multiple wavelength
    channels.

    Parameters:
    -----------
    K: tuple-like (n channels)
        Colorant absorption coefficients per channel
    S: tuple-like (n channels)
        Colorant scattering coefficients per channel
    D: array (height x width)
        Colorant thickness image
    Rg: array (height x width x n) or rgb tuple with shape (3,)
        Background reflectance image or a single background color

    Returns:
    --------
    refl: array (height x width x n)
        n-channel reflectance image
    '''
    Rg = np.array(Rg)
    # An rgb tuple is promoted to a uniform background image matching D.
    if Rg.ndim == 1:
        height, width = D.shape
        background = np.ones([height, width, 3])
        background[:, :] = Rg
        Rg = background
    shape = Rg.shape
    n_channels = shape[-1]
    # Flatten spatial dimensions; channels stay as columns for broadcasting.
    K = np.array(K).reshape(1, n_channels)
    S = np.array(S).reshape(1, n_channels)
    D = np.array(D).reshape(-1, 1)
    Rg = Rg.reshape(-1, n_channels)
    # Reflectance of an infinitely thick colorant layer.
    ratio = S / K
    Rinf = ratio / (ratio + 1 + np.sqrt(1 + 2 * ratio))
    # Optical depth, clipped to keep exp() finite.
    Z = np.clip(D * np.sqrt(K * (K + 2 * S)), a_min=0, a_max=50)
    beta = np.exp(2 * Z) - 1
    alpha = (1 - Rinf**2) / (1 - Rg * Rinf)
    refl = (alpha * Rg + beta * Rinf) / (alpha + beta)
    return refl.reshape(shape)
# -
# +
# hide
def get_optical_density(img, bg_color, blf=True):
    '''Generates ideal ink optical density model for *img* with background color *bg_color*.

    Parameters:
    -----------
    img: array (height x width x channels)
        Input reflectance image
    bg_color: tuple-like
        Background (paper) color broadcast over the whole image
    blf: bool
        If True, apply an edge-preserving bilateral filter before computing densities

    Returns:
    --------
    Normalized optical density image.
    '''
    # generate a uniform background image filled with bg_color
    paper_color_img = np.ones_like(img)
    paper_color_img[:,:] = bg_color
    # optional edge-preserving smoothing (not sure if this is needed)
    if blf:
        img = cv2.bilateralFilter(img, 10, 0.1, 120) # got these params from 2018-11-16 notebook
    # Beer-Lambert style optical density: -log of the reflectance ratio,
    # with the ratio clipped to [0, 1] so the log stays finite and non-negative.
    # FIXME(review): `mu` is not defined or imported anywhere in this module,
    # so calling this function raises NameError. Identify the intended image
    # utility module providing normalize_image() and import it.
    img_od = mu.normalize_image(-np.log(np.clip(img/paper_color_img, a_min=0, a_max=1)))
    return img_od
class PaintDistribution:
    '''Single colorant layer model.

    Couples a colorant thickness map *D* with a background *Rg* and a
    measured reflectance *R_meas*, and fits Kubelka-Munk K and S
    coefficients so the rendered reflectance matches the measurement.
    '''
    def __init__(self, D, Rg, R_meas):
        '''Initializes stratigraphic model with thickness array *D*, background array *Rg* and measured array *R_meas*.'''
        self.D = D  # colorant thickness image (height x width)
        self.Rg = Rg  # background reflectance image (height x width x n channels)
        self.R_meas = R_meas  # measured reflectance image the fit targets
        D_max = self.D.max()
        # exp(2*Z) in reflectance() grows quickly with thickness; warn early
        if D_max > 10:
            print('Warning: found maxium thickness {} larger then 10. Might cause numerical problems.'.format(D_max))
    # TODO(review): reconsider naming -- `.residuals` below is both the
    # objective callable and a conventional attribute name.
    def residuals(self, KS):
        '''Returns residuals vector between measured and calculated for *KS*.

        *KS* packs [K_1..K_n, S_1..S_n] into a single flat vector as
        required by scipy.optimize.least_squares.
        '''
        n_channels = int(len(KS) / 2)
        K, S = KS[0:n_channels], KS[n_channels: 2*n_channels] # split vector
        img_calc = reflectance(K, S, self.D, self.Rg)
        img_diff = self.R_meas - img_calc
        # only pixels actually covered by colorant contribute to the fit
        is_non_zero_thickness = self.D > 0
        res = img_diff[is_non_zero_thickness].flatten()
        # NOTE(review): least_squares already squares the residual vector, so
        # squaring here makes the objective quartic in the error -- confirm
        # this is intended ("check quadratic" suggests it may not be).
        res = res**2 # check quadratic
        return res
    def fit_KS(self):
        '''Non-linear fit of K and S for stratigraphic model.

        Returns (K_fit, S_fit); also stores K_fit, S_fit and the rendered
        R_fit on the instance.
        '''
        n_channels = self.Rg.shape[-1]
        KS_start = np.ones(2 * n_channels)
        KS_min = np.ones(2 * n_channels) * 10e-8 # not sure if this avoids numerical problems
        KS_max = np.ones(2 * n_channels) * 100 # same
        bounds = [KS_min, KS_max]
        # self.residuals is the objective callable; tight tolerances for accuracy
        fit = optimize.least_squares(self.residuals, KS_start, verbose=1, bounds=bounds, xtol=1e-10, ftol=1e-10, gtol=1e-10) # self is callable (function object)
        self.K_fit, self.S_fit = fit.x[0:n_channels], fit.x[n_channels:2*n_channels]
        self.R_fit = reflectance(self.K_fit, self.S_fit, self.D, self.Rg) # for convenience
        return self.K_fit, self.S_fit
class Ramp_model:
    '''Tiny 3x4-pixel synthetic scene with two painted patches, used to fit K and S.'''

    def __init__(self, material, rgb_bg, rgb_1, rgb_2, thickness_1, thickness_2):
        '''Fits K and S to a simple two patch ramp model '''
        # TODO: extend to an n-patch list of (color, thickness) pairs
        self.material = material
        # uniform background color over the whole 3x4 scene
        background = np.ones([3, 4, 3], dtype=float)
        background[:, :] = rgb_bg
        self.Rg = background
        # the two interior pixels (row 1, columns 1-2) carry the measured patches
        measured = self.Rg.copy()
        measured[1, 1:3] = np.array([rgb_1, rgb_2])
        self.R_meas = measured
        # colorant thickness: zero everywhere except the two patch pixels
        thickness = np.zeros([3, 4])
        thickness[1, 1:3] = [thickness_1, thickness_2]
        self.D = thickness
        # fit K/S against the synthetic measurement, then render for inspection
        fitter = PaintDistribution(self.D, self.Rg, self.R_meas)
        self.K_fit, self.S_fit = fitter.fit_KS()
        self.rendering = reflectance(self.K_fit, self.S_fit, self.D, self.Rg)
        print('Created 3x4 pixel ramp model object for: "{}"'.format(self.material))
| notebooks/10_Color-physics-of-translucent-inks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sqlite3
import json
from datetime import datetime
import time
from sqlite3 import Error
# source DB: presumably the May2015 reddit comment dump -- TODO confirm schema
conn = sqlite3.connect("database.sqlite")
c = conn.cursor()
# destination DB for the chatbot's parent/reply training pairs
conn2 = sqlite3.connect("chatbot.db")
c2 = conn2.cursor()
c.execute("""select * from May2015 """)
# pull up to 5 million rows into memory in one go
rows = c.fetchmany(5000000)
conn.commit()  # NOTE(review): effectively a no-op -- the SELECT opened no write transaction
print(type(rows))
# +
import pandas as pd
df = pd.DataFrame(rows)
# -
#df.head()
# FIXME(review): an empty VALUES () is invalid SQLite syntax and raises
# sqlite3.OperationalError. This needs '?' placeholders bound to actual row
# data, e.g. VALUES (?, ?, ?, ?, ?, ?, ?) with executemany over the rows
# mapped onto the parent_reply columns.
c2.execute(""" INSERT INTO parent_reply(parent_id, comment_id, parent, comment, subreddit, unix, score) VALUES () """)
# +
#df.tail()
# -
| Database manipulations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from collections import deque, namedtuple

# we'll use infinity as a default distance to nodes.
inf = float('inf')
Edge = namedtuple('Edge', 'start, end, cost')


def make_edge(start, end, cost=1):
    """Build an Edge record; cost defaults to 1."""
    return Edge(start, end, cost)


class Graph:
    """Directed, weighted graph supporting Dijkstra shortest paths.

    Each input tuple creates one directed edge; the *both_ends* helpers on
    add/remove treat a pair of opposite edges as an undirected link.
    """

    def __init__(self, edges):
        """*edges* is an iterable of (start, end) or (start, end, cost) tuples."""
        # validate the edge tuples before building Edge records
        wrong_edges = [i for i in edges if len(i) not in [2, 3]]
        if wrong_edges:
            raise ValueError('Wrong edges data: {}'.format(wrong_edges))
        self.edges = [make_edge(*edge) for edge in edges]

    @property
    def vertices(self):
        """Set of all vertices appearing as an edge endpoint."""
        return set(
            sum(
                ([edge.start, edge.end] for edge in self.edges), []
            )
        )

    def get_node_pairs(self, n1, n2, both_ends=True):
        """Ordered pairs to match: both directions when *both_ends* is True."""
        if both_ends:
            node_pairs = [[n1, n2], [n2, n1]]
        else:
            node_pairs = [[n1, n2]]
        return node_pairs

    def remove_edge(self, n1, n2, both_ends=True):
        """Remove the edge(s) between *n1* and *n2*."""
        node_pairs = self.get_node_pairs(n1, n2, both_ends)
        # iterate over a copy so removing entries does not skip elements
        edges = self.edges[:]
        for edge in edges:
            if [edge.start, edge.end] in node_pairs:
                self.edges.remove(edge)

    def add_edge(self, n1, n2, cost=1, both_ends=True):
        """Add an edge (and its reverse when *both_ends*); raise on duplicates.

        BUG FIX: the original *returned* a ValueError instance instead of
        raising it, so duplicate edges went completely unreported.
        """
        node_pairs = self.get_node_pairs(n1, n2, both_ends)
        for edge in self.edges:
            if [edge.start, edge.end] in node_pairs:
                raise ValueError('Edge {} {} already exists'.format(n1, n2))
        self.edges.append(Edge(start=n1, end=n2, cost=cost))
        if both_ends:
            self.edges.append(Edge(start=n2, end=n1, cost=cost))

    @property
    def neighbours(self):
        """Mapping vertex -> set of (neighbour, cost), following edge direction."""
        neighbours = {vertex: set() for vertex in self.vertices}
        for edge in self.edges:
            neighbours[edge.start].add((edge.end, edge.cost))
        return neighbours

    def dijkstra(self, source, dest):
        """Return the shortest path from *source* to *dest* as a deque.

        Returns an empty deque when *dest* is unreachable (or equals *source*).
        Raises ValueError for an unknown source vertex.
        """
        # BUG FIX: use a real exception -- `assert` is stripped under `python -O`
        if source not in self.vertices:
            raise ValueError("Such source node doesn't exist")
        distances = {vertex: inf for vertex in self.vertices}
        previous_vertices = {
            vertex: None for vertex in self.vertices
        }
        distances[source] = 0
        vertices = self.vertices.copy()
        while vertices:
            # O(V) selection keeps this simple; switch to heapq to scale
            current_vertex = min(
                vertices, key=lambda vertex: distances[vertex])
            vertices.remove(current_vertex)
            if distances[current_vertex] == inf:
                break  # all remaining vertices are unreachable
            for neighbour, cost in self.neighbours[current_vertex]:
                alternative_route = distances[current_vertex] + cost
                if alternative_route < distances[neighbour]:
                    distances[neighbour] = alternative_route
                    previous_vertices[neighbour] = current_vertex
        # walk predecessors back from dest to rebuild the path
        path, current_vertex = deque(), dest
        while previous_vertices[current_vertex] is not None:
            path.appendleft(current_vertex)
            current_vertex = previous_vertices[current_vertex]
        if path:
            path.appendleft(current_vertex)
        return path
# +
# Demo: classic 6-node example graph; expected shortest path a -> c -> d -> e
graph = Graph([
    ("a", "b", 7), ("a", "c", 9), ("a", "f", 14), ("b", "c", 10),
    ("b", "d", 15), ("c", "d", 11), ("c", "f", 2), ("d", "e", 6),
    ("e", "f", 9)])
print(graph.dijkstra("a", "e"))
| dijkstry_algorithm_py.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="yTBP_QYuu6tc"
# #!pip install --upgrade pip
# #!pip install transformers==3.1.0
# + id="TiU_ES5tzpMH"
from transformers import pipeline
from os import listdir
import pandas as pd
from tqdm import tqdm
from sklearn.metrics import f1_score
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
# + id="spkccRiv0CB3"
# Zero-shot classification pipeline; device=0 pins the model to the first GPU
classifier = pipeline("zero-shot-classification", device=0) # to utilize GPU
# +
#path_screenplays_scenes='./transcripts/shot-aligned_transcripts_2021_Max_Jack_Tanya.csv'
path_screenplays_scenes='./transcripts/shot-aligned_transcripts_2021_Peggy_Archie.csv'
# + colab={"base_uri": "https://localhost:8080/", "height": 69} id="hkfE6NRA0Dzy" outputId="8b3f9e37-3e46-4b25-813b-c5fa7bbc3c97"
def get_results(df, classifier, topic_labels):
    """Classify every transcript in *df* with a zero-shot *classifier*.

    Adds 'topic' (best-scoring label) and 'score' (its confidence) columns
    in place and returns the same DataFrame. Rows without a transcript keep
    the initial None values.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'transcript' column.
    classifier : callable(text, labels) -> {'labels': [...], 'scores': [...]}
        e.g. a transformers zero-shot-classification pipeline.
    topic_labels : list of str
        Candidate labels passed to the classifier.
    """
    df['topic'] = None
    df['score'] = None
    for i in tqdm(df.index):
        text = df.loc[i, 'transcript']
        # BUG FIX: missing transcripts loaded via pandas are NaN, not None;
        # pd.notna() guards against both so the classifier never sees NaN.
        if pd.notna(text):
            result = classifier(text, topic_labels)
            # keep only the top-ranked label and its score
            df.loc[i, 'topic'] = result['labels'][0]
            df.loc[i, 'score'] = result['scores'][0]
    return df
# +
#Initial experiments with other candidate labels
"""candidate_labels_df=pd.read_csv('life_events_scale.csv',sep='\t', names=['topic','weight'])
candidate_labels_df=candidate_labels_df
candidate_labels=dict(zip(candidate_labels_df.topic, candidate_labels_df.weight))
for topic,weight in tqdm(candidate_labels.items()):
print(topic)
print(weight)"""
# -
# topic -> weight map; weights are reciprocals of severity ratings,
# presumably from a soap-opera/life-events scale -- TODO confirm the source
soap_opera_scale={'extramarital affair': 1/1.98, 'get divorced': 1/1.96,'illegitimate child': 1/1.45,'institutionalized for emotional problem': 1/1.43,'happily married': 1/4.05,'serious accident': 1/2.96,'murdered': 1/1.81,'attempt suicide': 1/1.26,'blackmailed': 1/1.86,'unfaithful spouse': 1/2.23,'sexually assaulted': 1/2.60,'abortion': 1/1.41}
def get_results_several_binary(df, classifier, topic_labels):
    """Score every transcript against each topic independently (binary zero-shot).

    *topic_labels* maps topic -> weight. For each topic, a new column of the
    same name is added holding classifier_score * weight; the mutated
    DataFrame is returned.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain a 'transcript' column.
    classifier : callable(text, label, multi_class=...) -> {'scores': [...]}
        e.g. a transformers zero-shot-classification pipeline.
    topic_labels : dict mapping label string -> numeric weight
    """
    for topic,weight in tqdm(topic_labels.items()):
        df[topic] = None
        for i in df.index:
            text = df.loc[i, 'transcript']
            # BUG FIX: pandas missing values are NaN, not None; pd.notna()
            # guards against both so the classifier never receives NaN.
            if pd.notna(text):
                result = classifier(text,topic,multi_class=False)
                # weight rescales the raw score by the topic's severity weight
                df.loc[i, topic] = result['scores'][0]*weight
    return df
# +
# FIXME(review): `df_with_text` and `candidate_labels` are not defined in this
# notebook (candidate_labels only appears inside the commented-out experiment
# above) -- running this cell raises NameError. Verify whether
# `soap_opera_scale` was the intended label dict and where df_with_text loads.
first_results=get_results_several_binary(df_with_text,classifier,candidate_labels)
cols_to_keep=['Unnamed: 0', 'begin', 'end', 'filename', 'shot_id', 'transcript']
# overall score per shot = max over all weighted topic columns
first_results['score'] = first_results.drop(cols_to_keep, axis=1).max(axis=1)
first_results=first_results.sort_values(by=['score'],ascending=False)
first_results.to_csv('soap_opera_Peggy_Archie.csv')
| Zero_Shot_Pipeline_eastenders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Rental bike forecasting using SARIMAX
library(readr)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(xts)
library(fpp2)
library(DMwR)
library(Metrics)
library(tseries)
# default figure size for notebook plots
options(repr.plot.width=8, repr.plot.height=4)
# ## 1. Data import
path <- '../data/bike/bike-sharing-processed.csv'
data <- read_csv(path)
# quick sanity checks on the imported data
head(data)
tail(data)
nrow(data)
# ## 2. Convert to ts object
# +
# define date range as per the data
date_range <- seq(as.Date('2011-01-01'), as.Date('2012-12-31'), by='day')
# no need to put the date column as it will generate as ts index
# day-of-year of the first observation, used as the ts() start offset
dayofyear <- as.numeric(format(date_range[1], '%j'))
# frequency = 365 treats the series as daily with yearly seasonality
bike_data <- ts(data[, 2:14], start=c(2011, dayofyear), frequency = 365)
# -
head(bike_data)
# column 12 holds the rental bike count series plotted throughout
autoplot(bike_data[, 12]) +
ggtitle('Bike count') +
ylab('count')
# ## 3. Data analysis
autoplot(diff(bike_data[, 12])) +
ggtitle('Differenced bike count') +
ylab('count')
# - The data seems to be I(1)
# ## 3. Train test split
# +
# train and test split
# last 2 month will be test set rest training set
# we will drop the explanatory variables for baseline models
# as they will not be need for forecasting
test_size <- as.numeric(61)
train_size <- nrow(bike_data)- test_size
train <- head(bike_data, train_size)
test <- tail(bike_data, test_size)
autoplot(train[, 12], series = 'train') +
autolayer(test[, 12], series = 'test') +
ggtitle('Rental bike count') +
ylab('count') +
xlab('Year') +
guides(colour=guide_legend(title="Data"))
# -
tail(train[, 12])
head(test[, 12])
head(train)
# +
# extract exogenous variables
# columns 8-11 hold the explanatory regressors passed as xreg
exog_col <- colnames(train)[8:11]
# hurricane sandy to the exogenous list
# exog_col <- c(exog_col, colnames(train)[13])
train_exog <- train[, exog_col]
test_exog <- test[, exog_col]
head(train_exog)
# head(test_exog)
# -
# ## 4. Order evaluation using auto.arima()
model <- auto.arima(train[, 12], d = 1, D = 1, max.p = 2, max.q = 2, max.P = 2,
max.Q = 2, max.d = 1, max.D = 1, start.p = 0,
start.q = 0, start.P = 0, start.Q = 0)
summary(model)
# ## 5. Model fitting
# +
# use arima order determined in previous step
# fit <- Arima(train[, 12], order = c(1, 1, 1), seasonal = c(0, 1, 0),
# include.mean=FALSE, xreg=train_exog)
# saveRDS(fit, '../checkpoints/bike-sharing/R-models/sarimax-v1.rds')
# -
# load the previously fitted and checkpointed SARIMAX model
fit <- readRDS('../checkpoints/bike-sharing/R-models/sarimax-v1.rds')
train_pred <- fitted(fit)
# 61-step-ahead forecast over the test horizon, with exogenous regressors
test_pred <- forecast(fit, h=61, xreg=test_exog)$mean
autoplot(bike_data[, 12], series = 'actual data') +
autolayer(train_pred, series = 'train prediction') +
autolayer(test_pred, series = 'test prediction') +
xlab('Year') +
ylab('count') +
ggtitle('Forecasting using SARIMAX')
# ## 6. Model evaluation
# +
# NRMSE = RMSE scaled by the series' standard deviation
train_rmse <- sqrt(mse(train[, 12], train_pred))
train_mae <- mae(train[, 12], train_pred)
train_nrmse <- train_rmse/sd(train[, 12])
test_rmse <- sqrt(mse(test[, 12], test_pred))
test_mae <- mae(test[, 12], test_pred)
test_nrmse <- test_rmse/sd(test[, 12])
print(paste0('Training NRMSE :', round(train_nrmse, 3)))
print(paste0('Training MAE :', round(train_mae, 3)))
print(paste0('Test NRMSE :', round(test_nrmse, 3)))
print(paste0('Test MAE :', round(test_mae, 3)))
# -
autoplot(test[, 12], series = 'test data') +
autolayer(test_pred, series = 'test prediction') +
xlab('Year') +
ylab('count') +
ggtitle('Forecasting using SARIMAX on test data') +
theme(legend.position="bottom", legend.box = "horizontal")
# ## 7. Residual and co-efficient analysis
summary(fit)
checkresiduals(fit)
| 02.bike_count/11.bike-count-SARIMAX.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demonstrate Functionality of the *mpc_nbody* module
# - Primarily written by <NAME>, with some additions from <NAME>
# ##### ---------------------------------------------------------------------------
# ##### *** MJP : Potential Silent Kernel Crash ... ***
# ##### At the time of writing there are some hard-coded links to ephemeris files in the REBOUNDX C-Code
# ##### As such, the python code(s) that call it will crash UNLESS you provide symbolic links in ANY/ALL run-directory
# ##### E.g.
# ##### >>> ln -s /Users/matthewjohnpayne/Envs/reboundx/examples/ephem_forces/linux_p1550p2650.430 .
# ##### >>> ln -s /Users/matthewjohnpayne/Envs/reboundx/examples/ephem_forces/sb431-n16s.bsp .
# ##### ---------------------------------------------------------------------------
#
# ##### Imports ...
# +
# Third-party and standard-library imports
import sys
import os
from filecmp import cmp
import numpy as np
import pytest
import importlib
from astroquery.jplhorizons import Horizons
# parent directory is */cheby_checker
HEAD_DIR = os.path.dirname(os.path.realpath(os.getcwd()))
sys.path.append(os.path.join(HEAD_DIR))
print(f' HEAD_DIR: {HEAD_DIR} ')
# import nbody-related code from main cheby_checker directory
from cheby_checker import mpc_nbody, parse_input
# reload picks up any code edits made while the kernel is running
importlib.reload(mpc_nbody)
# -
# ### Import some development data for some numbered objects
# - This was downloaded from JPL horizons
DATA_DIR = os.path.join(HEAD_DIR, 'dev_data')
# orbit files for two numbered objects, downloaded from JPL Horizons
filenames = [os.path.join(DATA_DIR, file)
for file in ['30101.eq0_horizons', '30102.eq0_horizons']]
au_km = 149597870.700 # This is now a definition
# 6-element state vectors (3 position + 3 velocity components) for each
# object; presumably AU and AU/day from Horizons -- TODO confirm units/frame
vector1 = [-2.093834952466475E+00, 1.000913720009255E+00, 4.197984954533551E-01, -4.226738336365523E-03, -9.129140909705199E-03, -3.627121453928710E-03]
vector2 = [-3.143563543369602e+00, 2.689063646113277E+00, 3.554211184881579E+00, -5.610620819862405e-03, -4.232958051824352E-03, -1.638364029313663E-03]
# parse the orbit files into ParseElements objects ('eq' = equinoctial format)
Parsed1 = parse_input.ParseElements(filenames[0], 'eq')
Parsed2 = parse_input.ParseElements(filenames[1], 'eq')
# # Explicit Test Code Exists in mpc_nbody/tests
#
# - test_parse_input.py
# - test_run_nbody.py
#
# Run these using pytest:
# - cd ../tests
# - pytest test_parse_input.py
# - pytest test_run_nbody.py
# # Top-level: "mpc_nbody.NbodySim" - Parse, run, save, cheby.
# - Instantiation takes either no input (for manually feeding vectors later) or an input file in ele220 (not yet implemented) or OrbFit format, then calls the parser from parse_input.
# - Calling the class optionally takes a tstart, list of vectors, timestep, time range. If a filename was supplied at instantiation, the class can be called with no input to simply integrate that orbit from the epoch of the orbit (although one might want to change the default tstep and trange).
# - Both call a number of under-lying functionalities.
# +
# %%time
importlib.reload(mpc_nbody)
# First, let's initiate the class with an input file:
Sim = mpc_nbody.NbodySim(filenames[0], 'eq', save_parsed=True)
# Because save_parsed=True, the parsed orbit is saved to a holman_ic file:
print('Contents of file, "holman_ic" ... ')
# !cat holman_ic
# (this is optional; the holman_ic file does not get used.
# To demonstrate, let's delete it.)
# !rm holman_ic
# ---------------------------------------------------------------------------
#
# *** MJP : Potential Silent Kernel Crash ... ***
# At the time of writing there are some hard-coded links to ephemeris files in the REBOUNDX C-Code
# As such, the python code(s) that call it will crash UNLESS you provide symbolic links in ANY/ALL run-directory
# E.g.
# >>> ln -s /Users/matthewjohnpayne/Envs/reboundx/examples/ephem_forces/linux_p1550p2650.430 .
# >>> ln -s /Users/matthewjohnpayne/Envs/reboundx/examples/ephem_forces/sb431-n16s.bsp .
#
# ---------------------------------------------------------------------------
# Now run the integrator, by calling the object.
# tstep/trange are presumably in days -- TODO confirm against mpc_nbody docs
print('Running integration ... ')
Sim(tstep=20, trange=600, save_output=True)
# Because of the (again, optional) save_output=True, output was saved here:
# !head simulation_states.dat
# !wc simulation_states.dat
# This information is also all available inside the object now:
for item in Sim.__dict__ : print(item)
# -
# # Lower-level functionalities
# - The above "NbodySim" usage uses some underlying functionality to perform the parsing, integration and storage of output.
# - Here we demonstrate some of the underlying functionality.
# ##### mpc_nbody.NbodySim.\_\_init__
# - With no arguments to set up an empty object.
# No arguments: set up an empty object (vectors supplied later via __call__)
Sim = mpc_nbody.NbodySim()
Sim.__dict__
# ##### mpc_nbody.NbodySim.\_\_call__
# - Runs the N-body integrator. Can be done in a few different ways.
# Method 1: a list containing the 6 elements of one object
importlib.reload(mpc_nbody)
Sim = mpc_nbody.NbodySim()
Sim(vectors = vector1,
tstart=2456117.5, tstep=20, trange=600)
_ = [print(k, np.shape(Sim.__dict__[k]), type(Sim.__dict__[k]))
for k in Sim.__dict__]
# Method 2: a flat list containing the 6 elements of multiple objects
importlib.reload(mpc_nbody)
Sim = mpc_nbody.NbodySim()
Sim(vectors = vector1 + vector2,
tstart=2456117.5, tstep=20, trange=600)
_ = [print(k, np.shape(Sim.__dict__[k]), type(Sim.__dict__[k]))
for k in Sim.__dict__]
# Method 3: a single parse_input.ParseElements object
importlib.reload(mpc_nbody)
Sim = mpc_nbody.NbodySim()
Sim(vectors = Parsed1, tstart=Parsed1.time.tdb.jd, tstep=20, trange=600)
_ = [print(k, np.shape(Sim.__dict__[k]), type(Sim.__dict__[k]))
for k in Sim.__dict__]
# Method 4: a list of parse_input.ParseElements objects
importlib.reload(mpc_nbody)
Sim = mpc_nbody.NbodySim()
Sim(vectors = [Parsed1, Parsed2], tstart=Parsed1.time.tdb.jd, tstep=20, trange=600)
_ = [print(k, np.shape(Sim.__dict__[k]), type(Sim.__dict__[k]))
for k in Sim.__dict__]
# Method 5: let NbodySim parse the input file itself
# (only implemented for one object)
importlib.reload(mpc_nbody)
Sim = mpc_nbody.NbodySim(filenames[0], 'eq')
# This essentially stores the parsed elements within a pparticle attribute in Sim
Sim(tstep=20, trange=600)
_ = [print(k, np.shape(Sim.__dict__[k]), type(Sim.__dict__[k]))
for k in Sim.__dict__]
# ##### mpc_nbody.NbodySim.save_output()
# - Saves the output to a file (filename is optional argument).
importlib.reload(mpc_nbody)
Sim = mpc_nbody.NbodySim(filenames[0], 'eq')
Sim(tstep=20, trange=600)
# write the integration results to an explicitly named file
Sim.save_output(output_file='notebook_save.dat')
# !head notebook_save.dat
# ##### mpc_nbody.run_nbody(input_vectors, tstart, tstep, trange)
# - The function that actually runs the N-body integrator. Called by NbodySim
importlib.reload(mpc_nbody)
# call the underlying integrator directly with two concatenated 6-vectors
(input_vectors, input_n_particles, output_times, output_vectors, output_n_times, output_n_particles
) = mpc_nbody.run_nbody(vector1 + vector2, tstart=2456117.5,
tstep=20, trange=600, geocentric=False)
np.shape(output_vectors)
# ##### mpc_nbody._fix_input(input)
# - Used to interpret what kind of input is given (6 element list, ParseElements object, list of ParseElements objects) and returns a standardized format (list containing 6 elements for each object).
# - Not intended for user usage, but demonstrated here anyway.
# Case 1: a single parse_input.ParseElements object
reparsed, nobj = mpc_nbody._fix_input(Parsed1)
# BUG FIX: each object occupies elements [6*i, 6*i+6) of the flat list; the
# original slice `reparsed[0+i:6+i]` printed overlapping windows mixing the
# components of different objects whenever nobj > 1.
for i in np.arange(nobj): print(f'object {i}', reparsed[6*i:6*i+6])
# Case 2: a list of parse_input.ParseElements objects
reparsed, nobj = mpc_nbody._fix_input([Parsed1, Parsed2])
for i in np.arange(nobj): print(f'object {i}', reparsed[6*i:6*i+6])
# Case 3: a list containing the 6-vector for an object
reparsed, nobj = mpc_nbody._fix_input(vector1)
for i in np.arange(nobj): print(f'object {i}', reparsed[6*i:6*i+6])
# Case 4: a list containing the 6-vectors of multiple objects as one long list.
# (this case is identical to the output)
reparsed, nobj = mpc_nbody._fix_input(vector1 + vector2)
for i in np.arange(nobj): print(f'object {i}', reparsed[6*i:6*i+6])
| notebooks/archaic/Demonstrate_Functionality_mpc_nbody.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started processing Kafka with Spark
#
# The first thing we'll need to do is tell Spark where to find the Kafka driver before we set Spark up. Currently, our notebook images are built against Spark 2.2. If you're using this with a different version of Spark, be sure to change `SPARK_VERSION` in the cell below before executing it.
import os
# Spark version used to pick the matching Kafka connector package below
SPARK_VERSION="2.2.0"
# tell spark-submit to pull the Kafka structured-streaming source at startup
os.environ["PYSPARK_SUBMIT_ARGS"] = "--packages org.apache.spark:spark-sql-kafka-0-10_2.11:%s pyspark-shell" % SPARK_VERSION
# Next up, we'll connect to Spark by establishing a `SparkSession`.
# +
import pyspark
from pyspark.sql import SparkSession
# local[2]: run Spark locally with two worker threads
spark = SparkSession \
    .builder \
    .master("local[2]") \
    .appName("Social Firehose") \
    .getOrCreate()
# -
# We're going to begin by loading the contents of a Kafka topic into a data frame. Because Spark data frames are _lazy_, or recomputed when accessed, this data frame will always have the most recent collection of messages in it.
# Batch-read the Kafka topic; the frame is lazy, so each action re-reads it
df = spark \
    .read \
    .format("kafka") \
    .option("kafka.bootstrap.servers", "kafka.kafka.svc:9092") \
    .option("subscribe", "social-firehose") \
    .load()
# We can see that this data frame always has the most recent collection of messages by running the `count()` action on it twice with a short delay in the middle. Note how many messages are generated in ten seconds:
import time
a = df.count()
time.sleep(10)
b = df.count()
(a, b)
# We can inspect the first few messages, but they'll be in a pretty raw format.
df.take(3)
# Now we'll import some functions and types from the Spark library so we can do something more useful with our data set.
from pyspark.sql.functions import explode
from pyspark.sql.functions import split
from pyspark.sql.functions import from_json
from pyspark.sql.functions import column
from pyspark.sql.types import StringType
from pyspark.sql.types import StructType
from pyspark.sql.types import StructField
# The first thing we'll do is extract the JSON payloads of the messages; we'll inspect the first ten as a sanity check.
# Kafka delivers the payload as binary; cast it to a UTF-8 string column
values = df.select(df["value"].cast(StringType()).alias("value"))
values.show(10)
# The next thing we'll do is impose some structure on the messages by converting the serialized JSON objects into actual records:
#
# 1. First, we'll declare a `StructType` for the structure of our messages (three strings, named `text`, `user_id`, and `update_id`),
# 2. Next, we'll convert the JSON strings to structures using the `from_json` dataframe function, and
# 3. Finally, we'll `SELECT` the fields of the object so we have something that looks like a flat database tuple.
# three string fields expected in each JSON payload
structure = StructType([StructField(fn, StringType(), True) for fn in "text user_id update_id".split()])
# parse the JSON strings into structs, then flatten to database-style tuples
records = values.select(from_json(values["value"], structure).alias("json")) \
.select(column("json.update_id"), column("json.user_id").alias("user_id"), column("json.text"))
records.show(10)
# We can perform database-style aggregations on this data frame, like identifying the users responsible for the most status updates:
user_counts = records.groupBy("user_id").count().orderBy("count", ascending=False)
user_counts.show()
# If you run that query several times with a short delay in between, you may get different results since the data frame will reflect newly-arriving messages. Try it out!
#
# We can also count the number of users who have issued status updates (because of how we're generating the synthetic stream of updates, there is an upper bound on this number):
records.select("user_id").distinct().count()
# We can also identify the most prolix users. You probably have some social media connections who take advantage of every extra bit of character limit; a query like this will show you who they are!
from pyspark.sql.functions import length
# average update length per user, longest-winded first
user_loquacity = records.select(column("user_id"), length("text").alias("update_len")) \
.groupBy("user_id") \
.avg() \
.orderBy("avg(update_len)", ascending=False)
user_loquacity.show()
# We can also identify the most popular hashtags in users' updates. We'll start by turning each update into an array of words. Then we'll explode each array into multiple rows, so that each row has a separate, single element, i.e.
#
# ```
# 1, 2, "foo bar blah"
# ```
#
# would become
#
# ```
# 1, 2, [foo, bar, blah]
# ```
#
# which would become
#
# ```
# 1, 2, foo
# 1, 2, bar
# 1, 2, blah
# ```
#
# We'll then filter for hashtags (keeping only words starting with `#`) so we can find the most popular!
#
# split each update into words, exploding into one row per word
words = records.select(explode(split("text", " ")).alias("word"))
# keep only hashtags (words starting with '#')
hashtags = words.filter(column("word").startswith("#"))
words.show()
hashtags.show()
hashtag_counts = hashtags.groupBy("word").count().orderBy("count", ascending=False)
hashtag_counts.show()
| notebooks/social-firehose.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook to calculate pi
# ## Relevant formulas
#
# - square area: $s = (2 r)^2$
# - circle area: $c = \pi r^2$
# - $c/s = (\pi r^2) / (4 r^2) = \pi / 4$
# - $\pi = 4 * c/s$
# ## Image to visualize the concept
#
# 
# +
# importing modules that we will need
import random
import matplotlib.pyplot as plt
# +
# initializing the number of "throws"
num_points = 5000
# +
# here we "throw darts" and count the number of hits:
# a dart at (x, y) lands inside the quarter circle when x^2 + y^2 < 1
points = []
hits = 0
for _ in range(num_points):
    x, y = random.random(), random.random()
    if x*x + y*y < 1.0:
        hits += 1
        points.append((x, y, "orange"))
    else:
        points.append((x, y, "xkcd:kelly green"))
# +
# unzip points into 3 lists
x, y, colors = zip(*points)
# define figure dimensions
fig, ax = plt.subplots()
fig.set_size_inches(6.0, 6.0)
# plot results
ax.scatter(x, y, c=colors)
# +
# compute and print the estimate: pi = 4 * (circle area / square area)
fraction = hits / num_points
4 * fraction
# -
# +
# # !conda install -c r r-essentials
# # !conda install -y rpy2
# +
# # %load_ext rpy2.ipython
# +
# import pandas as pd
# df = pd.DataFrame({
# 'cups_of_coffee': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
# 'productivity': [2, 5, 6, 8, 9, 8, 0, 1, 0, -1]
# })
# df.plot()
# +
# library(ggplot2)
# ggplot(df, aes(x=cups_of_coffee, y=productivity)) + geom_line()
| exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Ten en cuenta la siguiente curva de generalización, que muestra la pérdida del conjunto de entrenamiento y el conjunto de validación con respecto a la cantidad de iteraciones de entrenamiento.
#
# 
#
# **Figura 1. Pérdida en los conjuntos de entrenamiento y de validación.**
#
# En la Figura 1, se muestra un modelo en el que la pérdida de entrenamiento se reduce gradualmente, pero la pérdida de validación eventualmente aumenta. En otras palabras, esta curva de generalización muestra que el modelo se sobreajusta en función de los datos en el conjunto de entrenamiento. Si aplicamos nuestra Navaja de Ockham interna, tal vez podamos prevenir el sobreajuste mediante la penalización de modelos complejos, un principio denominado regularización.
#
# En otras palabras, en lugar de solo intentar reducir la pérdida (minimización del riesgo empírico):
#
# $$\begin{eqnarray}
# minimize(Loss(Data|Model))
# \end{eqnarray}$$
#
# ahora minimizaremos la pérdida más la complejidad, lo que se denomina minimización del riesgo estructural:
#
# $$\begin{eqnarray}
# minimize(Loss(Data|Model) + complexity(Model))
# \end{eqnarray}$$
#
# Nuestro algoritmo de optimización de entrenamiento ahora es una función de dos términos: el **término de pérdida**, que mide qué tan bien se ajusta el modelo a los datos, y el **término de regularización**, que mide la complejidad del modelo.
#
# El Curso intensivo de aprendizaje automático se centra en dos formas comunes (y, de cierto modo, relacionadas) de interpretar la complejidad del modelo:
#
# * la complejidad del modelo como una función de las ponderaciones (o pesos) de todos los atributos que contiene
# * la complejidad del modelo como una función de la cantidad total de atributos con ponderaciones que no sean cero (este enfoque se trata en un módulo más adelante)
#
# Si la complejidad del modelo es una función de ponderaciones, una ponderación de atributos con un valor absoluto alto es más complejo que una con un valor absoluto bajo.
#
# Podemos cuantificar la complejidad mediante la fórmula de la **regularización $L_2$**, que define el término de regularización como la suma de los cuadrados de todas las ponderaciones de atributos:
#
# $$\begin{eqnarray}
# L_2\text{ regularization term} = ||\boldsymbol{w}||^2_2 = w^2_1 + w^2_2 + ... + w^2_n
# \end{eqnarray}$$
#
# En esta fórmula, las ponderaciones cerca de cero tienen poco efecto en la complejidad del modelo, mientras que las ponderaciones atípicas pueden tener un gran impacto.
#
# Por ejemplo, un modelo lineal con las siguientes ponderaciones:
#
# $$\begin{eqnarray}
# \{ w_1 = 0.2, w_2 = 0.5, \mathbf{w_3 = 5}, w_4 = 1, w_5 = 0.25, w_6 = 0.75 \}
# \end{eqnarray}$$
#
# tiene un término de regularización $L_2$ de 26.915:
#
# $$\begin{eqnarray}
# w^2_1 + w^2_2 + w^2_3 + w^2_4 + w^2_5 + w^2_6
# \end{eqnarray}$$
# $$\begin{eqnarray}
# = 0.2^2 + 0.5^2 + 5^2 + 1^2 + 0.25^2 + 0.75^2
# \end{eqnarray}$$
# $$\begin{eqnarray}
# = 0.04 + 0.25 + 25 + 1 + 0.0625 + 0.5625
# \end{eqnarray}$$
# $$\begin{eqnarray}
# = 26.915
# \end{eqnarray}$$
#
# Pero $w_3$ (destacado en negrita arriba), con un valor al cuadrado de 25, conforma casi toda la complejidad. La suma de los cuadrados de las otras cinco ponderaciones agrega solo 1.915 al término de la regularización $L_2$.
| notebooks/02_Machine_Learning/Teoric/15_Regularizacion_para_lograr_simplicidad_RegularizacionL2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="RoPoEJMOlj1S" executionInfo={"status": "ok", "timestamp": 1647415433084, "user_tz": -540, "elapsed": 5409, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="3b701225-2f5d-484e-bac5-866a0bbf7a35"
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from sklearn.model_selection import train_test_split,cross_validate
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
print(tf.__version__)
# Load the weekly steel-price dataset (Colab-local Excel file) and index it by date.
df = pd.read_excel('/content/Test_dataset.xlsx')
df.set_index('Date', inplace=True)
print(df.columns)
# df.info()
# split_time = 420
# + colab={"base_uri": "https://localhost:8080/"} id="Rq5EcaUYmjkx" executionInfo={"status": "ok", "timestamp": 1647415439095, "user_tz": -540, "elapsed": 580, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="fc07a1f0-2710-4361-89ef-edeaa00d9499"
Y_colname = ['w_inc_UD']  # label column -- presumably a weekly price up/down flag; TODO confirm
# X_remove = ['datetime', 'DateTime', 'temp_group', 'casual', 'registered']
# X_colname = [x for x in raw_fe.columns if x not in Y_colname+X_remove]
X_colname = ['Target', 'MA5','1)철광석-중국', '4)철스크랩-터키수입가', '5)WTI', '5)BSI-해운지수', '2)중국-Flat재고', '3)중국-판재+롱재고', '4)중국-석탄내수',
'1)열연-미국', '1)열연-뭄바이', '1)열연-북유럽', '1)열연-한국', '1)GI-상해', '1)GI-미국']
# Prediction target: next week's average price (direction)
Dataset = df[X_colname+Y_colname]
# Dataset = df[Y_colname]
Dataset = Dataset.dropna(axis=0)  # drop rows with missing data (the leading rows)
print(Dataset)
# + id="sDD0_VYXmuLv" executionInfo={"status": "ok", "timestamp": 1647415453223, "user_tz": -540, "elapsed": 665, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}}
# 추가 모델을 적용하고자 함
# 정규화 함수
def MinMaxScaler(data):
    """Column-wise min-max normalisation of *data* into the [0, 1] range.

    NOTE(review): a column with a constant value produces a zero
    denominator and therefore NaNs -- confirm every column varies.
    """
    col_min = np.min(data, 0)
    col_max = np.max(data, 0)
    return (data - col_min) / (col_max - col_min)
# 정규화 되돌리기 함수
def back_MinMax(data, value):
    """Invert MinMaxScaler: map *value* from [0, 1] back to *data*'s original scale."""
    span = np.max(data, 0) - np.min(data, 0)
    return value * span + np.min(data, 0)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="xNM2auhMm5hx" executionInfo={"status": "ok", "timestamp": 1647415523984, "user_tz": -540, "elapsed": 7, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="5436f887-fb82-4927-b748-061b37dcf7db"
# Convert the assembled DataFrame to a plain numpy array for windowing.
xy= np.array(Dataset)
#print(xy[:,1])
# #%matplotlib notebook
plt.plot(xy[:,1])  # column 1 ('MA5'): the weekly average price series -- TODO confirm
plt.show()
print(xy.shape)
# + id="mFuF6oVSnHNx" executionInfo={"status": "ok", "timestamp": 1647419382294, "user_tz": -540, "elapsed": 689, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}}
seqLength = 1 # window size
dataDim = 15 # number of input features
hiddenDim = 10
outputDim = 1
lr = 0.01
iterations = 300
# Dataset split: 80% train, 20% test (test starts seqLength rows early so the
# first test window has a full history).
trainSize = int(len(xy)*0.8)
trainSet = xy[0:trainSize]
testSet = xy[trainSize-seqLength:]
# NOTE(review): train and test are normalised independently, so the two sets
# use different scales -- confirm this is intended (normally the test set
# should reuse the train min/max).
trainSet = MinMaxScaler(trainSet)
testSet = MinMaxScaler(testSet)
# Take the window of the 15 input feature series and build a model
# that predicts next week's average price (direction).
def buildDataSet(timeSeries, seqLength):
    """Slice *timeSeries* into supervised (X, y) pairs.

    X: sliding windows of length ``seqLength`` over every column except
    the last; y: the last column at the final row of each window.
    NOTE(review): the label is taken at the window's *last* row
    (i + seqLength - 1), i.e. the target column is assumed to already be
    shifted to "next week" -- confirm upstream.
    """
    n_windows = len(timeSeries) - seqLength
    windows = [timeSeries[i:i + seqLength, :-1] for i in range(n_windows)]
    targets = [timeSeries[i + seqLength - 1, [-1]] for i in range(n_windows)]
    return np.array(windows), np.array(targets)
# Build the supervised (X, y) window arrays for each split.
trainX, trainY=buildDataSet(trainSet, seqLength)
testX, testY=buildDataSet(testSet, seqLength)
# + colab={"base_uri": "https://localhost:8080/"} id="zJ7zXmJ7o7eV" executionInfo={"status": "ok", "timestamp": 1647421057920, "user_tz": -540, "elapsed": 3, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="48b7ed5b-e8ce-48d7-e83c-b91462609dd3"
# trainX.shape
print(testSet)
# trainY.shape
# + colab={"base_uri": "https://localhost:8080/"} id="QyaH3iFatYx6" executionInfo={"status": "ok", "timestamp": 1647418860663, "user_tz": -540, "elapsed": 471, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="34e332fe-4cee-453d-cdf5-7be47d2c1763"
# (MLP built as a simple reference baseline only)
# 1) Build the MLP model
# First, let's define a RNN Cell, as a layer subclass.
from tensorflow import keras
from tensorflow.keras import layers
#model = keras.Sequential()
#model.add(layers.SimpleRNN(units=32, return_sequences=True,
#                activation='relu',
#                input_shape=[seqLength ,dataDim -1]))
#model.add(layers.SimpleRNN(16), activation='relu')
#model.add(layers.Dense(16), activation='relu')
#model.add(layers.Dense(1))
# Two ReLU Dense layers and a sigmoid head for the binary up/down label.
# NOTE(review): Dense on a [seqLength, dataDim] input keeps the time axis;
# this only matches the (n, 1)-shaped labels because seqLength == 1.
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(10, activation="relu", input_shape=[seqLength ,dataDim]),
    tf.keras.layers.Dense(10, activation="relu"),
    tf.keras.layers.Dense(1, activation="sigmoid")
])
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="EHpne70eoyHi" executionInfo={"status": "ok", "timestamp": 1647419393588, "user_tz": -540, "elapsed": 570, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="17f8178b-a5a7-4e9b-be2c-3222b779bd65"
# 2) Build the RNN model
# First, let's define a RNN Cell, as a layer subclass.
from tensorflow import keras
from tensorflow.keras import layers
#model = keras.Sequential()
#model.add(layers.SimpleRNN(units=32, return_sequences=True,
#                activation='relu',
#                input_shape=[seqLength ,dataDim -1]))
#model.add(layers.SimpleRNN(16), activation='relu')
#model.add(layers.Dense(16), activation='relu')
#model.add(layers.Dense(1))
# SimpleRNN encoder + sigmoid head for the binary up/down label.
# NOTE: this rebinds `model`, replacing the MLP defined in the previous cell.
model = tf.keras.models.Sequential([
    tf.keras.layers.SimpleRNN(10, activation="relu", input_shape=[seqLength ,dataDim]),
    tf.keras.layers.Dense(10, activation="relu"),
    tf.keras.layers.Dense(1, activation="sigmoid")
])
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="qnVdHt11qOjR" executionInfo={"status": "ok", "timestamp": 1647419422490, "user_tz": -540, "elapsed": 21055, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="630443a1-9fde-4837-cb2f-130b7c6212bd"
# Configure the training procedure.
# model.compile(loss='mse', optimizer='adam', metrics=['mae'])
from tensorflow.keras.optimizers import RMSprop
# Binary cross-entropy + accuracy: the task is up/down classification.
model.compile(optimizer=RMSprop(learning_rate=0.001),
              loss='binary_crossentropy',
              metrics = ['accuracy'])
# Train the model.
history = model.fit(trainX, trainY, epochs=300, batch_size=20)
# history = model.fit(
#     train_generator,
#     steps_per_epoch=100,
#     epochs=15,
#     validation_data=validation_generator,
#     validation_steps=50,
#     verbose=2
# )
# + colab={"base_uri": "https://localhost:8080/"} id="FmcCbRVvq5jR" executionInfo={"status": "ok", "timestamp": 1647419424132, "user_tz": -540, "elapsed": 1648, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="727d5982-f5b5-4fcd-b6f7-0600369ffb06"
# Evaluate classification accuracy on the train and test splits.
loss, accuracy = model.evaluate(trainX, trainY, verbose = 1)
accuracy = accuracy*100
print(" - Train 정확도 :", '%0.1f' % accuracy, "%\n")
loss, accuracy = model.evaluate(testX, testY, verbose = 1)
accuracy = accuracy*100
print(" - Test 정확도 :", '%0.1f' % accuracy, "%\n")
# + colab={"base_uri": "https://localhost:8080/", "height": 338} id="KJh1_cVKrneY" executionInfo={"status": "ok", "timestamp": 1647419424134, "user_tz": -540, "elapsed": 18, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="331583e9-4b21-4928-c6a4-98a6e8291958"
plt.figure(figsize = (20,5))
plt.plot(history.history['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="706K-BYgsHiD" executionInfo={"status": "ok", "timestamp": 1647419426771, "user_tz": -540, "elapsed": 609, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="ad1ee3f3-9c05-4797-8a00-88e5b7b87d23"
# Predict on the test set and threshold the sigmoid outputs at 0.5
# to obtain hard 0/1 class labels.
y_hat = model.predict(testX)
# print(y_hat)
y_hat = np.where(y_hat < 0.5, 0, 1)
print(y_hat.shape)
print(testY.shape)
# y_hat_flt = y_hat.flatten()
# testY_flt = testY.flatten()
# print(y_hat_flt)
# print(testY_flt)
# + colab={"base_uri": "https://localhost:8080/"} id="NhGyCEZNs2pO" executionInfo={"status": "ok", "timestamp": 1647419444950, "user_tz": -540, "elapsed": 1364, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="d1717910-c0ec-483c-d540-e4d33eb0d114"
# Accuracy as a percentage; the element-wise comparison is valid because
# testY and y_hat share the same (n, 1) shape.
score = ( np.mean(testY == y_hat) ) *100
# score = ( np.mean(testY_flt == y_hat_flt) ) *100
# print(score)
print("\n< 분류에 대한 ML > - RNN 기본모형 적용\n")
print(" * 테스트 기간\n")
print(" - 정확도(%) :", '%0.1f' % score, "%\n")
# + colab={"base_uri": "https://localhost:8080/", "height": 334} id="FaTHCMXryytf" executionInfo={"status": "ok", "timestamp": 1647419450814, "user_tz": -540, "elapsed": 811, "user": {"displayName": "\uc815\uc7ac\uc724", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjTaHBwgCHRr3NV8m730RIgW8ps1xdLdCrooNGNwA=s64", "userId": "17069826167537318317"}} outputId="94dab99e-cd4b-42dc-e434-f6cc9ddcb23b"
# Plot actual vs. predicted up/down labels over the test period.
plt.figure(figsize = (20,5))
# ax1 = sns.distplot(test_actual, hist=False, label="Y")
# ax2 = sns.distplot(test_predict, hist=False, label="Y_hat")
# print("\n2) Test data - 정확성 체크")
plt.plot(testY)  # actual direction
plt.plot(y_hat)  # predicted direction
plt.xlabel('Date')
plt.ylabel('Price up/down')
plt.legend(labels=["Y", "Y_hat"], loc = 'best')
plt.show()
# + id="Ah3pMVsYzwVj"
| 2022-03-22-Steel_CLS_ML_1step_RNN.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Matlab
% language: matlab
% name: matlab_kernel
% ---
% # Pseudospectra of the first derivative
% Illustration of the spectra and pseudospectra of the first derivative operator and the Chebyshev differentiation matrix on the interval $[-1,1]$, with no boundary conditions and with a boundary condition.
%
% Running this notebook requires Jupyter notebooks with the [matlab_kernel](https://pypi.python.org/pypi/matlab_kernel) installed, Matlab, and the Matlab package [chebfun](http://www.chebfun.org/).
%
% Developed by <NAME> for a course on Approximation Theory and Spectral Methods at the University of Washington. See <http://faculty.washington.edu/rjl/classes/am570a2015/codes.html> for more Jupyter Notebook examples.
%
% Add chebfun to the MATLAB path.  Bug fix: the original used '#' to start
% the trailing comment, but '#' is not a comment character in MATLAB (only
% in Octave), so this line was a syntax error under the Matlab kernel.
path(path,'/Users/rjl/git/chebfun') % modify this for your local system
% ## No boundary conditions
%
% The first derivative operator $\partial_x$ on $[-1,1]$ with no boundary condition imposed has as its spectrum the entire complex plane, since for any $z$ there is an eigenfunction $e^{zx}$ with eigenvalue $z$.
% Now consider the square Chebyshev differentiation matrix $D$ with no boundary conditions imposed. This matrix maps values of $u(x_j)$ at $N$ Chebyshev points to the approximation to the derivative $u_x(x_j)$ at these same points. The values $U_0$ and $U_N$ can be arbitrary.
%
% Recall that if we apply $D^2$ to a vector we get approximations to the second derivative that correspond to $p''(x_j)$, where $p$ is the polynomial of degree $N$ that interpolates the $U_j$ values. Similarly if we apply $D^{N+1}$ we get values of the $(N+1)$st derivative of the polynomial, but $p^{(N+1)}(x) \equiv 0$, so $D^{N+1}$ is the zero matrix. Hence $D$ is nilpotent and all eigenvalues are equal to 0.
%
% Since $D$ is highly non-normal, if we compute the eigenvalues numerically we get nonzero values because of rounding errors...
% ## Set up the matrix and investigate eigenvalues:
% +
N = 24;                          % number of Chebyshev points
Dop = chebop(@(u) diff(u));      % first-derivative operator on [-1,1] (chebfun)
D = matrix(Dop,N, 'oldschool');  % square Chebyshev differentiation matrix
[V,Lam] = eig(D);
Lam = diag(Lam);                 % eigenvalues; exactly 0 in theory (D is nilpotent)
Lmax = max(abs(Lam));
plot(real(Lam),imag(Lam),'r.')   % nonzero values seen here are rounding artifacts
axis([-Lmax,Lmax,-Lmax,Lmax])
axis 'square'
% -
% We can compute the pseudo-spectrum $\sigma_\epsilon(D)$ by first computing the minimum singular value of $(zI-D)$ on a fine grid of points and then plotting the $\epsilon$ contour of this function...
% Approximate the eps-pseudospectrum of D: compute the smallest singular
% value of (zI - D) on a grid of complex points z, then draw its level curves.
xylim = 50;                      % half-width of the grid in the complex plane
x1 = linspace(-xylim,xylim,200);
y1 = linspace(-xylim,xylim,200);
[xx,yy] = meshgrid(x1,y1);
zz = xx + 1i*yy;                 % complex test points z
I = eye(size(D,1));
sigmin = zeros(size(zz));
for j=1:length(x1)
    for i=1:length(y1)
        sigmin(i,j) = min(svd(zz(i,j)*I - D));   % sigma_min(zI - D)
    end
end
plot(real(Lam),imag(Lam),'r.')   % computed eigenvalues
hold on
contour(xx,yy,sigmin,[1e-2,1e-3,1e-4],'b')       % eps-level curves
contour(xx,yy,sigmin,[1e-5,1e-6,1e-7],'k')
contour(xx,yy,sigmin,[1e-8,1e-10,1e-12, 1e-14],'g')
axis square
% Note that the computed eigenvalues lie within the $\epsilon = 10^{-14}$ pseudo-spectrum.
% ### Finer grid
%
% If we make the grid finer we get a better approximation to $\partial_x$. The larger $D$ matrix is still nilpotent, but the pseudospectra move out to cover more of the complex plane:
% +
% Same computation on a finer grid (N = 48): D is still nilpotent, but the
% pseudospectra now cover more of the complex plane.
N = 48;
Dop = chebop(@(u) diff(u));
D = matrix(Dop,N, 'oldschool');
[V,Lam] = eig(D);
Lam = diag(Lam);
xylim = 50;
x1 = linspace(-xylim,xylim,200);
y1 = linspace(-xylim,xylim,200);
[xx,yy] = meshgrid(x1,y1);
zz = xx + 1i*yy;
I = eye(size(D,1));
sigmin = zeros(size(zz));
for j=1:length(x1)
    for i=1:length(y1)
        sigmin(i,j) = min(svd(zz(i,j)*I - D));
    end
end
plot(real(Lam),imag(Lam),'r.')
hold on
contour(xx,yy,sigmin,[1e-2,1e-3,1e-4],'b')
contour(xx,yy,sigmin,[1e-5,1e-6,1e-7],'k')
contour(xx,yy,sigmin,[1e-8,1e-10,1e-12],'g')
axis square
% -
% ### Adding a boundary condition
%
% Now consider the differential operator $\partial_x$ on $[-1,1]$ with the boundary condition $u(1) = 0$. This is the operator we will want to discretize to solve the advection equation $u_t = u_x$, whose solution advects to the left.
%
% We obtain the Chebyshev approximation by computing the square $N\times N$ matrix as above but then dropping the last row and column. Dropping the last column corresponds to assuming $U_N=0$.
% +
% Impose the boundary condition u(1) = 0 by deleting the last row and
% column of the square differentiation matrix (dropping the column
% corresponds to assuming U_N = 0).
N = 24;
Dop = chebop(@(u) diff(u));
D = matrix(Dop,N, 'oldschool');
D = D(1:N-1, 1:N-1);
[V,Lam] = eig(D);
Lam = diag(Lam);
Lmax = max(abs(Lam))             % no semicolon: display the largest |eigenvalue|
% -
% Pseudospectra of the boundary-condition-imposed matrix, computed on the
% same complex grid as before.
xylim = 50;
x1 = linspace(-xylim,xylim,200);
y1 = linspace(-xylim,xylim,200);
[xx,yy] = meshgrid(x1,y1);
zz = xx + 1i*yy;
I = eye(size(D,1));
sigmin = zeros(size(zz));
for j=1:length(x1)
    for i=1:length(y1)
        sigmin(i,j) = min(svd(zz(i,j)*I - D));
    end
end
plot(real(Lam),imag(Lam),'r.')
hold on
contour(xx,yy,sigmin,[1e-2,1e-3,1e-4],'b')
contour(xx,yy,sigmin,[1e-5,1e-6,1e-7],'k')
contour(xx,yy,sigmin,[1e-8,1e-10,1e-12],'g')
axis square
% ### Finer grid:
% Repeat with a finer grid (N = 32), then zoom the axes into the left
% half-plane where the interesting contours live.
N = 32;
Dop = chebop(@(u) diff(u));
D = matrix(Dop,N, 'oldschool');
D = D(1:N-1, 1:N-1);             % impose u(1) = 0 as above
[V,Lam] = eig(D);
Lam = diag(Lam);
Lmax = max(abs(Lam))
xylim = 50;
x1 = linspace(-xylim,xylim,200);
y1 = linspace(-xylim,xylim,200);
[xx,yy] = meshgrid(x1,y1);
zz = xx + 1i*yy;
I = eye(size(D,1));
sigmin = zeros(size(zz));
for j=1:length(x1)
    for i=1:length(y1)
        sigmin(i,j) = min(svd(zz(i,j)*I - D));
    end
end
plot(real(Lam),imag(Lam),'r.')
hold on
contour(xx,yy,sigmin,[1e-2,1e-3,1e-4],'b')
contour(xx,yy,sigmin,[1e-5,1e-6,1e-7],'k')
contour(xx,yy,sigmin,[1e-8,1e-10,1e-12],'g')
axis([-40,0,-50,50])             % zoom into the left half-plane
axis square
% Note that in this case there are additional eigenvalues not shown, with magnitude $|\lambda| \approx 86$, but that these eigenvalues, like the ones shown that are away from the central curve, are well-conditioned and have small pseudospectral balls around them.
%
% The main things to notice in these figures are:
% - The eigenvalues and pseudospectra lie in the left half plane,
% - The pseudospectra include vertical lines near the origin. As the grid is refined, a larger region near the origin is covered by these vertical contours.
%
% The latter observation suggests that the pseudospectrum of the differential operator we are approximating, $\partial_x$ on $[-1,1]$ with $u(1)=0$, might have this behavior everywhere in the left half plane. Indeed it does.
%
% Note that this differential operator has no eigenvalues. The functions $e^{zx}$ are no longer eigenfunctions because they do not satisfy the boundary condition. However, if $Re(z) < 0$ then these decay as $x$ increases and if they decay fast enough then $u(1)$ is nearly 0 relative to $u(-1)$. This suggests with $Re(z)$ very negative these are pseudo-eigenfunctions for very small values of $\epsilon$. Since the decay rate depends only on the real part, this further suggests that the pseudospectra should be regions of the left halfplane bounded by vertical lines. See Chapters 5 and 30 of Trefethen and Embree's *Spectra and Pseudospectra* for more details.
| sphinx/_static/Dx_Pseudospectra.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # Detecting Pneumonia from X-ray images using Deep Java Library
// *Disclaimer: this blog post is intended for educational purposes only. The application was developed using experimental code. The result should not be used for any medical diagnoses of pneumonia. This content has not been reviewed or approved by any scientists or medical professionals.*
//
// ## Introduction
// In this example, we demonstrate how deep learning (DL) can be used to detect pneumonia from chest X-ray images. This work is inspired by the [Chest X-ray Images Challenge](https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia) on Kaggle and a related [paper](https://www.cell.com/cell/fulltext/S0092-8674\(18\)30154-5). In this notebook, we illustrate how artificial intelligence can assist clinical decision making with a focus on enterprise deployment. This work leverages a model trained using Keras and TensorFlow with [this Kaggle kernel](https://www.kaggle.com/aakashnain/beating-everything-with-depthwise-convolution). In this blog post, we will focus on generating predictions with this model using [Deep Java Library](https://djl.ai/) (DJL), an open source library to build and deploy DL in Java.
// ## Preparation
//
// This tutorial requires the installation of Java Kernel. To install the Java Kernel, see the [documentation](https://docs.djl.ai/jupyter/index.html).
//
// These are the dependencies we will use:
// +
// // %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
// %maven ai.djl:api:0.8.0
// %maven ai.djl.tensorflow:tensorflow-api:0.8.0
// %maven ai.djl.tensorflow:tensorflow-engine:0.8.0
// %maven ai.djl.tensorflow:tensorflow-model-zoo:0.8.0
// %maven org.bytedeco:javacpp:1.5.4
// %maven org.slf4j:slf4j-simple:1.7.26
// See https://github.com/awslabs/djl/blob/master/tensorflow/tensorflow-engine/README.md
// for more TensorFlow library selection options
// %maven ai.djl.tensorflow:tensorflow-native-auto:2.3.1
// -
// %%loadFromPOM
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
<version>3.8.0</version>
</dependency>
// ### Import java packages
import ai.djl.inference.*;
import ai.djl.modality.*;
import ai.djl.modality.cv.*;
import ai.djl.modality.cv.util.*;
import ai.djl.ndarray.*;
import ai.djl.repository.zoo.*;
import ai.djl.translate.*;
import ai.djl.training.util.*;
import ai.djl.util.*;
import java.net.*;
import java.nio.file.*;
import java.util.*;
// ### set the model URL
var modelUrl = "https://resources.djl.ai/demo/pneumonia-detection-model/saved_model.zip";
// ### Dive deep into Translator
//
// To successfully run inference, we need to define some preprocessing and post processing logic to achieve the best
// prediction result and understandable output.
class MyTranslator implements Translator<Image, Classifications> {

    /** Output labels, index-aligned with the model's probability vector. */
    private static final List<String> CLASSES = Arrays.asList("Normal", "Pneumonia");

    /** Resize to the 224x224 input the model was trained on and scale pixels to [0, 1]. */
    @Override
    public NDList processInput(TranslatorContext ctx, Image input) {
        NDArray pixels = input.toNDArray(ctx.getNDManager(), Image.Flag.COLOR);
        pixels = NDImageUtils.resize(pixels, 224).div(255.0f);
        return new NDList(pixels);
    }

    /** Map the raw probability vector back onto the human-readable class names. */
    @Override
    public Classifications processOutput(TranslatorContext ctx, NDList list) {
        return new Classifications(CLASSES, list.singletonOrThrow());
    }

    /** Stack single inputs into a batch dimension, matching the model's 4-D input. */
    @Override
    public Batchifier getBatchifier() {
        return Batchifier.STACK;
    }
}
// As you can see above, the translator resizes the image to 224x224 and normalizes the image by dividing by 255 before feeding it into the model. When doing inference, you need to follow the same pre-processing procedure as was used during training. In this case, we need to match the Keras training code. After running prediction, the model outputs probabilities of each class as an [NDArray](https://javadoc.io/doc/ai.djl/api/latest/index.html). We need to tell the predictor to translate it back to classes, namely “Normal” or "Pneumonia".
//
// Until this point, all preparation work is done, we can start working on the prediction logic.
// ## Predict using DJL
//
// ### Load the image
// We are going to load an CT scanned image of an infected lung from internet
var imagePath = "https://resources.djl.ai/images/chest_xray.jpg";
var image = ImageFactory.getInstance().fromUrl(imagePath);
image.getWrappedImage();
// ### Load your model
// Next, we will download the model from `modelUrl`. This will download the model into the DJL cache location
// Describe the model to load: input/output types, download URL, our
// translator, and a progress bar for the download.
Criteria<Image, Classifications> criteria =
        Criteria.builder()
                .setTypes(Image.class, Classifications.class)
                .optModelUrls(modelUrl)
                .optTranslator(new MyTranslator())
                .optProgress(new ProgressBar())
                .build();
// NOTE(review): raw ZooModel type; ZooModel<Image, Classifications> would be type-safe.
ZooModel model = ModelZoo.loadModel(criteria);
// ### Run inference
// Lastly, we will need to create a predictor using our model and translator. Once we have a predictor, we simply need to call the predict method on our test image.
// +
// Create a predictor from the loaded model and classify the X-ray image;
// the trailing bare expression makes jshell display the result.
Predictor<Image, Classifications> predictor = model.newPredictor();
Classifications classifications = predictor.predict(image);
classifications
// -
| jupyter/tensorflow/pneumonia_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Singly Linked List
class Node(object):
    """A node of a singly linked list."""
    def __init__(self,value):
        self.value = value  # payload held by this node
        self.next_node = None  # successor node, or None at the tail
a = Node(1)
b = Node(2)
c = Node(3)
a.next_node = b
b.next_node = c
a.value
a.next_node
a.next_node.value
# # Doubly Linked List
class DoublyLinkedListNode(object):
    """A node of a doubly linked list, linked in both directions."""
    def __init__(self, value):
        self.value = value  # payload held by this node
        self.next_node = None  # successor node, or None at the tail
        self.prev_node = None  # predecessor node, or None at the head
d = DoublyLinkedListNode(4)
e = DoublyLinkedListNode(5)
f = DoublyLinkedListNode(6)
d.next_node = e
e.prev_node = d
e.next_node = f
f.prev_node = e
# # Linked List cycle check: Circular Linked List
def cycle_check(node):
    """Return True if the singly linked list starting at *node* is circular.

    Uses Floyd's tortoise-and-hare algorithm: a slow pointer advances one
    node per step while a fast pointer advances two; the pointers can only
    meet again if the list loops back on itself.

    Bug fix: the original condition evaluated ``marker2.next_node`` before
    checking ``marker2`` itself, so a non-circular list with an even number
    of nodes raised AttributeError when the fast pointer ran off the end.
    """
    slow = node
    fast = node
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False
class Node(object):
    """A node of a singly linked list (re-declared for the test cell below)."""
    def __init__(self,value):
        self.value = value  # payload held by this node
        self.next_node = None  # successor node, or None at the tail
# !pip install nose
# +
'''
Test Cell
'''
from nose.tools import assert_equal
#Circular List
a = Node(1)
b = Node(2)
c = Node(3)
a.next_node = b
b.next_node = c
c.next_node = a
#Non Circular List
x = Node(4)
y = Node(5)
z = Node(6)
x.next_node = y
y.next_node = z
# +
class TestCycleCheck(object):
    """Minimal harness exercising cycle_check on the lists built above."""
    def test_sol(self,sol):
        assert_equal(sol(a),True)  # 'a' heads the circular list
        assert_equal(sol(x),False)  # 'x' heads the linear list
        print('All Test Cases passed')
#Run Tests
test_cycle_check = TestCycleCheck()
test_cycle_check.test_sol(cycle_check)
# -
# # Linked List Reversal
# +
class Node(object):
def __init__(self,value):
self.value = value
self.next_node = None
# -
def reverse(head_node):
    """Reverse a singly linked list in place and return the new head node."""
    prev = None
    node = head_node
    while node is not None:
        # Point the current node backwards, then advance both references.
        node.next_node, prev, node = prev, node, node.next_node
    return prev
# +
#Create a list of 4 nodes
a = Node(1)
b = Node(2)
c = Node(3)
d = Node(4)
a.next_node = b
b.next_node = c
c.next_node = d
# -
print (a.next_node.value)
print (b.next_node.value)
print (c.next_node.value)
try:
print (d.next_node.value)
except:
print('Value is None')
# +
#Reverse the list
# -
reverse(a)
print (d.next_node.value)
print (c.next_node.value)
print (b.next_node.value)
| Linked-List.ipynb |