Dataset schema (column, type, value-length range):
markdown — string, 0 to 1.02M characters
code — string, 0 to 832k characters
output — string, 0 to 1.02M characters
license — string, 3 to 36 characters
path — string, 6 to 265 characters
repo_name — string, 6 to 127 characters
Load the duplicate-questions scoring app's URL and API key; a minimal request sketch follows this cell.
scoring_url = aks_service.scoring_uri
api_key = aks_service.get_keys()[0]
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
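A minimal sketch of how the scoring URL and API key above could be exercised directly with `requests`. The payload key `"input"` and the helper name are illustrative assumptions, not the deployed service's confirmed schema; the notebook's own `score_text` function further below shows the call actually used.

```python
import json
import requests

def ping_scoring_service(text, scoring_url, api_key):
    """Send one question text to the AKS scoring endpoint and return the parsed response.

    The payload key 'input' is a hypothetical example; adjust it to match the
    deployed scoring script's expected schema.
    """
    headers = {
        "content-type": "application/json",
        "Authorization": "Bearer " + api_key,  # bearer token from aks_service.get_keys()
    }
    payload = json.dumps({"input": text})  # assumed request schema
    response = requests.post(scoring_url, data=payload, headers=headers)
    response.raise_for_status()
    return response.json()

# Example (assumes scoring_url and api_key were set in the cell above):
# result = ping_scoring_service("How do I parse JSON in Python?", scoring_url, api_key)
```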
A constructor function for ID-text contents. Constructs buttons and text areas for each text ID and text passage.
* Each button's description is set to a text's ID, and its click action is set to the handler.
* Each text area's content is set to a text.
* A dictionary is created to map IDs to text areas.
def buttons_and_texts( data, id, answerid, text, handle_click, layout=widgets.Layout(width="100%"), n=15 ): """Construct buttons, text areas, and a mapping from IDs to text areas.""" items = [] text_map = {} for i in range(min(n, len(data))): button = widgets.Button(description=data.iloc[i][id]) button.answerid = data.iloc[i][answerid] if answerid in data else None button.open = False button.on_click(handle_click) items.append(button) text_area = widgets.Textarea( data.iloc[i][text], placeholder=data.iloc[i][id], layout=layout ) items.append(text_area) text_map[data.iloc[i][id]] = text_area return items, text_map
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
A constructor function for the duplicates and questions explorer widget. This builds a box containing duplicates and question tabs, each in turn containing boxes that contain the buttons and text areas.
def duplicates_questions_widget( duplicates, questions, layout=widgets.Layout(width="100%") ): """Construct a duplicates and questions exploration widget.""" # Construct the duplicates Tab of buttons and text areas. duplicates_items, duplicates_map = buttons_and_texts( duplicates, duplicates_id, duplicates_answerid, duplicates_text, duplicates_click, n=duplicates.shape[0], ) duplicates_tab = widgets.Tab( [widgets.VBox(duplicates_items, layout=layout)], layout=widgets.Layout(width="100%", height="500px", overflow_y="auto"), ) duplicates_tab.set_title(0, duplicates_title) # Construct the questions Tab of buttons and text areas. questions_items, questions_map = buttons_and_texts( questions, questions_id, questions_answerid, questions_text, questions_click, n=questions.shape[0], ) questions_tab = widgets.Tab( [widgets.VBox(questions_items, layout=layout)], layout=widgets.Layout(width="100%", height="500px", overflow_y="auto"), ) questions_tab.set_title(0, questions_title) # Put both tabs in an HBox. duplicates_questions = widgets.HBox([duplicates_tab, questions_tab], layout=layout) return duplicates_map, questions_map, duplicates_questions
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
A handler function for a question passage button press. If the passage's text window is open, it is collapsed. Otherwise, it is opened.
def questions_click(button):
    """Respond to a click on a question button."""
    global questions_map
    if button.open:
        questions_map[button.description].rows = None
        button.open = False
    else:
        questions_map[button.description].rows = 10
        button.open = True
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
A handler function for a duplicate question button press. If the duplicate is not selected, select it and update the questions tab with its top 15 question passages ordered by match score. Otherwise, if the duplicate's text window is open, collapse it; else open it.
def duplicates_click(button): """Respond to a click on a duplicate button.""" global duplicates_map if select_duplicate(button): duplicates_map[button.description].rows = 10 button.open = True else: if button.open: duplicates_map[button.description].rows = None button.open = False else: duplicates_map[button.description].rows = 10 button.open = True def select_duplicate(button): """Update the displayed questions to correspond to the button's duplicate selections. Returns whether or not the selected duplicate changed. """ global selected_button, questions_map, duplicates_questions if "selected_button" not in globals() or button != selected_button: if "selected_button" in globals(): selected_button.style.button_color = None selected_button.style.font_weight = "" selected_button = button selected_button.style.button_color = "yellow" selected_button.style.font_weight = "bold" duplicates_text = duplicates_map[selected_button.description].value questions_scores = score_text(duplicates_text) ordered_questions = questions.loc[questions_scores[questions_id]] questions_items, questions_map = buttons_and_texts( ordered_questions, questions_id, questions_answerid, questions_text, questions_click, n=questions_display, ) if questions_button_color is True and selected_button.answerid is not None: set_button_color(questions_items[::2], selected_button.answerid) if questions_button_score is True: questions_items = [ item for button, text_area in zip(*[iter(questions_items)] * 2) for item in (add_button_prob(button, questions_scores), text_area) ] duplicates_questions.children[1].children[0].children = questions_items duplicates_questions.children[1].set_title(0, selected_button.description) return True else: return False def add_button_prob(button, questions_scores): """Return an HBox containing button and its probability.""" id = button.description prob = widgets.Label( score_label + ": " + str( int( math.ceil(score_scale * questions_scores.loc[id][questions_probability]) ) ) ) return widgets.HBox([button, prob]) def set_button_color(button, answerid): """Set each button's color according to its label.""" for i in range(len(button)): button[i].style.button_color = ( "lightgreen" if button[i].answerid == answerid else None )
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
Functions for interacting with the web service.
def score_text(text): """Return a data frame with the original question scores for the text.""" headers = { "content-type": "application/json", "Authorization": ("Bearer " + api_key), } # jsontext = json.dumps({'input':'{0}'.format(text)}) jsontext = text_to_json(text) result = requests.post(scoring_url, data=jsontext, headers=headers) # scores = result.json()['result'][0] scores = eval(result.json()) scores_df = pd.DataFrame( scores, columns=[questions_id, questions_answerid, questions_probability] ) scores_df[questions_id] = scores_df[questions_id].astype(str) scores_df[questions_answerid] = scores_df[questions_answerid].astype(str) scores_df = scores_df.set_index(questions_id, drop=False) return scores_df
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
Control the appearance of cell output boxes.
%%html
<style>
  .output_wrapper, .output {
    height: auto !important;
    max-height: 1000px;  /* your desired max-height here */
  }
  .output_scroll {
    box-shadow: none !important;
    -webkit-box-shadow: none !important;
  }
</style>
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
Load data: load the pre-formatted text of questions.
questions_title = 'Questions'
questions_id = 'Id'
questions_answerid = 'AnswerId'
questions_text = 'Text'
questions_probability = 'Probability'
questions_path = './data_folder/questions.tsv'
questions = read_questions(questions_path, questions_id, questions_answerid)
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
Load the pre-formatted text of duplicates.
duplicates_title = 'Duplicates'
duplicates_id = 'Id'
duplicates_answerid = 'AnswerId'
duplicates_text = 'Text'
duplicates_path = './data_folder/dupes_test.tsv'
duplicates = read_questions(duplicates_path, duplicates_id, duplicates_answerid)
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
Explore original questions matched up with duplicate questions. Define other variables and settings used in creating the interface.
questions_display = 15
questions_button_color = True
questions_button_score = True
score_label = 'Score'
score_scale = 100
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
This builds the exploration widget as a box containing the duplicates and questions tabs, each in turn containing boxes with a button and a text area for each ID-text pair.
duplicates_map, questions_map, duplicates_questions = duplicates_questions_widget(duplicates, questions)
duplicates_questions
_____no_output_____
MIT
architectures/Python-ML-RealTimeServing/{{cookiecutter.project_name}}/aks/07_RealTimeScoring.ipynb
dciborow/AIArchitecturesAndPractices
Registration Initialization: We Have to Start Somewhere

Initialization is a critical aspect of most registration algorithms, given that most algorithms are formulated as an iterative optimization problem. In many cases we perform initialization automatically by making assumptions about the contents of the image and the imaging protocol. For instance, if we expect that images were acquired with the patient in a known orientation, we can align the geometric centers of the two volumes, or the centers of mass of the image contents if the anatomy is not centered in the image (this is what we previously did in [this example](60_RegistrationIntroduction.ipynb)). When the orientation is not known, or is known but incorrect, this approach will not yield a reasonable initial estimate for the registration.

When working with clinical images, the DICOM tags define the orientation and position of the anatomy in the volume. The tags of interest are:
* (0020|0032) Image Position (Patient): coordinates of the first transmitted voxel.
* (0020|0037) Image Orientation (Patient): directions of the first row and column in 3D space.
* (0018|5100) Patient Position: patient placement on the table: Head First Prone (HFP), Head First Supine (HFS), Head First Decubitus Right (HFDR), Head First Decubitus Left (HFDL), Feet First Prone (FFP), Feet First Supine (FFS), Feet First Decubitus Right (FFDR), Feet First Decubitus Left (FFDL).

The patient position is entered manually by the CT/MR operator and thus can be erroneous (HFP instead of FFP will result in a $180^{\circ}$ orientation error).

A heuristic, yet effective, solution is to use a sampling strategy of the parameter space. Note that this strategy is primarily useful in low-dimensional parameter spaces (rigid or possibly affine transformations). In this notebook we illustrate how to sample the parameter space in a fixed pattern. We then initialize the registration with the parameters that correspond to the best similarity metric value obtained by our sampling. A short sketch of reading these DICOM tags with SimpleITK follows this cell.
import SimpleITK as sitk import os import numpy as np from ipywidgets import interact, fixed from downloaddata import fetch_data as fdata import registration_callbacks as rc import registration_utilities as ru # Always write output to a separate directory, we don't want to pollute the source directory. OUTPUT_DIR = 'Output' %matplotlib inline # This is the registration configuration which we use in all cases. The only parameter that we vary # is the initial_transform. def multires_registration(fixed_image, moving_image, initial_transform): registration_method = sitk.ImageRegistrationMethod() registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.01) registration_method.SetInterpolator(sitk.sitkLinear) registration_method.SetOptimizerAsGradientDescent(learningRate=1.0, numberOfIterations=100, estimateLearningRate=registration_method.Once) registration_method.SetOptimizerScalesFromPhysicalShift() registration_method.SetInitialTransform(initial_transform) registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1]) registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas = [2,1,0]) registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn() registration_method.AddCommand(sitk.sitkStartEvent, rc.metric_start_plot) registration_method.AddCommand(sitk.sitkEndEvent, rc.metric_end_plot) registration_method.AddCommand(sitk.sitkMultiResolutionIterationEvent, rc.metric_update_multires_iterations) registration_method.AddCommand(sitk.sitkIterationEvent, lambda: rc.metric_plot_values(registration_method)) final_transform = registration_method.Execute(fixed_image, moving_image) print('Final metric value: {0}'.format(registration_method.GetMetricValue())) print('Optimizer\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription())) return final_transform
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
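A minimal sketch, assuming a GDCM-readable DICOM series directory, of how the orientation-related tags discussed above could be inspected with SimpleITK before choosing an initialization. The tag keys come from the text; the helper name and the choice of the first file of the first series are hypothetical.

```python
import SimpleITK as sitk

def print_orientation_tags(dicom_directory):
    """Print the orientation-related DICOM tags of the first slice found in dicom_directory.

    Hypothetical helper for illustration; the tag keys are the ones listed in the text above.
    """
    series_reader = sitk.ImageSeriesReader()
    file_names = series_reader.GetGDCMSeriesFileNames(dicom_directory)
    reader = sitk.ImageFileReader()
    reader.SetFileName(file_names[0])
    reader.LoadPrivateTagsOn()
    reader.ReadImageInformation()
    for tag, description in [("0020|0032", "Image Position (Patient)"),
                             ("0020|0037", "Image Orientation (Patient)"),
                             ("0018|5100", "Patient Position")]:
        value = reader.GetMetaData(tag) if reader.HasMetaDataKey(tag) else "<missing>"
        print("{0} [{1}]: {2}".format(description, tag, value))

# Example (data_directory is defined in the 'Loading Data' cell below):
# print_orientation_tags(data_directory)
```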
Loading Data
data_directory = os.path.dirname(fdata("CIRS057A_MR_CT_DICOM/readme.txt")) fixed_series_ID = "1.2.840.113619.2.290.3.3233817346.783.1399004564.515" moving_series_ID = "1.3.12.2.1107.5.2.18.41548.30000014030519285935000000933" reader = sitk.ImageSeriesReader() fixed_image = sitk.ReadImage(reader.GetGDCMSeriesFileNames(data_directory, fixed_series_ID), sitk.sitkFloat32) moving_image = sitk.ReadImage(reader.GetGDCMSeriesFileNames(data_directory, moving_series_ID), sitk.sitkFloat32) # To provide a reasonable display we need to window/level the images. By default we could have used the intensity # ranges found in the images [SimpleITK's StatisticsImageFilter], but these are not the best values for viewing. # Using an external viewer we identified the following settings. fixed_intensity_range = (-1183,544) moving_intensity_range = (0,355) interact(lambda image1_z, image2_z, image1, image2,:ru.display_scalar_images(image1_z, image2_z, image1, image2, fixed_intensity_range, moving_intensity_range, 'fixed image', 'moving image'), image1_z=(0,fixed_image.GetSize()[2]-1), image2_z=(0,moving_image.GetSize()[2]-1), image1 = fixed(fixed_image), image2=fixed(moving_image));
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
Arbitrarily rotate the moving image.
rotation_x = 0.0 rotation_z = 0.0 def modify_rotation(rx_in_degrees, rz_in_degrees): global rotation_x, rotation_z rotation_x = np.radians(rx_in_degrees) rotation_z = np.radians(rz_in_degrees) interact(modify_rotation, rx_in_degrees=(0.0,180.0,5.0), rz_in_degrees=(-90.0,180.0,5.0)); resample = sitk.ResampleImageFilter() resample.SetReferenceImage(moving_image) resample.SetInterpolator(sitk.sitkLinear) # Rotate around the physical center of the image. rotation_center = moving_image.TransformContinuousIndexToPhysicalPoint([(index-1)/2.0 for index in moving_image.GetSize()]) transform = sitk.Euler3DTransform(rotation_center, rotation_x, 0, rotation_z, (0,0,0)) resample.SetTransform(transform) modified_moving_image = resample.Execute(moving_image) interact(lambda image1_z, image2_z, image1, image2,:ru.display_scalar_images(image1_z, image2_z, image1, image2, moving_intensity_range, moving_intensity_range, 'original', 'rotated'), image1_z=(0,moving_image.GetSize()[2]-1), image2_z=(0,modified_moving_image.GetSize()[2]-1), image1 = fixed(moving_image), image2=fixed(modified_moving_image));
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
Register using standard initialization (assumes orientation is similar)
initial_transform = sitk.CenteredTransformInitializer(fixed_image,
                                                      modified_moving_image,
                                                      sitk.Euler3DTransform(),
                                                      sitk.CenteredTransformInitializerFilter.GEOMETRY)
final_transform = multires_registration(fixed_image, modified_moving_image, initial_transform)
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
Visually evaluate our results:
moving_resampled = sitk.Resample(modified_moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelIDValue()) interact(ru.display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), image1 = fixed(sitk.IntensityWindowing(fixed_image, fixed_intensity_range[0], fixed_intensity_range[1])), image2=fixed(sitk.IntensityWindowing(moving_resampled, moving_intensity_range[0], moving_intensity_range[1])));
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
Register using a heuristic initialization approach (using multiple orientations)

As we want to account for significant orientation differences due to erroneous patient position (HFS...), we evaluate the similarity measure at locations corresponding to the various orientation differences. This can be done in two ways, both illustrated below:
* Use the ImageRegistrationMethod.MetricEvaluate() method.
* Use the Exhaustive optimizer.

The former approach is more computationally intensive, as it constructs and configures a metric object each time it is invoked. It is therefore more appropriate when the set of parameter values we want to evaluate does not lie on a rectilinear grid in the parameter space. The latter approach is appropriate if the parameter values do lie on a rectilinear grid, in which case it is more computationally efficient. In both cases we use the CenteredTransformInitializer to obtain the initial translation.

MetricEvaluate: to use the MetricEvaluate method we create an ImageRegistrationMethod and set its metric and interpolator. We then iterate over all parameter settings, set the initial transform, and evaluate the metric. The minimal similarity measure value corresponds to the best parameter settings.
# Dictionary with all the orientations we will try. We omit the identity (x=0, y=0, z=0) as we always use it.
# This set of rotations is arbitrary. For a complete grid coverage we would have 64 entries
# (0, pi/2, pi, 1.5pi for each angle).
all_orientations = {'x=0, y=0, z=90': (0.0, 0.0, np.pi/2.0),
                    'x=0, y=0, z=-90': (0.0, 0.0, -np.pi/2.0),
                    'x=0, y=0, z=180': (0.0, 0.0, np.pi),
                    'x=180, y=0, z=0': (np.pi, 0.0, 0.0),
                    'x=180, y=0, z=90': (np.pi, 0.0, np.pi/2.0),
                    'x=180, y=0, z=-90': (np.pi, 0.0, -np.pi/2.0),
                    'x=180, y=0, z=180': (np.pi, 0.0, np.pi)}

# Registration framework setup.
registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)
registration_method.SetMetricSamplingPercentage(0.01)
registration_method.SetInterpolator(sitk.sitkLinear)

# Evaluate the similarity metric using the eight possible orientations; the translation remains the same for all.
initial_transform = sitk.Euler3DTransform(sitk.CenteredTransformInitializer(fixed_image,
                                                                            modified_moving_image,
                                                                            sitk.Euler3DTransform(),
                                                                            sitk.CenteredTransformInitializerFilter.GEOMETRY))
registration_method.SetInitialTransform(initial_transform, inPlace=False)
best_orientation = (0.0, 0.0, 0.0)
best_similarity_value = registration_method.MetricEvaluate(fixed_image, modified_moving_image)

# Iterate over all other rotation parameter settings.
for key, orientation in all_orientations.items():
    initial_transform.SetRotation(*orientation)
    registration_method.SetInitialTransform(initial_transform)
    current_similarity_value = registration_method.MetricEvaluate(fixed_image, modified_moving_image)
    if current_similarity_value < best_similarity_value:
        best_similarity_value = current_similarity_value
        best_orientation = orientation
initial_transform.SetRotation(*best_orientation)

final_transform = multires_registration(fixed_image, modified_moving_image, initial_transform)
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
Visually evaluate our results:
moving_resampled = sitk.Resample(modified_moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelIDValue()) interact(ru.display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), image1 = fixed(sitk.IntensityWindowing(fixed_image, fixed_intensity_range[0], fixed_intensity_range[1])), image2=fixed(sitk.IntensityWindowing(moving_resampled, moving_intensity_range[0], moving_intensity_range[1])));
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
Exhaustive optimizer

The exhaustive optimizer evaluates the similarity measure using a grid overlaid on the parameter space. The grid is centered on the parameter values set by SetInitialTransform, and the locations of its vertices are determined by the numberOfSteps, stepLength, and optimizer scales. To quote the documentation of this class: "a side of the region is stepLength*(2*numberOfSteps[d]+1)*scaling[d]". Using this approach we have superfluous evaluations (15 evaluations, corresponding to 3 values for rotation around the x axis times 5 values for rotation around the z axis, as compared to the 8 evaluations using the MetricEvaluate method). A short computation of the sampled grid follows this cell.
initial_transform = sitk.CenteredTransformInitializer(fixed_image, modified_moving_image, sitk.Euler3DTransform(), sitk.CenteredTransformInitializerFilter.GEOMETRY) registration_method = sitk.ImageRegistrationMethod() registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50) registration_method.SetMetricSamplingStrategy(registration_method.RANDOM) registration_method.SetMetricSamplingPercentage(0.01) registration_method.SetInterpolator(sitk.sitkLinear) # The order of parameters for the Euler3DTransform is [angle_x, angle_y, angle_z, t_x, t_y, t_z]. The parameter # sampling grid is centered on the initial_transform parameter values, that are all zero for the rotations. Given # the number of steps and their length and optimizer scales we have: # angle_x = -pi, 0, pi # angle_y = 0 # angle_z = -pi, -pi/2, 0, pi/2, pi registration_method.SetOptimizerAsExhaustive(numberOfSteps=[1,0,2,0,0,0], stepLength = np.pi) registration_method.SetOptimizerScales([1,1,0.5,1,1,1]) #Perform the registration in-place so that the initial_transform is modified. registration_method.SetInitialTransform(initial_transform, inPlace=True) registration_method.Execute(fixed_image, modified_moving_image) final_transform = multires_registration(fixed_image, modified_moving_image, initial_transform)
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
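A small worked example of the grid formula quoted above, under the settings used in the cell (numberOfSteps=[1,0,2,0,0,0], stepLength=pi, scales=[1,1,0.5,1,1,1]): parameter d is sampled at initial[d] + k*stepLength*scaling[d] for k = -numberOfSteps[d], ..., numberOfSteps[d], which reproduces the angle sets noted in the code comments. This sketch only makes the sampling pattern explicit; it does not call SimpleITK.

```python
import numpy as np

number_of_steps = [1, 0, 2, 0, 0, 0]   # per-parameter half-width of the grid
step_length = np.pi
scales = [1, 1, 0.5, 1, 1, 1]
initial_values = [0.0] * 6             # rotations/translations from the centered initializer

for d, name in enumerate(["angle_x", "angle_y", "angle_z", "t_x", "t_y", "t_z"]):
    ks = range(-number_of_steps[d], number_of_steps[d] + 1)
    samples = [initial_values[d] + k * step_length * scales[d] for k in ks]
    print(name, ["{:.3f}".format(s) for s in samples])

# Expected printout (3 x-angles times 5 z-angles = 15 grid points):
# angle_x ['-3.142', '0.000', '3.142']
# angle_y ['0.000']
# angle_z ['-3.142', '-1.571', '0.000', '1.571', '3.142']
# t_x, t_y, t_z each ['0.000']
```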
Visually evaluate our results:
moving_resampled = sitk.Resample(modified_moving_image, fixed_image, final_transform, sitk.sitkLinear, 0.0, moving_image.GetPixelIDValue()) interact(ru.display_images_with_alpha, image_z=(0,fixed_image.GetSize()[2]), alpha=(0.0,1.0,0.05), image1 = fixed(sitk.IntensityWindowing(fixed_image, fixed_intensity_range[0], fixed_intensity_range[1])), image2=fixed(sitk.IntensityWindowing(moving_resampled, moving_intensity_range[0], moving_intensity_range[1])));
_____no_output_____
Apache-2.0
63_Registration_Initialization.ipynb
blowekamp/SimpleITK-Notebooks
Finetune 🤗 Transformers Models with PyTorch Lightning ⚡

This notebook will use HuggingFace's `datasets` library to get data, which will be wrapped in a `LightningDataModule`. Then, we write a class to perform text classification on any dataset from the [GLUE Benchmark](https://gluebenchmark.com/). (We just show CoLA and MRPC due to constraints on compute/disk.) [HuggingFace's NLP Viewer](https://huggingface.co/nlp/viewer/?dataset=glue&config=cola) can help you get a feel for the two datasets we will use and the tasks they solve.

---
- Give us a ⭐ [on Github](https://www.github.com/PytorchLightning/pytorch-lightning/)
- Check out [the documentation](https://pytorch-lightning.readthedocs.io/en/latest/)
- Ask a question on [GitHub Discussions](https://github.com/PyTorchLightning/pytorch-lightning/discussions/)
- Join us [on Slack](https://join.slack.com/t/pytorch-lightning/shared_invite/zt-f6bl2l0l-JYMK3tbAgAmGRrlNr00f1A)
- [HuggingFace datasets](https://github.com/huggingface/datasets)
- [HuggingFace transformers](https://github.com/huggingface/transformers)

Setup
!pip install pytorch-lightning datasets transformers from argparse import ArgumentParser from datetime import datetime from typing import Optional import datasets import numpy as np import pytorch_lightning as pl import torch from torch.utils.data import DataLoader from transformers import ( AdamW, AutoModelForSequenceClassification, AutoConfig, AutoTokenizer, get_linear_schedule_with_warmup, glue_compute_metrics )
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
GLUE DataModule
class GLUEDataModule(pl.LightningDataModule): task_text_field_map = { 'cola': ['sentence'], 'sst2': ['sentence'], 'mrpc': ['sentence1', 'sentence2'], 'qqp': ['question1', 'question2'], 'stsb': ['sentence1', 'sentence2'], 'mnli': ['premise', 'hypothesis'], 'qnli': ['question', 'sentence'], 'rte': ['sentence1', 'sentence2'], 'wnli': ['sentence1', 'sentence2'], 'ax': ['premise', 'hypothesis'] } glue_task_num_labels = { 'cola': 2, 'sst2': 2, 'mrpc': 2, 'qqp': 2, 'stsb': 1, 'mnli': 3, 'qnli': 2, 'rte': 2, 'wnli': 2, 'ax': 3 } loader_columns = [ 'datasets_idx', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions', 'labels' ] def __init__( self, model_name_or_path: str, task_name: str ='mrpc', max_seq_length: int = 128, train_batch_size: int = 32, eval_batch_size: int = 32, **kwargs ): super().__init__() self.model_name_or_path = model_name_or_path self.task_name = task_name self.max_seq_length = max_seq_length self.train_batch_size = train_batch_size self.eval_batch_size = eval_batch_size self.text_fields = self.task_text_field_map[task_name] self.num_labels = self.glue_task_num_labels[task_name] self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True) def setup(self, stage): self.dataset = datasets.load_dataset('glue', self.task_name) for split in self.dataset.keys(): self.dataset[split] = self.dataset[split].map( self.convert_to_features, batched=True, remove_columns=['label'], ) self.columns = [c for c in self.dataset[split].column_names if c in self.loader_columns] self.dataset[split].set_format(type="torch", columns=self.columns) self.eval_splits = [x for x in self.dataset.keys() if 'validation' in x] def prepare_data(self): datasets.load_dataset('glue', self.task_name) AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True) def train_dataloader(self): return DataLoader(self.dataset['train'], batch_size=self.train_batch_size) def val_dataloader(self): if len(self.eval_splits) == 1: return DataLoader(self.dataset['validation'], batch_size=self.eval_batch_size) elif len(self.eval_splits) > 1: return [DataLoader(self.dataset[x], batch_size=self.eval_batch_size) for x in self.eval_splits] def test_dataloader(self): if len(self.eval_splits) == 1: return DataLoader(self.dataset['test'], batch_size=self.eval_batch_size) elif len(self.eval_splits) > 1: return [DataLoader(self.dataset[x], batch_size=self.eval_batch_size) for x in self.eval_splits] def convert_to_features(self, example_batch, indices=None): # Either encode single sentence or sentence pairs if len(self.text_fields) > 1: texts_or_text_pairs = list(zip(example_batch[self.text_fields[0]], example_batch[self.text_fields[1]])) else: texts_or_text_pairs = example_batch[self.text_fields[0]] # Tokenize the text/text pairs features = self.tokenizer.batch_encode_plus( texts_or_text_pairs, max_length=self.max_seq_length, pad_to_max_length=True, truncation=True ) # Rename label to labels to make it easier to pass to model forward features['labels'] = example_batch['label'] return features
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
You could use this datamodule with standalone PyTorch if you wanted; a minimal standalone training-loop sketch follows this cell.
dm = GLUEDataModule('distilbert-base-uncased')
dm.prepare_data()
dm.setup('fit')
next(iter(dm.train_dataloader()))
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
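A minimal sketch of what "standalone PyTorch" use of the datamodule could look like, assuming the `dm` built above and a plain `AutoModelForSequenceClassification`; the optimizer choice and the single-batch loop are illustrative assumptions, not the Lightning training recipe used later in the notebook.

```python
import torch
from transformers import AutoModelForSequenceClassification

# Reuse the datamodule prepared above; its batches already contain input_ids, attention_mask, and labels.
model = AutoModelForSequenceClassification.from_pretrained(
    'distilbert-base-uncased', num_labels=dm.num_labels
)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)

model.train()
for step, batch in enumerate(dm.train_dataloader()):
    outputs = model(**batch)   # forward pass; the loss is the first element when labels are given
    loss = outputs[0]
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    if step == 0:              # stop after one batch; this is only a smoke test
        print('loss:', loss.item())
        break
```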
GLUE Model
class GLUETransformer(pl.LightningModule): def __init__( self, model_name_or_path: str, num_labels: int, learning_rate: float = 2e-5, adam_epsilon: float = 1e-8, warmup_steps: int = 0, weight_decay: float = 0.0, train_batch_size: int = 32, eval_batch_size: int = 32, eval_splits: Optional[list] = None, **kwargs ): super().__init__() self.save_hyperparameters() self.config = AutoConfig.from_pretrained(model_name_or_path, num_labels=num_labels) self.model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, config=self.config) self.metric = datasets.load_metric( 'glue', self.hparams.task_name, experiment_id=datetime.now().strftime("%d-%m-%Y_%H-%M-%S") ) def forward(self, **inputs): return self.model(**inputs) def training_step(self, batch, batch_idx): outputs = self(**batch) loss = outputs[0] return loss def validation_step(self, batch, batch_idx, dataloader_idx=0): outputs = self(**batch) val_loss, logits = outputs[:2] if self.hparams.num_labels >= 1: preds = torch.argmax(logits, axis=1) elif self.hparams.num_labels == 1: preds = logits.squeeze() labels = batch["labels"] return {'loss': val_loss, "preds": preds, "labels": labels} def validation_epoch_end(self, outputs): if self.hparams.task_name == 'mnli': for i, output in enumerate(outputs): # matched or mismatched split = self.hparams.eval_splits[i].split('_')[-1] preds = torch.cat([x['preds'] for x in output]).detach().cpu().numpy() labels = torch.cat([x['labels'] for x in output]).detach().cpu().numpy() loss = torch.stack([x['loss'] for x in output]).mean() self.log(f'val_loss_{split}', loss, prog_bar=True) split_metrics = {f"{k}_{split}": v for k, v in self.metric.compute(predictions=preds, references=labels).items()} self.log_dict(split_metrics, prog_bar=True) return loss preds = torch.cat([x['preds'] for x in outputs]).detach().cpu().numpy() labels = torch.cat([x['labels'] for x in outputs]).detach().cpu().numpy() loss = torch.stack([x['loss'] for x in outputs]).mean() self.log('val_loss', loss, prog_bar=True) self.log_dict(self.metric.compute(predictions=preds, references=labels), prog_bar=True) return loss def setup(self, stage): if stage == 'fit': # Get dataloader by calling it - train_dataloader() is called after setup() by default train_loader = self.train_dataloader() # Calculate total steps self.total_steps = ( (len(train_loader.dataset) // (self.hparams.train_batch_size * max(1, self.hparams.gpus))) // self.hparams.accumulate_grad_batches * float(self.hparams.max_epochs) ) def configure_optimizers(self): "Prepare optimizer and schedule (linear warmup and decay)" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps ) scheduler = { 'scheduler': scheduler, 'interval': 'step', 'frequency': 1 } return [optimizer], [scheduler] @staticmethod def add_model_specific_args(parent_parser): parser = ArgumentParser(parents=[parent_parser], add_help=False) parser.add_argument("--learning_rate", default=2e-5, type=float) parser.add_argument("--adam_epsilon", default=1e-8, type=float) 
parser.add_argument("--warmup_steps", default=0, type=int) parser.add_argument("--weight_decay", default=0.0, type=float) return parser
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
⚡ Quick Tip - Combine arguments from your DataModule, Model, and Trainer into one for easy and robust configuration
def parse_args(args=None): parser = ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = GLUEDataModule.add_argparse_args(parser) parser = GLUETransformer.add_model_specific_args(parser) parser.add_argument('--seed', type=int, default=42) return parser.parse_args(args) def main(args): pl.seed_everything(args.seed) dm = GLUEDataModule.from_argparse_args(args) dm.prepare_data() dm.setup('fit') model = GLUETransformer(num_labels=dm.num_labels, eval_splits=dm.eval_splits, **vars(args)) trainer = pl.Trainer.from_argparse_args(args) return dm, model, trainer
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
Training CoLA. See an interactive view of the CoLA dataset in [NLP Viewer](https://huggingface.co/nlp/viewer/?dataset=glue&config=cola)
mocked_args = """
    --model_name_or_path albert-base-v2
    --task_name cola
    --max_epochs 3
    --gpus 1""".split()

args = parse_args(mocked_args)
dm, model, trainer = main(args)
trainer.fit(model, dm)
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
MRPC. See an interactive view of the MRPC dataset in [NLP Viewer](https://huggingface.co/nlp/viewer/?dataset=glue&config=mrpc)
mocked_args = """
    --model_name_or_path distilbert-base-cased
    --task_name mrpc
    --max_epochs 3
    --gpus 1""".split()

args = parse_args(mocked_args)
dm, model, trainer = main(args)
trainer.fit(model, dm)
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
MNLI
- The MNLI dataset is huge, so we aren't going to bother trying to train it here.
- Let's just make sure our multi-dataloader logic is right by skipping over training and going straight to validation.

See an interactive view of the MNLI dataset in [NLP Viewer](https://huggingface.co/nlp/viewer/?dataset=glue&config=mnli)
mocked_args = """
    --model_name_or_path distilbert-base-uncased
    --task_name mnli
    --max_epochs 1
    --gpus 1
    --limit_train_batches 10
    --progress_bar_refresh_rate 20""".split()

args = parse_args(mocked_args)
dm, model, trainer = main(args)
trainer.fit(model, dm)
_____no_output_____
Apache-2.0
notebooks/04-transformers-text-classification.ipynb
inzouzouwetrust/pytorch-lightning
Example: View timeline
cd ..

# Imports
import json
import os
import instagram_api
from pprint import pprint
from datetime import date
from IPython.display import Image, display
#import imageio
#imageio.plugins.ffmpeg.download()
#File saved as /home/vikash/.imageio/ffmpeg/ffmpeg-linux64-v3.3.1.

# Config
from dotenv import load_dotenv
from pathlib import Path

env_path = Path('.env')
load_dotenv(dotenv_path=env_path)
username = os.getenv('FB_USERNAME')
password = os.getenv('FB_PASSWORD')
username is not None and password is not None

# Models
class Pictures(object):
    """Placeholder model for a single picture."""
    def __init__(self, data):
        self.picture_url = None
        self.thumbnail_url = None
        pprint(data)

# Utils
class FileUtils:
    @staticmethod
    def find_file(name):
        for root, dirs, files in os.walk('./'):
            if name in files:
                return os.path.join(root, name)

    @staticmethod
    def save_json_to_file(data, file):
        with open(file, 'w') as outfile:
            json.dump(data, outfile, indent=4, sort_keys=True)

# Log in, reusing an existing client if one is already defined in the session.
try:
    instagram is not None
except NameError:
    instagram = instagram_api.Client(username=username, password=password)
if not instagram.isLoggedIn:
    instagram.login()
instagram.isLoggedIn

instagram.timelineFeed()

# Collect the URL of one photo from the first timeline post.
post_num = 0
pic_num = 0
photos = []
feed = instagram.LastJson['items']
for post_num in range(len(feed)):
    post = feed[post_num]
    if 'carousel_media' not in post:
        picture = post
    else:
        pictures = post['carousel_media']
        picture = pictures[post_num]
    url = picture['image_versions2']['candidates'][1]['url']
    photos.append(url.split('?')[0])
    break
photos[0]

# Collect picture URLs, expanding carousel posts into their individual pictures.
pictures = []
feed = instagram.LastJson['items']
for post_num in range(len(feed)):
    post = feed[post_num]
    if 'carousel_media' not in post:
        pictures.append(Pictures(post))
    else:
        pictures = post['carousel_media']
        for picture in pictures:
            url = picture['image_versions2']['candidates'][1]['url']
            photos.append(url.split('?')[0])
    break
photos[0]

# Display images
images = []
for i in range(len(photos)):
    images.append(Image(url=photos[i]))
display(*images)
_____no_output_____
MIT
examples/timeline.ipynb
Vikash-Kothary/british-express-python
Convolutional Neural Networks: Application

Welcome to Course 4's second assignment! In this notebook, you will:
- Implement helper functions that you will use when implementing a TensorFlow model
- Implement a fully functioning ConvNet using TensorFlow

**After this assignment you will be able to:**
- Build and train a ConvNet in TensorFlow for a classification problem

We assume here that you are already familiar with TensorFlow. If you are not, please refer to the *TensorFlow Tutorial* of the third week of Course 2 ("*Improving deep neural networks*").

1.0 - TensorFlow model

In the previous assignment, you built helper functions using numpy to understand the mechanics behind convolutional neural networks. Most practical applications of deep learning today are built using programming frameworks, which have many built-in functions you can simply call. As usual, we will start by loading in the packages.
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *

%matplotlib inline
np.random.seed(1)
_____no_output_____
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
Run the next cell to load the "SIGNS" dataset you are going to use.
# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
X_train_orig.shape
plt.imshow(X_test_orig[1,:,:,:])
_____no_output_____
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5.The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of `index` below and re-run to see different examples.
# Example of a picture
index = 6
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
y = 2
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
In Course 2, you had built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it.To get started, let's examine the shapes of your data.
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
number of training examples = 1080
number of test examples = 120
X_train shape: (1080, 64, 64, 3)
Y_train shape: (1080, 6)
X_test shape: (120, 64, 64, 3)
Y_test shape: (120, 6)
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
1.1 - Create placeholders

TensorFlow requires that you create placeholders for the input data that will be fed into the model when running the session.

**Exercise**: Implement the function below to create placeholders for the input image X and the output Y. You should not define the number of training examples for the moment; to do so, you can use "None" as the batch size, which gives you the flexibility to choose it later. Hence X should be of dimension **[None, n_H0, n_W0, n_C0]** and Y should be of dimension **[None, n_y]**. [Hint](https://www.tensorflow.org/api_docs/python/tf/placeholder).
# GRADED FUNCTION: create_placeholders def create_placeholders(n_H0, n_W0, n_C0, n_y): """ Creates the placeholders for the tensorflow session. Arguments: n_H0 -- scalar, height of an input image n_W0 -- scalar, width of an input image n_C0 -- scalar, number of channels of the input n_y -- scalar, number of classes Returns: X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float" Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float" """ ### START CODE HERE ### (≈2 lines) X = tf.placeholder(shape = [None, n_H0, n_W0, n_C0],dtype=tf.float32) Y = tf.placeholder(shape = [None, n_y],dtype=tf.float32) ### END CODE HERE ### return X, Y X, Y = create_placeholders(64, 64, 3, 6) print ("X = " + str(X)) print ("Y = " + str(Y))
X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32) Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32)
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
**Expected Output**

X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32)
Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32)

1.2 - Initialize parameters

You will initialize weights/filters $W1$ and $W2$ using `tf.contrib.layers.xavier_initializer(seed = 0)`. You don't need to worry about bias variables, as you will soon see that TensorFlow functions take care of the bias. Note also that you will only initialize the weights/filters for the conv2d functions; TensorFlow initializes the layers for the fully connected part automatically. We will talk more about that later in this assignment.

**Exercise:** Implement initialize_parameters(). The dimensions for each group of filters are provided below. Reminder: to initialize a parameter $W$ of shape [1,2,3,4] in TensorFlow, use:

```python
W = tf.get_variable("W", [1,2,3,4], initializer = ...)
```

[More Info](https://www.tensorflow.org/api_docs/python/tf/get_variable).
# GRADED FUNCTION: initialize_parameters def initialize_parameters(): """ Initializes weight parameters to build a neural network with tensorflow. The shapes are: W1 : [4, 4, 3, 8] W2 : [2, 2, 8, 16] Returns: parameters -- a dictionary of tensors containing W1, W2 """ tf.set_random_seed(1) # so that your "random" numbers match ours ### START CODE HERE ### (approx. 2 lines of code) W1 = tf.get_variable('W1', [4, 4, 3, 8], initializer = tf.contrib.layers.xavier_initializer(seed = 0)) W2 = tf.get_variable('W2', [2, 2, 8, 16], initializer = tf.contrib.layers.xavier_initializer(seed = 0)) ### END CODE HERE ### parameters = {"W1": W1, "W2": W2} return parameters tf.reset_default_graph() with tf.Session() as sess_test: parameters = initialize_parameters() init = tf.global_variables_initializer() sess_test.run(init) print("W1 = " + str(parameters["W1"].eval()[1,1,1])) print("W2 = " + str(parameters["W2"].eval()[1,1,1]))
W1 = [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394 -0.06847463 0.05245192] W2 = [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058 -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228 -0.22779644 -0.1601823 -0.16117483 -0.10286498]
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
**Expected Output:**

W1 = [ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394 -0.06847463 0.05245192]
W2 = [-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058 -0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228 -0.22779644 -0.1601823 -0.16117483 -0.10286498]

1.3 - Forward propagation

In TensorFlow, there are built-in functions that carry out the convolution steps for you.
- **tf.nn.conv2d(X, W1, strides = [1,s,s,1], padding = 'SAME'):** given an input $X$ and a group of filters $W1$, this function convolves $W1$'s filters on X. The third input ([1,s,s,1]) represents the strides for each dimension of the input (m, n_H_prev, n_W_prev, n_C_prev). You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d).
- **tf.nn.max_pool(A, ksize = [1,f,f,1], strides = [1,s,s,1], padding = 'SAME'):** given an input A, this function uses a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/max_pool).
- **tf.nn.relu(Z1):** computes the elementwise ReLU of Z1 (which can be any shape). You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/relu).
- **tf.contrib.layers.flatten(P):** given an input P, this function flattens each example into a 1D vector while maintaining the batch size. It returns a flattened tensor with shape [batch_size, k]. You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/flatten).
- **tf.contrib.layers.fully_connected(F, num_outputs):** given the flattened input F, it returns the output computed using a fully connected layer. You can read the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/fully_connected).

In the last function above (`tf.contrib.layers.fully_connected`), the fully connected layer automatically initializes weights in the graph and keeps training them as you train the model. Hence, you did not need to initialize those weights when initializing the parameters.

**Exercise**: Implement the `forward_propagation` function below to build the following model: `CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED`. You should use the functions above. In detail, we will use the following parameters for all the steps:
- Conv2D: stride 1, padding is "SAME"
- ReLU
- Max pool: use an 8 by 8 filter size and an 8 by 8 stride, padding is "SAME"
- Conv2D: stride 1, padding is "SAME"
- ReLU
- Max pool: use a 4 by 4 filter size and a 4 by 4 stride, padding is "SAME"
- Flatten the previous output.
- FULLYCONNECTED (FC) layer: apply a fully connected layer without a non-linear activation function. Do not call the softmax here; this will result in 6 neurons in the output layer, which then get passed later to a softmax. In TensorFlow, the softmax and cost function are lumped together into a single function, which you'll call in a different function when computing the cost.
# GRADED FUNCTION: forward_propagation def forward_propagation(X, parameters): """ Implements the forward propagation for the model: CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED Arguments: X -- input dataset placeholder, of shape (input size, number of examples) parameters -- python dictionary containing your parameters "W1", "W2" the shapes are given in initialize_parameters Returns: Z3 -- the output of the last LINEAR unit """ # Retrieve the parameters from the dictionary "parameters" W1 = parameters['W1'] W2 = parameters['W2'] ### START CODE HERE ### # CONV2D: stride of 1, padding 'SAME' Z1 = tf.nn.conv2d(X,W1, strides = [1,1,1,1], padding = 'SAME') # RELU A1 = tf.nn.relu(Z1) # MAXPOOL: window 8x8, sride 8, padding 'SAME' P1 = tf.nn.max_pool(A1, ksize = [1,8,8,1], strides = [1,8,8,1], padding = 'SAME') # CONV2D: filters W2, stride 1, padding 'SAME' Z2 = tf.nn.conv2d(P1,W2, strides = [1,1,1,1], padding = 'SAME') # RELU A2 = tf.nn.relu(Z2) # MAXPOOL: window 4x4, stride 4, padding 'SAME' P2 = tf.nn.max_pool(A2, ksize = [1,4,4,1], strides = [1,4,4,1], padding = 'SAME') # FLATTEN P2 = tf.contrib.layers.flatten(P2) # FULLY-CONNECTED without non-linear activation function (not not call softmax). # 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None" Z3 = tf.contrib.layers.fully_connected(P2, 6,activation_fn=None) ### END CODE HERE ### return Z3 tf.reset_default_graph() with tf.Session() as sess: np.random.seed(1) X, Y = create_placeholders(64, 64, 3, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) init = tf.global_variables_initializer() sess.run(init) a = sess.run(Z3, {X: np.random.randn(2,64,64,3), Y: np.random.randn(2,6)}) print("Z3 = " + str(a))
Z3 = [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064] [-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
**Expected Output**:

Z3 = [[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064] [-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]

1.4 - Compute cost

Implement the compute cost function below. You might find these two functions helpful:
- **tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y):** computes the softmax entropy loss. This function both computes the softmax activation function and the resulting loss. You can check the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits).
- **tf.reduce_mean:** computes the mean of elements across dimensions of a tensor. Use this to average the losses over all the examples to get the overall cost. You can check the full documentation [here](https://www.tensorflow.org/api_docs/python/tf/reduce_mean).

**Exercise**: Compute the cost below using the functions above.
# GRADED FUNCTION: compute_cost def compute_cost(Z3, Y): """ Computes the cost Arguments: Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples) Y -- "true" labels vector placeholder, same shape as Z3 Returns: cost - Tensor of the cost function """ ### START CODE HERE ### (1 line of code) cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z3, labels = Y)) ### END CODE HERE ### return cost tf.reset_default_graph() with tf.Session() as sess: np.random.seed(1) X, Y = create_placeholders(64, 64, 3, 6) parameters = initialize_parameters() Z3 = forward_propagation(X, parameters) cost = compute_cost(Z3, Y) init = tf.global_variables_initializer() sess.run(init) a = sess.run(cost, {X: np.random.randn(4,64,64,3), Y: np.random.randn(4,6)}) print("cost = " + str(a))
cost = 2.91034
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
**Expected Output**: cost = 2.91034

1.5 - Model

Finally, you will merge the helper functions you implemented above to build a model and train it on the SIGNS dataset. You have implemented `random_mini_batches()` in the Optimization programming assignment of Course 2. Remember that this function returns a list of mini-batches.

**Exercise**: Complete the function below. The model below should:
- create placeholders
- initialize parameters
- forward propagate
- compute the cost
- create an optimizer

Finally, you will create a session and run a for loop over num_epochs, get the mini-batches, and then for each mini-batch optimize the function. [Hint for initializing the variables](https://www.tensorflow.org/api_docs/python/tf/global_variables_initializer)
# GRADED FUNCTION: model def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009, num_epochs = 100, minibatch_size = 64, print_cost = True): """ Implements a three-layer ConvNet in Tensorflow: CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED Arguments: X_train -- training set, of shape (None, 64, 64, 3) Y_train -- test set, of shape (None, n_y = 6) X_test -- training set, of shape (None, 64, 64, 3) Y_test -- test set, of shape (None, n_y = 6) learning_rate -- learning rate of the optimization num_epochs -- number of epochs of the optimization loop minibatch_size -- size of a minibatch print_cost -- True to print the cost every 100 epochs Returns: train_accuracy -- real number, accuracy on the train set (X_train) test_accuracy -- real number, testing accuracy on the test set (X_test) parameters -- parameters learnt by the model. They can then be used to predict. """ ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables tf.set_random_seed(1) # to keep results consistent (tensorflow seed) seed = 3 # to keep results consistent (numpy seed) (m, n_H0, n_W0, n_C0) = X_train.shape n_y = Y_train.shape[1] costs = [] # To keep track of the cost # Create Placeholders of the correct shape ### START CODE HERE ### (1 line) X, Y = create_placeholders(n_H0,n_W0,n_C0,n_y) ### END CODE HERE ### # Initialize parameters ### START CODE HERE ### (1 line) parameters = initialize_parameters() ### END CODE HERE ### # Forward propagation: Build the forward propagation in the tensorflow graph ### START CODE HERE ### (1 line) Z3 = forward_propagation(X, parameters) ### END CODE HERE ### # Cost function: Add cost function to tensorflow graph ### START CODE HERE ### (1 line) cost = compute_cost(Z3, Y) ### END CODE HERE ### # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost. ### START CODE HERE ### (1 line) optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost) ### END CODE HERE ### # Initialize all the variables globally init = tf.global_variables_initializer() # Start the session to compute the tensorflow graph with tf.Session() as sess: # Run the initialization sess.run(init) # Do the training loop for epoch in range(num_epochs): minibatch_cost = 0. num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set seed = seed + 1 minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed) for minibatch in minibatches: # Select a minibatch (minibatch_X, minibatch_Y) = minibatch # IMPORTANT: The line that runs the graph on a minibatch. # Run the session to execute the optimizer and the cost, the feedict should contain a minibatch for (X,Y). 
### START CODE HERE ### (1 line) _ , temp_cost = sess.run([optimizer,cost],feed_dict={X:minibatch_X,Y:minibatch_Y}) ### END CODE HERE ### minibatch_cost += temp_cost / num_minibatches # Print the cost every epoch if print_cost == True and epoch % 5 == 0: print ("Cost after epoch %i: %f" % (epoch, minibatch_cost)) if print_cost == True and epoch % 1 == 0: costs.append(minibatch_cost) # plot the cost plt.plot(np.squeeze(costs)) plt.ylabel('cost') plt.xlabel('iterations (per tens)') plt.title("Learning rate =" + str(learning_rate)) plt.show() # Calculate the correct predictions predict_op = tf.argmax(Z3, 1) correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1)) # Calculate accuracy on the test set accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print(accuracy) train_accuracy = accuracy.eval({X: X_train, Y: Y_train}) test_accuracy = accuracy.eval({X: X_test, Y: Y_test}) print("Train Accuracy:", train_accuracy) print("Test Accuracy:", test_accuracy) return train_accuracy, test_accuracy, parameters
_____no_output_____
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
Run the following cell to train your model for 100 epochs. Check if your cost after epoch 0 and 5 matches our output. If not, stop the cell and go back to your code!
_, _, parameters = model(X_train, Y_train, X_test, Y_test)
Cost after epoch 0: 1.917929
Cost after epoch 5: 1.506757
Cost after epoch 10: 0.955359
Cost after epoch 15: 0.845802
Cost after epoch 20: 0.701174
Cost after epoch 25: 0.571977
Cost after epoch 30: 0.518435
Cost after epoch 35: 0.495806
Cost after epoch 40: 0.429827
Cost after epoch 45: 0.407291
Cost after epoch 50: 0.366394
Cost after epoch 55: 0.376922
Cost after epoch 60: 0.299491
Cost after epoch 65: 0.338870
Cost after epoch 70: 0.316400
Cost after epoch 75: 0.310413
Cost after epoch 80: 0.249549
Cost after epoch 85: 0.243457
Cost after epoch 90: 0.200031
Cost after epoch 95: 0.175452
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
**Expected output**: although it may not match perfectly, your expected output should be close to ours and your cost value should decrease.

**Cost after epoch 0 =** 1.917929
**Cost after epoch 5 =** 1.506757
**Train Accuracy =** 0.940741
**Test Accuracy =** 0.783333

Congratulations! You have finished the assignment and built a model that recognizes SIGN language with almost 80% accuracy on the test set. If you wish, feel free to play around with this dataset further. You can actually improve its accuracy by spending more time tuning the hyperparameters, or by using regularization (as this model clearly has high variance). Once again, here's a thumbs up for your work!
fname = "images/thumbs_up.jpg" image = np.array(ndimage.imread(fname, flatten=False)) my_image = scipy.misc.imresize(image, size=(64,64)) plt.imshow(my_image)
_____no_output_____
MIT
Convolution model Application.ipynb
GUXIANFEI/Convolutional-Neural-Networks
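A side note on the image-loading cell above: `scipy.ndimage.imread` and `scipy.misc.imresize` were deprecated and later removed from SciPy. A minimal sketch of an equivalent using Pillow (an assumption: Pillow and matplotlib are installed) would be:

```python
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

fname = "images/thumbs_up.jpg"
my_image = np.array(Image.open(fname).resize((64, 64)))  # resize to the model's 64x64 input
plt.imshow(my_image)
plt.show()
```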
Rebound model

Aim: Quantify the environmental impact of the money that households save on consumption expenses, across different
- industrial sectors and scenarios:
    - housing (rent): baseline for 2011
    - energy: efficient_devices, renewable_energy
    - food-waste: avoidable_waste_saving
    - clothing: sufficiency, refuse, reshare, reuse for 2025
    - furnishing: refuse, reuse for 2035 and 2050
- temporal periods: years 2006-2017
- spatial regions: parts of Switzerland

_Input_: The household budget survey (HBS) files to train the data
_Model_: A random forest or artificial neural network model
_Output_: The rebound expenses and environmental footprints of the households

TOC
- Step 0: Initialisation
- Step 1: Preprocessing
- Step 2: Model
- Step 3: Postprocessing
- Step 4: LCA

Author: Rhythima Shinde, ETH Zurich
Co-Authors (for energy case study and temporal-regional rebound studies): Sidi Peng, Saloni Vijay, ETH Zurich

-------------------------------------------------------------------------------------------------------------------------------

0. Initialisation back

0.1. Input files & data parameters
- (1a) **seasonal_file** -> For the years 2009-11, the file is provided by A. Froemelt. It is modified based on the original HBS (HABE) data that we obtain from the Federal Statistical Office of Switzerland. It is further modified in this code, in the preprocessing section, to rename columns.
- (1b) **seasonal_file_SI** -> Lists the HBS data columns and the associated activities used to calculate the consumption-based environmental footprint. The file can be found here.
- (2) **habe_month** -> the HBS household ids and their mapping to the month and year in which the survey was filled
- (3) **dependent_indices** -> based on the HBS column indices, this file lists the consumption expense parameters which are predicted
- (4) **independent_indices** -> the HBS column indices which define the household socio-economic properties
- (5) **target_data** -> selects the target dataset used to predict the results. For most cases it is a subset of the HBS (for the housing industry it is the partner dataset 'ABZ', 'SCHL' or 'SM')
- (6) **directory_name** -> based on the industry case, changes the dependent parameters and the income saved by the household (due to which the rebound is supposed to happen) - change the second value in the list

0.2. Model parameters
- (1) **iter_n** -> number of iterations of runs
- (2) **model_name** -> Random Forest (RF) or ANN (Artificial Neural Network)

0.3. Analysis parameters
- (1) industry change: directory_name, with the following dependencies: scenarios, partner_name/target dataset, idx_column_savings_cons, dependent_indices
- (2) year change: seasonal_file - specify which years (2006, 2007, 2008 ... 2017)
- (3) regional change: target_dataset - specify which regions (DE, IT, FR, ZH) and which partner (ABZ, SCHL, SM)

USER INPUT NEEDED: choose model settings and methods of preprocessing
# model and folder settings directory_name = 'housing' # 'housing' or 'furniture' or 'clothing' or 'energy' iter_n=1 model_name='RF' # 'RF' or 'ANN' ## preprocessing methods option_deseason = 'deseasonal' # 'deseasonal' [option 1] or 'month-ind' [option 2] if option_deseason == 'month-ind': n_ind = 63 independent_indices='raw_data/independent_month.csv' if option_deseason == 'deseasonal': n_ind = 39 independent_indices='raw_data/independent.csv' input_normalise = 'no-normalise' #'no-normalise' for not normalising the data or 'normalise' import pandas as pd import numpy as np import sklearn.multioutput as sko from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier import scipy.stats as stats import statistics from sklearn.metrics import r2_score,mean_squared_error, explained_variance_score from sklearn.model_selection import cross_val_score, KFold, train_test_split, StratifiedShuffleSplit from sklearn.preprocessing import FunctionTransformer import matplotlib.pyplot as plt import brightway2 import seaborn as sns from statsmodels.stats.multicomp import pairwise_tukeyhsd, MultiComparison import statsmodels.api as sm from functools import reduce import os import pickle import csv # Additional libraries for neural network implementation # from numpy.random import seed # seed(1) # from tensorflow import set_random_seed # set_random_seed(2) # from keras import optimizers # from keras.models import Sequential # from keras.layers import Dense # from keras.wrappers.scikit_learn import KerasRegressor # Read the modified files by Nauser et al (2020) # - HBS data (merged raw HBS files) "HABE_mergerd_2006_2017" # - tranlsation file 'HABE_Cname_translator.xlsx' # - HBS hhids with the corresponding month of the survey ############################################################################################################################### seasonal_file = 'raw_data/HBS/HABE_merged_2006_2017.csv' seasonal_file_SI = 'raw_data/HBS/HABE_Cname_translator.xlsx' habe_month = 'raw_data/HBS/HABE_date.csv' inf_index_file = 'raw_data/HBS/HABE_inflation_index_all.xlsx' # seasonal_file = 'original_Andi_HBS/habe20092011_hh_prepared_imputed.csv' #based on the years # seasonal_file_SI='original_Andi_HBS/Draft_Paper_8_v11_SupportingInformation.xlsx' # habe_month='original_Andi_HBS/habe_hh_month.csv' ## form the databases df_habe = pd.read_csv(seasonal_file, delimiter=',', error_bad_lines=False, encoding='ISO-8859–1') df_habe_month = pd.read_csv(habe_month, delimiter=',', error_bad_lines=False, encoding='ISO-8859–1') inf_index = pd.read_excel(inf_index_file) dependent_indices= 'raw_data/dependent_'+directory_name+'.csv' dependent_indices_pd = pd.read_csv(dependent_indices, delimiter=',', encoding='ISO-8859–1') dependent_indices_pd_name = pd.read_csv(dependent_indices,sep=',')["name"] dependentsize=len(list(dependent_indices_pd_name)) independent_indices_pd = pd.read_csv(independent_indices, delimiter=',', encoding='ISO-8859–1') list_independent_columns = pd.read_csv(independent_indices, delimiter=',', encoding='ISO-8859–1')['name'].to_list() list_dependent_columns = pd.read_csv(dependent_indices, delimiter=',', encoding='ISO-8859–1')['name'].to_list() #add more columns to perform temporal analysis (month_names and time_periods) def label_month (row): if row['month'] == 1.0 : return 'January' if row['month'] == 2.0 : return 'February' if row['month'] == 3.0 : return 'March' if row['month'] == 4.0 : return 'April' if row['month'] == 5.0 : return 'May' if row['month'] == 6.0 : return 'June' if 
row['month'] == 7.0 : return 'July' if row['month'] == 8.0 : return 'August' if row['month'] == 9.0 : return 'September' if row['month'] == 10.0 : return 'October' if row['month'] == 11.0 : return 'November' if row['month'] == 12.0 : return 'December' def label_period (row): if (row["year"] == 2006) or (row["year"] == 2007) or (row["year"] == 2008): return '1' if (row["year"] == 2009) or (row["year"] == 2010) or (row["year"] == 2011): return '2' if (row["year"] == 2012) or (row["year"] == 2013) or (row["year"] == 2014): return '3' if (row["year"] == 2015) or (row["year"] == 2016) or (row["year"] == 2017): return '4' df_habe_month['month_name']=df_habe_month.apply(lambda row: label_month(row), axis=1) df_habe_month['period']=df_habe_month.apply(lambda row: label_period(row), axis=1)
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
TODO: update the correct savings values for the energy and food industry scenarios, and then merge this cell into the script above
if directory_name =='housing': scenarios = {'baseline_2011':500} target_data ='ABZ' # target_data = 'subset-HBS' idx_column_savings_cons = 'net_rent_and_mortgage_interest_of_principal_residence' #289 if directory_name == 'furniture': scenarios = {'refuse_2035':17,'refuse_2050':17.4,'reuse_1_2035':6.9, 'reuse_1_2050':8.2,'reuse_2_2035':10.2,'reuse_2_2050':9.5} target_data = 'subset-HBS' idx_column_savings_cons = 'furniture_and_furnishings,_carpets_and_other_floor_coverings_incl._repairs' #313 if directory_name == 'clothing': scenarios = {'sufficiency_2025':76.08,'refuse_2025':5.7075,'share_2025':14.2875,'local_reuse_best_2025':9.13, 'local_reuse_worst_2025':4.54,'max_local_reuse_best_2025':10.25,'max_local_reuse_worst_2025':6.83} target_data = 'subset-HBS' idx_column_savings_cons = 'clothing' #248 if directory_name == 'energy': scenarios = {'efficient_devices':30,'renewable_energy':300} target_data = 'subset-HBS' idx_column_savings_cons = 'energy_of_principal_residence' #297 if directory_name == 'food': scenarios = {'avoidable_waste_saving':50} target_data = 'subset-HBS' idx_column_savings_cons = 'food_and_non_alcoholic_beverages' #97 #functions to make relevant sector-wise directories def make_pre_directory(outname,directory_name): outdir = 'preprocessing/'+directory_name if not os.path.exists(outdir): os.mkdir(outdir) fullname = os.path.join(outdir, outname) return fullname def make_pre_sub_directory(outname,directory_name,sub_dir): outdir = 'preprocessing/'+directory_name+'/'+sub_dir outdir1 = 'preprocessing/'+directory_name if not os.path.exists(outdir1): os.mkdir(outdir1) if not os.path.exists(outdir): os.mkdir(outdir) fullname = os.path.join(outdir, outname) return fullname def make_pre_sub_sub_directory(outname,directory_name,sub_dir,sub_sub_dir): outdir='preprocessing/'+directory_name+'/'+sub_dir+'/'+sub_sub_dir outdir1 = 'preprocessing/'+directory_name+'/'+sub_dir outdir2 = 'preprocessing/'+directory_name if not os.path.exists(outdir2): os.mkdir(outdir2) if not os.path.exists(outdir1): os.mkdir(outdir1) if not os.path.exists(outdir): os.mkdir(outdir) fullname = os.path.join(outdir, outname) return fullname
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1. Preprocessing

TOC:
- 1.1. Prepare training data
- 1.2. Deseasonalise
- 1.3. Normalize
- 1.4. Checks

1.1. Prepare training data back

1.1.1. Rename HBS columns
var_translate = pd.read_excel(seasonal_file_SI, sheet_name='translator', header=3, usecols=['habe_code', 'habe_eng_p', 'habe_eng', 'vcode', 'qcode']) var_translate['habe_eng'] = var_translate['habe_eng'].str.strip() var_translate['habe_eng'] = var_translate['habe_eng'].str.replace(' ', '_') var_translate['habe_eng'] = var_translate['habe_eng'].str.replace('-', '_') var_translate['habe_eng'] = var_translate['habe_eng'].str.replace('"', '') var_translate['habe_eng'] = var_translate['habe_eng'].str.lower() var_translate['habe_code'] = var_translate['habe_code'].str.lower() dict_translate = dict(zip(var_translate['habe_code'], var_translate['habe_eng'])) df_habe.rename(columns=dict_translate, inplace=True) dict_translate = dict(zip(var_translate['qcode'], var_translate['habe_eng'])) df_habe.rename(columns=dict_translate, inplace=True) df_habe_rename = df_habe.loc[:, ~df_habe.columns.duplicated()] pd.DataFrame.to_csv(df_habe_rename, 'preprocessing/0_habe_rename.csv', sep=',',index=False)
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.1.2. Inflation adjustment
df_habe_rename = pd.read_csv('preprocessing/0_habe_rename.csv') df_new = pd.merge(df_habe_rename, df_habe_month, on='haushaltid') pd.DataFrame.to_csv(df_new,'preprocessing/0_habe_rename_month.csv', sep=',',index=False) list_var_total = dependent_indices_pd_name.tolist() list_var_total.pop() # monetary variables inflation adjusted list_mon = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] list_year = [2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016] list_var_total = list_var_total + ["disposable_income", "total_expenditures"] # , 'infrequent_income'] df_inf = df_new for col in list_var_total: for year in list_year: for mon in list_mon: df_inf.loc[(df_inf['year'] == year) & (df_inf['month'] == mon), col] = \ df_inf.loc[(df_inf['year'] == year) & (df_inf['month'] == mon), col] / \ inf_index.loc[(inf_index['year'] == year) & (inf_index['month'] == mon), col].values * 100 pd.DataFrame.to_csv(df_inf, 'preprocessing/1_habe_inflation.csv', sep=',', index=False, encoding='utf-8')
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.1.3. Adapt the columns (optional - one hot encoding)
def new_columns(xx,directory_name): pd_df_saved = df_inf pd_df_saved.loc[:,'disposable_income'] = pd_df_saved['disposable_income'] - pd_df_saved.loc[:,xx] # pd_df_saved['total_expenditures'] = pd_df_saved['total_expenditures'] - pd_df_saved.iloc[:,313] fullname = make_pre_directory('1_habe_rename_new_columns.csv',directory_name) pd.DataFrame.to_csv(pd_df_saved,fullname, sep=',',index=False) return pd_df_saved df_habe_rename_saved = new_columns(idx_column_savings_cons,directory_name) # when redefining disposable income
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.1.4. Remove outliers
def remove_outliers(): df_outliers = df_habe_rename_saved # TODO if using the new definition of disposable income: use the df_habe_rename_saved # df_outliers = df_outliers[np.abs(stats.zscore(df_outliers['disposable_income']))<10] # df_outliers = df_outliers[np.abs(stats.zscore(df_outliers['saved_amount_(computed)']))<10] df_outliers = df_outliers[df_outliers['disposable_income'] >= 0] # simply keep all the 'sensible' disposable incomes # df_outliers = df_outliers[df_outliers['disposable_income'] <= 14800] # ADDED CRITERIA FOR REMOVING OUTLIERS OF THE DISP_INCOME # df_outliers = df_outliers[df_outliers['total_expenditures'] >= 0] # simply keep all the 'sensible' total_expenses df_outliers = df_outliers[df_outliers['saved_amount_(computed)'] >= 0] fullname = make_pre_directory('2_habe_rename_removeoutliers.csv',directory_name) pd.DataFrame.to_csv(df_outliers, fullname, sep=',', index=False) return df_outliers df_habe_outliers = remove_outliers() ## aggregate the data as per the categories def accumulate_categories_habe(df,new_column,file_name): list_dependent_columns = pd.read_csv(dependent_indices, delimiter=',', encoding='ISO-8859–1')['name'].to_list() list_dependent_columns_new = list_dependent_columns list_dependent_columns_new.append('disposable_income') list_dependent_columns_new.append(new_column) # Might not always need this df = df[list_dependent_columns_new] df = df.loc[:,~df.columns.duplicated()] #drop duplicates df[new_column] = df.iloc[:, [17]] df['income'] = df.iloc[:, [16]] df['food'] = df.iloc[:,[0,1,2]].sum(axis=1) df['misc'] = df.iloc[:,[3,4]].sum(axis=1) df['housing'] = df.iloc[:, [5, 6]].sum(axis=1) df['services'] = df.iloc[:, [7,8,9]].sum(axis=1) df['travel'] = df.iloc[:, [10,11,12, 13, 14]].sum(axis=1) df['savings'] = df.iloc[:, [15]] df = df[['income','food','misc','housing','services','travel','savings',new_column]] fullname = make_pre_directory(file_name,directory_name) pd.DataFrame.to_csv(df,fullname,sep=',',index= False) return df df_outliers = pd.read_csv('preprocessing/'+directory_name+'/2_habe_rename_removeoutliers.csv') df_habe_accumulate = accumulate_categories_habe(df_outliers,'month_name','2_habe_rename_removeoutliers_aggregated.csv')
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.2. Deseasonalising
- [Option 1] Clustering based on months
- [Option 2] Use month and period as independent variables

back

1.2.1. [Option 1] Create monthly datasets; plots, tables and statistical tests for the HABE monthly data (an illustrative seasonality check is sketched below)
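Before committing to month clusters, a quick way to check whether expenses really differ across survey months is a one-way ANOVA on a single expense column. This is only an illustrative sketch, not part of the original pipeline; it assumes the aggregated file written in step 1.1.4 exists and uses the `food` column as an example.

```python
import pandas as pd
import scipy.stats as stats

df_check = pd.read_csv('preprocessing/' + directory_name + '/2_habe_rename_removeoutliers_aggregated.csv')
# One group of food expenses per survey month
groups = [g['food'].values for _, g in df_check.groupby('month_name')]
f_stat, p_value = stats.f_oneway(*groups)
print('ANOVA on monthly food expenses: F = %.2f, p = %.4f' % (f_stat, p_value))
```

A small p-value suggests at least one month differs, which motivates the month clustering below.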
if option_deseason == 'deseasonal' : def split_month(): df_new = pd.read_csv('preprocessing/'+directory_name+'/2_habe_rename_removeoutliers.csv') df_month = df_new.groupby('month_name') for i in range(12): df_new_month=pd.DataFrame(list(df_month)[i][1]) df_new_month['month_name']=df_new_month['month_name'].astype('str') fullname=make_pre_sub_directory('3_habe_monthly_'+df_new_month.month_name.unique()[0]+'.csv', directory_name,option_deseason) pd.DataFrame.to_csv(df_new_month,fullname,sep=',', index = False) split_month() # Split the accumulated categories per month def split_month_accumulated(): df_new = pd.read_csv('preprocessing/'+directory_name+'/2_habe_rename_removeoutliers_aggregated.csv',sep=',') df_month = df_new.groupby('month_name') for i in range(12): df_new_month=pd.DataFrame(list(df_month)[i][1]) df_new_month['month_name']=df_new_month['month_name'].astype('str') fullname = make_pre_sub_directory('3_habe_monthly_'+df_new_month.month_name.unique()[0]+'_aggregated.csv', directory_name,option_deseason) pd.DataFrame.to_csv(df_new_month,fullname, sep=',', index = False) split_month_accumulated()
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.2.2. [Option 1] Making final clusters

USER INPUT NEEDED: edit the cluster-list below

TODO - join clusters based on the p-values calculated above directly (a possible sketch of this is shown below)
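One possible way to address the TODO above is to derive the clusters from pairwise Tukey HSD results instead of hard-coding them. The sketch below is a greedy merge and only an illustration of the idea; the `food` column and the `alpha` value are arbitrary choices, and the pair ordering relies on `tukey.groupsunique`.

```python
import pandas as pd
from itertools import combinations
from statsmodels.stats.multicomp import pairwise_tukeyhsd

df_check = pd.read_csv('preprocessing/' + directory_name + '/2_habe_rename_removeoutliers_aggregated.csv')
tukey = pairwise_tukeyhsd(endog=df_check['food'], groups=df_check['month_name'], alpha=0.05)

# Map each month pair to whether their means differ significantly
pairs = list(combinations(tukey.groupsunique, 2))
differ = dict(zip(pairs, tukey.reject))

# Greedy merge: a month joins a cluster only if it is not significantly
# different from any month already in that cluster
clusters = []
for month in tukey.groupsunique:
    for cluster in clusters:
        if not any(differ.get((a, month), differ.get((month, a), False)) for a in cluster):
            cluster.append(month)
            break
    else:
        clusters.append([month])
print(clusters)
```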
## current clusters are made based on the mean table above if option_deseason == 'deseasonal' : Cluster_month_lists = {1:('January',),2:('February','March','April'),3:('May','June','July'), 4:('August','September','October','November'),5:('December',)} cluster_number_length = len(Cluster_month_lists) for key in Cluster_month_lists: df1=[] df_sum=[] for i in range(0,len(Cluster_month_lists[key])): print(Cluster_month_lists[key]) df=pd.read_csv(make_pre_sub_directory('3_habe_monthly_{}'.format(Cluster_month_lists[key][i])+'.csv', directory_name,option_deseason)) df_sum.append(df.shape[0]) df1.append(df) df_cluster = pd.concat(df1) assert df_cluster.shape[0]==sum(df_sum) # to check if the conacting was done correctly pd.DataFrame.to_csv(df_cluster,make_pre_sub_directory('4_habe_monthly_cluster_'+str(key)+'.csv', directory_name,option_deseason),sep=',') # TODO: update this to move to the sub directory of deseaspnal files # cluster_number_length = len(Cluster_month_lists) # for i in list(range(1,cluster_number_length+1)): # accumulate_categories_habe(df,'number_of_persons_per_household','4_habe_monthly_cluster_'+str(i)+'_aggregated.csv')
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.2.3. Option 2: Month as independent variable
if option_deseason == 'month-ind' : cluster_number_length = 1 # do one-hot encoding for month and year hbs_all = pd.read_csv('preprocessing/'+directory_name+'/1_habe_rename_new_columns.csv') month_encoding = pd.get_dummies(hbs_all.month_name, prefix='month') year_encoding = pd.get_dummies(hbs_all.year, prefix='year') hbs_all_encoding = pd.concat([hbs_all, month_encoding.reindex(month_encoding.index)], axis=1) hbs_all_encoding = pd.concat([hbs_all_encoding, year_encoding.reindex(year_encoding.index)], axis=1) for key in scenarios: output_encoding = make_pre_sub_sub_directory('3_habe_for_all_scenarios_encoding.csv', directory_name,option_deseason,key) pd.DataFrame.to_csv(hbs_all_encoding,output_encoding,sep=',',index=False) month_name = month_encoding.columns.tolist() year_name = year_encoding.columns.tolist()
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.3. Normalisation back 1.3.1. Normalisation of HBS and target data
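The commented-out cell below applies the full normalisation to the pipeline files. As a compact, hedged sketch of the same scheme (log1p on the expense columns, 1st/99th-percentile min-max scaling on the socio-economic columns; the column lists are passed in by the caller):

```python
import numpy as np
import pandas as pd

def normalise(df, dependent_cols, independent_cols):
    out = df.copy()
    # log1p keeps zero expenses at zero and compresses heavy tails
    out[dependent_cols] = np.log1p(out[dependent_cols])
    for col in independent_cols:
        lo, hi = df[col].quantile(0.01), df[col].quantile(0.99)
        out[col] = (df[col] - lo) / (hi - lo)
    return out.fillna(0)
```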
# ## NORMALISATION # if input_normalise == 'normalise': # def normalise_habe(cluster): # transformer = FunctionTransformer(np.log1p, validate=True) # if option_deseason == 'deseasonal': # df_deseasonal_file = pd.read_csv('preprocessing/'+directory_name+ '/' + option_deseason + # '/4_habe_monthly_cluster_'+str(cluster)+'.csv', # delimiter=',') # if option_deseason == 'month-ind': # df_deseasonal_file = pd.read_csv('preprocessing/'+directory_name+ '/' + option_deseason + # '/3_habe_for_all_scenarios_encoding.csv',delimiter=',') # pd_df_new = df_deseasonal_file # for colsss in list_dependent_columns: # pd_df_new[[colsss]] = transformer.transform(df_deseasonal_file[[colsss]]) # for colsss in list_independent_columns: # min_colsss = df_deseasonal_file[[colsss]].quantile([0.01]).values[0] # max_colsss = df_deseasonal_file[[colsss]].quantile([0.99]).values[0] # pd_df_new[[colsss]] = (df_deseasonal_file[[colsss]] - min_colsss) / (max_colsss - min_colsss) # pd_df = pd_df_new[list_independent_columns+['haushaltid']+list_dependent_columns] # pd_df = pd_df.fillna(0) # fullname = make_pre_directory('4_habe_deseasonal_'+str(cluster)+'_'+str(option_deseason)+'_normalised.csv', # directory_name) # pd.DataFrame.to_csv(pd_df,fullname,sep=',',index=False) # if target_data == 'ABZ': # if input_normalise =='normalise': # def normalise_partner(i,key,option_deseason): # pd_df_partner = pd.read_csv('target_'+target_data+'.csv',delimiter=',') # df_complete = pd.read_csv('preprocessing/'+directory_name+'/2_habe_rename_removeoutliers.csv',delimiter=',') # pd_df_partner['disposable_income'] = pd_df_partner['disposable_income'] + i # for colsss in list_independent_columns: # min_colsss = df_complete[[colsss]].quantile([0.01]).values[0] # max_colsss = df_complete[[colsss]].quantile([0.99]).values[0] # pd_df_partner[[colsss]] = (pd_df_partner[[colsss]] - min_colsss) / (max_colsss - min_colsss) # # pd_df_partner = pd_df_partner[pd_df_partner.iloc[:,30]<=1] # # pd_df_partner = pd_df_partner[pd_df_partner.iloc[:,32]<=1] # # pd_df_partner = pd_df_partner[pd_df_partner.iloc[:,33]>=0] #todo remove rows with normalisation over the range # fullname = make_pre_sub_sub_directory('5_final_'+ target_data + '_independent_final_'+str(i)+'.csv', # directory_name,option_deseason,key) # pd.DataFrame.to_csv(pd_df_partner,fullname,sep=',',index=False) # return pd_df_partner
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.3.2. Preprocessing without normalisation
if input_normalise == 'no-normalise': def normalise_habe(cluster): transformer = FunctionTransformer(np.log1p, validate=True) if option_deseason == 'deseasonal': df_deseasonal_file = pd.read_csv('preprocessing/'+directory_name+ '/' + option_deseason + '/4_habe_monthly_cluster_'+str(cluster)+'.csv', delimiter=',') if option_deseason == 'month-ind': df_deseasonal_file = pd.read_csv('preprocessing/'+directory_name+ '/' + str(option_deseason) + '/' + str(key) + '/3_habe_for_all_scenarios_encoding.csv',delimiter=',') pd_df_new = df_deseasonal_file pd_df = pd_df_new[list_independent_columns+['haushaltid']+list_dependent_columns] pd_df = pd_df.fillna(0) fullname = make_pre_sub_directory('4_habe_deseasonal_'+str(cluster)+'_short.csv', directory_name,option_deseason) pd.DataFrame.to_csv(pd_df,fullname,sep=',',index=False) for i in list(range(1,cluster_number_length+1)): df_normalise_habe_file = normalise_habe(i) ## Collecting the independent and dependent datasets def truncate_all(key): if option_deseason == 'deseasonal': df_seasonal_normalised = pd.read_csv('preprocessing/'+directory_name+'/2_habe_rename_removeoutliers.csv', delimiter=',', error_bad_lines=False) if option_deseason == 'month-ind': df_seasonal_normalised = pd.read_csv('preprocessing/'+directory_name+ '/' + str(option_deseason) + '/' + str(key) + '/3_habe_for_all_scenarios_encoding.csv',delimiter=',') df_habe_imputed_clustered_d = df_seasonal_normalised[list_dependent_columns] df_habe_imputed_clustered_i = df_seasonal_normalised[list_independent_columns] fullname_d = make_pre_sub_sub_directory('raw_dependent.csv',directory_name,option_deseason,key) fullname_in = make_pre_sub_sub_directory('raw_independent.csv',directory_name,option_deseason,key) pd.DataFrame.to_csv(df_habe_imputed_clustered_d,fullname_d,sep=',',index=False) pd.DataFrame.to_csv(df_habe_imputed_clustered_i,fullname_in,sep=',',index=False) for key in scenarios: truncate_all(key) ## NORMALISATION if target_data == 'subset-HBS': def normalise_partner(i,key,option_deseason): N = 300 # TODO pass this as an argument when chosing subset of HBS pd_df_partner = pd.read_csv('preprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/raw_independent.csv', delimiter=',', error_bad_lines=False) pd_df_partner = pd_df_partner.sample(frac=0.4, replace=True, random_state=1) pd_df_partner['disposable_income'] = pd_df_partner['disposable_income']+i fullname = make_pre_sub_sub_directory('5_final_'+ target_data + '_independent_final_'+str(i)+'.csv', directory_name,option_deseason,key) pd.DataFrame.to_csv(pd_df_partner,fullname,sep=',',index=False) return pd_df_partner if target_data == 'ABZ': if input_normalise =='no-normalise': def normalise_partner(i,key,option_deseason): pd_df_partner = pd.read_csv('raw_data/target_'+target_data+'.csv',delimiter=',') df_complete = pd.read_csv('preprocessing/'+directory_name+'/2_habe_rename_removeoutliers.csv',delimiter=',') pd_df_partner['disposable_income'] = pd_df_partner['disposable_income'] - i # pd_df_partner = pd_df_partner[pd_df_partner.iloc[:,30]<=1] # pd_df_partner = pd_df_partner[pd_df_partner.iloc[:,32]<=1] # pd_df_partner = pd_df_partner[pd_df_partner.iloc[:,33]>=0] #todo remove rows with normalisation over the range fullname = make_pre_sub_sub_directory('5_final_'+ target_data + '_independent_final_'+str(i)+'.csv', directory_name,option_deseason,key) pd.DataFrame.to_csv(pd_df_partner,fullname,sep=',',index=False) return pd_df_partner for key in scenarios: list_incomechange=[0,scenarios[key]] for i in list_incomechange: 
df_normalise_partner_file = normalise_partner(i,key,option_deseason)
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
1.4. Checks back
if input_normalise =='normalise': def truncate(cluster_number): if option_deseason == 'deseasonal': df_seasonal_normalised = pd.read_csv('preprocessing/'+directory_name+ '/' + option_deseason + '/4_habe_deseasonal_'+str(cluster_number)+'_normalised.csv', delimiter=',', error_bad_lines=False) if option_deseason == 'month-ind': df_seasonal_normalised = pd.read_csv('preprocessing/'+directory_name+ '/' + str(option_deseason) + '/' + str(key) + '/3_habe_for_all_scenarios_encoding.csv',delimiter=',') df_habe_imputed_clustered_d = df_seasonal_normalised[list_dependent_columns] df_habe_imputed_clustered_dl = np.expm1(df_habe_imputed_clustered_d) df_habe_imputed_clustered_i = df_seasonal_normalised[list_independent_columns] fullname_dl = make_pre_sub_sub_directory('raw_dependent_old_'+str(cluster_number)+'.csv',directory_name, 'checks',option_deseason) fullname_d = make_pre_sub_sub_directory('raw_dependent_'+str(cluster_number)+'.csv',directory_name, 'checks',option_deseason) fullname_in = make_pre_sub_sub_directory('raw_independent_'+str(cluster_number)+'.csv',directory_name, 'checks',option_deseason) pd.DataFrame.to_csv(df_habe_imputed_clustered_dl,fullname_dl,sep=',',index=False) pd.DataFrame.to_csv(df_habe_imputed_clustered_d,fullname_d,sep=',',index=False) pd.DataFrame.to_csv(df_habe_imputed_clustered_i,fullname_in,sep=',',index=False) if input_normalise =='no-normalise': def truncate(cluster_number): if option_deseason == 'deseasonal': df_seasonal_normalised = pd.read_csv('preprocessing/'+directory_name+ '/' + option_deseason + '/4_habe_deseasonal_'+str(cluster_number)+'_short.csv', delimiter=',', error_bad_lines=False) if option_deseason == 'month-ind': df_seasonal_normalised = pd.read_csv('preprocessing/'+directory_name+ '/' + str(option_deseason) + '/' + str(key) + '/3_habe_for_all_scenarios_encoding.csv',delimiter=',') df_habe_imputed_clustered_d = df_seasonal_normalised[list_dependent_columns] df_habe_imputed_clustered_i = df_seasonal_normalised[list_independent_columns] fullname_d = make_pre_sub_sub_directory('raw_dependent_'+str(cluster_number)+'.csv',directory_name, 'checks',option_deseason) fullname_in = make_pre_sub_sub_directory('raw_independent_'+str(cluster_number)+'.csv',directory_name, 'checks',option_deseason) pd.DataFrame.to_csv(df_habe_imputed_clustered_d,fullname_d,sep=',',index=False) pd.DataFrame.to_csv(df_habe_imputed_clustered_i,fullname_in,sep=',',index=False) for i in list(range(1,cluster_number_length+1)): truncate(i)
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
2. MODEL back

TOC:
- 2.1. Prepare train-test-target datasets
- 2.2. Prediction

2.1. Prepare train-test-target datasets back
def to_haushalts(values,id_ix=0): haushalts = dict() haushalt_ids = np.unique(values[:,id_ix]) for haushalt_id in haushalt_ids: selection = values[:, id_ix] == haushalt_id haushalts[haushalt_id] = values[selection] return haushalts def split_train_test(haushalts,length_training,month_name,row_in_chunk): train, test = list(), list() cut_point = int(0.8*length_training) # 0.9*9754 # declare cut_point as per the size of the imputed database #TODO check if this is too less print('Month/cluster and cut_point',month_name, cut_point) for k,rows in haushalts.items(): train_rows = rows[rows[:,row_in_chunk] < cut_point, :] test_rows = rows[rows[:,row_in_chunk] > cut_point, :] train.append(train_rows[:, :]) test.append(test_rows[:, :]) return train, test ### NORMALISATION if input_normalise =='normalise': def df_habe_train_test(df,month_name,length_training): df=df.assign(id_split = list(range(df.shape[0]))) train, test = split_train_test(to_haushalts(df.values),length_training,month_name,row_in_chunk=df.shape[1]-1) train_rows = np.array([row for rows in train for row in rows]) test_rows = np.array([row for rows in test for row in rows]) independent = list(range(0,independent_indices_pd.shape[0])) dependent = list(range(independent_indices_pd.shape[0]+1, independent_indices_pd.shape[0]+dependent_indices_pd.shape[0]+1)) trained_independent = train_rows[:, independent] trained_dependent = train_rows[:, dependent] test_independent = test_rows[:, independent] test_dependent = test_rows[:, dependent] ## OPTIONAL lines FOR CHECK - comment if not needed np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/trained_dependent_nonexp.csv', trained_dependent, delimiter=',') np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/trained_dependent.csv', np.expm1(trained_dependent),delimiter=',') np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/trained_independent.csv', trained_independent, delimiter=',') np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/test_dependent.csv', np.expm1(test_dependent), delimiter=',') np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/test_independent.csv', test_independent, delimiter=',') return trained_independent,trained_dependent,test_independent,test_dependent def df_partner_test(y): df_partner = pd.read_csv('preprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/5_final_' + target_data + '_independent_final_' + str(y) + '.csv',delimiter=',') length_training = df_partner.shape[0] train_partner, test_partner = split_train_test(to_haushalts(df_partner.values),length_training,month_name,1) train_rows_partner = np.array([row for rows in train_partner for row in rows]) new_independent = list(range(0, n_ind)) # number of columns of the independent parameters train_partner_independent = train_rows_partner[:, new_independent] ### Optional lines for CHECK - comment if not needed np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/train_partner_independent_' + model_name + '_' + str(y) + '.csv', train_partner_independent, delimiter=',') return train_partner_independent ## form the train test datasets # NO-NORMALISATION if input_normalise =='no-normalise': def df_habe_train_test(df,month_name,length_training): df=df.assign(id_split = list(range(df.shape[0]))) train, test = split_train_test(to_haushalts(df.values),length_training,month_name,row_in_chunk=df.shape[1]-1) train_rows = np.array([row for rows in train for row in rows]) test_rows = np.array([row for rows 
in test for row in rows]) independent = list(range(0,independent_indices_pd.shape[0])) dependent = list(range(independent_indices_pd.shape[0]+1, independent_indices_pd.shape[0]+dependent_indices_pd.shape[0]+1)) trained_independent = train_rows[:, independent] trained_dependent = train_rows[:, dependent] test_independent = test_rows[:, independent] test_dependent = test_rows[:, dependent] ## OPTIONAL lines FOR CHECK - comment if not needed # np.savetxt('raw/checks/trained_dependent_nonexp_'+str(month_name)+'.csv', trained_dependent, delimiter=',') # np.savetxt('raw/checks/trained_independent_nonexp_'+str(month_name)+'.csv', trained_independent, delimiter=',') np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/test_dependent_'+str(month_name)+'.csv', test_dependent,delimiter=',') np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/test_independent_'+str(month_name)+'.csv', test_independent, delimiter=',') return trained_independent,trained_dependent,test_independent,test_dependent def df_partner_test(y): df_partner = pd.read_csv('preprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/5_final_' + target_data + '_independent_final_' + str(y) + '.csv', delimiter=',') length_training = df_partner.shape[0] train_partner, test_partner = split_train_test(to_haushalts(df_partner.values), length_training,cluster_number,1) train_rows_partner = np.array([row for rows in train_partner for row in rows]) new_independent = list(range(0, n_ind)) train_partner_independent = train_rows_partner[:, new_independent] ### Optional lines for CHECK - comment if not needed np.savetxt('preprocessing/'+directory_name+'/checks/'+option_deseason+'/train_partner_independent_' + model_name + '_' + str(y) + '.csv', train_partner_independent, delimiter=',') return train_partner_independent def make_post_directory(outname,directory_name): outdir = 'postprocessing/'+directory_name if not os.path.exists(outdir): os.mkdir(outdir) fullname = os.path.join(outdir, outname) return fullname def make_post_sub_directory(outname,directory_name,sub_dir): outdir_1='postprocessing/'+directory_name if not os.path.exists(outdir_1): os.mkdir(outdir_1) outdir = 'postprocessing/'+directory_name+'/'+sub_dir if not os.path.exists(outdir): os.mkdir(outdir) fullname = os.path.join(outdir, outname) return fullname def make_post_sub_sub_directory(outname,directory_name,sub_dir,sub_sub_dir): outdir_1='postprocessing/'+directory_name if not os.path.exists(outdir_1): os.mkdir(outdir_1) outdir = 'postprocessing/'+directory_name+'/'+sub_dir if not os.path.exists(outdir): os.mkdir(outdir) outdir_2='postprocessing/'+directory_name+'/'+sub_dir+'/'+sub_sub_dir if not os.path.exists(outdir_2): os.mkdir(outdir_2) fullname = os.path.join(outdir_2, outname) return fullname # FOR NO NORMALISATION AND TEST DATA def df_test(y,cluster_number): pd_df_partner = pd.read_csv('raw/checks/trained_independent_'+str(cluster_number)+'.csv', delimiter=',', header = None) pd_df_partner.iloc[:,-1] = pd_df_partner.iloc[:,-1] + y pd.DataFrame.to_csv(pd_df_partner, 'raw/checks/5_trained_independent_'+str(cluster_number)+'_'+str(y)+'.csv', sep=',',index=False) return pd_df_partner def df_stratified_test(y): pd_df_partner = pd.read_csv('raw/checks/5_setstratified_independent_1_'+str(y)+'.csv', delimiter=',') return pd_df_partner #If using Neural Networks # def ANN(): # nn = Sequential() # nn.add(Dense(39,kernel_initializer='normal',activation="relu",input_shape=(39,))) # nn.add(Dense(50,kernel_initializer='normal',activation="relu")) # 
nn.add(Dense(100,kernel_initializer='normal',activation="relu")) # nn.add(Dense(100,kernel_initializer='normal',activation="relu") ) # # nn.add(Dense(100,kernel_initializer='normal',activation="relu")) # # nn.add(Dense(100,kernel_initializer='normal',activation="relu")) # nn.add(Dense(dependentsize,kernel_initializer='normal')) #,kernel_constraint=min_max_norm(min_value=0.01,max_value=0.05))) # sgd = optimizers.SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True) # nn.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy']) # return nn
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
2.2. Clustered Prediction back
## NORMALISATION if input_normalise =='normalise': def fit_predict_cluster(i,y,cluster_number,key): df = pd.read_csv('preprocessing/'+directory_name+'/'+option_deseason+ '/4_habe_deseasonal_'+str(cluster_number)+'_normalised.csv', delimiter=',',error_bad_lines=False, encoding='ISO-8859–1') length_training = df.shape[0] trained_independent, trained_dependent, test_independent, test_dependent = df_habe_train_test(df, str(cluster_number), length_training) train_partner_independent = df_partner_test(y) if model_name == 'ANN': estimator = KerasRegressor(build_fn=ANN) estimator.fit(trained_independent, trained_dependent, epochs=100, batch_size=5, verbose=0) ### PREDICTION FROM HERE prediction_nn = estimator.predict(train_partner_independent) prediction_nn_denormalised = np.expm1(prediction_nn) fullname = make_post_sub_sub_directory('predicted_' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) + '.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn_denormalised, delimiter=',') ### TEST PREDICTION prediction_nn_test = estimator.predict(test_independent) prediction_nn_test_denormalised = np.expm1(prediction_nn_test) fullname = make_post_sub_sub_directory('predicted_test' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) + '.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn_test_denormalised, delimiter=',') ### CROSS VALIDATION FROM HERE kfold = KFold(n_splits=10, random_state=12, shuffle=True) results1 = cross_val_score(estimator, test_independent, test_dependent, cv=kfold) print("Results_test: %.2f (%.2f)" % (results1.mean(), results1.std())) if model_name == 'RF': estimator = sko.MultiOutputRegressor(RandomForestRegressor(n_estimators=100, max_features=n_ind, random_state=30)) estimator.fit(trained_independent, trained_dependent) ### PREDICTION FROM HERE prediction_nn = estimator.predict(train_partner_independent) results0 = estimator.oob_score prediction_nn_denormalised = np.expm1(prediction_nn) fullname = make_post_sub_sub_directory('predicted_' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) + '.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn_denormalised, delimiter=',') ### TEST PREDICTION prediction_nn_test = estimator.predict(test_independent) prediction_nn_test_denormalised = np.expm1(prediction_nn_test) fullname = make_post_sub_sub_directory('predicted_test' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) + '.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn_test_denormalised, delimiter=',') #### CROSS VALIDATION FROM HERE kfold = KFold(n_splits=10, random_state=12, shuffle=True) # results0 = estimator.oob_score # results1 = cross_val_score(estimator, test_independent, test_dependent, cv=kfold) results2 = r2_score(test_dependent,prediction_nn_test) results3 = mean_squared_error(test_dependent,prediction_nn_test) results4 = explained_variance_score(test_dependent,prediction_nn_test) # print("cross_val_score: %.2f (%.2f)" % (results1.mean(), results1.std())) # print("oob_r2_score: %.2f " % results0) print("r2_score: %.2f " % results2) print("mean_squared_error: %.2f " % results3) print("explained_variance_score: %.2f " % results4) ### FOR NO NORMALISATION if input_normalise =='no-normalise': def fit_predict_cluster(i,y,cluster_number,key): df_non_normalised = pd.read_csv('preprocessing/'+directory_name+'/'+option_deseason+'/4_habe_deseasonal_'+ str(cluster_number)+ '_short.csv', delimiter=',', 
error_bad_lines=False, encoding='ISO-8859–1') length_training = df_non_normalised.shape[0] print(length_training) trained_independent, trained_dependent, test_independent, test_dependent = df_habe_train_test(df_non_normalised, str(cluster_number), length_training) train_partner_independent = df_partner_test(y) ### Additional for the HBS test data subset # test_new_independent = df_test(y,1) # chosing just one cluster here # sratified_independent = df_stratified_test(y) if model_name == 'ANN': estimator = KerasRegressor(build_fn=ANN) estimator.fit(trained_independent, trained_dependent, epochs=100, batch_size=5, verbose=0) ### PREDICTION FROM HERE prediction_nn = estimator.predict(train_partner_independent) fullname = make_post_sub_sub_directory('predicted_' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) +'.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn, delimiter=',') ### TEST PREDICTION prediction_nn_test = estimator.predict(test_independent) fullname = make_post_sub_sub_directory('predicted_test_' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) +'.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn_test, delimiter=',') ### CROSS VALIDATION FROM HERE kfold = KFold(n_splits=10, random_state=12, shuffle=True) results1 = cross_val_score(estimator, test_independent, test_dependent, cv=kfold) print("Results_test: %.2f (%.2f)" % (results1.mean(), results1.std())) if model_name == 'RF': estimator = sko.MultiOutputRegressor(RandomForestRegressor(n_estimators=100, max_features=n_ind, random_state=30)) estimator.fit(trained_independent, trained_dependent) ### FEATURE IMPORTANCE rf = RandomForestRegressor() rf.fit(trained_independent, trained_dependent) FI = rf.feature_importances_ list_independent_columns = pd.read_csv(independent_indices, delimiter=',', encoding='ISO-8859–1')['name'].to_list() independent_columns = pd.DataFrame(list_independent_columns) FI_names = pd.DataFrame(FI) FI_names = pd.concat([independent_columns, FI_names], axis=1) FI_names.columns = ['independent_variables', 'FI_score'] pd.DataFrame.to_csv(FI_names,'preprocessing/'+directory_name+'/8_habe_feature_importance'+ '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) +'.csv', sep=',',index= False) FI_names_sorted = FI_names.sort_values('FI_score', ascending = False) # print(FI_names_sorted) ### PREDICTION FROM HERE prediction_nn = estimator.predict(train_partner_independent) fullname = make_post_sub_sub_directory('predicted_' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) +'.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn, delimiter=',') ### TEST PREDICTION prediction_nn_test = estimator.predict(test_independent) fullname = make_post_sub_sub_directory('predicted_test_' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) +'.csv',directory_name,option_deseason,key) np.savetxt(fullname, prediction_nn_test, delimiter=',') #### CROSS VALIDATION FROM HERE kfold = KFold(n_splits=10, random_state=12, shuffle=True) for i in range(16): column_predict = pd.DataFrame(test_dependent).iloc[:,i] model = sm.OLS(column_predict, test_independent).fit() print(i) print('standard error=',model.bse) # results0 = estimator.oob_score # results1 = cross_val_score(estimator, test_independent, test_dependent, cv=kfold) # results2 = r2_score(test_dependent,prediction_nn_test) # results3 = mean_squared_error(test_dependent,prediction_nn_test) # results4 = 
explained_variance_score(test_dependent,prediction_nn_test) # print("cross_val_score: %.2f (%.2f)" % (results1.mean(), results1.std())) # print("oob_r2_score: %.2f " % results0) print("r2_score: %.2f " % results2) print("mean_squared_error: %.2f " % results3) print("explained_variance_score: %.2f " % results4) # CLUSTER of MONTHS - PREDICTIONS for cluster_number in list(range(1,cluster_number_length+1)): print(cluster_number) for j in range(0, iter_n): for key in scenarios: list_incomechange=[0,scenarios[key]] for y in list_incomechange: fit_predict_cluster(j,y,cluster_number,key)
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
3. POSTPROCESSING back

3.1. Average of the clustered predictions
if option_deseason == 'month-ind': df_habe_outliers = pd.read_csv('preprocessing/'+directory_name+'/'+option_deseason+'/4_habe_deseasonal_'+ str(cluster_number)+ '_short.csv', delimiter=',') if option_deseason == 'deseasonal': df_habe_outliers = pd.read_csv('preprocessing/'+directory_name+'/2_habe_rename_removeoutliers.csv', delimiter=',') model_name = 'RF' def average_pandas_cluster(y,cluster_number,key): df_all = [] df_trained_partner = pd.read_csv('preprocessing/'+directory_name+'/checks/'+option_deseason+'/train_partner_independent_'+ model_name+'_'+str(y)+'.csv') for i in range(0,iter_n): df = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_' + str(y) + '_' + str(i) + '_' + str(cluster_number) + '.csv', delimiter = ',', header=None) df_all.append(df) glued = pd.concat(df_all, axis=1, keys=list(map(chr,range(97,97+iter_n)))) glued = glued.swaplevel(0, 1, axis=1) glued = glued.groupby(level=0, axis=1).mean() glued_new = glued.reindex(columns=df_all[0].columns) max_income = df_habe_outliers[['disposable_income']].quantile([0.99]).values[0] min_income = df_habe_outliers[['disposable_income']].quantile([0.01]).values[0] glued_new['income'] = df_trained_partner[df_trained_partner.columns[-1]] pd.DataFrame.to_csv(glued_new, 'postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_' + str(y) + '_'+str(cluster_number)+'.csv', sep=',',header=None,index=False) for key in scenarios: list_incomechange=[0,scenarios[key]] for y in list_incomechange: for cluster_number in list(range(1,cluster_number_length+1)): average_pandas_cluster(y,cluster_number,key) def accumulate_categories_cluster(y,cluster_number): df_income = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_' + str(y) + '_'+str(cluster_number)+'.csv', sep=',',header=None) # df_income['household_size'] = df_income.iloc[:, [17]] df_income['income'] = df_income.iloc[:, [16]] df_income['food'] = df_income.iloc[:,[0,1,2]].sum(axis=1) df_income['misc'] = df_income.iloc[:,[3,4]].sum(axis=1) df_income['housing'] = df_income.iloc[:, [5, 6]].sum(axis=1) df_income['services'] = df_income.iloc[:, [7, 8, 9 ]].sum(axis=1) df_income['travel'] = df_income.iloc[:, [10, 11, 12, 13, 14]].sum(axis=1) df_income['savings'] = df_income.iloc[:, [15]] df_income = df_income[['income','food','misc','housing','services','travel','savings']] pd.DataFrame.to_csv(df_income, 'postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_' + str(y) + '_'+str(cluster_number)+'_aggregated.csv', sep=',',index=False) return df_income for key in scenarios: list_incomechange=[0,scenarios[key]] for y in list_incomechange: for cluster_number in list(range(1,cluster_number_length+1)): accumulate_categories_cluster(y,cluster_number) # aggregation of clusters list_dfs_month=[] for key in scenarios: list_incomechange=[0,scenarios[key]] for y in list_incomechange: for cluster_number in list(range(1,cluster_number_length+1)): pd_predicted_month = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_' + str(y) + '_'+str(cluster_number)+'_aggregated.csv', delimiter = ',') list_dfs_month.append(pd_predicted_month) df_concat = pd.concat(list_dfs_month,sort=False) by_row_index = df_concat.groupby(df_concat.index) df_means = by_row_index.mean() pd.DataFrame.to_csv(df_means,'postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_' + 
str(y) + '_' + str(dependentsize) +'_aggregated.csv', sep=',',index=False)
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
3.2. Calculate differences/ rebounds
list_dependent_columns = pd.read_csv(dependent_indices, delimiter=',', encoding='ISO-8859–1')['name'].to_list() def difference_new(): for cluster_number in list(range(1,cluster_number_length+1)): for key in scenarios: list_incomechange=[0,scenarios[key]] for i in range(0,iter_n): df_trained_partner = pd.read_csv('preprocessing/'+directory_name+'/checks/'+'/'+option_deseason+'/train_partner_independent_'+ model_name+'_'+str(y)+'.csv',header=None) df_500 = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_' +str(list_incomechange[1])+ '_'+str(i) + '_'+str(cluster_number)+'.csv', delimiter=',',header=None) df_0 = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_0_' + str(i) + '_'+str(cluster_number)+ '.csv', delimiter=',',header=None) df_500.columns = list_dependent_columns df_0.columns = df_500.columns df_diff = -df_500+df_0 if option_deseason == 'month-ind': df_diff['disposable_income']=df_trained_partner[df_trained_partner.columns[-25]] if option_deseason == 'deseasonal': df_diff['disposable_income']=df_trained_partner[df_trained_partner.columns[-1]] pd.DataFrame.to_csv(df_diff,'postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_' + model_name + '_rebound_'+str(i)+ '_' + str(cluster_number) + '.csv',sep=',',index=False) difference_new() def average_clusters(key): df_all = [] for i in range(0,iter_n): df = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_'+ model_name + '_rebound_' + str(i)+ '_' + str(cluster_number)+'.csv',delimiter=',',index_col=None) df_all.append(df) df_concat = pd.concat(df_all,sort=False) by_row_index = df_concat.groupby(df_concat.index) df_means = by_row_index.mean() pd.DataFrame.to_csv(df_means, 'postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_'+model_name +'_rebound.csv', sep=',',index=False) for key in scenarios: average_clusters(key) def accumulate_categories(key): df_income = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_'+model_name+ '_rebound.csv',delimiter=',') # df_income['household_size'] = df_income.iloc[:, [17]] df_income['income'] = df_income.iloc[:, [16]] df_income['food'] = df_income.iloc[:,[0,1,2]].sum(axis=1) df_income['misc'] = df_income.iloc[:,[3,4]].sum(axis=1) df_income['housing'] = df_income.iloc[:, [5, 6]].sum(axis=1) df_income['services'] = df_income.iloc[:, [7, 8, 9]].sum(axis=1) df_income['travel'] = df_income.iloc[:, [10, 11, 12,13, 14]].sum(axis=1) df_income['savings'] = df_income.iloc[:, [15]] df_income = df_income[['income','food','misc','housing','services','travel','savings']]#'transfers','total_sum' data[key]=list(df_income.mean()) if list(scenarios.keys()).index(key) == len(scenarios)-1: df = pd.DataFrame(data, columns = [key for key in scenarios], index=['income','food','misc','housing','services','travel','savings']) print(df) pd.DataFrame.to_csv(df.T, 'postprocessing/rebound_results_'+directory_name+ '_income.csv', sep=',',index=True) pd.DataFrame.to_csv(df_income, 'postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_'+model_name+ '_rebound_aggregated.csv', sep=',',index=False) data={} for key in scenarios: accumulate_categories(key) groups=('<2000','2000-4000','4000-6000','6000-8000','8000-10000','>10000') def income_group(row): if row['disposable_income'] <= 2000: return groups[0] if row['disposable_income'] <= 4000: return groups[1] if row['disposable_income'] <= 6000: return 
groups[2] if row['disposable_income'] <= 8000: return groups[3] if row['disposable_income'] <= 10000: return groups[4] if row['disposable_income'] > 10000: return groups[5] def accumulate_income_groups(): df_income = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_'+model_name+ '_rebound.csv', delimiter=',') df_income['income_group'] = df_income.apply(lambda row: income_group(row), axis=1) df_income_new = df_income.groupby(['income_group']).mean() pd.DataFrame.to_csv(df_income_new,'postprocessing/rebound_results_'+directory_name+ '_income_categories.csv', sep=',',index=True) pd.DataFrame.to_csv(df_income, 'postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_'+model_name+ '_rebound_income.csv', sep=',',index=False) accumulate_income_groups() groups=('<2000','2000-4000','4000-6000','6000-8000','8000-10000','>10000') def income_group(row): if row['income'] <= 2000 : return groups[0] if row['income'] <= 4000: return groups[1] if row['income'] <= 6000: return groups[2] if row['income'] <= 8000: return groups[3] if row['income'] <= 10000: return groups[4] if row['income'] > 10000: return groups[5] def accumulate_income_groups_new(): df_income = pd.read_csv('postprocessing/'+directory_name+'/'+option_deseason+'/'+key+'/predicted_'+model_name+ '_rebound_aggregated.csv', delimiter=',') print(df_income.columns) df_income['income_group'] = df_income.apply(lambda row: income_group(row), axis=1) df_income_new = df_income.groupby(['income_group']).mean() pd.DataFrame.to_csv(df_income_new,'postprocessing/rebound_results_'+directory_name+ '_categories.csv', sep=',',index=True) accumulate_income_groups_new()
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
4. LCA back

1. Make a file with the associated impacts_per_FU for each HABE category:
    - a. Get the ecoinvent data from brightway
    - b. Get the exiobase data from a direct file (Livia's)
    - c. Attach the heia and Agribalyse values
2. Convert the impact_per_FU to impact_per_expense (a hypothetical sketch of this step is given below)
3. Run the following scripts to
    - (a) allocate the income category to each household in the HBS (train data) and ABZ (target data)
    - (b) calculate the environmental impact per consumption main-category per income group as listed in raw/dependent_10.csv, using
        - (1) from the HBS: the % of expense of each consumption sub-category per consumption main-category as listed in raw/dependent_10.csv
        - (2) the expenses per FU of each consumption sub-category
    - (c) from the target data: multiply the rebound results (consumption expenses) with the environmental impact values above, based on the income of the household

OR use A. Kim's analysis here: https://github.com/aleksandra-kim/consumption_model for the calculation of impacts_per_FU for each HABE category
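Step 2 above (converting impact per functional unit into impact per CHF of expense) is not fully scripted here. A hypothetical sketch, assuming a price-per-FU table exists for each category (the file name and column names below are assumptions, not actual project files):

```python
import pandas as pd

impacts = pd.read_csv('LCA/impacts_per_FU.csv', names=['category', 'GWP_per_FU'])
prices = pd.read_csv('LCA/prices_per_FU.csv', names=['category', 'CHF_per_FU'])  # hypothetical file

merged = impacts.merge(prices, on='category')
merged['GWP_per_CHF'] = merged['GWP_per_FU'] / merged['CHF_per_FU']
merged.to_csv('LCA/impacts_per_expense_sketch.csv', index=False)
```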
import pickle import csv file = open('LCA/contribution_scores_sectors_allfu1.pickle','rb') x = pickle.load(file) print(x) with open('LCA/impacts_per_FU_sectors.csv', 'w') as output: writer = csv.writer(output) for key, value in x: writer.writerow([key, value]) import pickle import csv import pickle import csv file = open('LCA/contribution_scores_5categories_allfu1.pickle','rb') x = pickle.load(file) print(x) with open('LCA/impacts_per_FU.csv', 'w') as output: writer = csv.writer(output) for key, value in x.items(): writer.writerow([key, value]) file = open('LCA/contribution_scores_v2.pickle','rb') x1 = pickle.load(file) with open('LCA/impacts_per_FU.csv', 'w') as output: writer = csv.writer(output) for key, value in x1.items(): writer.writerow([key, value]) file = open('LCA/contribution_scores_sectors_allfu1.pickle','rb') x = pickle.load(file) with open('LCA/impacts_per_FU_sectors.csv', 'w') as output: writer = csv.writer(output) for key, value in x.items(): writer.writerow([key, value]) import pandas as pd ## TODO use the manually updated CHF/FU to calculate the income per expense df_expense = pd.read_csv('LCA/impacts_per_expense.csv',sep=',',index_col='sector') df_income_CHF = pd.read_csv('postprocessing/rebound_results_'+directory_name+ '_income.csv',sep=',') for i in ['food','travel','housing','food','misc','services']: df_income_CHF[i+'_GHG']=df_expense.loc[i,'Average of GWP/CHF']*df_income_CHF[i] pd.DataFrame.to_csv(df_income_CHF,'postprocessing/rebound_results_'+directory_name+ '_income_all_GHG.csv',sep=',')
_____no_output_____
BSD-3-Clause
rebound_model.ipynb
rhythimashinde/rebound
![CMCC](http://cmcc.ufabc.edu.br/images/logo_site.jpg) **Linear Regression** This notebook shows a basic implementation of linear regression and the use of PySpark's [MLlib](http://spark.apache.org/docs/1.4.0/api/python/pyspark.ml.html) library for a regression task on the [Million Song Dataset](http://labrosa.ee.columbia.edu/millionsong/) from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/YearPredictionMSD). Our goal is to predict the year of a song from its audio features.

**In this notebook:**
+ *Part 1:* Reading and parsing the dataset
    + *Visualization 1:* Features
    + *Visualization 2:* Shifting the variables of interest
+ *Part 2:* Building a baseline predictor
    + *Visualization 3:* Predicted vs. actual values
+ *Part 3:* Training and evaluating a linear regression model
    + *Visualization 4:* Training error
+ *Part 4:* Training with MLlib and tuning the hyperparameters
    + *Visualization 5:* Predictions of the best model
    + *Visualization 6:* Hyperparameter heat map
+ *Part 5:* Adding feature interactions
+ *Part 6:* Applying the model to the San Francisco Crime dataset

For reference, see the relevant PySpark methods in [Spark's Python API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) and the NumPy methods in the [NumPy Reference](http://docs.scipy.org/doc/numpy/reference/index.html)

**Part 1: Reading and parsing the dataset**

**(1a) Checking the available data**

The data we will use are stored in a text file. As a first step we load the text data into an RDD and inspect its formatting. Change the second cell to check how many samples exist in this dataset using the [count method](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.count). Note that the label of this dataset is the first field of each record, representing the year.
from pyspark import SparkContext
sc = SparkContext.getOrCreate()

# load the data set
import os.path
fileName = os.path.join('Data', 'millionsong.txt')

numPartitions = 2
rawData = sc.textFile(fileName, numPartitions)

# EXERCISE
numPoints = rawData.count()
print(numPoints)
samplePoints = rawData.take(5)
print(samplePoints)

# TEST Load and check the data (1a)
assert numPoints == 6724, 'incorrect value for numPoints'
print("OK")
assert len(samplePoints) == 5, 'incorrect length for samplePoints'
print("OK")
OK OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(1b) Using `LabeledPoint`**

In MLlib, labeled data sets must be stored using the [LabeledPoint](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LabeledPoint) object. Write the `parsePoint` function that takes a data sample as input, splits it using [unicode.split](https://docs.python.org/2/library/string.html#string.split), maps the values to `float`, and returns a `LabeledPoint`. Apply this function to the `samplePoints` variable from the previous cell and print the features and the label using the `LabeledPoint.features` and `LabeledPoint.label` attributes. Finally, compute the number of features in this data set.
from pyspark.mllib.regression import LabeledPoint
import numpy as np

# Here is a sample raw data point:
# '2001.0,0.884,0.610,0.600,0.474,0.247,0.357,0.344,0.33,0.600,0.425,0.60,0.419'
# In this raw data point, 2001.0 is the label, and the remaining values are features

# EXERCISE
def parsePoint(line):
    """Converts a comma separated unicode string into a `LabeledPoint`.

    Args:
        line (unicode): Comma separated unicode string where the first element is the label
            and the remaining elements are features.

    Returns:
        LabeledPoint: The line is converted into a `LabeledPoint`, which consists of a label
            and features.
    """
    Point = [float(x) for x in line.replace(',', ' ').split(' ')]
    return LabeledPoint(Point[0], Point[1:])

parsedSamplePoints = list(map(parsePoint, samplePoints))
firstPointFeatures = parsedSamplePoints[0].features
firstPointLabel = parsedSamplePoints[0].label
print(firstPointFeatures, firstPointLabel)

d = len(firstPointFeatures)
print(d)

# TEST Using LabeledPoint (1b)
assert isinstance(firstPointLabel, float), 'label must be a float'
expectedX0 = [0.8841, 0.6105, 0.6005, 0.4747, 0.2472, 0.3573,
              0.3441, 0.3396, 0.6009, 0.4257, 0.6049, 0.4192]
assert np.allclose(expectedX0, firstPointFeatures, 1e-4, 1e-4), 'incorrect features for firstPointFeatures'
assert np.allclose(2001.0, firstPointLabel), 'incorrect label for firstPointLabel'
assert d == 12, 'incorrect number of features'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**Visualization 1: Features**

The next cell shows one way of visualizing the features through a heat map. The map shows the first 50 objects, with their features represented by shades of gray: white represents the value 0 and black the value 1. This kind of visualization helps us see how the feature values vary; little change in shade means that a feature has low variance.
import matplotlib.pyplot as plt
import matplotlib.cm as cm

%matplotlib inline

sampleMorePoints = rawData.take(50)
parsedSampleMorePoints = map(parsePoint, sampleMorePoints)
dataValues = list(map(lambda lp: lp.features.toArray(), parsedSampleMorePoints))

def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999',
                gridWidth=1.0):
    """Template for generating the plot layout."""
    plt.close()
    fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
    ax.axes.tick_params(labelcolor='#999999', labelsize='10')
    for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
        axis.set_ticks_position('none')
        axis.set_ticks(ticks)
        axis.label.set_color('#999999')
        if hideLabels: axis.set_ticklabels([])
    plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
    map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
    return fig, ax

# generate layout and plot
fig, ax = preparePlot(np.arange(.5, 11, 1), np.arange(.5, 49, 1), figsize=(8, 7), hideLabels=True,
                      gridColor='#eeeeee', gridWidth=1.1)
image = plt.imshow(dataValues, interpolation='nearest', aspect='auto', cmap=cm.Greys)
for x, y, s in zip(np.arange(-.125, 12, 1), np.repeat(-.75, 12), [str(x) for x in range(12)]):
    plt.text(x, y, s, color='#999999', size='10')
plt.text(4.7, -3, 'Feature', color='#999999', size='11'), ax.set_ylabel('Observation')
pass
_____no_output_____
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(1c) Shifting the labels**

To better visualize the solutions we obtain, to compute the prediction error, and to inspect the relationship between the features and the labels, it is common to shift the labels so that they start at zero. As a first step, apply the `parsePoint` function to the RDD created earlier, then create an RDD containing only the `.label` of each sample. Finally, compute the minimum and maximum values.
# EXERCISE
parsedDataInit = rawData.map(parsePoint)
onlyLabels = parsedDataInit.map(lambda p: p.label)
minYear = onlyLabels.min()
maxYear = onlyLabels.max()
print(maxYear, minYear)

# TEST Find the range (1c)
assert len(parsedDataInit.take(1)[0].features) == 12, 'unexpected number of features in sample point'
sumFeatTwo = parsedDataInit.map(lambda lp: lp.features[2]).sum()
assert np.allclose(sumFeatTwo, 3158.96224351), 'parsedDataInit has unexpected values'
yearRange = maxYear - minYear
assert yearRange == 89, 'incorrect range for minYear to maxYear'
print("OK")

# EXERCISE: subtract the minimum value from the labels
parsedData = parsedDataInit.map(lambda p: LabeledPoint(p.label - minYear, p.features))

# Should be a LabeledPoint
print(type(parsedData.take(1)[0]))
# View the first point
print('\n{0}'.format(parsedData.take(1)))

# TEST Shift labels (1d)
oldSampleFeatures = parsedDataInit.take(1)[0].features
newSampleFeatures = parsedData.take(1)[0].features
assert np.allclose(oldSampleFeatures, newSampleFeatures), 'new features do not match old features'
sumFeatTwo = parsedData.map(lambda lp: lp.features[2]).sum()
assert np.allclose(sumFeatTwo, 3158.96224351), 'parsedData has unexpected values'
minYearNew = parsedData.map(lambda lp: lp.label).min()
maxYearNew = parsedData.map(lambda lp: lp.label).max()
assert minYearNew == 0, 'incorrect min year in shifted data'
assert maxYearNew == 89, 'incorrect max year in shifted data'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(1d) Training, validation, and test sets**

As the next step, let's split our data set into training, validation, and test sets, as discussed in class. Use the [randomSplit method](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.randomSplit) with the weights and the random seed specified in the cell below to create the split. Then pre-cache the processed data using the `cache()` method. This command runs the transformations on the data and stores the result in a new RDD that can be kept in memory, if it fits, or in a temporary file.
# EXERCISE
weights = [.8, .1, .1]
seed = 42
parsedTrainData, parsedValData, parsedTestData = parsedData.randomSplit(weights, seed)
parsedTrainData.cache()
parsedValData.cache()
parsedTestData.cache()
nTrain = parsedTrainData.count()
nVal = parsedValData.count()
nTest = parsedTestData.count()

print(nTrain, nVal, nTest, nTrain + nVal + nTest)
print(parsedData.count())

# TEST Training, validation, and test sets (1e)
assert parsedTrainData.getNumPartitions() == numPartitions, 'parsedTrainData has wrong number of partitions'
assert parsedValData.getNumPartitions() == numPartitions, 'parsedValData has wrong number of partitions'
assert parsedTestData.getNumPartitions() == numPartitions, 'parsedTestData has wrong number of partitions'
assert len(parsedTrainData.take(1)[0].features) == 12, 'parsedTrainData has wrong number of features'
sumFeatTwo = (parsedTrainData
              .map(lambda lp: lp.features[2])
              .sum())
sumFeatThree = (parsedValData
                .map(lambda lp: lp.features[3])
                .reduce(lambda x, y: x + y))
sumFeatFour = (parsedTestData
               .map(lambda lp: lp.features[4])
               .reduce(lambda x, y: x + y))
assert np.allclose([sumFeatTwo, sumFeatThree, sumFeatFour],
                   [2526.87757656, 297.340394298, 184.235876654]), 'parsed Train, Val, Test data has unexpected values'
assert nTrain + nVal + nTest == 6724, 'unexpected Train, Val, Test data set size'
assert nTrain == 5359, 'unexpected value for nTrain'
assert nVal == 678, 'unexpected value for nVal'
assert nTest == 687, 'unexpected value for nTest'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**Part 2: Building the baseline model**

**(2a) Average label**

The baseline is useful to check that our regression model is working. It should be a very simple model that any algorithm can beat. A widely used baseline is to make the same prediction regardless of the data being analyzed, using the average label of the training set. Compute the mean of the shifted labels on the training set; we will use this value later when comparing prediction errors. Use an appropriate method for this task; see the [RDD API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD).
# EXERCISE
averageTrainYear = (parsedTrainData
                    .map(lambda p: p.label)
                    .mean())
print(averageTrainYear)

# TEST Average label (2a)
assert np.allclose(averageTrainYear, 53.6792311), 'incorrect value for averageTrainYear'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(2b) Root mean squared error**

To compare performance on regression problems, the root mean squared error ([RMSE](http://en.wikipedia.org/wiki/Root-mean-square_deviation)) is generally used. Implement a function that computes the RMSE from an RDD of (label, prediction) tuples.
# EXERCISE
def squaredError(label, prediction):
    """Calculates the squared error for a single prediction.

    Args:
        label (float): The correct value for this observation.
        prediction (float): The predicted value for this observation.

    Returns:
        float: The difference between the `label` and `prediction` squared.
    """
    return (label - prediction) ** 2

def calcRMSE(labelsAndPreds):
    """Calculates the root mean squared error for an `RDD` of (label, prediction) tuples.

    Args:
        labelsAndPreds (RDD of (float, float)): An `RDD` consisting of (label, prediction) tuples.

    Returns:
        float: The square root of the mean of the squared errors.
    """
    return np.sqrt(labelsAndPreds.map(lambda lp: squaredError(lp[0], lp[1])).mean())

labelsAndPreds = sc.parallelize([(3., 1.), (1., 2.), (2., 2.)])
# RMSE = sqrt[((3-1)^2 + (1-2)^2 + (2-2)^2) / 3] = 1.291
exampleRMSE = calcRMSE(labelsAndPreds)
print(exampleRMSE)

# TEST Root mean squared error (2b)
assert np.allclose(squaredError(3, 1), 4.), 'incorrect definition of squaredError'
assert np.allclose(exampleRMSE, 1.29099444874), 'incorrect value for exampleRMSE'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(2c) Baseline RMSE on the training, validation, and test sets**

Let's compute the RMSE of our baseline. First create an RDD of (label, prediction) tuples for each set, and then call the `calcRMSE` function.
# EXERCISE
labelsAndPredsTrain = parsedTrainData.map(lambda p: (p.label, averageTrainYear))
rmseTrainBase = calcRMSE(labelsAndPredsTrain)

labelsAndPredsVal = parsedValData.map(lambda p: (p.label, averageTrainYear))
rmseValBase = calcRMSE(labelsAndPredsVal)

labelsAndPredsTest = parsedTestData.map(lambda p: (p.label, averageTrainYear))
rmseTestBase = calcRMSE(labelsAndPredsTest)

print('Baseline Train RMSE = {0:.3f}'.format(rmseTrainBase))
print('Baseline Validation RMSE = {0:.3f}'.format(rmseValBase))
print('Baseline Test RMSE = {0:.3f}'.format(rmseTestBase))

# TEST Training, validation and test RMSE (2c)
assert np.allclose([rmseTrainBase, rmseValBase, rmseTestBase],
                   [21.506125957738682, 20.877445428452468, 21.260493955081916]), 'incorrect RMSE value'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**Visualization 2: Predicted vs. actual**

Let's visualize the predictions on the validation set. The scatter plots below place each point with the X coordinate given by the value predicted by the model and the Y coordinate given by the actual label value. The first plot shows the ideal situation, a model that gets every label right. The second plot shows the performance of the baseline model. The point colors represent the squared error of each prediction: the closer to orange, the larger the error.
from matplotlib.colors import ListedColormap, Normalize
from matplotlib.cm import get_cmap
cmap = get_cmap('YlOrRd')
norm = Normalize()

actual = np.asarray(parsedValData
                    .map(lambda lp: lp.label)
                    .collect())
error = np.asarray(parsedValData
                   .map(lambda lp: (lp.label, lp.label))
                   .map(lambda lp: squaredError(lp[0], lp[1]))
                   .collect())
clrs = cmap(np.asarray(norm(error)))[:, 0:3]

fig, ax = preparePlot(np.arange(0, 100, 20), np.arange(0, 100, 20))
plt.scatter(actual, actual, s=14**2, c=clrs, edgecolors='#888888', alpha=0.75, linewidths=0.5)
ax.set_xlabel('Predicted'), ax.set_ylabel('Actual')
pass

predictions = np.asarray(parsedValData
                         .map(lambda lp: averageTrainYear)
                         .collect())
error = np.asarray(parsedValData
                   .map(lambda lp: (lp.label, averageTrainYear))
                   .map(lambda lp: squaredError(lp[0], lp[1]))
                   .collect())
norm = Normalize()
clrs = cmap(np.asarray(norm(error)))[:, 0:3]

fig, ax = preparePlot(np.arange(53.0, 55.0, 0.5), np.arange(0, 100, 20))
ax.set_xlim(53, 55)
plt.scatter(predictions, actual, s=14**2, c=clrs, edgecolors='#888888', alpha=0.75, linewidths=0.3)
ax.set_xlabel('Predicted'), ax.set_ylabel('Actual')
_____no_output_____
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**Part 3: Training and evaluating the linear regression model**

**(3a) Error gradient**

Let's implement linear regression via gradient descent. Recall that to update the weights of the linear regression we do:
$$ \scriptsize \mathbf{w}_{i+1} = \mathbf{w}_i - \alpha_i \sum_j (\mathbf{w}_i^\top\mathbf{x}_j - y_j) \mathbf{x}_j \,.$$
where $ \scriptsize i $ is the iteration of the algorithm and $ \scriptsize j $ is the object currently being observed. First, implement a function that computes this error gradient for a given object, $ \scriptsize (\mathbf{w}^\top \mathbf{x} - y) \mathbf{x} \, ,$ and test the function on two examples. Use the `DenseVector` [dot](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.linalg.DenseVector.dot) method to represent the feature list (it behaves much like `np.array()`).
from pyspark.mllib.linalg import DenseVector

# EXERCISE
def gradientSummand(weights, lp):
    """Calculates the gradient summand for a given weight and `LabeledPoint`.

    Note:
        `DenseVector` behaves similarly to a `numpy.ndarray` and they can be used interchangably
        within this function.  For example, they both implement the `dot` method.

    Args:
        weights (DenseVector): An array of model weights (betas).
        lp (LabeledPoint): The `LabeledPoint` for a single observation.

    Returns:
        DenseVector: An array of values the same length as `weights`.  The gradient summand.
    """
    return DenseVector((weights.dot(lp.features) - lp.label) * lp.features)

exampleW = DenseVector([1, 1, 1])
exampleLP = LabeledPoint(2.0, [3, 1, 4])

summandOne = gradientSummand(exampleW, exampleLP)
print(summandOne)

exampleW = DenseVector([.24, 1.2, -1.4])
exampleLP = LabeledPoint(3.0, [-1.4, 4.2, 2.1])
summandTwo = gradientSummand(exampleW, exampleLP)
print(summandTwo)

# TEST Gradient summand (3a)
assert np.allclose(summandOne, [18., 6., 24.]), 'incorrect value for summandOne'
assert np.allclose(summandTwo, [1.7304, -5.1912, -2.5956]), 'incorrect value for summandTwo'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(3b) Use the weights to make predictions**

Now implement the `getLabeledPrediction` function, which takes the weight vector and a `LabeledPoint` as parameters and returns a (label, prediction) tuple. Remember that we can predict a label by computing the dot product of the weights with the features.
# EXERCISE
def getLabeledPrediction(weights, observation):
    """Calculates predictions and returns a (label, prediction) tuple.

    Note:
        The labels should remain unchanged as we'll use this information to calculate prediction
        error later.

    Args:
        weights (np.ndarray): An array with one weight for each feature in `trainData`.
        observation (LabeledPoint): A `LabeledPoint` that contains the correct label and the
            features for the data point.

    Returns:
        tuple: A (label, prediction) tuple.
    """
    return (observation.label, weights.dot(observation.features))

weights = np.array([1.0, 1.5])
predictionExample = sc.parallelize([LabeledPoint(2, np.array([1.0, .5])),
                                    LabeledPoint(1.5, np.array([.5, .5]))])
labelsAndPredsExample = predictionExample.map(lambda lp: getLabeledPrediction(weights, lp))
print(labelsAndPredsExample.collect())

# TEST Use weights to make predictions (3b)
assert labelsAndPredsExample.collect() == [(2.0, 1.75), (1.5, 1.25)], 'incorrect definition for getLabeledPredictions'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(3c) Gradient descent**

Finally, implement the gradient descent algorithm for linear regression and test the function on an example.
# EXERCISE
def linregGradientDescent(trainData, numIters):
    """Calculates the weights and error for a linear regression model trained with gradient descent.

    Note:
        `DenseVector` behaves similarly to a `numpy.ndarray` and they can be used interchangably
        within this function.  For example, they both implement the `dot` method.

    Args:
        trainData (RDD of LabeledPoint): The labeled data for use in training the model.
        numIters (int): The number of iterations of gradient descent to perform.

    Returns:
        (np.ndarray, np.ndarray): A tuple of (weights, training errors).  Weights will be the
            final weights (one weight per feature) for the model, and training errors will contain
            an error (RMSE) for each iteration of the algorithm.
    """
    # The length of the training data
    n = trainData.count()
    # The number of features in the training data
    d = len(trainData.first().features)
    w = np.zeros(d)
    alpha = 1.0
    # We will compute and store the training error after each iteration
    errorTrain = np.zeros(numIters)
    for i in range(numIters):
        # Use getLabeledPrediction from (3b) with trainData to obtain an RDD of (label, prediction)
        # tuples. Note that the weights all equal 0 for the first iteration, so the predictions will
        # have large errors to start.
        labelsAndPredsTrain = trainData.map(lambda l: getLabeledPrediction(w, l))
        errorTrain[i] = calcRMSE(labelsAndPredsTrain)

        # Calculate the `gradient`.  Make use of the `gradientSummand` function you wrote in (3a).
        # Note that `gradient` should be a `DenseVector` of length `d`.
        gradient = trainData.map(lambda l: gradientSummand(w, l)).sum()

        # Update the weights
        alpha_i = alpha / (n * np.sqrt(i + 1))
        w -= alpha_i * gradient
    return w, errorTrain

# create a toy dataset with n = 10, d = 3, and then run 5 iterations of gradient descent
# note: the resulting model will not be useful; the goal here is to verify that
# linregGradientDescent is working properly
exampleN = 10
exampleD = 3
exampleData = (sc
               .parallelize(parsedTrainData.take(exampleN))
               .map(lambda lp: LabeledPoint(lp.label, lp.features[0:exampleD])))
print(exampleData.take(2))
exampleNumIters = 5
exampleWeights, exampleErrorTrain = linregGradientDescent(exampleData, exampleNumIters)
print(exampleWeights)

# TEST Gradient descent (3c)
expectedOutput = [48.20389904, 34.53243006, 30.60284959]
assert np.allclose(exampleWeights, expectedOutput), 'value of exampleWeights is incorrect'
expectedError = [79.72766145, 33.64762907, 9.46281696, 9.45486926, 9.44889147]
assert np.allclose(exampleErrorTrain, expectedError), 'value of exampleErrorTrain is incorrect'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(3d) Training the model on the data set**

Now we will train the linear regression model on our training set and compute the RMSE on the validation set. Remember that we must not touch the test set until the best model parameters have been chosen. For this task we will use the previously implemented `linregGradientDescent`, `getLabeledPrediction`, and `calcRMSE` functions.
# EXERCISE
numIters = 50
weightsLR0, errorTrainLR0 = linregGradientDescent(parsedTrainData, numIters)

labelsAndPreds = parsedValData.map(lambda lp: getLabeledPrediction(weightsLR0, lp))
rmseValLR0 = calcRMSE(labelsAndPreds)

print('Validation RMSE:\n\tBaseline = {0:.3f}\n\tLR0 = {1:.3f}'.format(rmseValBase, rmseValLR0))

# TEST Train the model (3d)
expectedOutput = [22.64370481, 20.1815662, -0.21620107, 8.53259099, 5.94821844, -4.50349235,
                  15.51511703, 3.88802901, 9.79146177, 5.74357056, 11.19512589, 3.60554264]
assert np.allclose(weightsLR0, expectedOutput), 'incorrect value for weightsLR0'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**Visualization 3: Training error**

Let's check the behavior of the algorithm over the iterations. To do so we plot a chart in which the x axis represents the iteration and the y axis the log of the RMSE. The first plot shows the first 50 iterations, while the second shows the last 44 iterations. Note that the error initially drops quickly, after which gradient descent makes only small adjustments.
norm = Normalize()
clrs = cmap(np.asarray(norm(np.log(errorTrainLR0))))[:, 0:3]

fig, ax = preparePlot(np.arange(0, 60, 10), np.arange(2, 6, 1))
ax.set_ylim(2, 6)
plt.scatter(list(range(0, numIters)), np.log(errorTrainLR0), s=14**2, c=clrs,
            edgecolors='#888888', alpha=0.75)
ax.set_xlabel('Iteration'), ax.set_ylabel(r'$\log_e(errorTrainLR0)$')
pass

norm = Normalize()
clrs = cmap(np.asarray(norm(errorTrainLR0[6:])))[:, 0:3]

fig, ax = preparePlot(np.arange(0, 60, 10), np.arange(17, 22, 1))
ax.set_ylim(17.8, 21.2)
plt.scatter(range(0, numIters - 6), errorTrainLR0[6:], s=14**2, c=clrs,
            edgecolors='#888888', alpha=0.75)
ax.set_xticklabels(map(str, range(6, 66, 10)))
ax.set_xlabel('Iteration'), ax.set_ylabel(r'Training Error')
pass
_____no_output_____
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**Part 4: Training with MLlib and grid search**

**(4a) `LinearRegressionWithSGD`**

Our first attempt already beats the baseline, but let's see if we can do better by introducing the intercept of the line and making other adjustments to the algorithm. MLlib's [LinearRegressionWithSGD](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LinearRegressionWithSGD) implements the same algorithm as part (3c), but more efficiently for the distributed setting and with several additional features. First use LinearRegressionWithSGD to train a model with L2 (ridge) regularization and with an intercept. This method returns a [LinearRegressionModel](https://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LinearRegressionModel). Then use the [weights](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LinearRegressionModel.weights) and [intercept](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LinearRegressionModel.intercept) attributes to print the model that was found.
from pyspark.mllib.regression import LinearRegressionWithSGD

# Values to use when training the linear regression model
numIters = 500        # iterations
alpha = 1.0           # step
miniBatchFrac = 1.0   # miniBatchFraction
reg = 1e-1            # regParam
regType = 'l2'        # regType
useIntercept = True   # intercept

# EXERCISE
firstModel = LinearRegressionWithSGD.train(parsedTrainData,
                                           iterations=numIters,
                                           step=alpha,
                                           miniBatchFraction=miniBatchFrac,
                                           regParam=reg,
                                           regType=regType,
                                           intercept=useIntercept)

# weightsLR1 stores the model weights; interceptLR1 stores the model intercept
weightsLR1 = firstModel.weights
interceptLR1 = firstModel.intercept
print(weightsLR1, interceptLR1)

# TEST LinearRegressionWithSGD (4a)
expectedIntercept = 13.332056210482524
expectedWeights = [15.9694010246, 13.9897244172, 0.669349383773, 6.24618402989, 4.00932179503,
                   -2.30176663131, 10.478805422, 3.06385145385, 7.14414111075, 4.49826819526,
                   7.87702565069, 3.00732146613]
assert np.allclose(interceptLR1, expectedIntercept), 'incorrect value for interceptLR1'
assert np.allclose(weightsLR1, expectedWeights), 'incorrect value for weightsLR1'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(4b) Prediction**

Now use the [LinearRegressionModel.predict()](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LinearRegressionModel.predict) method to make a prediction for an object. Pass the `features` attribute of a `LabeledPoint` as the parameter.
# EXERCISE
samplePoint = parsedTrainData.take(1)[0]
samplePrediction = firstModel.predict(samplePoint.features)
print(samplePrediction)

# TEST Predict (4b)
assert np.allclose(samplePrediction, 56.4065674104), 'incorrect value for samplePrediction'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(4c) Evaluate the RMSE**

Now evaluate the performance of this model on the validation set. Use the `predict()` method to create the `labelsAndPreds` RDD, and then use the `calcRMSE()` function from Part (2b) to compute the RMSE.
# EXERCISE
labelsAndPreds = parsedValData.map(lambda lp: (lp.label, firstModel.predict(lp.features)))
rmseValLR1 = calcRMSE(labelsAndPreds)

print('Validation RMSE:\n\tBaseline = {0:.3f}\n\tLR0 = {1:.3f}\n\tLR1 = {2:.3f}'.format(rmseValBase, rmseValLR0, rmseValLR1))

# TEST Evaluate RMSE (4c)
assert np.allclose(rmseValLR1, 19.025), 'incorrect value for rmseValLR1'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(4d) Grid search**

We are already beating the baseline by at least two years on average; let's see if we can find a better set of parameters. Run a grid search to find a good regularization parameter. Try values for `regParam` from the set `1e-10`, `1e-5`, and `1`.
# EXERCISE
bestRMSE = rmseValLR1
bestRegParam = reg
bestModel = firstModel

numIters = 500
alpha = 1.0
miniBatchFrac = 1.0
for reg in [1e-10, 1e-5, 1.0]:
    model = LinearRegressionWithSGD.train(parsedTrainData, numIters, alpha,
                                          miniBatchFrac, regParam=reg,
                                          regType='l2', intercept=True)
    labelsAndPreds = parsedValData.map(lambda lp: (lp.label, model.predict(lp.features)))
    rmseValGrid = calcRMSE(labelsAndPreds)
    print(rmseValGrid)

    if rmseValGrid < bestRMSE:
        bestRMSE = rmseValGrid
        bestRegParam = reg
        bestModel = model
rmseValLRGrid = bestRMSE

print('Validation RMSE:\n\tBaseline = {0:.3f}\n\tLR0 = {1:.3f}\n\tLR1 = {2:.3f}\n\tLRGrid = {3:.3f}'.format(rmseValBase, rmseValLR0, rmseValLR1, rmseValLRGrid))

# TEST Grid search (4d)
assert np.allclose(16.6813542516, rmseValLRGrid), 'incorrect value for rmseValLRGrid'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**Visualization 5: Best model's predictions**

Now let's create a plot to check the performance of the best model. Notice in this plot that the number of darker points has dropped considerably compared to the baseline.
predictions = np.asarray(parsedValData
                         .map(lambda lp: bestModel.predict(lp.features))
                         .collect())
actual = np.asarray(parsedValData
                    .map(lambda lp: lp.label)
                    .collect())
error = np.asarray(parsedValData
                   .map(lambda lp: (lp.label, bestModel.predict(lp.features)))
                   .map(lambda lp: squaredError(lp[0], lp[1]))
                   .collect())

norm = Normalize()
clrs = cmap(np.asarray(norm(error)))[:, 0:3]

fig, ax = preparePlot(np.arange(0, 120, 20), np.arange(0, 120, 20))
ax.set_xlim(15, 82), ax.set_ylim(-5, 105)
plt.scatter(predictions, actual, s=14**2, c=clrs, edgecolors='#888888', alpha=0.75, linewidths=.5)
ax.set_xlabel('Predicted'), ax.set_ylabel(r'Actual')
pass
_____no_output_____
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
**(4e) Grid search over alpha and the number of iterations**

Now let's try different values for alpha and for the number of iterations to see the impact of these parameters on our model. Specifically, try the values `1e-5` and `10` for `alpha` and the values `500` and `5` for the number of iterations. Evaluate all the models on the validation set. Note that with a very small alpha the algorithm needs many more iterations to converge to the optimum, while a very large alpha can keep the algorithm from finding a solution at all.
# EXERCISE
reg = bestRegParam
modelRMSEs = []

for alpha in [1e-5, 10]:
    for numIters in [500, 5]:
        model = LinearRegressionWithSGD.train(parsedTrainData, numIters, alpha,
                                              miniBatchFrac, regParam=reg,
                                              regType='l2', intercept=True)
        labelsAndPreds = parsedValData.map(lambda lp: (lp.label, model.predict(lp.features)))
        rmseVal = calcRMSE(labelsAndPreds)
        print('alpha = {0:.0e}, numIters = {1}, RMSE = {2:.3f}'.format(alpha, numIters, rmseVal))
        modelRMSEs.append(rmseVal)

# TEST Vary alpha and the number of iterations (4e)
expectedResults = sorted([57.487692757541318, 57.487692757541318, 352324534.65684682])
assert np.allclose(sorted(modelRMSEs)[:3], expectedResults), 'incorrect value for modelRMSEs'
print("OK")
OK
MIT
ATIVIDADE3/Lab02.ipynb
titocaco/BIGDATA2018
Anomaly detection

Anomaly detection is a machine learning task that consists in spotting so-called outliers.

“An outlier is an observation in a data set which appears to be inconsistent with the remainder of that set of data.” (Johnson, 1992)

“An outlier is an observation which deviates so much from the other observations as to arouse suspicions that it was generated by a different mechanism.” (Hawkins, 1980)

Types of anomaly detection setups

- Supervised AD
  - Labels available for both normal data and anomalies
  - Similar to rare class mining / imbalanced classification
- Semi-supervised AD (Novelty Detection)
  - Only normal data available to train
  - The algorithm learns on normal data only
- Unsupervised AD (Outlier Detection)
  - No labels, training set = normal + abnormal data
  - Assumption: anomalies are very rare
%matplotlib inline

import warnings
warnings.filterwarnings("ignore")

import numpy as np
import matplotlib
import matplotlib.pyplot as plt
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
Let's first get familiar with different unsupervised anomaly detection approaches and algorithms. In order to visualise the output of the different algorithms we consider a toy data set consisting in a two-dimensional Gaussian mixture.

Generating the data set
from sklearn.datasets import make_blobs

X, y = make_blobs(n_features=2, centers=3, n_samples=500, random_state=42)

X.shape

plt.figure()
plt.scatter(X[:, 0], X[:, 1])
plt.show()
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
Anomaly detection with density estimation
from sklearn.neighbors.kde import KernelDensity

# Estimate density with a Gaussian kernel density estimator
kde = KernelDensity(kernel='gaussian')
kde = kde.fit(X)
kde

kde_X = kde.score_samples(X)
print(kde_X.shape)  # contains the log-likelihood of the data; the smaller it is, the rarer the sample

from scipy.stats.mstats import mquantiles
alpha_set = 0.95
tau_kde = mquantiles(kde_X, 1. - alpha_set)

n_samples, n_features = X.shape
X_range = np.zeros((n_features, 2))
X_range[:, 0] = np.min(X, axis=0) - 1.
X_range[:, 1] = np.max(X, axis=0) + 1.

h = 0.1  # step size of the mesh
x_min, x_max = X_range[0]
y_min, y_max = X_range[1]
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

grid = np.c_[xx.ravel(), yy.ravel()]

Z_kde = kde.score_samples(grid)
Z_kde = Z_kde.reshape(xx.shape)

plt.figure()
c_0 = plt.contour(xx, yy, Z_kde, levels=tau_kde, colors='red', linewidths=3)
plt.clabel(c_0, inline=1, fontsize=15, fmt={tau_kde[0]: str(alpha_set)})
plt.scatter(X[:, 0], X[:, 1])
plt.show()
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
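The cell above only draws the density level set. As a small hedged addition (not part of the original notebook), the same threshold can be used to flag individual samples: any point whose log-density falls below `tau_kde` is treated as an anomaly. This assumes `kde_X` and `tau_kde` from the cell above are still in scope.

# Flag the samples whose log-density falls below the alpha_set quantile threshold
X_kde_outliers = X[kde_X < tau_kde[0]]
print('Number of points flagged by the KDE threshold:', len(X_kde_outliers))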
now with One-Class SVM

The problem with density-based estimators is that they tend to become inefficient when the dimensionality of the data increases. This is the so-called curse of dimensionality, which affects density estimation algorithms particularly strongly. The one-class SVM algorithm can be used in such cases.
from sklearn.svm import OneClassSVM

nu = 0.05  # theory says it should be an upper bound of the fraction of outliers
ocsvm = OneClassSVM(kernel='rbf', gamma=0.05, nu=nu)
ocsvm.fit(X)

X_outliers = X[ocsvm.predict(X) == -1]

Z_ocsvm = ocsvm.decision_function(grid)
Z_ocsvm = Z_ocsvm.reshape(xx.shape)

plt.figure()
c_0 = plt.contour(xx, yy, Z_ocsvm, levels=[0], colors='red', linewidths=3)
plt.clabel(c_0, inline=1, fontsize=15, fmt={0: str(alpha_set)})
plt.scatter(X[:, 0], X[:, 1])
plt.scatter(X_outliers[:, 0], X_outliers[:, 1], color='red')
plt.show()
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
Support vectors - Outliers

The so-called support vectors of the one-class SVM form the outliers
X_SV = X[ocsvm.support_]
n_SV = len(X_SV)
n_outliers = len(X_outliers)

print('{0:.2f} <= {1:.2f} <= {2:.2f}?'.format(1. / n_samples * n_outliers, nu,
                                              1. / n_samples * n_SV))
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
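The printed inequality checks the so-called nu-property of the One-Class SVM. Stated as a formula (a standard result, added here for clarity rather than taken from the original notebook):

$$ \frac{\#\{\text{outliers}\}}{n} \leq \nu \leq \frac{\#\{\text{support vectors}\}}{n} $$

so `nu` is an upper bound on the fraction of flagged outliers and a lower bound on the fraction of support vectors.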
Only the support vectors are involved in the decision function of the One-Class SVM.

1. Plot the level sets of the One-Class SVM decision function as we did for the true density.
2. Emphasize the support vectors.
plt.figure()
plt.contourf(xx, yy, Z_ocsvm, 10, cmap=plt.cm.Blues_r)
plt.scatter(X[:, 0], X[:, 1], s=1.)
plt.scatter(X_SV[:, 0], X_SV[:, 1], color='orange')
plt.show()
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
EXERCISE: **Change** the `gamma` parameter and see its influence on the smoothness of the decision function.
# %load solutions/22_A-anomaly_ocsvm_gamma.py
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
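One possible way to approach this exercise (the bundled solution file `solutions/22_A-anomaly_ocsvm_gamma.py` is not shown here, so this is only a sketch) is to refit the One-Class SVM for a few `gamma` values and redraw the zero level set each time: a small `gamma` yields a smoother, wider boundary, while a large `gamma` hugs the data more tightly. The `gamma` values below are illustrative assumptions, and `X`, `xx`, `yy`, and `grid` come from the earlier cells.

from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt

for gamma_value in [0.001, 0.05, 1.0]:  # illustrative values, not prescribed by the exercise
    ocsvm_g = OneClassSVM(kernel='rbf', gamma=gamma_value, nu=0.05).fit(X)
    Z_g = ocsvm_g.decision_function(grid).reshape(xx.shape)
    plt.figure()
    plt.title('gamma = {0}'.format(gamma_value))
    plt.contour(xx, yy, Z_g, levels=[0], colors='red', linewidths=3)
    plt.scatter(X[:, 0], X[:, 1], s=1.)
    plt.show()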
Isolation Forest

Isolation Forest is an anomaly detection algorithm based on trees. The algorithm builds a number of random trees, and the rationale is that if a sample is isolated it should end up alone in a leaf after very few random splits. Isolation Forest builds a score of abnormality based on the depth of the tree at which samples end up.
from sklearn.ensemble import IsolationForest

iforest = IsolationForest(n_estimators=300, contamination=0.10)
iforest = iforest.fit(X)

Z_iforest = iforest.decision_function(grid)
Z_iforest = Z_iforest.reshape(xx.shape)

plt.figure()
c_0 = plt.contour(xx, yy, Z_iforest,
                  levels=[iforest.threshold_],
                  colors='red', linewidths=3)
plt.clabel(c_0, inline=1, fontsize=15,
           fmt={iforest.threshold_: str(alpha_set)})
plt.scatter(X[:, 0], X[:, 1], s=1.)
plt.show()
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
EXERCISE: Illustrate graphically the influence of the number of trees on the smoothness of the decision function.
# %load solutions/22_B-anomaly_iforest_n_trees.py
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
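A sketch of one way to tackle this exercise (again, not the bundled solution file): refit the forest with different values of `n_estimators` and redraw the decision function; with few trees the anomaly score is noisy, and it smooths out as trees are added. The tree counts below are assumptions chosen for illustration, and `X`, `xx`, `yy`, and `grid` come from the earlier cells.

from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt

for n_trees in [5, 50, 300]:  # illustrative values
    iforest_n = IsolationForest(n_estimators=n_trees, contamination=0.10).fit(X)
    Z_n = iforest_n.decision_function(grid).reshape(xx.shape)
    plt.figure()
    plt.title('n_estimators = {0}'.format(n_trees))
    plt.contourf(xx, yy, Z_n, 10, cmap=plt.cm.Blues_r)
    plt.scatter(X[:, 0], X[:, 1], s=1.)
    plt.show()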
Illustration on the Digits data set

We will now apply the IsolationForest algorithm to spot digits written in an unconventional way.
from sklearn.datasets import load_digits

digits = load_digits()
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
The digits data set consists of 8 x 8 images of digits.
images = digits.images
labels = digits.target
images.shape

i = 102
plt.figure(figsize=(2, 2))
plt.title('{0}'.format(labels[i]))
plt.axis('off')
plt.imshow(images[i], cmap=plt.cm.gray_r, interpolation='nearest')
plt.show()
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
To use the images as a training set we need to flatten the images.
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

data.shape

X = data
y = digits.target

X.shape
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
Let's focus on digit 5.
X_5 = X[y == 5]

X_5.shape

fig, axes = plt.subplots(1, 5, figsize=(10, 4))
for ax, x in zip(axes, X_5[:5]):
    img = x.reshape(8, 8)
    ax.imshow(img, cmap=plt.cm.gray_r, interpolation='nearest')
    ax.axis('off')
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
1. Let's use IsolationForest to find the top 5% most abnormal images.
2. Let's plot them!
from sklearn.ensemble import IsolationForest

iforest = IsolationForest(contamination=0.05)
iforest = iforest.fit(X_5)
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
Compute the level of "abnormality" with `iforest.decision_function`. The lower, the more abnormal.
iforest_X = iforest.decision_function(X_5)
plt.hist(iforest_X);
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
Let's plot the strongest inliers
X_strong_inliers = X_5[np.argsort(iforest_X)[-10:]]

fig, axes = plt.subplots(2, 5, figsize=(10, 5))

for i, ax in zip(range(len(X_strong_inliers)), axes.ravel()):
    ax.imshow(X_strong_inliers[i].reshape((8, 8)),
              cmap=plt.cm.gray_r, interpolation='nearest')
    ax.axis('off')
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
Let's plot the strongest outliers
fig, axes = plt.subplots(2, 5, figsize=(10, 5))

X_outliers = X_5[iforest.predict(X_5) == -1]

for i, ax in zip(range(len(X_outliers)), axes.ravel()):
    ax.imshow(X_outliers[i].reshape((8, 8)),
              cmap=plt.cm.gray_r, interpolation='nearest')
    ax.axis('off')
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
EXERCISE: Rerun the same analysis with all the other digits
# %load solutions/22_C-anomaly_digits.py
_____no_output_____
CC0-1.0
notebooks/22.Unsupervised_learning-anomaly_detection.ipynb
anthony-wang/scipy-2018-sklearn
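One hedged way to rerun the analysis (the official answer lives in `solutions/22_C-anomaly_digits.py`, which is not reproduced here) is to wrap the fit in a loop over the ten digit classes and show a few flagged outliers for each. It assumes `X` and `y` are the flattened digit images and labels built earlier.

from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt

for digit in range(10):
    X_d = X[y == digit]
    iforest_d = IsolationForest(contamination=0.05).fit(X_d)
    X_out = X_d[iforest_d.predict(X_d) == -1]
    print('Digit {0}: {1} samples flagged as outliers'.format(digit, len(X_out)))
    fig, axes = plt.subplots(1, 5, figsize=(10, 2))
    for ax in axes:
        ax.axis('off')
    for img, ax in zip(X_out[:5], axes):
        ax.imshow(img.reshape((8, 8)), cmap=plt.cm.gray_r, interpolation='nearest')
    plt.show()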
Intro to machine learning - k-means
---

Scikit-learn has a nice set of unsupervised learning routines which can be used to explore clustering in the parameter space.

In this notebook we will use k-means, included in Scikit-learn, to demonstrate how the different rocks occupy different regions in the available parameter space.

Let's load the data using pandas:
import pandas as pd
import numpy as np

df = pd.read_csv("../data/2016_ML_contest_training_data.csv")
df.head()

df.describe()

df = df.dropna()
_____no_output_____
Apache-2.0
notebooks/Intro_to_machine_learning_kmeans.ipynb
michael-scarn/geocomp-0118
Calculate RHOB from DeltaPHI and PHIND
def rhob(phi_rhob, Rho_matrix=2650.0, Rho_fluid=1000.0):
    """
    Rho_matrix (sandstone): 2.65 g/cc
    Rho_matrix (limestone): 2.71 g/cc
    Rho_matrix (dolomite): 2.876 g/cc
    Rho_matrix (anhydrite): 2.977 g/cc
    Rho_matrix (salt): 2.032 g/cc
    Rho_fluid (fresh water): 1.0 g/cc (is this more mud-like?)
    Rho_fluid (salt water): 1.1 g/cc
    The defaults are the sandstone and fresh-water values expressed in kg/m3.
    See wiki.aapg.org/Density-neutron_log_porosity

    Returns a bulk density (RHOB) log computed from the density porosity.
    """
    return Rho_matrix * (1 - phi_rhob) + Rho_fluid * phi_rhob

phi_rhob = 2 * (df.PHIND / 100) / (1 - df.DeltaPHI / 100) - df.DeltaPHI / 100
calc_RHOB = rhob(phi_rhob)
df['RHOB'] = calc_RHOB
df.describe()
_____no_output_____
Apache-2.0
notebooks/Intro_to_machine_learning_kmeans.ipynb
michael-scarn/geocomp-0118
We can define a Python dictionary to relate facies with the integer label on the `DataFrame`
facies_dict = {1: 'sandstone', 2: 'c_siltstone', 3: 'f_siltstone', 4: 'marine_silt_shale',
               5: 'mudstone', 6: 'wackentstone', 7: 'dolomite', 8: 'packstone', 9: 'bafflestone'}

df["s_Facies"] = df.Facies.map(lambda x: facies_dict[x])
df.head()
_____no_output_____
Apache-2.0
notebooks/Intro_to_machine_learning_kmeans.ipynb
michael-scarn/geocomp-0118
We can easily visualize the properties of each facies and how they compare using a `PairPlot`. The `seaborn` library integrates with matplotlib to make this kind of plot easy.
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline

g = sns.PairGrid(df, hue="s_Facies", vars=['GR', 'RHOB', 'PE', 'ILD_log10'], size=4)
g.map_upper(plt.scatter, **dict(alpha=0.4))
g.map_lower(plt.scatter, **dict(alpha=0.4))
g.map_diag(plt.hist, **dict(bins=20))
g.add_legend()
g.set(alpha=0.5)
_____no_output_____
Apache-2.0
notebooks/Intro_to_machine_learning_kmeans.ipynb
michael-scarn/geocomp-0118
It is very clear that it is hard to separate these facies in feature space. Let's select just a couple of facies and, using Pandas, select the rows in the `DataFrame` that contain information about those facies.
selected = ['f_siltstone', 'bafflestone', 'wackentstone']

dfs = pd.concat(list(map(lambda x: df[df.s_Facies == x], selected)))

g = sns.PairGrid(dfs, hue="s_Facies", vars=['GR', 'RHOB', 'PE', 'ILD_log10'], size=4)
g.map_upper(plt.scatter, **dict(alpha=0.4))
g.map_lower(plt.scatter, **dict(alpha=0.4))
g.map_diag(plt.hist, **dict(bins=20))
g.add_legend()
g.set(alpha=0.5)

# Make X and y
X = dfs[['GR', 'ILD_log10', 'PE']].as_matrix()
y = dfs['Facies'].values
_____no_output_____
Apache-2.0
notebooks/Intro_to_machine_learning_kmeans.ipynb
michael-scarn/geocomp-0118
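The notebook's introduction promises a k-means demonstration; a minimal sketch of that step is given below (it is not one of the original cells). It assumes the `X` matrix of GR, ILD_log10, and PE values and the `y` facies labels built in the previous cell, and the choice of three clusters simply mirrors the three selected facies.

from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

# Standardize the logs so no single feature dominates the Euclidean distance
X_scaled = StandardScaler().fit_transform(X)

kmeans = KMeans(n_clusters=3, random_state=42)
clusters = kmeans.fit_predict(X_scaled)

# Compare the unsupervised clusters with the true facies labels
comparison = pd.crosstab(y, clusters, rownames=['facies'], colnames=['cluster'])
print(comparison)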