code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: mlearnable-datasets-detective
#     language: python
#     name: mlearnable-datasets-detective
# ---

import pandas as pd
from pathlib import Path
import os
from os.path import isfile

# Folder holding one sub-folder of result CSV files per experiment batch.
data_folder = Path(os.getcwd()).parent / 'data' / 'data_ml_csv'
data_file = 'all_dabl_analysis.csv'

# Collect every file living exactly one level below data_folder
# (skip plain files sitting directly in data_folder).
list_file = [
    data_folder / folder / file
    for folder in os.listdir(data_folder)
    if not isfile(data_folder / folder)
    for file in os.listdir(data_folder / folder)
]

# Peek at two individual result files to check their shape.
file = list_file[0]
df = pd.read_csv(file)
df.head()

file = list_file[50]
af = pd.read_csv(file)
af.head()

pd.concat([df, af], axis=0, ignore_index=True)

# Load every result file and stack them into a single frame.
# Read all frames first and concatenate ONCE: calling pd.concat inside
# the loop copies the accumulated frame each iteration (quadratic cost).
frames = [pd.read_csv(f) for f in list_file]
df = pd.concat(frames, axis=0, ignore_index=True)

df.head()

df.info()

df[~df['pavel'].isna()]

df.columns

# +
# TODO: add a column with the max of roc_auc and f1_macro
# -

df['mean_note'] = df[['f1_macro', 'roc_auc']].mean(axis=1)

# Use item assignment, not attribute assignment: `df.nb_features = ...`
# is fragile (it can silently set an instance attribute instead of the
# column when the name clashes with a DataFrame attribute).
df['nb_features'] = df['nb_features'].astype(int)

import matplotlib.pyplot as plt

plt.hist(df['mean_note'], bins=50)

df[df['mean_note'].between(0.8, 0.9)]

# +
# hard to find anything interesting with that alone
# -

df[df['mean_note'].between(0.8, 0.9)
   & df['nb_features'].between(5, 15)
   & (df['nb_classes'] > 4)]

# manual inspection: the interesting experiments are:

df.iloc[11587]
notebook/Exploration resultats.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Gator hunt # # The Florida Fish and Wildlife Conservation Commission keeps track of [gators killed by hunters](http://myfwc.com/wildlifehabitats/managed/alligator/harvest/data-export/). A cut of this data lives in `../data/gators.csv`. # # Let's take a look. # import pandas # read in the CSV # check the output with `.head()` # ### Check it out # # First, let's take a look at our data and examine some of the column values that we might be interested in analyzing. We're already starting to think about the questions we want this data to help us answer. # get the info() # what's the year range, with counts? # let's also peep the carcass size values to get the pattern # ### Come up with a list of questions # # - What's the longest gator in our data? # - Average length by year? # - How many gators are killed by month? # ### Write a function to calculate gator length in inches # # Right now, the value for the gator's length is a string following this pattern: `{} ft. {} in.`. # # Let's create a new column to get the gator's length in a constant, numeric value: inches. # # We're going to write a function to do these steps: # - Given a row of data, capture the feet and inch values in the carcass size column -- we can split the string on 'ft.' and clean up each piece from there # - Multiply feet by 12 # - Add that number to the inch value # - `return` the result # # We shall call this function on the data frame using the [`.apply()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.apply.html) method. # + # define a function called `get-inches` that accepts one row as an argument # grab the carcass size value # split the value on 'ft.' 
# get the feet value, strip whitespace, coerce to integer # get the inch value, replace 'in.', strip whitespace, coerce to integer # return inches plus feet*12 # - # create a new column and fill it by applying our function to every row using `.apply()` # 👉 Learn more about functions in [this notebook](../reference/Functions.ipynb). # check the output with head() # sort by length descending, check it out with head() # ### Count by year # # Our friend `value_counts()` is _on it_. # get value counts by year # ### Average length by year # # To get the average length of gators by year, we'll run a [pivot table](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.pivot_table.html). # # 👉 For more details on creating pivot tables, [see this notebook](../reference/Grouping%20data%20in%20pandas.ipynb#Pivot-tables). # get average length harvested by year # values is length # index is Year # aggfunc is 'mean' # see results # ### Treating dates as dates # # This data include the date on which the gator was killed, but the date values are being stored as strings. If we want to do some time-based analysis -- comparing the gator hunt by month, or whatever -- we'd want to deal directly with native dates. # # Noting the format (month-day-year), let's use the [`to_datetime()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.to_datetime.html) method to convert the dates into native date objects. We'll tell pandas to use the [correct date specification](http://strftime.org/) and to coerce errors to null values rather than throw a giant exception. # format is %m-%d-%Y # errors='coerce' # check the output with `head()` # If you want to doublecheck that the data type is correct, you can access the `dtypes` attribute. # check dtypes # [You can read more about date formatting here](https://docs.python.org/3/library/datetime.html). 
# ### Gator hunt by month # # [According to](http://myfwc.com/media/310257/Alligator-processors.pdf) the Florida Fish and Wildlife Conservation Commission, the gator hunt season is in the fall: # # ![gatorhunt](../img/gatorhunt.png "gatorhunt") # # Let's look at the totals by month: # - Create a new column for the month using a [lambda function](https://docs.python.org/3/tutorial/controlflow.html#lambda-expressions) # - Do value counts by month # create a new column, 'month', and fill it by apply a lambda function to extract the month # check unique values in our new column # do value_counts on the month column and sort_index() # What if we wanted to get a count by month _by year_? Pivot tables to the rescue, again. # # We'll provide the `pivot_table` method with five things: # - `df` specifies what data frame we're pivoting # - `index='month'` specifies the column we're grouping on # - `columns='Year'` specifies the columns value # - `aggfunc='count'` tells pandas how to aggregate the data -- we want to count the values # - `values='length_in'` specifies the column of data to apply the aggregation to -- we're going to count up every record of a carcass that has a length # create a pivot table called by_month_by_year # check the output # I have OCD and those `NaN`s mixed in with our numbers gives me a case of the dang fantods. Let's use the `.fillna()` method to replace those with `0`. # run fillna(0) on it
exercises/Gator hunt - working.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Creating a STAC of Landsat data # # In this tutorial we create a STAC of Landsat 8 data from Amazon Web Service's [Open Data program](https://registry.opendata.aws/landsat-8/). There's a lot of Landsat scenes, so we'll only take a subset of scenes that are from a specific year and over a specific location. We'll translate existing metadata about each scene to STAC information, utilizing the `eo`, `view`, and `proj` extensions. Finally we'll write out the STAC catalog to our local machine, allowing us to use [stac-browser](https://github.com/radiantearth/stac-browser) to preview the images. # # ### Requirements # # To run this tutorial you'll have needed to install PySTAC with the validation extra. To do this, use: # # ``` # pip install pystac[validation] # ``` # # Also to run this notebook you'll need [jupyter](https://jupyter.org/) installed locally as well. If you're running in a docker container, make sure that port `5555` is exposed if you want to run the server at the end of the notebook. import pystac # ### Reading Landsat 8 scene data # # AWS keeps a scene list that includes information about where the scene is located and how to access it's information. Landsat 8 was reorganized into a [Collection](https://www.usgs.gov/land-resources/nli/landsat/landsat-collections?qt-science_support_page_related_con=2#qt-science_support_page_related_con) system in the past, and so there's two places to read a scene list from (as [mentioned on the aws page](https://docs.opendata.aws/landsat-pds/readme.html)). We'll pull from the data that is organized as Collection 1 data, which is where all new processed data is added since 2017. 
# +
import csv
import gzip
from io import StringIO
from urllib.request import urlopen

# Collection 1 data
url = 'https://landsat-pds.s3.amazonaws.com/c1/L8/scene_list.gz'

# Download the gzipped scene list and decompress it on the fly.
response = urlopen(url)
gunzip_response = gzip.GzipFile(fileobj=response)
content = gunzip_response.read()

# Parse each CSV row into a dict keyed by the header columns.
scenes = list(csv.DictReader(StringIO(content.decode("utf-8"))))
# -

len(scenes)

# As you can see, there are a lot of scenes! Even though STAC items contain
# just the metadata for the scenes (and not the bulky raster image), this
# would still be a lot of data and files to work with for this tutorial.
#
# Let's see what one of the scenes looks like, and then filter on those
# properties to scope things down.

scenes[0]

# As you can see, we have both a date and a location that we can filter on.

# ### Filter scenes by a location
#
# Let's pick a location to filter the scenes by. Here we choose Philly, but
# feel free to change the location by modifying the latitude and longitude
# coordinates below and changing the location name:

lat, lon = 39.9526, -75.1652
location_name = "Philly"
filter_year = '2020'

# We'll use the coordinates and the year to filter out any unwanted scenes:

# +
def keep_scene(scene):
    """Return True when the scene footprint contains (lat, lon) and the
    scene was acquired during filter_year."""
    lat_ok = float(scene['min_lat']) < lat < float(scene['max_lat'])
    lon_ok = float(scene['min_lon']) < lon < float(scene['max_lon'])
    year_ok = '{}-'.format(filter_year) in scene['acquisitionDate']
    return lat_ok and lon_ok and year_ok


location_scenes = [s for s in scenes if keep_scene(s)]
# -

# This should leave us with a much more manageable subset of scenes:

len(location_scenes)

# We'll be working with a single scene through the next few sections, so
# let's use the first scene in our list:

scene = location_scenes[0]
scene

# ### Read metadata from the MTL file
#
# Landsat 8 metadata is contained in an `MTL` file that is alongside of the
# `download_url` file specified in the scene data.
# Let's read the MTL file for the first scene and see what it looks like.
#
# First we define a function that reads a file based on the `download_url`
# location - we'll be using this a lot to get file URLs related to a scene:

def get_asset_url(scene, suffix):
    """Return the URL of the scene asset named ``<productId>_<suffix>``.

    The scene's ``download_url`` points at an ``index.html`` page; every
    other asset for the scene lives alongside it, so we only swap the
    filename component.
    """
    product_id = scene['productId']
    download_url = scene['download_url']
    asset_filename = '{}_{}'.format(product_id, suffix)
    return download_url.replace('index.html', asset_filename)

# We can then use this function to get the MTL file for our scene. Notice we
# use `pystac.STAC_IO.read_text` - this is the method that PySTAC uses to
# read text as it crawls a STAC. It can read from the local filesystem or
# HTTP/HTTPS by default. Also, it can be extended to read from other sources
# such as cloud providers - see the documentation at
# https://pystac.readthedocs.io/en/0.5/concepts.html#using-stac-io. For now
# we'll use it directly as an easy way to read a text file from an HTTPS
# source:

mtl_url = get_asset_url(scene, 'MTL.txt')

print(pystac.STAC_IO.read_text(mtl_url))

# The MTL file contains metadata in a text format that's a bit hard to use
# as-is; we can parse things out to a `dict` for easier access:

def get_metadata(url):
    """Convert a Landsat MTL file to a dictionary of metadata values.

    Parameters
    ----------
    url : str
        Location of the MTL text file (read via ``pystac.STAC_IO``).
    """
    mtl = {}
    mtl_text = pystac.STAC_IO.read_text(url)
    for line in mtl_text.split('\n'):
        # Split on the FIRST '=' only: some MTL values (e.g. URLs or
        # quoted strings) contain '=' themselves and would otherwise be
        # truncated at the second '='.
        meta = line.replace('\"', "").strip().split('=', 1)
        if len(meta) > 1:
            key = meta[0].strip()
            item = meta[1].strip()
            # GROUP/END_GROUP lines only delimit sections; skip them.
            if key != "GROUP" and key != "END_GROUP":
                mtl[key] = item
    return mtl

metadata = get_metadata(mtl_url)
metadata

# ### Create a STAC Item from a scene
#
# Now that we have metadata for the scene let's use it to create a
# [STAC Item](https://github.com/radiantearth/stac-spec/blob/v1.0.0-beta.2/item-spec/item-spec.md).
#
# We can use the `help` method to see the signature of the `__init__` method
# on `pystac.Item`.
# You can also call `help` directly on `pystac.Item` for broader
# documentation, or check the API docs for Item at
# https://pystac.readthedocs.io/en/0.5/api.html#item.

help(pystac.Item.__init__)

# We can see we'll need at least an `id`, `geometry`, `bbox`, and
# `datetime`. Properties is required, but can be an empty dictionary that we
# fill out on the Item once it's created.

# #### Item `id`
#
# For the Item's `id`, we'll use the scene ID. We'll chop off the last 5
# characters as they are repeated for each ID and so aren't necessary:

def get_item_id(metadata):
    """Scene ID with its constant 5-character tail removed."""
    scene_id = metadata['LANDSAT_SCENE_ID']
    trimmed = scene_id[:-5]
    return trimmed

item_id = get_item_id(metadata)
item_id

# #### Item `datetime`
#
# Here we parse the datetime of the Item from two metadata fields that
# describe the date and time the scene was captured:

# +
from dateutil.parser import parse

def get_datetime(metadata):
    """Acquisition timestamp combined from the date and time MTL fields."""
    stamp = '%sT%s' % (metadata['DATE_ACQUIRED'], metadata['SCENE_CENTER_TIME'])
    return parse(stamp)
# -

item_datetime = get_datetime(metadata)
item_datetime

# #### Item `bbox`
#
# Here we read in the bounding box information from the scene and transform
# it into the format of the Item's `bbox` property:

def get_bbox(metadata):
    """Axis-aligned [min_lon, min_lat, max_lon, max_lat] of the corners."""
    lons = [
        float(metadata['CORNER_UL_LON_PRODUCT']),
        float(metadata['CORNER_UR_LON_PRODUCT']),
        float(metadata['CORNER_LR_LON_PRODUCT']),
        float(metadata['CORNER_LL_LON_PRODUCT']),
    ]
    lats = [
        float(metadata['CORNER_UL_LAT_PRODUCT']),
        float(metadata['CORNER_UR_LAT_PRODUCT']),
        float(metadata['CORNER_LR_LAT_PRODUCT']),
        float(metadata['CORNER_LL_LAT_PRODUCT']),
    ]
    return [min(lons), min(lats), max(lons), max(lats)]

item_bbox = get_bbox(metadata)
item_bbox

# #### Item `geometry`
#
# Getting the geometry of the scene is a little more tricky.
The bounding box will be a axis-aligned rectangle of the area the scene occupies, but will not represent the true footprint of the image - Landsat 8 scenes are "tilted" according the the coordinate reference system, so there will be areas in the corner where no image data exists. When constructing a STAC Item it's best if you have the Item geometry represent the true footprint of the assets. # # To get the footprint of the scene we'll read in another metadata file that lives alongside the MTL - the `ANG.txt` file. This function uses the ANG file and the bbox to construct the GeoJSON polygon that represents the footprint of the scene: def get_geometry(scene, bbox): url = get_asset_url(scene, 'ANG.txt') sz = [] coords = [] ang_text = pystac.STAC_IO.read_text(url) for line in ang_text.split('\n'): if 'BAND01_NUM_L1T_LINES' in line or 'BAND01_NUM_L1T_SAMPS' in line: sz.append(float(line.split('=')[1])) if 'BAND01_L1T_IMAGE_CORNER_LINES' in line or 'BAND01_L1T_IMAGE_CORNER_SAMPS' in line: coords.append([float(l) for l in line.split('=')[1].strip().strip('()').split(',')]) if len(coords) == 2: break dlon = bbox[2] - bbox[0] dlat = bbox[3] - bbox[1] lons = [c/sz[1] * dlon + bbox[0] for c in coords[1]] lats = [((sz[0] - c)/sz[0]) * dlat + bbox[1] for c in coords[0]] coordinates = [[ [lons[0], lats[0]], [lons[1], lats[1]], [lons[2], lats[2]], [lons[3], lats[3]], [lons[0], lats[0]] ]] return {'type': 'Polygon', 'coordinates': coordinates} item_geometry = get_geometry(scene, item_bbox) item_geometry # This would be a good time to check our work - we can print out the GeoJSON and use [geojson.io](http://geojson.io/) to check and make sure we're using scenes that overlap our location. If this footprint is somewhere unexpected in the world, make sure the Lat/Long coordinates are correct and in the right order! 
# + import json print(json.dumps(item_geometry, indent=2)) # - # #### Create the item # # Now that we have the required attributes for an Item we can create it: item = pystac.Item(id=item_id, datetime=item_datetime, geometry=item_geometry, bbox=item_bbox, properties={}) # PySTAC has a `validate` method on STAC objects, which you can use to make sure you're constructing things correctly. If there's an issue the following line will throw an exception: item.validate() # #### Add Ground Sample Distance to common metadata # # We'll add the Ground Sample Distance that is defined as part of the Item [Common Metadata](https://github.com/radiantearth/stac-spec/blob/v1.0.0-beta.2/item-spec/common-metadata.md). We define this on the Item leve as 30 meters, which is the GSD for most of the bands of Landsat 8. However, there are some bands that have a different resolution; we will account for this by setting the GSD explicitly for each of those bands below. item.common_metadata.gsd = 30.0 # #### Adding the EO extension # # STAC has a rich [set of extensions](https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions) that allow STAC objects to encode information that is not part of the core spec but is used widely and standardized. An example of this is the [eo extension](https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/eo), which encapsulates data that that represents a snapshot of the earth for a single date and time. # # We can enable the `eo` extension for this item by using the `ext` property that exists on all STAC objects: item.ext.enable('eo') # #### Add cloud cover # # Here we add cloud cover from the metadata as part of the `eo` extension. 
def get_cloud_cover(metadata): return float(metadata['CLOUD_COVER']) item.ext.eo.cloud_cover = get_cloud_cover(metadata) item.ext.eo.cloud_cover # #### Adding assets # # STAC Items contain a list of [Assets](https://github.com/radiantearth/stac-spec/blob/v1.0.0-beta.2/item-spec/item-spec.md#asset-object), which are a list of files that relate to the Item. In our case we'll be cataloging each file related to the scene, including the Landsat 8 band files as well as the metadata files associated with the scene. # # Here we define a dictionary that describes the band assets. We use the `eo` extension's `Band` class to encapsulate information about the band each file represents, and also specify the Ground Sample Distance of each band: # + from pystac.extensions.eo import Band landsat_band_info = { 'B1': { 'band': Band.create(name="B1", common_name="coastal", center_wavelength=0.48, full_width_half_max=0.02), 'gsd': 30.0 }, 'B2': { 'band': Band.create(name="B2", common_name="blue", center_wavelength=0.44, full_width_half_max=0.06), 'gsd': 30.0 }, 'B3': { 'band': Band.create(name="B3", common_name="green", center_wavelength=0.56, full_width_half_max=0.06), 'gsd': 30.0 }, 'B4': { 'band': Band.create(name="B4", common_name="red", center_wavelength=0.65, full_width_half_max=0.04), 'gsd': 30.0 }, 'B5': { 'band': Band.create(name="B5", common_name="nir", center_wavelength=0.86, full_width_half_max=0.03), 'gsd': 30.0 }, 'B6': { 'band': Band.create(name="B6", common_name="swir16", center_wavelength=1.6, full_width_half_max=0.08), 'gsd': 30.0 }, 'B7': { 'band': Band.create(name="B7", common_name="swir22", center_wavelength=2.2, full_width_half_max=0.2), 'gsd': 30.0 }, 'B8': { 'band': Band.create(name="B8", common_name="pan", center_wavelength=0.59, full_width_half_max=0.18), 'gsd': 15.0 }, 'B9': { 'band': Band.create(name="B9", common_name="cirrus", center_wavelength=1.37, full_width_half_max=0.02), 'gsd': 30.0 }, 'B10': { 'band': Band.create(name="B10", common_name="lwir11", 
center_wavelength=10.9, full_width_half_max=0.8), 'gsd': 100.0 }, 'B11': { 'band': Band.create(name="B11", common_name="lwir12", center_wavelength=12.0, full_width_half_max=1.0), 'gsd': 100.0 } } # - # There are also other non-band assets associated with a scene, and we specify the Asset's URL and media type here, along with the key we will refer to each asset by: def get_other_assets(scene): return { 'thumbnail': { 'href': get_asset_url(scene, 'thumb_large.jpg'), 'media_type': pystac.MediaType.JPEG }, 'index': { 'href': get_asset_url(scene, 'index.html'), 'media_type': 'application/html' }, 'ANG': { 'href': get_asset_url(scene, 'ANG.txt'), 'media_type': 'text/plain' }, 'MTL': { 'href': get_asset_url(scene, 'MTL.txt'), 'media_type': 'text/plain' }, 'BQA': { 'href': get_asset_url(scene, 'BQA.TIF'), 'media_type': pystac.MediaType.GEOTIFF } } # With this information we can now define a method that adds all the relevant assets for a scene and add them to an item: def add_assets(item, scene): # Add bands for band_id, band_info in landsat_band_info.items(): band_url = get_asset_url(scene, '{}.TIF'.format(band_id)) asset = pystac.Asset(href=band_url, media_type=pystac.MediaType.COG) bands = [band_info['band']] item.ext.eo.set_bands(bands, asset) item.add_asset(band_id, asset) # If this asset has a different GSD than the item, set it on the asset if band_info['gsd'] != item.common_metadata.gsd: item.common_metadata.set_gsd(band_info['gsd'], asset) # Add other assets for asset_id, asset_info in get_other_assets(scene).items(): item.add_asset(asset_id, pystac.Asset(href=asset_info['href'], media_type=asset_info['media_type'])) add_assets(item, scene) # The logic for the Assets is such that if the `gsd` of the Asset is different from the Item's GSD (30 meters), the Asset's GSD will be specified directly on the Asset. 
We can see this by comparing the `dict` encoding of the Assets for band 10 and band 3: item.assets['B10'].to_dict() item.assets['B3'].to_dict() # Here we see the tumbnail asset, which does not include the band information for the `eo` extension as it does not represent any of the Item's bands: item.assets['thumbnail'].to_dict() # #### Add projection information # # We can specify the EPSG code for the scene as part of the [projection extension](https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/projection). The below method figures out the correct UTM Zone EPSG code based on the center latitude of the scene: def get_epsg(metadata, min_lat, max_lat): if 'UTM_ZONE' in metadata: center_lat = (min_lat + max_lat)/2.0 return int(('326' if center_lat > 0 else '327') + metadata['UTM_ZONE']) else: return None item.ext.enable('projection') item.ext.projection.epsg = get_epsg(metadata, item.bbox[1], item.bbox[3]) item.ext.projection.epsg # #### Add view geometry information # # The [View Geometry](https://github.com/radiantearth/stac-spec/tree/v1.0.0-beta.2/extensions/view) extension specifies information related to angles of sensors and other radiance angles that affect the view of resulting data. The Landsat 8 metadata specifies two of these parameters, so we add them to our Item: def get_view_info(metadata): return { 'sun_azimuth': float(metadata['SUN_AZIMUTH']), 'sun_elevation': float(metadata['SUN_ELEVATION']) } item.ext.enable('view') view_info = get_view_info(metadata) item.ext.view.sun_azimuth = view_info['sun_azimuth'] item.ext.view.sun_elevation = view_info['sun_elevation'] item.properties # Now that we've added all the metadata to the item, let's check the validator to make sure we've specified everything correctly. The validation logic will take into account the new extensions that have been enabled and validate against the proper schemas for those extensions. 
item.validate() # ### Building the Collection # # Now that we know how to build an item for a scene, let's build the collection that will contain all the Items. # # If we look at the `__init__` method for `pystac.Collection`, we can see what properties are required: help(pystac.Collection.__init__) # #### Collection `id` # # We'll use the location name we defined above in the ID for our Collection: collection_id = '{}-landsat-8'.format(location_name) collection_id # #### Collection `title` # # Here we set a simple title for our collection. collection_title = '2020 Landsat 8 images over {}'.format(location_name) collection_title # #### Collection `description` # # Here we give a brief description of the Collection. If this were a real Collection that were being published, I'd recommend putting a much more detailed description to ensure anyone using your STAC knows what they are working with! # # Notice we are using [Markdown](https://www.markdownguide.org/) to write the description. The `description` field can be Markdown to help tools that render information about STAC to display the information in a more readable way. collection_description = '''### {} Landsat 8 A collection of Landsat 8 scenes around {} in 2020. '''.format(location_name, location_name) print(collection_description) # #### Collection `extent` # # A Collection specifies the spatial and temporal extent of all the item it contains. Since Landsat 8 spans the globe, we'll simply put a global extent here. We'll also specify an open-ended time interval that starts with the first datetime for scenes hosted by AWS. # # Towards the end of the notebook, we'll use a method to easily scope this down to cover the times and space the Items occupy once we've added all the items. 
# + from datetime import datetime spatial_extent = pystac.SpatialExtent([[-180, -90, 180, 90]]) temporal_extent = pystac.TemporalExtent([[datetime(2013, 6, 1), None]]) collection_extent = pystac.Extent(spatial_extent, temporal_extent) # - collection = pystac.Collection(id=collection_id, title=collection_title, description=collection_description, extent=collection_extent) # We can now look at our Collection as a `dict` to check our values. collection.to_dict() # #### Set the license # # Notice the `license` above is `proprietary`. This is the default in PySTAC if no license is specified; however Landsat 8 is certainly not proprietary (thankfully!), so let's change the license to the correct [SPDX](https://spdx.org/licenses/) string for public domain data: collection_license = 'PDDL-1.0' # #### Set the providers # # A collection will specify the providers of the data, including what role they have played. We can set our provider information by instantiating `pystac.Provider` objects: collection.providers = [ pystac.Provider(name='USGS', roles=['producer'], url='https://landsat.usgs.gov/'), pystac.Provider(name='Planet Labs', roles=['processor'], url='https://github.com/landsat-pds/landsat_ingestor'), pystac.Provider(name='AWS', roles=['host'], url='https://landsatonaws.com/') ] # ### Create items for each scene # # We created an Item for a single scene above. 
This method consolidates that logic into a single method that can construct an Item from a scene, so we can create an Item for every scene in our subset: def item_from_scene(scene): mtl_url = get_asset_url(scene, 'MTL.txt') metadata = get_metadata(mtl_url) bbox = get_bbox(metadata) item = pystac.Item(id=get_item_id(metadata), datetime=get_datetime(metadata), geometry=get_geometry(scene, bbox), bbox=bbox, properties={}) item.common_metadata.gsd = 30.0 item.ext.enable('eo') item.ext.eo.cloud_cover = get_cloud_cover(metadata) add_assets(item, scene) item.ext.enable('projection') item.ext.projection.epsg = get_epsg(metadata, item.bbox[1], item.bbox[3]) item.ext.enable('view') view_info = get_view_info(metadata) item.ext.view.sun_azimuth = view_info['sun_azimuth'] item.ext.view.sun_elevation = view_info['sun_elevation'] item.validate() return item # Here we create an item per scene and add it to our collection. Since this is reading multiple metadata files per scene from the internet, it may take a little bit to run: for scene in location_scenes: item = item_from_scene(scene) collection.add_item(item) # #### Reset collection extent based on items # # Now that we've added all the item we can use the `update_extent_from_items` method on the Collection to set the extent based on the contained items: collection.update_extent_from_items() collection.extent.to_dict() # ### Set the HREFs for everything in the catalog # # We've been building up our Collection and Items in memory. This has been convenient as it allows us not to think about file paths as we construct our Catalog. However, a STAC is not valid without any HREFs! # # We can use the `normalize_hrefs` method to set all the HREFs in the entire STAC based on a root directory. This will use the [STAC Best Practices](https://github.com/radiantearth/stac-spec/blob/master/best-practices.md#catalog-layout) recommendations for STAC file layout for each Catalog, Collection and Item in the STAC. 
# # Here we use that method and set the root directory to a subdirectory of our user's `home` directory: # + from pathlib import Path root_path = str(Path.home() / '{}-landsat-stac'.format(location_name)) collection.normalize_hrefs(root_path) # - # Now that we have all the Collection's data set and HREFs in place we can validate the entire STAC using `validate_all`, which recursively crawls through a catalog and validates every STAC object in the catalog: collection.validate_all() # ### Write the catalog locally # # Now that we have our complete, validated STAC in memory, let's write it out. This is a simple as calling `save` on the Collection. We need to specify the type of catalog in order to property write out links - these types are described again in the STAC [Best Practices](https://github.com/radiantearth/stac-spec/blob/master/best-practices.md#use-of-links) documentation. # # We'll use the "self contained" type, which uses relative paths and does not specify absolute "self" links to any object. This makes the catalog more portable, as it remains valid even if you copy the STAC to new locations. collection.save(pystac.CatalogType.SELF_CONTAINED) # Now that we've written our STAC out we probably want to view it. We can use the `describe` method to print out a simple representation of the catalog: collection.describe() # We can also use the `to_dict` method on individual STAC objects in order to see the data, as we've been doing in the tutorial: collection.to_dict() # However, if we want to browse our STAC more interactively, we can use the [stac-browser](https://github.com/radiantearth/stac-browser) tool to read our local STAC. 
# # We can use this simple Python server (copied from [this gist](https://gist.github.com/acdha/925e9ffc3d74ad59c3ea)) to serve our our directory at port 5555: # + import os from http.server import HTTPServer, SimpleHTTPRequestHandler os.chdir(root_path) class CORSRequestHandler(SimpleHTTPRequestHandler): def end_headers(self): self.send_header('Access-Control-Allow-Origin', '*') self.send_header('Access-Control-Allow-Methods', 'GET') self.send_header('Cache-Control', 'no-store, no-cache, must-revalidate') return super(CORSRequestHandler, self).end_headers() with HTTPServer(('localhost', 5555), CORSRequestHandler) as httpd: httpd.serve_forever() # - # Now you can follow the [stac-browser](https://github.com/radiantearth/stac-browser#running) instructions for starting a stac-browser instance and point it at `http://localhost:5555/collection.json` to serve out the STAC! # # To quit the server, use the `Kernel` -> `Interrupt` menu option. # ### Acknowledgements # # Credit to [sat-stac-landsat](https://github.com/sat-utils/sat-stac-landsat) off of which a lot of this code was based.
docs/tutorials/creating-a-landsat-stac.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Boys and girls
#
# Outline:
# + 1 Introduction
# + 2 Load and check data (load, outlier detection, join train/test, missing values)
# + 3 Feature analysis (numerical / categorical)
# + 4 Filling missing values
# + 5 Feature engineering (name/title, family size, cabin, ticket)
# + 6 Modeling (cross-validation, hyperparameter tuning, learning curves,
#     feature importance, ensembling, prediction and submission)
#
# Based on:
# https://www.kaggle.com/yassineghouzam/titanic-top-4-with-ensemble-modeling
#
# https://www.kaggle.com/headsortails/pytanic

# ## 1. Introduction
# Boy-or-girl is a balanced binary-classification competition:
# https://www.kaggle.com/c/girls-and-boys

# ## 2. Load and check data
# ### 2.1 Load data

import pandas as pd
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xg
# %matplotlib inline

train = pd.read_csv('train.csv', header=None)
test = pd.read_csv('test.csv', header=None)
target = train[249]  # column 248 is a decoy feature with label leakage, so it is dropped
train = train.drop([248], axis=1)
test = test.drop([248], axis=1)
train.head()

# ### 2.2 Outlier detection

# +
# Outlier detection
def detect_outliers(df, n, features):
    """Return the indices of rows containing more than `n` outliers.

    A value is an outlier in a column when it falls outside the Tukey
    fences [Q1 - 1.5*IQR, Q3 + 1.5*IQR] for that column.

    Parameters
    ----------
    df : pandas.DataFrame
        The data to scan.
    n : int
        A row is flagged only if it is an outlier in more than `n` columns.
    features : iterable
        Column labels to test; should be numeric columns.

    Returns
    -------
    list
        Row index labels of the flagged observations.
    """
    outlier_indices = []

    # iterate over features (columns)
    for col in features:
        # 1st quartile (25%)
        Q1 = np.percentile(df[col], 25)
        # 3rd quartile (75%)
        Q3 = np.percentile(df[col], 75)
        # Interquartile range (IQR)
        IQR = Q3 - Q1

        # outlier step
        outlier_step = 1.5 * IQR

        # Determine a list of indices of outliers for feature col
        outlier_list_col = df[(df[col] < Q1 - outlier_step) | (df[col] > Q3 + outlier_step)].index

        # append the found outlier indices for col to the list of outlier indices
        outlier_indices.extend(outlier_list_col)

    # select observations containing more than n outliers
    outlier_indices = Counter(outlier_indices)
    multiple_outliers = list(k for k, v in outlier_indices.items() if v > n)

    return multiple_outliers


# +
def sort_categorical_feature(df):
    """Split df's columns into (categorical, numerical) lists by dtype.

    Object-dtype columns are treated as categorical; everything else is
    treated as numerical.
    """
    categorical_feature = []
    numerical_feature = []
    col_type = df.dtypes
    for k in list(col_type.index):
        # FIX: `np.object` was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin `object` is the correct, equivalent dtype check.
        if col_type[k] == object:
            categorical_feature.append(k)
        else:
            numerical_feature.append(k)
    return categorical_feature, numerical_feature
# -

[categorical_feature, numerical_feature] = sort_categorical_feature(train)

numerical_feature

Outliers_to_drop = detect_outliers(train, 10, numerical_feature)
train.loc[Outliers_to_drop]  # Show the outliers rows

# Drop outliers
train = train.drop(Outliers_to_drop, axis=0).reset_index(drop=True)

# ### 2.3 Join train and test sets

## Join train and test datasets in order to obtain the same number of features
## during categorical conversion (apply identical feature processing to both).
train_len = len(train)
dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)

# ### 2.4 Check for null and missing values
missing_data = dataset.isnull().sum()
missing_data = missing_data[missing_data != 0]
missing_data.sort_values(ascending=False)[:20]

# plt.figure(figsize=(15,20))
# g = sns.barplot(y=list(missing_data.index),x=list(missing_data.values),orient='h')
# g.set_xlabel("Missing Data",fontsize=12)
# g.set_ylabel("Features",fontsize=12)
# g.tick_params(labelsize=9)
# g.set_title("Missing Data")

train.info()
train.isnull().sum().sort_values(ascending=False)[:10]

train.describe()

# ## 3. Feature analysis
# ### 3.1 Numerical features

# #### Correlation
train_corr = train[numerical_feature].corr()
plt.figure(figsize=(20,20))
g = sns.heatmap(train_corr,annot=False, fmt = ".2f", cmap = "coolwarm")

# #### Feature skewness
# NOTE(review): `fulldata`, `dropNaCol`, `data` and `trainsize` are not
# defined anywhere in this file — they presumably come from cells of an
# earlier notebook session. As written these cells raise NameError; locate
# the missing cells or rebuild these variables from `dataset`/`train_len`
# defined above.
drop_train_data = fulldata.drop(dropNaCol[dropNaCol==True].index,axis=1)
final_data = drop_train_data.fillna(0)
final_data.isnull().sum().sort_values(ascending=False)[:5]
final_data.head(5)

# Drop the leaky decoy column 248 (see the loading cell above).
final_data = final_data.drop(248,axis=1)

# Drop all object-dtype (string) columns so only numeric features remain.
strColr = final_data.dtypes==object
dropstrdata = final_data.drop(strColr[strColr==True].index,axis=1)

xg_train_label = data[249]
xg_train_data = dropstrdata[:trainsize]
xg_test_data = dropstrdata[trainsize:]

xg_train_data.shape

xg_test_data.shape

# Hold out the last 15% of the training rows as a validation split.
train_num = np.int64(trainsize*0.85)

trainsize

train_data_arr = xg_train_data[:train_num]
train_label_arr = xg_train_label[:train_num]
test_data_arr = xg_train_data[train_num:]
test_label_arr = xg_train_label[train_num:]

xg_test_data.shape

# XGBoost baseline classifier.
cl = xg.XGBClassifier(learning_rate=0.12,max_depth=4,subsample=0.9,silent=0,gamma=0,n_estimators=1000)
cl.fit(X=train_data_arr,y=train_label_arr)

cl.score(test_data_arr,test_label_arr)

cl.feature_importances_[:5]

pred_label = cl.predict(xg_test_data)

pred_label

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB

X_train = train_data_arr
Y_train = train_label_arr
X_test = test_data_arr
Y_test = test_label_arr
X_pred = xg_test_data

# +
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
Y_pred = logreg.predict(X_test)
logreg.score(X_train, Y_train)

# +
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_pred)
random_forest.score(X_train, Y_train)
# -

feature_importances= pd.Series(random_forest.feature_importances_)
feature_importances.sort_values(ascending=False)

Y_pred

# NOTE(review): the submission uses the XGBoost predictions (`pred_label`),
# not the random-forest `Y_pred` computed just above — confirm that is
# intentional.
submission = pd.DataFrame({
        "Id": test.index+1,
        "label": pred_label
    })
submission.to_csv('submit.csv', index=False)
boyvsgirl/boyvsgirls2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Week 1 - Python for beginners
# ---

import numpy as np
import matplotlib.pyplot as plt

# ## Part 2 - Graphs with matplotlib
# ---

# A first plot: one period of a sine wave.
ts = np.linspace(-3,3,300)
ys = np.sin(ts)
plt.plot(ts,ys)
plt.show()

# +
def f(x,n):
    # Power function y = x**n, used for the log-log plot below.
    return x**n

x = np.logspace(-2,2)
n = range(1,4)

# On log-log axes, y = x**n appears as a straight line of slope n.
for i in n:
    plt.plot(x,f(x,i),label='$y = x^' + str(i) + '$')

plt.legend()
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.show()
# -

# Histogram of uniform random numbers (fixed seed for reproducibility).
N = 100000
np.random.seed(0)
R = np.random.rand(N)
plt.hist(R,bins=100,density=True)
plt.show()

# Ten independent 1-D random walks of NSTEP ±1 steps each.
NSTEP = 10000
plt.figure(figsize=(10,5))
plt.xlabel('number of steps')
plt.ylabel('position')
for nseed in range(10):
    np.random.seed(nseed)
    step = np.random.choice([-1,1],NSTEP)
    position = np.cumsum(step)
    plt.plot(position)

# ---
# ## The Euler method for numerical integration

# $$
# \frac{\mathrm{d}y(t)}{\mathrm{d}t} = -y(t)
# $$
#
# Numerically solve the DE and determine $y(t)$ for $ 0 \leq t \leq 10$, with $y(0) = 1$<br>
# Compare with the analytical solution $y(t) = \mathrm{e}^{-t}$

plt.style.use('ggplot')

# +
dt, tmin, tmax = 0.1, 0.0, 10.0
step = int((tmax-tmin)/dt)
ts = np.linspace(tmin,tmax,step)
ys = np.zeros(step)
sol = np.exp(-ts)
plt.plot(ts,sol,label='Analytical', lw=5)

ys[0] = 1.0
# Forward (explicit) Euler step: y[i+1] = y[i] + dt * f(y[i]) with f(y) = -y.
for i in range(step-1):
    ys[i+1] = ys[i] - dt*ys[i]
plt.plot(ts,ys,ls='--',lw=3,label='Numerical')
plt.plot(ts,ys/sol,label='Ratio')
plt.legend()
plt.show()

# Absolute error versus the analytical solution, on a log scale.
plt.semilogy(ts,abs(ys-sol), label='Err')
plt.legend()
plt.show()
# -

# ---
# ## Simulating a damped harmonic oscillator

import matplotlib.animation as animation
# %matplotlib nbagg
plt.style.use('ggplot')

# State arrays: position R and velocity V in `dim` dimensions, plus
# per-frame histories (Rs, Vs), total energy Et and time.
dim = 2
nums = 1000
R = np.zeros(dim)
V = np.zeros(dim)
Rs = np.zeros([dim,nums])
Vs = np.zeros([dim,nums])
Et = np.zeros(nums)
time = np.zeros(nums)

# +
def init():
    # Reset the plot artists before the animation starts.
    particles.set_data([], [])
    line.set_data([], [])
    title.set_text(r'')
    return particles, line, title

def animate(i):
    # NOTE(review): `F` is listed as a global but never defined or used here.
    global R, V, F, Rs, Vs, time, Et
    # Semi-explicit update: spring force -k*R with linear damping -zeta*V.
    R, V = R + V * dt, V*(1-zeta/m*dt)-k/m*dt*R
    Rs[0:dim, i] = R
    Vs[0:dim, i] = V
    time[i] = i*dt
    # Total energy: kinetic + elastic potential (decays due to damping).
    Et[i] = 1/2*m*np.linalg.norm(V)**2+1/2*k*np.linalg.norm(R)**2
    particles.set_data(R[0], R[1])
    line.set_data(Rs[0,0:i], Rs[1, 0:i])
    title.set_text(r"$t = {0:.2f}, E_T = {1:.3f}$".format(i*dt,Et[i]))
    return particles, line, title


# +
# Physical parameters: mass, spring constant, damping coefficient.
m, k, zeta = 1.0, 1.0, 0.25
R[0], R[1] = 1., 1.
V[0], V[1] = 1., 0.
dt = 0.1*np.sqrt(k/m)
box = 5

fig, ax = plt.subplots(figsize=(7.5,7.5))
ax = plt.axes(xlim=(-box/2,box/2), ylim=(-box/2,box/2))
particles, = ax.plot([], [], 'ko', ms=10)
line, = ax.plot([],[],lw=1)
title=ax.text(0.5,1.05,r'',transform=ax.transAxes,va='center')
anim = animation.FuncAnimation(fig,animate,init_func=init,frames=nums,interval=5,blit=True,repeat=False)
anim
#anim.save('movie.mp4', fps=20,dpi=400)
# -
edx-stochastic-data-analysis/01/python-beginner.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + colab cell
# !pip install -q tensorflow_text

# + colab cell
from google.colab import drive
drive.mount('/content/drive')

# + colab cell
import numpy as np

import typing
from typing import Any, Tuple

import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
import tensorflow_text as tf_text

import matplotlib.pyplot as plt
import matplotlib.ticker as ticker

# + colab cell
use_builtins = True

# # The data

# ## Download and prepare the dataset

# + colab cell
# /content/drive/MyDrive/MasterThesis/paraphrasing/Parapgrasing-Masking.tsv
# Download the file
import pathlib

# path_to_zip = tf.keras.utils.get_file(
#     'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
#     extract=True)
# path_to_file = pathlib.Path(path_to_zip).parent/'spa-eng/spa.txt'
# path_to_file = pathlib.Path('/content/drive/MyDrive/MasterThesis/paraphrasing/Parapgrasing - Masking2.tsv')
path_to_file = pathlib.Path('/content/drive/MyDrive/MasterThesis/paraphrasing/ParapgrasingMask/ParapgrasingMasking.tsv')
# path_to_file = pathlib.Path('/content/drive/MyDrive/MasterThesis/paraphrasing/ParapgrasingMask/ParapgrasingMasking.tsv')

# + colab cell
def load_data(path):
    # Reads a two-column TSV and returns (targets, inputs).
    # NOTE(review): assumes every line has exactly two tab-separated fields;
    # a malformed line would break the 2-tuple unpacking below — confirm the
    # TSV is clean.
    text = path.read_text(encoding='utf-8')

    lines = text.splitlines()
    pairs = [line.split('\t') for line in lines]

    # The commented lines show the original column order; the active lines
    # deliberately swap the roles of the two columns.
    # inp = [inp for targ, inp in pairs]
    # targ = [targ for targ, inp in pairs]
    targ = [inp for targ, inp in pairs]
    inp = [targ for targ, inp in pairs]

    return targ, inp

# + colab cell
targ, inp = load_data(path_to_file)
print(inp[3])

# + colab cell
print(targ[3])

# + colab cell
from sklearn.model_selection import train_test_split

# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(inp, targ, test_size=0.2)

# Show length
len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val)

# ## Create a tf.data dataset

# + colab cell
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64

dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE)

# + colab cell
dataset_val = tf.data.Dataset.from_tensor_slices((input_tensor_val, target_tensor_val)).shuffle(BUFFER_SIZE)
dataset_val = dataset_val.batch(BATCH_SIZE)

# + colab cell
# Peek at a few (input, target) string pairs from the first validation batch.
for example_input_batch, example_target_batch in dataset_val.take(1):
    for i in range(5):
        print(example_input_batch.numpy()[i].decode('utf-8'))
        # print()
        print(example_target_batch.numpy()[i].decode('utf-8'))
        print()
    break

# + colab cell

# ## Text preprocessing

# + colab cell
example_text = tf.constant('#إغلاق_حسابات_البدون_في_البنوك الحل على إ رصاصة سلاإأم يا فيك يا فيه نقطه اخر السطر .')

print(example_text.numpy())
print(tf_text.normalize_utf8(example_text, 'NFKD').numpy())

# + colab cell
import re

# Characters to strip entirely: Latin letters, digits (Western and
# Arabic-Indic) and assorted punctuation/diacritics.
text_cleaning_re = "[a-zA-Z]|\d+|[٠١٢٣٤٥٦٧٨٩]|[.#،<>@,\\-_”“٪ًَ]"

def tf_lower_and_split_punct(text):
    # Normalize Arabic text for the vectorizer: unify alef variants,
    # strip punctuation/Latin/digits, unify ya/ta-marbuta, and wrap the
    # result in [START]/[END] markers.
    text = tf.strings.regex_replace(text, '[إأآا]', 'ا')
    # Split accecented characters.
    text = tf_text.normalize_utf8(text, 'NFKD')
    # text = tf.strings.lower(text)
    # Keep space, a to z, and select punctuation.
    text = tf.strings.regex_replace(text, '[.?!,¿]', '')
    # Add spaces around punctuation.
    # NOTE(review): the previous replace already removed .?!,¿ so this line
    # only pads '_' and '#' with spaces — confirm that is the intent.
    text = tf.strings.regex_replace(text, '[.?!,¿_#]', r' \0 ')
    # Strip whitespace.
    # text = tf.strings.strip(text)
    # text = re.sub(text_cleaning_re, ' ', str(text).lower()).strip()
    # text = tf.strings.regex_replace(text, r'(.)\1+', r'\1\1')
    text = tf.strings.regex_replace(text,text_cleaning_re, '')
    text = tf.strings.regex_replace(text, 'ى', 'ي')
    text = tf.strings.regex_replace(text, "ة", "ه")
    text = tf.strings.regex_replace(text, '[إأآا]', 'ا')
    # text = re.sub(r'(.)\1+', r'\1\1', str(text))
    # text = re.sub("[إأآا]", "ا", str(text))
    # text = re.sub("ى", "ي", str(text))
    text = tf.strings.join(['[START]', text, '[END]'], separator=' ')
    return text

# + colab cell
print(example_text.numpy().decode())
print(tf_lower_and_split_punct(example_text).numpy().decode())

# ## Text Vectorization

# + colab cell
max_vocab_size = 5000

#### TextVectorization before paraphrasing
input_text_processor = preprocessing.TextVectorization(
    standardize=tf_lower_and_split_punct,
    # output_mode="int",
    max_tokens=max_vocab_size
)
input_text_processor.adapt(input_tensor_train)
input_text_processor.adapt(input_tensor_val)

# Here are the first 10 words from the vocabulary:
input_text_processor.get_vocabulary()[:20]

# + colab cell
# TextVectorization after paraphrasing (masking)
output_text_processor = preprocessing.TextVectorization(
    standardize=tf_lower_and_split_punct,
    max_tokens=max_vocab_size)

output_text_processor.adapt(target_tensor_train)
output_text_processor.adapt(target_tensor_val)
output_text_processor.get_vocabulary()[:20]

# + colab cell
# Now these layers can convert a batch of strings into a batch of token IDs:
example_tokens = input_text_processor(example_input_batch)
example_tokens[:3, :10]

# + colab cell
# The get_vocabulary method can be used to convert token IDs back to text:
input_vocab = np.array(input_text_processor.get_vocabulary())
tokens = input_vocab[example_tokens[0].numpy()]
' '.join(tokens)

# + colab cell
# The returned token IDs are zero-padded. This can easily be turned into a mask:
plt.subplot(1, 2, 1)
plt.pcolormesh(example_tokens)
plt.title('Token IDs')

plt.subplot(1, 2, 2)
plt.pcolormesh(example_tokens != 0)
plt.title('Mask')

# # The encoder/decoder model

# + colab cell
embedding_dim = 256
units = 1024

# ## The encoder

# + colab cell
class ShapeChecker():
  """Records the length seen for each named axis and raises ValueError on
  any later tensor whose axes disagree (no-op in graph mode)."""

  def __init__(self):
    # Keep a cache of every axis-name seen
    self.shapes = {}

  def __call__(self, tensor, names, broadcast=False):
    if not tf.executing_eagerly():
      return

    if isinstance(names, str):
      names = (names,)

    shape = tf.shape(tensor)
    rank = tf.rank(tensor)

    if rank != len(names):
      raise ValueError(f'Rank mismatch:\n'
                       f'    found {rank}: {shape.numpy()}\n'
                       f'    expected {len(names)}: {names}\n')

    for i, name in enumerate(names):
      if isinstance(name, int):
        old_dim = name
      else:
        old_dim = self.shapes.get(name, None)
      new_dim = shape[i]

      # broadcast=True lets a size-1 axis match any cached length.
      if (broadcast and new_dim == 1):
        continue

      if old_dim is None:
        # If the axis name is new, add its length to the cache.
        self.shapes[name] = new_dim
        continue

      if new_dim != old_dim:
        raise ValueError(f"Shape mismatch for dimension: '{name}'\n"
                         f"    found: {new_dim}\n"
                         f"    expected: {old_dim}\n")


# + colab cell
class Encoder(tf.keras.layers.Layer):
  """Embedding + GRU encoder: token IDs -> (sequence of states, final state)."""

  def __init__(self, input_vocab_size, embedding_dim, enc_units):
    super(Encoder, self).__init__()
    self.enc_units = enc_units
    self.input_vocab_size = input_vocab_size

    # The embedding layer converts tokens to vectors
    self.embedding = tf.keras.layers.Embedding(self.input_vocab_size,
                                               embedding_dim)

    # The GRU RNN layer processes those vectors sequentially.
    self.gru = tf.keras.layers.GRU(self.enc_units,
                                   # Return the sequence and state
                                   return_sequences=True,
                                   return_state=True,
                                   recurrent_initializer='glorot_uniform')

  def call(self, tokens, state=None):
    shape_checker = ShapeChecker()
    shape_checker(tokens, ('batch', 's'))

    # 2. The embedding layer looks up the embedding for each token.
    vectors = self.embedding(tokens)
    shape_checker(vectors, ('batch', 's', 'embed_dim'))

    # 3. The GRU processes the embedding sequence.
    #    output shape: (batch, s, enc_units)
    #    state shape: (batch, enc_units)
    output, state = self.gru(vectors, initial_state=state)
    shape_checker(output, ('batch', 's', 'enc_units'))
    shape_checker(state, ('batch', 'enc_units'))

    # 4. Returns the new sequence and its state.
    return output, state


# + colab cell
# Convert the input text to tokens.
example_tokens = input_text_processor(example_input_batch)

# Encode the input sequence.
encoder = Encoder(input_text_processor.vocabulary_size(),
                  embedding_dim, units)
example_enc_output, example_enc_state = encoder(example_tokens)

print(f'Input batch, shape (batch): {example_input_batch.shape}')
print(f'Input batch tokens, shape (batch, s): {example_tokens.shape}')
print(f'Encoder output, shape (batch, s, units): {example_enc_output.shape}')
print(f'Encoder state, shape (batch, units): {example_enc_state.shape}')

# ## The attention head

# + colab cell
class BahdanauAttention(tf.keras.layers.Layer):
  """Additive (Bahdanau) attention over the encoder output sequence."""

  def __init__(self, units):
    super().__init__()
    # For Eqn. (4), the Bahdanau attention
    self.W1 = tf.keras.layers.Dense(units, use_bias=False)
    self.W2 = tf.keras.layers.Dense(units, use_bias=False)

    self.attention = tf.keras.layers.AdditiveAttention()

  def call(self, query, value, mask):
    shape_checker = ShapeChecker()
    shape_checker(query, ('batch', 't', 'query_units'))
    shape_checker(value, ('batch', 's', 'value_units'))
    shape_checker(mask, ('batch', 's'))

    # From Eqn. (4), `W1@ht`.
    w1_query = self.W1(query)
    shape_checker(w1_query, ('batch', 't', 'attn_units'))

    # From Eqn. (4), `W2@hs`.
    w2_key = self.W2(value)
    shape_checker(w2_key, ('batch', 's', 'attn_units'))

    # Every query position attends; only unpadded value positions may be
    # attended to.
    query_mask = tf.ones(tf.shape(query)[:-1], dtype=bool)
    value_mask = mask

    context_vector, attention_weights = self.attention(
        inputs = [w1_query, value, w2_key],
        mask=[query_mask, value_mask],
        return_attention_scores = True,
    )
    shape_checker(context_vector, ('batch', 't', 'value_units'))
    shape_checker(attention_weights, ('batch', 't', 's'))

    return context_vector, attention_weights


# ## Test the Attention layer

# + colab cell
attention_layer = BahdanauAttention(units)

# + colab cell
(example_tokens != 0).shape

# + colab cell
# Later, the decoder will generate this attention query
example_attention_query = tf.random.normal(shape=[len(example_tokens), 2, 10])

# Attend to the encoded tokens
context_vector, attention_weights = attention_layer(
    query=example_attention_query,
    value=example_enc_output,
    mask=(example_tokens != 0))

print(f'Attention result shape: (batch_size, query_seq_length, units): {context_vector.shape}')
print(f'Attention weights shape: (batch_size, query_seq_length, value_seq_length): {attention_weights.shape}')

# + colab cell
plt.subplot(1, 2, 1)
plt.pcolormesh(attention_weights[:, 0, :])
plt.title('Attention weights')

plt.subplot(1, 2, 2)
plt.pcolormesh(example_tokens != 0)
plt.title('Mask')

# + colab cell
attention_weights.shape

# + colab cell
# Keep only the non-zero weights of the first sequence for plotting.
attention_slice = attention_weights[0, 0].numpy()
attention_slice = attention_slice[attention_slice != 0]

# + colab cell
plt.suptitle('Attention weights for one sequence')

plt.figure(figsize=(12, 6))
a1 = plt.subplot(1, 2, 1)
plt.bar(range(len(attention_slice)), attention_slice)
# freeze the xlim
plt.xlim(plt.xlim())
plt.xlabel('Attention weights')

a2 = plt.subplot(1, 2, 2)
plt.bar(range(len(attention_slice)), attention_slice)
plt.xlabel('Attention weights, zoomed')

# zoom in
top = max(a1.get_ylim())
zoom = 0.85*top
a2.set_ylim([0.90*top, top])
a1.plot(a1.get_xlim(), [zoom, zoom], color='k')

# ## The decoder

# + colab cell
class Decoder(tf.keras.layers.Layer):
  """Attention decoder: previous tokens + encoder output -> next-token logits.

  The `call` method is attached separately below (`Decoder.call = call`).
  """

  def __init__(self, output_vocab_size, embedding_dim, dec_units):
    super(Decoder, self).__init__()
    self.dec_units = dec_units
    self.output_vocab_size = output_vocab_size
    self.embedding_dim = embedding_dim

    # For Step 1. The embedding layer converts token IDs to vectors
    self.embedding = tf.keras.layers.Embedding(self.output_vocab_size,
                                               embedding_dim)

    # For Step 2. The RNN keeps track of what's been generated so far.
    self.gru = tf.keras.layers.GRU(self.dec_units,
                                   return_sequences=True,
                                   return_state=True,
                                   recurrent_initializer='glorot_uniform')

    # For step 3. The RNN output will be the query for the attention layer.
    self.attention = BahdanauAttention(self.dec_units)

    # For step 4. Eqn. (3): converting `ct` to `at`
    self.Wc = tf.keras.layers.Dense(dec_units, activation=tf.math.tanh,
                                    use_bias=False)

    # For step 5. This fully connected layer produces the logits for each
    # output token.
    self.fc = tf.keras.layers.Dense(self.output_vocab_size)


# + colab cell
class DecoderInput(typing.NamedTuple):
  # new_tokens: the most recently generated token IDs
  # enc_output: the full encoder output sequence
  # mask: True where the encoder input was not padding
  new_tokens: Any
  enc_output: Any
  mask: Any

class DecoderOutput(typing.NamedTuple):
  # logits over the output vocabulary and the attention weights used
  logits: Any
  attention_weights: Any


# + colab cell
def call(self,
         inputs: DecoderInput,
         state=None) -> Tuple[DecoderOutput, tf.Tensor]:
  shape_checker = ShapeChecker()
  shape_checker(inputs.new_tokens, ('batch', 't'))
  shape_checker(inputs.enc_output, ('batch', 's', 'enc_units'))
  shape_checker(inputs.mask, ('batch', 's'))

  if state is not None:
    shape_checker(state, ('batch', 'dec_units'))

  # Step 1. Lookup the embeddings
  vectors = self.embedding(inputs.new_tokens)
  shape_checker(vectors, ('batch', 't', 'embedding_dim'))

  # Step 2. Process one step with the RNN
  rnn_output, state = self.gru(vectors, initial_state=state)

  shape_checker(rnn_output, ('batch', 't', 'dec_units'))
  shape_checker(state, ('batch', 'dec_units'))

  # Step 3. Use the RNN output as the query for the attention over the
  # encoder output.
  context_vector, attention_weights = self.attention(
      query=rnn_output, value=inputs.enc_output, mask=inputs.mask)
  shape_checker(context_vector, ('batch', 't', 'dec_units'))
  shape_checker(attention_weights, ('batch', 't', 's'))

  # Step 4. Eqn. (3): Join the context_vector and rnn_output
  #     [ct; ht] shape: (batch t, value_units + query_units)
  context_and_rnn_output = tf.concat([context_vector, rnn_output], axis=-1)

  # Step 4. Eqn. (3): `at = tanh(Wc@[ct; ht])`
  attention_vector = self.Wc(context_and_rnn_output)
  shape_checker(attention_vector, ('batch', 't', 'dec_units'))

  # Step 5. Generate logit predictions:
  logits = self.fc(attention_vector)
  shape_checker(logits, ('batch', 't', 'output_vocab_size'))

  return DecoderOutput(logits, attention_weights), state


# + colab cell
Decoder.call = call

# + colab cell
decoder = Decoder(output_text_processor.vocabulary_size(),
                  embedding_dim, units)

# + colab cell
# Convert the target sequence, and collect the "[START]" tokens
example_output_tokens = output_text_processor(example_target_batch)

# NOTE(review): `_index_lookup_layer` is a private TextVectorization
# attribute and may break across TF versions — confirm against the
# installed TF release.
start_index = output_text_processor._index_lookup_layer('[START]').numpy()
first_token = tf.constant([[start_index]] * example_output_tokens.shape[0])

# + colab cell
# Run the decoder
dec_result, dec_state =
decoder( inputs = DecoderInput(new_tokens=first_token, enc_output=example_enc_output, mask=(example_tokens != 0)), state = example_enc_state ) print(f'logits shape: (batch_size, t, output_vocab_size) {dec_result.logits.shape}') print(f'state shape: (batch_size, dec_units) {dec_state.shape}') # + executionInfo={"elapsed": 9, "status": "ok", "timestamp": 1628669416339, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="2N1-81R4lpE7" sampled_token = tf.random.categorical(dec_result.logits[:, 0, :], num_samples=1) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 290, "status": "ok", "timestamp": 1628669417948, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="Dcq0_bdqlrn9" outputId="ac1949bb-553a-4687-fee4-4657c7f9fc1b" vocab = np.array(output_text_processor.get_vocabulary()) first_word = vocab[sampled_token.numpy()] first_word[:5] # + executionInfo={"elapsed": 398, "status": "ok", "timestamp": 1628669424796, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="1UDw_FWGltet" dec_result, dec_state = decoder( DecoderInput(sampled_token, example_enc_output, mask=(example_tokens != 0)), state=dec_state) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 521, "status": "ok", "timestamp": 1628669426205, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="ErxSLE8Rlwnh" outputId="3fd2d5fb-da59-4458-a9b7-044a0d2b9740" sampled_token = 
tf.random.categorical(dec_result.logits[:, 0, :], num_samples=1) first_word = vocab[sampled_token.numpy()] first_word[:5] # + [markdown] id="FZYqCCMBmBPF" # #Training # + [markdown] id="k9MnQklHDKTP" # ## Define the loss function # + executionInfo={"elapsed": 324, "status": "ok", "timestamp": 1628669431357, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="uU7SaL2Fly5r" class MaskedLoss(tf.keras.losses.Loss): def __init__(self): self.name = 'masked_loss' self.loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction='none') def __call__(self, y_true, y_pred): shape_checker = ShapeChecker() shape_checker(y_true, ('batch', 't')) shape_checker(y_pred, ('batch', 't', 'logits')) # Calculate the loss for each item in the batch. loss = self.loss(y_true, y_pred) shape_checker(loss, ('batch', 't')) # Mask off the losses on padding. mask = tf.cast(y_true != 0, tf.float32) shape_checker(mask, ('batch', 't')) loss *= mask # Return the total. 
return tf.reduce_sum(loss) # + [markdown] id="fQoZAibJDN1D" # ## Implement the training step # + executionInfo={"elapsed": 318, "status": "ok", "timestamp": 1628669438479, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="MMjdwhELmGvo" #Implement the training step class TrainTranslator(tf.keras.Model): def __init__(self, embedding_dim, units, input_text_processor, output_text_processor, use_tf_function=True): super().__init__() # Build the encoder and decoder encoder = Encoder(input_text_processor.vocabulary_size(), embedding_dim, units) decoder = Decoder(output_text_processor.vocabulary_size(), embedding_dim, units) self.encoder = encoder self.decoder = decoder self.input_text_processor = input_text_processor self.output_text_processor = output_text_processor self.use_tf_function = use_tf_function self.shape_checker = ShapeChecker() def train_step(self, inputs): self.shape_checker = ShapeChecker() if self.use_tf_function: return self._tf_train_step(inputs) else: return self._train_step(inputs) # + executionInfo={"elapsed": 309, "status": "ok", "timestamp": 1628669442677, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="5u7nApk7mLXc" def _preprocess(self, input_text, target_text): self.shape_checker(input_text, ('batch',)) self.shape_checker(target_text, ('batch',)) # Convert the text to token IDs input_tokens = self.input_text_processor(input_text) target_tokens = self.output_text_processor(target_text) self.shape_checker(input_tokens, ('batch', 's')) self.shape_checker(target_tokens, ('batch', 't')) # Convert IDs to masks. 
input_mask = input_tokens != 0 self.shape_checker(input_mask, ('batch', 's')) target_mask = target_tokens != 0 self.shape_checker(target_mask, ('batch', 't')) return input_tokens, input_mask, target_tokens, target_mask # + executionInfo={"elapsed": 286, "status": "ok", "timestamp": 1628669449156, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="CD5pPKOemM-2" TrainTranslator._preprocess = _preprocess # + executionInfo={"elapsed": 443, "status": "ok", "timestamp": 1628669453027, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="BFv9BM7ZmOwH" def _train_step(self, inputs): input_text, target_text = inputs (input_tokens, input_mask, target_tokens, target_mask) = self._preprocess(input_text, target_text) max_target_length = tf.shape(target_tokens)[1] with tf.GradientTape() as tape: # Encode the input enc_output, enc_state = self.encoder(input_tokens) self.shape_checker(enc_output, ('batch', 's', 'enc_units')) self.shape_checker(enc_state, ('batch', 'enc_units')) # Initialize the decoder's state to the encoder's final state. # This only works if the encoder and decoder have the same number of # units. dec_state = enc_state loss = tf.constant(0.0) for t in tf.range(max_target_length-1): # Pass in two tokens from the target sequence: # 1. The current input to the decoder. # 2. The target the target for the decoder's next prediction. new_tokens = target_tokens[:, t:t+2] step_loss, dec_state = self._loop_step(new_tokens, input_mask, enc_output, dec_state) loss = loss + step_loss # Average the loss over all non padding tokens. 
average_loss = loss / tf.reduce_sum(tf.cast(target_mask, tf.float32)) # Apply an optimization step variables = self.trainable_variables gradients = tape.gradient(average_loss, variables) self.optimizer.apply_gradients(zip(gradients, variables)) # Return a dict mapping metric names to current value return {'batch_loss': average_loss} # + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1628669454191, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="mEhp3pq-mQXg" TrainTranslator._train_step = _train_step # + executionInfo={"elapsed": 308, "status": "ok", "timestamp": 1628669458371, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="0mtZvbsNmSS0" def _loop_step(self, new_tokens, input_mask, enc_output, dec_state): input_token, target_token = new_tokens[:, 0:1], new_tokens[:, 1:2] # Run the decoder one step. 
decoder_input = DecoderInput(new_tokens=input_token, enc_output=enc_output, mask=input_mask) dec_result, dec_state = self.decoder(decoder_input, state=dec_state) self.shape_checker(dec_result.logits, ('batch', 't1', 'logits')) self.shape_checker(dec_result.attention_weights, ('batch', 't1', 's')) self.shape_checker(dec_state, ('batch', 'dec_units')) # `self.loss` returns the total for non-padded tokens y = target_token y_pred = dec_result.logits step_loss = self.loss(y, y_pred) return step_loss, dec_state # + executionInfo={"elapsed": 571, "status": "ok", "timestamp": 1628669461535, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="GSTnBASXmUBU" TrainTranslator._loop_step = _loop_step # + [markdown] id="C9X290LiDguC" # ## Test the training step # + executionInfo={"elapsed": 350, "status": "ok", "timestamp": 1628669464150, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="h2gQJLSSmVOx" #Test the training step translator = TrainTranslator( embedding_dim, units, input_text_processor=input_text_processor, output_text_processor=output_text_processor, use_tf_function=False) # Configure the loss and optimizer translator.compile( optimizer=tf.optimizers.Adam(), loss=MaskedLoss(), metrics=["accuracy"] ) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 386, "status": "ok", "timestamp": 1628669469406, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="5m01xokNmYyL" outputId="8b997131-26a9-41ba-8cc4-6fbe19bd590e" np.log(output_text_processor.vocabulary_size()) # + colab={"base_uri": "https://localhost:8080/"} 
executionInfo={"elapsed": 64335, "status": "ok", "timestamp": 1628669535658, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="galSRTeSmaR3" outputId="d44d8ee4-30b3-4b2d-8b7b-77852fb59d42" # %%time for n in range(10): print(translator.train_step([example_input_batch, example_target_batch])) print() # + executionInfo={"elapsed": 39, "status": "ok", "timestamp": 1628669535659, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="niZzANjVmhf6" @tf.function(input_signature=[[tf.TensorSpec(dtype=tf.string, shape=[None]), tf.TensorSpec(dtype=tf.string, shape=[None])]]) def _tf_train_step(self, inputs): return self._train_step(inputs) # + executionInfo={"elapsed": 36, "status": "ok", "timestamp": 1628669535659, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="I4ZVo8qJmmpb" TrainTranslator._tf_train_step = _tf_train_step # + executionInfo={"elapsed": 35, "status": "ok", "timestamp": 1628669535660, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="OwQ66_LEmnJ6" translator.use_tf_function = True # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 4132, "status": "ok", "timestamp": 1628669539759, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="1vI2AzXfmo2U" outputId="8b932a0e-ba57-408c-a09a-b6c15b68a726" translator.train_step([example_input_batch, 
example_target_batch]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 9339, "status": "ok", "timestamp": 1628669549087, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="xZZRQfE-mqap" outputId="199a9c20-e21f-4db4-aa47-e0322536110f" # %%time for n in range(10): print(translator.train_step([example_input_batch, example_target_batch])) print() # + colab={"base_uri": "https://localhost:8080/", "height": 301} executionInfo={"elapsed": 79358, "status": "ok", "timestamp": 1628666503812, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="LjGp440ymsSH" outputId="d68cc8c1-e192-43f8-bc1f-bd8dfefd7bb2" losses = [] for n in range(100): print('.', end='') logs = translator.train_step([example_input_batch, example_target_batch]) losses.append(logs['batch_loss'].numpy()) print() plt.plot(losses) # + executionInfo={"elapsed": 27, "status": "ok", "timestamp": 1628669549088, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="aUInL1GsmxkD" train_translator = TrainTranslator( embedding_dim, units, input_text_processor=input_text_processor, output_text_processor=output_text_processor) # Configure the loss and optimizer train_translator.compile( optimizer=tf.optimizers.Adam(), loss=MaskedLoss(), metrics=["accuracy"] ) # + [markdown] id="k3cL6T4Tm2EV" # ## Train the model # + executionInfo={"elapsed": 290, "status": "ok", "timestamp": 1628669568722, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} 
id="NgjJU-tNm4Xk" class BatchLogs(tf.keras.callbacks.Callback): def __init__(self, key): self.key = key self.logs = [] def on_train_batch_end(self, n, logs): self.logs.append(logs[self.key]) batch_loss = BatchLogs('batch_loss') # + executionInfo={"elapsed": 42, "status": "ok", "timestamp": 1628666503816, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="bkhrOkBv6k_T" # # Splitting the dataset for training and testing. # def is_val(x, _): # return x % 4 == 0 # def is_train(x, y): # return not is_val(x, y) # recover = lambda x, y: y # # Split the dataset for training. # val_dataset = dataset.enumerate() \ # .filter(is_val) \ # .map(recover) # # Split the dataset for testing/validation. # train_dataset = dataset.enumerate() \ # .filter(is_train) \ # .map(recover) # + executionInfo={"elapsed": 317, "status": "ok", "timestamp": 1628669572767, "user": {"displayName": "Do salam", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="sln5WccxIb8W" from keras.callbacks import EarlyStopping my_callbacks = [ tf.keras.callbacks.EarlyStopping(monitor='val_loss',patience=5), # tf.keras.callbacks.ReduceLROnPlateau(factor=0.1, # min_lr = 0.01, # monitor = 'val_loss', # verbose = 1) # batch_loss = BatchLogs('batch_loss') ] # + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="B7ieq2elm71Y" # train_translator.fit(train_dataset,val_dataset, epochs=3,validation_steps=50,callbacks=my_callbacks) train_translator.fit(dataset,callbacks=my_callbacks,epochs=3) # + colab={"base_uri": "https://localhost:8080/", "height": 300} executionInfo={"elapsed": 17928, "status": "ok", "timestamp": 1628486570672, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="QYOXHm1Fm9ar" outputId="a8610700-e6e7-4872-dc41-ca739d66f730" plt.plot(batch_loss.logs) plt.ylim([0, 10]) plt.xlabel('Batch #') plt.ylabel('CE/token') # + [markdown] id="_EDWeKyGnExQ" # #Translate # + executionInfo={"elapsed": 45, "status": "ok", "timestamp": 1628625110280, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="LbrRu0eam_Za" class Translator(tf.Module): def __init__(self, encoder, decoder, input_text_processor, output_text_processor): self.encoder = encoder self.decoder = decoder self.input_text_processor = input_text_processor self.output_text_processor = output_text_processor self.output_token_string_from_index = ( tf.keras.layers.experimental.preprocessing.StringLookup( vocabulary=output_text_processor.get_vocabulary(), mask_token='', invert=True)) # The output should never generate padding, unknown, or start. 
index_from_string = tf.keras.layers.experimental.preprocessing.StringLookup( vocabulary=output_text_processor.get_vocabulary(), mask_token='') token_mask_ids = index_from_string(['', '[UNK]', '[START]']).numpy() token_mask = np.zeros([index_from_string.vocabulary_size()], dtype=np.bool) token_mask[np.array(token_mask_ids)] = True self.token_mask = token_mask self.start_token = index_from_string('[START]') self.end_token = index_from_string('[END]') # + executionInfo={"elapsed": 40, "status": "ok", "timestamp": 1628625110280, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="RzYJ_inQnVku" translator = Translator( encoder=train_translator.encoder, decoder=train_translator.decoder, input_text_processor=input_text_processor, output_text_processor=output_text_processor, ) # + [markdown] id="4_D7ID0DCXK-" # ##Convert token IDs to text # + executionInfo={"elapsed": 38, "status": "ok", "timestamp": 1628625110280, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="qPKIIhIYnWTf" #Convert token IDs to text def tokens_to_text(self, result_tokens): shape_checker = ShapeChecker() shape_checker(result_tokens, ('batch', 't')) result_text_tokens = self.output_token_string_from_index(result_tokens) shape_checker(result_text_tokens, ('batch', 't')) result_text = tf.strings.reduce_join(result_text_tokens, axis=1, separator=' ') shape_checker(result_text, ('batch')) result_text = tf.strings.strip(result_text) shape_checker(result_text, ('batch',)) return result_text # + executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1628625110281, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": 
"03587359084159229589"}, "user_tz": -180} id="2fiykfvTndg2" Translator.tokens_to_text = tokens_to_text # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 37, "status": "ok", "timestamp": 1628625110282, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="OQerZ5vSndzm" outputId="a34dfe24-8c97-429d-9e2a-a67260ded83e" example_output_tokens = tf.random.uniform( shape=[5, 2], minval=0, dtype=tf.int64, maxval=output_text_processor.vocabulary_size()) translator.tokens_to_text(example_output_tokens).numpy() # + [markdown] id="BdC2aAtQCcHw" # ## Sample from the decoder's predictions # + executionInfo={"elapsed": 32, "status": "ok", "timestamp": 1628625110283, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="9QE1GMVHnf2M" #Sample from the decoder's predictions def sample(self, logits, temperature): shape_checker = ShapeChecker() # 't' is usually 1 here. shape_checker(logits, ('batch', 't', 'vocab')) shape_checker(self.token_mask, ('vocab',)) token_mask = self.token_mask[tf.newaxis, tf.newaxis, :] shape_checker(token_mask, ('batch', 't', 'vocab'), broadcast=True) # Set the logits for all masked tokens to -inf, so they are never chosen. 
logits = tf.where(self.token_mask, -np.inf, logits) if temperature == 0.0: new_tokens = tf.argmax(logits, axis=-1) else: logits = tf.squeeze(logits, axis=1) new_tokens = tf.random.categorical(logits/temperature, num_samples=1) shape_checker(new_tokens, ('batch', 't')) return new_tokens # + executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1628625110283, "user": {"displayName": "Do salam", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="lQqz8N5onlPA" Translator.sample = sample # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 31, "status": "ok", "timestamp": 1628625110284, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="zaB9gxKhnnIo" outputId="c9e4dc25-4073-4d61-ee4a-98a907fccbaa" example_logits = tf.random.normal([5, 1, output_text_processor.vocabulary_size()]) example_output_tokens = translator.sample(example_logits, temperature=1.0) example_output_tokens # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 25, "status": "ok", "timestamp": 1628625110284, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="iISJujhknouG" outputId="9c253365-7141-47bf-cce0-4750f4d8d0d7" example_logits = tf.random.normal([5, 1, output_text_processor.vocabulary_size()]) example_output_tokens = translator.sample(example_logits, temperature=1.0) example_output_tokens # + [markdown] id="9x84X4ndCjEs" # ## Implement the translation loop # + executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1628625110285, "user": {"displayName": "Do salam", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="BzU_z3QWnqP6" #Implement the translation loop def translate_unrolled(self, input_text, *, max_length=50, return_attention=True, temperature=1.0): batch_size = tf.shape(input_text)[0] input_tokens = self.input_text_processor(input_text) enc_output, enc_state = self.encoder(input_tokens) dec_state = enc_state new_tokens = tf.fill([batch_size, 1], self.start_token) result_tokens = [] attention = [] done = tf.zeros([batch_size, 1], dtype=tf.bool) for _ in range(max_length): dec_input = DecoderInput(new_tokens=new_tokens, enc_output=enc_output, mask=(input_tokens!=0)) dec_result, dec_state = self.decoder(dec_input, state=dec_state) attention.append(dec_result.attention_weights) new_tokens = self.sample(dec_result.logits, temperature) # If a sequence produces an `end_token`, set it `done` done = done | (new_tokens == self.end_token) # Once a sequence is done it only produces 0-padding. new_tokens = tf.where(done, tf.constant(0, dtype=tf.int64), new_tokens) # Collect the generated tokens result_tokens.append(new_tokens) if tf.executing_eagerly() and tf.reduce_all(done): break # Convert the list of generates token ids to a list of strings. 
result_tokens = tf.concat(result_tokens, axis=-1) result_text = self.tokens_to_text(result_tokens) if return_attention: attention_stack = tf.concat(attention, axis=1) return {'text': result_text, 'attention': attention_stack} else: return {'text': result_text} # + executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1628625110285, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="85ub2TtpnuKU" Translator.translate = translate_unrolled # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 489, "status": "ok", "timestamp": 1628625110755, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="2OfJ09sBn0P9" outputId="1b6695ac-a944-4290-bcab-1b16183a7ec6" # %%time input_text = tf.constant([ 'بكري يا معرص يا عار الصعيد ملعون ضهر ابوك يا عرص', # "It's really cold here." 
'تبا لكم ولأشكالكم', # "This is my life."" ]) result = translator.translate( input_text = input_text) print(result['text'][0].numpy().decode()) print(result['text'][1].numpy().decode()) print() # + executionInfo={"elapsed": 3, "status": "ok", "timestamp": 1628625110756, "user": {"displayName": "Do salam", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="0Ss7K3-coG_G" @tf.function(input_signature=[tf.TensorSpec(dtype=tf.string, shape=[None])]) def tf_translate(self, input_text): return self.translate(input_text) Translator.tf_translate = tf_translate # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 18155, "status": "ok", "timestamp": 1628625128908, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="OKtQmQfJofI8" outputId="d602d767-eb27-44cb-d4ac-f867c8a36c1e" # %%time result = translator.tf_translate( input_text = input_text) print(result['text'][0].numpy().decode()) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 28, "status": "ok", "timestamp": 1628625128909, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="WLlXsnUKog8e" outputId="03f4caa2-a670-4ed5-95fe-21f5c7115b34" # %%time result = translator.tf_translate( input_text = input_text) print(result['text'][0].numpy().decode()) print(result['text'][1].numpy().decode()) print() # + [markdown] id="7omXkyUjCrsl" # ## Visualize the process # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 24, "status": "ok", "timestamp": 1628625128909, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="uw0i5Yxbut55" outputId="d7c5cdc5-184c-4262-c5fc-5972dffa9ec2" a = result['attention'][0] print(np.sum(a, axis=-1)) # + colab={"base_uri": "https://localhost:8080/", "height": 265} executionInfo={"elapsed": 20, "status": "ok", "timestamp": 1628625128912, "user": {"displayName": "Do salam", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="TWf6Gfs3u0Hj" outputId="02234430-cd59-4953-ade0-88377be6b144" _ = plt.bar(range(len(a[0, :])), a[0, :]) # + colab={"base_uri": "https://localhost:8080/", "height": 285} executionInfo={"elapsed": 18, "status": "ok", "timestamp": 1628625128912, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="-BZyZG0Pu2eL" outputId="14da74eb-0bf5-4c80-ee63-26f47fa252f2" plt.imshow(np.array(a), vmin=0.0) # + executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1628625128913, "user": {"displayName": "Do salam", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="SBFtpVOuu4QO" def plot_attention(attention, sentence, predicted_sentence): sentence = tf_lower_and_split_punct(sentence).numpy().decode().split() predicted_sentence = predicted_sentence.numpy().decode().split() + ['[END]'] fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(1, 1, 1) attention = attention[:len(predicted_sentence), :len(sentence)] ax.matshow(attention, cmap='viridis', vmin=0.0) fontdict = {'fontsize': 14} ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90) ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict) 
ax.xaxis.set_major_locator(ticker.MultipleLocator(1)) ax.yaxis.set_major_locator(ticker.MultipleLocator(1)) ax.set_xlabel('Input text') ax.set_ylabel('Output text') plt.suptitle('Attention weights') # + [markdown] id="JBLEv87QCzuP" # ## Labeled attention plots # + colab={"base_uri": "https://localhost:8080/", "height": 517} executionInfo={"elapsed": 826, "status": "ok", "timestamp": 1628625129727, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="RHq8Ps2Zu7wp" outputId="715e71af-cd45-4afb-b24e-b81f63be22d8" i=0 plot_attention(result['attention'][i], input_text[i], result['text'][i]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 16, "status": "ok", "timestamp": 1628625129728, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="CMEIazsuu-EA" outputId="71e012c6-cc34-440f-8bf1-bcf0221e6985" # %%time three_input_text = tf.constant([ # This is my life. 'يا راس هرم الفساد ف مصر معلش انت مش عارف تشغلهم', # Are they still home? 'برجل ولا يوصف بك كرجل ولا من اتباع الرجال انت أنت حثالة ومن أتباع الحثالة يا خسيس', # Try to find out.' 'كسمك يا بكاري يا جساما يا ابن المنايك', 'فين يا ابن القحبه يا معرص السودان مش هيعملوا اَي شي من مخططكم هاهاهاهاها و لسه يا خونه', 'يا حقير يا واطي يا داعر يا روكي لوليتش قاعد يلعب مصارعة 5 كورة يضرب فيها وما ياخذ عليها كرت ولا حتى فاول !!!!!!!!!!!!!!!!!!!!!' 
]) result = translator.tf_translate(three_input_text) for tr in result['text']: print(tr.numpy().decode()) print() # + colab={"base_uri": "https://localhost:8080/", "height": 35} executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1628625129728, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="9bwmT-1gvL_1" outputId="6b11bf75-bef0-4265-9a44-4ab1cb16718e" result['text'][1].numpy().decode() # + colab={"base_uri": "https://localhost:8080/", "height": 625} executionInfo={"elapsed": 10, "status": "ok", "timestamp": 1628625129729, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="-76ImO2FvZtL" outputId="1d1845da-4076-4de6-c874-bf4ba79e2052" i = 0 plot_attention(result['attention'][i], three_input_text[i], result['text'][i]) # + colab={"base_uri": "https://localhost:8080/", "height": 536} executionInfo={"elapsed": 1026, "status": "ok", "timestamp": 1628625130746, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="Zc9CaXtovzPy" outputId="b7c147ac-9823-4241-9c24-b42c622c482d" i = 1 plot_attention(result['attention'][i], three_input_text[i], result['text'][i]) # + colab={"base_uri": "https://localhost:8080/", "height": 584} executionInfo={"elapsed": 19, "status": "ok", "timestamp": 1628625130747, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="SfeB3WrWv2P8" outputId="2e4c5931-ce86-4076-aeb0-1758a65a4071" i = 2 plot_attention(result['attention'][i], three_input_text[i], result['text'][i]) # + 
colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 19, "status": "ok", "timestamp": 1628625130748, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="rOuVZASrv7KK" outputId="5e72ceed-23d9-4d31-9aa6-9028f0d51bce" long_input_text = tf.constant([inp[-1]]) import textwrap print('Expected output:\n', '\n'.join(textwrap.wrap(targ[-1]))) # + colab={"base_uri": "https://localhost:8080/", "height": 424} executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1628625130748, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="hHfoKvcHv9uH" outputId="0d939a69-b18d-4f77-cf68-ab35469925e2" result = translator.tf_translate(long_input_text) i = 0 plot_attention(result['attention'][i], long_input_text[i], result['text'][i]) _ = plt.suptitle('This never works') # + [markdown] id="tMy6QVQY2Ui8" # # Evalution Training (BLUE SCORE) # + executionInfo={"elapsed": 11, "status": "ok", "timestamp": 1628625130749, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="0xk6nVZl2iLq" # from nltk.translate.bleu_score import corpus_bleu # predictedTraining = [] # # for tr in range(len(inp)): # for i in range(1593): # input_text = tf.constant(inp[:1592]) # # test_input_text = tf.constant(inp_test[:5]) # # result = translator.tf_translate(input_text) # result = translator.tf_translate(input_text[:1592]) # result= result['text'][i].numpy().decode() # predicted_train_list = list(result.split("\n")) # predictedTraining.append(predicted_train_list) # bleu_dic = {} # bleu_dic['1-grams'] = corpus_bleu(targ, predictedTraining, weights=(1.0, 0, 0, 0)) # 
bleu_dic['1-2-grams'] = corpus_bleu(targ, predictedTraining, weights=(0.5, 0.5, 0, 0)) # bleu_dic['1-3-grams'] = corpus_bleu(targ, predictedTraining, weights=(0.3, 0.3, 0.3, 0)) # bleu_dic['1-4-grams'] = corpus_bleu(targ, predictedTraining, weights=(0.25, 0.25, 0.25, 0.25)) # res = "\n\n\n".join("Input: {} \nActual: {} \nPredicted: {}".format(x, y,z) for x, y, z in zip(inp, targ, predictedTraining)) # print(" \n-------------\n BLUE SCORE : \n-------------\n ",bleu_dic, "\n\n\n-------------\n") # print(res , "\n\n\n-------------\n") # + [markdown] id="lYZeDN7Etjwz" # # Evaluation (Blue score) # + [markdown] id="tiUd3P4h-zk2" # # # * Load test data # * Translate each setance # * Append translates setances to array (new columne) # * For loop # * Calculate corpus_bleu for pred and actual # # # # # + executionInfo={"elapsed": 405, "status": "ok", "timestamp": 1628625131144, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="SBd9kqMe8PSX" test_file = pathlib.Path('/content/drive/MyDrive/MasterThesis/paraphrasing/paraphrasingTest.tsv') targ_test, inp_test = load_data(test_file) test_dataset = tf.data.Dataset.from_tensor_slices((inp_test, targ_test)).shuffle(BUFFER_SIZE) test_dataset = test_dataset.batch(BATCH_SIZE) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1628625131144, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="qHQSmsvSJFW-" outputId="54b71fff-8be2-41d4-eb76-ee22cd99b94c" for example_input_batch, example_target_batch in test_dataset.take(1): for i in range(5): print(example_input_batch.numpy()[i].decode('utf-8')) # print() print(example_target_batch.numpy()[i].decode('utf-8')) print() break # + colab={"base_uri": 
"https://localhost:8080/"} executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1628625131145, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="sLJpJfixLF5O" outputId="0aa72d9e-077c-4d5a-f4f5-d5453d26d3de" print(inp_test[2:3]) # + id="7MRorIooKEQM" test_input_text = tf.constant(inp_test[:402]) test_result = translator.tf_translate(test_input_text) for tr in test_result['text']: print(tr.numpy().decode()) # list(tr.split("\n")) print() # + [markdown] id="vC7tNSuHQuCv" # بدي احطهم بفنكشن واشوف شو النتائج # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 493016, "status": "ok", "timestamp": 1628627682493, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="zkhYrEApI_IW" outputId="db04cd86-808f-4771-e49b-96704a19364c" from nltk.translate.bleu_score import corpus_bleu from nltk.translate.bleu_score import sentence_bleu def bleu_score(): predicted = [] for tr in range(len(inp_test)): # for i in range(5): test_input_text = tf.constant(inp_test) # test_input_text = tf.constant(inp_test[:5]) test_result = translator.tf_translate(test_input_text) # test_result = translator.tf_translate(test_input_text[:5]) test_result= test_result['text'][tr].numpy().decode() predicted_list = list(test_result.split("\n")) predicted.append(predicted_list) score = sentence_bleu(targ_test[tr], predicted[tr], weights=(1, 0, 0, 0)) print("blue score : ",score) bleu_dic = {} # bleu_dic['1-grams'] = corpus_bleu(targ_test[:5], predicted, weights=(1.0, 0, 0, 0)) # bleu_dic['1-2-grams'] = corpus_bleu(targ_test[:5], predicted, weights=(0.5, 0.5, 0, 0)) # bleu_dic['1-3-grams'] = corpus_bleu(targ_test[:5], predicted, weights=(0.3, 0.3, 0.3, 0)) # bleu_dic['1-4-grams'] = 
corpus_bleu(targ_test[:5], predicted, weights=(0.25, 0.25, 0.25, 0.25)) bleu_dic['1-grams'] = corpus_bleu(targ_test, predicted, weights=(1.0, 0, 0, 0)) bleu_dic['1-2-grams'] = corpus_bleu(targ_test, predicted, weights=(0.5, 0.5, 0, 0)) bleu_dic['1-3-grams'] = corpus_bleu(targ_test, predicted, weights=(0.3, 0.3, 0.3, 0)) bleu_dic['1-4-grams'] = corpus_bleu(targ_test, predicted, weights=(0.25, 0.25, 0.25, 0.25)) res = "\n\n\n".join("Input: {} \nActual: {} \nPredicted: {}".format(x, y,z) for x, y, z in zip(inp_test, targ_test, predicted)) print(" \n-------------\n BLUE SCORE : \n-------------\n ",bleu_dic, "\n\n\n-------------\n") print(res , "\n\n\n-------------\n") return bleu_dic bleu_test = bleu_score() bleu_test # + colab={"base_uri": "https://localhost:8080/", "height": 281} executionInfo={"elapsed": 453, "status": "ok", "timestamp": 1628626878842, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="0fLImL-jt2zQ" outputId="3eed27ce-607c-4356-c216-611e46d6d277" plt.bar(x = bleu_test.keys(), height = bleu_test.values()) plt.title("BLEU Score with the test set") plt.ylim((0,1)) plt.show() # + executionInfo={"elapsed": 295, "status": "ok", "timestamp": 1628626887904, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="DEpYB664t16d" # plt.bar(x = bleu_train.keys(), height = bleu_train.values()) # plt.title("BLEU Score with the training set") # plt.ylim((0,1)) # plt.show() # + executionInfo={"elapsed": 3, "status": "ok", "timestamp": 1628626889109, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgP0GyFxHH-3NAPSZ_ethp4uMqaN8pUWhOxcO0NDw=s64", "userId": "03587359084159229589"}, "user_tz": -180} id="30HhUSSa2EoD"
Experiments/HateSpeechMaskingModels/otherExperiments/tensorflow/MT - Paraphrasing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # An on-line movie recommending service using Spark & Flask - Building the recommender

# This notebook explains how to use the [MovieLens dataset](http://grouplens.org/datasets/movielens/) to build a movie recommender using [collaborative filtering](https://en.wikipedia.org/wiki/Recommender_system#Collaborative_filtering) with [Spark's Alternating Least Squares](https://spark.apache.org/docs/latest/mllib-collaborative-filtering.html) implementation. It is organised in two parts. The first one is about getting and parsing movies and ratings data into Spark RDDs. The second is about building and using the recommender and persisting it for later use in our on-line recommender system.

# This tutorial can be used independently to build a movie recommender model based on the MovieLens dataset. Most of the code in this first part, about how to use ALS with the public MovieLens dataset, comes from my solution to one of the exercises proposed in the [CS100.1x Introduction to Big Data with Apache Spark by <NAME> on edX](https://www.edx.org/course/introduction-big-data-apache-spark-uc-berkeleyx-cs100-1x), which is also [**publicly available since 2014 at Spark Summit**](https://databricks-training.s3.amazonaws.com/movie-recommendation-with-mllib.html). There I've added minor modifications to use a larger dataset, as well as code to store and reload the model for later use.

# ## Getting and processing the data

# In order to build an on-line movie recommender using Spark, we need to have our model data as preprocessed as possible. Parsing the dataset and building the model every time a new recommendation needs to be done is not the best of strategies.

# The list of tasks we can pre-compute includes:
#
# - Loading and parsing the dataset.
Persisting the resulting RDD for later use. # - Building the recommender model using the complete dataset. Persist the dataset for later use. # # This notebook explains the first of these tasks. # ### File download # GroupLens Research has collected and made available rating data sets from the [MovieLens web site](http://movielens.org). The data sets were collected over various periods of time, depending on the size of the set. They can be found [here](http://grouplens.org/datasets/movielens/). # In our case, we will use the latest datasets: # # - Small: 100,000 ratings and 2,488 tag applications applied to 8,570 movies by 706 users. Last updated 4/2015. # - Full: 21,000,000 ratings and 470,000 tag applications applied to 27,000 movies by 230,000 users. Last updated 4/2015. # complete_dataset_url = 'http://files.grouplens.org/datasets/movielens/ml-latest.zip' small_dataset_url = 'http://files.grouplens.org/datasets/movielens/ml-latest-small.zip' # We also need to define download locations. # + import os datasets_path = os.path.join('..', 'datasets') complete_dataset_path = os.path.join(datasets_path, 'ml-latest.zip') small_dataset_path = os.path.join(datasets_path, 'ml-latest-small.zip') # - # Now we can proceed with both downloads. # + import urllib small_f = urllib.urlretrieve (small_dataset_url, small_dataset_path) complete_f = urllib.urlretrieve (complete_dataset_url, complete_dataset_path) # - # Both of them are zip files containing a folder with ratings, movies, etc. We need to extract them into its individual folders so we can use each file later on. # + import zipfile with zipfile.ZipFile(small_dataset_path, "r") as z: z.extractall(datasets_path) with zipfile.ZipFile(complete_dataset_path, "r") as z: z.extractall(datasets_path) # - # ### Loading and parsing datasets # No we are ready to read in each of the files and create an RDD consisting of parsed lines. 
# Each line in the ratings dataset (`ratings.csv`) is formatted as: # # `userId,movieId,rating,timestamp` # # Each line in the movies (`movies.csv`) dataset is formatted as: # # `movieId,title,genres` # # Were *genres* has the format: # # `Genre1|Genre2|Genre3...` # # The tags file (`tags.csv`) has the format: # # `userId,movieId,tag,timestamp` # # And finally, the `links.csv` file has the format: # # `movieId,imdbId,tmdbId` # The format of these files is uniform and simple, so we can use Python [`split()`](https://docs.python.org/2/library/stdtypes.html#str.split) to parse their lines once they are loaded into RDDs. Parsing the movies and ratings files yields two RDDs: # # * For each line in the ratings dataset, we create a tuple of `(UserID, MovieID, Rating)`. We drop the *timestamp* because we do not need it for this recommender. # * For each line in the movies dataset, we create a tuple of `(MovieID, Title)`. We drop the *genres* because we do not use them for this recommender. # So let's load the raw ratings data. We need to filter out the header, included in each file. # + small_ratings_file = os.path.join(datasets_path, 'ml-latest-small', 'ratings.csv') small_ratings_raw_data = sc.textFile(small_ratings_file) small_ratings_raw_data_header = small_ratings_raw_data.take(1)[0] # - # Now we can parse the raw data into a new RDD. small_ratings_data = small_ratings_raw_data.filter(lambda line: line!=small_ratings_raw_data_header)\ .map(lambda line: line.split(",")).map(lambda tokens: (tokens[0],tokens[1],tokens[2])).cache() # For illustrative purposes, we can take the first few lines of our RDD to see the result. In the final script we don't call any Spark action (e.g. `take`) until needed, since they trigger actual computations in the cluster. small_ratings_data.take(3) # We proceed in a similar way with the `movies.csv` file. 
# + small_movies_file = os.path.join(datasets_path, 'ml-latest-small', 'movies.csv') small_movies_raw_data = sc.textFile(small_movies_file) small_movies_raw_data_header = small_movies_raw_data.take(1)[0] small_movies_data = small_movies_raw_data.filter(lambda line: line!=small_movies_raw_data_header)\ .map(lambda line: line.split(",")).map(lambda tokens: (tokens[0],tokens[1])).cache() small_movies_data.take(3) # - # The following sections introduce *Collaborative Filtering* and explain how to use *Spark MLlib* to build a recommender model. We will close the tutorial by explaining how a model such this is used to make recommendations, and how to persist it for later use (e.g. in our Python/flask web-service). # ## Collaborative Filtering # In Collaborative filtering we make predictions (filtering) about the interests of a user by collecting preferences or taste information from many users (collaborating). The underlying assumption is that if a user A has the same opinion as a user B on an issue, A is more likely to have B's opinion on a different issue x than to have the opinion on x of a user chosen randomly. # The image below (from [Wikipedia](https://en.wikipedia.org/?title=Collaborative_filtering)) shows an example of collaborative filtering. At first, people rate different items (like videos, images, games). Then, the system makes predictions about a user's rating for an item not rated yet. The new predictions are built upon the existing ratings of other users with similar ratings with the active user. In the image, the system predicts that the user will not like the video. # ![collaborative filtering](https://upload.wikimedia.org/wikipedia/commons/5/52/Collaborative_filtering.gif) # Spark MLlib library for Machine Learning provides a [Collaborative Filtering](https://spark.apache.org/docs/latest/mllib-collaborative-filtering.html) implementation by using [Alternating Least Squares](http://dl.acm.org/citation.cfm?id=1608614). 
The implementation in MLlib has the following parameters: # # - numBlocks is the number of blocks used to parallelize computation (set to -1 to auto-configure). # - rank is the number of latent factors in the model. # - iterations is the number of iterations to run. # - lambda specifies the regularization parameter in ALS. # - implicitPrefs specifies whether to use the explicit feedback ALS variant or one adapted for implicit feedback data. # - alpha is a parameter applicable to the implicit feedback variant of ALS that governs the baseline confidence in preference observations. # # ## Selecting ALS parameters using the small dataset # In order to determine the best ALS parameters, we will use the small dataset. We need first to split it into train, validation, and test datasets. training_RDD, validation_RDD, test_RDD = small_ratings_data.randomSplit([6, 2, 2], seed=0L) validation_for_predict_RDD = validation_RDD.map(lambda x: (x[0], x[1])) test_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1])) # Now we can proceed with the training phase. # + from pyspark.mllib.recommendation import ALS import math seed = 5L iterations = 10 regularization_parameter = 0.1 ranks = [4, 8, 12] errors = [0, 0, 0] err = 0 tolerance = 0.02 min_error = float('inf') best_rank = -1 best_iteration = -1 for rank in ranks: model = ALS.train(training_RDD, rank, seed=seed, iterations=iterations, lambda_=regularization_parameter) predictions = model.predictAll(validation_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2])) rates_and_preds = validation_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions) error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean()) errors[err] = error err += 1 print 'For rank %s the RMSE is %s' % (rank, error) if error < min_error: min_error = error best_rank = rank print 'The best model was trained with rank %s' % best_rank # - # But let's explain this a little bit. First, let's have a look at how our predictions look. 
predictions.take(3) # Basically we have the UserID, the MovieID, and the Rating, as we have in our ratings dataset. In this case the predictions third element, the rating for that movie and user, is the predicted by our ALS model. # Then we join these with our validation data (the one that includes ratings) and the result looks as follows: rates_and_preds.take(3) # To that, we apply a squared difference and the we use the `mean()` action to get the MSE and apply `sqrt`. # Finally we test the selected model. # + model = ALS.train(training_RDD, best_rank, seed=seed, iterations=iterations, lambda_=regularization_parameter) predictions = model.predictAll(test_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2])) rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions) error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean()) print 'For testing data the RMSE is %s' % (error) # - # ## Using the complete dataset to build the final model # In order to build our recommender model, we will use the complete dataset. Therefore, we need to process it the same way we did with the small dataset. # + # Load the complete dataset file complete_ratings_file = os.path.join(datasets_path, 'ml-latest', 'ratings.csv') complete_ratings_raw_data = sc.textFile(complete_ratings_file) complete_ratings_raw_data_header = complete_ratings_raw_data.take(1)[0] # Parse complete_ratings_data = complete_ratings_raw_data.filter(lambda line: line!=complete_ratings_raw_data_header)\ .map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),int(tokens[1]),float(tokens[2]))).cache() print "There are %s recommendations in the complete dataset" % (complete_ratings_data.count()) # - # Now we are ready to train the recommender model. 
# + training_RDD, test_RDD = complete_ratings_data.randomSplit([7, 3], seed=0L) complete_model = ALS.train(training_RDD, best_rank, seed=seed, iterations=iterations, lambda_=regularization_parameter) # - # Now we test on our testing set. # + test_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1])) predictions = complete_model.predictAll(test_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2])) rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions) error = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean()) print 'For testing data the RMSE is %s' % (error) # - # We can see how we got a more accurate recommender when using a much larger dataset. # ## How to make recommendations # Although we aim at building an on-line movie recommender, now that we know how to have our recommender model ready, we can give it a try providing some movie recommendations. This will help us coiding the recommending engine later on when building the web service, and will explain how to use the model in any other circumstances. # When using collaborative filtering, getting recommendations is not as simple as predicting for the new entries using a previously generated model. Instead, we need to train again the model but including the new user preferences in order to compare them with other users in the dataset. That is, the recommender needs to be trained every time we have new user ratings (although a single model can be used by multiple users of course!). This makes the process expensive, and it is one of the reasons why scalability is a problem (and Spark a solution!). Once we have our model trained, we can reuse it to obtain top recomendations for a given user or an individual rating for a particular movie. These are less costly operations than training the model itself. # So let's first load the movies complete file for later use. 
# + complete_movies_file = os.path.join(datasets_path, 'ml-latest', 'movies.csv') complete_movies_raw_data = sc.textFile(complete_movies_file) complete_movies_raw_data_header = complete_movies_raw_data.take(1)[0] # Parse complete_movies_data = complete_movies_raw_data.filter(lambda line: line!=complete_movies_raw_data_header)\ .map(lambda line: line.split(",")).map(lambda tokens: (int(tokens[0]),tokens[1],tokens[2])).cache() complete_movies_titles = complete_movies_data.map(lambda x: (int(x[0]),x[1])) print "There are %s movies in the complete dataset" % (complete_movies_titles.count()) # - # Another thing we want to do, is give recommendations of movies with a certain minimum number of ratings. For that, we need to count the number of ratings per movie. # + def get_counts_and_averages(ID_and_ratings_tuple): nratings = len(ID_and_ratings_tuple[1]) return ID_and_ratings_tuple[0], (nratings, float(sum(x for x in ID_and_ratings_tuple[1]))/nratings) movie_ID_with_ratings_RDD = (complete_ratings_data.map(lambda x: (x[1], x[2])).groupByKey()) movie_ID_with_avg_ratings_RDD = movie_ID_with_ratings_RDD.map(get_counts_and_averages) movie_rating_counts_RDD = movie_ID_with_avg_ratings_RDD.map(lambda x: (x[0], x[1][0])) # - # ### Adding new user ratings # Now we need to rate some movies for the new user. We will put them in a new RDD and we will use the user ID 0, that is not assigned in the MovieLens dataset. Check the [dataset](http://grouplens.org/datasets/movielens/) movies file for ID to Tittle assignment (so you know what movies are you actually rating). # + new_user_ID = 0 # The format of each line is (userID, movieID, rating) new_user_ratings = [ (0,260,9), # Star Wars (1977) (0,1,8), # Toy Story (1995) (0,16,7), # Casino (1995) (0,25,8), # Leaving Las Vegas (1995) (0,32,9), # Twelve Monkeys (a.k.a. 
12 Monkeys) (1995) (0,335,4), # Flintstones, The (1994) (0,379,3), # Timecop (1994) (0,296,7), # Pulp Fiction (1994) (0,858,10) , # Godfather, The (1972) (0,50,8) # Usual Suspects, The (1995) ] new_user_ratings_RDD = sc.parallelize(new_user_ratings) print 'New user ratings: %s' % new_user_ratings_RDD.take(10) # - # Now we add them to the data we will use to train our recommender model. We use Spark's `union()` transformation for this. complete_data_with_new_ratings_RDD = complete_ratings_data.union(new_user_ratings_RDD) # And finally we train the ALS model using all the parameters we selected before (when using the small dataset). # + from time import time t0 = time() new_ratings_model = ALS.train(complete_data_with_new_ratings_RDD, best_rank, seed=seed, iterations=iterations, lambda_=regularization_parameter) tt = time() - t0 print "New model trained in %s seconds" % round(tt,3) # - # It took some time. We will need to repeat that every time a user add new ratings. Ideally we will do this in batches, and not for every single rating that comes into the system for every user. # ### Getting top recommendations # Let's now get some recommendations! For that we will get an RDD with all the movies the new user hasn't rated yet. We will them together with the model to predict ratings. # + new_user_ratings_ids = map(lambda x: x[1], new_user_ratings) # get just movie IDs # keep just those not on the ID list (thanks <NAME> for spotting the error!) new_user_unrated_movies_RDD = (complete_movies_data.filter(lambda x: x[0] not in new_user_ratings_ids).map(lambda x: (new_user_ID, x[0]))) # Use the input RDD, new_user_unrated_movies_RDD, with new_ratings_model.predictAll() to predict new ratings for the movies new_user_recommendations_RDD = new_ratings_model.predictAll(new_user_unrated_movies_RDD) # - # We have our recommendations ready. Now we can print out the 25 movies with the highest predicted ratings. 
# And join them with the movies RDD to get the titles, and the ratings count in order to keep only movies with a minimum number of ratings. First we will do the join and see what the result looks like.

# Transform new_user_recommendations_RDD into pairs of the form (Movie ID, Predicted Rating)
new_user_recommendations_rating_RDD = new_user_recommendations_RDD.map(lambda x: (x.product, x.rating))
new_user_recommendations_rating_title_and_count_RDD = \
    new_user_recommendations_rating_RDD.join(complete_movies_titles).join(movie_rating_counts_RDD)
new_user_recommendations_rating_title_and_count_RDD.take(3)

# So we need to flat this down a bit in order to have `(Title, Rating, Ratings Count)`.

# r is (movieID, ((rating, title), count)) after the double join.
new_user_recommendations_rating_title_and_count_RDD = \
    new_user_recommendations_rating_title_and_count_RDD.map(lambda r: (r[1][0][1], r[1][0][0], r[1][1]))

# Finally, get the highest rated recommendations for the new user, filtering out movies with less than 25 ratings.

# +
top_movies = new_user_recommendations_rating_title_and_count_RDD.filter(lambda r: r[2]>=25).takeOrdered(25, key=lambda x: -x[1])

# The filter keeps movies with 25 ratings or more, so say "at least 25".
print ('TOP recommended movies (with at least 25 reviews):\n%s' %
        '\n'.join(map(str, top_movies)))
# -

# ### Getting individual ratings

# Another useful use case is getting the predicted rating for a particular movie for a given user. The process is similar to the previous retrieval of top recommendations but, instead of using `predictAll` with every single movie the user hasn't rated yet, we will just pass the method a single entry with the movie we want to predict the rating for.

my_movie = sc.parallelize([(0, 500)]) # Quiz Show (1994)
# BUG FIX: the original called predictAll(new_user_unrated_movies_RDD),
# predicting every unrated movie and returning an arbitrary one via take(1).
# Predict only the single (user, movie) pair built above.
individual_movie_rating_RDD = new_ratings_model.predictAll(my_movie)
individual_movie_rating_RDD.take(1)

# Not very likely that the new user will like that one... Obviously we can include as many movies as we need in that list!
# ## Persisting the model # Optionally, we might want to persist the base model for later use in our on-line recommendations. Although a new model is generated everytime we have new user ratings, it might be worth it to store the current one, in order to save time when starting up the server, etc. We might also save time if we persist some of the RDDs we have generated, specially those that took longer to process. For example, the following lines save and load a ALS model. # + from pyspark.mllib.recommendation import MatrixFactorizationModel model_path = os.path.join('..', 'models', 'movie_lens_als') # Save and load model model.save(sc, model_path) same_model = MatrixFactorizationModel.load(sc, model_path) # - # Among other things, you will see in your filesystem that there are folder with product and user data into [Parquet](https://parquet.apache.org/) format files. # ## Genre and other fields # We havent used the `genre` and `timestamp` fields in order to simplify the transformations and the whole tutorial. Incorporating them doesn't reprensent any problem. A good use could be filtering recommendations by any of them (e.g. recommendations by genre, or recent recommendations) like we have done with the minimum number of ratings.
movie/server/notebooks/building-recommender.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: MindSpore # language: python # name: mindspore # --- # # 加载文本数据集 # # `Ascend` `GPU` `CPU` `数据准备` # # [![在线运行](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_modelarts.png)](https://authoring-modelarts-cnnorth4.huaweicloud.com/console/lab?share-url-b64=aHR0cHM6Ly9taW5kc3BvcmUtd2Vic2l0ZS5vYnMuY24tbm9ydGgtNC5teWh1YXdlaWNsb3VkLmNvbS9ub3RlYm9vay9tYXN0ZXIvcHJvZ3JhbW1pbmdfZ3VpZGUvemhfY24vbWluZHNwb3JlX2xvYWRfZGF0YXNldF90ZXh0LmlweW5i&imageid=65f636a0-56cf-49df-b941-7d2a07ba8c8c)&emsp; # [![下载Notebook](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_notebook.png)](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/zh_cn/mindspore_load_dataset_text.ipynb)&emsp; # [![下载样例代码](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_download_code.png)](https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/master/programming_guide/zh_cn/mindspore_load_dataset_text.py)&emsp; # [![查看源文件](https://gitee.com/mindspore/docs/raw/master/resource/_static/logo_source.png)](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/programming_guide/source_zh_cn/load_dataset_text.ipynb) # ## 概述 # # MindSpore提供的`mindspore.dataset`模块可以帮助用户构建数据集对象,分批次地读取文本数据。同时,在各个数据集类中还内置了数据处理和数据分词算子,使得数据在训练过程中能够像经过pipeline管道的水一样源源不断地流向训练系统,提升数据训练效果。 # # 此外,MindSpore还支持分布式场景数据加载,用户可以在加载数据集时指定分片数目,具体用法参见[数据并行模式加载数据集](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/distributed_training_ascend.html#id6)。 # # 下面,本教程将简要演示如何使用MindSpore加载和处理文本数据。 # ## 准备环节 # 1. 准备文本数据,内容如下: # # ```text # Welcome to Beijing # 北京欢迎您! # 我喜欢English! # ``` # 2. 
创建`tokenizer.txt`文件并复制文本数据到该文件中,将该文件存放在`./datasets`路径下。执行如下代码完成本步骤。 # + import os if not os.path.exists('./datasets'): os.mkdir('./datasets') file_handle = open('./datasets/tokenizer.txt', mode='w') file_handle.write('Welcome to Beijing \n北京欢迎您! \n我喜欢English! \n') file_handle.close() # ! tree ./datasets # - # 3. 导入`mindspore.dataset`和`mindspore.dataset.text`模块。 import mindspore.dataset as ds import mindspore.dataset.text as text # ## 加载数据集 # MindSpore目前支持加载文本领域常用的经典数据集和多种数据存储格式下的数据集,用户也可以通过构建自定义数据集类实现自定义方式的数据加载。各种数据集的详细加载方法,可参考编程指南中[数据集加载](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/dataset_loading.html)章节。 # # 下面演示使用`MindSpore.dataset`模块中的`TextFileDataset`类加载数据集。 # # 1. 配置数据集目录,创建数据集对象。 DATA_FILE = './datasets/tokenizer.txt' dataset = ds.TextFileDataset(DATA_FILE, shuffle=False) # 2. 创建字典迭代器,通过迭代器获取数据,可以获得分词前的数据。 for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text'])) # ## 数据处理 # MindSpore目前支持的数据处理算子及其详细使用方法,可参考编程指南中[数据处理](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/pipeline.html)章节。 # # 下面演示构建pipeline,对文本数据集进行混洗和文本替换操作。 # # 1. 对数据集进行混洗。 # + ds.config.set_seed(58) dataset = dataset.shuffle(buffer_size=3) for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text'])) # - # 2. 对数据集进行文本替换。 # + replace_op1 = text.RegexReplace("Beijing", "Shanghai") replace_op2 = text.RegexReplace("北京", "上海") dataset = dataset.map(operations=replace_op1) dataset = dataset.map(operations=replace_op2) for data in dataset.create_dict_iterator(output_numpy=True): print(text.to_str(data['text'])) # - # ## 数据分词 # MindSpore目前支持的数据分词算子及其详细使用方法,可参考编程指南中[分词器](https://www.mindspore.cn/docs/programming_guide/zh-CN/master/tokenizer.html)章节。 # # 下面演示使用`WhitespaceTokenizer`分词器来分词,该分词是按照空格来进行分词。 # 1. 创建`tokenizer`。 tokenizer = text.WhitespaceTokenizer() # 2. 执行操作`tokenizer`。 dataset = dataset.map(operations=tokenizer) # 3. 
创建字典迭代器,通过迭代器获取数据。 for data in dataset.create_dict_iterator(num_epochs=1, output_numpy=True): print(text.to_str(data['text']).tolist())
docs/mindspore/programming_guide/source_zh_cn/load_dataset_text.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Pyforest - Import all Python Data Science Libraries pip install Pyforest df = pd.read_csv('http://winterolympicsmedals.com/medals.csv') df.head() active_imports() # It imports only those libraries that are in use # + lst1 = [1,2,3,4,5] lst2 = [6,7,8,9,10] plt.plot(lst1,lst2) plt.xlabel("X-axis") plt.ylabel("Y-axis") plt.show() # - np.array([1,2,3,4,5]) active_imports() df1= pd.read_csv("C:\\Users\\mrsid\\Desktop\\30 days of ML challenge\\NumPy and Pandas\\mercedesbenz.csv") df1.head() sns.distplot(df1['y']) active_imports()
_notebooks/2021-04-22-Pyforest.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Softmax, part 1 # # Task: practice using the `softmax` function. # # **Why**: The softmax is a building block that is used throughout machine learning, statistics, data modeling, and even statistical physics. This activity is designed to get comfortable with how it works at a high and low level. # # **Note**: Although "softmax" is the conventional name in machine learning, you may also see it called "soft *arg* max". The [Wikipedia article](https://en.wikipedia.org/w/index.php?title=Softmax_function&oldid=1065998663) has a good explanation. # ## Setup import torch from torch import tensor import ipywidgets as widgets import matplotlib.pyplot as plt # %matplotlib inline # ## Task # The following function defines `softmax` by using PyTorch built-in functionality. def softmax_torch(x): return torch.softmax(x, axis=0) # Let's try it on an example tensor. x = tensor([1., 2., 3.]) softmax_torch(x) # 1. Start by playing with the interactive widget below. Describe the outputs when: # # 1. All of the inputs are the same. # 2. One input is much bigger than the others. # 3. One input is much smaller than the others. # # Finally, describe the input that gives the largest possible value for output 1. r = 2.0 @widgets.interact(x0=(-r, r), x1=(-r, r), x2=(-r, r)) def show_softmax(x0, x1, x2): x = tensor([x0, x1, x2]) xs = softmax_torch(x) plt.barh([2, 1, 0], xs) plt.xlim(0, 1) plt.yticks([2, 1, 0], ['output 0', 'output 1', 'output 2']) plt.ylabel("softmax(x)") return xs # 2. Fill in the following function to implement softmax yourself: def softmax(xx): # Exponentiate x so all numbers are positive. expos = xx.exp() assert expos.min() >= 0 # Normalize (divide by the sum). # return ... return expos / expos.sum() # 3. 
Evaluate `softmax(x)` and verify that it is close to the `softmax_torch(x)` you evaluated above. softmax(x) # 4. Evaluate `softmax_torch(__)` for each of the following expressions. Observe how each output relates to `softmax_torch(x)`. # # - `x + 1` # - `x - 100` # - `x - x.max()` # - `x * 0.5` # - `x * 3.0` # 5. *Numerical issues*. Assign `x2 = 50 * x`. Try `softmax(x2)` and observe that the result includes the dreaded `nan` -- "not a number". Something went wrong. **Evaluate the first mathematical operation in `softmax`** for this particularly problematic input. You should see another kind of abnormal value. x2 = 50 * x softmax(x2) # your code here x2.exp() # 6. *Fixing numerical issues*. Now try `softmax(x2 - 150.0)`. Observe that you now get valid numbers. Also observe how the constant we subtracted relates to the value of `x2`. # your code here softmax(x2 - 150) # 7. Copy your `softmax` implementation to a new function, `softmax_stable`, and change it so that it subtracts `xx.max()` before exponentiating. (Don't use any in-place operations.) Verify that `softmax_stable(x2)` now works, and obtains the same result as `softmax_torch(x2)`. # your code here def softmax_stable(xx): # Exponentiate x so all numbers are positive. expos = (xx - xx.max()).exp() assert expos.min() >= 0 # Normalize (divide by the sum). # return ... return expos / expos.sum() softmax_stable(x2) softmax_torch(x2) # ## Analysis # Consider the following situation: y2 = tensor([1., 0.,]) y3 = y2 - 1 y3 y4 = y2 * 2 y4 # 1. Are `softmax(y2)` and `softmax(y3)` the same or different? How could you tell without having to evaluate them? # # *your answer here* # 2. Are `softmax(y2)` and `softmax(y4)` the same or different? How could you tell without having to evaluate them? # # *your answer here* # 3. Explain why `softmax(x2)` failed. # *your answer here* # 4. Use your observations in \#1-2 above to explain why `softmax_stable` still gives the correct answer even though we changed the input. 
# *your answer here* # 5. Explain why `softmax_stable` doesn't give us infinity or Not A Number anymore. # *your answer here* # ## Extension *optional* # # Try to prove your observation in Analysis \#1 by symbolically simplifying the expression `softmax(logits + c)` and seeing if you can get `softmax(logits)`. Remember that `softmax(x) = exp(x) / exp(x).sum()` and `exp(a + b) = exp(a)exp(b)`. #
static/fundamentals/u5n1-softmax_soln.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Optimization # # <NAME> # > “If there occur some changes in nature, the amount of action necessary for this change must be as small as possible.” Maupertuis (sec XVIII). # # ## Definition # # Optimization is the process of finding the best value from possible alternatives with regards to a certain criteria [Wikipedia](http://en.wikipedia.org/wiki/Mathematical_optimization). # Usually, such best value is the value that maximizes or minimizes the criteria. In this context, to solve an optimization problem is to find the maximum or minimum of a function. To solve an optimization problem, we first have to model the problem and define the objective, the variables, and the constraints of the problem. In optimization, these terms are usually defined as: # # 1. Cost or objective function: a function describing what we want to optimize. # 2. Design variable(s): variables that will be manipulated to optimize the cost function. # 3. Constraint functions: constrain the solution to a set of possible values. # # Read more about that in [Introduction to Optimization](http://neos-guide.org/content/optimization-introduction) from the [NEOS Guide](http://neos-guide.org/). # # ## Maxima and minima # # In mathematics, the maximum and minimum of a function are the largest and smallest values that the function takes at a point either within a neighborhood (local) or on the function entire domain (global) ([Wikipedia](http://en.wikipedia.org/wiki/Maxima_and_minima)). For a function of one variable, if the maximum or minimum of a function is not at the limits of the domain and if at least the first and second derivatives of the function exist, a maximum and minimum can be found as the point where the first derivative of the function is zero. 
If the second derivative on that point is positive, then it's a minimum, if it is negative, it's a maximum. # # <div class='center-align'><figure><img src='./../images/maxmin.png' width=300 alt='minma and maxima of a function'/> <figcaption><center><i>Figure. Maxima and minima of a function of one variable.</i></center></figcaption> </figure></div> # ### Maximum range of projectile motion # # What is the optimal angle to throw a projectile to achieve the greatest distance? # # The equations of motion for a projectile launched with an angle $\theta$ with the horizontal (consider constant gravitational acceleration, $g$) and neglecting the air resistance are: # # $$ \begin{array}{l l} # x(t) = x_0 + v_0\cos(\theta)\:t \\ # y(t) = y_0 + v_0\sin(\theta)\:t - \frac{g\:t^2}{2} \\ # \\ # v_x(t) = v_0\cos(\theta) \\ # v_y(t) = v_0\sin(\theta) - g\:t \\ # \\ # a_x(t) = 0 \\ # a_y(t) = -g # \end{array} $$ # # **Time of flight** # The time of flight can be calculated from the equation for the vertical velocity and using the properties that at the maximum height, the vertical velocity is zero and the time of rising is equal to the time of falling: # # $$ t_{flight} = \frac{2v_0\sin(\theta)}{g} $$ # # **Range** # The range, $R$, is the horizontal distance reached by the projectile with respect to the point of release: # # $$ \begin{array}{l l} # R = v_0\cos(\theta)\:t_{flight} \\ # \\ # R = \frac{v_0^2\sin(2\theta)}{g} # \end{array} $$ # # The maximum range is obtained by finding the angle that maximizes the equation above. In the context of optimization, the equation for $R$ is the cost function, $\theta$ is the design variable, and the constraint is that feasible angles are in the interval $[0,\: 90^o]$. # The solution of this problem is trivial, we know by inspection that the maximum range is given when $\sin(2\theta)$ is maximum, and for that, $\theta=45^o$. 
But let's show the solution with the approach using derivatives: # # $$ \frac{\mathrm{d} R}{\mathrm{d} \theta} = \frac{2v_0^2\cos(2\theta)}{g} $$ # # Find the value of $\theta$ where this derivative is zero: # # $$ \frac{2v_0^2\cos(2\theta)}{g} = 0 \quad \Rightarrow \quad \theta=45^o $$ # # We can check this value corresponds to a maximum if the second derivative of $R$ at this value is negative: # # $$ \frac{\mathrm{d}^2 R}{\mathrm{d} \theta^2} = -\frac{4v_0^2\sin(2\theta)}{g} $$ # # Which indeed is negative at $\theta=45^o$. # # We can use Sympy to solve the steps of this problem. Let's import the necessary Python libraries and customize the environment for that: # + import numpy as np # %matplotlib inline import matplotlib import matplotlib.pyplot as plt matplotlib.rcParams['lines.linewidth'] = 3 matplotlib.rcParams['font.size'] = 13 matplotlib.rcParams['lines.markersize'] = 5 matplotlib.rc('axes', grid=False, labelsize=14, titlesize=16, ymargin=0.05) matplotlib.rc('legend', numpoints=1, fontsize=11) import sympy as sym from sympy.plotting import plot #from sympy.interactive import printing #printing.init_printing() from IPython.display import display from IPython.core.display import Math # - # Let's create symbolic variables, define the equation, differentiate it, and find the maximum value: v0, g, theta = sym.symbols('v0 g theta') R = v0**2*sym.sin(2*theta)/g Rdiff = sym.diff(R, theta) display(Math(sym.latex('R =') + sym.latex(R))) display(Math(sym.latex(r'\frac{\mathrm{d}R}{\mathrm{d}\theta} =') + sym.latex(Rdiff))) ang = sym.solve(Rdiff, theta) display(Math(sym.latex('Sols:') + sym.latex(ang))) display(Math(sym.latex(r'R(\theta=\pi/4)=') + sym.latex(sym.N(R.subs({v0: 10, g: 9.8, theta: sym.pi/4}), 4)))) plot(R.subs({v0: 10, g: 9.8}), (theta, 0, sym.pi/2), xlabel=r'$\theta(rad)$', ylabel=r'$R$'); # ### Maximum volume of a box # # We want to make a box from a square cardboard with side $a$ such that its volume should be maximum. 
What is the optimal distance where the square cardbox should be cut and folded to make a box with maximum volume? # # <div class='center-align'><figure><img src='./../images/box.png' width=400 alt='box optimization'/> <figcaption><center><i>Figure. A box to be made from a cardboard such that its volume should be maximum. Where we should cut?</i></center></figcaption> </figure></div> # # If the distance where to cut and fold the cardbox is $b$, see figure above, the volume of the box will be: # # $$ \begin{array}{l l} # V(b) = b(a-2b)(a-2b) \\ # \\ # V(b) = a^2b - 4ab^2 + 4b^3 # \end{array} $$ # # The expression for $V$ is the cost function, $b$ is the design variable, and the constraint is that feasible values of $b$ are in the interval $[0, a/2]$. # The first derivative of $V$ is: # # $$ \frac{\mathrm{d}V}{\mathrm{d}b} = a^2 - 8ab + 12b^2 $$ # # We have to find the values for $b$ where this derivative is zero. Let's use Sympy for that: a, b = sym.symbols('a b') V = b*(a - 2*b)*(a - 2*b) Vdiff = sym.expand(sym.diff(V, b)) roots = sym.solve(Vdiff, b) display(Math(sym.latex('V =') + sym.latex(V))) display(Math(sym.latex('\\frac{\mathrm{d}V}{\mathrm{d}b} =') + sym.latex(Vdiff))) display(Math(sym.latex('Roots:') + sym.latex(roots))) # Discarding the solution $b=a/2$ (where $V=0$), $b=a/6$ results in the maximum volume. We can check that by ploting the volume of the cardbox for $a=1$ and $b: [0,\:0.5]$: plot(V.subs({a: 1}), (b, 0, .5), xlabel='b', ylabel='V') display(Math(sym.latex('V_{a=1}^{max} =') + sym.latex(sym.N(V.subs({b: a/6}).subs({a: 1}), 2)))) # The examples above are trivial problems of optimization; there was only one design variable in each case and the cost functions, although nonlinear, were simple. When there is more than one design variable (the cost function depends on more than one variable), the general idea of finding minimum and maximum values where the derivatives are zero still holds, but the problem gets more complicated. 
For an introduction on that, particularly applied to biomechanics, see chapter 4 (start on page 609) of Nigg and Herzog (2006). # # Let's see now a classical problem in biomechanics where optimization is useful and there is more than one design variable. # ## The distribution problem in biomechanics # # Using the inverse dynamics approach in biomechanics, we can determine the net force and torque acting on a joint if we know the external forces on the segments and the kinematics and inertial properties of the segments. But with this approach we are unable to determine the individual muscles forces that created such torque, as expressed in the following equation: # # $$ M_{total} = M_1 + M_2 + \dots + M_n = r_1F_1 + r_2F_2 + \dots + r_nF_n $$ # # where $r_i$ is the moment arm of the force $F_i$ that generates a torque $M_i$, a parcel of the (known) total torque $M_{total}$. # # Even if we know the moment arm of each muscle (e.g., from cadaveric data or from image analysis), the equation above has $n$ unknowns. Because there is more than one muscle that potentially created such torque, there are more unknowns than equations, and the problem is undetermined. So, the problem is how to find how the torque is distributed among the muscles of that joint. # # One solution is to consider that we (biological systems) optimize our effort in order to minimize energy expenditure, stresses on our tissues, fatigue, etc. The principle of least action, stated in the opening of this text, is an allusion that optimization might be ubiquitous in nature. With this rationale, let's solve the distribution problem in biomechanics using optimization and find the minimum force of each muscle necessary to complete a given task. 
# # The following cost functions have been proposed to solve the distribution problem in biomechanics: # # $$ \begin{array}{l l} # \sum_{i=1}^N F_i \quad &\text{(e.g., Seireg and Arkivar, 1973)} # \\ # \sum_{i=1}^N F_i^2 \quad & # \\ # \sum_{i=1}^N \left(\frac{F_i}{pcsa_i}\right)^2 \quad &\text{(e.g., Crowninshield and Brand, 1981)} # \\ # \sum_{i=1}^N \left(\frac{F_i}{M_{max,i}}\right)^3 \quad &\text{(e.g., Herzog, 1987)} # \end{array} $$ # # Where $pcsa_i$ is the physiological cross-sectional area of muscle $i$ and $M_{max,i}$ is the maximum torque muscle $i$ can produce. # Each muscle force $F_i$ is a design variable and the following constraints must be satisfied: # # $$ \begin{array}{l l} # 0 \leq F_i \leq F_{max} # \\ # \sum_{i=1}^N r_i \times F_i = M # \end{array} $$ # # Let's apply this concept to solve a distribution problem in biomechanics. # # ### Muscle force estimation # # Consider the following main flexors of the elbow joint (see figure below): biceps long head, biceps short head, and brachialis. Suppose that the elbow net joint torque determined using inverse dynamics is 20 Nm (flexor). How much each of these muscles contributed to the net torque? # # <div class='center-align'><figure><img src='./../images/elbowflexors.png' alt='Elbow flexors'/> <figcaption><center><i>Figure. A view in OpenSim of the arm26 model showing three elbow flexors (Biceps long and short heads and Brachialis).</i></center></figcaption> </figure></div> # # For the optimization, we will need experimental data for the moment arm, maximum moment, and pcsa of each muscle. Let's import these data from the OpenSim arm26 model: # time elbow_flexion BIClong BICshort BRA r_ef = np.loadtxt('./../data/r_elbowflexors.mot', skiprows=7) f_ef = np.loadtxt('./../data/f_elbowflexors.mot', skiprows=7) # The maximum isometric force of these muscles are defined in the arm26 model as: Biceps long head: 624.3 N, Biceps short head: 435.56 N, and Brachialis: 987.26 N. 
Let's compute the mamimum torques that each muscle could produce considering a static situation at the different elbow flexion angles: m_ef = r_ef*1 m_ef[:, 2:] = r_ef[:, 2:]*f_ef[:, 2:] # And let's visualize these data: labels = ['Biceps long head', 'Biceps short head', 'Brachialis'] fig, ax = plt.subplots(nrows=1, ncols=3, sharex=True, figsize=(10, 4)) ax[0].plot(r_ef[:, 1], r_ef[:, 2:]) #ax[0].set_xlabel('Elbow angle $(\,^o)$') ax[0].set_title('Moment arm (m)') ax[1].plot(f_ef[:, 1], f_ef[:, 2:]) ax[1].set_xlabel('Elbow angle $(\,^o)$', fontsize=16) ax[1].set_title('Maximum force (N)') ax[2].plot(m_ef[:, 1], m_ef[:, 2:]) #ax[2].set_xlabel('Elbow angle $(\,^o)$') ax[2].set_title('Maximum torque (Nm)') ax[2].legend(labels, loc='best', framealpha=.5) ax[2].set_xlim(np.min(r_ef[:, 1]), np.max(r_ef[:, 1])) plt.tight_layout() plt.show() # These data don't have the pcsa value of each muscle. We will estimate the pcsa considering that the amount of maximum muscle force generated per area is constant and equal to $50N/cm^2$. Consequently, the pcsa (in $cm^2$) for each muscle is: a_ef = np.array([624.3, 435.56, 987.26])/50 # 50 N/cm2 # ### Static versus dynamic optimization # # We can solve the distribution problem separately for each angle (instant) of the elbow; we will refer to that as static optimization. However, there is no guarantee that when we analyze all these solutions across the range of angles, they will be the best solution overall. One reason is that static optimization ignores the time history of the muscle force. Dynamic optimization refers to the optimization over a period of time. For such, we will need to input a cost function spanning the entire period of time at once. Dynamic optimization usually has a higher computational cost than static optimization. # # For now, we will solve the present problem using static optimization. 
# # ### Solution of the optimization problem # # For the present optimization, we are dealing with a problem of minimization, multidimensional (function of several variables), nonlinear, constrained, and we can't assume that the cost function is convex. Numerical optimization is hardly a simple task. There are many different algorithms and public and commercial software for performing optimization. For instance, look at [NEOS Server](http://www.neos-server.org/neos/), a free internet-based service for solving numerical optimization problems. # We will solve the present problem using the [scipy.optimize](http://docs.scipy.org/doc/scipy/reference/optimize.html#module-scipy.optimize) package which provides several optimization algorithms. We will use the function `minimize`: # # ```python # scipy.optimize.minimize(fun, x0, args=(), method=None, jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, callback=None, options=None) # """Minimization of scalar function of one or more variables.""" # ``` # # Now, let's write Python functions for each cost function: from scipy.optimize import minimize # + def cf_f1(x): """Cost function: sum of forces.""" return x[0] + x[1] + x[2] def cf_f2(x): """Cost function: sum of forces squared.""" return x[0]**2 + x[1]**2 + x[2]**2 def cf_fpcsa2(x, a): """Cost function: sum of squared muscle stresses.""" return (x[0]/a[0])**2 + (x[1]/a[1])**2 + (x[2]/a[2])**2 def cf_fmmax3(x, m): """Cost function: sum of cubic forces normalized by moments.""" return (x[0]/m[0])**3 + (x[1]/m[1])**3 + (x[2]/m[2])**3 # - # Let's also define the Jacobian for each cost function (which is an optional parameter for the optimization): # + def cf_f1d(x): """Derivative of cost function: sum of forces.""" dfdx0 = 1 dfdx1 = 1 dfdx2 = 1 return np.array([dfdx0, dfdx1, dfdx2]) def cf_f2d(x): """Derivative of cost function: sum of forces squared.""" dfdx0 = 2*x[0] dfdx1 = 2*x[1] dfdx2 = 2*x[2] return np.array([dfdx0, dfdx1, dfdx2]) def cf_fpcsa2d(x, 
a): """Derivative of cost function: sum of squared muscle stresses.""" dfdx0 = 2*x[0]/a[0]**2 dfdx1 = 2*x[1]/a[1]**2 dfdx2 = 2*x[2]/a[2]**2 return np.array([dfdx0, dfdx1, dfdx2]) def cf_fmmax3d(x, m): """Derivative of cost function: sum of cubic forces normalized by moments.""" dfdx0 = 3*x[0]**2/m[0]**3 dfdx1 = 3*x[1]**2/m[1]**3 dfdx2 = 3*x[2]**2/m[2]**3 return np.array([dfdx0, dfdx1, dfdx2]) # - # Let's define initial values: M = 20 # desired torque at the elbow iang = 69 # which will give the closest value to 90 degrees r = r_ef[iang, 2:] f0 = f_ef[iang, 2:] a = a_ef m = m_ef[iang, 2:] x0 = f_ef[iang, 2:]/10 # far from the correct answer for the sum of torques print('M =', M) print('x0 =', x0) print('r * x0 =', np.sum(r*x0)) # Inequality constraints (such as boundaries in our problem) can be entered with the parameter `bounds` to the `minimize` function: bnds = ((0, f0[0]), (0, f0[1]), (0, f0[2])) # Equality constraints (such as the sum of torques should equals the desired torque in our problem), as well as inequality constraints, can be entered with the parameter `constraints` to the `minimize` function (and we can also opt to enter the Jacobian of these constraints): # use this in combination with the parameter bounds: cons = ({'type': 'eq', 'fun' : lambda x, r, f0, M: np.array([r[0]*x[0] + r[1]*x[1] + r[2]*x[2] - M]), 'jac' : lambda x, r, f0, M: np.array([r[0], r[1], r[2]]), 'args': (r, f0, M)}) # to enter everything as constraints: cons = ({'type': 'eq', 'fun' : lambda x, r, f0, M: np.array([r[0]*x[0] + r[1]*x[1] + r[2]*x[2] - M]), 'jac' : lambda x, r, f0, M: np.array([r[0], r[1], r[2]]), 'args': (r, f0, M)}, {'type': 'ineq', 'fun' : lambda x, r, f0, M: f0[0]-x[0], 'jac' : lambda x, r, f0, M: np.array([-1, 0, 0]), 'args': (r, f0, M)}, {'type': 'ineq', 'fun' : lambda x, r, f0, M: f0[1]-x[1], 'jac' : lambda x, r, f0, M: np.array([0, -1, 0]), 'args': (r, f0, M)}, {'type': 'ineq', 'fun' : lambda x, r, f0, M: f0[2]-x[2], 'jac' : lambda x, r, f0, M: np.array([0, 0, 
-1]), 'args': (r, f0, M)}, {'type': 'ineq', 'fun' : lambda x, r, f0, M: x[0], 'jac' : lambda x, r, f0, M: np.array([1, 0, 0]), 'args': (r, f0, M)}, {'type': 'ineq', 'fun' : lambda x, r, f0, M: x[1], 'jac' : lambda x, r, f0, M: np.array([0, 1, 0]), 'args': (r, f0, M)}, {'type': 'ineq', 'fun' : lambda x, r, f0, M: x[2], 'jac' : lambda x, r, f0, M: np.array([0, 0, 1]), 'args': (r, f0, M)}) # Although more verbose, if all the Jacobians of the constraints are also informed, this alternative seems better than informing bounds for the optimization process (less error in the final result and less iterations). # # Given the characteristics of the problem, if we use the function `minimize` we are limited to the SLSQP (Sequential Least SQuares Programming) solver. # # Finally, let's run the optimization for the four different cost functions and find the optimal muscle forces: f1r = minimize(fun=cf_f1, x0=x0, args=(), jac=cf_f1d, constraints=cons, method='SLSQP', options={'disp': True}) f2r = minimize(fun=cf_f2, x0=x0, args=(), jac=cf_f2d, constraints=cons, method='SLSQP', options={'disp': True}) fpcsa2r = minimize(fun=cf_fpcsa2, x0=x0, args=(a,), jac=cf_fpcsa2d, constraints=cons, method='SLSQP', options={'disp': True}) fmmax3r = minimize(fun=cf_fmmax3, x0=x0, args=(m,), jac=cf_fmmax3d, constraints=cons, method='SLSQP', options={'disp': True}) # Let's compare the results for the different cost functions: # + dat = np.vstack((np.around(r*100,1), np.around(a,1), np.around(f0,0), np.around(m,1))) opt = np.around(np.vstack((f1r.x, f2r.x, fpcsa2r.x, fmmax3r.x)),1) er = ['-', '-', '-', '-', np.sum(r*f1r.x)-M, np.sum(r*f2r.x)-M, np.sum(r*fpcsa2r.x)-M, np.sum(r*fmmax3r.x)-M] data = np.vstack((np.vstack((dat, opt)).T, er)).T from pandas import DataFrame rows = ['$\text{Moment arm}\;[cm]$', '$pcsa\;[cm^2]$', '$F_{max}\;[N]$', '$M_{max}\;[Nm]$', '$\sum F_i$', '$\sum F_i^2$', '$\sum(F_i/pcsa_i)^2$', '$\sum(F_i/M_{max,i})^3$'] cols = ['Biceps long head', 'Biceps short head', 'Brachialis', 
'Error in M'] df = DataFrame(data, index=rows, columns=cols) print('\nComparison of different cost functions for solving the distribution problem') df # - # ## Comments # # The results show that the estimations for the muscle forces depend on the cost function used in the optimization. Which one is correct? This is a difficult question and it's dependent on the goal of the actual task being modeled. Glitsch and Baumann (1997) investigated the effect of different cost functions on the optimization of walking and running and the predicted muscles forces were compared with the electromyographic activity of the corresponding muscles of the lower limb. They found that, among the analyzed cost functions, the minimization of the sum of squared muscle stresses resulted in the best similarity with the actual electromyographic activity. # # In general, one should always test different algorithms and different initial values before settling for the solution found. Downey (2011), Kitchin (2013), and Kiusalaas (2013) present more examples on numerical optimization. The [NEOS Guide](http://neos-guide.org/) is a valuable source of information on this topic and [OpenOpt](http://openopt.org/) is a good alternative software for numerical optimization in Python. # ## Exercises # # 1. Regarding the distribution problem for the elbow muscles presented in this text: # a. Test different initial values for the optimization. # b. Test other values for the elbow angle where the results are likely to change. # # 2. In an experiment to estimate forces of the elbow flexors, through inverse dynamics it was found an elbow flexor moment of 10 Nm. Consider the following data for maximum force (F0), moment arm (r), and pcsa (A) of the brachialis, brachioradialis, and biceps brachii muscles: F0 (N): 1000, 250, 700; r (cm): 2, 5, 4; A (cm$^2$): 33, 8, 23, respectively (data from Robertson et al. (2013)). # a. Use static optimization to estimate the muscle forces. # b. 
Test the robustness of the results using different initial values for the muscle forces. # c. Compare the results for different cost functions. # ## References # # - Crowninshield RD, <NAME> (1981) [A physiologically based criterion of muscle force prediction in locomotion](http://www.ncbi.nlm.nih.gov/pubmed/7334039). Journal of Biomechanics, 14, 793–801. # - <NAME> (2011) [Physical Modeling in MATLAB](http://greenteapress.com/matlab/). Green Tea Press. # - <NAME> (1987) [Individual muscle force estimations using a non-linear optimal design](http://www.ncbi.nlm.nih.gov/pubmed/3682873). J Neurosci Methods, 21, 167-179. # - <NAME>, <NAME> (1997) [The three-dimensional determination of internal loads in the lower extremity](http://www.ncbi.nlm.nih.gov/pubmed/9456380). Journal of Biomechanics, 30, 1123–1131. # - <NAME> (2013) [pycse - Python Computations in Science and Engineering](http://kitchingroup.cheme.cmu.edu/pycse/). # - Kiusalaas (2013) [Numerical methods in engineering with Python 3](http://books.google.com.br/books?id=aJkXoxxoCoUC). 3rd edition. Cambridge University Press. # - <NAME> and <NAME> (2006) [Biomechanics of the Musculo-skeletal System](https://books.google.com.br/books?id=hOIeAQAAIAAJ&dq=editions:ISBN0470017678). 3rd Edition. Wiley. # - <NAME>, <NAME>, <NAME>, <NAME> (2013) [Research Methods in Biomechanics](http://books.google.com.br/books?id=gRn8AAAAQBAJ). 2nd Edition. Human Kinetics. # - <NAME>, <NAME> (1973) [A mathematical model for evaluation of forces in lower extremeties of the musculo-skeletal system](http://www.ncbi.nlm.nih.gov/pubmed/4706941). Journal of Biomechanics, 6, 313–322, IN19–IN20, 323–326.
notebooks/Optimization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # + from __future__ import division import os, sys import urllib, cStringIO import pymongo as pm ## first establish ssh tunnel to server where database is running import base64 import numpy as np from numpy import * import PIL from PIL import Image import base64 import matplotlib from matplotlib import pylab, mlab, pyplot from IPython.core.pylabtools import figsize, getfigs plt = pyplot import seaborn as sns sns.set_context('poster') sns.set_style('white') from matplotlib.path import Path import matplotlib.patches as patches import pandas as pd from svgpathtools import parse_path, wsvg, svg2paths # - # ### setup # + # directory & file hierarchy proj_dir = os.path.abspath('../../..') analysis_dir = os.getcwd() results_dir = os.path.join(proj_dir,'results') plot_dir = os.path.join(results_dir,'plots') csv_dir = os.path.join(results_dir,'csv') exp_dir = os.path.abspath(os.path.join(proj_dir,'experiments')) sketch_dir = os.path.abspath(os.path.join(proj_dir,'sketches')) ## add helpers to python path if os.path.join(proj_dir,'analysis','python') not in sys.path: sys.path.insert(1,os.path.join(proj_dir,'analysis','python')) if not os.path.exists(results_dir): os.makedirs(results_dir) if not os.path.exists(plot_dir): os.makedirs(plot_dir) if not os.path.exists(csv_dir): os.makedirs(csv_dir) # Assign variables within imported analysis helpers import analysis_helpers as h if sys.version_info[0]>=3: from importlib import reload reload(h) import svg_rendering_helpers as srh reload(srh) # + # set vars auth = pd.read_csv('auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user pswd = auth.values[0][0] user = 'sketchloop' host = 'rxdhawkins.me' ## cocolab ip address # have to fix this to be able to analyze from local import 
pymongo as pm conn = pm.MongoClient('mongodb://sketchloop:' + pswd + '@127.0.0.1') db = conn['3dObjects'] coll = db['graphical_conventions'] # which iteration name should we use? ## "good" iterations that we currently combine: ['run3_size4_waiting', 'run4_generalization'] iterationNames = ['run3_size4_waiting', 'run4_generalization'] # - # #### wsvg arguments # ``` # wsvg(paths=None, colors=None, filename=join, stroke_widths=None, nodes=None, node_colors=None, node_radii=None, openinbrowser=False, timestamp=False, margin_size=0.1, mindim=600, dimensions=None, viewbox=None, text=None, text_path=None, font_size=None, attributes=None, svg_attributes=None) # ``` # ### what we need (tests on single sketches) # - render_svg # - svg_to_png # - svg2paths (already defined in svgpathtools) ### rendering stroke-by-stroke stroke_dir = os.path.join(sketch_dir, 'sample_subsets') reload(srh) ## render out svg srh.render_svg(parsed,base_dir=stroke_dir) ## get svg path list svg_paths = srh.generate_svg_path_list(os.path.join(stroke_dir,'svg')) ## convert to png srh.svg_to_png(svg_paths,base_dir=stroke_dir) # ### svg rendering with stroke deletion in both directions D = pd.read_csv(os.path.join(results_dir, 'graphical_conventions.csv')) reload(srh) for g in D['gameID].unique()': D_game = D[D['gameID'] == g] print "rendering sketches from game " + g + " with " + str(len(D_game)) + " trials" for t in range(1, 41): D_trial = D_game[D_game['trialNum'] == t] iterationName = 'run4_generalization' if list(D_trial['Generalization'])[0] == 'between' else 'run3_size4_waiting' trial_level_path = g + '_' + str(t) + '_' + list(D_trial['target'])[0] + '_' + str(list(D_trial['repetition'])[0]) + '_' + iterationName if int(trial_level_path.split('_')[1]) > 40: print(trial_level_path) svg_list = ast.literal_eval(D[(D['gameID'] == g) & (D['trialNum'] == t)]['svgString'].unique()[0]) assert len(svg_list) != 0 # deleting from the end for num_strokes in range(len(svg_list)): # 0 to 7 
stroke_and_direction = '_' + str(num_strokes) + '_start.svg' # deleting how many strokes? stroke_level_path = trial_level_path + stroke_and_direction parsed = [parse_path(svg_list[i]) for i in range(num_strokes,len(svg_list))] ## render out svg srh.render_svg(parsed,base_dir=stroke_dir, out_fname=stroke_level_path) # and deleting from the start for num_strokes in range(len(svg_list)): # 0 to 7 stroke_and_direction = '_' + str(num_strokes) + '_end.svg' # deleting how many strokes? stroke_level_path = trial_level_path + stroke_and_direction parsed = [parse_path(svg_list[i]) for i in range(0, len(svg_list) - num_strokes)] ## render out svg srh.render_svg(parsed,base_dir=stroke_dir, out_fname=stroke_level_path) import ast # self-similarity rendering reload(srh) for g in D['gameID'].unique(): D_game = D[D['gameID'] == g] print("rendering sketches from game " + g + " with " + str(len(D_game)) + " trials") for t in range(1, 41): D_trial = D_game[D_game['trialNum'] == t] iterationName = 'run4_generalization' if list(D_trial['generalization'])[0] == 'between' else 'run3_size4_waiting' trial_level_path = g + '_' + str(t) + '_' + list(D_trial['target'])[0] + '_' + str(list(D_trial['repetition'])[0]) + '_' + iterationName if int(trial_level_path.split('_')[1]) > 40: print(trial_level_path) svg_list = ast.literal_eval(D[(D['gameID'] == g) & (D['trialNum'] == t)]['svgString'].unique()[0]) assert len(svg_list) != 0 for num_strokes in range(len(svg_list)): # 0 to 7 - 8 strokes stroke_and_direction = '_' + str(num_strokes) + '.svg' # kth-index stroke deleted stroke_level_path = trial_level_path + stroke_and_direction parsed = [parse_path(svg_list[i]) for i in range(len(svg_list)) if i != num_strokes] ## render out svg if len(parsed) > 0: srh.render_svg(parsed, base_dir=stroke_dir, out_fname=stroke_level_path) # # and deleting from the start # for num_strokes in range(len(svg_list)): # 0 to 7 # stroke_and_direction = '_' + str(num_strokes) + '_end.svg' # deleting how many strokes? 
# stroke_level_path = trial_level_path + stroke_and_direction # parsed = [parse_path(svg_list[i]) for i in range(0, len(svg_list) - num_strokes)] # ## render out svg # srh.render_svg(parsed,base_dir=stroke_dir, out_fname=stroke_level_path) # self-similarity GREEDY LESIONING ALGORITHM using binary lesion representations import ast from itertools import combinations random.seed(42) reload(srh) for g in D['gameID'].unique(): D_game = D[D['gameID'] == g] print("rendering sketches from game " + g + " with " + str(len(D_game)) + " trials") for t in range(1, 41): D_trial = D_game[D_game['trialNum'] == t] iterationName = 'run4_generalization' if list(D_trial['generalization'])[0] == 'between' else 'run3_size4_waiting' trial_level_path = g + '_' + str(t) + '_' + list(D_trial['target'])[0] + '_' + str(list(D_trial['repetition'])[0]) + '_' + iterationName if int(trial_level_path.split('_')[1]) > 40: print(trial_level_path) svg_list = ast.literal_eval(D[(D['gameID'] == g) & (D['trialNum'] == t)]['svgString'].unique()[0]) assert len(svg_list) != 0 total_num_strokes = len(svg_list) for num_stroke in range(total_num_strokes): # 0 to 7 - 8 strokes comb_list = list(combinations(range(total_num_strokes), num_stroke)) len(comb_list) == 2**num_stroke for comb_index, comb in enumerate(comb_list): stroke_and_direction = '_' + str(num_stroke) + '_' + str(comb_index) + '.svg' # kth-index stroke deleted stroke_level_path = trial_level_path + stroke_and_direction parsed = [parse_path(svg_list[i]) for i in comb] ## render out svg if len(parsed) > 0: srh.render_svg(parsed, base_dir=stroke_dir, out_fname=stroke_level_path) import ast from matplotlib import colors # to_compare = [['run3_1820-86afdf51-4b4c-43a9-a9e1-e6e20618de1b_3_repeated_waiting_02_0', # 'run3_1820-86afdf51-4b4c-43a9-a9e1-e6e20618de1b_12_repeated_waiting_02_1']] to_compare = [['run4_0647-0ece0513-aa05-4676-bb63-3845126ce2c1_1_repeated_dining_04_0', 'run4_0647-0ece0513-aa05-4676-bb63-3845126ce2c1_11_repeated_dining_04_1']] # 
['run4_0836-215b3490-cdf2-4a13-b55a-ea90f7b1903b_2_repeated_dining_03_0', # 'run4_0836-215b3490-cdf2-4a13-b55a-ea90f7b1903b_10_repeated_dining_03_1'] # ] to_compare = [['run3_0724-1a59371c-6fe6-4d6d-a987-db533e75a5e5_15_repeated_dining_05_2', 'run3_0724-1a59371c-6fe6-4d6d-a987-db533e75a5e5_20_repeated_dining_05_3'], ['run4_6333-9ef157b9-68ab-438e-bb27-f588abc2c906_23_repeated_dining_05_4', 'run4_6333-9ef157b9-68ab-438e-bb27-f588abc2c906_19_repeated_dining_05_3'], ['run4_0955-60582965-41c6-492d-aa67-98e15630db39_2_repeated_dining_01_0', 'run4_0955-60582965-41c6-492d-aa67-98e15630db39_10_repeated_dining_01_1'], ['run3_4529-638b7428-2c20-4fb2-a388-1c7513f675d1_7_repeated_waiting_05_0', 'run3_4529-638b7428-2c20-4fb2-a388-1c7513f675d1_9_repeated_waiting_05_1'], ['run4_7940-e30c3a20-dd99-4efc-ae92-f650a7f1d6e6_20_repeated_waiting_02_3', 'run4_7940-e30c3a20-dd99-4efc-ae92-f650a7f1d6e6_21_repeated_waiting_02_4']] # + # customized rendering for qualitative analyses reload(srh) # to_compare = [['run3_0724-1a59371c-6fe6-4d6d-a987-db533e75a5e5_1_repeated_dining_04_0', # 'run3_0724-1a59371c-6fe6-4d6d-a987-db533e75a5e5_12_repeated_dining_04_1'], # ['run3_4529-638b7428-2c20-4fb2-a388-1c7513f675d1_7_repeated_waiting_05_0', # 'run3_4529-638b7428-2c20-4fb2-a388-1c7513f675d1_9_repeated_waiting_05_1'], # ['run4_7940-e30c3a20-dd99-4efc-ae92-f650a7f1d6e6_20_repeated_waiting_02_3', # 'run4_7940-e30c3a20-dd99-4efc-ae92-f650a7f1d6e6_21_repeated_waiting_02_4']] for pair in to_compare: for sketch in pair: print "rendering sketch: " + sketch g = sketch.split('_')[1] t = sketch.split('_')[2] ## find all svg paths of each sketch ## delete one and render (with path name indicating which stroke was deleted) svg_list = ast.literal_eval(D[(D['gameID'] == g) & (D['trialNum'] == int(t))]['svgString'].unique()[0]) assert len(svg_list) != 0 for num_strokes in range(len(svg_list)): # 0 to 7 stroke_and_direction = '_' + str(num_strokes) + '_dummie2.svg' # deleting which stroke? 
stroke_level_path = sketch + stroke_and_direction parsed = [parse_path(svg_list[i]) for i in range(len(svg_list)) if i != num_strokes] ## render out svg srh.render_svg(parsed,base_dir=stroke_dir, out_fname=stroke_level_path) # - # self-similarity GREEDY LESIONING ALGORITHM using permutations import ast from itertools import permutations random.seed(42) reload(srh) for g in D['gameID'].unique(): D_game = D[D['gameID'] == g] print("rendering sketches from game " + g + " with " + str(len(D_game)) + " trials") for t in range(1, 41): D_trial = D_game[D_game['trialNum'] == t] iterationName = 'run4_generalization' if list(D_trial['generalization'])[0] == 'between' else 'run3_size4_waiting' trial_level_path = g + '_' + str(t) + '_' + list(D_trial['target'])[0] + '_' + str(list(D_trial['repetition'])[0]) + '_' + iterationName if int(trial_level_path.split('_')[1]) > 40: print(trial_level_path) svg_list = ast.literal_eval(D[(D['gameID'] == g) & (D['trialNum'] == t)]['svgString'].unique()[0]) assert len(svg_list) != 0 total_num_strokes = len(svg_list) perm_list = list(set(permutations(range(total_num_strokes)))) if total_num_strokes <= 4: how_many = len(perm_list) else: how_many = 100 for perm_index, perm in enumerate(perm_list[:how_many]): if perm == range(total_num_strokes): continue for num_strokes, stroke_index in enumerate(perm): # 0 to 7 - 8 strokes stroke_and_direction = '_' + str(num_strokes) + '_' + str(perm_index) + '.svg' # kth-index stroke deleted stroke_level_path = trial_level_path + stroke_and_direction parsed = [parse_path(svg_list[i]) for i in range(len(svg_list)) if i != num_strokes] ## render out svg if len(parsed) > 0: srh.render_svg(parsed, base_dir=stroke_dir, out_fname=stroke_level_path) # # and deleting from the start # for num_strokes in range(len(svg_list)): # 0 to 7 # stroke_and_direction = '_' + str(num_strokes) + '_end.svg' # deleting how many strokes? 
# stroke_level_path = trial_level_path + stroke_and_direction # parsed = [parse_path(svg_list[i]) for i in range(0, len(svg_list) - num_strokes)] # ## render out svg # srh.render_svg(parsed,base_dir=stroke_dir, out_fname=stroke_level_path) #### now, rendering with different colors!!!! reload(srh) qd = pd.read_csv('qualitative_stroke_analysis_2.csv') for pair in to_compare: for sketch in pair: print ('game ' + g) g = sketch.split('_')[1] if g == '0724-1a59371c-6fe6-4d6d-a987-db533e75a5e5': target = 'dining_05' elif g == '4529-638b7428-2c20-4fb2-a388-1c7513f675d1': target = 'waiting_05' elif g == '7940-e30c3a20-dd99-4efc-ae92-f650a7f1d6e6': target = 'waiting_02' elif g == '6333-9ef157b9-68ab-438e-bb27-f588abc2c906': target = 'dining_05' elif g == '0647-0ece0513-aa05-4676-bb63-3845126ce2c1':#if g == '0955-60582965-41c6-492d-aa67-98e15630db39': target = 'dining_04' elif g == '1820-86afdf51-4b4c-43a9-a9e1-e6e20618de1b': target = 'waiting_02' else: target = 'dining_03' r = sketch.split('_')[6] qd_ = qd[(qd['gameID'] == g) & (qd['lesioned_rep'] == int(r))] stroke_numbers = qd_['deleted_stroke_number'].unique() svg_list = ast.literal_eval(D[(D['gameID'] == g) & (D['repetition'] == int(r)) & (D['target'] == target)]['svgString'].unique()[0]) # Color normalization max_similarity = np.max(np.array(qd_['similarity'])) min_similarity = np.min(np.array(qd_['similarity'])) norm = colors.Normalize(vmin=min_similarity, vmax=max_similarity) cmap = plt.cm.get_cmap('viridis') stroke_colors = [] for num_strokes in range(len(svg_list)): similarity = qd_[qd_['deleted_stroke_number'] == num_strokes]['similarity'].unique()[0] rgba = cmap(1 - norm(similarity)) color=colors.to_hex(rgba) stroke_colors.append(color) stroke_and_direction = '_color3.svg' # deleting which stroke? 
stroke_level_path = sketch + stroke_and_direction parsed = [parse_path(svg_list[i]) for i in range(len(svg_list))] srh.render_svg_color(parsed, stroke_colors, base_dir=stroke_dir, out_fname=stroke_level_path) ## get svg path list svg_paths = srh.generate_svg_path_list(os.path.join(stroke_dir,'svg')) #svg_paths = [path for path in svg_paths if path.split('.')[0].split('/')[-1].split('_')[-1] == 'color3'] #and path.split('.')[0].split('/')[-1].split('_')[1] == '1820-86afdf51-4b4c-43a9-a9e1-e6e20618de1b'] svg_paths[0] ## convert to png reload(srh) diffs = ['/Users/megumisano/graphical_conventions/sketches/self_similarity/svg/' + path + '.svg' for path in diff] srh.svg_to_png(diffs,base_dir=stroke_dir) len(set(os.listdir(os.path.join(stroke_dir, 'svg')))) len(set(os.listdir(os.path.join(stroke_dir, 'png')))) svg_paths = os.listdir(os.path.join(stroke_dir, 'svg')) png_paths = os.listdir(os.path.join(stroke_dir, 'png')) svg_paths_ = [path.split('.')[0] for path in svg_paths] png_paths_ = [path.split('.')[0] for path in png_paths] diff = set(svg_paths_).difference(set(png_paths_)) len(diff)
analysis/ipynb/megsano/svg_rendering_stroke_deletion.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # Notebook for sample test of multiregressor
#

# +
import pandas as pd
import numpy as np
import sklearn.ensemble
# FIX(review): `sklearn.metrics.accuracy_score` is called below, but only
# `sklearn.ensemble` was imported; accessing the `metrics` submodule then
# relies on sklearn binding it as a side effect.  Import it explicitly.
import sklearn.metrics
# import lime.lime_tabular as lime_tab
# import lime.lime_tabular_mod as lime_tab
import lime.lime_tabular_multiregressor as lime_tab
import matplotlib.pyplot as plt
import random
import importlib

importlib.reload(lime_tab)

pd.set_option("display.max_columns", None)
pd.set_option("mode.chained_assignment", None)
plt.style.use({"figure.facecolor": "white"})
# -

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Load iris dataset

# + pycharm={"name": "#%%\n"}
iris_dataset = pd.read_csv(
    "../data/iris-classification/iris.data",
    names=[
        "sepal_length",
        "sepal_width",
        "petal_length",
        "petal_width",
        "label"
    ]).sample(frac=1)
iris_dataset

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Prepare dataset

# + pycharm={"name": "#%%\n"}
labels_column_name = "label"
# Hold out one randomly chosen row (jackknife / leave-one-out split).
jackknife_idx = random.choice(iris_dataset.index.to_numpy())
# jackknife_idx = 118

train_data = iris_dataset.loc[iris_dataset.index != jackknife_idx]
train_x = train_data.drop(columns=labels_column_name).to_numpy()
train_y = train_data[labels_column_name].to_numpy()

test_data = iris_dataset.loc[iris_dataset.index == jackknife_idx]
test_x = test_data.drop(columns=labels_column_name).to_numpy()
test_y = test_data[labels_column_name].to_numpy()

attributes = iris_dataset.columns.tolist()
attributes.remove(labels_column_name)
print(f"Attributes: {attributes}")

# + pycharm={"name": "#%%\n"}
model = sklearn.ensemble.RandomForestClassifier()
model.fit(train_x, train_y)
result = model.predict(test_x)
accuracy = sklearn.metrics.accuracy_score(test_y, result)
print(f"Predicted: {result}. It's accuracy: {accuracy}")

# + [markdown] pycharm={"name": "#%% md\n"}
# ### Use LIME to explain local prediction

# + pycharm={"name": "#%%\n"}
explainer = lime_tab.LTEMultiRegressionTree(
    train_x,
    feature_names=attributes,
    class_names=model.classes_,
    with_kfold=5,
    discretize_continuous=False,
    use_inversed_data_for_training=True
)
# -

# Show probabilities for random instance (given by random forest on test dataset)

# + pycharm={"name": "#%%\n"}
probabilities = model.predict_proba(test_x)[0, :]
probabilities_for_labels = {}
for label, probability in zip(model.classes_, probabilities):
    probabilities_for_labels[label] = probability
print(probabilities_for_labels)
predicted_label_id = np.argmax(probabilities)
# -

# Explain the instance

# + pycharm={"name": "#%%\n"}
explanation = explainer.explain_instance(
    test_x.ravel(),
    model.predict_proba,
    num_features=4,
    top_labels=3
)
explanation.as_pyplot_figure(explanation.top_labels[0])
explanation.predict_proba  # probabilites from explained model

# + pycharm={"name": "#%%\n"}
test_data

# + pycharm={"name": "#%%\n"}
print(explanation.get_prediction_for_explained_model())
print(explanation.get_prediction_for_surrogate_model())
print(explanation.get_prediction_for_surrogate_model(normalized=True))

# + pycharm={"name": "#%%\n"}
print("MSE on training set:", np.array(explanation.get_losses_for_surrogate_model()))
print("MSE on cv-test set:", np.mean(explanation.get_losses_for_cv_model(), axis=1))
print("std on cv-test set:", np.std(explanation.get_losses_for_cv_model(), axis=1))

# + pycharm={"name": "#%%\n"}
explanation.get_predicted_label()

# + pycharm={"name": "#%%\n"}
explanation.render_explanation_tree("../data/img/tree_explanation/tree_multiregressor.png")

# + pycharm={"name": "#%%\n"}
print(explanation.get_decision_rules_for_explanation())

# + pycharm={"name": "#%%\n"}
explanation.get_fidelity_loss_on_kfold()
doc/mod/notebooks/test_multiregressor.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Project 6: Analyzing Stock Sentiment from Twits
# ## Instructions
# Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment.
#
# ## Packages
# When you implement the functions, you'll only need to use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.
#
# ### Load Packages

# +
import json
import nltk
import os
import random
import re
import torch

import numpy as np

from torch import nn, optim
import torch.nn.functional as F
# -

# ## Introduction
# When deciding the value of a company, it's important to follow the news. For example, a product recall or natural disaster in a company's product chain. You want to be able to turn this information into a signal. Currently, the best tool for the job is a Neural Network.
#
# For this project, you'll use posts from the social media site [StockTwits](https://en.wikipedia.org/wiki/StockTwits). The community on StockTwits is full of investors, traders, and entrepreneurs. Each message posted is called a Twit. This is similar to Twitter's version of a post, called a Tweet. You'll build a model around these twits that generate a sentiment score.
#
# We've collected a bunch of twits, then hand labeled the sentiment of each. To capture the degree of sentiment, we'll use a five-point scale: very negative, negative, neutral, positive, very positive. Each twit is labeled -2 to 2 in steps of 1, from very negative to very positive respectively. You'll build a sentiment analysis model that will learn to assign sentiment to twits on its own, using this labeled data.
#
# The first thing we should to do, is load the data.
#
# ## Import Twits
# ### Load Twits Data
# This JSON file contains a list of objects for each twit in the `'data'` field:
#
# ```
# {'data':
#   {'message_body': 'Neutral twit body text here',
#    'sentiment': 0},
#   {'message_body': 'Happy twit body text here',
#    'sentiment': 1},
#     ...
# }
# ```
#
# The fields represent the following:
#
# * `'message_body'`: The text of the twit.
# * `'sentiment'`: Sentiment score for the twit, ranges from -2 to 2 in steps of 1, with 0 being neutral.
#
#
# To see what the data look like by printing the first 10 twits from the list.

# +
twits_path = os.path.join('..', '..', 'data', 'project_6_stocktwits', 'twits.json')
with open(twits_path, 'r') as f:
    twits = json.load(f)

print(twits['data'][:10])
# -

# ### Length of Data
# Now let's look at the number of twits in dataset. Print the number of twits below.

# +
# print out the number of twits
print('Number of twits: {}'.format(len(twits['data'])))
# -

# ### Split Message Body and Sentiment Score

# Pull out the text and the (shifted) label in a single pass over the data.
# The sentiment scores are discrete, so shift them from [-2, 2] to [0, 4]
# for use in our network.
messages = []
sentiments = []
for twit in twits['data']:
    messages.append(twit['message_body'])
    sentiments.append(twit['sentiment'] + 2)

# ## Preprocessing the Data
# With our data in hand we need to preprocess our text. These twits are collected by filtering on ticker symbols where these are denoted with a leader $ symbol in the twit itself. For example,
#
# `{'message_body': 'RT @google Our annual look at the year in Google blogging (and beyond) http://t.co/sptHOAh8 $GOOG',
# 'sentiment': 0}`
#
# The ticker symbols don't provide information on the sentiment, and they are in every twit, so we should remove them. This twit also has the `@google` username, again not providing sentiment information, so we should also remove it. We also see a URL `http://t.co/sptHOAh8`. Let's remove these too.
#
# The easiest way to remove specific words or phrases is with regex using the `re` module. You can sub out specific patterns with a space:
#
# ```python
# re.sub(pattern, ' ', text)
# ```
# This will substitute a space with anywhere the pattern matches in the text. Later when we tokenize the text, we'll split appropriately on those spaces.

# ### Pre-Processing

# +
nltk.download('wordnet')


def preprocess(message):
    """
    This function takes a string as input, then performs these operations:
        - lowercase
        - remove URLs
        - remove ticker symbols
        - removes punctuation
        - tokenize by splitting the string on whitespace
        - removes any single character tokens

    Parameters
    ----------
        message : The text message to be preprocessed.

    Returns
    -------
        tokens: The preprocessed text into tokens.
    """
    # Lowercase the twit message
    text = message.lower()

    # Strip, in this order: URLs, ticker symbols (anything starting with $),
    # and usernames (anything starting with @) -- none carry sentiment.
    for pattern in (r'http\S+', r'\$\S+', r'\@\S+'):
        text = re.sub(pattern, ' ', text)

    # Replace everything that is not a lowercase letter with a space
    text = re.sub(r'[^a-z]', ' ', text)

    # Tokenize by splitting the string on whitespace into a list of words
    words = text.split()

    # Lemmatize with WordNet, ignoring any word that is not longer than
    # one character.
    wnl = nltk.stem.WordNetLemmatizer()
    tokens = []
    for word in words:
        if len(word) > 1:
            tokens.append(wnl.lemmatize(word))

    return tokens
# -

# ### Preprocess All the Twits
# Now we can preprocess each of the twits in our dataset. Apply the function `preprocess` to all the twit messages.
tokenized = [preprocess(message) for message in messages]
tokenized[:3]

# ### Bag of Words
# Now with all of our messages tokenized, we want to create a vocabulary and count up how often each word appears in our entire corpus. Use the [`Counter`](https://docs.python.org/3.1/library/collections.html#collections.Counter) function to count up all the tokens.

# +
from collections import Counter

"""
Create a vocabulary by using Bag of words
"""
# Flatten the per-message token lists into one corpus-wide token list.
tokenized_list = []
for message_tokens in tokenized:
    tokenized_list.extend(message_tokens)
bow = Counter(tokenized_list)
# -

# ### Frequency of Words Appearing in Message
# With our vocabulary, now we'll remove some of the most common words such as 'the', 'and', 'it', etc. These words don't contribute to identifying sentiment and are really common, resulting in a lot of noise in our input. If we can filter these out, then our network should have an easier time learning.
#
# We also want to remove really rare words that show up in a only a few twits. Here you'll want to divide the count of each word by the number of messages. Then remove words that only appear in some small fraction of the messages.

# +
"""
Set the following variables:
    freqs
    low_cutoff
    high_cutoff
    K_most_common
"""

# Dictionary that contains the Frequency of words appearing in messages.
# The key is the token and the value is the frequency of that word in the corpus.
total_count = sum(bow.values())
freqs = {token: cnt / total_count for token, cnt in bow.items()}

# Float that is the frequency cutoff. Drop words with a frequency that is lower or equal to this number.
low_cutoff = 1e-6

# Integer that is the cut off for most common words. Drop words that are the `high_cutoff` most common words.
high_cutoff = 17

# The k most common words in the corpus. Use `high_cutoff` as the k.
K_most_common = [token for token, _ in bow.most_common(high_cutoff)]

# Keep words that are frequent enough but not in the most-common set.
# (A set makes each membership test O(1) without changing the result.)
_common = set(K_most_common)
filtered_words = [w for w in freqs if freqs[w] > low_cutoff and w not in _common]

print(K_most_common)
print(len(filtered_words))
# -

# ### Updating Vocabulary by Removing Filtered Words
# Let's creat three variables that will help with our vocabulary.

# +
"""
Set the following variables:
    vocab
    id2vocab
    filtered
"""
# A dictionary for the `filtered_words`.
# The key is the word and value is an id that represents the word.
# Ids start at 1 so that 0 stays free for padding.
vocab = {w: i for i, w in enumerate(filtered_words, start=1)}
# Reverse of the `vocab` dictionary. The key is word id and value is the word.
id2vocab = {i: w for w, i in vocab.items()}
# tokenized with the words not in `filtered_words` removed.
filtered = []
for message_tokens in tokenized:
    filtered.append([w for w in message_tokens if w in vocab])
# -

# ### Balancing the classes
# Let's do a few last pre-processing steps. If we look at how our twits are labeled, we'll find that 50% of them are neutral. This means that our network will be 50% accurate just by guessing 0 every single time. To help our network learn appropriately, we'll want to balance our classes.
# That is, make sure each of our different sentiment scores show up roughly as frequently in the data.
#
# What we can do here is go through each of our examples and randomly drop twits with neutral sentiment. What should be the probability we drop these twits if we want to get around 20% neutral twits starting at 50% neutral? We should also take this opportunity to remove messages with length 0.

# +
balanced = {'messages': [], 'sentiments': []}

n_neutral = sum(1 for each in sentiments if each == 2)
N_examples = len(sentiments)
# Keep neutral twits with this probability so neutrals end up ~1/5 of the data.
keep_prob = (N_examples - n_neutral) / 4 / n_neutral

for sentiment, message in zip(sentiments, filtered):
    if len(message) == 0:
        # skip this message because it has length zero
        continue
    elif sentiment != 2 or random.random() < keep_prob:
        balanced['messages'].append(message)
        balanced['sentiments'].append(sentiment)
# -

# If you did it correctly, you should see the following result

n_neutral = balanced['sentiments'].count(2)
N_examples = len(balanced['sentiments'])
n_neutral / N_examples

# Finally let's convert our tokens into integer ids which we can pass to the network.

token_ids = []
for message in balanced['messages']:
    token_ids.append([vocab[w] for w in message])
sentiments = balanced['sentiments']

# ## Neural Network
# Now we have our vocabulary which means we can transform our tokens into ids, which are then passed to our network. So, let's define the network now!
#
# Here is a nice diagram showing the network we'd like to build:
#
# #### Embed -> RNN -> Dense -> Softmax
# ### Implement the text classifier
# Before we build text classifier, if you remember from the other network that you built in "Sentiment Analysis with an RNN" exercise - which there, the network called " SentimentRNN", here we named it "TextClassifer" - consists of three main parts: 1) init function `__init__` 2) forward pass `forward` 3) hidden state `init_hidden`.
#
# This network is pretty similar to the network you built expect in the `forward` pass, we use softmax instead of sigmoid. The reason we are not using sigmoid is that the output of NN is not a binary. In our network, sentiment scores have 5 possible outcomes. We are looking for an outcome with the highest probability thus softmax is a better choice.
# +
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
device = torch.device('cuda' if train_on_gpu else 'cpu')

if(train_on_gpu):
    print('Training on GPU...')
else:
    print('No GPU available, training on CPU...')
# -

class TextClassifier(nn.Module):

    def __init__(self, vocab_size, embed_size, lstm_size, output_size, lstm_layers=1, dropout=0.1):
        """
        Initialize the model by setting up the layers.

        Parameters
        ----------
            vocab_size : The vocabulary size.
            embed_size : The embedding layer size.
            lstm_size : The LSTM layer size.
            output_size : The output size.
            lstm_layers : The number of LSTM layers.
            dropout : The dropout probability.
        """
        super().__init__()
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        self.lstm_size = lstm_size
        self.output_size = output_size
        self.lstm_layers = lstm_layers
        self.dropout = dropout

        # setup embedding layer
        self.embedding = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.embed_size)

        # setup LSTM layer (expects input shaped (seq_len, batch) since
        # batch_first=False)
        self.lstm = nn.LSTM(input_size=self.embed_size, hidden_size=self.lstm_size,
                            num_layers=self.lstm_layers, batch_first=False,
                            dropout=self.dropout)

        # setup dropout layer
        # FIX(review): p was hard-coded to 0.3, silently ignoring the
        # `dropout` constructor argument.  (As before, this rebinding
        # replaces the float stored in self.dropout with the module; the
        # float is still passed to the LSTM above first.)
        self.dropout = nn.Dropout(p=dropout)

        # setup fully-connected layer
        self.fc = nn.Linear(in_features=self.lstm_size, out_features=self.output_size)

        # setup log-softmax output layer (the original comment said
        # "sigmoid", but this is a 5-class classifier)
        self.log_softmax = nn.LogSoftmax(dim=1)

    def init_hidden(self, batch_size):
        """
        Initializes hidden state

        Parameters
        ----------
            batch_size : The size of batches.

        Returns
        -------
            hidden_state
        """
        # create two new tensors with sizes lstm_layers x batch_size x lstm_size,
        # initialized to zero, for hidden state and cell state of LSTM
        weight = next(self.parameters()).data

        if train_on_gpu:
            hidden = (weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_().cuda(),
                      weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_().cuda())
        else:
            hidden = (weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_(),
                      weight.new(self.lstm_layers, batch_size, self.lstm_size).zero_())

        return hidden

    def forward(self, nn_input, hidden_state):
        """
        Perform a forward pass of our model on nn_input.

        Parameters
        ----------
            nn_input : The batch of input to the NN, shaped (seq_len, batch).
            hidden_state : The LSTM hidden state.

        Returns
        -------
            logps: log softmax output
            hidden_state: The new hidden state.
        """
        # NOTE(review): the original computed `batch_size = nn_input.size(0)`
        # here, which with batch_first=False is the *sequence length*; the
        # value was never used, so it has been removed.

        # embeddings and lstm_out
        embedding_out = self.embedding(nn_input)
        lstm_out, hidden_state = self.lstm(embedding_out, hidden_state)

        # get the last lstm_out in the sequence
        lstm_out = lstm_out[-1]

        # dropout and fully-connected layer
        fc_out = self.dropout(self.fc(lstm_out))

        # softmax
        logps = self.log_softmax(fc_out)

        # return log-softmax output and hidden state
        return logps, hidden_state

# ### View Model

# FIX(review): token ids run from 1 to len(vocab), so the embedding table
# needs len(vocab)+1 rows (id 0 is padding) -- the training cells below
# already use len(vocab)+1; make this view cell consistent.
model = TextClassifier(len(vocab)+1, embed_size=10, lstm_size=6, output_size=5, dropout=0.1, lstm_layers=2)
model.embedding.weight.data.uniform_(-1, 1)
# (renamed from `input`, which shadowed the builtin)
sample_input = torch.randint(0, 1000, (5, 4), dtype=torch.int64)
hidden = model.init_hidden(4)

print(model)

# ## Training
# ### DataLoaders and Batching
# Now we should build a generator that we can use to loop through our data. It'll be more efficient if we can pass our sequences in as batches. Our input tensors should look like `(sequence_length, batch_size)`. So if our sequences are 40 tokens long and we pass in 25 sequences, then we'd have an input size of `(40, 25)`.
#
# If we set our sequence length to 40, what do we do with messages that are more or less than 40 tokens? For messages with fewer than 40 tokens, we will pad the empty spots with zeros. We should be sure to **left** pad so that the RNN starts from nothing before going through the data. If the message has 20 tokens, then the first 20 spots of our 40 long sequence will be 0. If a message has more than 40 tokens, we'll just keep the first 40 tokens.

def dataloader(messages, labels, sequence_length=30, batch_size=32, shuffle=False):
    """
    Build a dataloader.

    Yields (batch, labels) pairs where `batch` is an int64 tensor shaped
    (sequence_length, batch), left-padded with zeros, and messages longer
    than `sequence_length` are truncated to their first tokens.
    """
    if shuffle:
        indices = list(range(len(messages)))
        random.shuffle(indices)
        messages = [messages[idx] for idx in indices]
        labels = [labels[idx] for idx in indices]

    total_sequences = len(messages)

    for ii in range(0, total_sequences, batch_size):
        batch_messages = messages[ii: ii+batch_size]

        # first initialize a tensor of all zeros
        batch = torch.zeros((sequence_length, len(batch_messages)), dtype=torch.int64)
        for batch_num, tokens in enumerate(batch_messages):
            token_tensor = torch.tensor(tokens)
            # left pad: short messages start at `start_idx`, long ones are
            # truncated to the first `sequence_length` tokens
            start_idx = max(sequence_length - len(token_tensor), 0)
            batch[start_idx:, batch_num] = token_tensor[:sequence_length]

        label_tensor = torch.tensor(labels[ii: ii+len(batch_messages)])

        yield batch, label_tensor

# ### Training and Validation
# With our data in nice shape, we'll split it into training and validation sets.

# +
"""
Split data into training and validation datasets. Use an appropriate split size.
The features are the `token_ids` and the labels are the `sentiments`.
"""
split_frac = 0.8
split_idx = int(len(token_ids)*split_frac)
train_features, valid_features = token_ids[:split_idx], token_ids[split_idx:]
train_labels, valid_labels = sentiments[:split_idx], sentiments[split_idx:]
# -

text_batch, labels = next(iter(dataloader(train_features, train_labels, sequence_length=40, batch_size=512)))
model = TextClassifier(len(vocab)+1, 1024, 512, 5, dropout=0.)
hidden = model.init_hidden(512)

if train_on_gpu:
    text_batch = text_batch.cuda()
    model.cuda()

logps, hidden = model(text_batch, hidden)

# ### Training
# It's time to train the neural network!

model = TextClassifier(len(vocab)+1, 1024, 512, 5, lstm_layers=2, dropout=0.2)
model.embedding.weight.data.uniform_(-1, 1)
model.to(device)

# +
"""
Train your model with dropout. Make sure to clip your gradients.
Print the training loss, validation loss, and validation accuracy for every 100 steps.
"""

epochs = 2
batch_size = 512
sequence_length = 40
learning_rate = 0.001
clip = 5
print_every = 100
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

model.train()
for epoch in range(epochs):
    print('Starting epoch {}'.format(epoch + 1))
    hidden = model.init_hidden(batch_size)
    steps = 0
    for text_batch, labels in dataloader(
            train_features, train_labels, batch_size=batch_size,
            sequence_length=sequence_length, shuffle=True):
        # Skip ragged final batches: the hidden state was sized for a
        # full batch.
        if text_batch.size() != torch.Size([sequence_length, batch_size]):
            continue
        steps += 1
        # Detach the hidden state so we don't backprop through the
        # entire training history.
        hidden = tuple([each.data for each in hidden])

        # set device
        # BUG FIX: Tensor.to() is not in-place -- the original
        # `for each in hidden: each.to(device)` discarded its result and
        # left the hidden state on its original device. Rebuild the tuple.
        text_batch, labels = text_batch.to(device), labels.to(device)
        hidden = tuple(each.to(device) for each in hidden)

        model.zero_grad()

        # get the output from the model (nn.Module.__call__ invokes forward)
        output, hidden = model(text_batch, hidden)

        # calculate the loss and perform backprop
        loss = criterion(output.squeeze(), labels)
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()

        if steps % print_every == 0:
            val_losses = []
            val_hidden = model.init_hidden(batch_size)
            model.eval()

            for val_text_batch, val_labels in dataloader(
                    valid_features, valid_labels, batch_size=batch_size,
                    sequence_length=sequence_length):
                if val_text_batch.size() != torch.Size([sequence_length, batch_size]):
                    continue
                # create new variables for the hidden state, otherwise
                # we'd backprop through the entire training history
                val_hidden = tuple([each.data for each in val_hidden])
                # Same .to() fix as in the training loop above.
                val_text_batch, val_labels = val_text_batch.to(device), val_labels.to(device)
                val_hidden = tuple(each.to(device) for each in val_hidden)

                # BUG FIX: the original wrote `val_output, hidden = ...`,
                # clobbering the *training* hidden state with the
                # validation one; keep the validation state separate.
                val_output, val_hidden = model(val_text_batch, val_hidden)
                val_loss = criterion(val_output.squeeze(), val_labels)
                val_losses.append(val_loss.item())

            model.train()
            print('Epoch: {}/{}...'.format(epoch+1, epochs),
                  'Step: {}...'.format(steps),
                  'Loss: {:.6f}...'.format(loss.item()),
                  'Val Loss: {:.6f}'.format(np.mean(val_losses)))
# -

# ## Making Predictions
# ### Prediction
# Okay, now that you have a trained model, try it on some new twits and see if it
# works appropriately. Remember that for any new text, you'll need to preprocess it
# first before passing it to the network. Implement the `predict` function to
# generate the prediction vector from a message.

def predict(text, model, vocab):
    """
    Make a prediction on a single sentence.

    Parameters
    ----------
        text : The string to make a prediction on.
        model : The model to use for making the prediction.
        vocab : Dictionary for word to word ids. The key is the word and the value is the word id.

    Returns
    -------
        pred : Prediction vector (class probabilities, shape (1, n_classes))
    """
    tokens = preprocess(text)

    # filter non-vocab words
    # NOTE(review): `filtered_words` is defined earlier in the notebook
    # (frequency-filtered vocabulary) -- confirm it is in scope here.
    tokens = [word for word in tokens if word in filtered_words]
    # convert words to ids
    tokens = [vocab[word] for word in tokens]

    # add a batch dimension -> shape (seq_len, 1)
    text_input = torch.tensor(tokens).unsqueeze(1)
    if train_on_gpu:
        text_input = text_input.cuda()
        model.cuda()
    # get the NN output
    hidden = model.init_hidden(text_input.size(1))
    logps, _ = model(text_input, hidden)
    # take the exponent of the NN output to get a range of 0 to 1 for each label.
    pred = torch.exp(logps)

    return pred


text = "Google is working on self driving cars, I'm bullish on $goog"
model.eval()
model.to('cpu')
predict(text, model, vocab)

# ### Questions: What is the prediction of the model? What is the uncertainty of the prediction?

# Sentiment 1 is the most probable sentiment with a probability of 87.7% for the given text.

# Now we have a trained model and we can make predictions. We can use this model to
# track the sentiments of various stocks by predicting the sentiments of twits as
# they are coming in. Now we have a stream of twits. For each of those twits, pull
# out the stocks mentioned in them and keep track of the sentiments. Remember that
# in the twits, ticker symbols are encoded with a dollar sign as the first
# character, all caps, and 2-4 letters, like $AAPL. Ideally, you'd want to track
# the sentiments of the stocks in your universe and use this as a signal in your
# larger model(s).

# ## Testing
# ### Load the Data

with open(os.path.join('..', '..', 'data', 'project_6_stocktwits', 'test_twits.json'), 'r') as f:
    test_data = json.load(f)

# ### Twit Stream

# +
def twit_stream():
    """Yield test twits one at a time, simulating a live stream."""
    for twit in test_data['data']:
        yield twit

next(twit_stream())
# -

# Using the `prediction` function, let's apply it to a stream of twits.

def score_twits(stream, model, vocab, universe):
    """
    Given a stream of twits and a universe of tickers, return sentiment scores for tickers in the universe.
    """
    for twit in stream:

        # get the message text
        text = twit['message_body']
        # BUG FIX: use a raw string -- '\$' is an invalid escape sequence
        # in a plain string literal (DeprecationWarning, SyntaxError in
        # future Python versions).
        symbols = re.findall(r'\$[A-Z]{2,4}', text)
        score = predict(text, model, vocab)

        for symbol in symbols:
            if symbol in universe:
                yield {'symbol': symbol, 'score': score, 'timestamp': twit['timestamp']}

# +
universe = {'$BBRY', '$AAPL', '$AMZN', '$BABA', '$YHOO', '$LQMT', '$FB', '$GOOG', '$BBBY', '$JNUG', '$SBUX', '$MU'}
score_stream = score_twits(twit_stream(), model, vocab, universe)

next(score_stream)
# -

# That's it. You have successfully built a model for sentiment analysis!

# ## Submission
# Now that you're done with the project, it's time to submit it. Click the submit
# button in the bottom right. One of our reviewers will give you feedback on your
# project with a pass or not passed grade. You can continue to the next section
# while you wait for feedback.
project_6_sentiment_analysis_with_neural_networks.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ###### Exercise: Create a program that inputs a string, displays the string, finds the number of words that start with t/T, reverses the third word, and joins the given string with a new string (also input by the user).

def t(string):
    """Print and return the number of words in `string` that start with t/T."""
    count = sum(1 for word in string.split() if word[0].lower() == "t")
    print(f"The number of words starting with T/t are {count}.")
    return count


def reverse_third(string):
    """Print and return the third word of `string` reversed.

    Returns None (with a message) when the string has fewer than three
    words -- the original crashed with an IndexError in that case.
    """
    words = string.split()
    if len(words) < 3:
        print("The string needs at least three words to reverse the third one.")
        return None
    reversed_word = words[2][::-1]  # slicing with step -1 reverses the word
    print("The third word reversed is " + reversed_word)
    return reversed_word


def join(string):
    """Ask the user for a second string and print it joined to `string`."""
    s = input("Enter another string to join with: ")
    print("The joint string is:")
    print(string + s)


def main():
    """Run the interactive menu until the user chooses to quit (6)."""
    choice = 0
    string = None  # no string stored yet (fixes NameError when options 2-5 are picked first)
    while choice != 6:
        print("\n=================== MAIN MENU ===================")
        print("1. Enter a string")
        print("2. Display the string")
        print("3. Count number of words starting with T/t")
        print("4. Print reversed third word")
        print("5. Join string with another string")
        print("6. Quit")
        print("=================================================\n")
        try:
            choice = int(input("Enter your choice: "))
        except ValueError:
            # Non-numeric input crashed the original; treat it like any
            # other invalid choice instead.
            choice = 0
        if choice not in [1, 2, 3, 4, 5, 6]:
            # BUG FIX: the original said "try again" but then broke out
            # of the loop; keep prompting instead.
            print("Invalid input. Please try again.")
        elif choice == 1:
            string = input("Enter your string: ")
            print("String stored.")
        elif choice == 6:
            pass  # loop condition terminates the program
        elif string is None:
            # Guard: options 2-5 need a stored string first.
            print("No string stored yet. Please choose option 1 first.")
        elif choice == 2:
            print("Your given string is: ")
            print(string)
        elif choice == 3:
            t(string)
        elif choice == 4:
            reverse_third(string)
        elif choice == 5:
            join(string)
    print("End of program.")


# Guard so importing this module does not start the interactive loop;
# a notebook cell still runs main() because __name__ == "__main__" there.
if __name__ == "__main__":
    main()
String input, display, count words starting with t, reverse third word, join with another string.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] tags=[]
# # SOP069 - Install ODBC for SQL Server
#
# ## Description
#
# Some subcommands in `azdata` require the SQL Server ODBC driver. If the
# driver is not installed, the following error is given:
#
# > ERROR: Error processing command: "InterfaceError" ('IM002', '\[IM002\]
# > \[Microsoft\]\[ODBC Driver Manager\] Data source name not found and no
# > default driver specified (0) (SQLDriverConnect)')
#
# ## Steps
#
# ### Install ODBC Driver 17 for SQL Server

# + tags=[]
import platform
import webbrowser

# Pick the driver download page that matches the current OS, then open
# it in the default browser.
_WINDOWS_URL = 'https://www.microsoft.com/en-us/download/details.aspx?id=56567'
_OTHER_URL = 'https://docs.microsoft.com/en-us/sql/connect/odbc/linux-mac/installing-the-microsoft-odbc-driver-for-sql-server'

target_url = _WINDOWS_URL if platform.system() == "Windows" else _OTHER_URL
webbrowser.open(target_url)

# + tags=[]
print("Notebook execution is complete.")
Big-Data-Clusters/CU9/public/content/install/sop069-install-odbc-driver-for-sql-server.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Django Shell-Plus # language: python # name: django_extensions # --- import pandas as pd from apis_core.apis_metainfo.models import Collection as ACollection from apis_core.apis_entities.models import Person as APerson from apis_core.apis_entities.models import Place as APlace from apis_core.apis_entities.models import Institution as AInstitution from apis_core.apis_entities.models import Work as AWork file = "data/hbas_family_relations.csv" df = pd.read_csv(file, encoding="utf-8", delimiter=',') df df['source'] = df.apply( lambda row: "https://bahrschnitzler.acdh.oeaw.ac.at/{}".format(row['hbas-key-source']), axis=1 ) df['target'] = df.apply( lambda row: "https://bahrschnitzler.acdh.oeaw.ac.at/{}".format(row['Hbas-key-target']), axis=1 ) for i, row in df.iterrows(): source = Uri.objects.get(uri=row['source']).entity target = Uri.objects.get(uri=row['target']).entity rel_type, _ = PersonPersonRelation.objects.get_or_create( name=row['seh_relation'] ) PersonPerson.objects.get_or_create( relation_type=rel_type, related_personA=APerson.objects.get(pk=source.id), related_personB=APerson.objects.get(pk=target.id) ) set(df['seh_relation']) file = "data/relation_person_person_family-relations_name_name-reverse.csv" df = pd.read_csv(file, encoding="utf-8", delimiter=';').fillna('False') df.head() for i, row in df.iterrows(): if row['name'] != 'False': try: pprel = PersonPersonRelation.objects.get(name=row['name']) except Exception as e: pprel = None print(e, row['name']) if pprel: pprel.name_reverse = row['name_reverse'] pprel.save() PersonPersonRelation.objects.filter(name='isNiblingOf')
perspers_relations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# # 1.3 Classifying a small dataset with "transfer learning"
#
# - This notebook uses a pretrained VGG model and transfer learning to
#   train a classifier for images of ants and bees.

# + [markdown]
# ## This notebook is intended to be run on Google Colab

# + [markdown]
# # Learning goals
#
# 1. Build a Dataset from image files
# 2. Build a DataLoader from the Dataset
# 3. Replace the output layer of a pretrained model
# 4. Train only the output-layer parameters (transfer learning)

# + [markdown]
# # Preparation
#
# 1. Download the data used in this chapter as instructed in the book.
# 2. Install tqdm (progress/ETA bars for loops):
#    conda install -c conda-forge tqdm

# +
# !git clone https://github.com/YutaroOgawa/pytorch_advanced.git

# +
# !ls

# +
# %cd "pytorch_advanced"

# +
# !ls

# +
# %cd "1_image_classification"

# +
# !ls

# +
# Run the contents of make_folders_and_data_downloads.ipynb:
# create ./data/ and fetch the ImageNet class index and the
# hymenoptera (ants/bees) dataset.
import os
import urllib.request
import zipfile

data_dir = "./data/"
if not os.path.exists(data_dir):
    os.mkdir(data_dir)

url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
save_path = os.path.join(data_dir, "imagenet_class_index.json")

if not os.path.exists(save_path):
    urllib.request.urlretrieve(url, save_path)

url = "https://download.pytorch.org/tutorial/hymenoptera_data.zip"
save_path = os.path.join(data_dir, "hymenoptera_data.zip")

if not os.path.exists(save_path):
    urllib.request.urlretrieve(url, save_path)

    # Extract the downloaded archive, then delete it.
    # Renamed from `zip` so the builtin zip() is not shadowed, and closed
    # via a context manager so the handle is released even on error.
    with zipfile.ZipFile(save_path) as zip_file:
        zip_file.extractall(data_dir)

    os.remove(save_path)

# +
# (end of Colab-specific additions)

# +
# Package imports
import glob
import os.path as osp
import random
import numpy as np
import json
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
# %matplotlib inline

import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torchvision
from torchvision import models, transforms

# +
# Fix the random seeds for reproducibility
torch.manual_seed(1234)
np.random.seed(1234)
random.seed(1234)

# + [markdown]
# # Build the Dataset

# +
class ImageTransform():
    """
    Image preprocessing that behaves differently at train and validation
    time. Resizes the image and normalizes the colors; at training time
    it also augments with RandomResizedCrop and RandomHorizontalFlip.

    Attributes
    ----------
    resize : int
        Target image size after resizing.
    mean : (R, G, B)
        Per-channel means used for normalization.
    std : (R, G, B)
        Per-channel standard deviations used for normalization.
    """

    def __init__(self, resize, mean, std):
        self.data_transform = {
            'train': transforms.Compose([
                transforms.RandomResizedCrop(
                    resize, scale=(0.5, 1.0)),  # data augmentation
                transforms.RandomHorizontalFlip(),  # data augmentation
                transforms.ToTensor(),  # convert to tensor
                transforms.Normalize(mean, std)  # normalize
            ]),
            'val': transforms.Compose([
                transforms.Resize(resize),  # resize
                transforms.CenterCrop(resize),  # crop the central resize x resize area
                transforms.ToTensor(),  # convert to tensor
                transforms.Normalize(mean, std)  # normalize
            ])
        }

    def __call__(self, img, phase='train'):
        """
        Parameters
        ----------
        phase : 'train' or 'val'
            Selects which preprocessing pipeline to apply.
        """
        return self.data_transform[phase](img)

# +
# Check the training-time preprocessing; the augmented output changes
# on every run.

# 1. Load an image
image_file_path = './data/goldenretriever-3724972_640.jpg'
img = Image.open(image_file_path)   # [height][width][RGB]

# 2. Show the original image
plt.imshow(img)
plt.show()

# 3. Preprocess the image and show the result
size = 224
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)

transform = ImageTransform(size, mean, std)
img_transformed = transform(img, phase="train")  # torch.Size([3, 224, 224])

# (C, H, W) -> (H, W, C), clip to [0, 1] so matplotlib can display it
img_transformed = img_transformed.numpy().transpose((1, 2, 0))
img_transformed = np.clip(img_transformed, 0, 1)
plt.imshow(img_transformed)
plt.show()


# +
def make_datapath_list(phase="train"):
    """
    Build a list of file paths to the ant/bee images.

    Parameters
    ----------
    phase : 'train' or 'val'
        Whether to list training or validation data.

    Returns
    -------
    path_list : list
        Paths to the image files.
    """

    rootpath = "./data/hymenoptera_data/"
    target_path = osp.join(rootpath+phase+'/**/*.jpg')
    print(target_path)

    path_list = []  # collected here

    # glob resolves the per-class subdirectories
    for path in glob.glob(target_path):
        path_list.append(path)

    return path_list


# Build the lists
train_list = make_datapath_list(phase="train")
val_list = make_datapath_list(phase="val")

train_list


# +
class HymenopteraDataset(data.Dataset):
    """
    Dataset of ant and bee images; subclasses PyTorch's Dataset.

    Attributes
    ----------
    file_list : list
        Paths of the image files.
    transform : object
        Preprocessing instance (ImageTransform).
    phase : 'train' or 'val'
        Which preprocessing pipeline to use.
    """

    def __init__(self, file_list, transform=None, phase='train'):
        self.file_list = file_list  # list of file paths
        self.transform = transform  # preprocessing instance
        self.phase = phase  # 'train' or 'val'

    def __len__(self):
        '''Number of images.'''
        return len(self.file_list)

    def __getitem__(self, index):
        '''
        Return the preprocessed image tensor and its numeric label.
        '''

        # Load the index-th image
        img_path = self.file_list[index]
        img = Image.open(img_path)  # [height][width][RGB]

        # Apply the preprocessing
        img_transformed = self.transform(
            img, self.phase)  # torch.Size([3, 224, 224])

        # FIX: derive the label from the class directory name instead of
        # hard-coded character offsets (img_path[30:34] for train,
        # img_path[28:32] for val), which silently broke whenever the
        # root path length or the OS path separator differed.
        label = osp.basename(osp.dirname(img_path))

        # Map the label name to a number
        if label == "ants":
            label = 0
        elif label == "bees":
            label = 1

        return img_transformed, label


# Build the datasets
train_dataset = HymenopteraDataset(
    file_list=train_list, transform=ImageTransform(size, mean, std), phase='train')

val_dataset = HymenopteraDataset(
    file_list=val_list, transform=ImageTransform(size, mean, std), phase='val')

# Sanity check
index = 0
print(train_dataset.__getitem__(index)[0].size())
print(train_dataset.__getitem__(index)[1])

# + [markdown]
# # Build the DataLoader

# +
# Mini-batch size
batch_size = 32

# Build the DataLoaders
train_dataloader = torch.utils.data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True)

val_dataloader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False)

# Keep both loaders in one dict keyed by phase
dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}

# Sanity check
batch_iterator = iter(dataloaders_dict["train"])  # convert to iterator
inputs, labels = next(batch_iterator)  # take the first batch
print(inputs.size())
print(labels)

# + [markdown]
# # Build the network model

# +
# Load the pretrained VGG-16 model
use_pretrained = True  # use the pretrained weights
net = models.vgg16(pretrained=use_pretrained)

# Replace the final output layer with a 2-unit layer (ants vs. bees)
net.classifier[6] = nn.Linear(in_features=4096, out_features=2)

# Set training mode
net.train()

print('ネットワーク設定完了:学習済みの重みをロードし、訓練モードに設定しました')

# + [markdown]
# # Define the loss function

# +
criterion = nn.CrossEntropyLoss()

# + [markdown]
# # Configure the optimizer

# +
# Collect only the parameters that transfer learning should update
params_to_update = []

# Names of the parameters to train (the new output layer only)
update_param_names = ["classifier.6.weight", "classifier.6.bias"]

# Freeze everything except the parameters listed above
for name, param in net.named_parameters():
    if name in update_param_names:
        param.requires_grad = True
        params_to_update.append(param)
        print(name)
    else:
        param.requires_grad = False

# Inspect params_to_update
print("-----------")
print(params_to_update)

# +
# Optimizer over the trainable parameters only
optimizer = optim.SGD(params=params_to_update, lr=0.001, momentum=0.9)


# + [markdown]
# # Train and validate

# +
def train_model(net, dataloaders_dict, criterion, optimizer, num_epochs):
    """Train `net` for `num_epochs`, running a validation pass each epoch."""

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch+1, num_epochs))
        print('-------------')

        # Per-epoch train and validation phases
        for phase in ['train', 'val']:
            if phase == 'train':
                net.train()  # training mode
            else:
                net.eval()  # evaluation mode

            epoch_loss = 0.0  # running loss for this epoch
            epoch_corrects = 0  # running correct-prediction count

            # Skip training in epoch 0 so the untrained validation
            # performance can be observed first.
            if (epoch == 0) and (phase == 'train'):
                continue

            # Iterate over mini-batches
            for inputs, labels in tqdm(dataloaders_dict[phase]):

                # Reset gradients
                optimizer.zero_grad()

                # Forward pass; track gradients only while training
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = net(inputs)
                    loss = criterion(outputs, labels)  # compute the loss
                    _, preds = torch.max(outputs, 1)  # predicted labels

                    # Backpropagate during training
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                    # Accumulate loss (weighted by batch size) and correct count
                    epoch_loss += loss.item() * inputs.size(0)
                    epoch_corrects += torch.sum(preds == labels.data)

            # Per-epoch loss and accuracy
            epoch_loss = epoch_loss / len(dataloaders_dict[phase].dataset)
            epoch_acc = epoch_corrects.double(
            ) / len(dataloaders_dict[phase].dataset)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))

# +
# Run training and validation
num_epochs = 2
train_model(net, dataloaders_dict, criterion, optimizer, num_epochs=num_epochs)

# + [markdown]
# Done.
1_image_classification/1_3_transfer_learning_on_GoogleColab.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from os import path
import numpy as np
from enmspring import PDB

# ### Part 1: Set the location of the test pdb
rootfolder = '/home/yizaochen/codes/dna_rna/enmspring/pdb_gro'
pdb_in = path.join(rootfolder, 'g_tract_21mer.perfect.pdb')
print(f'vmd -pdb {pdb_in}')
print(f'vim {pdb_in}')

# ### Part 2: Read pdb in
reader = PDB.PDBReader(pdb_in, skip_header=4, skip_footer=2)

# Inspect the first atom's temperature factor
atom1 = reader.atomgroup[0]
atom1.tempFactor

# ### Part 3: Random generate tempFactor in [0,1]
# BUG FIX: the original loop assigned to `atom1` (the first atom
# captured above) on every iteration, so only that one object was ever
# modified and all other atoms kept their original tempFactor. Assign to
# the loop variable instead.
for atom in reader.atomgroup:
    atom.tempFactor = np.random.random()

# ### Part 4: Output PDB
pdb_out = path.join(rootfolder, 'tempfactor_test.pdb')
writer = PDB.PDBWriter(pdb_out, reader.atomgroup)
writer.write_pdb()
print(f'vmd -pdb {pdb_out}')
print(f'vim {pdb_out}')

# ### Reference:
# https://www.mdanalysis.org/MDAnalysisTutorial/writing.html
notebooks/.ipynb_checkpoints/vmd_beta_color_test-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: taddle
#     language: python
#     name: taddle
# ---

# This file extracts features from the images using the CNN and computes cluster-level aggregations.
# <br>
# <br>
# Written by <NAME>
# <br>
# 5/2020

# +
import os
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import pickle

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torchvision import datasets, models, transforms

import matplotlib.pyplot as plt
from PIL import Image

BASE_DIR = '..'
import sys
sys.path.append(BASE_DIR)
from config import TRAINING_CONFIG, RANDOM_SEED

# +
COUNTRIES_DIR = os.path.join(BASE_DIR, 'data', 'countries')
PROCESSED_DIR = os.path.join(BASE_DIR, 'data', 'processed')
RESULTS_DIR = os.path.join(BASE_DIR, 'results')

TYPE = TRAINING_CONFIG['TYPE']
COUNTRY = TRAINING_CONFIG['COUNTRY']
METRIC = TRAINING_CONFIG['METRIC']

CNN_TRAIN_IMAGE_DIR = os.path.join(BASE_DIR, 'data', 'cnn_images', TYPE, COUNTRY, METRIC)
CNN_SAVE_DIR = os.path.join(BASE_DIR, 'models', TYPE, COUNTRY, METRIC)
# -

# reduce if memory errors on CUDA
BATCH_SIZE = 32

# Validate the configuration before doing any work.
assert TYPE in ['single_country', 'country_held_out']
assert COUNTRY in ['malawi_2016', 'ethiopia_2015']
assert METRIC in ['house_has_cellphone', 'est_monthly_phone_cost_pc']

os.makedirs(RESULTS_DIR, exist_ok=True)
os.makedirs(os.path.join(RESULTS_DIR, TYPE, COUNTRY), exist_ok=True)


# # Feature extract with CNN

def load_country(country, metric):
    """Load the image-metadata CSV for the given country and metric.

    FIX: the original ignored both parameters and read the module-level
    COUNTRY/METRIC globals instead; use the arguments (identical result
    for the existing call site, which passes the globals).
    """
    filepath = os.path.join(PROCESSED_DIR, TYPE, country, f'{metric}.csv')
    df_images = pd.read_csv(filepath)
    return df_images

df_images = load_country(COUNTRY, METRIC)
df_images.head()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using {device} as backend')
model = torch.load(os.path.join(CNN_SAVE_DIR, f'trained_model_{METRIC}.pt'), map_location=device)

model.classifier

# remove the final layers so the classifier emits 4096-d features
model.classifier = model.classifier[:4]

model.classifier

# +
input_size = 224
transformer = transforms.Compose([
    transforms.Resize(input_size),
    transforms.CenterCrop(input_size),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# custom dataset for fast image loading and processing
# does not follow the usual style of folder -> folder for each class -> image
# we just want one folder with images
class ForwardPassDataset(torch.utils.data.Dataset):
    """Flat-folder image dataset: every file in `image_dir` is one item."""

    def __init__(self, image_dir, transformer):
        self.image_dir = image_dir
        self.image_list = os.listdir(self.image_dir)
        self.transformer = transformer

    def __len__(self):
        return len(self.image_list)

    def __getitem__(self, index):
        image_name = self.image_list[index]

        # Load image (os.path.join instead of manual '/' concatenation)
        X = self.filename_to_im_tensor(os.path.join(self.image_dir, image_name))

        # dataloaders need to return a label, but for the forward pass we don't really care
        return X, -1

    def filename_to_im_tensor(self, file):
        # plt.imread returns floats in [0, 1] for PNG images.
        # BUG FIX: scale by 255, not 256 -- a pixel value of exactly 1.0
        # times 256 is 256, which wraps around to 0 when cast to uint8
        # (white pixels became black).
        im = (plt.imread(file)[:,:,:3] * 255).astype(np.uint8)
        im = Image.fromarray(im)
        im = self.transformer(im)
        return im

def run_forward_pass(model, df_images, mode):
    """Run the truncated CNN over every image in the given split.

    Parameters
    ----------
    model : the CNN with its classifier truncated to emit 4096-d features.
    df_images : metadata frame with an `is_train` boolean column, used
        only to size the output array.
    mode : 'train' or 'valid' -- which image subfolder to process.

    Returns
    -------
    feats : np.ndarray of shape (num_images, 4096).
    forward_pass_df : frame mapping image_name -> row index in `feats`.
    """
    assert mode in ['train', 'valid']
    model.eval()

    # shape of final array will be (num_images, 4096)
    # we also want to record the image each index represents
    feats = None
    if mode == 'train':
        feats = np.zeros(((df_images['is_train']).sum(), 4096))
    else:
        feats = np.zeros(((~df_images['is_train']).sum(), 4096))
    image_order = []

    i = 0
    dataset = ForwardPassDataset(os.path.join(CNN_TRAIN_IMAGE_DIR, mode), transformer)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
    image_order += dataset.image_list
    # forward pass; no_grad avoids building the autograd graph during
    # pure inference (same outputs, much less memory)
    with torch.no_grad():
        for inputs, _ in tqdm(dataloader):
            inputs = inputs.to(device)
            outputs = model(inputs)
            feats[i:i+len(inputs),:] = outputs.cpu().detach().numpy()
            i += len(inputs)

    forward_pass_df = pd.DataFrame.from_dict({'image_name': image_order, 'feat_index': np.arange(len(image_order))})
    return feats, forward_pass_df
# -

feats_train, df_train = run_forward_pass(model, df_images, 'train')

feats_valid, df_valid = run_forward_pass(model, df_images, 'valid')

df_train = pd.merge(left=df_images, right=df_train, on='image_name', how='inner')
df_valid = pd.merge(left=df_images, right=df_valid, on='image_name', how='inner')

# have we maintained all images?
assert len(df_train) == (df_images['is_train']).sum()
assert len(df_valid) == (~df_images['is_train']).sum()

df_train

df_valid

# ## Aggregate Features

def aggregate_features(df, feats, country, mode):
    """Average the 4096-d image features within each (lat, lon) cluster
    and save the aggregate array plus the cluster order to disk.

    Parameters
    ----------
    df : per-image frame with `cluster_lat`, `cluster_lon`, `feat_index`.
    feats : per-image feature array indexed by `feat_index`.
    country : country name used in the output path.
    mode : 'train' or 'valid' -- suffix for the output filenames.
    """
    assert mode in ['train', 'valid']
    group = df.groupby(['cluster_lat', 'cluster_lon'])
    x = np.zeros((len(group), 4096))
    cluster_list = []  # the corresponding clusters (lat, lon) to the x aggregate feature array
    for i, g in enumerate(group):
        lat, lon = g[0]
        im_sub = df[(df['cluster_lat'] == lat) & (df['cluster_lon'] == lon)].reset_index(drop=True)
        agg_feats = np.zeros((len(im_sub), 4096))
        for j, d in im_sub.iterrows():
            agg_feats[j,:] = feats[d.feat_index]
        agg_feats = agg_feats.mean(axis=0)  # averages the features across all images in the cluster
        x[i,:] = agg_feats
        cluster_list.append([lat, lon])

    # save to the correct directory
    save_dir = os.path.join(RESULTS_DIR, TYPE, country, METRIC, 'cnn')
    os.makedirs(save_dir, exist_ok=True)
    print(f'saving to {save_dir}')
    np.save(os.path.join(save_dir, f'cluster_feats_{mode}_{METRIC}.npy'), x)
    # FIX: use a context manager so the pickle file handle is closed
    # (the original leaked the handle returned by open()).
    with open(os.path.join(save_dir, f'cluster_order_{mode}_{METRIC}.pkl'), 'wb') as f:
        pickle.dump(cluster_list, f)

aggregate_features(df_train, feats_train, COUNTRY, 'train')

aggregate_features(df_valid, feats_valid, COUNTRY, 'valid')
scripts/feature_extract.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # + [markdown] slideshow={"slide_type": "slide"} # # Class 4A: Introduction to Programming in Python # # We will begin at 2:00 PM! Until then, feel free to use the chat to socialize, and enjoy the music! # # <img src="images/programming.jpg" width=50% style="margin-left:auto; margin-right:auto"> # # # <div align = "left"> # <br> # <br> # Photo by <a href="https://www.pexels.com/photo/two-women-sitting-on-sofa-holding-laptop-computers-1181268"><NAME></a> from Pexels # </div> # # <div align = "right"> # September 27, 2021 <br> # <NAME> # </div> # + [markdown] slideshow={"slide_type": "slide"} # ## Class Outline: # # - 1st hour - Introduction to Python # - Announcements (2 mins) # - Introduction (3 mins) # - Writing and Running Code (15 min) # - Interpreting Code (15 min) # - Review and Recap (5 min) # # + [markdown] slideshow={"slide_type": "slide"} # ## Learning Objectives # + [markdown] slideshow={"slide_type": "fragment"} # - Look at some lines of code and predict what the output will be. # + [markdown] slideshow={"slide_type": "fragment"} # - Convert an English sentence into code. # + [markdown] slideshow={"slide_type": "fragment"} # - Recognize the order specific lines of code need to be run to get the desired output. # + [markdown] slideshow={"slide_type": "fragment"} # - Imagine how programming can be useful to your life! 
# + [markdown] slideshow={"slide_type": "slide"} # ## Part 1: Introduction (5 mins) # # <img src="images/bus_stop.jpg" width=60% alt="bus stop" style="margin-left:auto; margin-right:auto"> # + [markdown] slideshow={"slide_type": "slide"} # ## Part 2: Writing and Running code (15 mins) # # <img src="images/bus_stop.jpg" width=60% style="margin-left:auto; margin-right:auto"> # + slideshow={"slide_type": "skip"} # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} tags=[] # ### Using Python to do Math # # | Task | Symbol | # |---------------------------|--------| # | Addition | `+` | # | Subtraction | `-` | # | Multiplication | `*` | # | Division | `/` | # | Square, cube, ... | `**` | # | Squareroot, Cuberoot, ... | `**` | # | Trigonometry (sin,cos,tan)| Later...| # # # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # #### Demo (side-by-side) # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # # Addition # + cell_style="split" slideshow={"slide_type": "fragment"} 4 + 5 +2 # + cell_style="split" slideshow={"slide_type": "fragment"} # Subtraction 4-5 # + cell_style="split" slideshow={"slide_type": "fragment"} # Multiplication 4*5 # + cell_style="split" slideshow={"slide_type": "fragment"} # Division 10/2 # + cell_style="split" slideshow={"slide_type": "fragment"} # Square, cube, ... 2**3 # + cell_style="split" slideshow={"slide_type": "fragment"} # Squareroot, Cuberoot, ... 4**(1/2) # + slideshow={"slide_type": "skip"} # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} tags=[] # ### Assigning *numbers* to variables # # - You can assign numbers to a "variable". # - You can think of a variable as a "container" that represents what you assigned to it # - There are some rules about "valid" names for variables, we'll talk about the details later # - Rule 1: Can't start the name of a variable with a number! 
# # - General guideline for now: just use a combination of words and numbers # # <br> # <br> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # #### Demo (side-by-side) # + cell_style="split" slideshow={"slide_type": "fragment"} tags=[] # Two numbers num1 = 4000000000000 num2 = 8 print(num1,num2) # + cell_style="split" slideshow={"slide_type": "fragment"} tags=[] # Multiply numbers together num1*num2 # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} tags=[] # ### Assigning *words* to variables # # - You can also assign words and sentences to variables! # - Surround anything that's not a number with double quotes " and " # + cell_style="split" slideshow={"slide_type": "fragment"} mysentence = "This is a whole sentence with a list of sports, swimming, tennis, badminton" print(mysentence) # + slideshow={"slide_type": "skip"} # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} tags=[] # ### Using Python to work with words # # Python has some nifty "functions" to work with words and sentences. # # Here's a table summarizing some interesting ones, we'll keep adding to this as the term goes on. 
# # | Task | Function | # | ---- | -------- | # | Make everything upper-case | `upper()` | # | Make everything lower-case | `lower()` | # | Capitalize first letter of every word | `title()` | # | Count letters or sequences | `count()` | # # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # <br> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} tags=[] # #### Demo (side-by-side) # + cell_style="split" slideshow={"slide_type": "fragment"} tags=[] mysentence # + cell_style="split" slideshow={"slide_type": "fragment"} tags=[] # split on a comma mysentence.split(',') # + cell_style="split" slideshow={"slide_type": "fragment"} tags=[] # make upper case mysentence.upper() # + cell_style="split" slideshow={"slide_type": "fragment"} tags=[] # make lower case mysentence.lower() # + cell_style="split" slideshow={"slide_type": "fragment"} # count the times "i" occurs mysentence.count('i') # + cell_style="split" slideshow={"slide_type": "fragment"} tags=[] # count two characters: hi mysentence.count('hi') # - mysentence2 = " Hello. World ..... " mysentence2 mysentence2.strip(' ') mysentence2.replace(' ','^') # + [markdown] slideshow={"slide_type": "slide"} # ## Part 3: Interpreting code (15 mins) # # <img src="images/bus_stop.jpg" width=60% style="margin-left:auto; margin-right:auto"> # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # ### Q1: Interpret Code # # Look at the following code chunk, can you predict what the output will be? # # ``` # some_numbers = [1, 50, 40, 75, 400, 1000] # # for i in some_numbers: # print(i*5) # # ``` # # <br> # <br> # <br> # <br> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **A. Prints 6 random numbers.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **B. 
Prints the 6 numbers in `some_numbers`.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **C. Prints the 6 numbers in `some_numbers` multiplied by 5.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **D. I don't know.** # + cell_style="split" slideshow={"slide_type": "fragment"} # Let's try it! some_numbers = [1, 50, 40, 75, 400, 1000] for i in some_numbers: print(i*5) # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # ### Q2: Interpret Code # # Look at the following code chunk, can you predict what the output will be? # # ``` # some_numbers = [1, 50, 40, 75, 400, 1000] # # for i in some_numbers: # # if i > 50: # print(i/5) # else: # print(i) # # ``` # # <br> # <br> # <br> # <br> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **A. Prints the 6 numbers in `some_numbers`.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **B. Prints the number in `some_numbers` if it is less than or equal to 50, otherwise prints the number divided by 5.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **C. Prints the number in `some_numbers` if it is greater than 50, otherwise prints the number divided by 5.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **D. I don't know.** # + cell_style="split" slideshow={"slide_type": "fragment"} # Let's try it! some_numbers = [1, 50, 40, 75, 400, 1000] for i in some_numbers: if i > 50: print(i/5) else: print(i) # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # ### Q3: Interpret Code # # Look at the following code chunk, can you predict what the output will be? 
# # <br> # <br> # # ``` # some_numbers = [1, 50, 40, 75, 400, 1000] # # def process_number(number): # return (number**2)/10 # # for i in some_numbers: # if i > 50: # print(process_number(i)) # ``` # # <br> # <br> # <br> # <br> # <br> # <br> # <br> # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **A. Prints the number in `some_numbers` if it is greater than 50, otherwise prints nothing.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **B. Prints the output of the `process_number()` function applied to `some_numbers`.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **C. Prints the output of the `process_number()` function if the original number is greater than 50, otherwise prints nothing.** # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # **D. I don't know.** # + cell_style="split" slideshow={"slide_type": "fragment"} # Let's try it! some_numbers = [1, 50, 40, 75, 400, 1000] def process_number(number): return (number**2)/10 for i in some_numbers: if i > 50: print(process_number(i)) # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # ### Q4: Order matters! # # Suppose you are asked to complete the following operation: # # > Take the number 5, square it, subtract 2, and then multiply the result by 10 # # Does the order of the operations you do matter? Yes! # + cell_style="split" slideshow={"slide_type": "fragment"} # Let's try it: ((5**2) -2)*10 # + cell_style="split" slideshow={"slide_type": "fragment"} # Here is the same operation as above but in multiple lines number = 5 number = number**2 number = number - 2 number = number * 10 print(number) # + slideshow={"slide_type": "skip"} # + [markdown] cell_style="split" slideshow={"slide_type": "subslide"} # ### Q5: Parson's problem # # A Parson's Problem is one where you are given all the lines of code to solve the problem, but they are jumbled and it's up to you to get the right order. 
# # A student would like to get this as the final output of some code that they are writing: # # ``` # 3 is smaller than, or equal to 10. # 4 is smaller than, or equal to 10. # 5 is smaller than, or equal to 10. # 6 is smaller than, or equal to 10. # 7 is smaller than, or equal to 10. # 8 is smaller than, or equal to 10. # 9 is smaller than, or equal to 10. # 10 is smaller than, or equal to 10. # 11 is bigger than 10! # 12 is bigger than 10! # 13 is bigger than 10! # 14 is bigger than 10! # 15 is bigger than 10! # ``` # + [markdown] cell_style="split" slideshow={"slide_type": "fragment"} # Here are ALL the lines of code they will need to use, but they are scrambled in the wrong order. Can you produce the desired output? # # *Hint: Pay attention to the indents!* # # ``` # my_numbers = [3,4,5,6,7,8,9,10,11,12,13,14,15] # for i in my_numbers: # if i > 10: # print(i,'is smaller than, or equal to 10.') # else: # print(i,'is bigger than 10!') # ``` # # # + my_numbers = [3,4,5,6,7,8,9,10,11,12,13,14,15] for i in my_numbers: if i > 10: print(i,'is bigger than 10!') else: print(i,'is smaller than, or equal to 10.') # + [markdown] slideshow={"slide_type": "subslide"} # ## Congratulations!! # # - You have just shown that you can program! # # - Over 75% of the course programming content will be focused on details of the things you've seen above: # - Numbers and Strings # - Loops and Conditionals # - Functions # # - If you followed along with most of what we covered, you're in good shape for this course # # <img src="https://media.giphy.com/media/11sBLVxNs7v6WA/giphy.gif" width=80% style="margin-left:auto; margin-right:auto"> # -
notes/week04/Class4A/Class4A.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + # # !pip install IPythonDisplayFireworks << how to install on your own notebook! # - from IPythonDisplayFireworks import Fireworks Fireworks() Fireworks() # Run as many times as you want!
try.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] id="I33uQDtyTrzn"
# <a href="https://colab.research.google.com/github/beephsupreme/toki-notebook/blob/main/calc_sockets.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + id="4ce1d00c"
import math


def calc_sockets(oc=None, feet=None):
    """Work out how many sockets fit along a run of a given length.

    Parameters
    ----------
    oc : float, optional
        On-centre socket spacing in millimetres (e.g. 160 or 310).
        Prompted for interactively when omitted, as before.
    feet : float, optional
        Length of the run in feet. Prompted for interactively when omitted.

    Returns
    -------
    int
        Number of sockets, rounded up to the next whole socket.
    """
    if oc is None:
        oc = float(input("Enter spacing [ex. 160 or 310]: "))
    if feet is None:
        feet = float(input("Enter length [feet]: "))
    # feet -> metres (x 0.3048), spacing mm -> metres (/ 1000), then round up
    # so a partial spacing interval still gets a socket.
    sockets = math.ceil((feet * 0.3048) / (oc / 1000))
    print("%d feet @ %d spacing has %d sockets." % (feet, oc, sockets))
    return sockets


# + colab={"base_uri": "https://localhost:8080/"} id="24a22e6b" executionInfo={"status": "ok", "timestamp": 1642174353974, "user_tz": 480, "elapsed": 17636, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiOHaSuCko1zwqzZdyOOkt57zmyKEC7Xua1_izptA=s64", "userId": "12351497890495482184"}} outputId="eacd4e87-3dbb-4326-f403-9f0fb57d7972"
# Interactive demo only -- guarded so importing this module never blocks on input().
if __name__ == "__main__":
    calc_sockets()
calc_sockets.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Sample Notebook for new CI template code # # This is a sample notebook to test new CI template using message exchange. # # ## Description # # This is a new template code for language-agnostic worker samples. # # ## Sample 0: Boilarplate code for calling CI services # # ### Find your server IP address first! IP = '192.168.99.100' BASE = 'http://' + IP + '/v1/' # You need to change this to your service server # + import requests import json def jprint(data): print(json.dumps(data, indent=4)) # Change this to your Docker container's IP HEADERS = {'Content-Type': 'application/json'} # Check API Status res = requests.get(BASE + 'services') print("Here is the list of services supported by this submit agent:") jprint(res.json()) # List of sample file locations small_network = 'https://s3-us-west-2.amazonaws.com/ci-service-data/small.sif' med_network = 'https://s3-us-west-2.amazonaws.com/ci-service-data/yeastHighQuality.sif' # Pre-computed kernel and network (HumanNet) human_net = 'http://chianti.ucsd.edu/~kono/ci/data/HumanNet.v1.symbol.sif' human_net_kernel = 'http://chianti.ucsd.edu/~kono/ci/data/human_net.kernel.symbol.txt' # - # ## Sample1: Find Subnetwork using pre-computed kernel file # # if you already have pre-computed kernel file, you can use it to find sub networks using: # # * URL of Kernel file # * URL of SIF (network) file # # ### _f (kernel, network, query) = subnetwork_ # + # Pre-computed sample kernel and network (HumanNet) stored in UCSD web server (about 0.5 GB) human_net = 'http://chianti.ucsd.edu/~kono/ci/data/HumanNet.v1.symbol.sif' human_net_kernel = 'http://chianti.ucsd.edu/~kono/ci/data/human_net.kernel.symbol.txt' # Specify kernel and SIF file locations base_query = { 'kernel_url': human_net_kernel, 'network_url': human_net, } 
gene_list = ['BRCA1', 'MAPK1'] base_query['query'] = gene_list res = requests.post(BASE + 'services/kernel', data=json.dumps(base_query), headers=HEADERS) jprint(res.json()) job_id1 = res.json()['job_id'] # - # Check the status of the job res = requests.get(BASE + 'queue') jprint(res.json()) # Get the result (Subnetwork in CX format) result_url = BASE + 'queue/' + job_id1 + '/result' print(result_url) res = requests.get(result_url) #jprint(res.json()) # ## Sample 2: Create kernel from SIF file # # ### _f (network) = kernel_ # + # Specify locations of the SIF files sif_file_locations = [small_network, med_network] kernel_generation_tasks = {} for sif_location in sif_file_locations: body = { 'network_url': sif_location } res = requests.post(BASE + 'services/kernel', json=body, headers=HEADERS) kernel_generation_tasks[sif_location] = res.json()['job_id'] jprint(res.json()) # - # Check the status of the job res = requests.get(BASE + 'queue') jprint(res.json()) # + # Get result job_id_for_small_network = kernel_generation_tasks[small_network] kernel_url = BASE + 'queue/' + job_id_for_small_network + '/result' print(kernel_url) res = requests.get(kernel_url) kernel_info = res.json() jprint(kernel_info) # + # Use the result to find subnetwork base_query = { 'kernel_url': kernel_info['kernel_file'], 'network_url': kernel_info['network'] } gene_list = ["NRAS", "KRAS", "MAPK1"] base_query['query'] = gene_list res = requests.post(BASE + 'services/subnet', data=json.dumps(base_query), headers=HEADERS) find_subnet_job_id = res.json()['job_id'] jprint(res.json()) # - # Check the result result_url = BASE + 'queue/' + find_subnet_job_id + '/result' print(result_url) res = requests.get(result_url) #jprint(res.json()) # ## Sample 3: Get actual Kernel file # For current configuration, kernels will be saved in a server in a restricted zone (= docker container separated from internet zone). To access this, you need to open port. # # __Of course, this may be changed. 
It depends on deployment target.__ # # + import pandas as pd temp = kernel_info['kernel_file'] parts = temp.split('/') parts[2] = IP + ':3333' # This is the default opened port... kernel_url = '/'.join(parts) print(kernel_url) df = pd.read_csv(kernel_url, sep="\t") df.head()
sample-notebooks/Heat_Diffusion_Example.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.7.8 64-bit # language: python # name: python3 # --- import requests from bs4 import BeautifulSoup Site_Link = "https://www.timesjobs.com/candidate/job-search.html?searchType=personalizedSearch&from=submit&txtKeywords=python&txtLocation=" html_file = requests.get(Site_Link) #yes for this you need to be online and it may fail once or twice html_file html_text = html_file.text html_text import pprint pprint.pprint(html_text) # + soup = BeautifulSoup(html_text,features="lxml") soup # - # <h3 class="joblist-comp-name"> # Pure Tech Codex Private Limited # # </h3> Company_Names = soup.find_all('h3', class_ ="joblist-comp-name" ) for Name in Company_Names: print(Name.text) # <ul class="list-job-dtl clearfix"> # # # <li> # <label>Job Description:</label> # Job Description: 2 to 3 Years experience in Python.Expert in Python , with knowledge of at least one Python web framework Flask , Django etc.Expert in server programming.Go... 
<a href="https://www.timesjobs.com/job-detail/python-pure-tech-codex-private-limited-pune-2-to-3-yrs-jobid-OHwfF0d6EhNzpSvf__PLUS__uAgZw==&amp;source=srp" target="_blank">More Details</a> # </li> # # <li> # <label>KeySkills:</label> # <span class="srp-skills"> # # rest , <strong class="blkclor">python</strong> , database , django , debugging , mongodb # # </span> </li> # # # # <!-- # <li> # <i class="material-icons">location_on</i> # Pune # </li> # --> # # # </ul> Company_Names = soup.find_all('h3', class_ ="joblist-comp-name") # check rest at https://www.youtube.com/watch?v=XVv6mJpFOb0 Job_Desc = soup.find_all('ul' ,class_="list-job-dtl clearfix") for Name , desc in Company_Names, Job_Desc: print(Name.text , desc.li.text) # <li class="clearfix job-bx wht-shd-bx"> # <header class="clearfix"> # <!-- # --> # <!-- --> # <h2> # <a href="https://www.timesjobs.com/job-detail/python-pure-tech-codex-private-limited-pune-2-to-3-yrs-jobid-OHwfF0d6EhNzpSvf__PLUS__uAgZw==&amp;source=srp" target="_blank" onclick="logViewUSBT('view','66840090','rest , python , database , django , debugging , mongodb','Pune','2 - 3','IT Software : Software Products &amp; Services','1' )"> # <strong class="blkclor">Python</strong></a> </h2> # <h3 class="joblist-comp-name"> # Pure Tech Codex Private Limited # </h3> # </header> # <ul class="top-jd-dtl clearfix"> # <li><i class="material-icons">card_travel</i>2 - 3 yrs</li> # <li> # <i class="material-icons">location_on</i> # <span title="Pune">Pune</span> # </li> # </ul> # <ul class="list-job-dtl clearfix"> # <li> # <label>Job Description:</label> # Job Description: 2 to 3 Years experience in Python.Expert in Python , with knowledge of at least one Python web framework Flask , Django etc.Expert in server programming.Go... 
<a href="https://www.timesjobs.com/job-detail/python-pure-tech-codex-private-limited-pune-2-to-3-yrs-jobid-OHwfF0d6EhNzpSvf__PLUS__uAgZw==&amp;source=srp" target="_blank">More Details</a> # </li> # <li> # <label>KeySkills:</label> # <span class="srp-skills"> # rest , <strong class="blkclor">python</strong> , database , django , debugging , mongodb # </span> </li> # <!-- # <li> # <i class="material-icons">location_on</i> # Pune # </li> # --> # </ul> # <div class="list-job-bt clearfix"> # <div class="list-action"> # <div class="applied-dtl clearfix" id="showPostApplyData_66840090"> # <a href="javascript:callExtJobApply('66840090','adId=OHwfF0d6EhNzpSvf__PLUS__uAgZw==&amp;compName=Career Progress Consultants','TJPFSRP');" onclick="trackClickEvent('View_AND_Apply_SRP','from_srp_externalJobs');logViewUSBT('apply','66840090','rest , python , database , django , debugging , mongodb','Pune','2 - 3','IT Software : Software Products &amp; Services','1')" class="waves-effect waves-light btn">Apply</a> # <span class="jobs-status clearfix"> # <!-- # <i class="material-icons trnding-up" title="Recently posted job, Recruiter is actively looking for candidates">check_circle</i> # # --> # </span> # <span class="sim-posted"> # <span>Posted 6 days ago</span> # </span> # </div> # </div> # </div> # </li> # The one above is full info of 1 job in description, it's put here physically to view the format and pick the items/tags needed # job = soup.find('li', class_="clearfix job-bx wht-shd-bx") #li is list , it's a list which contains all the info about the job described on site job link = job.a print(link) title = job.find('h3' ,class_="joblist-comp-name").text.strip() print(title,type(title)) KeySkills = job.find_all('li') for i in KeySkills: print(i,end="\n!!!!!!!!!!!!!1\n") # + JobDesc = KeySkills[-2]#it's 2nd last KeySkills = KeySkills[-1]#it's last JobDesc , KeySkills # - JobDesc.text.strip() JobDesc = list(JobDesc.text.replace('\n','').replace('\r','').split(':')) Description = 
JobDesc[-1]

Description

KeySkills.text.replace(' ','').replace('\n','').replace('\r','')

type(KeySkills)

# Strip whitespace, then split "KeySkills: a,b,c" into label and skill string.
temp = list(KeySkills.text.replace(' ','').replace('\n','').replace('\r','').split(':'))
temp

temp[-1] = list(temp[-1].split(','))
temp

print(temp[0], "are :- ")
for i in temp[-1]:
    print(i)

Skills_Req = temp[-1]

# <span class="sim-posted">
# <span>Posted 6 days ago</span>
# </span>

# This above is to see when was the job posted

Posted = job.find('span' , class_="sim-posted")
Posted

Posted = Posted.text.replace('\n','').strip()
Posted

print(f'''
Company Name: {title}
Skills Required: {Skills_Req}
Job Description: {Description}
{Posted}
''')

# #Now Trying on a bigger lvl

# +
jobs = soup.find_all('li', class_="clearfix job-bx wht-shd-bx")

for j in jobs:
    print(j,end="\n\n\n")
# -

j = jobs[0]
j

def remove_rn(s):
    """Drop carriage returns and newlines, then strip surrounding whitespace."""
    return s.replace('\r','').replace('\n','').strip()

def remove_rn_lst(lst):
    """Apply remove_rn to every element of lst in place; returns the same list."""
    for i, item in enumerate(lst):
        lst[i] = remove_rn(item)
    return lst

def check_dated(date):
    """Return True when a "Posted ... ago" string describes a recent posting.

    Recent means the second word is "few" or a number of days below 11;
    any non-numeric age (e.g. "a month ago") counts as old. A string with
    fewer than two words raises IndexError, matching the original code.
    """
    age = date.split(' ')[1]
    if age == "few":
        return True
    try:
        return int(age) < 11
    except ValueError:
        # Non-numeric age, e.g. the "a" in "Posted a month ago".
        return False

# format Posted 6 days ago
# Posted few days ago
# Posted a month ago

# #For the Link!
tempJ = jobs[0]
print(tempJ.find('h2').a["href"])

for j in jobs:
    post = remove_rn(j.find('span' , class_="sim-posted").text)
    # Skip postings older than ~10 days.
    if not check_dated(post):
        continue
    name = remove_rn(j.find('h3', class_ ="joblist-comp-name").text)
    temp = j.find_all('li')
    desc = remove_rn(temp[-2].text).split(':')[-1]
    skills = remove_rn(temp[-1].text).split(':')[-1].split(',')
    link = j.find('h2').a["href"]
    print(f'''
Company Name: {name}
Skills Required: {remove_rn_lst(skills)}
Job Description: {desc[0:len(desc)-len("... More Details")]}.
Link: {link}
{post}
''')
Static/requests/Testing/Webscraping_live.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Predicting global innovation index using regression models # ## Data loading and preparation import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_absolute_error, median_absolute_error, r2_score from sklearn.feature_selection import f_regression import seaborn as sns from scipy import stats from sklearn.linear_model import LinearRegression, Lasso, ElasticNet, ElasticNetCV from sklearn.feature_selection import RFE from scipy.stats import mannwhitneyu from sklearn.preprocessing import StandardScaler merged = pd.read_csv('../../data/converted/merged.csv') # Including the global happiness index into the merged dataset # including happiness index happy = pd.read_csv('../../data/converted/happiness_index.csv') happy['year'] = 2019 merged = merged.merge(happy, on=['country_iso', 'year'], how='outer', sort=True) pd.Series(merged.columns) df = merged.copy() # The merged dataset has data for many years. We will only consider data from 2015 to now. For each country and feature, we will consider the last value available. df.drop(df[df.year < 2015].index, inplace = True) df_last = df.groupby('country_iso', as_index=False).last() # calculate feature mean over all years available (using mean to include outlier effects) #df_mean = df.groupby('country_iso', as_index=False).mean() # remove year column df_last.drop(['year'], axis = 1, inplace = True) # We have to limit the prediction to the set of countries for which we have the global index available. df_last.dropna(subset=['gii_innovation_output'], inplace=True) df_last.count() # Some of the features do not have enough data. 
We will limit our analysis to the features that have values for most of the countries of the global index. to_drop = df_last.columns[df_last.columns.str.contains('creative_svc_')].values to_drop = np.append(to_drop,['book_titles', 'total_hospital_density_per_100k', 'cultural_occupation', 'literacy_rate', 'gii_stem_assessment', 'poverty_ratio']) df_subset = df_last.drop(columns=to_drop) df_subset.count() df_sample = df_subset.dropna(axis=0) df_sample.count() # We are restricting our analysis to a sample of 51 countries, from a universe of 129 (number of countries covered by the Global Innovation Index). This should be enough to prove that the regression model is accurate. target = df_sample[['gii_innovation_output']] feats = df_sample.drop(columns=['gii_innovation_output', 'global_innovation_index', 'country_iso']) X_train, X_test, y_train, y_test = train_test_split(feats, target, test_size=0.20, random_state=42) scaler = StandardScaler() feats_scaled = scaler.fit_transform(feats) X_train = scaler.transform(X_train) X_test = scaler.transform(X_test) # ## Regression Models models = ['random_forest', 'lr', 'lr_lasso', 'lr_rfe'] results = pd.DataFrame(data={'mae': [0,0,0,0], 'r2_score': [0,0,0,0]}, index=models) # ### Random Forest rf_model = RandomForestRegressor(max_depth=5, random_state=0) rf_model.fit(X_train, y_train) y_predict = rf_model.predict(X_test) mean_absolute_error(y_test, y_predict) y_predict_insample = rf_model.predict(X_train) mean_absolute_error(y_train, y_predict_insample) r2_score(y_test, y_predict) r2_score(y_train, y_predict_insample) results.loc['random_forest', 'mae'] = mean_absolute_error(y_test, y_predict) results.loc['random_forest', 'r2_score'] = r2_score(y_test, y_predict) # ### Linear Regression linear = LinearRegression() linear.fit(X_train, y_train) y_predict = linear.predict(X_test) mean_absolute_error(y_test, y_predict) r2_score(y_test, y_predict) feats_nonneg = feats.copy() feats_nonneg['foreign_investment'] = 
feats_nonneg['foreign_investment'] + abs(feats_nonneg['foreign_investment'].min()) feats_nonneg['gii_rule_of_law'] = feats_nonneg['gii_rule_of_law'] + abs(feats_nonneg['gii_rule_of_law'].min()) feats_nonneg.describe().loc['min',:] scores, pvalues = f_regression(feats, target) scores results.loc['lr', 'mae'] = mean_absolute_error(y_test, y_predict) results.loc['lr', 'r2_score'] = r2_score(y_test, y_predict) # ### Linear Regression using Lasso lr_lasso = Lasso() lr_lasso.fit(X_train, y_train) y_predict = lr_lasso.predict(X_test) mean_absolute_error(y_test, y_predict) r2_score(y_test, y_predict) results.loc['lr_lasso', 'mae'] = mean_absolute_error(y_test, y_predict) results.loc['lr_lasso', 'r2_score'] = r2_score(y_test, y_predict) features = pd.Series(lr_lasso.coef_, index=feats.columns.values) features.sort_values(ascending=False) # ### Linear Regression using RFECV # + import matplotlib.pyplot as plt from sklearn.model_selection import StratifiedKFold from sklearn.feature_selection import RFECV X = feats_scaled y = target lr_rfecv = LinearRegression() rfecv = RFECV(estimator=lr_rfecv, step=1, scoring='r2') rfecv.fit(X, y) print("Optimal number of features : %d" % rfecv.n_features_) # Plot number of features VS. 
cross-validation scores plt.figure() plt.xlabel("Number of features selected") plt.ylabel("Cross validation score (nb of correct classifications)") plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) plt.show() # - rfecv.grid_scores_ # ### Linear Regression using RFE (recursive feature elimination) X = feats_scaled y = target lr_rfe = LinearRegression() #Initializing RFE model rfe = RFE(lr_rfe) #Transforming data using RFE X_rfe = rfe.fit_transform(X,y) #Fitting the data to model lr_rfe.fit(X_rfe,y) print(rfe.support_) print(rfe.ranking_) #no of features nof_list=np.arange(1,len(feats.columns.values)) high_score=0 #Variable to store the optimum features nof=0 score_list =[] for n in range(len(nof_list)): X_train, X_test, y_train, y_test = train_test_split(X,y, test_size = 0.3, random_state = 0) model = LinearRegression() rfe = RFE(lr_rfe,nof_list[n]) X_train_rfe = rfe.fit_transform(X_train,y_train) X_test_rfe = rfe.transform(X_test) lr_rfe.fit(X_train_rfe,y_train) score = lr_rfe.score(X_test_rfe,y_test) score_list.append(score) if(score>high_score): high_score = score nof = nof_list[n] print("Optimum number of features: %d" %nof) print("Score with %d features: %f" % (nof, high_score)) cols = list(feats.columns) lr_rfe_final = LinearRegression() #Initializing RFE model rfe = RFE(lr_rfe_final, 2) #Transforming data using RFE X_rfe = rfe.fit_transform(X,y) #Fitting the data to model lr_rfe_final.fit(X_rfe,y) temp = pd.Series(rfe.support_,index = cols) selected_features_rfe = temp[temp==True].index print(selected_features_rfe) lr_rfe_final.coef_ rfe.support_ pd.Series(lr_rfe_final.coef_[0], index=selected_features_rfe).apply(abs).sort_values(ascending=False) selected_features_rfe predict = lr_rfe_final.predict(pd.DataFrame(X, columns=feats.columns)[selected_features_rfe]) mean_absolute_error(y, predict) predict_test = lr_rfe_final.predict(pd.DataFrame(X_test, columns=feats.columns)[selected_features_rfe]) mean_absolute_error(y_test, predict_test) 
r2_score(y_test, predict_test) results.loc['lr_rfe', 'mae'] = mean_absolute_error(y_test, predict_test) results.loc['lr_rfe', 'r2_score'] = r2_score(y_test, predict_test) # ### Selecting best model results # Our two best performing models are the Lasso linear model and the linear regression using RFE. Let's take a look at the used features and its coefficients for the first one: coefs = pd.Series(lr_lasso.coef_, index=feats.columns) coefs.drop(coefs[coefs==0.0].index, inplace=True) coefs.sort_values(ascending=False) # The only thing that can be highlighted here is that the ease of business has a negative effect on the innovation index. Besides that, the used features are a mix among creativity, economic and educational factors. Lets take a look at the second model, the linear regression using RFE: coefs = pd.Series(lr_rfe_final.coef_[0], index=selected_features_rfe) coefs.drop(coefs[coefs==0.0].index, inplace=True) coefs.sort_values(ascending=False) # That is surprising. The linear regression model was able to predict, with a reasonable accuracy, using only two factors: number of patent families and broadband subscriptions. Let's predict for all countries and check the differences: df_prediction = df_sample[['country_iso', 'gii_innovation_output']] rfe_predict = lr_rfe_final.predict(X_rfe) df_prediction['rfe_prediction'] = rfe_predict lasso_predict = lr_lasso.predict(feats_scaled) df_prediction['lasso_prediction'] = lasso_predict df_prediction.plot.scatter('gii_innovation_output', 'rfe_prediction') df_prediction.plot.scatter('gii_innovation_output', 'lasso_prediction') df_prediction.to_csv('lr_predictions.csv') mean_absolute_error(df_prediction.gii_innovation_output, df_prediction.lasso_prediction) mean_absolute_error(df_prediction.gii_innovation_output, df_prediction.rfe_prediction) # Looking at the scatter plots and the final Mean Absolute Error (considering all countries), we can see that the first model, Lasso, performs a lot better. 
df_prediction.to_csv('linear_regression.csv')

# ## Conclusions

# We could find a linear regression model which could predict a country's innovation index with reasonable accuracy. However, the set of features used to predict does not show any special insight: they are a combination of economic, creativity and educational factors.
#
# It is possible to predict the index using a much smaller set of features — particularly the number of patent families and broadband subscriptions — but the prediction is less accurate than the one built on the larger, mixed feature set.
data_analysis/supervised/linear_regression.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.8.10 64-bit # name: python3 # --- # %matplotlib inline import numpy as np np.random.seed(0) import pandas as pd import matplotlib.pyplot as plt import gym import tensorflow.compat.v2 as tf tf.random.set_seed(0) from tensorflow import keras # # 函数近似方法 # ## 使用环境:小车上山MountainCar-v0 # # * 一个小车在一定范围内行驶,在任意时刻,从水平方向看,小车的位置是[-1.2,0.6],速度范围[-0.07,0.07] # * 每个时刻,智能体可以对小车施加3种动作中的一种:向左施力,不施力,向右施力。智能体的施力和小车的水平位置会共同决定下一时刻小车速度 # * 当某时刻小车的水平位置大于0.5时,控制目标达成,回合结束。控制的目标是让小车以尽可能少的步骤叨叨目标。 env = gym.make("MountainCar-v0") env.seed(0) print("观测空间 = {}".format(env.observation_space)) print("动作空间 = {}".format(env.action_space)) print("位置范围 = {}".format((env.unwrapped.min_position, env.unwrapped.max_position))) print("速度范围 = {}".format((-env.unwrapped.max_speed, env.unwrapped.max_speed))) print("目标位置 = {}".format(env.unwrapped.goal_position)) # 在绝大多数情况下,智能体简单向右的施力不足以让小车越过目标。 # + positions, velocities = [], [] observation = env.reset() while True: positions.append(observation[0]) velocities.append(observation[1]) next_observation, reward, done, _ = env.step(2) if done: break observation = next_observation if next_observation[0] > 0.5: print("成功到达") else: print("失败退出") # 绘制位置和速度图像 fig, ax = plt.subplots() ax.plot(positions, label='position') ax.plot(velocities, label='velocity') ax.legend() # - env.close() # ## 函数近似原理 # * 评估状态价值:用一个参数为$\bf{w}$的函数$v(s;\bold{w})$来近似 # # * 评估动作价值:用一个参数为$\bf{w}$的函数$q(s,a;\bold{w})$来近似 # # 动作集$A$有限时,还可以用矢量函数$q(s;\bold{w}) = \left( q(s,\bold{w}):a \in A \right)$来近似。每个元素对应一个动作,整个矢量函数除了参数外只用状态做输入。 # # * 形式不限,但是需要实现更新,学习过程中只更新$\bold{w}$。更新参数的方法既可用于策略价值评估,也可用于最优策略求解 # # ### 随机梯度下降 # # * 同策回合更新价值估计与函数近似方法相结合,可以得到函数近似回合更新价值估计算法。与之前的区别在于价值更新时更新的对象是函数参数,而非价值估计 # # #### 算法1:随机梯度下降函数近似评估策略价值 # # 1. 初始化:任意初始化参数$\bold{w}$ # 2. 逐回合执行以下操作: # 1. 采样:用环境和策略$\pi$生成轨迹样本 # 2. 初始化回报:$G \leftarrow 0$ # 3. 
逐步更新:对$t = T-1,T-2,\dots,0$,执行以下步骤: # 1. 更新回报:$G \leftarrow \gamma G +R_{t+1}$ # 2. 更新价值:若评估动作价值则更新$\bold{w}$以减小$[G - q(S_t,A_t;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[G - q(S_t,A_t;\bold{w})]\nabla q(S_t,A_t;\bold{w})$);若评估状态价值则更新$\bold{w}$以减小$[G - v(S_t;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[G - v(S_t;\bold{w})]\nabla v(S_t;\bold{w})$) # # 【**随机梯度下降**】若采用以上算法评估动作价值,则更新参数时应当试图减小每一步的回报估计$G_t$和动作价值估计$q(S_t,A_t;\bold{w})$的差别。定义每一步的损失为$[G - q(S_t,A_t;\bold{w})]^2$,整个回合的损失为$\sum_{t=0}^{T-1}[G - q(S_t,A_t;\bold{w})]^2$。若沿着$\sum_{t=0}^{T-1}[G - q(S_t,A_t;\bold{w})]^2$对$\bold{w}$的梯度反方向更新策略参数$$\bold{w}$$,就有机会减小损失。这样的方法称为**随机梯度下降** # $$ # \bold{w} \leftarrow \bold{w} - \frac{1}{2} \alpha_t \nabla[G - q(S_t,A_t;\bold{w})]^2 = \bold{w} + \alpha_t [G - q(S_t,A_t;\bold{w})]\nabla q(S_t,A_t;\bold{w}) # $$ # 可利用支持自动梯度计算的软件包。状态价值类似。 # # 将策略改进引入随机梯度下降评估策略,即可实现随机梯度下降最优策略求解。 # # #### 算法2:随机梯度下降求最优策略 # # 1. 初始化:任意初始化参数$\bold{w}$ # 2. 逐回合执行以下操作: # 1. 采样:用环境和当前动作价值估计导出的策略生成轨迹样本 # 2. 初始化回报:$G \leftarrow 0$ # 3. 逐步更新:对$t = T-1,T-2,\dots,0$,执行以下步骤: # 1. 更新回报:$G \leftarrow \gamma G +R_{t+1}$ # 2. 更新价值:若评估动作价值则更新$\bold{w}$以减小$[G - q(S_t,A_t;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[G - q(S_t,A_t;\bold{w})]\nabla q(S_t,A_t;\bold{w})$);若评估状态价值则更新$\bold{w}$以减小$[G - v(S_t;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[G - v(S_t;\bold{w})]\nabla v(S_t;\bold{w})$) # ### 半梯度下降 # # * 对于单步更新时序差分估计的动作价值函数:$U_t = R_{t+1} +\gamma q(S_{t+1},A_{t+1};\bold{w})$ # * 动作价值的估计:$q(S_t,A_t;\bold{w})$ # * 定义单步损失:$[U_t - q(S_t,A_t,\bold{w})]^2$;整个回合的损失:$\sum_{t=0}^{T-1} [U_t - q(S_t,A_t,\bold{w})]^2$ # # * 更新参数$\bold{w}$以减小损失时,不对回报的估计$U_t = R_{t+1} +\gamma q(S_{t+1},A_{t+1};\bold{w})$求梯度,支队动作价值估计求梯度,这就是**半梯度下降** # # #### 算法3:半梯度下降算法估计动作价值或SARSA算法求最优策略 # # 1. 初始化:任意初始化参数$\bold{w}$ # 2. 逐回合执行以下操作: # 1. 若为策略评估,则根据输入策略确定动作$A$;若为寻找最优策略,则用当前动作价值估计导出的策略确定动作$A$ # 2. 若回合未结束,执行以下操作: # 1. 采样:执行动作$A$,观测得到奖励$R$和状态$S'$ # 2. 
若为策略评估,则根据输入策略确定动作$A'$;若为寻找最优策略,则用当前动作价值估计导出的策略确定动作$A'$ # 3. 计算回报的估计值:$U \leftarrow R +\gamma q(S',A';\bold{w})$ # 4. 更新动作价值函数:更新参数$\bold{w}$以减小$[U - q(S,A;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[U - q(S_t,A_t;\bold{w})]\nabla q(S_t,A_t;\bold{w})$)。<u>此步不可重复计算$U$</u> # 5. $S \leftarrow S', A \leftarrow A'$ # # #### 算法4:半梯度下降算法估计状态价值或期望SARSA算法或Q学习 # # 1. 初始化:任意初始化参数$\bold{w}$ # 2. 逐回合执行以下操作: # 1. 初始化状态:选择$S$ # 2. 若回合未结束,执行以下操作: # 1. 若为策略评估,则根据输入策略确定动作$A$;若为寻找最优策略,则用当前动作价值估计导出的策略确定动作$A$ # 2. 采样:执行动作$A$,观测得到奖励$R$和状态$S'$ # 3. 计算回报的估计值 # * 若是状态价值评估,则$U \leftarrow R+\gamma v(S';\bold{w})$ # * 若是期望SARSA算法,则$U \leftarrow R+\gamma \sum_a \pi(a|S';\bold{w})q(S',a;\bold{w})$ # * 若是Q学习,则$U \leftarrow R +\gamma \max_a q(S',a;\bold{w})$ # 4. 更新动作价值函数 # * 若是状态价值评估则更新$\bold{w}$以减小$[U - v(s;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[U - v(S;\bold{w})]\nabla v(S;\bold{w}) $) # * 若是期望SARSA算法或Q学习则更新参数$\bold{w}$以减小$[U - q(S,A;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[U - q(S_t,A_t;\bold{w})]\nabla q(S_t,A_t;\bold{w})$)。<u>此步不可重复计算$U$</u> # 5. 
$S \leftarrow S'$ # # 若采用自动计算微分并更新参数的包来减小损失,则务必注意不能对回报的估计求梯度。有些软件包可以组织计算过程中梯度的传播,也可以在计算回报估计的表达式时使用阻止梯度传播的功能。还有一种办法是复制一份参数$\bold w_{目标} = \bold{w}$,在计算回报估计的表达式时用这份复制后的参数$\bold{w}_{目标}$来计算回报估计 # ### 带资格迹的半梯度下降 # # 资格迹参数$\bold{z}$和价值参数$\bold{w}$具有相同的形状大小,且元素逐一对应。资格迹参数中的每个元素表示了在更新价值参数对应元素时应当使用的权重乘以价值估计对该分量的梯度。 # # * 更新动作价值:$\bold{w} \leftarrow \bold{w} + \alpha[U - q(S_t,A_t;\bold{w})]$ # * 更新状态价值:$\bold{w} \leftarrow \bold{w} + \alpha[U - v(S_t;\bold{w})]$ # # 当选取资格迹为累积迹时,资格迹的递推定义式如下: # # * $t = 0$时$\bold{z}_0 = 0$ # * $t > 0$时: # * 动作价值:$\bold{z}_t = \gamma \lambda \bold{z}_{t-1} +\nabla q(S_t,A_t;\bold{w})$ # * 状态价值:$\bold{z}_t = \gamma \lambda \bold{z}_{t-1} +\nabla v(S_t;\bold{w})$ # # 递推式第一项:对前一次更新时使用的资格迹衰减而来,可以改变$\lambda$的值以改变衰减速度 # # 递推式第二项:加强项,由动作价值的梯度决定,事实上确定了价值参数对总体价值估计的影响。 # # 对总体价值估计影响大的那些价值参数分量是当前比较重要的分量,应当加强其资格迹。由于梯度可正可负可为0,因此更新后的资格迹可能为负数,对应价值参数分量的权重值就是负数。因此面对相同的时序差分误差,可能某些分量增加而某些分量减小 # # #### 算法5:TD($\lambda$)算法估计动作价值或SARSA($\lambda$)算法 # # 1. 初始化:任意初始化参数$\bold{w}$ # 2. 逐回合执行以下操作: # 1. 初始化状态动作对:选择状态$S$ # * 若是策略评估,则根据输入策略确定动作$A$ # * 若是寻找最优策略,则用当前动作价值估计导出的策略确定动作$A$ # 2. 若回合未结束,执行以下操作: # 1. 采样:执行动作$A$,观测得到奖励$R$和状态$S'$ # 2. 若为策略评估,则根据输入策略确定动作$A'$;若为寻找最优策略,则用当前动作价值估计导出的策略确定动作$A'$ # 3. 计算回报的估计值:$U \leftarrow R +\gamma q(S',A';\bold{w})$ # 4. 更新资格迹:$\bold{z} \leftarrow \gamma\lambda\bold{z} +\nabla q(S,A;\bold{w})$ # 5. 更新动作价值函数:$\bold{w} \leftarrow \bold{w} + \alpha[U - q(S,A;\bold{w})]\bold{z}$ # 6. $S \leftarrow S', A \leftarrow A'$ # # #### 算法6:TD($\lambda$)算法估计状态价值或期望SARSA($\lambda$)算法或Q学习 # # 1. 初始化:任意初始化参数$\bold{w}$ # 2. 逐回合执行以下操作: # 1. 初始化资格迹:$\bold{z} \leftarrow 0$ # 2. 初始化状态:选择状态$S$ # 3. 若回合未结束,执行以下操作: # 1. 若是策略评估,则根据输入策略确定动作$A$;若是寻找最优策略,则用当前动作价值估计导出的策略确定动作$A$ # 2. 采样:执行动作$A$,观测得到奖励$R$和状态$S'$ # 3. 计算回报的估计值 # * 若是状态价值评估,则$U \leftarrow R+\gamma v(S';\bold{w})$ # * 若是期望SARSA算法,则$U \leftarrow R+\gamma \sum_a \pi(a|S';\bold{w})q(S',a;\bold{w})$ # * 若是Q学习,则$U \leftarrow R +\gamma \max_a q(S',a;\bold{w})$ # 4. 
更新资格迹 # * 若是状态价值评估则$\bold{z} \leftarrow \gamma \lambda\bold{z} +\nabla v(S;\bold{w})$ # * 若是期望SARSA算法或Q学习则$\bold{z} \leftarrow \gamma \lambda\bold{z} +\nabla q(S,A;\bold{w})$ # 5. 更新动作价值函数 # * 若是状态价值评估则更新$\bold{w}$以减小$[U - v(s;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[U - v(S;\bold{w})]\bold{z} $) # * 若是期望SARSA算法或Q学习则更新参数$\bold{w}$以减小$[U - q(S,A;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[U - q(S_t,A_t;\bold{w})]bold{z}$)。<u>此步不可重复计算$U$</u> # 6. $S \leftarrow S'$ # ## 线性近似 # 【**线性近似**】用许多特征向量的线性组合来近似价值函数。特征向量则依赖于输入(状态或状态动作对) # # > 以动作价值为例,可为每个状态动作对定义多个不同的特征$\bold{x}(s,a) = (x_j(s,a):j\in J)$,进而定义近似函数为这些特征的线性组合,即: # > $$ # > q(s,a;\bold{w}) = [\bold{x}(s,a)]^T\bold{w} = \sum_{j \in J}x_j(s,a)w_j # > $$ # > 对于状态函数: # > $$ # > v(s;\bold{w}) = [\bold{x}(s)]^T\bold{w} = \sum_{j \in J}x_j(s)w_j # > $$ # # ### 精确查表与线性近似的关系 # # 3~5章的查表法可看作线性近似的特例。对于动作价值而言,可以认为有$|S|\times|A|$个特征向量,只在某个状态动作对出为1,其余全为0。此时,所有向量的线性组合就是整个动作价值函数,线性组合系数的值就是动作价值函数的值。 # # ### 线性最小二乘策略评估 # # 线性最小二乘是一种批处理(batch)的方法,它每次针对多个经验样本,试图找到整个样本集上的最优估计 # # 【**线性最小二乘回合更新**】(LSMC)其试图最小化 # $$ # L(\bold{w}) = \sum_t [G_t - q(S,A;\bold{W})]^2 # $$ # 在线性近似的情形下,其梯度为 # $$ # \begin{aligned} # &\sum_t [G_t - q(S_t,A_t;\bold{w})] \nabla q(S_t,A_t;\bold{w}) \\ # &=\sum_t [G_t - (\bold{x}(S_t,A_t))^T \bold{w}]\bold{x}(S_t,A_t)\\ # &=\sum_tG_t\bold{x}(S_t,A_t) - \sum_t \bold{x}(S_t,A_t)(\bold{x}(S_t,A_t))^T\bold{w} # \end{aligned} # $$ # 将待求权重$\bold{w}_{LSMC}$代入并令上式为0,则: # $$ # \sum_tG_t\bold{x}(S_t,A_t) - \sum_t \bold{x}(S_t,A_t)(\bold{x}(S_t,A_t))^T\bold{w}_{LSMC} =0 # $$ # 得: # $$ # \bold{w}_{LSMC} = \left(\sum_t \bold{x}(S_t,A_t)(\bold{x}(S_t,A_t))^T\right)^{-1}\sum_tG_t\bold{x}(S_t,A_t) # $$ # 由此得到线性最小二乘回合更新的计算式子。可直接利用上式更新权重 # # 【**线性最小二乘时序差分更新**】(LSTD)其试图最小化 # $$ # L(\bold{w}) = \sum_t [U_t - q(S,A;\bold{W})]^2 # $$ # 求其半梯度,并令其等于0,得: # $$ # \bold{w}_{LSTD} = \left(\sum_t \bold{x}(S_t,A_t)(\bold{x}(S_t,A_t)-\gamma\bold{x}(S_{t+1},A_{t+1}))^T\right)^{-1}\sum_tR_{t+1}\bold{x}(S_t,A_t) # $$ # # ### 
线性最小二乘最优策略求解 # # Q学习中,回报的估计为$U_t = R_{t+1} +\gamma \max_{a\in A(S_{t+1})} q(S_{t+1},a;\bold{w})$ # # 相比于时序差分:将$A_{t+1}$换位$A_{t+1}^* = \mathop{\arg\max}_a q(S_{t+1},a;\bold{w})$ # # 因此: # $$ # \bold{w}_{LSTDQ} = \left(\sum_t \bold{x}(S_t,A_t)(\bold{x}(S_t,A_t)-\gamma\bold{x}(S_{t+1},A_{t+1}^*))^T\right)^{-1}\sum_tR_{t+1}\bold{x}(S_t,A_t) # $$ # 求解上述最小二乘解,可得到最优价值函数的估计,进而得到最优策略的更新。据此策略反复迭代即可得到线性最小二乘Q学习 # # #### 算法7:线性最小二乘Q学习算法求解最优策略 # # 【输入】许多经验 # # 【输出】最优动作价值估计和确定性最优策略估计 # # 1. 初始化:$\bold{w} \leftarrow any$,用最优价值函数确定贪心策略$\pi$ # 2. 迭代更新:迭代进行以下操作: # 1. 更新价值:$\bold{w}' \leftarrow \left(\sum_t \bold{x}(S_t,A_t)(\bold{x}(S_t,A_t)-\gamma\bold{x}(S_{t+1},A_{t+1}^*))^T\right)^{-1}\sum_tR_{t+1}\bold{x}(S_t,A_t)$,其中$A_{t+1}^*$是由确定性策略$\pi$决定的在状态$S_{t+1}$的动作 # 2. 策略改进:据最优价值函数决定策略$\pi$ # 3. 任意达到迭代终止条件(如$\bold{w}$和$\bold{w}'$足够接近,或$\pi$和$\pi'$足够接近),则终止迭代;否则更新二者,进行下一轮迭代 # ### 独热编码和砖瓦编码 # # 在小车上山问题中,位置和速度都是连续变量,要从连续空间中导出数目有限的特征: # 【**独热编码**】在二维的位置-速度空间中,将其划分成若干小格。位置轴范围总长是$l_{位置}$,每个小格的宽度为$\delta_{位置}$,共有$b_{位置} = l_{位置} / \delta_{位置}$个小格。同理有$l_{速度}$、$\delta_{速度}$以及$b_{速度}$。由此整个空间有$b_{位置}b_{速度}$个小格。每个小格对应一个特征,当位置速度对位于某个小格时,那个小格对应的位置特征为1,其余小格对应的特征均为0。由此提取出$b_{位置}b_{速度}$个特征。 # # 采用独热编码后的价值函数,对于同一小格内的所有位置速度对,其价值函数估计相同,因此只是一种近似。若想让近似更精确,则需增大特征数目 # # 【**砖瓦编码**】在精度相同情况下减少特征数目。在独热编码基础上引入多层大网格。采用$m$层砖瓦编码,每层的大网格都是原来独热编码小格的$m$位长,$m$位宽。在相邻两层之间,在两个维度上都偏移一个独热编码的小格,对于任意的位置速度对,它在每一层都会落到某个大网格中。这样,我们可以让每层中大网格对应的特征为1,其他特征为0.综合考虑所有层,总共大致有$b_{位置}b_{速度}/m$个特征,特征数大大减小。 # 线性最优策略求解,砖瓦编码 class TileCoder: def __init__(self, layers, features): self.layers = layers self.features = features self.codebook = {} def get_feature(self, codeword): if codeword in self.codebook: return self.codebook[codeword] count = len(self.codebook) if count >= self.features: # 冲突处理 return hash(codeword) % self.features self.codebook[codeword] = count return count def __call__(self,floats=(), ints=()): dim = len(floats) scaled_floats = tuple(f * self.layers * self.layers for f in floats) features = [] for layer in range(self.layers): 
codeword = (layer,) + tuple(int((f + (1 + dim * i) * layer ) / self.layers) for i, f in enumerate(scaled_floats)) + ints feature = self.get_feature(codeword) features.append(feature) return features class SARSAAgent: def __init__(self, env, layers=8, features=1893, gamma=1., lr = 0.03, epsilon=0.001): self.action_n = env.action_space.n # 动作数 self.obs_low = env.observation_space.low self.obs_scale = env.observation_space.high - \ env.observation_space.low # 观测空间范围 self.encoder = TileCoder(layers, features) # 砖瓦编码器 self.w = np.zeros(features) # 权重 self.gamma = gamma # 折扣 self.lr = lr # 学习率 self.epsilon = epsilon # 探索 def encode(self, observation, action): # 编码 states = tuple((observation - self.obs_low) / self.obs_scale) actions = (action, ) return self.encoder(states, actions) def get_q(self, observation, action): # 动作价值 features = self.encode(observation, action) return self.w[features].sum() def decide(self, observation): if np.random.rand() < self.epsilon: return np.random.randint(self.action_n) else: qs = [self.get_q(observation, action) for action in range(self.action_n)] return np.argmax(qs) def learn(self, observation, action, reward, next_observation, done, next_action): u = reward + (1 - done) * self.gamma * \ self.get_q(next_observation,next_action) td_error = u - self.get_q(observation, action) features = self.encode(observation, action) self.w[features] += self.lr * td_error def play_sarsa(env, agent, train=False, render=False): episode_reward = 0 observation = env.reset() action = agent.decide(observation) while True: if render: env.render() next_observation, reward, done, _ = env.step(action) episode_reward += reward next_action = agent.decide(next_observation) # 终止状态时此步无意义 if train: agent.learn(observation, action, reward, next_observation, done, next_action) if done: break observation, action = next_observation, next_action return episode_reward # + agent = SARSAAgent(env) # 训练 episodes = 400 episode_rewards = [] for episode in range(episodes): 
episode_reward = play_sarsa(env, agent, train=True) episode_rewards.append(episode_reward) plt.plot(episode_rewards) # 测试 agent.epsilon = 0. # 取消探索 episode_rewards = [play_sarsa(env, agent) for _ in range(100)] print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards), len(episode_rewards), np.mean(episode_rewards))) # - class SARSALambdaAgent(SARSAAgent): def __init__(self, env, layers=8, features=1893, gamma=1, lr=0.03, epsilon=0.001, lambd=0.9): super().__init__(env,layers,features,gamma,lr,epsilon) self.lambd = lambd self.z = np.zeros(features) # 初始化资格迹 def learn(self, observation, action, reward, next_observation, done, next_action): u = reward if not done: u += self.gamma * self.get_q(next_observation, next_action) self.z *= self.gamma * self.lambd features = self.encode(observation, action) self.z[features] = 1 # 替换迹 td_error = u - self.get_q(observation, action) self.w += self.lr * td_error * self.z if done: self.z = np.zeros_like(self.z) # + agent = SARSALambdaAgent(env) # 训练 episodes = 140 episode_rewards = [] for episode in range(episodes): episode_reward = play_sarsa(env, agent, train=True) episode_rewards.append(episode_reward) plt.plot(episode_rewards) # 测试 agent.epsilon = 0. 
# 取消探索 episode_rewards = [play_sarsa(env, agent) for _ in range(100)] print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards), len(episode_rewards), np.mean(episode_rewards))) # + poses = np.linspace(env.unwrapped.min_position, env.unwrapped.max_position, 128) vels = np.linspace(-env.unwrapped.max_speed, env.unwrapped.max_speed, 128) positions, velocities = np.meshgrid(poses, vels) # 绘制动作价值估计 @np.vectorize def get_q(position, velocity, action): return agent.get_q((position, velocity), action) q_values = np.empty((len(poses), len(vels), 3)) for action in range(3): q_values[:, :, action] = get_q(positions, velocities, action) fig, axes = plt.subplots(1, 3, figsize=(15, 4)) for action, ax in enumerate(axes): c = ax.pcolormesh(positions, velocities, q_values[:, :, action], shading='auto') ax.set_xlabel('position') ax.set_ylabel('velocity') fig.colorbar(c, ax=ax) ax.set_title('action = {}'.format(action)) # 绘制状态价值估计 v_values = q_values.max(axis=-1) fig, ax = plt.subplots(1, 1) c = ax.pcolormesh(positions, velocities, v_values, shading='auto') ax.set_xlabel('position') ax.set_ylabel('velocity') fig.colorbar(c, ax=ax); # + # 绘制动作估计 @np.vectorize def decide(position, velocity): return agent.decide((position, velocity)) q_values = np.empty((len(poses), len(vels), 3)) action_values = decide(positions, velocities) fig, ax = plt.subplots() c = ax.pcolormesh(positions, velocities, action_values, shading='auto') ax.set_xlabel('position') ax.set_ylabel('velocity') fig.colorbar(c, ax=ax, boundaries=[-.5, .5, 1.5, 2.5], ticks=[0, 1, 2]); # - # ## 函数近似的收敛性 # 线性近似具有简单的线性叠加结构,这使得线性近似可以获得额外的收敛性;对于函数近似算法,收敛性往往只在采用梯度下降的回合更新时有保证,而在采用半梯度下降的时序差分方法时是没有保证的。各种收敛情况在下列表中给出,其中查表法是指不采用函数近似的方法;所有的收敛性都是在学习率满足 Robbins-Monro 序列下才具有的,且一般都可以通过验证随机近似 Robbins-Monro 算法的条件证明,对于最优策略求解的收敛性证明,则需要用到了其随机优化的版本。 # # | 学习方法 | | 查表法 | 线性近似 | 非线性近似 | # | -------- | ------------------------ | ------ | ---------- | ---------- | # | 同策 | 回合更新 | 收敛 | 收敛 | 收敛 | # | | 线性最小二乘回合更新 | 收敛 | 收敛 | 不适用 | # | | 时序差分更新 | 收敛 | 收敛 | 
不一定收敛 | # | | 线性最小二乘时序差分更新 | 收敛 | 收敛 | 不适用 | # | 异策 | 回合更新 | 收敛 | 收敛 | 收敛 | # | | 线性最小二乘回合更新 | 收敛 | 收敛 | 不适用 | # | | 时序差分更新 | 收敛 | 不一定收敛 | 不一定收敛 | # | | 线性最小二乘时序差分更新 | 收敛 | 收敛 | 不适用 | # # | 学习方法 | 查表法 | 线性近似 | 非线性近似 | # | ---------------- | ------ | ---------------------- | ---------- | # | 回合更新 | 收敛 | 收敛或在最优解附近摆动 | 不一定收敛 | # | SARSA | 收敛 | 收敛或在最优解附近摆动 | 不一定收敛 | # | Q 学习 | 收敛 | 不一定收敛 | 不一定收敛 | # | 最小二乘迭代更新 | 收敛 | 收敛或在最优解附近摆动 | 不适用 | # # 值得一提的是,对于异策 Q 学习,即使采用了线性近似,仍然不能保证收敛。研究人员发现,只要异策、自益、函数近似这三者同时出现,就不能保证收敛性,但有一个著名的反例叫做 Baird 反例(Baird's counterexample)。 # ## 深度Q学习 # * 深度学习与强化学习相结合,是第一个深度强化学习算法 # * 核心:用一个人工神经网络$q(s,a;\bold{w})$来代替动作价值函数。由于神经网络具有强大的表达能力,能够自动寻找特征,因此有很大的潜力 # # * 同时出现异策、自益、函数近似时无法保证收敛性,会出现训练不稳定或训练困难的情况,目前主要的改进: # 1. **经验回放**:将经验(即历史的状态、动作、奖励等)存储起来,再在存储的经验中按一定规则采样 # 2. **目标网络**:修改网络的更新方式,例如不把学习到的网络权重马上用于后续的自益过程 # ### 经验回放 # # 采用批处理的模式能提高稳定性,经验回放就是一种让经验概率分布变得稳定的技术,它可以提高训练的稳定性 # # 【**关键步骤**】 # # * 存储:将轨迹以$(S_t,A_t,R_{t+1},S_{t+1})$的形式存储起来 # * 采样回放:使用某种规则从存储的$(S_t,A_t,R_{t+1},S_{t+1})$中随机取出一条或多条经验 # # #### 算法8:带经验回放的Q学习最优策略求解 # # 1. 初始化:任意初始化参数$\bold{w}$ # 2. 逐回合执行以下操作: # 1. 初始化状态:选择状态$S$ # 2. 若回合未结束,执行以下操作: # 1. 采样:根据$q(S,\cdot;\bold{w})$选择动作$A$并执行,观测得到奖励$R$和新状态$S'$ # 2. 存储:将经验$(S,A,R,S')$存入经验库中 # 3. 回放:从经验库中选取经验$(S_i,A_i,R_i,S_i')$ # 4. 计算回报的估计值:$U_i \leftarrow R_i + \gamma \max_a q(S'_i,)$ # 5. 更新动作价值函数:更新$\bold{w}$以减小$[U_i - q(S_i,A_i;\bold{w})]^2$(如$\bold{w} \leftarrow \bold{w} + \alpha[U_i - q(S_i,A_i;\bold{w})]\nabla q(S_i,A_i;\bold{w}) $) # 6. $S \leftarrow S'$ # # 【**好处**】 # # 1. 训练Q网络时可消除数据的关联,使得数据更像是独立同分布,以减小参数更新的方差,加速收敛 # 2. 能够重复使用经验,对于数据获取困难的情况尤其适用 # # 【**分类**】 # # 1. 从存储角度: # # * **集中式回放**:智能体在一个环境中运行,将经验统一存储于经验池中 # * **分布式回放**:智能体的多份拷贝同时在多个环境中运行,并将经验统一存储于经验池中。由于多个智能体拷贝同时生成经验,所以能够在使用更多资源的同时更快地收集经验 # # 2. 
从采样角度: # # * **均匀回放**:等概率从经验集中选取经验,并用取得的经验更新最优价值函数 # # * **优先回放**(PER):为经验池里的每个经验指定一个优先级,在选取经验时更倾向于选择优先级高的经验 # # 一般做法:若某个经验$i$的优先级为$p_i$,则选取该经验的概率为$\frac{p_i}{\sum_kp_k}$ # # * <u>成比例优先</u>:第$i$个经验的优先级为 # $$ # p_i = (\delta_i +\varepsilon)^\alpha # $$ # 其中$\delta_i = U_t - q(S_t,A_t;\bold{w})$或$\delta_i = U_t - v(S_t;\bold{w})$为时序差分误差,$\varepsilon$是预先设定的小正数,$\alpha$为正参数 # # * <u>基于排序优先</u>:第$i$个经验的优先级为 # $$ # p_i = \left(\frac{1}{\mathrm{rank}_i}\right)^\alpha # $$ # 其中$\mathrm{rank}_i$是第$i$个经验从大到小的排名,从1开始 # # 分布式经验回放和优先回放结合:分布式优先经验回放 # # 【**缺点**】会导致回合更新和多步学习算法无法使用。一般情况下与Q学习结合时可规避此缺点。 # + # 用于画图的类 # %matplotlib notebook class Chart: def __init__(self): self.fig, self.ax = plt.subplots(1, 1) # plt.ion() def plot(self, episode_rewards): self.ax.clear() self.ax.plot(episode_rewards) self.ax.set_xlabel('iteration') self.ax.set_ylabel('episode reward') self.fig.canvas.draw() # - # 经验回放 class DQNReplayer: def __init__(self, capacity): self.memory = pd.DataFrame(index=range(capacity), columns=['observation','action','reward','next_observation','done']) self.i = 0 self.count = 0 self.capacity = capacity def store(self, *args): self.memory.loc[self.i] = args self.i = (self.i + 1) % self.capacity self.count = min(self.count + 1, self.capacity) def sample(self, size): indices = np.random.choice(self.count, size=size) return (np.stack(self.memory.loc[indices, field]) for field in self.memory.columns) # DQN class DQNAgent: def __init__(self, env, net_kwargs={}, gamma=0.09, epsilon=0.001, replayer_capacity=10000, batch_size=64): observation_dim = env.observation_space.shape[0] self.action_n = env.action_space.n self.gamma = gamma self.epsilon = epsilon self.batch_size = batch_size self.replayer = DQNReplayer(replayer_capacity) self.evaluate_net = self.build_network(input_size=observation_dim, output_size=self.action_n, **net_kwargs) # 评估网络 self.target_net = self.build_network(input_size=observation_dim, output_size=self.action_n, **net_kwargs) # 目标网络 
self.target_net.set_weights(self.evaluate_net.get_weights()) def build_network(self, input_size, hidden_sizes, output_size, activation=tf.nn.relu, output_activation=None, lr=0.01): model = keras.Sequential() for layer, hidden_size in enumerate(hidden_sizes): kwargs = dict(input_shape=(input_size,)) if not layer else {} model.add(keras.layers.Dense(units=hidden_size, activation=activation, **kwargs)) model.add(keras.layers.Dense(units=output_size, activation=output_activation)) # 输出层 optimizer = tf.optimizers.Adam(lr=lr) model.compile(loss='mse', optimizer=optimizer) return model def learn(self, observation, action, reward, next_observation, done): self.replayer.store(observation, action, reward, next_observation, done) # 存储经验 observations, actions, rewards, next_observations, dones = \ self.replayer.sample(self.batch_size) # 经验回放 next_qs = self.target_net.predict(next_observations) next_max_qs = next_qs.max(axis=-1) us = rewards + self.gamma * (1. - dones) * next_max_qs targets = self.evaluate_net.predict(observations) targets[np.arange(us.shape[0]), actions] = us self.evaluate_net.fit(observations, targets, verbose=0) if done: # 更新目标网络 self.target_net.set_weights(self.evaluate_net.get_weights()) def decide(self, observation): # epsilon贪心策略 if np.random.rand() < self.epsilon: return np.random.randint(self.action_n) qs = self.evaluate_net.predict(observation[np.newaxis]) return np.argmax(qs) def play_qlearning(env, agent, train=False, render=False): episode_reward = 0 observation = env.reset() while True: if render: env.render() action = agent.decide(observation) next_observation, reward, done, _ = env.step(action) episode_reward += reward if train: agent.learn(observation, action, reward, next_observation, done) if done: break observation = next_observation return episode_reward # + net_kwargs = {'hidden_sizes' : [64, 64], 'lr' : 0.001} agent = DQNAgent(env, net_kwargs=net_kwargs) # 训练 episodes = 500 episode_rewards = [] chart = Chart() for episode in 
range(episodes): episode_reward = play_qlearning(env, agent, train=True) episode_rewards.append(episode_reward) chart.plot(episode_rewards) # 测试 agent.epsilon = 0. # 取消探索 episode_rewards = [play_qlearning(env, agent) for _ in range(100)] print('平均回合奖励 = {} / {} = {}'.format(sum(episode_rewards), len(episode_rewards), np.mean(episode_rewards)))
Jupyter NoteBook/6.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- ''' Exercise 4.5. Read about spirals at http: // en. wikipedia. org/ wiki/ Spiral ; then write a program that draws an Archimedian spiral (or one of the other kinds). Solution: http: // thinkpython2. com/ code/ spiral. py . ''' # + """This module contains a code example related to Think Python, 2nd Edition by <NAME> http://thinkpython2.com Copyright 2015 <NAME> License: http://creativecommons.org/licenses/by/4.0/ """ from __future__ import print_function, division import turtle def draw_spiral(t, n, length=3, a=0.1, b=0.0002): """Draws an Archimedian spiral starting at the origin. Args: n: how many line segments to draw length: how long each segment is a: how loose the initial spiral starts out (larger is looser) b: how loosly coiled the spiral is (larger is looser) http://en.wikipedia.org/wiki/Spiral """ theta = 0.0 for i in range(n): t.fd(length) dtheta = 1 / (a + b * theta) t.lt(dtheta) theta += dtheta # create the world and bob bob = turtle.Turtle() draw_spiral(bob, n=1000) turtle.mainloop() # -
ch4/ex4_5.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="-CeROyTeKWgH" colab_type="text" # #Aplicación de spectral clustering a datos con estructura de grafos # # En NLP, muchas veces los datos con los que se trabaja tienen una esgructura de grafo (por ejemplo, redes léxicas, paradigmas verbales, etc.) El modelo de spectral clustering puede adaptarse a esta estructura y crear clusters de los nodos. A continuación presentamos una aplicación a un grafo de datos bilingües. # + id="sls8H04WK_Nz" colab_type="code" colab={} #Importamos los paquetes que vamos a utilizar import numpy as np import matplotlib.pyplot as plt import networkx as nx import pandas as pd from scipy.linalg import eig from csv import reader from operator import itemgetter from sklearn.decomposition import PCA # + [markdown] id="7kRpSlduLhqy" colab_type="text" # ## Preprocesamiento de los datos # # Cuando trabajamos con datos ya en estructura de grafos, el preprocesamiento se vuelve más simple, pues no requerimos generar esta estructura. En este caso, en lugar de tener una secuencia de pasos Vector - Grafo - Vector, nos saltaremos el primer paso y sólo tendremos los pasos Grafo - Vector. 
#

# + id="3btxfbHymOhJ" colab_type="code" outputId="27446a04-db7a-4f79-ec2e-73599696e7c5" colab={"base_uri": "https://localhost:8080/", "height": 122}
# Mount Google Drive so the corpus file is reachable from this runtime.
from google.colab import drive
drive.mount('/content/drive')

# + id="1TF0rE7LmnWe" colab_type="code" outputId="6e923415-3228-41fc-9caf-e0ed19b5532b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Open the bilingual corpus: a tab-separated edge list.
file = open('/content/drive/My Drive/Curso_RIIAA/data/corpus_bilingual.txt', 'r')
print(file)

# + id="PKBrWgxnDQD" colab_type="code" colab={}
# Read every tab-separated row of the file into a list of records.
edges = list(reader(file, delimiter='\t'))

# + [markdown] id="zwGE35iTMmoP" colab_type="text"
# Los datos se encuentran estructurados en un grafo bipartito, donde un conjunto de nodos corresponde al lenguaje Náhuatl y otro al Español. Como existen préstamos entre una y otra lengua, utilizaremos un índice para diferenciar a que lengua corresponde cada forma léxica.

# + id="PEna00lbnxHB" colab_type="code" outputId="3d520a4b-815e-4be6-909b-195b6068eb77" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Build the weighted edges that define the bipartite graph.  The suffixes
# mark which language each word form belongs to, and dividing by the
# maximum observed count (38608) normalises the weights.
MAX_COUNT = 38608
edges = [(f"{row[0]}_NA", f"{row[1]}_ES", float(row[4]) / MAX_COUNT)
         for row in edges]
print(edges)

# + [markdown] id="DXmb5bshNDJz" colab_type="text"
# Podemos visualizar los datos a partir de la librería de $networkx$. Esta misma librería nos puede facilitar la creación de la matriz de adyacencia.
#

# + id="klE_el-FoQaC" colab_type="code" outputId="512d727e-e178-44a3-8d7f-e59c1c350eb4" colab={"base_uri": "https://localhost:8080/", "height": 356}
# Build a graph from the edges defined above.  Only the first ten edges
# are used so the drawing stays readable.
G = nx.Graph()
G.add_weighted_edges_from(edges[:10])

# Show the edges in networkx format, then draw the graph.
print(G.edges(data=True))
nx.draw(G, with_labels=True, node_size=10)

# + [markdown] id="fZ_KSrqtNz9x" colab_type="text"
# ## Aplicación del algoritmo de spectral clustering
#
# Una vez que tenemos los datos en un formato de grafo tratable, podemos pasar a aplicar el algoritmo de spectral clustering. Para esto, obtenemos la matriz de adyacencia.

# + id="0PTjT1FFsBka" colab_type="code" outputId="e5f7345b-d799-4384-e518-cbcfd664c191" colab={"base_uri": "https://localhost:8080/", "height": 394}
# Adjacency matrix of the graph, keeping the node names as labels.
A = nx.to_numpy_array(G)
labels = G.nodes

# Render the matrix as a labelled DataFrame for inspection.
df = pd.DataFrame(A, index=labels, columns=labels)
print(df.to_string())

# + [markdown] id="HbqdB0tpOXOT" colab_type="text"
# Ya que la matriz de adyacencia guarda información del grafo en formato vectorial, podemos visualizarla en un espacio $\mathbb{R}^d$. Sin embargo, notamos que ésta no nos da suficiente información para clusterizar los puntos.
# + id="rrfcx3oVtJwA" colab_type="code" outputId="ced888b2-bd17-4bc5-a8d9-4c3b4826d9f1" colab={"base_uri": "https://localhost:8080/", "height": 269}
# Plot helper
def plot_words(Z, ids, color='blue'):
    """Project the rows of Z to 2-D with PCA and scatter-plot them.

    Args:
        Z: (n_samples, n_features) array of row vectors.
        ids: one label per row, drawn next to each point.
        color: matplotlib color spec (scalar or per-point sequence).
    """
    # Reduce to two dimensions with PCA (display only).
    Z = PCA(n_components=2).fit_transform(Z)
    plt.scatter(Z[:, 0], Z[:, 1], marker='o', c=color)
    # Annotate every point with its label.
    for label, x, y in zip(ids, Z[:, 0], Z[:, 1]):
        plt.annotate(label, xy=(x, y), xytext=(-1, 1),
                     textcoords='offset points', ha='center', va='bottom')

plot_words(A, labels)
plt.show()

# + [markdown] id="TV7ByCN5Orvp" colab_type="text"
# Por tanto, aplicamos spectral clustering, obteniendo la matriz laplaciana como: $L = D - A$, donde $D$ es la matriz de grado y $A$ la de adyacencia. Posteriormente hacemos la factorización espectral.

# + id="A6SPlNd7taKP" colab_type="code" outputId="8880cdce-151c-4c90-b742-252b93e34a16" colab={"base_uri": "https://localhost:8080/", "height": 269}
# Unnormalized graph Laplacian: degree matrix minus adjacency matrix.
L = np.diag(A.sum(0)) - A

# Eigendecomposition of L.
eig_vals, eig_vecs = eig(L)

# Sort eigenpairs by (real) eigenvalue, smallest first.
# NOTE: scipy.linalg.eig returns eigenvectors as the *columns* of
# eig_vecs (vr[:, i] pairs with w[i]), so transpose before zipping each
# eigenvalue with its eigenvector; zipping the rows would pair values
# with the wrong vectors.
values = sorted(zip(eig_vals.real, eig_vecs.T), key=itemgetter(0))
vals, vecs = zip(*values)

# Matrix of eigenvectors, one per row, in eigenvalue order.
matrix = np.array(vecs)

# Visualize the eigenvalue spectrum.
plt.plot(np.array(vals), 'o')
plt.show()

# + [markdown] id="IL_J921NPdTP" colab_type="text"
# Finalmente, obtenemos los nuevos vectores a partir de los eigenvectores de $L$ asociados a los eigenvalores más pequeños.
# + id="pXI-bQSnvCfF" colab_type="code" outputId="0d7973b1-f60b-4278-9f0e-6bfdf27e7abd" colab={"base_uri": "https://localhost:8080/", "height": 286}
# New node vectors: keep all eigenvectors (real parts only).
M_hat = matrix.T.real

# Shape of the embedding matrix.
print(M_hat.shape)

# Plot the embedded nodes.
plot_words(M_hat, labels)

# + [markdown] id="8lIb3kLlP47q" colab_type="text"
# ### Clustering de los puntos
#
# Una vez obtenido los nuevos vectores, podemos aplicar un método de clustering (k-means) para observar las regularidades encontradas.

# + id="toRSmLt_u4fm" colab_type="code" outputId="196660c8-cfc2-4eba-d2ba-31ff405bea7b" colab={"base_uri": "https://localhost:8080/", "height": 269}
from sklearn.cluster import KMeans

# Number of centroids for k-means.
centroids = 5

# Fit k-means on the spectral embedding, then predict a cluster per node.
kmeans = KMeans(n_clusters=centroids, init='random').fit(M_hat)
pred_labels = kmeans.predict(M_hat)

# Color each point by its cluster assignment.
plot_words(M_hat, labels, color=pred_labels)
plt.show()
notebooks/03_Spectral_clustering_graph_bilingual.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## HLCM
#
# <NAME>, UrbanSim, June 2018
#
# This notebook is a primary model estimation for HLCM Bay Area
#

import os; os.chdir('../')
import numpy as np, pandas as pd
from urbansim_templates import modelmanager as mm
from urbansim_templates.models import LargeMultinomialLogitStep
import orca

# ### Load data

# Load any script-based Orca registrations
from scripts import datasources
from scripts import models

# #### Tables loaded by datasources.py

# Print every registered table together with its column names.
for name in orca.list_tables():
    print(name.upper())
    print(orca.get_table(name).to_frame().columns.tolist())
    print()

# +
# Materialize any registered table as a DataFrame if needed, e.g.:
# households = orca.get_table('households').to_frame()
# units = orca.get_table('units').to_frame()
# -

# #### Look into an H5 file manually

# +
# hdf = pd.HDFStore('data/bayarea_ual.h5', 'r')
# hdf.keys()

# +
# establishments = pd.read_hdf('data/bayarea_ual.h5', 'establishments')
# -

# ### Generate accessibility measures

# +
# orca.list_steps()
# -

# Run the network steps that add accessibility variables.
orca.run(['initialize_network'])

orca.run(['network_aggregations'])

# Re-list the tables: the aggregation step adds new columns.
for name in orca.list_tables():
    print(name.upper())
    print(orca.get_table(name).to_frame().columns.tolist())
    print()

# ### Model Estimation

# +
# Configure a large multinomial logit step: households choose housing
# units, sampling 10 alternatives per chooser.
m = LargeMultinomialLogitStep()
m.choosers = ['households']
m.alternatives = ['buildings', 'nodes']
m.choice_column = 'unit_id'
m.alt_sample_size = 10

m.model_expression = 'res_price_per_sqft + residential_units_500 - 1'

m.name = 'hlcm1'
m.tags = ['arezoo', 'test']
# -

# %%time
m.fit()
summer-2018-model/notebooks/.ipynb_checkpoints/HLCM-checkpoint.ipynb
# -*- coding: utf-8 -*- # --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.1.1 # language: julia # name: julia-1.1 # --- 1 + 1 1 + 1 2 + 2 1 + 1 2 + 2; ?println ;pwd println("I'm excited to learn Julia!") my_answer = 42 typeof(my_answer) my_pi = 3.14159 typeof(my_pi) 😺 = "smiley cat!" typeof(😺) 😺 = 1 typeof(😺) 😀 = 0 😞 = -1 😺 + 😞 == 😀 sum = 3 + 7 difference = 10 - 3 product = 20 * 5 quotient = 100 / 10 power = 10^2 modulus = 101%2 ?convert days = 365 days_float = float(days) @assert days == 365 @assert days_float == 365.0 convert(Int64, "1") parse(Float64, "1") s1 = "I am a string." s2 = """I am also a string. """ "Here, we get an "error" because it's ambiguous where this string ends " """Look, Mom, no "errors"!!! """ typeof('a') 'We will get an error here' name = "Jane" num_fingers = 10 num_toes = 10 println("Hello, my name is $name.") println("I have $num_fingers fingers and $num_toes toes.") println("That is $(num_fingers + num_toes) digits in all!!") s3 = "How many cats "; s4 = "is too many cats?"; 😺 = 10 string(s3, s4) string("I don't know, but ", 😺, " is too few.") "hi"^100 myfavoriteanimals = ("penguins", "cats", "sugargliders") myfavoriteanimals[1] myfavoriteanimals = (bird = "penguins", mammal = "cats", marsupial = "sugargliders") myfavoriteanimals[1] myfavoriteanimals.bird myphonebook = Dict("Jenny" => "867-5309", "Ghostbusters" => "555-2368") myphonebook["Jenny"] myphonebook["Kramer"] = "555-FILK" myphonebook pop!(myphonebook, "Kramer") myphonebook myphonebook[1] myfriends = ["Ted", "Robyn", "Barney", "Lily", "Marshall"] mixture = [1, 1, 2, 3, "Ted", "Robyn"] fibonacci = [1, 1, 2, 3, 5, 8, 13] push!(fibonacci, 21) pop!(fibonacci) fibonacci favorites = [["koobideh", "chocolate", "eggs"],["penguins", "cats", "sugargliders"]] numbers = [[1, 2, 3], [4, 5], [6, 7, 8, 9]] rand(4, 3) rand(4, 3, 2) n = 0 while n < 10 n += 1 println(n) 
end n n = 0 while n < 10; n += 1 println(n) end n # + myfriends = ["Ted", "Robyn", "Barney", "Lily", "Marshall"] i = 1 while i <= length(myfriends) friend = myfriends[i] println("Hi $friend, it's great to see you!") i += 1 end # - for n in 1:10 println(n) end m, n = 5, 5 A = fill(0, (m, n)) for i in 1:m for j in 1:n A[i, j] = i + j end end A B = fill(0, (m, n)) for i in 1:m, j in 1:n B[i, j] = i + j end B C = [i + j for i in 1:m, j in 1:n] [ x+y for x in 1:10, y in 2:5] # + N=15 if (N % 3 == 0) && (N % 5 == 0) # `&&` means "AND"; % computes the remainder after division println("FizzBuzz") elseif N % 3 == 0 println("Fizz") elseif N % 5 == 0 println("Buzz") else println(N) end # - x = 10 y = 5 if x > y x else y end (x > y) ? x : y false && (println("hi"); true) true && (println("hi"); true) (x > 0) && error("x cannot be greater than 0") true || println("hi") false || println("hi") # + function sayhi(name) println("Hi $name, it's great to see you!") end function f(x) x^2 end # - sayhi("kyle") sayhi2(name) = println("Hi $name, it's great to see you!") sayhi2("ho") sayhi3 = name -> println("Hi $name, it's great to see you!") sayhi3("hohoo") sayhi(55595472) sayhi4 = (firstname, lastname) -> println("Hi $firstname $lastname, it's greate to see you!") sayhi4("hi", "ho") A = rand(3, 3) f(A) f("hi") v = rand(3) f(v) v = [3, 5, 2] sort(v) v sort!(v) v map(f, [1, 2, 3]) x -> x^3 map(x -> x^3, [1, 2, 3]) broadcast(f, [1, 2, 3]) f.([1, 2, 3]) f(A) f.(A) A = [i + 3*j for j in 0:2, i in 1:3] f(A) B = f.(A) A .+ 2 .* f.(A) ./ A using Pkg Pkg.add("Example") Pkg.add("Colors") using Colors palette = distinguishable_colors(100) rand(palette, 3, 3) Pkg.add("PyPlot") using PyPlot globaltemperatures = [14.4, 14.5, 14.8, 15.2, 15.5, 15.8] numpirates = [45000, 20000, 15000, 5000, 400, 17]; gr() plot(numpirates, globaltemperatures, label="line") scatter(numpirates, globaltemperatures, label="points", color="red") xlabel("Number of Pirates [Approximate]") ylabel("Global Temperature (C)") 
title("Influence of pirate population on global warming") xflip!() Pkg.add("UnicodePlots") unicodeplots() plot(numpirates, globaltemperatures, label="line") scatter!(numpirates, globaltemperatures, label="points") xlabel!("Number of Pirates [Approximate]") ylabel!("Global Temperature (C)") title!("Influence of pirate population on global warming") a = rand(10^7) # 1D vector of random numbers, uniform on [0,1) sum(a) @time sum(a) @time sum(a) @time sum(a) # using Pkg # Pkg.add("BenchmarkTools") using BenchmarkTools # + using Libdl C_code = """ #include <stddef.h> double c_sum(size_t n, double *X) { double s = 0.0; for (size_t i = 0; i < n; ++i) { s += X[i]; } return s; } """ const Clib = tempname() # make a temporary file # compile to a shared library by piping C_code to gcc # (works only if you have gcc installed): open(`gcc -fPIC -O3 -msse3 -xc -shared -o $(Clib * "." * Libdl.dlext) -`, "w") do f print(f, C_code) end # define a Julia function that calls the C function: c_sum(X::Array{Float64}) = ccall(("c_sum", Clib), Float64, (Csize_t, Ptr{Float64}), length(X), X) # - c_sum(a) c_sum(a) ≈ sum(a) # type \approx and then <TAB> to get the ≈ symbolb c_sum(a) - sum(a) ?isapprox c_bench = @benchmark c_sum($a) println("C: Fastest time was $(minimum(c_bench.times) / 1e6) msec") d = Dict() # a "dictionary", i.e. 
an associative array d["C"] = minimum(c_bench.times) / 1e6 # in milliseconds d using Plots gr() # + using Statistics # bring in statistical support for standard deviations t = c_bench.times / 1e6 # times in milliseconds m, σ = minimum(t), std(t) histogram(t, bins=500, xlim=(m - 0.01, m + σ), xlabel="milliseconds", ylabel="count", label="") # - f(x) = x^2 f(10) f([1,2,3]) f.([1,2,3]) foo(x::String, y::String) = println("My inputs x and y are both strings!") foo("hello", "hi!") foo(3, 4) foo(x::Int, y::Int) = println("My inputs x and y are both integers!") foo(3, 4) foo("hello", "hi!") methods(foo) methods(+) @which foo(3, 4) @which 3.0 + 3.0 foo(x::Number, y::Number) = println("My inputs x and y are both numbers!") foo(3.0, 4.0) foo(x, y) = println("I accept inputs of any type!") v = rand(3) foo(v, v) struct OrderedPair x::Real y::Real OrderedPair(x,y) = x > y ? error("out of order") : new(x,y) end OrderedPair(1, 2) OrderedPair(2, 1) import Pkg; Pkg.add("DataFrames") using DataFrames df = DataFrame(A = 1:4, B = ["M", "F", "F", "M"]) df.A df.A === df[!, 1] names(df) df = DataFrame() df.A = 1:8 df.B = ["M", "F", "F", "M", "F", "M", "M", "F"] df df = DataFrame(A = Int[], B = String[]) push!(df, (1, "M")) push!(df, [2, "N"]) push!(df, Dict(:B => "F", :A => 3)) df = DataFrame(A = 1:2:1000, B = repeat(1:10, inner=50), C = 1:500) df[df.A .> 500, :] df[in.(df.A, Ref([1, 5, 601])), :] df = DataFrame(A = 1:4, B = ["M", "F", "F", "M"]) describe(df) df = DataFrame(A = 1:4, B = 4.0:-1.0:1.0) aggregate(df, sum) aggregate(df, [sum, prod]) df.A[1] = 10 df people = DataFrame(ID = [20, 40], Name = ["<NAME>", "<NAME>"]) jobs = DataFrame(ID = [20, 40], Job = ["Lawyer", "Doctor"]) join(people, jobs, on = :ID) Pkg.add("JuMP") Pkg.add("GLPK") Pkg.add("Test") # + using JuMP, GLPK, Test const MOI = JuMP.MathOptInterface """ example_transp() Allocation of passenger cars to trains to minimize cars required or car-miles run. 
Based on: Fourer, <NAME> and <NAME>, A Modeling Language for Mathematical Programming, http://www.ampl.com/REFS/amplmod.ps.gz Appendix D. Author: <NAME> Date: Jan 30, 2015 """ function example_transp() ORIG = ["GARY", "CLEV", "PITT"] DEST = ["FRA", "DET", "LAN", "WIN", "STL", "FRE", "LAF"] supply = [1_400, 2_600, 2_900] demand = [900, 1_200, 600, 400, 1_700, 1_100, 1_000] @assert sum(supply) == sum(demand) cost = [ 39 14 11 14 16 82 8; 27 9 12 9 26 95 17; 24 14 17 13 28 99 20 ] model = Model(with_optimizer(GLPK.Optimizer)) @variable(model, trans[1:length(ORIG), 1:length(DEST)] >= 0) @objective(model, Min, sum(cost[i, j] * trans[i, j] for i in 1:length(ORIG), j in 1:length(DEST))) @constraint(model, [i in 1:length(ORIG)], sum(trans[i, j] for j in 1:length(DEST)) == supply[i]) @constraint(model, [j in 1:length(DEST)], sum(trans[i, j] for i in 1:length(ORIG)) == demand[j]) JuMP.optimize!(model) @test JuMP.termination_status(model) == MOI.OPTIMAL @test JuMP.primal_status(model) == MOI.FEASIBLE_POINT @test JuMP.objective_value(model) == 196200.0 end example_transp() # + using JuMP, GLPK # Preparing an optimization model m = Model(with_optimizer(GLPK.Optimizer)) # Declaring variables @variable(m, 0<= x1 <=10) @variable(m, x2 >=0) @variable(m, x3 >=0) # Setting the objective @objective(m, Max, x1 + 2x2 + 5x3) # Adding constraints @constraint(m, constraint1, -x1 + x2 + 3x3 <= -5) @constraint(m, constraint2, x1 + 3x2 - 7x3 <= 10) # Printing the prepared optimization model print(m) # Solving the optimization problem JuMP.optimize!(m) # Printing the optimal solutions obtained println("Optimal Solutions:") println("x1 = ", JuMP.value(x1)) println("x2 = ", JuMP.value(x2)) println("x3 = ", JuMP.value(x3)) # Printing the optimal dual variables println("Dual Variables:") println("dual1 = ", JuMP.shadow_price(constraint1)) println("dual2 = ", JuMP.shadow_price(constraint2)) # - using Pkg; Pkg.add("GLPK") # + using JuMP, GLPK m = Model(with_optimizer(GLPK.Optimizer)) @variable(m, 0 
<= x <= 2 ) @variable(m, 0 <= y <= 30 ) @objective(m, Max, 5x + 3*y ) @constraint(m, 1x + 5y <= 3.0 ) JuMP.optimize!(m) println("Objective value: ", JuMP.objective_value(m)) println("x = ", JuMP.value(x)) println("y = ", JuMP.value(y)) # -
Julia/julia_tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #### Importing Libraries

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt


# #### Function for extracting data from the Flight Aware webpage

def extract_data(flight_type):
    """Scrape FlightAware's live-activity listing for one aircraft type.

    Two result pages are fetched (offsets 0 and 20, i.e. the most recent
    40 flights ordered by actual departure time, newest first) and the
    flight table of each page is parsed.

    Parameters
    ----------
    flight_type : str
        FlightAware aircraft-type code, e.g. "C208".

    Returns
    -------
    list of pandas.DataFrame
        One DataFrame per result page.
    """
    # NOTE: the previous version initialised `aircraft = []` and then
    # tested `if aircraft is None:` -- a branch that could never be taken,
    # so the list was only ever built through the `else` path.  The dead
    # branch is removed; the observable result (a list of per-page
    # frames) is unchanged.
    frames = []
    for offset in [0, 20]:
        page = "https://flightaware.com/live/aircrafttype/"+str(flight_type)+"?;offset="+str(offset)+";order=actualdeparturetime;sort=DESC"
        # read_html returns a list of every matching table on the page;
        # the flight listing is the first "prettyTable fullWidth" table.
        tables = pd.read_html(page, attrs={"class": "prettyTable fullWidth"})
        frames.append(pd.DataFrame(tables[0]))
    return frames


# Download the two result pages for each aircraft type and save the
# combined table to one Excel workbook per type.
flight_type = ["C208", "PC12", "C182"]
for i in flight_type:
    aircraft = extract_data(i)
    # pd.concat replaces the deprecated DataFrame.append; like append's
    # default it keeps the per-page row indices (0..19, 0..19).
    aircraft_table = pd.concat(aircraft).copy()
    aircraft_table.to_excel("Aircraft type "+i+".xlsx", sheet_name=i)

# #### ANALYSIS FOR AIRCRAFT TYPE C208

# #### Reading the data for Aircraft Type C208 in the Jupyter Notebook

c208 = pd.read_excel("/Users/apurvasalvi/Desktop/magniX/Aircraft type C208.xlsx")
c208

pd.DataFrame(c208.groupby(['Origin', 'Destination']).size())

pd.DataFrame(c208.groupby(['Origin']).size())

# Most airports had flights frequently going to the same location

# #### Checking for NaN in the data

c208.isnull().sum()

# After observing the above data, we can see that the Destination, EstimatedArrival Time, EstimatedTimeEnroute and EstimatedTimeEnroute (Min) columns have NaN values. It is safe to assume that the columns with Departure values missing could be the cancelled flights which did not depart from the location.

# Tracking the cancelled flight in a different Dataframe called c208_cancelled.
c208_cancelled = c208[c208['Destination'].isnull()] c208_cancelled.Destination = c208_cancelled.Destination.fillna('Cancelled Flight') c208_cancelled.drop(['EstimatedArrival Time', 'EstimatedTimeEnroute', 'EstimatedTimeEnroute (Min)'], axis=1) c208_cancelled.Destination = c208_cancelled.Destination.fillna('Cancelled Flight') c208_cancelled # #### Visualizing the total number of flights which got cancelled from each of these airports c208_cancelled['Origin'].value_counts().plot(kind='bar') plt.title("Number of cancelled flights") plt.ylabel("Number of cancelled flights") plt.xlabel("Name of the airport") # The airport at Bethel has the most number of cancelled flights today # #### Dropping the Cancelled Flights from the original Dataframe c208.dropna(axis=0, subset=["Destination"], inplace=True) c208 # There is still 1 flight which took off from Saipan Intl(PGSN) and landed in Guam Intl(PGUM) which did not record it's Departure and Estimated Arrival Time. c208['Origin'].value_counts().plot(kind='bar') plt.title("Number of flights that took off from the airports") plt.ylabel("Number of flights") plt.xlabel("Name of the airport") # Thus, for C208 we can conclude that maximum number of flight took off from the Baltimore/Washington Intl(KBWI) and the Airport at Bethel had the most number of cancelled flights. # ### ANALYSIS FOR AIRCRAFT TYPE C182 # #### Reading the data for Aircraft Type C182 in Jupyter Notebook c182 = pd.read_excel("/Users/apurvasalvi/Desktop/magniX/Aircraft type C182.xlsx") c182 c182.groupby(["Origin", "Destination"]).size() c182.groupby(["Origin"]).size() # #### Checking for NaN c182.isnull().sum() # Out of the 39 flights of the type C182, we can see that 33 flights never made to their destination or got cancelled. 
# Let's do some analysis on the cancelled flights c182_cancelled = c182[c182['Destination'].isnull()] c182_cancelled.Destination = c182_cancelled.Destination.fillna('Cancelled Flight') c182_cancelled.drop(['EstimatedArrival Time', 'EstimatedTimeEnroute', 'EstimatedTimeEnroute (Min)'], axis=1) # Visualizing the number of flights that got cancelled from each airport c182_cancelled['Origin'].value_counts().plot(kind='bar') plt.title("Number of cancelled flights") plt.ylabel("Number of cancelled flights") plt.xlabel("Name of the airport") # The maximum number of flights that got cancelled were 2, and from the airports: Ogden-Hinckley (KOGD), Prescott Rgnl - <NAME> Fld (KPRC), Georgetown Muni (KGTU) # #### Dropping the Cancelled Flights from the original Dataframe c182.dropna(axis=0, subset=["Destination"], inplace=True) c182 # Even in C182, there is still 1 flight which took off from Lancaster Rgnl(KLNC) and landed in Dallas Executive(KRBD) which did not record it's Departure and Estimated Arrival Time. c182['Origin'].value_counts().plot(kind='bar') plt.title("Number of flights that took off from the airports") plt.ylabel("Number of flights") plt.xlabel("Name of the airport") # ### ANALYSIS FOR AIRCRAFT TYPE PC12 # Reading the data for aircraft type PC12 into Jupyter Notebook pc12 = pd.read_excel("/Users/apurvasalvi/Desktop/magniX/Aircraft type PC12.xlsx") pc12 pc12.groupby(["Origin", "Destination"]).size() pc12.groupby(["Origin"]).size() # #### Checking for NaN pc12.isnull().sum() # Out of the 33 flights of the type C182, we can see that only 5 flights never made to their destination or got cancelled. 
# Let's do some analysis on the cancelled flights pc12_cancelled = pc12[pc12['Destination'].isnull()] pc12_cancelled.Destination = pc12_cancelled.Destination.fillna('Cancelled Flight') pc12_cancelled.drop(['EstimatedArrival Time', 'EstimatedTimeEnroute', 'EstimatedTimeEnroute (Min)'], axis=1) # #### Visualizing the number of flights that got cancelled pc12_cancelled['Origin'].value_counts().plot(kind='bar') plt.title("Number of cancelled flights") plt.ylabel("Number of cancelled flights") plt.xlabel("Name of the airport") # #### Dropping the Cancelled Flights from the original Dataframe pc12.dropna(axis=0, subset=["Destination"], inplace=True) pc12 pc12['Origin'].value_counts().plot(kind='bar') plt.title("Number of flights that took off from the airports") plt.ylabel("Number of flights") plt.xlabel("Name of the airport") # The airports at Nantucket Memorial (KACK) and John Wayne (KSNA) had the most number of flights departing today # ### COMBINED ANALYSIS FOR C208, C182, AND PC12 # Cancelled and non cancelled flight analysis # # |---|C208|C182|PC12| # |---|---|---|---| # |Cancelled Flights|10|33|5| # |Flights that reached Destination|29|5|28| # |Total Flights Scheduled|39|38|33| # # ##### C208 -> 26% Cancelled Flights # ##### C182 -> 86% Cancelled Flights # ##### PC12 -> 15% Cancelled Flights # Most popular airports to take off from # # |Aircraft Type|Origin|Number of flights that took off| # |---|---|---| # |C208|Baltimore/Washington Intl(KBWI)|3| # |C182|---|---| # |PC12|Nantucket Memorial (KACK)|2| # |PC12|<NAME> (KSNA)|2| # Most cancelled Flights # # |Aircraft Type|Origin|Number of cancelled flights| # |---|---|---| # |C208|Bethel (BET/PABE)|2| # |C182|Ogden-Hinckley (KOGD|2| # |C182|<NAME> - Ernest a Love Fld (KPRC)|2| # |C182|Georgetown Muni (KGTU)|2| # |PC12|El Dorado Int'l (BOG / SKBO)|1| # |PC12|<NAME>uni (KGHG)|1| # |PC12|Fresno Yosemite Intl (KFAT)|1| # |PC12|Merrill Field (MRI / PAMR)|1| # |PC12|<NAME> (KCEZ)|1| # ### CONCLUSION # # From the analysis, 
it is clear that the aircraft type PC12 seems to be the most reliable aircraft, since it has a success rate of 85%, whereas the aircraft type C182 seems to be the most unreliable aircraft, with a success rate of only 14% #
MagniX Assignment 2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3.9.5 64-bit # name: python3 # --- # # Tutorial 1 - Bank decision loan problem with crisp inputs # In this tutorial a fuzzy inference system for loan approbation is builded. The problem has three input variables: score, ratio, and credit; and one output variable: decision. # + import os os.chdir('/workspaces/fuzzy-expert') # + import warnings warnings.filterwarnings("ignore") # - # ## Variable specification # # In the following code, a dictionary containing the variables of the problem is defined. The keys of the dictionary are the names of the variables in the rules. For each variable is defined the limits of the universe of discourse, the terms, and the membership function for each term. Finally, the variable `score` is ploted. # + tags=[] import matplotlib.pyplot as plt import numpy as np from fuzzy_expert.variable import FuzzyVariable variables = { "score": FuzzyVariable( universe_range=(150, 200), terms={ "High": [(175, 0), (180, 0.2), (185, 0.7), (190, 1)], "Low": [(155, 1), (160, 0.8), (165, 0.5), (170, 0.2), (175, 0)], }, ), "ratio": FuzzyVariable( universe_range=(0.1, 1), terms={ "Goodr": [(0.3, 1), (0.4, 0.7), (0.41, 0.3), (0.42, 0)], "Badr": [(0.44, 0), (0.45, 0.3), (0.5, 0.7), (0.7, 1)], }, ), # "credit": FuzzyVariable( universe_range=(0, 10), terms={ "Goodc": [(2, 1), (3, 0.7), (4, 0.3), (5, 0)], "Badc": [(5, 0), (6, 0.3), (7, 0.7), (8, 1)], }, ), # "decision": FuzzyVariable( universe_range=(0, 10), terms={ "Approve": [(5, 0), (6, 0.3), (7, 0.7), (8, 1)], "Reject": [(2, 1), (3, 0.7), (4, 0.3), (5, 0)], }, ), } plt.figure(figsize=(10, 2.5)) variables["score"].plot() # - # ## Rule specification # # # The fuzzy inference system has two rules. They are directly stored in a list. 
# + from fuzzy_expert.rule import FuzzyRule rules = [ FuzzyRule( premise=[ ("score", "High"), ("AND", "ratio", "Goodr"), ("AND", "credit", "Goodc"), ], consequence=[("decision", "Approve")], ), FuzzyRule( premise=[ ("score", "Low"), ("AND", "ratio", "Badr"), ("OR", "credit", "Badc"), ], consequence=[("decision", "Reject")], ) ] print(rules[0]) print() print(rules[1]) # - # ## Inference system specification and computations # # Finally, the fuzzy inference system is specified. The model is used to evaluate the following crisp values for the input variables: `score=190`, `ratio=0.39`, and `credit=1.5`. The model returns a dictionary with the values of the variables in the consequence of the rules and the certainty factor of the conclusion. # + from fuzzy_expert.inference import DecompositionalInference model = DecompositionalInference( and_operator="min", or_operator="max", implication_operator="Rc", composition_operator="max-min", production_link="max", defuzzification_operator="cog", ) model( variables=variables, rules=rules, score=190, ratio=0.39, credit=1.5, ) # - # ## Visualization of the results # # The function plot can be used to obtain a graphical represntation of the results of the inference systems. It uses the same parameters used to the evaluation of the system. # plt.figure(figsize=(10, 6)) model.plot( variables=variables, rules=rules, score=190, ratio=0.39, credit=1.5, ) # ## User interaction # # Using the `ipywidgets` package in Jupyter Lab it is possible to obtain an interactive user interface for the user. The funtion `demo` is used to plot the results; following, the function `interact` is used to create the user interface. 
# + from ipywidgets import interact, widgets def demo(score, ratio, credit): plt.figure(figsize=(10,6)) model.plot( variables=variables, rules=rules, score=score, ratio=ratio, credit=credit, ) interact( demo, score=widgets.FloatSlider(min=150, max=200), ratio=widgets.FloatSlider(min=0.1, max=1), credit=widgets.FloatSlider(min=0, max=10), ) # - # ![inter](interact.png)
sphinx/tutorials/tutorial-1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# #!/usr/bin/python3
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt

# SciencePlots styles: IEEE-sized figures without requiring a LaTeX install.
#plt.style.use(['science','ieee', 'no-latex'])
plt.style.use(['science','ieee', 'no-latex'])

# +
import matplotlib as mpl
from cycler import cycler

# Override the colour cycle with matplotlib's default "tab10" palette so
# that the C0..C9 colour aliases refer to these hex colours.
#mpl.rcParams['axes.prop_cycle'] = cycler(color='bgrcmyk')
mpl.rcParams['axes.prop_cycle'] = cycler(color=['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'])

data = [23, 45, 56, 78, 213]
#plt.bar([1, 2, 3, 4, 5], data, color='C2')
plt.bar([1, 2, 3, 4, 5], data)
#mpl.rcParams["axes.prop_cycle"] = mpl.cycler('color', ['1EAFAE', 'A3FFFF', '69FFFF'])
plt.show()
# -

# Same data, but with categorical x labels.  The label list was
# previously named `type`, which shadowed the built-in `type()`;
# renamed to `categories` (plot output is unchanged).
data = [23, 45, 56, 78, 213]
categories = ["A", "B", "C", "D", "E"]
plt.bar(categories, data)
plt.show()

# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl

th = np.linspace(0, 2*np.pi, 128)


def demo(sty):
    """Plot one period of sin/cos under style `sty` to compare the
    colour cycles of different matplotlib styles side by side."""
    mpl.style.use(sty)
    fig, ax = plt.subplots(figsize=(3, 3))
    ax.set_title('style: {!r}'.format(sty), color='C0')
    ax.plot(th, np.cos(th), 'C1', label='C1')
    ax.plot(th, np.sin(th), 'C2', label='C2')
    ax.legend()


demo('default')
demo('seaborn')
#plt.style.use('ggplot')
demo('ggplot')
python-plot/change-color.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/gan_mog_mode_hopping.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="OYWMcJafmrfI"
# # Mixture of Gaussians example with GANs
#
# This code was adapted from the ODE-GAN code here:
# https://github.com/deepmind/deepmind-research/blob/master/ode_gan/odegan_mog16.ipynb
#
# **The original colab was created by <NAME>**. Adapted by <NAME>.
#

# + [markdown] id="yAHjf0hcm8Az"
#
# **This code implements GANs for a Mixture of Gaussians.**
#
# It also provides an implementation of ODE-GAN (Training Generative Adversarial Networks by Solving Ordinary Differential Equations, by Qin et al.)
#
# The approach of ODE-GAN was mentioned in the book as using higher-order integrators such as RungeKutta4.
# + colab={"base_uri": "https://localhost:8080/"} id="ibLjLVKasC4a" outputId="c29a7069-200e-4307-da27-54f177aa76d3" # !pip install dm-haiku # + id="n8p0WAstrhUT" # @title Imports import jax from jax import lax import jax.numpy as jnp import numpy as np import matplotlib.pyplot as plt import haiku as hk import scipy as sp import functools # + id="aoIaRyCysZEs" # @title An MLP Haiku Module class MLP(hk.Module): def __init__(self, depth, hidden_size, out_dim, name="SimpleNet"): super(MLP, self).__init__(name=name) self._depth = depth self._hidden_size = hidden_size self._out_dim = out_dim layers = [] for i in range(self._depth): layers.append(hk.Linear(self._hidden_size, name="linear_%d" % (i))) self._layers = layers self._final_layer = hk.Linear(self._out_dim, name="final_layer") def __call__(self, input): h = input for i in range(self._depth): h = jax.nn.relu(self._layers[i](h)) return self._final_layer(h) # + id="KBgWwKKyv6VI" # @title Real Data def real_data(batch_size): mog_mean = np.array( [ [1.50, 1.50], [1.50, 0.50], [1.50, -0.50], [1.50, -1.50], [0.50, 1.50], [0.50, 0.50], [0.50, -0.50], [0.50, -1.50], [-1.50, 1.50], [-1.50, 0.50], [-1.50, -0.50], [-1.50, -1.50], [-0.50, 1.50], [-0.50, 0.50], [-0.50, -0.50], [-0.50, -1.50], ] ) temp = np.tile(mog_mean, (batch_size // 16 + 1, 1)) mus = temp[0:batch_size, :] return mus + 0.02 * np.random.normal(size=(batch_size, 2)) # + id="E6uViIllRDlL" # @title ODE-integrators def euler_step(func, y0, f0, t0, dt): # Euler update y1 = jax.tree_map(lambda u, v: dt * v + u, y0, f0) return y1 def runge_kutta_step(func, y0, f0, t0, dt): # RK4 Butcher tableaux alpha = jnp.array([1.0 / 2.0, 1.0 / 2.0, 1.0, 0]) beta = jnp.array( [ [1.0 / 2.0, 0, 0, 0], [0, 1.0 / 2.0, 0, 0], [0, 0, 1.0, 0], ] ) c_sol = jnp.array([1.0 / 6.0, 1.0 / 3.0, 1.0 / 3.0, 1.0 / 6.0]) def body_fun(i, k): ti = t0 + dt * alpha[i - 1] yi = jax.tree_map(lambda u, v: u + dt * jnp.tensordot(beta[i - 1, :], v, axes=1), y0, k) ft = func(yi, ti) return 
jax.tree_map(lambda x, y: x.at[i, :].set(y), k, ft) k = jax.tree_map(lambda f: jnp.zeros((4,) + f.shape, f.dtype).at[0, :].set(f), f0) k = lax.fori_loop(1, 4, body_fun, k) y1 = jax.tree_map(lambda u, v: dt * jnp.tensordot(c_sol, v, axes=1) + u, y0, k) return y1 # + id="NHCYH1tnwaTL" # @title Utility Functions. def disc_loss(disc_params, gen_params, real_examples, latents): fake_examples = gen_model.apply(gen_params, None, latents) real_logits = disc_model.apply(disc_params, None, real_examples) fake_logits = disc_model.apply(disc_params, None, fake_examples) disc_real = real_logits - jax.nn.log_sigmoid(real_logits) disc_fake = -jax.nn.log_sigmoid(fake_logits) return -jnp.mean(disc_real + disc_fake) def gen_loss(disc_params, gen_params, real_examples, latents): fake_examples = gen_model.apply(gen_params, None, latents) fake_logits = disc_model.apply(disc_params, None, fake_examples) disc_fake = fake_logits - jax.nn.log_sigmoid(fake_logits) return -jnp.mean(disc_fake) def gen_loss_per_example(disc_params, gen_params, real_examples, latents): fake_examples = gen_model.apply(gen_params, None, latents) fake_logits = disc_model.apply(disc_params, None, fake_examples) disc_fake = fake_logits - jax.nn.log_sigmoid(fake_logits) return -disc_fake def gen_norm_per_example(disc_params, gen_params, real_examples, latents): grad = jax.jacfwd(gen_loss_per_example, argnums=1)(disc_params, gen_params, real_examples, latents) flat, _ = jax.tree_flatten(grad) norm = jnp.zeros(shape=(latents.shape[0],)) for a in flat: norm += jnp.sum(a * a, axis=np.arange(1, len(a.shape))) return -jnp.mean(norm) def disc_loss_per_example(disc_params, gen_params, real_examples, latents): fake_examples = gen_model.apply(gen_params, None, latents) real_logits = disc_model.apply(disc_params, None, real_examples) fake_logits = disc_model.apply(disc_params, None, fake_examples) disc_real = real_logits - jax.nn.log_sigmoid(real_logits) disc_fake = -jax.nn.log_sigmoid(fake_logits) return -(disc_real + 
disc_fake) def disc_norm_per_example(disc_params, gen_params, real_examples, latents): grad = jax.jacfwd(disc_loss_per_example, argnums=0)(disc_params, gen_params, real_examples, latents) flat, _ = jax.tree_flatten(grad) norm = jnp.zeros(shape=(latents.shape[0],)) for a in flat: norm += jnp.sum(a * a, axis=np.arange(1, len(a.shape))) return -jnp.mean(norm) def gen_norm(disc_params, gen_params, real_examples, latents): grad = jax.grad(gen_loss, argnums=1)(disc_params, gen_params, real_examples, latents) flat, _ = jax.tree_flatten(grad) norm = 0.0 for a in flat: norm += jnp.sum(a * a) return -norm def get_gen_grad(gen_params, t, disc_params, real_examples, latents): return jax.grad(gen_loss, argnums=1)(disc_params, gen_params, real_examples, latents) def get_disc_grad(disc_params, t, gen_params, real_examples, latents): return jax.grad(disc_loss, argnums=0)(disc_params, gen_params, real_examples, latents) def variance_calc(disc_params, gen_params, real_examples, latents): neg_var = gen_norm_per_example(disc_params, gen_params, real_examples, latents) neg_var -= gen_norm(disc_params, gen_params, real_examples, latents) return neg_var # + id="xjTBhJuOh_wO" # @title Visualising the data. 
def kde(mu, tau, bbox=None, xlabel="", ylabel="", cmap="Blues", st=0): values = np.vstack([mu, tau]) kernel = sp.stats.gaussian_kde(values) fig, ax = plt.subplots() ax.axis(bbox) ax.set_aspect(abs(bbox[1] - bbox[0]) / abs(bbox[3] - bbox[2])) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xticks([]) ax.set_yticks([]) xx, yy = np.mgrid[bbox[0] : bbox[1] : 300j, bbox[2] : bbox[3] : 300j] positions = np.vstack([xx.ravel(), yy.ravel()]) f = np.reshape(kernel(positions).T, xx.shape) cfset = ax.contourf(xx, yy, f, cmap=cmap) plt.tight_layout() # plt.show() # + id="G2G32j5N1psa" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="fdcc56a2-0497-45fe-d63c-1505645f9708" # @title Integration n_itrs = 20001 # @param {type : 'integer'} n_save = 2000 # @param {type : 'integer'} latent_size = 32 # @param {type : 'integer'} bs = 512 # @param {type : 'integer'} odeint = "euler_step" # @param ['euler_step', 'euler_heun_step', 'runge_kutta_step'] {type : 'string'} delta_t = 0.05 # @param {type : 'number'} reg_param = 0.0 # @param {type : 'number'} t = 0.0 def forward_disc(batch): disc_model = MLP(2, 25, 1) return disc_model(batch) def forward_gen(batch): gen_model = MLP(2, 25, 2) return gen_model(batch) disc_model = hk.transform(forward_disc) gen_model = hk.transform(forward_gen) real_examples = real_data(bs) ODEINT = {"runge_kutta_step": runge_kutta_step, "euler_step": euler_step} @jax.jit def ode_update(i, disc_params, gen_params, real_examples, latents): dloss, disc_grad = jax.value_and_grad(disc_loss, argnums=0)(disc_params, gen_params, real_examples, latents) gloss, gen_grad = jax.value_and_grad(gen_loss, argnums=1)(disc_params, gen_params, real_examples, latents) variance, disc_gen_grad = jax.value_and_grad(variance_calc)(disc_params, gen_params, real_examples, latents) norms_per_example, _ = jax.value_and_grad(gen_norm_per_example, argnums=0)( disc_params, gen_params, real_examples, latents ) discnorm, gen_disc_grad = 
jax.value_and_grad(disc_norm_per_example, argnums=1)( disc_params, gen_params, real_examples, latents ) norms, _ = jax.value_and_grad(gen_norm, argnums=0)(disc_params, gen_params, real_examples, latents) grad_disc_fn = functools.partial( get_disc_grad, **{"gen_params": gen_params, "real_examples": real_examples, "latents": latents} ) grad_gen_fn = functools.partial( get_gen_grad, **{"disc_params": disc_params, "real_examples": real_examples, "latents": latents} ) new_gen_params = ODEINT[odeint](grad_gen_fn, gen_params, gen_grad, 0.0, delta_t) new_disc_params = ODEINT[odeint](grad_disc_fn, disc_params, disc_grad, 0.0, delta_t) new_disc_params = jax.tree_map(lambda x, y: x + delta_t * reg_param * y, new_disc_params, disc_gen_grad) new_gen_params = jax.tree_map(lambda x, y: x + delta_t * reg_param * y, new_gen_params, gen_disc_grad) return new_disc_params, new_gen_params, -dloss, -gloss rng = jax.random.PRNGKey(np.random.randint(low=0, high=int(1e7))) test_latents = np.random.normal(size=(bs * 10, latent_size)) latents = np.random.normal(size=(bs, latent_size)) disc_params = disc_model.init(rng, real_examples) gen_params = gen_model.init(jax.random.PRNGKey(np.random.randint(low=0, high=int(1e7))), latents) x = np.arange(-2.0, 2.0, 0.1) y = np.arange(-2.0, 2.0, 0.1) X, Y = np.meshgrid(x, y) pairs = np.stack((X, Y), axis=-1) pairs = np.reshape(pairs, (-1, 2)) bbox = [-2, 2, -2, 2] kde(real_examples[:, 0], real_examples[:, 1], bbox=bbox, st=0) plt.title("Data") plt.show() for e in range(n_itrs): real_examples = real_data(bs) latents = np.random.normal(size=(bs, latent_size)) (disc_params, gen_params, dloss, gloss) = ode_update(e, disc_params, gen_params, real_examples, latents) t += delta_t if e % n_save == 0: print("i = %d, discriminant loss = %s, generator loss = %s" % (e, dloss, gloss)) fake_examples = gen_model.apply(gen_params, None, test_latents) kde(fake_examples[:, 0], fake_examples[:, 1], bbox=bbox, st=e) plt.title("Samples at iteration {}".format(e)) plt.show() 
# + id="o4_PsWiQLT8B"
notebooks/misc/gan_mog_mode_hopping.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from haven import haven_jupyter as hj from haven import haven_results as hr try: # %load_ext google.colab.data_table except: pass # path to where the experiments got saved savedir_base = "experiments" # filter exps filterby_list = None # get experiments rm = hr.ResultManager(savedir_base=savedir_base, filterby_list=filterby_list, verbose=0) # dashboard variables title_list = ['dataset', 'model'] y_metrics = ['val_mae'] # launch dashboard hj.get_dashboard(rm, vars(), wide_display=True) # - # !python test.py -e shanghai -d /kaggle/input/shanghaitech/ShanghaiTech/part_B/ -sb experiments -nw 2
testing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # [Paris Saclay Center for Data Science](http://www.datascience-paris-saclay.fr) # # ## [Titanic RAMP](http://www.ramp.studio/problems/titanic): survival prediction of Titanic passengers # # _<NAME> (Institut Curie/Mines ParisTech), <NAME> (Institut Curie/Mines ParisTech), <NAME> (LTCI/Télécom ParisTech), <NAME> (LAL/CNRS)_ # # ## Introduction # This is an initiation project to introduce RAMP and get you to know how it works. # # The goal is to develop prediction models able to **identify people who survived from the sinking of the Titanic, based on gender, age, and ticketing information**. # # The data we will manipulate is from the [Titanic kaggle challenge](https://www.kaggle.com/c/titanic-gettingStarted). # ### Requirements # # * numpy>=1.10.0 # * matplotlib>=1.5.0 # * pandas>=0.19.0 # * scikit-learn>=0.17 (different syntaxes for v0.17 and v0.18) # * seaborn>=0.7.1 # %matplotlib inline import os import glob import numpy as np from scipy import io import matplotlib.pyplot as plt import pandas as pd # ## Exploratory data analysis # ### Loading the data train_filename = 'data/train.csv' data = pd.read_csv(train_filename) y_train = data['Survived'].values X_train = data.drop(['Survived', 'PassengerId'], axis=1) X_train.head(5) data.describe() data.count() # The original training data frame has 891 rows. In the starting kit, we give you a subset of 445 rows. Some passengers have missing information: in particular `Age` and `Cabin` info can be missing. The meaning of the columns is explained on the [challenge website](https://www.kaggle.com/c/titanic-gettingStarted/data): # ### Predicting survival # # The goal is to predict whether a passenger has survived from other known attributes. 
Let us group the data according to the `Survived` columns: data.groupby('Survived').count() # About two thirds of the passengers perished in the event. A dummy classifier that systematically returns "0" would have an accuracy of 62%, higher than that of a random model. # ### Some plots # #### Features densities and co-evolution # A scatterplot matrix allows us to visualize: # * on the diagonal, the density estimation for each feature # * on each of the off-diagonal plots, a scatterplot between two features. Each dot represents an instance. from pandas.plotting import scatter_matrix scatter_matrix(data.get(['Fare', 'Pclass', 'Age']), alpha=0.2, figsize=(8, 8), diagonal='kde'); # #### Non-linearly transformed data # # The `Fare` variable has a very heavy tail. We can log-transform it. # + data_plot = data.get(['Age', 'Survived']) data_plot = data.assign(LogFare=lambda x : np.log(x.Fare + 10.)) scatter_matrix(data_plot.get(['Age', 'LogFare']), alpha=0.2, figsize=(8, 8), diagonal='kde'); data_plot.plot(kind='scatter', x='Age', y='LogFare', c='Survived', s=50, cmap=plt.cm.Paired); # - # #### Plot the bivariate distributions and marginals of two variables # # Another way of visualizing relationships between variables is to plot their bivariate distributions. # + import seaborn as sns sns.set() sns.set_style("whitegrid") sns.jointplot(data_plot.Age[data_plot.Survived == 1], data_plot.LogFare[data_plot.Survived == 1], kind="kde", size=7, space=0, color="b"); sns.jointplot(data_plot.Age[data_plot.Survived == 0], data_plot.LogFare[data_plot.Survived == 0], kind="kde", size=7, space=0, color="y"); # - # ## Making predictions # # A basic prediction workflow, using scikit-learn, will be presented below. 
# First, we will perform some simple preprocessing of our data: # # * [one-hot encode](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OneHotEncoder.html) the categorical features: `Sex`, `Pclass`, `Embarked` # * for the numerical columns `Age`, `SibSp`, `Parch`, `Fare`, fill in missing values with a default value (`-1`) # * all remaining columns will be dropped # # This can be done succintly with [`make_column_transformer`](https://scikit-learn.org/stable/modules/generated/sklearn.compose.make_column_transformer.html) which performs specific transformations on specific features. # + from sklearn.compose import make_column_transformer from sklearn.preprocessing import OneHotEncoder from sklearn.impute import SimpleImputer categorical_cols = ['Sex', 'Pclass', 'Embarked'] numerical_cols = ['Age', 'SibSp', 'Parch', 'Fare'] preprocessor = make_column_transformer( (OneHotEncoder(handle_unknown='ignore'), categorical_cols), (SimpleImputer(strategy='constant', fill_value=-1), numerical_cols), ) # - # The `preprocessor` object created with `make_column_transformer` can be used in a scikit-learn [`pipeline`](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html). A `pipeline` assembles several steps together and can be used to cross validate an entire workflow. Generally, transformation steps are combined with a final estimator. # # We will create a pipeline consisting of the `preprocessor` created above and a final estimator, `LogisticRegression`. # + from sklearn.pipeline import Pipeline from sklearn.linear_model import LogisticRegression pipeline = Pipeline([ ('transformer', preprocessor), ('classifier', LogisticRegression()), ]) # - # We can cross-validate our `pipeline` using `cross_val_score`. Below we will have specified `cv=8` meaning KFold cross-valdiation splitting will be used, with 8 folds. The Area Under the Receiver Operating Characteristic Curve (ROC AUC) score is calculated for each split. 
# The output `score` will be an array of 8 scores, one from each KFold split. The mean and standard deviation of the 8 scores are printed at the end.

# +
from sklearn.model_selection import cross_val_score

scores = cross_val_score(pipeline, X_train, y_train, cv=8, scoring='roc_auc')

print("mean: %e (+/- %e)" % (scores.mean(), scores.std()))
# -

# ### Testing
#
# Once you have created a model with cross-validation scores you are happy with, you can test how well your model performs on the independent test data.
#
# First we will read in our test data:

# +
# test_filename = 'data/test.csv'
# data = pd.read_csv(test_filename)
# y_test = data['Survived'].values
# X_test = data.drop(['Survived', 'PassengerId'], axis=1)
# X_test.head(5)
# -

# Next we need to fit our pipeline on our training data:

# +
# clf = pipeline.fit(X_train, y_train)
# -

# Now we can predict on our test data:

# +
# y_pred = pipeline.predict(X_test)
# -

# Finally, we can calculate how well our model performed on the test data:

# +
# from sklearn.metrics import roc_auc_score
# score = roc_auc_score(y_test, y_pred)
# score
# -

# ## RAMP submissions
# For submitting to the [RAMP site](http://ramp.studio), you will need to write a `submission.py` file that defines a `get_estimator` function that returns a scikit-learn [pipeline](https://scikit-learn.org/stable/modules/generated/sklearn.pipeline.Pipeline.html).
#
# For example, to submit our basic example above, we would define our `pipeline` within the function and return the pipeline at the end. Remember to include all the necessary imports at the beginning of the file.
# +
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder


def get_estimator():
    """Build the Titanic estimator: column preprocessing feeding a logistic regression.

    Returns a scikit-learn Pipeline whose first step one-hot encodes the
    categorical columns and imputes missing numeric values with -1, and whose
    final step is a LogisticRegression classifier.
    """
    # Categorical columns are one-hot encoded; unseen categories at predict
    # time are ignored rather than raising.
    encoder = OneHotEncoder(handle_unknown='ignore')
    # Missing numeric values are filled with the sentinel -1.
    imputer = SimpleImputer(strategy='constant', fill_value=-1)
    preprocessor = make_column_transformer(
        (encoder, ['Sex', 'Pclass', 'Embarked']),
        (imputer, ['Age', 'SibSp', 'Parch', 'Fare']),
    )
    # Chain the column transformer with the final estimator.
    return Pipeline([
        ('transformer', preprocessor),
        ('classifier', LogisticRegression()),
    ])
# -

# If you take a look at the sample submission in the directory `submissions/starting_kit`, you will find a file named `submission.py`, which has the above code in it.
#
# You can test that the sample submission works by running `ramp_test_submission` in your terminal (ensure that `ramp-workflow` has been installed and you are in the `titanic` ramp kit directory). Alternatively, within this notebook you can run:

# +
# # !ramp_test_submission
# -

# To test that your own submission works, create a new folder within `submissions` and name it how you wish. Within your new folder save your `submission.py` file that defines a `get_estimator` function. Test your submission locally by running:
#
# `ramp_test_submission --submission <folder>`
#
# where `<folder>` is the name of the new folder you created above.

# ## Submitting to [ramp.studio](http://ramp.studio)
#
# Once you found a good solution, you can submit it to [ramp.studio](http://www.ramp.studio). First, if it is your first time using RAMP, [sign up](http://www.ramp.studio/sign_up), otherwise [log in](http://www.ramp.studio/login). Then, find the appropriate open event for the [titanic](http://www.ramp.studio/events/titanic) challenge. Sign up for the event.
Note that both RAMP and event signups are controlled by RAMP administrators, so there **can be a delay between asking for signup and being able to submit**. # # Once your signup request(s) have been accepted, you can go to your [sandbox](http://www.ramp.studio/events/titanic/sandbox) and copy-paste (or upload) your `submissions.py` file. Save your submission, name it, then click 'submit'. The submission is trained and tested on our backend in the same way as `ramp_test_submission` does it locally. While your submission is waiting in the queue and being trained, you can find it in the "New submissions (pending training)" table in [my submissions](http://www.ramp.studio/events/titanic/my_submissions). Once it is trained, you get a mail, and your submission shows up on the [public leaderboard](http://www.ramp.studio/events/titanic/leaderboard). # # If there is an error (despite having tested your submission locally with `ramp_test_submission`), it will show up in the "Failed submissions" table in [my submissions](http://www.ramp.studio/events/titanic/my_submissions). You can click on the error to see part of the trace. # # After submission, do not forget to give credits to the previous submissions you reused or integrated into your submission. # # The data set we use at the backend is usually different from what you find in the starting kit, so the score may be different. # # The usual workflow with RAMP is to explore solutions by refining feature transformations, selecting different models and perhaps do some AutoML/hyperopt, etc., in a notebook setting, then test them with `ramp_test_submission`. 
The script prints mean cross-validation scores: # # ``` # ---------------------------- # train auc = 0.85 ± 0.005 # train acc = 0.81 ± 0.006 # train nll = 0.45 ± 0.007 # valid auc = 0.87 ± 0.023 # valid acc = 0.81 ± 0.02 # valid nll = 0.44 ± 0.024 # test auc = 0.83 ± 0.006 # test acc = 0.76 ± 0.003 # test nll = 0.5 ± 0.005 # ``` # # The official score in this RAMP (the first score column after "historical contributivity" on the [leaderboard](http://www.ramp.studio/events/titanic/leaderboard)) is area under the roc curve ("auc"), so the line that is relevant in the output of `ramp_test_submission` is `valid auc = 0.87 ± 0.023`. # ## More information # # You can find more information in the [README](https://github.com/paris-saclay-cds/ramp-workflow/blob/master/README.md) of the [ramp-workflow library](https://github.com/paris-saclay-cds/ramp-workflow). # ## Contact # # Don't hesitate to [contact us](mailto:<EMAIL>?subject=titanic notebook).
rampwf/tests/kits/titanic_no_test/titanic_no_test_starting_kit.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.5 64-bit (''adam_can_play_any_strategy'': conda)'
#     name: python3
# ---

import os

# The notebook lives two levels below the project root; remember both
# directories before chdir-ing so relative paths keep working.
EXAMPLE_DIR = os.getcwd()
os.chdir(os.path.join('..', '..'))
PROJECT_DIR = os.getcwd()
print('EXAMPLE_DIR: ', EXAMPLE_DIR)
print('PROJECT_DIR: ', PROJECT_DIR)

# +
# %load_ext autoreload
# %autoreload 2

import gym
from IPython.display import Video, display
import sys
import time
import numpy as np
from pathlib import Path

sys.path.append(os.path.join(PROJECT_DIR, 'src'))

from src.graph import Graph
from src.graph import graph_factory
from src.config import ROOT_PATH
from src.strategy_synthesis.multiobjective_solver import MultiObjectiveSolver
from src.simulation.simulator import Simulator

# TODO: Install Wombats to your local directory relative to regret_synthesis_toolbox
WOMBATS_EXPERIMENT_DIRECTORY = os.path.join(os.path.dirname(PROJECT_DIR), 'wombats_experiments')
sys.path.append(WOMBATS_EXPERIMENT_DIRECTORY)
from wombats.systems.minigrid import GYM_MONITOR_LOG_DIR_NAME
from wombats.systems.minigrid import DynamicMinigrid2PGameWrapper

DIR = EXAMPLE_DIR
Graph.automata_data_dir = DIR

# +
# Experiment configuration flags (plotting, solver behaviour, I/O paths).
debug = True
env_id = 'MiniGrid-FloodingLava-v0'
pdfa_config_yaml="config/PDFA_charging_station"
player_steps = {'sys': [1], 'env': [1]}

load_game_from_file = False
plot_minigrid = False
plot_pdfa = True
plot_product = False
finite = True
view = True
save_flag = True
format = 'png'  # NOTE(review): shadows the builtin `format` for the rest of the notebook

stochastic = False
adversarial = True
plot_strategies=False
plot_graph_with_strategy = False
plot_graph_with_pareto = False
plot_pareto = True
speedup = True

env_filename = os.path.join(DIR, 'plots', 'gym_env.png')
Path(os.path.split(env_filename)[0]).mkdir(parents=True, exist_ok=True)
env_dpi = 300
# -

# PDFA
# Build the probabilistic DFA (task specification) from its YAML config.
pdfa = graph_factory.get(
    'PDFA',
    graph_name="pdfa",
    config_yaml=pdfa_config_yaml,
    save_flag=save_flag,
    plot=plot_pdfa,
    view=view,
    format=format)

# +
# OpenAI Minigrid Env
# Wrap the gym env as a two-player (system vs environment) game.
env = gym.make(env_id)
env = DynamicMinigrid2PGameWrapper(
    env,
    player_steps=player_steps,
    monitor_log_location=os.path.join(DIR, GYM_MONITOR_LOG_DIR_NAME))
env.reset()
env.render_notebook(env_filename, env_dpi)

# +
file_name = env_id + 'Game'
filepath = os.path.join(DIR, 'config', file_name)
config_yaml = os.path.relpath(filepath, ROOT_PATH)

# Game Construction
start = time.time()
two_player_graph = graph_factory.get('TwoPlayerGraph',
                                     graph_name='TwoPlayerGame',
                                     config_yaml=config_yaml,
                                     from_file=load_game_from_file,
                                     minigrid=env,
                                     save_flag=save_flag,
                                     plot=plot_minigrid,
                                     view=view,
                                     format=format)
end = time.time()

# +
# Product Game Construction
# Product of the two-player game graph with the PDFA specification.
file_name = env_id + 'ProductAutomaton'
# config_yaml = os.path.join(DIR, 'config', file_name)
config_yaml = None

start = time.time()
game = graph_factory.get('ProductGraph',
                         graph_name='ProductAutomaton',
                         config_yaml=config_yaml,
                         trans_sys=two_player_graph,
                         automaton=pdfa,
                         save_flag=True,
                         prune=False,
                         debug=False,
                         absorbing=True,
                         finite=finite,
                         plot=plot_product,
                         integrate_accepting=True,
                         view=view,
                         format=format)
end = time.time()
print(f'Product Construction took {end-start:.2f} seconds')
# -

# Solve the multi-objective game and plot its Pareto front.
solver = MultiObjectiveSolver(game,
                              epsilon=1e-7,
                              max_iteration=300,
                              stochastic=stochastic,
                              adversarial=adversarial)
solver.solve(plot_strategies=plot_strategies,
             plot_graph_with_strategy=plot_graph_with_strategy,
             plot_graph_with_pareto=plot_graph_with_pareto,
             plot_pareto=plot_pareto,
             speedup=speedup,
             debug=debug,
             view=view,
             format=format)

# +
iterations = 100
iterations = 1  # overrides the line above (kept from the original notebook)
# Simulate one strategy per Pareto point; record video only for small runs.
for pp in solver.get_pareto_points():
    strategy = solver.get_a_strategy_for(pp)
    print('-'*100)
    print(f"Evaluate for a pareto point {pp}")
    print('-'*100)
    sim = Simulator(env, game)
    sim.run(iterations=iterations,
            sys_strategy=strategy,
            render=False,
            record_video=iterations<=5)
    sim.get_stats()
    sim.plot_grid()

# +
import matplotlib.pyplot as plt

# Scatter the simulated costs over the solver's Pareto front for comparison.
costs = []
for result in sim._results:
    costs.append(result['Cost'])
costs = np.array(costs)

fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(costs[:, 0], costs[:, 1])
solver.plot_pareto_front(ax)
# -

# # Evaluation

# +
# TODO(review): clean up this action-translation cell — duplicated nesting and
# hard-coded action indices below.
# Translate the env player's encoded multi-actions into readable string tuples.
player = 'env'
As = []
for multiactions in env.player_actions[player]:
    action_strings = []
    for agent, actions in zip(env.unwrapped.agents, multiactions):
        action_string = []
        for action in actions:
            # None / NaN entries mean "no action" for that agent slot.
            if action is None or np.isnan(action):
                continue
            a_str = agent.ACTION_ENUM_TO_STR[action]
            action_string.append(a_str)
        action_strings.append(tuple(action_string))
    # sys takes the first agent's actions; env takes the remaining agents'.
    action_strs = action_strings[0] if player == 'sys' else action_strings[1:]
    As.append(tuple(action_strs))
print(As)

# +
# Replay the first Pareto strategy against a fixed, hand-picked env action
# sequence (indices chosen by inspection of the printout above).
pp = solver.get_pareto_points()[0]
strategy = solver.get_a_strategy_for(pp)

env_actions = [As[1], As[2], As[5], As[1], As[1]]

sim = Simulator(env, game)
sim.run_turn_based_game(
    sys_strategy=strategy,
    env_actions=env_actions,
    render=False,
    record_video=True)
# -
examples/ChargingStation/main.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Data Preparation
#
# The first step is to get the data.
#
# In this example, let us get the data from binance.com. We can use the binance python api.
#
# https://python-binance.readthedocs.io/en/latest/

import datetime

import pandas as pd
import numpy as np
from binance.client import Client

# Unauthenticated client: the public market-data endpoints used below do not
# require API keys.
client = Client()

info = client.get_symbol_info('BTCUSDT')
info

# Daily candlesticks from 1 Jan 2020 up to now.
klines = client.get_historical_klines("BTCUSDT", Client.KLINE_INTERVAL_1DAY, "1 Jan, 2020")
len(klines)

bitcoin_df = pd.DataFrame(klines)
# Column names follow the order of the Binance kline REST payload.
bitcoin_df.columns = ["open_time", "open", "high", "low", "close", "volume", "close_time", "quote", "no_trades", "base_buy", "quote_buy", "ignore"]
bitcoin_df

# Binance timestamps are UTC epoch milliseconds. Convert them with pandas in
# UTC rather than datetime.datetime.fromtimestamp(), which interprets the
# epoch in the *local* timezone and would make the saved dates depend on the
# machine the script runs on.
bitcoin_df["date"] = pd.to_datetime(bitcoin_df["open_time"], unit="ms")
bitcoin_df

bitcoin_df.to_csv("bitcoin_daily_prices.csv", index=False)
Bit-coin prediction/1 Data Preparation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): `laser.abcd` is a project-local ray-transfer-matrix (ABCD)
# toolkit; Beampath does geometric ray tracing, GaussianBeampath propagates a
# Gaussian beam — confirm API details against the package itself.
from laser.abcd import Beampath, GaussianBeampath
# %matplotlib inline

# # Example with all types of elements

# +
# Initialise input
bp = Beampath(10, np.deg2rad(-15))

# Add elements
bp.add_freespace(80)

# Window
bp.add_interface(1.7, 100)
bp.add_freespace(15)
bp.add_interface(1)
bp.add_freespace(20)

bp.add_thicklens(1.33, 20) # a thick lens is the same as interface > propagation > interface
bp.add_freespace(20)

bp.add_thinlens(50)
bp.add_freespace(30)
bp.add_thinlens(-100)

# Calculate image position
bp.add_image()

# Display the beam path
bp.plot()
# -

# ## Modify beampath after construction

# +
# Edit the beam path after it was built

# Edit the object
bp.edit_object(radius = 2, angle=0, position = -100)

# Remove first thicklens
bp.remove_element([2,4])

# Remove last element
bp.pop()

# Replot
bp.plot()

# Add a beampath
bp_add = Beampath()
bp_add.add_freespace(110)
bp_add.add_thicklens(1.5, 10, np.inf, -50)
bp_add.add_freespace(100)
bp.extend(bp_add)

# Remove the image from the calculations
bp.remove_image()

# replot
bp.plot()
# -

# # Gaussian beam propagation

# +
# Same optical system traced twice: once with plain ABCD rays, once as a
# Gaussian beam, so the exit radii can be compared below.
bp_abcd = Beampath(radius=0.2e-3, angle=-0.002)
bp_abcd.add_freespace(0.2)
bp_abcd.add_thinlens(0.025)
bp_abcd.add_freespace(0.05)
bp_abcd.add_thinlens(0.01)
# bp_abcd.add_image()
bp_abcd.plot()

bp_gauss = GaussianBeampath(radius=bp_abcd.radius[0], angle=bp_abcd.angle[0], wavelength = 800e-9)
bp_gauss.add_freespace(0.2)
bp_gauss.add_thinlens(0.025)
bp_gauss.add_freespace(0.05)
bp_gauss.add_thinlens(0.01)
# bp_gauss.add_image()
bp_gauss.plot()

print('radius ABCD: %.2f um' % np.abs(1e6*bp_abcd.radius[-1]))
print('radius Gaussian: %.2f um' % (1e6*bp_gauss.radius[-1]))
# -

# # Telescope from Thorlabs lenses

bp2 = Beampath(radius=5, angle=0, index=1, position=-50) # all 4 input parameters
bp2.add_freespace(50)
bp2.add_thicklens(1.5168, 6.5, 30.06, -172.0)
bp2.add_freespace(46.4+97.3)
bp2.add_thicklens(1.5168, 4, 353.3, -60.03)
bp2.add_freespace(100)
bp2.plot()

# # Display measured parameters

# +
bp3 = Beampath(0,0.25)
bp3.add_freespace(40)
bp3.add_thicklens(1.414, 20, radius_in=30)
bp3.add_freespace(10)
bp3.add_image()

# "plot_digit" keyword argument to change the number
# of displayed decimals (trailing zeros are trimmed)
# "figsize" to change the figure size (expect a tuple, default is (12,6))
bp3.plot(plot_digit=5, figsize=(10,3))

# Access to the beam parameter along the path, and the total ABCD matrix
print('Radius:\n', bp3.radius, '\n')
print('Angle:\n', bp3.angle, '\n')
print('Index:\n', bp3.index, '\n')
print('Position:\n', bp3.position, '\n')
print('ABCD matrix:\n', bp3.M)
# -

bp4 = Beampath() # point source by default
bp4.add_freespace(100)
bp4.add_thinlens(100) # image at infinity (object in focal plane of lens)
bp4.add_freespace(100) # Add a dummy exit plane (grey dashes)
bp4.add_image()
bp4.plot()
abcd_example.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# # $$CatBoost\ PredictionDiff \ Feature\ Importance\ Tutorial$$

# Sometimes it is very important to understand which feature made the greatest contribution to the final result. To do this, the CatBoost model has a get_feature_importance method.

import numpy as np
from catboost import CatBoost, Pool, datasets
from sklearn.model_selection import train_test_split

# First, let's prepare the dataset:

train_df, test_df = datasets.msrank_10k()

# Column 0 is the relevance label, column 1 the query/group id; the rest are features.
X_train, y_train, group_id_train = np.array(train_df.drop([0, 1], axis=1)), np.array(train_df[0]), np.array(train_df[1])
X_test, y_test, group_id_test = np.array(test_df.drop([0, 1], axis=1)), np.array(test_df[0]), np.array(test_df[1])

train_pool = Pool(X_train, y_train, group_id=group_id_train)
test_pool = Pool(X_test, y_test, group_id=group_id_test)

# Let's train CatBoost:

model = CatBoost({'iterations': 50, 'loss_function': 'YetiRank', 'verbose': False, 'random_seed': 42})
model.fit(train_pool);

# Catboost provides several types of feature importances. One of them is PredictionDiff: A vector with contributions of each feature to the RawFormulaVal difference for each pair of objects.

# Let's find such pair of objects in 1-st group in test pool that our model ranks in wrong order.

# +
# find 1st group (rows until the group id changes)
group_size = 1
while group_id_test[group_size] == group_id_test[0]:
    group_size += 1

# get predictions for the first group
target = y_test[:group_size]
prediction = model.predict(X_test[:group_size], prediction_type='RawFormulaVal')
# Materialise the triples as a list: in Python 3 ``zip`` returns a one-shot
# iterator, and we iterate over it twice below.
prediction = list(zip(prediction, target, range(group_size)))

# +
# find a wrong ranked pair of objects:
#  - the highest-scored object whose true relevance is 0
#  - the lowest-scored object whose true relevance is 3
# ``max``/``min`` compare the (score, index) tuples lexicographically, so the
# second element of the winning tuple is the row index we want. (The original
# ``int(np.max([...]))`` flattened the pairs and returned a scalar, not an index.)
best_irrelevant = max((score, idx) for score, label, idx in prediction if label == 0)
worst_relevant = min((score, idx) for score, label, idx in prediction if label == 3)
wrong_prediction_idxs = [best_irrelevant[1], worst_relevant[1]]

test_pool_slice = X_test[wrong_prediction_idxs]
# Show (raw score, true label) for the two selected rows.
list(zip(model.predict(test_pool_slice, prediction_type='RawFormulaVal'), target[wrong_prediction_idxs]))
# -

# Let's calculate PredictionDiff for these two objects and see most important features.
#
# As you can see, changing in the feature 133 could change model prediction.

prediction_diff = model.get_feature_importance(type='PredictionDiff', data=test_pool_slice, prettified=True)

prediction_diff.head(10)

model.plot_predictions(
    data=test_pool_slice,
    features_to_change=prediction_diff["Feature Id"][:3],
    plot=True);
catboost/tutorials/model_analysis/prediction_diff_feature_importance_tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- import numpy as np v = np.zeros(3) class Animal: pass dog1 = Animal() cat1 = Animal() isinstance(dog1, Animal) isinstance(1, Animal) isinstance(1, int) class Animal: def sleep(self): print "I am sleeping" def eat(self, food): print "I am eating", food dog1 = Animal() dog1.sleep() dog1.eat("banana") dog1.eat("an apple") class Animal: def __init__(self, name, weight): self.name = name self.__weight__ = weight def __add__(self, other): return Animal(self.name+"x"+other.name, self.__weight__+other.__weight__) def __repr__(self): return "My name is "+self.name def __str__(self): return "Animal: "+self.name def sleep(self): print self.name, "is sleeping" def eat(self, food): if self.__weight__ < 10: print "I am eating", food self.__weight__ += 1 else: print "no thanks" dog1 = Animal("doggy", 100) cat1 = Animal("kitty", 1) dog1.name dog1.weight dog1 + cat1 dog1.eat(cat1) class Animal: def __init__(self, name, weight): self.name = name self.__weight__ = weight def __add__(self, other): return Animal(self.name+"x"+other.name, self.__weight__+other.__weight__) def __repr__(self): return "My name is "+self.name def __str__(self): return "Animal: "+self.name def sleep(self): print self.name, "is sleeping" def eat(self, food): if self.__weight__ < 10: print "I am eating", food self.__weight__ += 1 else: print "no thanks" # + class Bird(Animal): def __init__(self, name, weight): Animal.__init__(self, name, weight) self.name = "!"+self.name+"!" def fly(self): print "look,", self.name, "is flying" def __str__(self): return "Bird: "+self.name class Fish(Animal): def swim(self): print "I am swimming" def __str__(self): return "Fish: "+self.name # - bird1 = Bird("birdy", 1) fish1 = Fish("fishy", 1) bird1.eat(fish1) bird1.fly() bird1 + fish1 fish1.swim()
oop2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import io
import requests
import numpy as np
import pandas as pd
from collections import namedtuple

# # Datasets

# ## Government Measurement Dataset

# Oxford Covid-19 Government Response Tracker (OxCGRT)
oxcgrt_url = 'https://raw.githubusercontent.com/OxCGRT/covid-policy-tracker/master/data/OxCGRT_latest.csv'
oxcgrt_data = requests.get(oxcgrt_url).content
oxcgrt_data = pd.read_csv(io.StringIO(oxcgrt_data.decode('utf-8')))

# ## Testing Cases Dataset

test_url = 'https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/testing/covid-testing-all-observations.csv'
test_data = requests.get(test_url).content
test_data = pd.read_csv(io.StringIO(test_data.decode('utf-8')))

# ## Confirmed Cases Dataset

confirmed_url = 'https://datahub.io/core/covid-19/r/time-series-19-covid-combined.csv'
confirmed_data = requests.get(confirmed_url).content
confirmed_data = pd.read_csv(io.StringIO(confirmed_data.decode('utf-8')))

# # Data Cleaning

# #### Due to different date formats adopted by different dataset, we need to unify date formats across different datasets.

# Vectorised pd.to_datetime over the whole column replaces the original
# row-by-row .apply(lambda x: pd.to_datetime(x, ...)) — identical result in a
# single C-level pass instead of one Python call per row.
oxcgrt_data['Date'] = pd.to_datetime(oxcgrt_data['Date'], format='%Y%m%d')
test_data['Date'] = pd.to_datetime(test_data['Date'], format='%Y-%m-%d')
confirmed_data['Date'] = pd.to_datetime(confirmed_data['Date'], format='%Y-%m-%d')

# #### Map from measurement name to its abbreviation and vice versa for government measure dataset.
a2m = {'C1': 'C1_School closing', 'C2': 'C2_Workplace closing', 'C3': 'C3_Cancel public events', 'C4': 'C4_Restrictions on gatherings', 'C5': 'C5_Close public transport', 'C6': 'C6_Stay at home requirements', 'C7': 'C7_Restrictions on internal movement', 'C8': 'C8_International travel controls', 'H1': 'H1_Public information campaigns'} m2a = {v: k for k, v in a2m.items()} # #### Government measurements is divided into 18 indicators (C1 ~ C8, E1 ~ E4, H1 ~ H5, and M1), and we focus on C1 to C8 and H1, # #### because E1 ~ E4 are fiscal and monetary policies and H2 ~ H5 are testing and contact tracing policies and investment in health system. # #### A "Stringency Index" is also provided to measure the strigency of government measures. # #### More descriptions can be found: https://www.bsg.ox.ac.uk/sites/default/files/2020-05/BSG-WP-2020-032-v5.0_0.pdf, # #### and https://www.bsg.ox.ac.uk/sites/default/files/Calculation%20and%20presentation%20of%20the%20Stringency%20Index.pdf # #### Note that this dataset has been modified officially on April 30th with note: https://www.bsg.ox.ac.uk/sites/default/files/OxCGRT.%20What%27s%20changed%2024%20April%202020.pdf # select C1 to C8, H1 and Stringency Index dc_measure_data = dict() Measure = namedtuple('Measure', 'C1 C2, C3, C4, C5, C6, C7, C8, H1, stringency') for index, row in oxcgrt_data.iterrows(): value = [] for abbr, measure in a2m.items(): if np.isnan(row[measure]): value.append(None) else: value.append(row[measure]) if np.isnan(row['StringencyIndexForDisplay']): value.append(None) else: value.append(row['StringencyIndexForDisplay']) key = (row['Date'], row['CountryName']) value = Measure(*value) dc_measure_data[key] = value # #### Due to incomplete reports by different governments, there are some invalid (NAN) values in the government measurement dataset. # #### We refill invalid values by the following steps: # 1. sort dictionary by keys which are tuples: first by country, second by date # 2. 
# refill None values using the value of the before/after days, as measures won't change significantly
# 3. delete a data sample if we cannot find a valid value in maximum 7 before/after days

# +
def _neighbor_value(data, key, field, step, max_days=7):
    """Return the first non-None `field` value found on a neighbouring day.

    Scans (date + step*1), (date + step*2), ... up to max_days-1 days away for
    the same country (step=-1 looks backwards in time, step=+1 forwards).
    Returns None when no valid neighbour exists within the window.
    """
    date, country = key
    for offset in range(1, max_days):
        neighbor = data.get((date + pd.DateOffset(step * offset), country))
        if neighbor is not None:
            value = getattr(neighbor, field)
            if value is not None:
                return value
    return None


# Sort by country first, then date, so earlier (already refilled) days of the
# same country are visited before later ones.
dc_measure_data = {elem[0]: elem[1] for elem in sorted(dc_measure_data.items(), key=lambda x: (x[0][1], x[0][0]))}

delete_keys = set()  # set: O(1) membership test in the final filtering pass
for key, value in dc_measure_data.items():
    for field, v in value._asdict().items():
        if v is not None:
            continue
        # Try up to 7 earlier days first, then up to 7 later days.
        # (Bug fix: the original stepped the "after" date with DateOffset(-1),
        # i.e. backwards, so the forward search never advanced past day -1.)
        fill = _neighbor_value(dc_measure_data, key, field, step=-1)
        if fill is None:
            fill = _neighbor_value(dc_measure_data, key, field, step=1)
        if fill is None:
            # No usable neighbour in either direction: drop the whole sample.
            delete_keys.add(key)
            break
        dc_measure_data[key] = dc_measure_data[key]._replace(**{field: fill})

dc_measure_data = {k: v for k, v in dc_measure_data.items() if k not in delete_keys}
# -

# #### In the test cases dataset, different countries report test numbers under one or more standards, including:
# #### 'tests performed', 'cases tested', 'people tested', ..., and 'unit unclear'.
# #### More information can be found: https://ourworldindata.org/covid-testing#our-checklist-for-covid-19-testing-data.
# #### To keep consistency, we only keep the test numbers under one standard which has the maximum available data samples. # #### Note that as long as the standard in any single country is the same along the timeline, the analysis is valid. # #### Steps are as follows: # Step 1: get the number of available data for each country under each standard sc_tested_dict = dict() # 'sc' means standard and country for index, row in test_data.iterrows(): entity = row['Entity'] country, standard = entity.split('-') country = country.strip() standard = standard.strip() if country not in sc_tested_dict.keys(): sc_tested_dict[country] = {standard: 1} else: if standard not in sc_tested_dict[country]: sc_tested_dict[country][standard] = 1 else: sc_tested_dict[country][standard] += 1 # Step 2: find the standard with the maximum number of available data for each country with some exceptional countries # + ssc_tested_dict = dict() # 'ssc' means single standard and country for country, value in sc_tested_dict.items(): sorted_value = {k: v for k, v in sorted(value.items(), key=lambda item: -item[1])} standard = list(sorted_value)[0] ssc_tested_dict[country] = standard ssc_tested_dict['United States'] = 'inconsistent units (COVID Tracking Project)' # - # Step 3: a). remove data that is not consistent with the selected standard; b). reformat Entity name for index, row in test_data.iterrows(): entity = row['Entity'] country, standard = entity.split('-') country = country.strip() standard = standard.strip() if standard != ssc_tested_dict[country]: test_data.drop(index, inplace=True) else: test_data.loc[index, 'Entity'] = country # #### Record the number of daily test cases using the number of cumulative cases. 
# + import pdb dc_tested_dict = dict() Tested = namedtuple('Tested', 'cumulative daily') for index, row in test_data.iterrows(): key = (row['Date'], row['Entity']) if (not np.isnan(row['Daily change in cumulative total'])) and (not np.isnan(row['Cumulative total'])): dc_tested_dict[key] = Tested(int(row['Cumulative total']), int(row['Daily change in cumulative total'])) else: if not np.isnan(row['Cumulative total']): dc_tested_dict[key] = Tested(int(row['Cumulative total']), None) # - # #### In the confirmed cases dataset, the number of cumulative confirmed cases in some countries are inconsistent, and we need to: # #### 1. replace the country name 'US' with 'United States' and 'Korea, South' with 'South Korea'. # #### 2. Remove invalid (NaN) number of confirmed cases. # #### 3. For some countries, combine all province/state statistics into a single country-level statistic. # + confirmed_data = confirmed_data.replace('US', 'United States') confirmed_data = confirmed_data.replace('Korea, South', 'South Korea') dc_confirmed_dict = dict() # 'dc' means date and country for index, row in confirmed_data.iterrows(): key = (row['Date'], row['Country/Region']) if not np.isnan(row['Confirmed']): if key in dc_confirmed_dict.keys(): dc_confirmed_dict[key] = dc_confirmed_dict[key] + int(row['Confirmed']) else: dc_confirmed_dict[key] = int(row['Confirmed']) # - # #### compute daily confirmed cases using the number of cumulative cases. 
# Derive the number of daily new confirmed cases from consecutive
# cumulative totals: daily(d) = cumulative(d) - cumulative(d-1).
Confirmed = namedtuple('Confirmed', 'cumulative daily')
for key, value in dc_confirmed_dict.items():
    # key is (date, country); look up the same country on the previous day.
    prev_date_key = (key[0] + pd.DateOffset(-1), key[1])
    if prev_date_key in dc_confirmed_dict:
        # Entries visited earlier in this loop already hold Confirmed
        # tuples, while unvisited entries still hold the raw cumulative int.
        prev = dc_confirmed_dict[prev_date_key]
        prev_cumulative = prev.cumulative if isinstance(prev, Confirmed) else prev
        dc_confirmed_dict[key] = Confirmed(value, value - prev_cumulative)
    else:
        # No record for the previous day, so the daily increment is unknown.
        dc_confirmed_dict[key] = Confirmed(value, None)

# # Date Integration
# #### Combine data from three datasets together using dictionary structure: key - (date, country), value - (Measure, Confirmed, Tested).
# #### Because the three datasets are collected by different organizations/groups, we need to filter out incomplete data samples.
# #### Each item represent a sample for a country in a day which appears in all three datasets.

dc_combined_data = dict()  # 'dc' means date and country
Combined = namedtuple('Combined', 'measure confirmed tested')
for key, value in dc_measure_data.items():
    # Keep only (date, country) pairs present in all three source datasets.
    if key in dc_confirmed_dict and key in dc_tested_dict:
        dc_combined_data[key] = Combined(value, dc_confirmed_dict[key], dc_tested_dict[key])

# # Have a look at the final combined dataset

print('Total number of samples: {}'.format(len(dc_combined_data)))

# +
import random

key = random.choice(list(dc_combined_data.keys()))
print('A random sample in the integrated dataset:')
print('key: ', key)
print('value: ', dc_combined_data[key])
# -

# ### Save the cleaned and integrated data into a file

# +
import pickle

filename = '../cleaned_integrated_data.pkl'
# Context manager closes the file even if dump() raises (the original
# opened and closed the handle manually).
with open(filename, 'wb') as f:
    pickle.dump(dc_combined_data, f)
# -

# ### Code to load the cleaned and integrated data

# +
with open(filename, 'rb') as fp:
    loaded = pickle.load(fp)

# Bug fix: verify the *reloaded* data, not the in-memory dict it was saved
# from — the original printed dc_combined_data here, so this cell never
# actually exercised the pickle round-trip.
print('Total number of samples: {}'.format(len(loaded)))
key = random.choice(list(loaded.keys()))
print('A random sample in the integrated dataset:')
print('key: ', key)
print('value: ', loaded[key])
# -
part2/data_clean_integration.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # Data Manipulation # :label:`sec_ndarray` # # In order to get anything done, we need some way to store and manipulate data. # Generally, there are two important things we need to do with data: (i) acquire # them; and (ii) process them once they are inside the computer. There is no # point in acquiring data without some way to store it, so let us get our hands # dirty first by playing with synthetic data. To start, we introduce the # $n$-dimensional array, which is also called the *tensor*. # # If you have worked with NumPy, the most widely-used # scientific computing package in Python, # then you will find this section familiar. # No matter which framework you use, # its *tensor class* (`ndarray` in MXNet, # `Tensor` in both PyTorch and TensorFlow) is similar to NumPy's `ndarray` with # a few killer features. # First, GPU is well-supported to accelerate the computation # whereas NumPy only supports CPU computation. # Second, the tensor class # supports automatic differentiation. # These properties make the tensor class suitable for deep learning. # Throughout the book, when we say tensors, # we are referring to instances of the tensor class unless otherwise stated. # # ## Getting Started # # In this section, we aim to get you up and running, # equipping you with the basic math and numerical computing tools # that you will build on as you progress through the book. # Do not worry if you struggle to grok some of # the mathematical concepts or library functions. # The following sections will revisit this material # in the context of practical examples and it will sink in. # On the other hand, if you already have some background # and want to go deeper into the mathematical content, just skip this section. # # + [markdown] origin_pos=2 tab=["pytorch"] # (**To start, we import `torch`. 
Note that though it's called PyTorch, we should # import `torch` instead of `pytorch`.**) # # + origin_pos=5 tab=["pytorch"] import torch # + [markdown] origin_pos=7 # [**A tensor represents a (possibly multi-dimensional) array of numerical values.**] # With one axis, a tensor is called a *vector*. # With two axes, a tensor is called a *matrix*. # With $k > 2$ axes, we drop the specialized names # and just refer to the object as a $k^\mathrm{th}$ *order tensor*. # # + [markdown] origin_pos=9 tab=["pytorch"] # PyTorch provides a variety of functions # for creating new tensors # prepopulated with values. # For example, by invoking `arange(n)`, # we can create a vector of evenly spaced values, # starting at 0 (included) # and ending at `n` (not included). # By default, the interval size is $1$. # Unless otherwise specified, # new tensors are stored in main memory # and designated for CPU-based computation. # # + origin_pos=12 tab=["pytorch"] x = torch.arange(12, dtype=torch.float32) x # + [markdown] origin_pos=14 # (**We can access a tensor's *shape***) (~~and the total number of elements~~) (the length along each axis) # by inspecting its `shape` property. # # + origin_pos=15 tab=["pytorch"] x.shape # + [markdown] origin_pos=16 # If we just want to know the total number of elements in a tensor, # i.e., the product of all of the shape elements, # we can inspect its size. # Because we are dealing with a vector here, # the single element of its `shape` is identical to its size. # # + origin_pos=18 tab=["pytorch"] x.numel() # + [markdown] origin_pos=20 # To [**change the shape of a tensor without altering # either the number of elements or their values**], # we can invoke the `reshape` function. # For example, we can transform our tensor, `x`, # from a row vector with shape (12,) to a matrix with shape (3, 4). # This new tensor contains the exact same values, # but views them as a matrix organized as 3 rows and 4 columns. 
# To reiterate, although the shape has changed, # the elements have not. # Note that the size is unaltered by reshaping. # # + origin_pos=21 tab=["pytorch"] X = x.reshape(3, 4) X # + [markdown] origin_pos=23 # Reshaping by manually specifying every dimension is unnecessary. # If our target shape is a matrix with shape (height, width), # then after we know the width, the height is given implicitly. # Why should we have to perform the division ourselves? # In the example above, to get a matrix with 3 rows, # we specified both that it should have 3 rows and 4 columns. # Fortunately, tensors can automatically work out one dimension given the rest. # We invoke this capability by placing `-1` for the dimension # that we would like tensors to automatically infer. # In our case, instead of calling `x.reshape(3, 4)`, # we could have equivalently called `x.reshape(-1, 4)` or `x.reshape(3, -1)`. # # Typically, we will want our matrices initialized # either with zeros, ones, some other constants, # or numbers randomly sampled from a specific distribution. # [**We can create a tensor representing a tensor with all elements # set to 0**] (~~or 1~~) # and a shape of (2, 3, 4) as follows: # # + origin_pos=25 tab=["pytorch"] torch.zeros((2, 3, 4)) # + [markdown] origin_pos=27 # Similarly, we can create tensors with each element set to 1 as follows: # # + origin_pos=29 tab=["pytorch"] torch.ones((2, 3, 4)) # + [markdown] origin_pos=31 # Often, we want to [**randomly sample the values # for each element in a tensor**] # from some probability distribution. # For example, when we construct arrays to serve # as parameters in a neural network, we will # typically initialize their values randomly. # The following snippet creates a tensor with shape (3, 4). # Each of its elements is randomly sampled # from a standard Gaussian (normal) distribution # with a mean of 0 and a standard deviation of 1. 
# # + origin_pos=33 tab=["pytorch"] torch.randn(3, 4) # + [markdown] origin_pos=35 # We can also [**specify the exact values for each element**] in the desired tensor # by supplying a Python list (or list of lists) containing the numerical values. # Here, the outermost list corresponds to axis 0, and the inner list to axis 1. # # + origin_pos=37 tab=["pytorch"] torch.tensor([[2, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) # + [markdown] origin_pos=39 # ## Operations # # This book is not about software engineering. # Our interests are not limited to simply # reading and writing data from/to arrays. # We want to perform mathematical operations on those arrays. # Some of the simplest and most useful operations # are the *elementwise* operations. # These apply a standard scalar operation # to each element of an array. # For functions that take two arrays as inputs, # elementwise operations apply some standard binary operator # on each pair of corresponding elements from the two arrays. # We can create an elementwise function from any function # that maps from a scalar to a scalar. # # In mathematical notation, we would denote such # a *unary* scalar operator (taking one input) # by the signature $f: \mathbb{R} \rightarrow \mathbb{R}$. # This just means that the function is mapping # from any real number ($\mathbb{R}$) onto another. # Likewise, we denote a *binary* scalar operator # (taking two real inputs, and yielding one output) # by the signature $f: \mathbb{R}, \mathbb{R} \rightarrow \mathbb{R}$. # Given any two vectors $\mathbf{u}$ and $\mathbf{v}$ *of the same shape*, # and a binary operator $f$, we can produce a vector # $\mathbf{c} = F(\mathbf{u},\mathbf{v})$ # by setting $c_i \gets f(u_i, v_i)$ for all $i$, # where $c_i, u_i$, and $v_i$ are the $i^\mathrm{th}$ elements # of vectors $\mathbf{c}, \mathbf{u}$, and $\mathbf{v}$. 
# Here, we produced the vector-valued # $F: \mathbb{R}^d, \mathbb{R}^d \rightarrow \mathbb{R}^d$ # by *lifting* the scalar function to an elementwise vector operation. # # The common standard arithmetic operators # (`+`, `-`, `*`, `/`, and `**`) # have all been *lifted* to elementwise operations # for any identically-shaped tensors of arbitrary shape. # We can call elementwise operations on any two tensors of the same shape. # In the following example, we use commas to formulate a 5-element tuple, # where each element is the result of an elementwise operation. # # ### Operations # # [**The common standard arithmetic operators # (`+`, `-`, `*`, `/`, and `**`) # have all been *lifted* to elementwise operations.**] # # + origin_pos=41 tab=["pytorch"] x = torch.tensor([1.0, 2, 4, 8]) y = torch.tensor([2, 2, 2, 2]) x + y, x - y, x * y, x / y, x ** y # The ** operator is exponentiation # + [markdown] origin_pos=43 # Many (**more operations can be applied elementwise**), # including unary operators like exponentiation. # # + origin_pos=45 tab=["pytorch"] torch.exp(x) # + [markdown] origin_pos=47 # In addition to elementwise computations, # we can also perform linear algebra operations, # including vector dot products and matrix multiplication. # We will explain the crucial bits of linear algebra # (with no assumed prior knowledge) in :numref:`sec_linear-algebra`. # # We can also [***concatenate* multiple tensors together,**] # stacking them end-to-end to form a larger tensor. # We just need to provide a list of tensors # and tell the system along which axis to concatenate. # The example below shows what happens when we concatenate # two matrices along rows (axis 0, the first element of the shape) # vs. columns (axis 1, the second element of the shape). 
# We can see that the first output tensor's axis-0 length ($6$) # is the sum of the two input tensors' axis-0 lengths ($3 + 3$); # while the second output tensor's axis-1 length ($8$) # is the sum of the two input tensors' axis-1 lengths ($4 + 4$). # # + origin_pos=49 tab=["pytorch"] X = torch.arange(12, dtype=torch.float32).reshape((3,4)) Y = torch.tensor([[2.0, 1, 4, 3], [1, 2, 3, 4], [4, 3, 2, 1]]) torch.cat((X, Y), dim=0), torch.cat((X, Y), dim=1) # + [markdown] origin_pos=51 # Sometimes, we want to [**construct a binary tensor via *logical statements*.**] # Take `X == Y` as an example. # For each position, if `X` and `Y` are equal at that position, # the corresponding entry in the new tensor takes a value of 1, # meaning that the logical statement `X == Y` is true at that position; # otherwise that position takes 0. # # + origin_pos=52 tab=["pytorch"] X == Y # + [markdown] origin_pos=53 # [**Summing all the elements in the tensor**] yields a tensor with only one element. # # + origin_pos=54 tab=["pytorch"] X.sum() # + [markdown] origin_pos=56 # ## Broadcasting Mechanism # :label:`subsec_broadcasting` # # In the above section, we saw how to perform elementwise operations # on two tensors of the same shape. Under certain conditions, # even when shapes differ, we can still [**perform elementwise operations # by invoking the *broadcasting mechanism*.**] # This mechanism works in the following way: # First, expand one or both arrays # by copying elements appropriately # so that after this transformation, # the two tensors have the same shape. # Second, carry out the elementwise operations # on the resulting arrays. 
# # In most cases, we broadcast along an axis where an array # initially only has length 1, such as in the following example: # # + origin_pos=58 tab=["pytorch"] a = torch.arange(3).reshape((3, 1)) b = torch.arange(2).reshape((1, 2)) a, b # + [markdown] origin_pos=60 # Since `a` and `b` are $3\times1$ and $1\times2$ matrices respectively, # their shapes do not match up if we want to add them. # We *broadcast* the entries of both matrices into a larger $3\times2$ matrix as follows: # for matrix `a` it replicates the columns # and for matrix `b` it replicates the rows # before adding up both elementwise. # # + origin_pos=61 tab=["pytorch"] a + b # + [markdown] origin_pos=62 # ## Indexing and Slicing # # Just as in any other Python array, elements in a tensor can be accessed by index. # As in any Python array, the first element has index 0 # and ranges are specified to include the first but *before* the last element. # As in standard Python lists, we can access elements # according to their relative position to the end of the list # by using negative indices. # # Thus, [**`[-1]` selects the last element and `[1:3]` # selects the second and the third elements**] as follows: # # + origin_pos=63 tab=["pytorch"] X[-1], X[1:3] # + [markdown] origin_pos=64 tab=["pytorch"] # Beyond reading, (**we can also write elements of a matrix by specifying indices.**) # # + origin_pos=66 tab=["pytorch"] X[1, 2] = 9 X # + [markdown] origin_pos=68 # If we want [**to assign multiple elements the same value, # we simply index all of them and then assign them the value.**] # For instance, `[0:2, :]` accesses the first and second rows, # where `:` takes all the elements along axis 1 (column). # While we discussed indexing for matrices, # this obviously also works for vectors # and for tensors of more than 2 dimensions. 
# # + origin_pos=69 tab=["pytorch"] X[0:2, :] = 12 X # + [markdown] origin_pos=71 # ## Saving Memory # # [**Running operations can cause new memory to be # allocated to host results.**] # For example, if we write `Y = X + Y`, # we will dereference the tensor that `Y` used to point to # and instead point `Y` at the newly allocated memory. # In the following example, we demonstrate this with Python's `id()` function, # which gives us the exact address of the referenced object in memory. # After running `Y = Y + X`, we will find that `id(Y)` points to a different location. # That is because Python first evaluates `Y + X`, # allocating new memory for the result and then makes `Y` # point to this new location in memory. # # + origin_pos=72 tab=["pytorch"] before = id(Y) Y = Y + X id(Y) == before # + [markdown] origin_pos=73 # This might be undesirable for two reasons. # First, we do not want to run around # allocating memory unnecessarily all the time. # In machine learning, we might have # hundreds of megabytes of parameters # and update all of them multiple times per second. # Typically, we will want to perform these updates *in place*. # Second, we might point at the same parameters from multiple variables. # If we do not update in place, other references will still point to # the old memory location, making it possible for parts of our code # to inadvertently reference stale parameters. # # + [markdown] origin_pos=74 tab=["pytorch"] # Fortunately, (**performing in-place operations**) is easy. # We can assign the result of an operation # to a previously allocated array with slice notation, # e.g., `Y[:] = <expression>`. # To illustrate this concept, we first create a new matrix `Z` # with the same shape as another `Y`, # using `zeros_like` to allocate a block of $0$ entries. 
# # + origin_pos=77 tab=["pytorch"] Z = torch.zeros_like(Y) print('id(Z):', id(Z)) Z[:] = X + Y print('id(Z):', id(Z)) # + [markdown] origin_pos=79 tab=["pytorch"] # [**If the value of `X` is not reused in subsequent computations, # we can also use `X[:] = X + Y` or `X += Y` # to reduce the memory overhead of the operation.**] # # + origin_pos=81 tab=["pytorch"] before = id(X) X += Y id(X) == before # + [markdown] origin_pos=83 # ## Conversion to Other Python Objects # # + [markdown] origin_pos=85 tab=["pytorch"] # [**Converting to a NumPy tensor (`ndarray`)**], or vice versa, is easy. # The torch Tensor and numpy array will share their underlying memory # locations, and changing one through an in-place operation will also # change the other. # # + origin_pos=87 tab=["pytorch"] A = X.numpy() B = torch.from_numpy(A) type(A), type(B) # + [markdown] origin_pos=89 # To (**convert a size-1 tensor to a Python scalar**), # we can invoke the `item` function or Python's built-in functions. # # + origin_pos=91 tab=["pytorch"] a = torch.tensor([3.5]) a, a.item(), float(a), int(a) # + [markdown] origin_pos=93 # ## Summary # # * The main interface to store and manipulate data for deep learning is the tensor ($n$-dimensional array). It provides a variety of functionalities including basic mathematics operations, broadcasting, indexing, slicing, memory saving, and conversion to other Python objects. # # # ## Exercises # # 1. Run the code in this section. Change the conditional statement `X == Y` in this section to `X < Y` or `X > Y`, and then see what kind of tensor you can get. # 1. Replace the two tensors that operate by element in the broadcasting mechanism with other shapes, e.g., 3-dimensional tensors. Is the result the same as expected? # # + [markdown] origin_pos=95 tab=["pytorch"] # [Discussions](https://discuss.d2l.ai/t/27) #
python/d2l-en/pytorch/chapter_preliminaries/ndarray.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# NOTE: this notebook targets Python 2 (see kernelspec and the
# `except OSError, e:` syntax below).

import identification_py2 as ob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import control as con
import glob  # for returning files having the specified path extension
import statistics as stats
import os  # checking for empty files
# %pylab inline

# ###### Passing all the data into arrays

# +
# Collect step-response logs.  The plain *.log files are the first
# recording of each run; *.log.N files are log-rotation remainders.
task_first = sorted(glob.glob('step_log_new/*/*task*.log'))  # .log files holding data for the first position
control_first = sorted(glob.glob('step_log_new/*/*control*.log'))
task_remaining = sorted(glob.glob('step_log_new/*/*task*.log.*'))  # remaining rotated log.'n' files
control_remaining = sorted(glob.glob('step_log_new/*/*control*.log.*'))
task = sorted(task_first + task_remaining)  # set of all task_velocity logs
control = sorted(control_first + control_remaining)  # set of all control logs

observations = len(task_first)  # total number of experiments conducted / observations taken
positions = int(len(task) / observations)  # number of points in the given task space

# Regroup the flat sorted file list so that task_full[i][j] is the log of
# position i in observation j.  The stride indexing `task[i + j*positions]`
# assumes the sorted order interleaves positions within each observation —
# TODO confirm this matches the directory/file naming convention.
task_full = []     # each element is a list of log files taken from the same position
control_full = []  # same layout for the control-output logs
for i in range(0, positions):
    task_full.append([])
    control_full.append([])
    for j in range(0, observations):
        task_full[i].append(task[i + (j * positions)])
        control_full[i].append(control[i + (j * positions)])

count = 0  # number of empty (zero-byte) log files found
for i in range(0, positions):
    for j in range(0, observations):
        if os.stat(task_full[i][j]).st_size == 0:
            count = count + 1

# Drop empty files from both lists.
# NOTE(review): this deletes from the list while indexing it with a fixed
# range, shifting later indices; the `observations-count` bound presumably
# compensates for at most `count` deletions per position — verify, as it
# can skip or mis-pair entries when empty files are scattered.
for i in range(0, positions):
    for j in range(0, observations-count):
        if os.stat(task_full[i][j]).st_size == 0:
            del(task_full[i][j])
            del(control_full[i][j])

# Reading all the data into a dataframe array; unreadable pairs are
# silently skipped by the bare except.
df_ist_soll = []
for i in range(0, positions):
    df_ist_soll.append([])
    for j in range(0, observations):
        try:
            df_ist_soll[i].append(ob.batch_read_data(control_full[i][j], task_full[i][j]))
        except:
            continue
# -

# ###### Manually changing the setpoint

# +
# Kept from a previous experiment: override the setpoint column by hand.
#for i in range(0, observations):
#    df_ist_soll[0][i].x_soll[df_ist_soll[0][i].x_soll > 0] = 0.15
#    df_ist_soll[3][i].x_soll[df_ist_soll[3][i].x_soll > 0] = 0.17
# -

# ###### Displaying all the observations

# +
# The first try/except ignores "directory already exists" when creating
# View_Data/; the second skips plotting of empty/missing data entries.
try:
    os.makedirs('View_Data/')
except OSError, e:
    if e.errno != os.errno.EEXIST:
        raise
    pass

# One figure per position, one subplot per observation.
for i in range(0, positions):
    fig = plt.figure(figsize = (10,30))
    fig.suptitle('Position %s'%(i + 1), fontsize = 20, fontweight = 'bold')
    for j in range(0, observations):
        try:
            ax = fig.add_subplot(observations, 1, j + 1)
            ax.set_title('Observation %s'%(j + 1))
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            plt.plot(df_ist_soll[i][j])
        except:
            pass
    plt.savefig('View_Data/Position %s.png'%(i + 1))
# -

# ###### Smoothing using Savgol filter

def smoothing_sg():
    """Smooth every timeseries in the module-level ``yout_array``.

    Returns two parallel lists: responses smoothed with a first-order
    and with a second-order Savitzky-Golay fit (via ``ob.smooth``).
    Relies on ``yout_array`` having been set by the calling cell.
    """
    smooth_1 = []  # smoothed according to first order
    smooth_2 = []  # smoothed according to second order
    for i in range(0, len(yout_array)):
        smooth_1.append(ob.smooth(yout_array[i], 1))
        smooth_2.append(ob.smooth(yout_array[i], 2))
    return smooth_1, smooth_2

# ###### PT1 Estimation

# +
# The steady-state value is calculated from the final values of the step
# response; a faulty step response therefore also corrupts the overall model.
#youto,to are the yout and t outputs from the pt1 and pt2 system #tf and delay are the transfer functions of the output and its delay #tdytdts is an array that contains all the above values in a sequential order def pt1(): to_1 = [] tf_1 = [] youto_1 = [] delay_1 = [] tdytdts_1 = [] delay_tf_1 = [] steady_state_1 = [] time_constant_1 = [] smooth_1 = smoothing_sg()[0] for i in range(0,len(smooth_1)): tdytdts_1.append(ob.pt1(smooth_1[i], t_array[i])) tf_1.append(tdytdts_1[i][0]) youto_1.append(tdytdts_1[i][1]) to_1.append(tdytdts_1[i][2]) delay_1.append(tdytdts_1[i][3]) time_constant_1.append(tdytdts_1[i][4]) steady_state_1.append(tdytdts_1[i][5]) return steady_state_1, time_constant_1, delay_1 # - # ###### PT2 Estimation '''PT2 modeling''' def pt2(): to_2 = [] tf_2 = [] zeta = [] youto_2 = [] delay_2 = [] tdytdts_2 = [] delay_tf_2 = [] steady_state_2 = [] time_constant_2 = [] smooth_2 = smoothing_sg()[1] try: for i in range(0,len(smooth_2)): tdytdts_2.append(ob.pt2(smooth_2[i], t_array[i])) tf_2.append(tdytdts_2[i][0]) youto_2.append(tdytdts_2[i][1]) to_2.append(tdytdts_2[i][2]) delay_2.append(tdytdts_2[i][3]) time_constant_2.append(tdytdts_2[i][4]) steady_state_2.append(tdytdts_2[i][5]) zeta.append(tdytdts_2[i][6]) except: pass return steady_state_2, time_constant_2, delay_2, zeta # ###### Plotting of ideal pt1 model from each point in the task space # + # Each of the timeseries in a position is modeled according to the pt1 modeling and the ideal model # in a position is calculated by taking the average of these individual models. 
# Fit one ideal PT1 model per task-space position and collect summary
# statistics of each ideal model's output timeseries.
system_matrix_pt1 = []  # state-space parameters of all ideal models
mean_matrix_pt1 = []
median_matrix_pt1 = []
std_matrix_pt1 = []  # std = standard deviation
var_matrix_pt1 = []  # var = variance
model_pos_pt1 = []   # ideal model as a time series, one per position
yout_full_pt1 = []
model_time_pt1 = []
model_output_pt1 = []
for i in range(0, positions):
    try:
        # xin_array, yout_array, t_array = ob.strip_multiply(df_ist_soll[i])
        xin_array, yout_array, t_array, m_factor = ob.unit_response(df_ist_soll[i])
        steady_state_1, time_constant_1, delay_1 = pt1()
        ideal_tf_pt1, ideal_model_output_pt1, ideal_model_time_pt1 = ob.ideal_pt1(steady_state_1, time_constant_1, delay_1)
    except:
        # Positions whose data cannot be fitted are skipped entirely,
        # so these lists may end up shorter than `positions`.
        continue
    yout_full_pt1.append(yout_array)
    model_pos_pt1.append(ideal_model_output_pt1)
    mean_matrix_pt1.append(stats.mean(ideal_model_output_pt1))
    median_matrix_pt1.append(stats.median(ideal_model_output_pt1))
    std_matrix_pt1.append(stats.pstdev(ideal_model_output_pt1))
    var_matrix_pt1.append(stats.variance(ideal_model_output_pt1))
    plt.plot(ideal_model_time_pt1, ideal_model_output_pt1, label = 'position %s ideal model'%(i+1))
    plt.legend()
    plt.savefig('model_pt1.png')  # re-saved each iteration; file always holds all curves so far
    model_time_pt1.append(ideal_model_time_pt1)
    model_output_pt1.append(ideal_model_output_pt1)
    system_matrix_pt1.append(ob.ss(ideal_tf_pt1))
# -

# ###### Plotting of ideal pt2 model from each point in the task space

# Same procedure as above, with the second-order (PT2) fit.
system_matrix_pt2 = []
mean_matrix_pt2 = []
median_matrix_pt2 = []
std_matrix_pt2 = []  # std = standard deviation
var_matrix_pt2 = []  # var = variance
model_pos_pt2 = []   # ideal model as a time series, one per position
yout_full_pt2 = []
model_time_pt2 = []
model_output_pt2 = []
m_factor_array = []  # scaling factors, used later in model validation plotting
for i in range(0, positions):
    try:
        #xin_array, yout_array, t_array = ob.strip_multiply(df_ist_soll[i])
        xin_array, yout_array, t_array, m_factor = ob.unit_response(df_ist_soll[i])
        steady_state_2, time_constant_2, delay_2, zeta = pt2()
        ideal_tf_pt2, ideal_model_output_pt2, ideal_model_time_pt2 = ob.ideal_pt2(steady_state_2, time_constant_2, delay_2, zeta)
    except:
        continue
    yout_full_pt2.append(yout_array)
    model_pos_pt2.append(ideal_model_output_pt2)
    mean_matrix_pt2.append(stats.mean(ideal_model_output_pt2))
    median_matrix_pt2.append(stats.median(ideal_model_output_pt2))
    std_matrix_pt2.append(stats.pstdev(ideal_model_output_pt2))
    var_matrix_pt2.append(stats.variance(ideal_model_output_pt2))
    plt.plot(ideal_model_time_pt2, ideal_model_output_pt2, label = 'position %s ideal model'%(i+1))
    plt.legend()
    plt.savefig('model_pt2.png')
    model_time_pt2.append(ideal_model_time_pt2)
    model_output_pt2.append(ideal_model_output_pt2)
    m_factor_array.append(mean(m_factor))  # `mean` is the %pylab (numpy) one
    system_matrix_pt2.append(ob.ss(ideal_tf_pt2))

# ###### Displaying statistical output of each positions in a text file

# +
quant_matrix_pt1 = []
with open("Statistical_Output_pt1.txt", "w") as text_file:
    text_file.write('###########################\n')
    text_file.write(' STATISTICAL INFORMATION \n')
    text_file.write('###########################\n')
    for i in range(0, len(model_pos_pt1)):
        text_file.write('Position %s\n'%(i+1))
        text_file.write('Mean:%s\n' %mean_matrix_pt1[i])
        text_file.write('Median:%s\n' %median_matrix_pt1[i])
        text_file.write('Standard Deviation:{0}\n' .format(std_matrix_pt1[i]))
        text_file.write('Variance:%s\n' %var_matrix_pt1[i])
        # Bug fix: the original left a dangling line continuation here
        # ("text_file.write('Quantiles...' \") with its argument commented
        # out mid-edit — a SyntaxError.  The statement is completed so the
        # quantiles are both written to the file and collected in
        # quant_matrix_pt1 (as the half-finished edit intended).
        quants = pd.Series(model_pos_pt1[i]).quantile([.25, .5, .75]).values
        text_file.write('Quantiles[0.25, 0.50, 0.75]:%s\n' % quants)
        quant_matrix_pt1.append(quants)
        text_file.write('Min:%s\n' %min(model_pos_pt1[i]))
        text_file.write('Max:%s\n\n' %max(model_pos_pt1[i]))

quant_matrix_pt2 = []
with open("Statistical_Output_pt2.txt", "w") as text_file:
    text_file.write('###########################\n')
    text_file.write(' STATISTICAL INFORMATION \n')
    text_file.write('###########################\n')
    for i in range(0, len(model_pos_pt2)):
        text_file.write('Position %s\n'%(i+1))
        text_file.write('Mean:%s\n' %mean_matrix_pt2[i])
        text_file.write('Median:%s\n' %median_matrix_pt2[i])
        text_file.write('Standard Deviation:{0}\n' .format(std_matrix_pt2[i]))
        text_file.write('Variance:%s\n' %var_matrix_pt2[i])
        # Same bug fix as the pt1 block above.
        quants = pd.Series(model_pos_pt2[i]).quantile([.25, .5, .75]).values
        text_file.write('Quantiles[0.25, 0.50, 0.75]:%s\n' % quants)
        quant_matrix_pt2.append(quants)
        text_file.write('Min:%s\n' %min(model_pos_pt2[i]))
        text_file.write('Max:%s\n\n' %max(model_pos_pt2[i]))
# -

# ###### Dataframe that contains statistical info of all ideal models

# +
d_pt1 = {'Position': range(1, positions+1), 'Mean': mean_matrix_pt1, 'Median': median_matrix_pt1, 'Std_Dev': std_matrix_pt1,\
         'Variance': var_matrix_pt1, 'Quantile': quant_matrix_pt1}  # data to pass to the DataFrame
cols_pt1 = ['Position', 'Mean', 'Median', 'Std_Dev', 'Variance', 'Quantile']  # column names
# NOTE(review): if any position was skipped during fitting, the statistic
# lists are shorter than `positions`, the constructor raises, and the bare
# except leaves df_ideal_pt1 undefined — the next line then raises NameError.
try:
    df_ideal_pt1 = pd.DataFrame(data = d_pt1)
except:
    pass
df_ideal_pt1 = df_ideal_pt1[cols_pt1]

d_pt2 = {'Position': range(1, positions+1), 'Mean': mean_matrix_pt2, 'Median': median_matrix_pt2, 'Std_Dev': std_matrix_pt2,\
         'Variance': var_matrix_pt2, 'Quantile': quant_matrix_pt2}  # data to pass to the DataFrame
cols_pt2 = ['Position', 'Mean', 'Median', 'Std_Dev', 'Variance', 'Quantile']  # column names
try:
    df_ideal_pt2 = pd.DataFrame(data = d_pt2)
except:
    pass
df_ideal_pt2 = df_ideal_pt2[cols_pt2]
# -

# ###### Statistical values of all the ideal models in a textfile

# +
with open("All_Model_Statistical_Output_pt1.txt", "w") as text_file:
    text_file.write('###########################\n')
    text_file.write(' STATISTICAL INFORMATION \n')
    text_file.write('###########################\n')
    for i in range(0, positions):
        text_file.write('\nPosition %s\n'%(i+1))
        text_file.write('Obs Mean Median Standard Deviation Variance \
Quantile[.25, .5, .75]\n')
        for j in range(0, observations):
            try:
                text_file.write('%s %s %s %s %s %s\n'\
                    %((j+1), stats.mean(yout_full_pt1[i][j]), \
                    stats.median(yout_full_pt1[i][j]), \
                    stats.pstdev(yout_full_pt1[i][j]),\
                    stats.variance(yout_full_pt1[i][j]),pd.Series(yout_full_pt1[i][j]).quantile([.25, .5, .75]).values))
            except:
                continue

with open("All_Model_Statistical_Output_pt2.txt", "w") as text_file:
    text_file.write('###########################\n')
    text_file.write(' STATISTICAL INFORMATION \n')
    text_file.write('###########################\n')
    for i in range(0, positions):
        text_file.write('\nPosition %s\n'%(i+1))
        text_file.write('Obs Mean Median Standard Deviation Variance \
Quantile[.25, .5, .75]\n')
        for j in range(0, observations):
            try:
                text_file.write('%s %s %s %s %s %s\n'\
                    %((j+1), stats.mean(yout_full_pt2[i][j]), \
                    stats.median(yout_full_pt2[i][j]), \
                    stats.pstdev(yout_full_pt2[i][j]),\
                    stats.variance(yout_full_pt2[i][j]),pd.Series(yout_full_pt2[i][j]).quantile([.25, .5, .75]).values))
            except:
                continue
# -

# ###### Statistical values of all the model timeseries in a dataframe

# +
pos_matrix_pt1 = []
obs_matrix_pt1 = []
mean_matrix_pt1 = []
median_matrix_pt1 = []
std_matrix_pt1 = []
var_matrix_pt1 = []
quant_matrix_pt1 = []
for i in range(0, positions):
    for j in range(0, observations):
        # Bug fix: compute every statistic *before* appending anything.
        # The original appended as it went and, on failure, only rolled
        # back the position/observation entries — if e.g. variance()
        # failed after mean() succeeded, the statistic lists ended up
        # misaligned with each other.
        try:
            mean_val = stats.mean(yout_full_pt1[i][j])
            median_val = stats.median(yout_full_pt1[i][j])
            std_val = stats.pstdev(yout_full_pt1[i][j])
            var_val = stats.variance(yout_full_pt1[i][j])
            quant_val = pd.Series(yout_full_pt1[i][j]).quantile([.25, .5, .75]).values
        except:
            continue
        pos_matrix_pt1.append(i+1)
        obs_matrix_pt1.append(j+1)
        mean_matrix_pt1.append(mean_val)
        median_matrix_pt1.append(median_val)
        std_matrix_pt1.append(std_val)
        var_matrix_pt1.append(var_val)
        quant_matrix_pt1.append(quant_val)
d_pt1 = {'Position': pos_matrix_pt1, 'Observation': obs_matrix_pt1, 'Mean': mean_matrix_pt1, 'Median': median_matrix_pt1, 'Std_Dev': std_matrix_pt1,\
         'Variance': var_matrix_pt1, 'Quantile': quant_matrix_pt1}
cols_pt1 = ['Position', 'Observation', 'Mean', 'Median', 'Std_Dev', 'Variance', 'Quantile']
df_all_pt1 = pd.DataFrame(data = d_pt1)
df_all_pt1 = df_all_pt1[cols_pt1]

pos_matrix_pt2 = []
obs_matrix_pt2 = []
mean_matrix_pt2 = []
median_matrix_pt2 = []
std_matrix_pt2 = [] var_matrix_pt2 = [] quant_matrix_pt2 = [] for i in range(0, positions): for j in range(0, observations): try: pos_matrix_pt2.append(i+1) obs_matrix_pt2.append(j+1) mean_matrix_pt2.append(stats.mean(yout_full_pt2[i][j])) median_matrix_pt2.append(stats.median(yout_full_pt2[i][j])) std_matrix_pt2.append(stats.pstdev(yout_full_pt2[i][j])) var_matrix_pt2.append(stats.variance(yout_full_pt2[i][j])) quant_matrix_pt2.append(pd.Series(yout_full_pt2[i][j]).quantile([.25, .5, .75]).values) except: del pos_matrix_pt2[-1] del obs_matrix_pt2[-1] continue d_pt2 = {'Position': pos_matrix_pt2, 'Observation': obs_matrix_pt2, 'Mean': mean_matrix_pt2, 'Median': median_matrix_pt2, 'Std_Dev': std_matrix_pt2,\ 'Variance': var_matrix_pt2, 'Quantile': quant_matrix_pt2} cols_pt2 = ['Position', 'Observation', 'Mean', 'Median', 'Std_Dev', 'Variance', 'Quantile'] df_all_pt2 = pd.DataFrame(data = d_pt2) df_all_pt2 = df_all_pt2[cols_pt2] # - # ###### State Space Parameters of all the ideal models in a textfile # + with open("State_Space_Parameters_pt1.txt", "w") as text_file: text_file.write('###########################\n') text_file.write(' STATE SPACE PARAMETERS \n') text_file.write('###########################\n') for i in range(0, positions): text_file.write('\nPosition %s\n'%(i+1)) text_file.write('%s'%system_matrix_pt1[i]) text_file.write('\n') with open("State_Space_Parameters_pt2.txt", "w") as text_file: text_file.write('###########################\n') text_file.write(' STATE SPACE PARAMETERS \n') text_file.write('###########################\n') for i in range(0, positions): text_file.write('\nPosition %s\n'%(i+1)) text_file.write('%s'%system_matrix_pt2[i]) text_file.write('\n') # - # ###### Model Validation try: os.makedirs('Model_Validation/') except OSError, e: if e.errno != os.errno.EEXIST: raise pass for i in range(0, positions): fig = plt.figure(figsize = (5,4)) fig.suptitle('Position %s'%(i + 1), fontsize = 20, fontweight = 'bold') plt.plot(model_time_pt2[i], 
             model_output_pt2[i], '--r', label = 'ideal pt2 model')
    plt.plot(model_time_pt1[i], model_output_pt1[i], '--b', label = 'ideal pt1 model')
    plt.legend()
    # Overlay every available measured/scaled observation for this position.
    for j in range(0, observations):
        try:
            plt.tight_layout(rect=[0, 0.03, 1, 0.95])
            plt.plot(df_ist_soll[i][j] * m_factor_array[j])
        except:
            # Missing observations are simply not drawn.
            pass
    plt.savefig('Model_Validation/Position %s model.png'%(i+1))
Process_Modeling_py2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 11</font>
#
# ## Download: http://github.com/dsacademybr

# Python language version used in this notebook
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())

# ## Prevendo a Ocorrência de Diabetes

from IPython.display import Image
Image('Workflow.png')

# ## Conjunto de Dados do Repositório de Machine Learning da UCI / Kaggle
# https://www.kaggle.com/uciml/pima-indians-diabetes-database/data

# Import the required modules
import pandas as pd
import matplotlib as mat
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline

pd.__version__

mat.__version__

# Load the dataset
df = pd.read_csv("pima-data.csv")

# Shape of the data
df.shape

# First rows of the dataset
df.head(5)

# Last rows of the dataset
df.tail(5)

# Check whether there are null values
df.isnull().values.any()


# Plot the correlation between the variables.
# Correlation does not imply causation.
def plot_corr(df, size=10):
    corr = df.corr()
    fig, ax = plt.subplots(figsize = (size, size))
    ax.matshow(corr)                                    # heatmap of the correlation matrix
    plt.xticks(range(len(corr.columns)), corr.columns)  # one tick per column
    plt.yticks(range(len(corr.columns)), corr.columns)


# Draw the plot
plot_corr(df)

# Correlation as a table.
# Correlation coefficient:
# +1 = strong positive correlation
#  0 = no correlation
# -1 = strong negative correlation
df.corr()

# Map the boolean target to integer classes
diabetes_map = {True : 1, False : 0}

# Apply the mapping to the dataset
df['diabetes'] = df['diabetes'].map(diabetes_map)

df.head(5)

# Class balance of the target
num_true = len(df.loc[df['diabetes'] == True])
num_false = len(df.loc[df['diabetes'] == False])
print("Número de Casos Verdadeiros: {0} ({1:2.2f}%)".format(num_true, (num_true/ (num_true + num_false)) * 100))
print("Número de Casos Falsos : {0} ({1:2.2f}%)".format(num_false, (num_false/ (num_true + num_false)) * 100))

# ## Spliting
#
# 70% para dados de treino e 30% para dados de teste

from IPython.display import Image
Image('Treinamento.png')

import sklearn as sk
sk.__version__

from sklearn.model_selection import train_test_split

# Feature selection (predictor variables)
atributos = ['num_preg', 'glucose_conc', 'diastolic_bp', 'thickness', 'insulin', 'bmi', 'diab_pred', 'age']

# Target variable
atrib_prev = ['diabetes']

# Build the arrays
X = df[atributos].values
Y = df[atrib_prev].values

X

Y

# Split ratio
split_test_size = 0.30

# Train/test split (fixed seed for reproducibility)
X_treino, X_teste, Y_treino, Y_teste = train_test_split(X, Y, test_size = split_test_size, random_state = 42)

# Print the resulting proportions
print("{0:0.2f}% nos dados de treino".format((len(X_treino)/len(df.index)) * 100))
print("{0:0.2f}% nos dados de teste".format((len(X_teste)/len(df.index)) * 100))

X_treino

# ## Verificando o Split

# +
print("Original True : {0} ({1:0.2f}%)".format(len(df.loc[df['diabetes'] == 1]), (len(df.loc[df['diabetes'] ==1])/len(df.index) * 100)))
print("Original False : {0} ({1:0.2f}%)".format(len(df.loc[df['diabetes'] == 0]), (len(df.loc[df['diabetes'] == 0])/len(df.index) * 100)))
print("")
print("Training True : {0} ({1:0.2f}%)".format(len(Y_treino[Y_treino[:] == 1]), (len(Y_treino[Y_treino[:] == 1])/len(Y_treino) * 100)))
print("Training False : {0} ({1:0.2f}%)".format(len(Y_treino[Y_treino[:] == 0]), (len(Y_treino[Y_treino[:] == 0])/len(Y_treino) * 100)))
print("")
print("Test True : {0} ({1:0.2f}%)".format(len(Y_teste[Y_teste[:] == 1]), (len(Y_teste[Y_teste[:] == 1])/len(Y_teste) * 100)))
print("Test False : {0} ({1:0.2f}%)".format(len(Y_teste[Y_teste[:] == 0]), (len(Y_teste[Y_teste[:] == 0])/len(Y_teste) * 100)))
# -

# ## Valores Missing Ocultos

# Check for null values again
df.isnull().values.any()

df.head(5)

# Zeros in these columns are physiologically impossible, i.e. hidden missing data
print("# Linhas no dataframe {0}".format(len(df)))
print("# Linhas missing glucose_conc: {0}".format(len(df.loc[df['glucose_conc'] == 0])))
print("# Linhas missing diastolic_bp: {0}".format(len(df.loc[df['diastolic_bp'] == 0])))
print("# Linhas missing thickness: {0}".format(len(df.loc[df['thickness'] == 0])))
print("# Linhas missing insulin: {0}".format(len(df.loc[df['insulin'] == 0])))
print("# Linhas missing bmi: {0}".format(len(df.loc[df['bmi'] == 0])))
print("# Linhas missing age: {0}".format(len(df.loc[df['age'] == 0])))

# ## Tratando Dados Missing - Impute

# Replace zero values with the column mean
from sklearn.impute import SimpleImputer

# +
# Create the imputer
preenche_0 = SimpleImputer(missing_values = 0, strategy = "mean")

# Fit on the training data only and apply the *training* statistics to the
# test set.
# FIX: the original also called fit_transform() on X_teste, which re-fits the
# imputer on the test data and leaks test-set information into preprocessing.
X_treino = preenche_0.fit_transform(X_treino)
X_teste = preenche_0.transform(X_teste)
# -

X_treino

# ## 50 a 80% do tempo de trabalho de um Cientista de Dados é usado na preparação dos dados.

# ## Construindo e treinando o modelo

# Naive Bayes classifier
from sklearn.naive_bayes import GaussianNB

# Create the predictive model
modelo_v1 = GaussianNB()

# Train the model (ravel() flattens the (n, 1) target into a 1-d array)
modelo_v1.fit(X_treino, Y_treino.ravel())

# ## Verificando a exatidão no modelo nos dados de treino

from sklearn import metrics

nb_predict_train = modelo_v1.predict(X_treino)
print("Exatidão (Accuracy): {0:.4f}".format(metrics.accuracy_score(Y_treino, nb_predict_train)))
print()

# ## Verificando a exatidão no modelo nos dados de teste

nb_predict_test = modelo_v1.predict(X_teste)
print("Exatidão (Accuracy): {0:.4f}".format(metrics.accuracy_score(Y_teste, nb_predict_test)))
print()

# ## Métricas

from IPython.display import Image
Image('ConfusionMatrix.jpg')

# +
# Confusion matrix (positive class listed first)
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(Y_teste, nb_predict_test, labels = [1, 0])))
print("")
print("Classification Report")
print(metrics.classification_report(Y_teste, nb_predict_test, labels = [1, 0]))
# -

# # Otimizando o modelo com RandomForest

from sklearn.ensemble import RandomForestClassifier

modelo_v2 = RandomForestClassifier(random_state = 42)
modelo_v2.fit(X_treino, Y_treino.ravel())

# Accuracy on the training data
rf_predict_train = modelo_v2.predict(X_treino)
print("Exatidão (Accuracy): {0:.4f}".format(metrics.accuracy_score(Y_treino, rf_predict_train)))

# Accuracy on the test data
rf_predict_test = modelo_v2.predict(X_teste)
print("Exatidão (Accuracy): {0:.4f}".format(metrics.accuracy_score(Y_teste, rf_predict_test)))
print()

# +
print("Confusion Matrix")
print("{0}".format(metrics.confusion_matrix(Y_teste, rf_predict_test, labels = [1, 0])))
print("")
print("Classification Report")
print(metrics.classification_report(Y_teste, rf_predict_test, labels = [1, 0]))
# -

# ## Regressão Logística

from sklearn.linear_model import LogisticRegression

# Third model version: logistic regression
modelo_v3 = LogisticRegression(C = 0.7, random_state = 42, max_iter = 1000)
modelo_v3.fit(X_treino, Y_treino.ravel())
lr_predict_test = modelo_v3.predict(X_teste)
print("Exatidão (Accuracy): {0:.4f}".format(metrics.accuracy_score(Y_teste, lr_predict_test)))
print()
print("Classification Report")
print(metrics.classification_report(Y_teste, lr_predict_test, labels = [1, 0]))

# +
### Resumindo

## Exatidão nos dados de teste

# Modelo usando algoritmo Naive Bayes = 0.7359
# Modelo usando algoritmo Random Forest = 0.7400
# Modelo usando algoritmo Regressão Logística = 0.7446
# NOTE(review): these figures predate the X_teste imputation fix above and may
# shift slightly when the notebook is re-run.
# -

# ## Fazendo Previsões Com o Modelo Treinado

import pickle

# Persist the trained model for later use.
# FIX: the original passed an unclosed open() handle to pickle; use a context
# manager so the file is always closed/flushed.
filename = 'modelo_treinado_v3.sav'
with open(filename, 'wb') as f:
    pickle.dump(modelo_v3, f)

X_teste

# Load the model and predict on new data
# (X_teste, Y_teste should be new data prepared with the same cleaning and
# transformation procedure)
with open(filename, 'rb') as f:
    loaded_model = pickle.load(f)
resultado1 = loaded_model.predict(X_teste[15].reshape(1, -1))
resultado2 = loaded_model.predict(X_teste[18].reshape(1, -1))
print(resultado1)
print(resultado2)

# # Fim

# Interessado(a) em conhecer os cursos e formações da DSA? Confira aqui nosso catálogo de cursos:
#
# https://www.datascienceacademy.com.br/pages/todos-os-cursos-dsa

# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
pyfund/Cap11/DSA-Python-Cap11-Machine-Learning.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Before your start:
# - Read the README.md file
# - Comment as much as you can and use the resources in the README.md file
# - Happy learning!

# Import reduce from functools, numpy and pandas
import numpy as np
import pandas as pd
from functools import reduce

# # Challenge 1 - Mapping
#
# #### We will use the map function to clean up words in a book.
#
# In the following cell, we will read a text file containing the book The Prophet by <NAME>.

# +
# Run this code:
location = '../data/58585-0.txt'
with open(location, 'r', encoding="utf8") as f:
    prophet = f.read().split(' ')
# -

# #### Let's remove the first 568 words since they contain information about the book but are not part of the book itself.
#
# Do this by removing from `prophet` elements 0 through 567 of the list (you can also do this by keeping elements 568 through the last element).

# +
# FIX: the exercise asks for the first 568 words (front matter) to be removed,
# but the original left the removal commented out, so the header stayed in.
prophet = prophet[568:]
print(prophet)
# -

# If you look through the words, you will find that many words have a reference attached to them. For example, let's look at words 1 through 10.

prophet[0:10]


# #### The next step is to create a function that will remove references.
#
# We will do this by splitting the string on the `{` character and keeping only the part before this character. Write your function below.

def reference(a):
    '''
    Input: A string
    Output: The string with references removed
    Example:
        Input: 'the{7}'
        Output: 'the'
    '''
    # References look like '{7}': keep everything before the last '{'.
    return a.rsplit("{", 1)[0]


print(reference("the{7}"))

# Now that we have our function, use the `map()` function to apply this function to our book, The Prophet. Return the resulting list to a new list called `prophet_reference`.

# +
prophet_reference = list(map(reference, prophet))
print(prophet_reference)


# -

# Another thing you may have noticed is that some words contain a line break. Let's write a function to split those words. Our function will return the string split on the character `\n`. Write your function in the cell below.

# +
def line_break(a):
    '''
    Input: A string
    Output: A list of strings split on the line break (\n) character
    Example:
        Input: 'the\nbeloved'
        Output: ['the', 'beloved']
    '''
    return a.split("\n")


print(line_break("the\nbeloved"))
# -

# Apply the `line_break` function to the `prophet_reference` list. Name the new list `prophet_line`.

# +
prophet_line = list(map(line_break, prophet_reference))
print(prophet_line)
# -

# If you look at the elements of `prophet_line`, you will see that the function returned lists and not strings. Our list is now a list of lists. Flatten the list using list comprehension. Assign this new list to `prophet_flat`.

# +
prophet_flat = [item for sublist in prophet_line for item in sublist]
print(prophet_flat)


# -

# # Challenge 2 - Filtering
#
# When printing out a few words from the book, we see that there are words that we may not want to keep if we choose to analyze the corpus of text. Below is a list of words that we would like to get rid of. Create a function that will return false if it contains a word from the list of words specified and true otherwise.

# +
def word_filter(x):
    '''
    Input: A string
    Output: True if the word is not in the specified list and False if the word is in the list.
    Example:
        word list = ['and', 'the']
        Input: 'and'
        Output: False
        Input: 'John'
        Output: True
    '''
    word_list = ['and', 'the', 'a', 'an']
    return x not in word_list


# -

# Use the `filter()` function to filter out the words speficied in the `word_filter()` function. Store the filtered list in the variable `prophet_filter`.

prophet_filter = list(filter(word_filter, prophet_flat))
print(prophet_filter)


# # Bonus Challenge
#
# Rewrite the `word_filter` function above to not be case sensitive.

# +
def word_filter_case(x):
    # Same as word_filter, but case-insensitive via casefold().
    word_list = ['and', 'the', 'a', 'an']
    return x.casefold() not in word_list


prophet_filter_case = list(filter(word_filter_case, prophet_flat))
print(prophet_filter_case)


# -

# # Challenge 3 - Reducing
#
# #### Now that we have significantly cleaned up our text corpus, let's use the `reduce()` function to put the words back together into one long string separated by spaces.
#
# We will start by writing a function that takes two strings and concatenates them together with a space between the two strings.

def concat_space(a, b):
    '''
    Input: Two strings
    Output: A single string separated by a space
    Example:
        Input: 'John', 'Smith'
        Output: '<NAME>'
    '''
    return a + " " + b


concat_space('John', 'Smith')

# Use the function above to reduce the text corpus in the list `prophet_filter` into a single string. Assign this new string to the variable `prophet_string`.

# +
# NOTE(review): the instructions say to reduce prophet_filter, but the original
# reduced the case-insensitive list; that behavior is kept.
prophet_string = reduce(concat_space, prophet_filter_case)
print(prophet_string)
# -

# # Challenge 4 - Applying Functions to DataFrames
#
# #### Our next step is to use the apply function to a dataframe and transform all cells.
#
# To do this, we will connect to Ironhack's database and retrieve the data from the *pollution* database. Select the *beijing_pollution* table and retrieve its data.

# Use the following link to download the data
# url = "https://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv"

# +
# TODO(review): absolute, machine-specific path — consider a relative path.
data = pd.read_csv('C:/Users/denis/Desktop/Ironhack/ironhackLabs/module-1/Map-Reduce-Filter/data/PRSA_data_2010.1.1-2014.12.31.csv', sep=',')
# -

# Let's look at the data using the `head()` function.

data.head()


# The next step is to create a function that divides a cell by 24 to produce an hourly figure. Write the function below.

def hourly(x):
    '''
    Input: A numerical value
    Output: The value divided by 24
    Example:
        Input: 48
        Output: 2.0
    '''
    return x/24


# Apply this function to the columns `Iws`, `Is`, and `Ir`. Store this new dataframe in the variable `pm25_hourly`.

# +
# Divide only the cumulated wind/snow/rain columns by 24; leave the rest as-is.
pm25_hourly = data.apply(lambda x: x/24 if x.name in ['Iws', 'Is', 'Ir'] else x)
pm25_hourly
# -

pm25_hourly['Iws'].count()


# #### Our last challenge will be to create an aggregate function and apply it to a select group of columns in our dataframe.
#
# Write a function that returns the standard deviation of a column divided by the length of a column minus 1. Since we are using pandas, do not use the `len()` function. One alternative is to use `count()`. Also, use the numpy version of standard deviation.

def sample_sd(x):
    '''
    Input: A Pandas series of values
    Output: the standard deviation divided by the number of elements in the series
    Example:
        Input: pd.Series([1,2,3,4])
        Output: 0.3726779962
    '''
    # Uses the 'Iws' column of the supplied dataframe.
    col = x['Iws']
    return(np.std(col)/(col.count() - 1))


import numpy as np

sample_sd(pm25_hourly)


def sample_sdNEW(x):
    '''
    Input: A Pandas dataframe of numeric columns
    Output: prints the standard deviation divided by (count - 1) for each column
    '''
    for col in x.columns:
        print(np.std(x[col])/(x[col].count() - 1))


data = pm25_hourly._get_numeric_data()
data = data[['TEMP','PRES','Iws','Is','Ir']]
data.columns

sample_sdNEW(data)
module-1/Map-Reduce-Filter/your-code/main.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # 在上一篇筆記裡我們測試了 Model.Config 對實際儲存大小的影響,接下來我們要進一步研究模型訓練的方式。這裡的主要參考資料是 hugginface 提供的 [run_clm.py](https://github.com/huggingface/transformers/blob/master/examples/tensorflow/language-modeling/run_clm.py)。 # # 資料我們使用處理過的 line-sentence 的中文維基百科內容。 # # + from __future__ import print_function import logging import os import sys import random import argparse import numpy as np import tensorflow as tf from transformers import BertTokenizerFast, TFGPT2LMHeadModel, GPT2Config testfile1 = '../data/line_sentence_000002.txt' testfile2 = '../data/poet.song.0.txt' # + def initialize_gpt2(pretrained_path=None): ''' Model initialization. ''' myconfig = GPT2Config( n_ctx=1024, n_embd=768, n_head=12, n_layer=6, n_positions=1024, vocab_size=25129, use_cache=True, ) # if pretrained_path is None: print('Initialize new model with config: '+str(myconfig)) model = TFGPT2LMHeadModel(myconfig) else: print('Load pretrained model from: '+str(pretrained_path)) model = TFGPT2LMHeadModel.from_pretrained(pretrained_path) model.summary() # def dummy_loss(y_true, y_pred): ''' A dummy loss function for causal language model. 
''' return tf.reduce_mean(y_pred) # optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5) model.compile(optimizer=optimizer, loss={"loss": dummy_loss}) return(model) tokenizer = BertTokenizerFast.from_pretrained('bert-base-chinese') #model = initialize_gpt2(pretrained_path='../model/mygpt2_01/') model = initialize_gpt2() # + # Test clm function def test_clm(model, tokenizer, starting_text='人之初,性本善', max_length=50, num_trials=5): # Parse seeding string input_ids = tokenizer.encode(starting_text, return_tensors='tf') # Generate text generated = model.generate(input_ids, max_length=max_length, num_return_sequences=num_trials, no_repeat_ngram_size=2, repetition_penalty=1.5, top_p=0.92, temperature=.85, do_sample=True, top_k=125, early_stopping=True) # Output output=[] for i in range(num_trials): text = tokenizer.decode(generated[i], skip_special_tokens= True) # Decode the generated text text = text.replace(' ','') # Remove spaces between tokens trial = {'id':i+1, 'text': text} print(text+'\n') output.append(trial) return(0) test_clm(model, tokenizer) # + # Data Preprocessing code from run_clm.py from datasets import load_dataset from functools import partial from sklearn.model_selection import train_test_split # region Helper classes class SavePretrainedCallback(tf.keras.callbacks.Callback): # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback # that saves the model with this method after each epoch. 
def __init__(self, output_dir, **kwargs): super().__init__() self.output_dir = output_dir def on_epoch_end(self, epoch, logs=None): self.model.save_pretrained(self.output_dir) # endregion # region Data generator def sample_generator(dataset): # Trim off the last partial batch if present sample_ordering = np.random.permutation(len(dataset)) for sample_idx in sample_ordering: example = dataset[int(sample_idx)] # Handle dicts with proper padding and conversion to tensor. example = {key: tf.convert_to_tensor(arr, dtype_hint=tf.int32) for key, arr in example.items()} yield example, example["labels"] # TF needs some kind of labels, even if we don't use them return # endregion def create_dataset_from_text_files(data_files): # region Load datasets raw_datasets = load_dataset('text', data_files=data_files) print('Load datasets from file: '+data_files["train"]) print(raw_datasets) print(raw_datasets['train']['text'][101]) print(len(raw_datasets['train']['text'])) print() # endregion # region Dataset preprocessing print('Dataset preprocessing:') # First we tokenize all the texts. column_names = raw_datasets["train"].column_names text_column_name = "text" if "text" in column_names else column_names[0] print('\t column_names:\t'+'.'.join(column_names)) print('\t text_column_name:\t'+text_column_name) print() def tokenize_function(examples): return tokenizer(examples[text_column_name], truncation=True) tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=1, remove_columns=column_names, load_from_cache_file=True, desc="Running tokenizer on dataset", ) print('Tokenzied Datasets:') print(tokenized_datasets) print(tokenized_datasets['train']['input_ids'][101][:10]) print(len(tokenized_datasets['train']['input_ids'])) print() block_size = tokenizer.model_max_length print('\t block_size:\t'+str(block_size)) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. 
def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=1, load_from_cache_file=True, desc=f"Grouping texts in chunks of {block_size}", ) train_dataset = lm_datasets["train"] print('Train Datasets:') print(train_dataset) print(train_dataset['input_ids'][101][:5]) print(train_dataset['labels'][101][:5]) num_replicas = 1 train_generator = partial(sample_generator, train_dataset, tokenizer) train_signature = { feature: tf.TensorSpec(shape=(None,), dtype=tf.int32) for feature in train_dataset.features if feature != "special_tokens_mask" } train_sig = (train_signature, train_signature["labels"]) options = tf.data.Options() options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF tf_train_dataset = ( tf.data.Dataset.from_generator(train_generator, output_signature=train_sig) .with_options(options) .batch(batch_size=num_replicas * 128, drop_remainder=True) .repeat(int(3)) ) return(tf_train_dataset) data_files = {} data_files["train"] = tf_train_dataset = create_dataset_from_text_files(data_files) # + # My own data process def process_line_sentence_file(furl, tokenizer): ''' Read the line-sentence text file and create tokenized dataset. 
''' # Read file with open(furl, 'r') as f: sentences = f.readlines() # Tokenization with tokenizer.encode() block_size = tokenizer.model_max_length examples = [] for sentence in sentences: if len(sentence)<=block_size: examples.append(tokenizer.encode(sentence)) else: # Truncate in block of block_size #print('Sequence legnth is larger than model_max_length: '+str(len(sentence))+'\t'+str(len(sentence)//block_size+1)) for i in range(0, len(sentence), block_size): end = min(i+block_size, len(sentence)) #print('\t Adding substring: '+str(i)+' - '+str(end)) examples.append(tokenizer.encode(sentence[i:end])) # Create tensors print(len(examples)) # Build x,y for training inputs, labels = [], [] for ex in examples: inputs.append(ex[:-1]) labels.append(ex[1:]) # input_t = tf.ragged.constant(inputs).to_tensor() label_t = tf.ragged.constant(labels).to_tensor() dataset = tf.data.Dataset.from_tensor_slices((input_t, label_t)) return(dataset) mydataset = process_line_sentence_file(testfile1, tokenizer) print(mydataset) # + #optimizer = tf.keras.optimizers.Adam(learning_rate=5e-5) #def dummy_loss(y_true, y_pred): # return tf.reduce_mean(y_pred) #model.compile(optimizer=optimizer, loss={"loss": dummy_loss}) #model.summary() TOTAL_SENTENCES = 1020 EPOCHS = 100 BATCH_SIZE = 128 #data = tf.data.Dataset.from_tensor_slices((train_dataset['input_ids'], train_dataset['labels'])) model.fit(mydataset.shuffle(10000).batch(BATCH_SIZE), epochs=EPOCHS, batch_size=BATCH_SIZE, steps_per_epoch=(TOTAL_SENTENCES//BATCH_SIZE)+1) # - test_clm(model, tokenizer)
notebooks/Test_GPT2_Model_Training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Benchmark and Bounds Tests
#
# The purpose of this notebook is to benchmark all of the single GPU cuML algorithms against their skLearn counterparts, while also providing the ability to find and verify upper bounds. This version of the `cuml_benchmarks` is meant to complete faster than the full version and on GPUs with smaller memory capacities. If you need an exhaustive benchmark, please use the `cuml_benchmarks` notebook.
#
# This benchmark will persist results into a file so that benchmarking may be continued, in the case of failure.
#
# Also supported is the ability to draw charts with the results, which should aid in presentations and transparency to end-users.
#
# **Note: if you get a Memory Error, please reduce your upper bound bench_rows to something that will fit in your GPU's memory. This benchmark is Single GPU only, and you will have the opportunity to choose which GPU you want to benchmark**

# ## Notebook Credits
# **Authorship**<br />
# Original Author: <NAME>, based on the work of Corey Nolet's original [cuML Benchmarks](intermediate_notebooks/benchmarks/cuml_benchmarks.ipynb)<br />
# Last Edit: <NAME>, 9/25/2019<br />
#
# **Test System Specs**<br />
# Test System Hardware: GV100<br />
# Test System Software: Ubuntu 18.04<br />
# RAPIDS Version: 0.10.0a - Docker Install<br />
# Driver: 410.79<br />
# CUDA: 10.0<br />
#
#
# **Known Working Systems**<br />
# RAPIDS Versions: 0.10

# +
import numpy as np
import pandas as pd
import cudf
import os
import time
import pickle

import cuml
import matplotlib.pyplot as plt
import seaborn as sns

import warnings
warnings.filterwarnings('ignore')

from pylab import rcParams
rcParams['figure.figsize'] = 40, 20
rcParams['figure.dpi'] = 100
sns.set_style("darkgrid")

print(cuml.__version__)
# -

# ## Please choose the GPU you'll be benchmarking and set its ID in the OS environment

# !nvidia-smi

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # Choose GPU here

# +
# Default parameters
N_JOBS_SKLEARN = -1  # Passed to the n_jobs parameter, indicates number of cpu jobs to run
# Note that some sklearn algorithms do not support n_jobs (e.g. PCA), so they run a single job
RERUN_BENCH = True  # Set to true to force re-running even if a result is cached
MAX_BENCH_ROW_COUNTS = -1  # When iterating over many row sizes, only consider first N options (for faster testing, set to -1 for all options)
MAX_BENCH_FEATURE_COUNTS = -1  # When iterating over many feature counts, only consider first N options (for faster testing, set to -1 for all options)
# -

# # Benchmark function definitions

# ### Data loading functions

# +
import gzip


def load_data_mortgage_X(nrows, ncols, cached = '../../data/mortgage/mortgage.npy.gz',source='mortgage', dtype = np.float32):
    """Load a random (nrows, ncols) sample of the mortgage dataset, or random data as a fallback."""
    print("Loading " + str(cached))
    if os.path.exists(cached) and source=='mortgage':
        print('use mortgage data')
        with gzip.open(cached) as f:
            X = np.load(f)
        X = X[np.random.randint(0,X.shape[0]-1,nrows),:ncols]
    else:
        print('use random data')
        X = np.random.random((nrows,ncols)).astype(dtype)
    df = pd.DataFrame({'fea%d'%i:X[:,i] for i in range(X.shape[1])}).fillna(0)
    return df


def load_data_mortgage_Xy(nrows, ncols, dtype = np.float32):
    """ Generate a dataframe and series based on rows and cols """
    X = load_data_mortgage_X(nrows, ncols, dtype = dtype)
    y = load_data_mortgage_X(nrows, 1, dtype = dtype)["fea0"]
    return (X, y)


def load_data_X(nrows, ncols, dtype = np.float32):
    """ Generate a single dataframe with specified rows and cols """
    X = np.random.uniform(-1, 1, (nrows,ncols))
    df = pd.DataFrame({'fea%d'%i:X[:,i].astype(dtype) for i in range(X.shape[1])})
    return df


def load_data_Xy(nrows, ncols, dtype = np.float32):
    """ Generate a dataframe and series based on rows and cols """
    X = load_data_X(nrows, ncols, dtype)
    y = load_data_X(nrows, 1, dtype)["fea0"]
    return (X, y)


def load_data_X_npy(nrows, ncols, dtype=np.float32):
    """Generate a raw numpy array with the specified shape."""
    return np.random.uniform(-1, 1,(nrows, ncols))


def load_data_Xy_npy(nrows, ncols, dtype = np.float32):
    """Generate a (X, y) pair of raw numpy arrays."""
    X = load_data_X_npy(nrows, ncols, dtype)
    y = load_data_X_npy(nrows, 1, dtype)
    return (X, y)


# +
def pandas_convert(data):
    """Convert pandas containers (or tuples of them) to their cudf equivalents."""
    if isinstance(data, tuple):
        return tuple([pandas_convert(d) for d in data])
    elif isinstance(data, pd.DataFrame):
        return cudf.DataFrame.from_pandas(data)
    elif isinstance(data, pd.Series):
        return cudf.Series.from_pandas(data)
    else:
        raise Exception("Unsupported type %s" % str(type(data)))


def no_convert(data):
    """Pass numpy data (or tuples of it) through unchanged."""
    if isinstance(data, tuple):
        return tuple([d for d in data])
    elif isinstance(data, np.ndarray):
        return data
    else:
        raise Exception("Unsupported type %s" % str(type(data)))
# -

# ### Pluggable benchmark function


class SpeedupBenchmark(object):
    """Times a cuML algorithm against its sklearn counterpart and reports sklearn/cuml speedup."""

    def __init__(self, converter = pandas_convert):
        self.name = "speedup"
        self.converter = converter

    def __str__(self):
        return "Speedup"

    def run(self, algo, rows, dims, data):
        # cuML gets the converted (GPU) copy; sklearn keeps the original data.
        data2 = self.converter(data)

        cu_start = time.time()
        algo.cuml(data2)
        cu_elapsed = time.time() - cu_start

        sk_start = time.time()
        algo.sk(data)
        sk_elapsed = time.time() - float(sk_start)

        # Needs to return the calculation and the name given to it.
        return sk_elapsed / float(cu_elapsed)


class BenchmarkRunner(object):
    """Runs benchmarks over a grid of (rows, dims), caching results in a pickle file."""

    # NOTE(review): the mutable default arguments below are never mutated in
    # place, so they are benign here, but new code should prefer None sentinels.
    def __init__(self, benchmarks = [SpeedupBenchmark()],
                 out_filename = "benchmark.pickle",
                 rerun = RERUN_BENCH,
                 n_runs = 3,
                 bench_rows = [2**x for x in range(13, 20)],
                 bench_dims = [64, 128, 256, 512]):
        self.benchmarks = benchmarks
        self.rerun = rerun
        self.n_runs = n_runs
        # FIX: the sentinel -1 means "use all options", but a plain [:-1]
        # slice silently dropped the LAST option instead. Translate any
        # non-positive limit into "no limit".
        row_limit = MAX_BENCH_ROW_COUNTS if MAX_BENCH_ROW_COUNTS > 0 else None
        dim_limit = MAX_BENCH_FEATURE_COUNTS if MAX_BENCH_FEATURE_COUNTS > 0 else None
        self.bench_rows = bench_rows[:row_limit]
        self.bench_dims = bench_dims[:dim_limit]
        self.out_filename = out_filename

    def load_results(self):
        # Resume from a previous run when a results pickle exists.
        if os.path.exists(self.out_filename):
            print("Loaded previous benchmark results from %s" % (self.out_filename))
            with open(self.out_filename, 'rb') as f:
                return pickle.load(f)
        else:
            return {}

    def store_results(self, final_results):
        with open(self.out_filename, 'wb') as f:
            pickle.dump(final_results, f)

    def run(self, algo):
        final_results = self.load_results()
        for benchmark in self.benchmarks:
            if algo.name in final_results:
                results = final_results[algo.name]
            else:
                results = {}
                final_results[algo.name] = results
            for n_rows in self.bench_rows:
                for n_dims in self.bench_dims:
                    # Skip cached results unless a re-run was requested.
                    if (n_rows, n_dims, benchmark.name) not in results or self.rerun:
                        print("Running %s. (nrows=%d, n_dims=%d)" % (str(algo), n_rows, n_dims))
                        data = algo.load_data(n_rows, n_dims)
                        runs = [benchmark.run(algo, n_rows, n_dims, data) for i in range(self.n_runs)]
                        results[(n_rows, n_dims, benchmark.name)] = np.mean(runs)
                        print("Benchmark for %s = %f" % (str((n_rows, n_dims, benchmark.name)),
                                                         results[(n_rows, n_dims, benchmark.name)]))
                        self.store_results(final_results)

    def chart(self, algo, title = "cuML vs SKLearn"):
        # One line per feature count, x-axis = log2(rows).
        for benchmark in self.benchmarks:
            results = self.load_results()[algo.name]
            final = {}
            plts = []
            for dim in self.bench_dims:
                data = {k: v for (k, v) in results.items() if dim == k[1]}
                if len(data) > 0:
                    data = [(k[0], v) for k, v in data.items()]
                    data.sort(key = lambda x: x[0])
                    final[dim] = list(map(lambda x: x[1], data))
                    keys = list(map(lambda x: np.log2(x[0]), data))
                    line = plt.plot(keys, final[dim], label = str(dim), linewidth = 3, marker = 'o', markersize = 7)
                    plts.append(line[0])
            leg = plt.legend(handles = plts, fontsize = 30)
            leg.set_title("Dimensions", prop = {'size':'x-large'})
            plt.title("%s %s: %s" % (algo, benchmark, title), fontsize = 30)
            plt.ylabel(str(benchmark), fontsize = 20)
            plt.xlabel("Training Examples (2^x)", fontsize = 40)
            plt.tick_params(axis='both', which='major', labelsize=15)
            plt.tick_params(axis='both', which='minor', labelsize=15)
            plt.show()


class BaseAlgorithm(object):
    """Base class for benchmarked algorithms; holds the data-loading function."""

    def __init__(self, load_data = load_data_X):
        self.load_data = load_data

# # Benchmarks and Results

# ### Nearest Neighbors

# +
from sklearn.neighbors import NearestNeighbors
from cuml.neighbors import NearestNeighbors as cumlNN


class NearestNeighborsAlgo(BaseAlgorithm):
    def __init__(self, n_neighbors = 1024, load_data = load_data_X):
        self.n_neighbors = n_neighbors
        self.name = "nearest_neighbors"
        BaseAlgorithm.__init__(self, load_data)

    def __str__(self):
        return "NearestNeighbors"

    def sk(self, X):
        knn_sk = NearestNeighbors(n_neighbors = self.n_neighbors, algorithm = 'brute', n_jobs=N_JOBS_SKLEARN)
        knn_sk.fit(X)
        D_sk,I_sk = knn_sk.kneighbors(X[0:100])

    def cuml(self, X):
        knn_cuml = cumlNN(n_neighbors = self.n_neighbors)
        knn_cuml.fit(X)
        D_cuml,I_cuml = knn_cuml.kneighbors(X[0:100])
# -

runner = BenchmarkRunner(benchmarks = [SpeedupBenchmark(no_convert)],
                         bench_rows = [2**x for x in range(11, 17)])
runner.run(NearestNeighborsAlgo(load_data = load_data_X_npy))

runner = BenchmarkRunner()
runner.chart(NearestNeighborsAlgo())

# ### DBSCAN

# +
from sklearn.cluster import DBSCAN as skDBSCAN
from cuml import DBSCAN as cumlDBSCAN


class DBSCANAlgo(BaseAlgorithm):
    def __init__(self, eps = 3, min_samples = 2):
        self.name = "dbscan"
        # FIX: the constructor previously hard-coded eps=3 and min_samples=2,
        # silently ignoring the parameters. Defaults are unchanged, so
        # existing default-constructed usage behaves identically.
        self.eps = eps
        self.min_samples = min_samples
        BaseAlgorithm.__init__(self)

    def __str__(self):
        return "DBSCAN"

    def sk(self, X):
        clustering_sk = skDBSCAN(eps = self.eps, min_samples = self.min_samples,
                                 algorithm = "brute", n_jobs=N_JOBS_SKLEARN)
        clustering_sk.fit(X)

    def cuml(self, X):
        clustering_cuml = cumlDBSCAN(eps = self.eps, min_samples = self.min_samples)
        clustering_cuml.fit(X)
# -

runner = BenchmarkRunner(bench_rows = [2**x for x in range(10, 17)])
runner.run(DBSCANAlgo())

runner = BenchmarkRunner(bench_rows = [2**x for x in range(10, 17)])
runner.chart(DBSCANAlgo())

# ### UMAP

# +
from umap import UMAP as skUMAP
from cuml.manifold.umap import UMAP as cumlUMAP


class UMAPAlgo(BaseAlgorithm):
    def __init__(self, n_neighbors = 5, n_epochs = 500):
        self.name = "umap"
        self.n_neighbors = n_neighbors
        self.n_epochs = n_epochs
        BaseAlgorithm.__init__(self)

    def __str__(self):
        return "UMAP"

    def sk(self, X):
        clustering_sk = skUMAP(n_neighbors = self.n_neighbors, n_epochs = self.n_epochs)
        clustering_sk.fit(X)

    def cuml(self, X):
        clustering_cuml = cumlUMAP(n_neighbors = self.n_neighbors, n_epochs = self.n_epochs)
        clustering_cuml.fit(X)
# -

runner = BenchmarkRunner(bench_rows = [2**x for x in range(12, 16)])
runner.run(UMAPAlgo())

runner = BenchmarkRunner(bench_rows = [2**x for x in range(12, 16)])
runner.chart(UMAPAlgo())

# ### 
K-means Clustering # + from sklearn.cluster import KMeans as skKmeans from cuml.cluster import KMeans as cumlKmeans class KMeansAlgo(BaseAlgorithm): def __init__(self, n_clusters=5): self.name = "kmeans" self.n_clusters = n_clusters BaseAlgorithm.__init__(self, load_data_X_npy) def __str__(self): return "KMeans" def sk(self, X): clustering_sk = skKmeans(n_clusters=self.n_clusters, n_jobs=N_JOBS_SKLEARN) clustering_sk.fit(X) def cuml(self, X): clustering_cuml = cumlKmeans(n_clusters=self.n_clusters) clustering_cuml.fit(X) # - runner = BenchmarkRunner(benchmarks = [SpeedupBenchmark(no_convert)], bench_rows = [2**x for x in range(12, 18, 2)]) runner.run(KMeansAlgo()) runner = BenchmarkRunner(bench_rows = [2**x for x in range(12, 18, 2)]) runner.chart(KMeansAlgo()) # ### Linear Regression # + from sklearn.linear_model import LinearRegression as skLR from cuml.linear_model import LinearRegression as cumlLR class LinearRegressionAlgo(BaseAlgorithm): def __init__(self): BaseAlgorithm.__init__(self, load_data_Xy) self.name = "linear_regression" def __str__(self): return "Linear Regression" def sk(self, data): X, y = data clustering_sk = skLR(n_jobs=N_JOBS_SKLEARN) clustering_sk.fit(X, y) def cuml(self, data): X, y = data cuml_lr = cumlLR() cuml_lr.fit(X, y) # - runner = BenchmarkRunner(bench_rows = [2**x for x in range(15, 18)]) runner.run(LinearRegressionAlgo()) runner = BenchmarkRunner(bench_rows = [2**x for x in range(15, 18)]) runner.chart(LinearRegressionAlgo()) # ### PCA / SVD # + from sklearn.decomposition import PCA as skPCA from cuml import PCA as cumlPCA class PCAAlgo(BaseAlgorithm): def __init__(self, n_components = 10, load_data = load_data_mortgage_X): self.n_components = 10 self.name = "pca" BaseAlgorithm.__init__(self, load_data = load_data) def __str__(self): return "PCA" def sk(self, X): skpca = skPCA(n_components = 10) skpca.fit(X) def cuml(self, X): cumlpca = cumlPCA(n_components = 10) cumlpca.fit(X) # - runner = BenchmarkRunner(bench_rows = [2**x for x 
in range(18, 20)]) runner.run(PCAAlgo()) runner = BenchmarkRunner(bench_rows = [2**x for x in range(18, 20)]) runner.chart(PCAAlgo()) # + from sklearn.ensemble import RandomForestClassifier as skRFC from cuml.ensemble import RandomForestClassifier as cumlRFC class RandomForestClassifierAlgo(BaseAlgorithm): def __init__(self, n_estimators = 1000, max_depth = 8, load_data = load_data_mortgage_Xy): self.n_estimators = n_estimators self.max_depth = max_depth self.name = "random_forest_classifier" BaseAlgorithm.__init__(self, load_data = load_data) def __str__(self): return "Random Forest Classifier" def sk(self, data): X, y = data skrfc = skRFC(n_jobs = -1, n_estimators = self.n_estimators, max_depth = self.max_depth) skrfc.fit(X, y.astype(np.int32)) def cuml(self, data): X, y = data cumlrfc = cumlRFC(n_estimators = self.n_estimators, max_depth = self.max_depth) cumlrfc.fit(X, y.astype(np.int32)) # - runner = BenchmarkRunner(bench_rows = [2**x for x in range(18, 20)]) runner.run(RandomForestClassifierAlgo()) runner = BenchmarkRunner(bench_rows = [2**x for x in range(18, 20)]) runner.chart(RandomForestClassifierAlgo()) # + from sklearn.random_projection import GaussianRandomProjection as skGRP from cuml.random_projection import GaussianRandomProjection as cumlGRP class GaussianRandomProjectionAlgo(BaseAlgorithm): def __init__(self, load_data = load_data_mortgage_X): self.name = "gaussian_random_projection" BaseAlgorithm.__init__(self, load_data = load_data) def __str__(self): return "Gaussian Random Projection" def sk(self, data): X = data skrfc = skGRP(n_components = 2) skrfc.fit(X) skrfc.transform(X) def cuml(self, data): X = data cumlrfc = cumlGRP(n_components = 2) cumlrfc.fit(X) cumlrfc.transform(X) # - runner = BenchmarkRunner(bench_rows = [2**x for x in range(11, 20)]) runner.run(GaussianRandomProjectionAlgo()) # + from sklearn.random_projection import SparseRandomProjection as skSRP from cuml.random_projection import SparseRandomProjection as cumlSRP class 
SparseRandomProjection(BaseAlgorithm): def __init__(self, load_data = load_data_mortgage_X): self.name = "gaussian_random_projection" BaseAlgorithm.__init__(self, load_data = load_data) def __str__(self): return "Gaussian Random Projection" def sk(self, data): X = data skrfc = skSRP(n_components = 2) skrfc.fit(X) skrfc.transform(X) def cuml(self, data): X = data cumlrfc = cumlSRP(n_components = 2) cumlrfc.fit(X) cumlrfc.transform(X) # - runner = BenchmarkRunner(bench_rows = [2**x for x in range(11, 25)]) runner.run(SparseRandomProjection()) # + from sklearn.manifold.tsne import trustworthiness as skTrust from cuml.metrics. import SparseRandomProjection as cumlSRP class SparseRandomProjection(BaseAlgorithm): def __init__(self, load_data = load_data_mortgage_X): self.name = "gaussian_random_projection" BaseAlgorithm.__init__(self, load_data = load_data) def __str__(self): return "Gaussian Random Projection" def sk(self, data): X = data skrfc = skSRP(n_components = 2) skrfc.fit(X) skrfc.transform(X) def cuml(self, data): X = data cumlrfc = cumlSRP(n_components = 2) cumlrfc.fit(X) cumlrfc.transform(X)
intermediate_notebooks/benchmarks/cuml_benchmarks_quick.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Logistic Classifier
#
# > ### Mainly used when the outcome must be classified as pass/fail
# > e.g.
# > - separating spam mail (1) from normal mail (0)
# > - Facebook feeds likely to interest you (1) vs. not (0)
# > - whether a card payment matches the usual spending pattern (0) or not (1)

# ## sigmoid
#
# ![](https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Logistic-curve.svg/320px-Logistic-curve.svg.png)
#
# ## $
# g(z) = \frac{1}{(1 + e^{-z})}
# $

# ## Logistic Hypothesis

# ## $
# H(X) = \frac{1}{1 + e^{-W^{T}X}}
# $

# ## New Cost function for logistic

# $
# cost(W) = \frac{1}{m}\sum_{i=1}^{m}c(H(x),y)
# $
#
# $
# c(H(x),y) = \left(\begin{array}{c} -log(H(x)) : y = 1 \\ -log(1 - H(x)) : y = 0 \end{array}\right)
# $
#
# y == 1:
# - H(x) = 1 -> -log(z) = 0
# - H(x) = 0 -> -log(z) = infinity
#
# y == 0:
# - H(x) = 0 -> -log(1 - z) = 0
# - H(x) = 1 -> -log(1 - z) = infinity
#
#
# $
# c(H(x),y) = -ylog(H(x))-(1-y)log(1 - H(x))
# $
#
#
# $
# cost(W) = -\frac{1}{m}\sum_{i=1}^{m}ylog(H(x))+(1-y)log(1 - H(x))
# $
#
# ```python
# cost = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(hypothesis) + (1-Y)*tf.log(1-hypothesis)))
# ```
#
# ## Minimize
#
# $
# W := W - \alpha\frac{\partial}{\partial W}cost(W)
# $
#
# ```python
# a = tf.Variable(0.1)
# optimizer = tf.train.GradientDescentOptimizer(a)
# train = optimizer.minimize(cost)
# ```

import tensorflow as tf

# ## Initialize Variables

# Six 2-feature samples with binary labels.
x_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]
y_data = [[0], [0], [0], [1], [1], [1]]

X = tf.placeholder(tf.float32, shape=[None, 2])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([2, 1]), name='weight')
# BUG FIX: `name='bias'` belongs to tf.Variable, not tf.random_normal;
# previously the variable was left unnamed in the graph.
b = tf.Variable(tf.random_normal([1]), name='bias')

# ## Hypothesis
#
# ## $$
# g(z) = \frac{1}{(1 + e^{-z})}
# $$

hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

# ## Cost
#
# ## $$
# cost(W) = -\frac{1}{m}\sum_{i=1}^{m}ylog(H(x))+(1-y)log(1 - H(x))
# $$

cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))

# ## Minimize
#
# ## $
# W := W - \alpha\frac{\partial}{\partial W}cost(W)
# $
#

train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

# ## Accuracy computation

# True if hypothesis > 0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

# ## Launch graph

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, cost_val)

    # Report fitted predictions and training accuracy.
    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: \n", h, "\nCorrect (Y): \n", c, "\nAccuracy: \n", a)

# # Classifying diabetes

import tensorflow as tf
import numpy as np

# +
# Last CSV column is the binary label; the rest are the 8 features.
xy = np.loadtxt('data/data-03-diabetes.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
# -

X = tf.placeholder(tf.float32, shape=[None, 8])
Y = tf.placeholder(tf.float32, shape=[None, 1])

W = tf.Variable(tf.random_normal([8, 1]), name='weight')
# BUG FIX: same misplaced `name` kwarg as above — it must go to tf.Variable.
b = tf.Variable(tf.random_normal([1]), name='bias')

hypothesis = tf.sigmoid(tf.matmul(X, W) + b)

cost = -tf.reduce_mean(Y * tf.log(hypothesis) + (1 - Y) * tf.log(1 - hypothesis))

train = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)

predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    for step in range(10001):
        cost_val, _ = sess.run([cost, train], feed_dict={X: x_data, Y: y_data})
        if step % 200 == 0:
            print(step, cost_val)

    h, c, a = sess.run([hypothesis, predicted, accuracy],
                       feed_dict={X: x_data, Y: y_data})
    print("\nHypothesis: \n", h, "\nCorrect (Y): \n", c, "\nAccuracy: \n", a)
lab05-Logistic Classifier.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Politician Activity on Twitter by Political Affiliation # # The parameters in the cell below can be adjusted to explore other political affiliations and time frames. # # ### How to explore other political affiliation? # The ***affiliation*** parameter can be use to aggregate politicians by their political affiliations. The column `affiliation` in this [this other notebook](../politicians.ipynb?autorun=true) show the politicians that belong each political affiliation. # # ***Alternatively***, you can direcly use the [politicians API](http://mediamonitoring.gesis.org/api/politicians/swagger/), or access it with the [SMM Wrapper](https://pypi.org/project/smm-wrapper/). # # ## A. Set Up parameters # Parameters: affiliation = 'CSU' from_date = '2017-09-01' to_date = '2018-12-31' aggregation = 'week' # ## B. Using the SMM Politician API # + import pandas as pd # create an instance to the smm wrapper from smm_wrapper import SMMPoliticians smm = SMMPoliticians() # using the api to get the data df = smm.dv.get_politicians() # Filter the accounts by party, and valid ones (the ones that contain tw_ids) party_df = df[(df['affiliation']==affiliation) & (df['tw_ids'].notnull())] # query the Social Media Monitoring API tweets_by = pd.concat(smm.dv.tweets_by(_id=politician_id, from_date=from_date, to_date=to_date, aggregate_by=aggregation) for politician_id in party_df.index) replies_to = pd.concat(smm.dv.replies_to(_id=politician_id, from_date=from_date, to_date=to_date, aggregate_by=aggregation) for politician_id in party_df.index) # aggregate the replies total_tweets_by = tweets_by.groupby('date').agg({'tweets': 'sum'}) total_replies_to = replies_to.groupby('date').agg({'replies': 'sum'}) # - # ## C. 
Plotting # + #plotting data import plotly from plotly import graph_objs as go plotly.offline.init_notebook_mode(connected=True) plotly.offline.iplot({ "data": [go.Scatter(x=total_tweets_by.index, y=total_tweets_by['tweets'], name='Tweets', line_shape='spline'), go.Scatter(x=total_replies_to.index, y=total_replies_to['replies'], name='Replies', line_shape='spline')], "layout": go.Layout( title='Twitter (Tweets and Replies)', yaxis=dict(title='N')) })
python/affiliations/twitter.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="sSOIl3z06Vd2" colab_type="text" # ### Markov decision process # # This week's methods are all built to solve __M__arkov __D__ecision __P__rocesses. In the broadest sense, an MDP is defined by how it changes states and how rewards are computed. # # State transition is defined by $P(s' |s,a)$ - how likely are you to end at state $s'$ if you take action $a$ from state $s$. Now there's more than one way to define rewards, but we'll use $r(s,a,s')$ function for convenience. # # _This notebook is inspired by the awesome_ [CS294](https://github.com/berkeleydeeprlcourse/homework/blob/36a0b58261acde756abd55306fbe63df226bf62b/hw2/HW2.ipynb) _by Berkeley_ # + [markdown] id="UEaz7SYC6Vd3" colab_type="text" # For starters, let's define a simple MDP from this picture: # # <img src="https://upload.wikimedia.org/wikipedia/commons/a/ad/Markov_Decision_Process.svg" width="400px" alt="Diagram by Waldoalvarez via Wikimedia Commons, CC BY-SA 4.0"/> # + id="17gYflyM6Vd4" colab_type="code" outputId="a8f8dcdd-0e77-4faf-d7c6-96f18617d3b3" executionInfo={"status": "ok", "timestamp": 1590699614524, "user_tz": 240, "elapsed": 23806, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 145} import sys, os if 'google.colab' in sys.modules and not os.path.exists('.setup_complete'): # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/spring20/setup_colab.sh -O- | bash # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/grading.py -O ../grading.py # !wget -q https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week2_model_based/submit.py # !wget -q 
https://raw.githubusercontent.com/yandexdataschool/Practical_RL/coursera/week2_model_based/mdp.py # !touch .setup_complete # This code creates a virtual display to draw game images on. # It will have no effect if your machine has a monitor. if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0: # !bash ../xvfb start os.environ['DISPLAY'] = ':1' # + id="c8W0vTHS6Vd7" colab_type="code" colab={} transition_probs = { 's0': { 'a0': {'s0': 0.5, 's2': 0.5}, 'a1': {'s2': 1} }, 's1': { 'a0': {'s0': 0.7, 's1': 0.1, 's2': 0.2}, 'a1': {'s1': 0.95, 's2': 0.05} }, 's2': { 'a0': {'s0': 0.4, 's2': 0.6}, 'a1': {'s0': 0.3, 's1': 0.3, 's2': 0.4} } } rewards = { 's1': {'a0': {'s0': +5}}, 's2': {'a1': {'s0': -1}} } from mdp import MDP mdp = MDP(transition_probs, rewards, initial_state='s0') # + [markdown] id="8-pwX3LR6Vd-" colab_type="text" # We can now use MDP just as any other gym environment: # + id="tapKmj_h6Vd_" colab_type="code" outputId="126f022d-22d5-4ec4-9ef1-ade6998986f8" executionInfo={"status": "ok", "timestamp": 1590699614526, "user_tz": 240, "elapsed": 23786, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 54} print('initial state =', mdp.reset()) next_state, reward, done, info = mdp.step('a1') print('next_state = %s, reward = %s, done = %s' % (next_state, reward, done)) # + [markdown] id="AyYnppZM6VeD" colab_type="text" # but it also has other methods that you'll need for Value Iteration # + id="BeAXmQni6VeE" colab_type="code" outputId="f0765469-3b2e-45db-ceb3-31d42ae046f9" executionInfo={"status": "ok", "timestamp": 1590699614527, "user_tz": 240, "elapsed": 23775, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 108} print("mdp.get_all_states =", mdp.get_all_states()) print("mdp.get_possible_actions('s1') = ", 
mdp.get_possible_actions('s1')) print("mdp.get_next_states('s1', 'a0') = ", mdp.get_next_states('s1', 'a0')) print("mdp.get_reward('s1', 'a0', 's0') = ", mdp.get_reward('s1', 'a0', 's0')) print("mdp.get_transition_prob('s1', 'a0', 's0') = ", mdp.get_transition_prob('s1', 'a0', 's0')) # + [markdown] id="baAmKp1j6VeH" colab_type="text" # ### Optional: Visualizing MDPs # # You can also visualize any MDP with the drawing fuction donated by [neer201](https://github.com/neer201). # # You have to install graphviz for system and for python. For ubuntu just run: # # 1. `sudo apt-get install graphviz` # 2. `pip install graphviz` # 3. restart the notebook # # __Note:__ Installing graphviz on some OS (esp. Windows) may be tricky. However, you can ignore this part alltogether and use the standart vizualization. # + id="Bh6Jdeae6VeH" colab_type="code" outputId="8b9db746-765d-4d7e-d623-c50f704539cd" executionInfo={"status": "ok", "timestamp": 1590699614527, "user_tz": 240, "elapsed": 23764, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 35} from mdp import has_graphviz from IPython.display import display print("Graphviz available:", has_graphviz) # + id="X8RhztUx6VeM" colab_type="code" outputId="ff9617a3-ecfa-42be-aa54-e27397f6cca2" executionInfo={"status": "ok", "timestamp": 1590699614787, "user_tz": 240, "elapsed": 24013, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 322} if has_graphviz: from mdp import plot_graph, plot_graph_with_state_values, plot_graph_optimal_strategy_and_state_values display(plot_graph(mdp)) # + [markdown] id="IgeLCN9O6VeQ" colab_type="text" # ### Value Iteration # # Now let's build something to solve this MDP. 
The simplest algorithm so far is __V__alue __I__teration # # Here's the pseudo-code for VI: # # --- # # `1.` Initialize $V^{(0)}(s)=0$, for all $s$ # # `2.` For $i=0, 1, 2, \dots$ # # `3.` $ \quad V_{(i+1)}(s) = \max_a \sum_{s'} P(s' | s,a) \cdot [ r(s,a,s') + \gamma V_{i}(s')]$, for all $s$ # # --- # + [markdown] id="LqkE7Ev96VeQ" colab_type="text" # First, let's write a function to compute the state-action value function $Q^{\pi}$, defined as follows # # $$Q_i(s, a) = \sum_{s'} P(s' | s,a) \cdot [ r(s,a,s') + \gamma V_{i}(s')]$$ # # + id="SnlfQYf96VeQ" colab_type="code" outputId="82f6f0e6-2fdf-4008-d36a-6f382b60b5bd" executionInfo={"status": "ok", "timestamp": 1590699614788, "user_tz": 240, "elapsed": 24004, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 35} # %%writefile mdp_get_action_value.py def get_action_value(mdp, state_values, state, action, gamma): """ Computes Q(s,a) as in formula above """ # <YOUR CODE> Q = 0 for s_prime in mdp.get_all_states(): # print(state) # print(action) # # print(state_values) # print(s_prime) P = mdp.get_transition_prob(state,action,s_prime) r = mdp.get_reward(state,action,s_prime) # state_values[ Q = Q + P*(r + gamma*state_values[s_prime]) # return <YOUR CODE> return Q # + id="Ue9Zv-3u6VeU" colab_type="code" colab={} import importlib import mdp_get_action_value importlib.reload(mdp_get_action_value) from mdp_get_action_value import get_action_value # + id="4u2M-fqA6VeW" colab_type="code" colab={} import numpy as np test_Vs = {s: i for i, s in enumerate(sorted(mdp.get_all_states()))} # print(test_Vs) # print(test_Vs['s0']) # print(get_action_value(mdp, test_Vs, 's2', 'a1', 0.9)) assert np.isclose(get_action_value(mdp, test_Vs, 's2', 'a1', 0.9), 0.69) assert np.isclose(get_action_value(mdp, test_Vs, 's1', 'a0', 0.9), 3.95) # + [markdown] id="IED0lemi6VeY" colab_type="text" # Using $Q(s,a)$ we can now define the "next" V(s) 
for value iteration. # $$V_{(i+1)}(s) = \max_a \sum_{s'} P(s' | s,a) \cdot [ r(s,a,s') + \gamma V_{i}(s')] = \max_a Q_i(s,a)$$ # + id="QElbREIr6VeY" colab_type="code" colab={} def get_new_state_value(mdp, state_values, state, gamma): """ Computes next V(s) as in formula above. Please do not change state_values in process. """ if mdp.is_terminal(state): return 0 # <YOUR CODE> # new_state_values = state_values.copy() V_ip1 = max(get_action_value(mdp,state_values,state,action,gamma) for action in mdp.get_possible_actions(state)) return V_ip1 # + id="S400BepA6Vec" colab_type="code" colab={} test_Vs_copy = dict(test_Vs) assert np.isclose(get_new_state_value(mdp, test_Vs, 's0', 0.9), 1.8) assert np.isclose(get_new_state_value(mdp, test_Vs, 's2', 0.9), 1.08) assert np.isclose(get_new_state_value(mdp, {'s0': -1e10, 's1': 0, 's2': -2e10}, 's0', 0.9), -13500000000.0), \ "Please ensure that you handle negative Q-values of arbitrary magnitude correctly" assert test_Vs == test_Vs_copy, "Please do not change state_values in get_new_state_value" # + [markdown] id="qQMD7mMe6Vee" colab_type="text" # Finally, let's combine everything we wrote into a working value iteration algo. # + id="8_FFLC1B6Vee" colab_type="code" outputId="37088575-3428-4e2d-bff4-a62ef625f1ab" executionInfo={"status": "ok", "timestamp": 1590699614792, "user_tz": 240, "elapsed": 23986, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 1000} # parameters gamma = 0.9 # discount for MDP num_iter = 100 # maximum iterations, excluding initialization # stop VI if new values are this close to old values (or closer) min_difference = 0.001 # initialize V(s) state_values = {s: 0 for s in mdp.get_all_states()} if has_graphviz: display(plot_graph_with_state_values(mdp, state_values)) for i in range(num_iter): # Compute new state values using the functions you defined above. 
# It must be a dict {state : float V_new(state)} new_state_values = {s: get_new_state_value(mdp,state_values,s,gamma) for s in mdp.get_all_states()} assert isinstance(new_state_values, dict) # Compute difference diff = max(abs(new_state_values[s] - state_values[s]) for s in mdp.get_all_states()) print("iter %4i | diff: %6.5f | " % (i, diff), end="") print(' '.join("V(%s) = %.3f" % (s, v) for s, v in state_values.items())) state_values = new_state_values if diff < min_difference: print("Terminated") # display(plot_graph_with_state_values(mdp, state_values)) break # + id="yhVoLvGG6Veg" colab_type="code" outputId="e75a8411-2020-4e24-b1ac-1e843da2aee4" executionInfo={"status": "ok", "timestamp": 1590699614793, "user_tz": 240, "elapsed": 23977, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 261} if has_graphviz: display(plot_graph_with_state_values(mdp, state_values)) # + id="IGLaMCPU6Vei" colab_type="code" outputId="a4cd8101-62c6-4d88-e7dd-9bdf54793563" executionInfo={"status": "ok", "timestamp": 1590699614793, "user_tz": 240, "elapsed": 23966, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 35} print("Final state values:", state_values) assert abs(state_values['s0'] - 3.781) < 0.01 assert abs(state_values['s1'] - 7.294) < 0.01 assert abs(state_values['s2'] - 4.202) < 0.01 # + [markdown] id="tUqkN_A96Vem" colab_type="text" # Now let's use those $V^{*}(s)$ to find optimal actions in each state # # $$\pi^*(s) = argmax_a \sum_{s'} P(s' | s,a) \cdot [ r(s,a,s') + \gamma V_{i}(s')] = argmax_a Q_i(s,a)$$ # # The only difference vs V(s) is that here we take not max but argmax: find action such with maximum Q(s,a). 
# + id="aU3JK6Kw06_8" colab_type="code"
# Quick demo of two ways to take an argmax over a dict.
stats = {'a':1000, 'b':3000, 'c': 100, 'd':3000}
max(stats, key=stats.get)
max_val = max(stats.values())
max_key = [k for k, v in stats.items() if v == max_val]
print(max_val, max_key)

# + id="adwsWaAq6Vem" colab_type="code"
import random

def get_optimal_action(mdp, state_values, state, gamma=0.9):
    """ Finds optimal action using formula above. """
    if mdp.is_terminal(state):
        return None
    # Evaluate Q(s, a) for every available action, then break ties uniformly
    # at random among the argmax actions.
    Q_dict = {action: get_action_value(mdp, state_values, state, action, gamma)
              for action in mdp.get_possible_actions(state)}
    max_Q = max(Q_dict.values())
    max_actions = [action for action, Q in Q_dict.items() if Q == max_Q]
    return random.choice(max_actions)

# + id="mztk4TAP6Veo" colab_type="code"
assert get_optimal_action(mdp, state_values, 's0', gamma) == 'a1'
assert get_optimal_action(mdp, state_values, 's1', gamma) == 'a0'
assert get_optimal_action(mdp, state_values, 's2', gamma) == 'a1'

assert get_optimal_action(mdp, {'s0': -1e10, 's1': 0, 's2': -2e10}, 's0', 0.9) == 'a0', \
    "Please ensure that you handle negative Q-values of arbitrary magnitude correctly"
assert get_optimal_action(mdp, {'s0': -2e10, 's1': 0, 's2': -1e10}, 's0', 0.9) == 'a1', \
    "Please ensure that you handle negative Q-values of arbitrary magnitude correctly"

# + id="L71NSZaf6Veq" colab_type="code"
if has_graphviz:
    try:
        display(plot_graph_optimal_strategy_and_state_values(mdp, state_values))
    except ImportError:
        raise ImportError("Run the cell that starts with \"%%writefile mdp_get_action_value.py\"")

# + id="pq_Xjbkj6Ver" colab_type="code"
# Measure agent's average reward
s = mdp.reset()
rewards = []
for _ in range(10000):
    s, r, done, _ = mdp.step(get_optimal_action(mdp, state_values, s, gamma))
    rewards.append(r)

print("average reward: ", np.mean(rewards))
assert(0.40 < np.mean(rewards) < 0.55)

# + [markdown] id="L1zxMhM16Vet" colab_type="text"
# ### Frozen lake

# + id="eDplqNGb6Vet" colab_type="code"
from mdp import FrozenLakeEnv
mdp = FrozenLakeEnv(slip_chance=0)

mdp.render()

# + id="c8_Ys_tT6Vev" colab_type="code"
def value_iteration(mdp, state_values=None, gamma=0.9, num_iter=1000, min_difference=1e-5):
    """ performs num_iter value iteration steps starting from state_values. Same as before but in a function """
    # BUG FIX: use an explicit None check. The previous `state_values or {...}`
    # also discarded a legitimately passed empty dict, because {} is falsy.
    if state_values is None:
        state_values = {s: 0 for s in mdp.get_all_states()}

    for i in range(num_iter):

        # Compute new state values using the functions you defined above.
        # It must be a dict {state : new_V(state)}
        new_state_values = {s: get_new_state_value(mdp, state_values, s, gamma)
                            for s in mdp.get_all_states()}

        assert isinstance(new_state_values, dict)

        # Compute difference (sup-norm) to decide convergence
        diff = max(abs(new_state_values[s] - state_values[s])
                   for s in mdp.get_all_states())

        print("iter %4i | diff: %6.5f | V(start): %.3f " %
              (i, diff, new_state_values[mdp._initial_state]))

        state_values = new_state_values
        if diff < min_difference:
            break

    return state_values

# + id="NR0ojg8H6Vex" colab_type="code"
state_values = value_iteration(mdp)

# + id="EqOVGLYT6Vez" colab_type="code"
# Roll out the greedy policy and render each step.
s = mdp.reset()
mdp.render()
for t in range(100):
    a = get_optimal_action(mdp, state_values, s, gamma)
    print(a, end='\n\n')
    s, r, done, _ = mdp.step(a)
    mdp.render()
    if done:
        break

# + [markdown] id="7Hju3FOM6Ve2" colab_type="text"
# ### Let's visualize!
#
# It's usually interesting to see what your algorithm actually learned under the hood. To do so, we'll plot state value functions and optimal actions at each VI step.
# + id="YF2WYCNL6Ve2" colab_type="code" colab={}
import matplotlib.pyplot as plt
# %matplotlib inline


def draw_policy(mdp, state_values):
    """Render the value function as a grayscale grid and overlay the greedy
    action (an arrow) plus the tile letter in every cell.

    Relies on module-level `np`, `get_optimal_action` and `gamma`.
    """
    plt.figure(figsize=(3, 3))
    h, w = mdp.desc.shape  # desc is (rows, cols)
    states = sorted(mdp.get_all_states())
    V = np.array([state_values[s] for s in states])
    Pi = {s: get_optimal_action(mdp, state_values, s, gamma) for s in states}
    # Sorted states enumerate the grid row-major, so the image is (h, w).
    # (The original used reshape(w, h) and swapped tick counts below, which
    # only coincided with the correct layout on square maps.)
    plt.imshow(V.reshape(h, w), cmap='gray', interpolation='none', clim=(0, 1))
    ax = plt.gca()
    # x-axis spans the w columns, y-axis the h rows
    ax.set_xticks(np.arange(w)-.5)
    ax.set_yticks(np.arange(h)-.5)
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    # Unit vector for each action; v is negated when drawing because image
    # rows grow downward. (A dead `np.mgrid[0:4, 0:4]` assignment removed.)
    a2uv = {'left': (-1, 0), 'down': (0, -1), 'right': (1, 0), 'up': (0, 1)}
    for y in range(h):
        for x in range(w):
            plt.text(x, y, str(mdp.desc[y, x].item()),
                     color='g', size=12, verticalalignment='center',
                     horizontalalignment='center', fontweight='bold')
            a = Pi[y, x]
            if a is None:  # terminal state: nothing to draw
                continue
            u, v = a2uv[a]
            plt.arrow(x, y, u*.3, -v*.3, color='m',
                      head_width=0.1, head_length=0.1)
    plt.grid(color='b', lw=2, ls='-')
    plt.show()


# + id="NL_82GYZ6Ve4" colab_type="code" outputId="acd2dca3-5603-41b8-8414-59fa1859318a" executionInfo={"status": "ok", "timestamp": 1590699622770, "user_tz": 240, "elapsed": 31870, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Run value iteration one sweep at a time and draw the policy after each sweep.
state_values = {s: 0 for s in mdp.get_all_states()}

for i in range(10):
    print("after iteration %i" % i)
    state_values = value_iteration(mdp, state_values, num_iter=1)
    draw_policy(mdp, state_values)
# please ignore iter 0 at each step

# + id="DcL_pN7c6Ve6" colab_type="code" outputId="62b6a621-5750-4809-e5c9-1d7a6bda37d5" executionInfo={"status": "ok", "timestamp": 1590700209951, "user_tz": 240, "elapsed": 25452, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 234}
from IPython.display import clear_output
from time import sleep

# Same visualization on the larger 8x8 map, animated in place.
mdp = FrozenLakeEnv(map_name='8x8', slip_chance=0.1)
state_values = {s: 0 for s in mdp.get_all_states()}

for i in range(30):
    clear_output(True)
    print("after iteration %i" % i)
    state_values = value_iteration(mdp, state_values, num_iter=1)
    draw_policy(mdp, state_values)
    sleep(0.5)
# please ignore iter 0 at each step

# + [markdown] id="uBfT1sj26Ve8" colab_type="text"
# Massive tests

# + id="V_df4MpV6Ve9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 181} outputId="46253e12-c247-4733-89f4-80250fb9706a" executionInfo={"status": "ok", "timestamp": 1590699643004, "user_tz": 240, "elapsed": 52093, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}}
# Deterministic env: the greedy policy must reach the goal on every episode.
mdp = FrozenLakeEnv(slip_chance=0)
state_values = value_iteration(mdp)

total_rewards = []
for game_i in range(1000):
    s = mdp.reset()
    rewards = []
    for t in range(100):
        s, r, done, _ = mdp.step(
            get_optimal_action(mdp, state_values, s, gamma))
        rewards.append(r)
        if done:
            break
    total_rewards.append(np.sum(rewards))

print("average reward: ", np.mean(total_rewards))
# with slip_chance=0 the mean reward must be exactly 1.0
assert(1.0 <= np.mean(total_rewards) <= 1.0)
print("Well done!")

# + id="7vftJx1h6Ve-" colab_type="code" outputId="d56b8405-ca0a-4019-bca6-3a17a292e06d" executionInfo={"status": "ok", "timestamp": 1590699644469, "user_tz": 240, "elapsed": 53550, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 363}
# Measure agent's average reward
mdp = FrozenLakeEnv(slip_chance=0.1)
state_values = value_iteration(mdp)

total_rewards = []
for game_i in range(1000):
    s = mdp.reset()
    rewards = []
    for t in range(100):
        s, r, done, _ = mdp.step(
            get_optimal_action(mdp, state_values, s, gamma))
        rewards.append(r)
        if done:
            break
    total_rewards.append(np.sum(rewards))

print("average reward: ", np.mean(total_rewards))
assert(0.8 <= np.mean(total_rewards) <= 0.95)
print("Well done!")

# + id="gnCkLr1b6VfA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 454} outputId="4cce99b7-d40d-4164-e190-2800896f6319" executionInfo={"status": "ok", "timestamp": 1590699646254, "user_tz": 240, "elapsed": 55332, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}}
# Measure agent's average reward
mdp = FrozenLakeEnv(slip_chance=0.25)
state_values = value_iteration(mdp)

total_rewards = []
for game_i in range(1000):
    s = mdp.reset()
    rewards = []
    for t in range(100):
        s, r, done, _ = mdp.step(
            get_optimal_action(mdp, state_values, s, gamma))
        rewards.append(r)
        if done:
            break
    total_rewards.append(np.sum(rewards))

print("average reward: ", np.mean(total_rewards))
assert(0.6 <= np.mean(total_rewards) <= 0.7)
print("Well done!")

# + id="YfXvk9Op6VfD" colab_type="code" outputId="04c8af09-5b75-46e5-e488-117309515186" executionInfo={"status": "ok", "timestamp": 1590699657865, "user_tz": 240, "elapsed": 66934, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 672}
# Measure agent's average reward
mdp = FrozenLakeEnv(slip_chance=0.2, map_name='8x8')
state_values = value_iteration(mdp)

total_rewards = []
for game_i in range(1000):
    s = mdp.reset()
    rewards = []
    for t in range(100):
        s, r, done, _ = mdp.step(
            get_optimal_action(mdp, state_values, s, gamma))
        rewards.append(r)
        if done:
            break
    total_rewards.append(np.sum(rewards))

print("average reward: ", np.mean(total_rewards))
assert(0.6 <= np.mean(total_rewards) <= 0.8)
print("Well done!")

# + [markdown] id="BYre1ZuB6VfF" colab_type="text"
# ### Submit to coursera
#
# If your submission doesn't finish in 30 seconds, set `verbose=True` and try again.
# + id="e8M3wDPk6VfF" colab_type="code" outputId="1c919e63-527c-4645-d279-f8815c2c28a3" executionInfo={"status": "error", "timestamp": 1590699657866, "user_tz": 240, "elapsed": 66922, "user": {"displayName": "\u5ed6\u58eb\u9f4a", "photoUrl": "", "userId": "15119494860324039567"}} colab={"base_uri": "https://localhost:8080/", "height": 135} from submit import submit_assigment submit_assigment( get_action_value, get_new_state_value, get_optimal_action, value_iteration, <EMAIL>, <TOKEN>, verbose=False, )
Week2_Dynamic Programming/practice_vi.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # python warm-up for statistics and machine learning in life sciences
#
# The goal of this notebook is to provide a warm-up before the course, as well as provide a gentle yet functional intro to some of the libraries we will be using during the course.
#
#
# * [01. Basic python](#basic)
# * [02. numpy and vectorized operations](#numpy)
# * [03. basic plotting](#plot)
# * [04. generating random numbers](#random)
# * [05. statistical testing](#stats)
# * [06. bringing together numpy, numpy.random, and matplotlib](#together)
# * [07. the briefest intro to pandas](#pandas)
#
# ## 00. Installation
#
# This course requires you to install a number of external libraries.
#
# If you have installed python via anaconda, then you should already have most of them installed.
#
#
# Try the code below to check that :

# Import each required third-party library in turn; the first one missing
# raises ImportError, telling you exactly what still needs installing.
import numpy
print('numpy ok')
import seaborn
print('seaborn ok')
import pandas
print('pandas ok')
import sklearn
print('sklearn ok')

# If one of these failed, identify which one and follow the corresponding installation instructions for :
#
# * numpy : in a terminal (Mac/Linux) or the anaconda prompt (Windows) write `conda install numpy` and follow instruction
# * seaborn : same as numpy, but the command is : `conda install seaborn`
# * pandas : same as numpy, but the command is : `conda install pandas`
# * [sklearn](https://scikit-learn.org/stable/install.html)
#
# After an installation re-launch the code above until you have no errors
#
# > Note : you may have to restart the notebook kernel (Kernel>Restart) for the new libraries to be available
#
#
# > Note 2 : alternatively and if you prefer, you can do the installations with `pip`
#
# ## 01. basic python <a class="anchor" id="basic"></a>
#

# +
# build the list of squares 0,1,4,...,81 with an explicit loop first
X = []
for i in range(10):
    X.append( i**2 ) # squared

print(X)

# +
for x in X:
    print(x)
# -

for x in X:
    if x%2 == 1:
        print(x,'is odd')
    else:
        print(x,'is even')

# +
# list comprehension is a very fine way of compressing all this
X = [ i**2 for i in range(10) ]
Xeven = [ x for x in X if x%2 == 0 ]
Xodd = [ x for x in X if x%2 == 1 ]

print( 'X ', X )
print( 'Xeven', Xeven )
print( 'Xodd ', Xodd )
# -

# ## 02. numpy and vectorized operations <a class="anchor" id="numpy"></a>

# +
import numpy as np

# an array can be created from a list
X_array = np.array(X)
print(X_array)
# -

# you can perform operations on an array :

print( X_array / 2 ) # divides each element by 2

print( np.exp(X_array ) ) # exponential

print( np.log(X_array ) ) # logarithm

print( 'shape' , X_array.shape ) # dimensions of the array
print( 'mean ' , np.mean(X_array) ) # mean of the array
print( 'standard deviation' , np.std(X_array) ) # standard deviation of the array

# ### linspace and arange
#
# These functions extend `range` :
# * `np.linspace` : create an array containing an evenly spaced number of points between $a$ and $b$
# * `np.arange` : create an array with numbers going from $a$ to $b$ with a given increment

print( 'linspace 0,2,9 :' , np.linspace(0,2,9) , sep='\t' )
print( 'linspace -0.5,0.5,11 :' , np.linspace(-0.5,0.5,11) , sep='\t' )
print( 'linspace 10,0,11 :' , np.linspace(10,0,11) , sep='\t' )

# NOTE: the first label was fixed -- it previously claimed "arange 0,2,0.1"
# while the code actually computes np.arange(1.5, 2, 0.1).
print( "arange 1.5,2,0.1 :", np.arange(1.5,2,0.1) , sep='\t' )
print( "arange -1,1,0.125 :", np.arange(-1,1,0.125) , sep='\t' )
print( "arange 10,2 :", np.arange(10,2,1) , sep='\t' ) # reverse does not work!

# ## 03. basic plotting <a class="anchor" id="plot"></a>

# +
import matplotlib.pyplot as plt

plt.plot( [0,1,2,3] , [10,5,7,0.2] )
plt.show()
# -

# **Adding color, symbols, ...**
#
# `matplotlib` offers many options to customize the appearance of your plot.
# # Here are the (some) common arguments to `plot()` (which can also be applied to many other graphical representations): # * `color` : could be given as a (red,green,blue) tuple, a [name](https://matplotlib.org/3.1.0/gallery/color/named_colors.html), a hex code, ... (see [here](https://matplotlib.org/tutorials/colors/colors.html) for all the options) # * `marker` : symbols for the data point. `'.'` is a point, `'v'` a down triangle, ... see [here](https://matplotlib.org/3.3.3/api/markers_api.html#module-matplotlib.markers) for the list of possibilities. # * `linestyle` : style of the line. `'-'` is solid, `'--'` is dashed, `''` for no line. See [here](https://matplotlib.org/3.3.3/gallery/lines_bars_and_markers/linestyles.html) for more options # * `linewidth` : width of the lines # * `markersize` : size of the markers # # You are invited to experiment and explore these options. Here are a few examples: # # + y1 = [1,2,3,10,5] y2 = [10,9,7,5.5,6] y3 = [4,3,1.5,1] # green, dashed line, with circle markers plt.plot( y1, color = 'green', marker = 'o', linestyle = '--', linewidth = 2, markersize = 8 ) # blue triangle with no line plt.plot( y2, color = 'blue', marker = 'v', linestyle = '' , markersize = 16 ) # solid orange line plt.plot(y3, color = 'orange', marker = '', linestyle = '-', linewidth = 4 ) plt.show() # - # Note that: # * you can call plot several time in a row to make several lines appear (only `plt.show()` causes the figure to appear) # * the frame of the picture automatically adjust to what it needs to show # **multiple subplots** # # Now would normally be when we show you how to add labels, titles and legends to figures. # # However, the way `matplotlib` is built, it is actually a bit more efficient to first learn how to create multiple subplots. # # # Creating multiple plots is possible with the function `plt.subplots()`. 
# Amon its many arguments, it takes: # * `nrows` : number of subplot rows # * `ncols` : number of subplot columns # * `figsize` : tuple (width,height) of the figure # # This function creates a Figure and an Axes object. # The Axes object can be either : # * a simple Axe is there is 1 row and 1 columns # * a list of Axe objects if there is 1 row and multiple columns, or 1 column and multiple rows # * a list of lists of Axes objects if there is multiple rows and multiple columns # # + y1 = [1,2,3,10,5] y2 = [10,9,7,5.5,6] y3 = [4,3,1.5,1] import matplotlib.pyplot as plt # subplots returns a Figure and an Axes object fig, ax = plt.subplots(nrows=1, ncols=2) # 2 columns and 1 row # ax is a list with two objects. Each object correspond to 1 subplot # accessing to the first column ax[0] ax[0].plot( y1, color = 'green', marker = 'o', linestyle = '--', linewidth = 2, markersize = 8 ) # accessing to the second column ax[1] ax[1].plot( y2, color = 'blue', marker = 'v', linestyle = '' , markersize = 16 ) ax[1].plot( y3, color = 'orange', marker = '', linestyle = '-' ) plt.show() # - # Notice how we call `ax[0].plot(...)` instead of `plt.plot(...)` to specify in which subplots we want to plot. # **multiple subplots - continued** # # Let's see the same thing with several lines and several columns # + y1 = [1,2,3,10,5] y2 = [10,9,7,5.5,6] y3 = [4,3,1.5,1] y4 = [1,2,3,7,5] # 2 columns and 2 rows, and we also set the figure size fig, ax = plt.subplots(nrows=2, ncols=2 , figsize = (12,12)) # ax is a list of two lists with two objects each. 
# accessing to the first row, first column : ax[0][0] ax[0][0].plot( y1, color = 'green', marker = 'o', linestyle = '--', linewidth = 2, markersize = 8 ) # accessing to the first row, second column : ax[0][1] ax[0][1].plot( y2, color = 'blue', marker = 'v', linestyle = '' , markersize = 16 ) # accessing to the second row, first column : ax[1][0] ax[1][0].plot( y3, color = 'orange', marker = 'x', linestyle = '-' ) # accessing to the first row, second column : ax[1][1] ax[1][1].plot( y4, color = 'teal', linestyle = '-.' , linewidth=5 ) plt.show() # - # **setting up labels** # # To set the labels at the x-axis, y-axis and title, we use the method of the Axe object: # * `.set_xlabel(...)` # * `.set_ylabel(...)` # * `.set_title(...) ` # # + y1 = [1,2,3,10,5] y2 = [10,9,7,5.5,6] y3 = [4,3,1.5,1] # subplots returns a Figure and an Axes object fig, ax = plt.subplots(nrows=1, ncols=2 , figsize=(10,5)) # 2 columns and 1 row # accessing to the first column ax[0] ax[0].plot( y1, color = 'green', marker = 'o', linestyle = '--', linewidth = 2, markersize = 8 ) ax[0].set_xlabel('x-axis label') ax[0].set_ylabel('y-axis label') ax[0].set_title('plot 1') # accessing to the second column ax[1] ax[1].plot( y2, color = 'blue', marker = 'v', linestyle = '' , markersize = 16 ) ax[1].plot( y3, color = 'orange', marker = '', linestyle = '-' ) ax[1].set_xlabel('x-axis label') ax[1].set_ylabel('y-axis label') ax[1].set_title('plot 2') plt.show() # - # **setting up a legend** # # Each element we add to the figure using `plot()` can be given a label using the `label` argument. # Then, a legend may be added to the figure using the `legend()` method. # # This `legend()` method can take a `loc` argument that specifies where it should be plotted. # Possible values for this argument are: `'best' , 'upper right' , 'upper left' , 'lower left' , 'lower right' , 'right' , 'center left' , 'center right' , 'lower center' , 'upper center' , 'center'` (the default is `best`). 
# # + fig, ax = plt.subplots(nrows=1, ncols=1 , figsize=(10,5)) # 2 columns and 1 row # NB : with 1 col and 1 row, ax is directly the sole subplot we have # so to call it we just use ax.plot , ax.set_xlabel , ... ax.plot( y1, color = 'green', marker = 'o', linestyle = '--', linewidth = 2 , label = 'line A' ) ax.plot( y2, color = 'blue', marker = 'v', linestyle = '' , markersize = 8 , label = 'line B' ) ax.plot( y3, color = 'orange', marker = '', linestyle = '-' , linewidth = 2 , label = 'line C' ) ax.set_xlabel('x-axis label') ax.set_ylabel('y-axis label') ax.set_title('plot with a legend') #adding a legend in the upper right ax.legend( loc='upper right') plt.show() # - # **additional : writing a figure to a file** # # Writing a matplotlib figure to a file can be achieved simply by replacing the call to `plt.show()` to `plt.savefig(...)`. # # `plt.savefig` takes a number of argument, the most commons are : # * `fname` : name of the file to write the figure. The extension is used to determine the output format (.pdf,.png, .jpg , .svg , ...). Many formats are supported, you can get a list with this command : `plt.gcf().canvas.get_supported_filetypes()` # * `dpi` : dots per inches , useful to set-up when saving to raster formats (ie., pixel-based such as png or jpeg). The actual size of the image is set using the argument `figsize` of `plt.subplots()` # # # > Note : in a jupyter notebook the figure will still be shown, whereas in a standard .py script it will not appear on screen. # # # Here is a demonstration. Apply in on your side and verify that the file `testPlot.png` was created: # + import matplotlib.pyplot as plt y1 = [1,2,3,10,5] y2 = [10,9,7,5.5,6] y3 = [4,3,1.5,1] # subplots returns a Figure and an Axes object fig, ax = plt.subplots(nrows=1, ncols=2 , figsize = (10,6) ) # 2 columns and 1 row # ax is a list with two objects. 
Each object correspond to 1 subplot # accessing to the first column ax[0] ax[0].plot( y1, color = 'green', marker = 'o', linestyle = '--', linewidth = 2, markersize = 8 ) # accessing to the second column ax[1] ax[1].plot( y2, color = 'blue', marker = 'v', linestyle = '' , markersize = 16 ) ax[1].plot( y3, color = 'orange', marker = '', linestyle = '-' ) plt.savefig( 'testPlot.png' , dpi = 90 ) # - # # <br> # # # ## 04. generating random numbers <a class="anchor" id="random"></a> # # # **the basics** # + import numpy.random as rd # random floats between 0 and 1 for i in range(4): print( rd.random() ) # - print( rd.random(size=10) ) # draw directly 10 numbers # **setting the seed : pseudorandomness and reproducibility** rd.seed(42) # setting the seed to 42 print( '1st draw' , rd.random(size=5) ) print( '2nd draw' , rd.random(size=5) ) rd.seed(42) print( 'after resetting seed' , rd.random(size=5) ) # **beyond the uniform distribution** # # numpy offers you quite a large [set of distributions you can draw from](https://docs.scipy.org/doc/numpy-1.15.0/reference/routines.random.html#distributions). # # Let's look at the normal distribution: # + normalDraw = rd.normal(size = 1000 ) print( 'mean ' , np.mean( normalDraw ) ) print( 'stdev' , np.std( normalDraw ) ) # + normalDraw2 = rd.normal( loc = -2 , scale = 3 , size = 300 ) # loc chnages the location (mean), and scale changes the standard deviation print( 'mean ' , np.mean( normalDraw2 ) ) print( 'stdev' , np.std( normalDraw2 ) ) # - # of course, we could want to plot these drawn numbers: plt.hist( normalDraw , alpha = 0.5 , label='loc=0 , scale=1') plt.hist( normalDraw2 , alpha = 0.5 , label='loc=-2 , scale=3') plt.legend() plt.show() # ## 05. statistical testing <a class="anchor" id="stats"></a> # # `numpy.random` let's you draw random numbers ; # `scipy.stats` implements the probability density functions, and Percent point function, as well as the most statistical tests. 
# # + import scipy.stats as stats # plotting the probability density function for 1 of the random draw we just made: x = np.linspace(-10,10,1001) normPDF = stats.norm.pdf( x , loc = -2 , scale = 3 ) plt.hist( normalDraw2 , alpha = 0.5 , label='random draw' , density = True) # don't forget density=True plt.plot(x,normPDF , label='PDF' ) plt.legend() plt.show() # - # We can also get the expected quantiles of a distribution: print( '95% quantile of a Chi-square distribution with 3 degrees of freedom:', stats.chi2.ppf(0.95 , df=3)) print( 'fraction of a Chi-square distribution with 3 degrees of freedom above of equal to 5' , 1 - stats.chi2.cdf( 5 , df=3 ) ) # And you can apply some classical statistical tests: # + # t-test of independance between two random samples: rd.seed(73) s1 = rd.normal( size=67 ) s2 = rd.normal( size=54 , loc = 0.2) testStat , pval = stats.ttest_ind(s1,s2 , equal_var=True) # equal variance : Student's t-test ; unequal : Welch's #almost all of these stat functions return the same test-statistic , pvalue tuple print('result of the t-test') print('\tt:',testStat) print('\tp-value:',pval) # - # **What is our conclusion for these tests results? 
What do you think about this?** # + # Kolmogorov-smirnov test for a chi-square distribution sample = rd.chisquare(df=13 , size = 43) # kstest expect as second argument the cdf function of the reference distribution # this is how to handle the fact that me must set an argument (degree of freedom) refDistribution = stats.chi2(df=13).cdf testStat , pval = stats.kstest( sample , refDistribution ) # alternative : # testStat , pval = stats.kstest( sample , lambda x : stats.chi2.cdf(x , df=13 ) ) print('result of the Kolmogorov-Smirnov test comparing our sample to a Chi-square distribution with 13 degrees of freedom') print('\tK:',testStat) print('\tp-value:',pval) # - # If you are interested, this [webpage](https://machinelearningmastery.com/statistical-hypothesis-tests-in-python-cheat-sheet/) references all implemented tests, with examples. # ## 06. bringing together numpy, numpy.random, and matplotlib <a class="anchor" id="together"></a> # # The random generation function return a numpy array, meaning it is fairly trivial to combine it with other arrays: # # + # combining x = np.sort( rd.normal(loc=170 , scale = 23 , size = 100) ) y_theoretical = 0.75 * x + 100 # simple linear relationship : y = a * x + b measurement_noise = rd.normal(scale = 10 , size = 100) # some noise associated to the measure y_observed = y_theoretical + measurement_noise # observed = expected + noise fig,ax = plt.subplots(figsize=(8,8)) plt.plot( x , y_theoretical , label = 'expected' ) plt.plot( x , y_observed , marker = '.' , linestyle='' , alpha = 0.7 , label = 'observed') plt.legend() plt.show() # - # ## 07. the briefest intro to pandas <a class="anchor" id="pandas"></a> # # `pandas` is a powerful library when doing data analysis, especially in the forms of table. # # Basically, it reimplements R data.frame as a DataFrame object and ties together neatly with the libraries we've just seen. 
# # + import pandas as pd df = pd.read_table( 'beetle.csv' , sep=',' , index_col=0 ) # pandas automatically detects header. df.head() # - Nrows, Ncols = df.shape print( 'number of rows:',Nrows, 'number of columns:', Ncols ) print( 'column names' , df.columns ) df.describe() # select a single column: df['dose'] df[ ['ndied','nalive'] ] # select several columns # **Plotting DataFrame Columns** # # Because `DataFrame` columns are iterable, they can seamlessly be given as argument to `plot()`. # + # plotting the column dose along the x-axis and prop along the y-axis # I use the + marker, with a teal color. plt.plot(df['dose'] , df['prop'] , color = 'teal' , linestyle='' , marker = '+' , markersize=10 ) plt.xlabel( 'dose' ) plt.ylabel( 'proportion of dead' ) plt.show() # - # DataFrame column can be manipulated like numpy array: # + ## we can combine columns using normal operators Odds = df['nalive'] / df['ndied'] # the odds of being alive is nalive / ndead ## adding a new column to the DataFrame is trivial: df['Odds'] = Odds ## we can also apply numpy function to them df['logOdds'] = np.log( df['Odds'] ) plt.plot(df['dose'] , df['logOdds'] , color = 'teal' , linestyle='' , marker = '+' , markersize=10 ) plt.xlabel( 'dose' ) plt.ylabel( 'log Odds' ) plt.show()
scikit_learn_ML/python_notebooks/00_python_warmup.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- # # Fully-Connected Neural Nets # In the previous homework you implemented a fully-connected two-layer neural network on CIFAR-10. The implementation was simple but not very modular since the loss and gradient were computed in a single monolithic function. This is manageable for a simple two-layer network, but would become impractical as we move to bigger models. Ideally we want to build networks using a more modular design so that we can implement different layer types in isolation and then snap them together into models with different architectures. # # In this exercise we will implement fully-connected networks using a more modular approach. For each layer we will implement a `forward` and a `backward` function. The `forward` function will receive inputs, weights, and other parameters and will return both an output and a `cache` object storing data needed for the backward pass, like this: # # ```python # def layer_forward(x, w): # """ Receive inputs x and weights w """ # # Do some computations ... # z = # ... some intermediate value # # Do some more computations ... # out = # the output # # cache = (x, w, z, out) # Values we need to compute gradients # # return out, cache # ``` # # The backward pass will receive upstream derivatives and the `cache` object, and will return gradients with respect to the inputs and weights, like this: # # ```python # def layer_backward(dout, cache): # """ # Receive derivative of loss with respect to outputs and cache, # and compute derivative with respect to inputs. 
# """ # # Unpack cache values # x, w, z, out = cache # # # Use values in cache to compute derivatives # dx = # Derivative of loss with respect to x # dw = # Derivative of loss with respect to w # # return dx, dw # ``` # # After implementing a bunch of layers this way, we will be able to easily combine them to build classifiers with different architectures. # # In addition to implementing fully-connected networks of arbitrary depth, we will also explore different update rules for optimization, and introduce Dropout as a regularizer and Batch Normalization as a tool to more efficiently optimize deep networks. # # + # As usual, a bit of setup import time import numpy as np import matplotlib.pyplot as plt from cs231n.classifiers.fc_net import * from cs231n.data_utils import get_CIFAR10_data from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array from cs231n.solver import Solver # %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading external modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython # %load_ext autoreload # %autoreload 2 def rel_error(x, y): """ returns relative error """ return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y)))) # + # Load the (preprocessed) CIFAR10 data. data = get_CIFAR10_data() for k, v in data.iteritems(): print '%s: ' % k, v.shape # - # # Affine layer: foward # Open the file `cs231n/layers.py` and implement the `affine_forward` function. 
# # Once you are done you can test your implementaion by running the following: # + # Test the affine_forward function num_inputs = 2 input_shape = (4, 5, 6) output_dim = 3 input_size = num_inputs * np.prod(input_shape) weight_size = output_dim * np.prod(input_shape) x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape) w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim) b = np.linspace(-0.3, 0.1, num=output_dim) out, _ = affine_forward(x, w, b) correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297], [ 3.25553199, 3.5141327, 3.77273342]]) # Compare your output with ours. The error should be around 1e-9. print 'Testing affine_forward function:' print 'difference: ', rel_error(out, correct_out) # - # # Affine layer: backward # Now implement the `affine_backward` function and test your implementation using numeric gradient checking. # + # Test the affine_backward function x = np.random.randn(10, 2, 3) w = np.random.randn(6, 5) b = np.random.randn(5) dout = np.random.randn(10, 5) dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout) _, cache = affine_forward(x, w, b) dx, dw, db = affine_backward(dout, cache) # The error should be around 1e-10 print 'Testing affine_backward function:' print 'dx error: ', rel_error(dx_num, dx) print 'dw error: ', rel_error(dw_num, dw) print 'db error: ', rel_error(db_num, db) # - # # ReLU layer: forward # Implement the forward pass for the ReLU activation function in the `relu_forward` function and test your implementation using the following: # + # Test the relu_forward function x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4) out, _ = relu_forward(x) correct_out = np.array([[ 0., 0., 0., 0., ], [ 0., 0., 0.04545455, 0.13636364,], [ 0.22727273, 0.31818182, 0.40909091, 0.5, 
]]) # Compare your output with ours. The error should be around 1e-8 print 'Testing relu_forward function:' print 'difference: ', rel_error(out, correct_out) # - # # ReLU layer: backward # Now implement the backward pass for the ReLU activation function in the `relu_backward` function and test your implementation using numeric gradient checking: # + x = np.random.randn(10, 10) dout = np.random.randn(*x.shape) dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout) _, cache = relu_forward(x) dx = relu_backward(dout, cache) # The error should be around 1e-12 print 'Testing relu_backward function:' print 'dx error: ', rel_error(dx_num, dx) # - # # "Sandwich" layers # There are some common patterns of layers that are frequently used in neural nets. For example, affine layers are frequently followed by a ReLU nonlinearity. To make these common patterns easy, we define several convenience layers in the file `cs231n/layer_utils.py`. # # For now take a look at the `affine_relu_forward` and `affine_relu_backward` functions, and run the following to numerically gradient check the backward pass: # + from cs231n.layer_utils import affine_relu_forward, affine_relu_backward x = np.random.randn(2, 3, 4) w = np.random.randn(12, 10) b = np.random.randn(10) dout = np.random.randn(2, 10) out, cache = affine_relu_forward(x, w, b) dx, dw, db = affine_relu_backward(dout, cache) dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout) dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout) db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout) print 'Testing affine_relu_forward:' print 'dx error: ', rel_error(dx_num, dx) print 'dw error: ', rel_error(dw_num, dw) print 'db error: ', rel_error(db_num, db) # - # # Loss layers: Softmax and SVM # You implemented these loss functions in the last assignment, so we'll give them to you for free here. 
# You should still make sure you understand how they work by looking at the
# implementations in `cs231n/layers.py`.
#
# You can make sure that the implementations are correct by running the following:

# +
# Sanity-check the loss layers: with tiny random scores the SVM loss should be
# near (num_classes - 1) and the softmax loss near log(num_classes); analytic
# gradients are compared against numeric ones.
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)

dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)
loss, dx = svm_loss(x, y)

# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9
print 'Testing svm_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)

dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)

# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print '\nTesting softmax_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
# -

# # Two-layer network
# In the previous assignment you implemented a two-layer neural network in a
# single monolithic class. Now that you have implemented modular versions of
# the necessary layers, you will reimplement the two layer network using these
# modular implementations.
#
# Open the file `cs231n/classifiers/fc_net.py` and complete the implementation
# of the `TwoLayerNet` class. This class will serve as a model for the other
# networks you will implement in this assignment, so read through it to make
# sure you understand the API. You can run the cell below to test your
# implementation.

# +
# N = batch size, D = input dim, H = hidden dim, C = number of classes.
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)

std = 1e-2
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)

print 'Testing initialization ... '
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'

print 'Testing test-time forward pass ... '
# Overwrite the random weights with deterministic linspace values so the
# scores computed below can be compared against the hard-coded reference.
model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T
scores = model.loss(X)  # no labels given -> returns class scores
correct_scores = np.asarray(
    [[11.53165108, 12.2917344,  13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
     [12.05769098, 12.74614105, 13.43459113, 14.1230412,  14.81149128, 15.49994135, 16.18839143],
     [12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'

print 'Testing training loss (no regularization)'
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'

model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'

# Numeric gradient check with and without regularization.
for reg in [0.0, 0.7]:
    print 'Running numeric gradient check with reg = ', reg
    model.reg = reg
    loss, grads = model.loss(X, y)

    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
        print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
# -

# # Solver
# In the previous assignment, the logic for training models was coupled to the
# models themselves. Following a more modular design, for this assignment we
# have split the logic for training models into a separate class.
#
# Open the file `cs231n/solver.py` and read through it to familiarize yourself
# with the API. After doing so, use a `Solver` instance to train a
# `TwoLayerNet` that achieves at least `50%` accuracy on the validation set.

# +
model = TwoLayerNet()
solver = None

##############################################################################
# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least  #
# 50% accuracy on the validation set.                                        #
##############################################################################
# NOTE(review): the bare `Solver(model, data)` below is immediately replaced
# by the fully-configured instance -- the first assignment appears redundant.
solver = Solver(model, data)
solver = Solver(model, data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 1e-3,
                },
                lr_decay=0.95,
                print_every=100)
solver.train()
print solver.best_val_acc
##############################################################################
#                             END OF YOUR CODE                               #
##############################################################################
# -

# +
# Run this cell to visualize training loss and train / val accuracy

plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')

plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')  # 50% target line
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()
# -

# # Multilayer network
# Next you will implement a fully-connected network with an arbitrary number
# of hidden layers.
#
# Read through the `FullyConnectedNet` class in the file
# `cs231n/classifiers/fc_net.py`.
#
# Implement the initialization, the forward pass, and the backward pass. For
# the moment don't worry about implementing dropout or batch normalization; we
# will add those features soon.
# ## Initial loss and gradient check
# As a sanity check, run the following to check the initial loss and to
# gradient check the network both with and without regularization. Do the
# initial losses seem reasonable?
#
# For gradient checking, you should expect to see errors around 1e-6 or less.

# +
# Small synthetic problem: 2 examples, 15 input dims, two hidden layers.
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))

for reg in [0, 3.14]:
    print 'Running check with reg = ', reg
    model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                              reg=reg, weight_scale=5e-2, dtype=np.float64)

    loss, grads = model.loss(X, y)
    print 'Initial loss: ', loss

    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
        print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
# -

# As another sanity check, make sure you can overfit a small dataset of 50
# images. First we will try a three-layer network with 100 units in each
# hidden layer. You will need to tweak the learning rate and initialization
# scale, but you should be able to overfit and achieve 100% training accuracy
# within 20 epochs.

# +
# TODO: Use a three-layer Net to overfit 50 training examples.

num_train = 50
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}

# Larger-than-default weight scale helps the tiny net overfit quickly.
weight_scale = 1e-3 * 50
learning_rate = 1e-3
model = FullyConnectedNet([100, 100],
                          weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
                print_every=100, num_epochs=20, batch_size=num_train,
                update_rule='sgd',
                lr_decay=0.95,
                optim_config={
                    'learning_rate': learning_rate,
                })
solver.train()

plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
# -

# Now try to use a five-layer network with 100 units on each layer to overfit
# 50 training examples. Again you will have to adjust the learning rate and
# weight initialization, but you should be able to achieve 100% training
# accuracy within 20 epochs.

# +
# TODO: Use a five-layer Net to overfit 50 training examples.

num_train = 50
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}

# The deeper net needs a noticeably larger learning rate / weight scale.
learning_rate = 0.02
weight_scale = 0.03
model = FullyConnectedNet([100, 100, 100, 100],
                          weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
                print_every=10, num_epochs=20, batch_size=num_train,
                update_rule='sgd',
                lr_decay = 0.95,
                optim_config={
                    'learning_rate': learning_rate,
                })
solver.train()

plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
# -

# # Inline question:
# Did you notice anything about the comparative difficulty of training the
# three-layer net vs training the five layer net?
#
# # Answer:
# Five layer net is more difficult to tune hyper-parameters.
#

# # Update rules
# So far we have used vanilla stochastic gradient descent (SGD) as our update
# rule. More sophisticated update rules can make it easier to train deep
# networks. We will implement a few of the most commonly used update rules and
# compare them to vanilla SGD.

# # SGD+Momentum
# Stochastic gradient descent with momentum is a widely used update rule that
# tends to make deep networks converge faster than vanilla stochastic gradient
# descent.
#
# Open the file `cs231n/optim.py` and read the documentation at the top of the
# file to make sure you understand the API. Implement the SGD+momentum update
# rule in the function `sgd_momentum` and run the following to check your
# implementation. You should see errors less than 1e-8.
# + from cs231n.optim import sgd_momentum N, D = 4, 5 w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D) dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D) v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D) config = {'learning_rate': 1e-3, 'velocity': v} next_w, _ = sgd_momentum(w, dw, config=config) expected_next_w = np.asarray([ [ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789], [ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526], [ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263], [ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]]) expected_velocity = np.asarray([ [ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158], [ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105], [ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053], [ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]]) print 'next_w error: ', rel_error(next_w, expected_next_w) print 'velocity error: ', rel_error(expected_velocity, config['velocity']) # - # Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster. 
# + num_train = 4000 small_data = { 'X_train': data['X_train'][:num_train], 'y_train': data['y_train'][:num_train], 'X_val': data['X_val'], 'y_val': data['y_val'], } solvers = {} for update_rule in ['sgd', 'sgd_momentum']: print 'running with ', update_rule model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2) solver = Solver(model, small_data, num_epochs=5, batch_size=100, update_rule=update_rule, optim_config={ 'learning_rate': 1e-3, }, verbose=True) solvers[update_rule] = solver solver.train() print plt.subplot(3, 1, 1) plt.title('Training loss') plt.xlabel('Iteration') plt.subplot(3, 1, 2) plt.title('Training accuracy') plt.xlabel('Epoch') plt.subplot(3, 1, 3) plt.title('Validation accuracy') plt.xlabel('Epoch') for update_rule, solver in solvers.iteritems(): plt.subplot(3, 1, 1) plt.plot(solver.loss_history, 'o', label=update_rule) plt.subplot(3, 1, 2) plt.plot(solver.train_acc_history, '-o', label=update_rule) plt.subplot(3, 1, 3) plt.plot(solver.val_acc_history, '-o', label=update_rule) for i in [1, 2, 3]: plt.subplot(3, 1, i) plt.legend(loc='upper center', ncol=4) plt.gcf().set_size_inches(15, 15) plt.show() # - # # RMSProp and Adam # RMSProp [1] and Adam [2] are update rules that set per-parameter learning rates by using a running average of the second moments of gradients. # # In the file `cs231n/optim.py`, implement the RMSProp update rule in the `rmsprop` function and implement the Adam update rule in the `adam` function, and check your implementations using the tests below. # # [1] <NAME> and <NAME>. "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude." COURSERA: Neural Networks for Machine Learning 4 (2012). # # [2] <NAME> and <NAME>, "Adam: A Method for Stochastic Optimization", ICLR 2015. 
# +
# Test RMSProp implementation; you should see errors less than 1e-7
from cs231n.optim import rmsprop

N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)

config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)

expected_next_w = np.asarray([
    [-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
    [-0.132737,   -0.08078555, -0.02881884,  0.02316247,  0.07515774],
    [ 0.12716641,  0.17918792,  0.23122175,  0.28326742,  0.33532447],
    [ 0.38739248,  0.43947102,  0.49155973,  0.54365823,  0.59576619]])
expected_cache = np.asarray([
    [ 0.5976,     0.6126277,  0.6277108,  0.64284931, 0.65804321],
    [ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],
    [ 0.75037008, 0.7659518,  0.78158892, 0.79728144, 0.81302936],
    [ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926    ]])

# The update rule is expected to write the new second-moment cache into config.
print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'cache error: ', rel_error(expected_cache, config['cache'])
# -

# +
# Test Adam implementation; you should see errors around 1e-7 or less
from cs231n.optim import adam

N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)

# t=5 exercises Adam's bias-correction terms at a non-trivial timestep.
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)

expected_next_w = np.asarray([
    [-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
    [-0.1380274,  -0.08544591, -0.03286534,  0.01971428,  0.0722929 ],
    [ 0.1248705,   0.17744702,  0.23002243,  0.28259667,  0.33516969],
    [ 0.38774145,  0.44031188,  0.49288093,  0.54544852,  0.59801459]])
expected_v = np.asarray([
    [ 0.69966,    0.68908382, 0.67851319, 0.66794809, 0.65738853,],
    [ 0.64683452, 0.63628604, 0.6257431,  0.61520571, 0.60467385,],
    [ 0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767,],
    [ 0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966,   ]])
expected_m = np.asarray([
    [ 0.48,       0.49947368, 0.51894737, 0.53842105, 0.55789474],
    [ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],
    [ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],
    [ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85      ]])

print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'v error: ', rel_error(expected_v, config['v'])
print 'm error: ', rel_error(expected_m, config['m'])
# -

# Once you have debugged your RMSProp and Adam implementations, run the
# following to train a pair of deep networks using these new update rules:

# +
# NOTE(review): this cell reuses `small_data` and `solvers` defined in the
# earlier SGD-vs-momentum cell, so it must be run after that cell.
learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
    print 'running with ', update_rule
    model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)

    solver = Solver(model, small_data,
                    num_epochs=5, batch_size=100,
                    update_rule=update_rule,
                    optim_config={
                        'learning_rate': learning_rates[update_rule]
                    },
                    verbose=True)
    solvers[update_rule] = solver
    solver.train()
    print

plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')

plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')

plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')

# Plots all four update rules together (sgd and sgd_momentum are still in
# `solvers` from the previous comparison).
for update_rule, solver in solvers.iteritems():
    plt.subplot(3, 1, 1)
    plt.plot(solver.loss_history, 'o', label=update_rule)

    plt.subplot(3, 1, 2)
    plt.plot(solver.train_acc_history, '-o', label=update_rule)

    plt.subplot(3, 1, 3)
    plt.plot(solver.val_acc_history, '-o', label=update_rule)

for i in [1, 2, 3]:
    plt.subplot(3, 1, i)
    plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
# -

# # Train a good model!
# Train the best fully-connected model that you can on CIFAR-10, storing your
# best model in the `best_model` variable. We require you to get at least 50%
# accuracy on the validation set using a fully-connected net.
# # If you are careful it should be possible to get accuracies above 55%, but we don't require it for this part and won't assign extra credit for doing so. Later in the assignment we will ask you to train the best convolutional network that you can on CIFAR-10, and we would prefer that you spend your effort working on convolutional nets rather than fully-connected nets. # # You might find it useful to complete the `BatchNormalization.ipynb` and `Dropout.ipynb` notebooks before completing this part, since those techniques can help you train powerful models. # + best_model = None ################################################################################ # TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might # # batch normalization and dropout useful. Store your best model in the # # best_model variable. # ################################################################################ best_model = TwoLayerNet() best_model = FullyConnectedNet([100], weight_scale=1e-3, dtype=np.float64) solver = Solver(best_model, data, update_rule='sgd', optim_config={ 'learning_rate': 1e-3, }, lr_decay=0.95, print_every=100 ) solver.train() print 'best valid accuracy:', solver.best_val_acc ################################################################################ # END OF YOUR CODE # ################################################################################ # - # # Test you model # Run your best model on the validation and test sets. You should achieve above 50% accuracy on the validation set. y_test_pred = np.argmax(best_model.loss(data['X_test']), axis=1) y_val_pred = np.argmax(best_model.loss(data['X_val']), axis=1) print 'Validation set accuracy: ', (y_val_pred == data['y_val']).mean() print 'Test set accuracy: ', (y_test_pred == data['y_test']).mean()
assignment2/FullyConnectedNets.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.10 64-bit
#     name: python3
# ---

# # patient
#
# The patient table is a core part of the eICU-CRD and contains all
# information related to tracking patient unit stays. The table also contains
# patient demographics and hospital level information.

# +
# Import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
import getpass
import pdvega

# for configuring connection
from configobj import ConfigObj
import os

# %matplotlib inline

# +
# Create a database connection using settings from config file
config='../db/config.ini'

# connection info -- falls back to local defaults when no config file exists
conn_info = dict()
if os.path.isfile(config):
    config = ConfigObj(config)
    conn_info["sqluser"] = config['username']
    conn_info["sqlpass"] = config['password']
    conn_info["sqlhost"] = config['host']
    conn_info["sqlport"] = config['port']
    conn_info["dbname"] = config['dbname']
    conn_info["schema_name"] = config['schema_name']
else:
    conn_info["sqluser"] = 'postgres'
    conn_info["sqlpass"] = ''
    conn_info["sqlhost"] = 'localhost'
    conn_info["sqlport"] = 5432
    conn_info["dbname"] = 'eicu'
    conn_info["schema_name"] = 'public,eicu_crd'

# Connect to the eICU database
print('Database: {}'.format(conn_info['dbname']))
print('Username: {}'.format(conn_info["sqluser"]))
if conn_info["sqlpass"] == '':
    # try connecting without password, i.e. peer or OS authentication
    try:
        if (conn_info["sqlhost"] == 'localhost') & (conn_info["sqlport"]=='5432'):
            con = psycopg2.connect(dbname=conn_info["dbname"],
                                   user=conn_info["sqluser"])
        else:
            con = psycopg2.connect(dbname=conn_info["dbname"],
                                   host=conn_info["sqlhost"],
                                   port=conn_info["sqlport"],
                                   user=conn_info["sqluser"])
    # NOTE(review): bare except -- any failure (not just auth) triggers the
    # password prompt below.
    except:
        conn_info["sqlpass"] = getpass.getpass('Password: ')
        con = psycopg2.connect(dbname=conn_info["dbname"],
                               host=conn_info["sqlhost"],
                               port=conn_info["sqlport"],
                               user=conn_info["sqluser"],
                               password=conn_info["sqlpass"])
else:
    con = psycopg2.connect(dbname=conn_info["dbname"],
                           host=conn_info["sqlhost"],
                           port=conn_info["sqlport"],
                           user=conn_info["sqluser"],
                           password=conn_info["sqlpass"])

# Prefix applied to every query so tables resolve inside the eICU schema.
query_schema = 'set search_path to ' + conn_info['schema_name'] + ';'
# -

# Total number of rows expected in the patient table; used as a sanity check
# at the end of this notebook.
NUM_PATIENTS = 200859

# ## uniquePid
#
# The `uniquePid` column identifies a single patient across multiple stays.
# Let's look at a single `uniquepid`.

# +
uniquepid = '002-33870'

query = query_schema + """
select *
from patient
where uniquepid = '{}'
""".format(uniquepid)

df = pd.read_sql_query(query, con)
df.head()
# -

# Here we see two unit stays for a single patient. Note also that both unit
# stays have the same `patienthealthsystemstayid` - this indicates that they
# occurred within the same hospitalization.
#
# We can see the `unitstaytype` was 'admit' for one stay, and
# 'stepdown/other' for another. Other columns can give us more information.
# + pusid = '141178' query = query_schema + """ select * from patient where patientunitstayid = '{}' """.format(pusid) df = pd.read_sql_query(query, con) df # + pusid = '141178' query = query_schema + """ select * from nursecharting where patientunitstayid = '{}' """.format(pusid) df = pd.read_sql_query(query, con) df # + pusid = '141178' query = query_schema + """ select * from lab where patientunitstayid = '{}' """.format(pusid) df = pd.read_sql_query(query, con) df # - df[['patientunitstayid', 'wardid', 'unittype', 'unitstaytype', 'hospitaladmitoffset', 'unitdischargeoffset']] # Note that it's not explicitly obvious which stay occurred first. Earlier stays will be closer to hospital admission, and therefore have a *higher* hospitaladmitoffset. Above, the stay with a `hospitaladmitoffset` of -14 was first (occurring 14 minutes after hospital admission), followed by the next stay with a `hospitaladmitoffset` of 22 (which occurred 22 minutes after hospital admission). Practically, we wouldn't consider the first admission a "real" ICU stay, and it's likely an idiosyncrasy of the administration system at this particular hospital. Notice how both rows have the same `wardid`. # ## Age # # As ages over 89 are required to be deidentified by HIPAA, the `age` column is actually a string field, with ages over 89 replaced with the string value '> 89'. # + query = query_schema + """ select age, count(*) as n from patient group by age order by n desc """ df = pd.read_sql_query(query, con) df.head() # - # As is common in eICU-CRD, there are a subset of hospitals who routinely utilize this portion of the medical record (and thus have 90-100% data completion), while there are other hospitals who rarely use this interface and thus have poor data completion (0-10%). # ## unitdischargestatus # Identifies whether the patient is alive or dead when discharged from ICU. # There is a similar one for hospital discharge. 
# # + query = query_schema + """ select unitdischargestatus, count(*) as n from patient group by unitdischargestatus order by n desc """ df = pd.read_sql_query(query, con) df.head() # + unitdischargestatus = 'Expired' query = query_schema + """ select * from patient where unitdischargestatus = '{}' order by patienthealthsystemstayid """.format(unitdischargestatus) df = pd.read_sql_query(query, con) df.head() # - # ## patienthealthsystemstayid # Entries with the same ID indicate that there is multiple ICU visits during the same hospitalization. # + query = query_schema + """ select patienthealthsystemstayid, count(*) as n from patient group by patienthealthsystemstayid order by n desc """ df = pd.read_sql_query(query, con) df.head() # + patienthealthsystemstayid = '590180' query = query_schema + """ select * from patient where patienthealthsystemstayid = '{}' order by hospitaladmitoffset """.format(patienthealthsystemstayid) df = pd.read_sql_query(query, con) df.head() df[['patientunitstayid', 'wardid', 'unittype', 'unitstaytype', 'hospitaladmitoffset', 'unitdischargeoffset']] # - # ## unitType # Type of ICU the patients stayed in. # + query = query_schema + """ select unitType, count(*) as n from patient group by unitType order by n desc """ df = pd.read_sql_query(query, con) assert df['n'].sum() == NUM_PATIENTS, f"{df['n'].sum()} =/= {NUM_PATIENTS}" df # -
notebooks/patient.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import numpy as np import matplotlib.pyplot as plt import matplotlib.dates as mdates import datetime as dt from matplotlib.finance import candlestick_ohlc import tushare as ts start = dt.datetime(2015, 7, 1) data = pd.io.data.DataReader('AAPL', 'yahoo', start) data = data.reset_index() data['Date2'] = data['Date'].apply(lambda d: mdates.date2num(d.to_pydatetime())) tuples = [tuple(x) for x in data[['Date2','Open','High','Low','Close']].values] fig, ax = plt.subplots() ax.xaxis_date() ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y-%m-%d")) plt.xticks(rotation=45) plt.xlabel("Date") plt.ylabel("Price") plt.title("AAPL") candlestick_ohlc(ax, tuples, width=.6, colorup='g', alpha =.4);
util/stick_pic.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Train a small CNN on CIFAR-10 with a train/dev split, then evaluate on the
# test set. NOTE(review): this notebook hard-codes "cuda" everywhere, so it
# requires a CUDA-capable GPU to run.

import numpy as np
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt

# Light augmentation (random flip / occasional grayscale) on train only; both
# pipelines normalize each RGB channel to roughly [-1, 1].
transform = {
    "train": transforms.Compose([
        transforms.RandomHorizontalFlip(0.5),
        transforms.RandomGrayscale(0.1),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]),
    "test": transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])}

# +
batch_size = 100
train_data = datasets.CIFAR10('data3', train=True, download=True,
                              transform=transform["train"])
test_data = datasets.CIFAR10('data3', train=False, download=True,
                             transform=transform["test"])

# +
# Hold out 20% of the training indices as a dev (validation) split.
dev_size = 0.2
idx = list(range(len(train_data)))
np.random.shuffle(idx)
split_size = int(np.floor(dev_size * len(train_data)))
train_idx, dev_idx = idx[split_size:], idx[:split_size]

train_sampler = SubsetRandomSampler(train_idx)
dev_sampler = SubsetRandomSampler(dev_idx)
# -

# Both train and dev loaders read from train_data; the samplers keep the two
# index sets disjoint. Note the dev images therefore also get train-time
# augmentation.
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           sampler=train_sampler)
dev_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                         sampler=dev_sampler)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size)


class CNN(nn.Module):
    # Three conv/batchnorm/pool stages (3->10->20->40 channels, each pool
    # halving 32x32 -> 4x4), then two fully-connected layers with dropout.
    # forward() returns log-probabilities (log_softmax), matching the NLLLoss
    # used below.
    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 10, 3, 1, 1)
        self.norm1 = nn.BatchNorm2d(10)
        self.conv2 = nn.Conv2d(10, 20, 3, 1, 1)
        self.norm2 = nn.BatchNorm2d(20)
        self.conv3 = nn.Conv2d(20, 40, 3, 1, 1)
        self.norm3 = nn.BatchNorm2d(40)
        self.pool = nn.MaxPool2d(2, 2)
        self.linear1 = nn.Linear(40 * 4 * 4, 100)
        self.norm4 = nn.BatchNorm1d(100)
        self.linear2 = nn.Linear(100, 10)
        self.dropout = nn.Dropout(0.2)

    def forward(self, x):
        x = self.pool(self.norm1(F.relu(self.conv1(x))))
        x = self.pool(self.norm2(F.relu(self.conv2(x))))
        x = self.pool(self.norm3(F.relu(self.conv3(x))))
        # Flatten the 40x4x4 feature maps for the fully-connected layers.
        x = x.view(-1, 40 * 4 * 4)
        x = self.dropout(x)
        x = self.norm4(F.relu(self.linear1(x)))
        x = self.dropout(x)
        x = F.log_softmax(self.linear2(x), dim=1)
        return x


model = CNN().to("cuda")
loss_function = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
epochs = 100

# +
train_losses, dev_losses, train_acc, dev_acc = [], [], [], []
x_axis = []

for e in range(1, epochs+1):
    losses = 0
    acc = 0
    iterations = 0

    model.train()
    for data, target in train_loader:
        iterations += 1

        pred = model(data.to("cuda"))
        loss = loss_function(pred, target.to("cuda"))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses += loss.item()

        # Accuracy per batch; exp() turns log-probabilities back into
        # probabilities before taking the top class.
        p = torch.exp(pred)
        top_p, top_class = p.topk(1, dim=1)
        acc += accuracy_score(target.to("cpu"), top_class.to("cpu"))

    dev_losss = 0
    dev_accs = 0
    iter_2 = 0

    # Evaluate on the dev split only on the first epoch and every 5th epoch.
    if e%5 == 0 or e == 1:
        x_axis.append(e)

        with torch.no_grad():
            model.eval()

            for data_dev, target_dev in dev_loader:
                iter_2 += 1

                dev_pred = model(data_dev.to("cuda"))
                dev_loss = loss_function(dev_pred, target_dev.to("cuda"))
                dev_losss += dev_loss.item()

                dev_p = torch.exp(dev_pred)
                top_p, dev_top_class = dev_p.topk(1, dim=1)
                dev_accs += accuracy_score(target_dev.to("cpu"),
                                           dev_top_class.to("cpu"))

        # Batch-averaged metrics for this epoch.
        train_losses.append(losses/iterations)
        dev_losses.append(dev_losss/iter_2)
        train_acc.append(acc/iterations)
        dev_acc.append(dev_accs/iter_2)

        print("Epoch: {}/{}.. ".format(e, epochs),
              "Training Loss: {:.3f}.. ".format(losses/iterations),
              "Validation Loss: {:.3f}.. ".format(dev_losss/iter_2),
              "Training Accuracy: {:.3f}.. ".format(acc/iterations),
              "Validation Accuracy: {:.3f}".format(dev_accs/iter_2))
# -

plt.plot(x_axis,train_losses, label='Training loss')
plt.plot(x_axis, dev_losses, label='Validation loss')
plt.legend(frameon=False)
plt.show()

plt.plot(x_axis, train_acc, label="Training accuracy")
plt.plot(x_axis, dev_acc, label="Validation accuracy")
plt.legend(frameon=False)
plt.show()

# Final evaluation on the held-out test set (mean of per-batch accuracies).
model.eval()
iter_3 = 0
acc_test = 0
for data_test, target_test in test_loader:
    iter_3 += 1
    test_pred = model(data_test.to("cuda"))
    test_pred = torch.exp(test_pred)
    top_p, top_class_test = test_pred.topk(1, dim=1)
    acc_test += accuracy_score(target_test.to("cpu"), top_class_test.to("cpu"))
print(acc_test/iter_3)
Activity03/Activity03.GPU.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # В этом блокноте я запускаю все эксперименты по сжатию модели (а результаты оцениваю в соседнем). # # Для запуска могут требоваться библиотеки, не указанные в requirements. # original_path = 'C:/Users/ddale/Downloads/NLP/rusvectores/model.model' exp_path = 'C:/Users/ddale/Downloads/NLP/compress_ft/' # + import gensim import numpy as np import gc import matplotlib.pyplot as plt # %matplotlib inline # + import os import sys def add_sys_path(p): p = os.path.abspath(p) if p not in sys.path: sys.path.append(p) # - add_sys_path('..') # + from importlib import reload import compress_fasttext reload(compress_fasttext) import compress_fasttext.utils reload(compress_fasttext.utils) from compress_fasttext.utils import getsize import compress_fasttext.prune reload(compress_fasttext.prune) from compress_fasttext.prune import prune_ngrams import compress_fasttext.navec_like reload(compress_fasttext.navec_like) import compress_fasttext.quantization reload(compress_fasttext.quantization) from compress_fasttext.quantization import quantize import compress_fasttext.compress reload(compress_fasttext.compress) from compress_fasttext.compress import make_new_fasttext_model, quantize_ft, svd_ft, prune_ft, prune_ft_freq # - from compress_fasttext.utils import mb import compress_fasttext.evaluation reload(compress_fasttext.evaluation) from compress_fasttext.evaluation import make_evaluator # ## Быстрый выигрыш: fp16 и удаление ненужных матриц big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(original_path) big_ft.adjust_vectors() print(mb(big_ft)) ev = make_evaluator(big_ft, '../data/toy_corpora/') print(ev(big_ft)) # Эксперимент первый: удаляем ненужную матрицу с векторами слов и сокращаем точность до полу-флоата. 
# # На значениях векторов это не сказывается *примерно никак*, а модель худеет вдвое. ft_fp16 = make_new_fasttext_model(big_ft, big_ft.vectors.astype(np.float16), big_ft.vectors_ngrams.astype(np.float16)) print(mb(ft_fp16)) original_size = mb(big_ft) fp16_size = mb(ft_fp16) fp16_sim = ev(ft_fp16) print(fp16_sim) ft_fp16.save(exp_path + 'ft_fp16.bin') ft_fp16_words = prune_ft(big_ft, new_vocab_size=len(big_ft.vocab), new_ngrams_size=1) ft_fp16_words.vectors_ngrams = ft_fp16_words.vectors_ngrams * 0 print(mb(ft_fp16_words)) print(ev(ft_fp16_words)) ft_fp16_words.save(exp_path + 'ft_fp16_words.bin') # + top_vocab, top_vectors = prune_vocab(ft, new_vocab_size=0) new_ngrams = big_ft.vectors_ngrams.astype(np.float16) ft_fp16_ngrams = make_new_fasttext_model( big_ft, new_vectors=top_vectors, new_vectors_ngrams=new_ngrams, new_vocab=top_vocab, ) print(mb(ft_fp16_ngrams)) print(ev(ft_fp16_ngrams)) ft_fp16_ngrams.save(exp_path + 'ft_fp16_ngrams.bin') # - # ## Сокращение размерности # По существу, тут используется старая добрая матричная декомпозиция (truncated SVD). # # Широкая матрица из дробных чисел приближенно представляется как произведение двух узких. # + dimred_sizes = [] dimred_sims = [] dimred_dims = [150, 100, 50, 25, 10, 5] for d in dimred_dims: model = svd_ft(big_ft, n_components=d) s = mb(model) q = vecs_similarity(big_ft, model) dimred_sizes.append(s) dimred_sims.append(q) print(d, s, q) model.save(exp_path + 'ft_dimred_{}.bin'.format(d)) gc.collect() # - gc.collect() plt.plot(dimred_sizes, dimred_sims); plt.xlabel('model size, mb'); plt.ylabel('old/new vector similarity'); # ## Квантизация # В базовом варианте квантизация - это следующий логичный шаг после перехода от float32 к float16. Теперь мы заменим float'ы в матрице эмбеддингов на int'ы! Но использоваться они будут только для хранения, а при применении эмбеддинга каждому int'у будет сопоставляться дробное число, взятое из специальной матрицы-справочника. 
Получить такой справочник можно с помощью кластеризации (типа k-means). # # Идея о кластеризации может навести на следующий шаг: а давайте сопоставим int'ы не отдельным числам-компонентам эмбеддинга, а их последовательностям. Например, если кодироваться будут последовательности из 3 чисел, то 300-мерный вектор можно будет приближенно описать 100 целочисленными кодами. Такой подход называется product quantization, и он позволяет подойти к задаче сокращения размерности с совершенно иной стороны, чем матричная декомпозиция. # # # ``` # 300 976.6052942276001 0.9934673549560236 # 150 518.0685205459595 0.9764971832428561 # 100 365.2229471206665 0.949259066389255 # 50 212.37737369537354 0.85697155924806 # 25 135.95458698272705 0.7258812834258547 # 10 90.10091495513916 0.5838264818986315 # 5 74.81635761260986 0.5129448006057942 # ``` # + pq_sizes = [] pq_sims = [] pq_dims = [300, 150, 100, 50, 25, 10, 5] for d in pq_dims: model = quantize_ft(big_ft, qdim=d, centroids=255, sample=10_000) s = mb(model) q = vecs_similarity(big_ft, model) pq_sizes.append(s) pq_sims.append(q) print(d, s, q) model.save(exp_path + 'ft_pq_{}.bin'.format(d)) gc.collect() # + plt.plot(dimred_sizes, dimred_sims) plt.plot(pq_sizes, pq_sims) plt.scatter([original_size], [1]) plt.scatter([fp16_size], [fp16_sim]) plt.legend(['svd', 'pq', 'original', 'fp16']) #plt.xscale('log') plt.xlabel('model size, mb') plt.ylabel('old/new vector similarity'); # - # ## Уменьшение словаря # Мы уже попробовали уменьшить матрицы fasttext-модели в ширину (с помощью декомпозиции) и "в глубину" (округляя их значения). Но ведь можно уменьшать матрицы и в длину, просто избавляясь от некоторых строк! Fasttext позволяет делать это относительно безнаказанно, ведь представление незнакомого слова можно построить по его n-граммам, а к самим n-граммам применен hashing trick. 
n_vecs = big_ft.vectors.shape[0] n_grams = big_ft.vectors_ngrams.shape[0] print(n_vecs, n_grams) # Честно говоря, тут творится какая-то самая магическая магия, потому что теперь качество модели и размер связаны совсем немонотонно - они зависят от числа коллизий, а оно зависит от размера хэшей... # При сжатии модели с коэффициентом 1.0 я получаю тот же самый размер модели, но совершенно ужасную точность. # # ПОЧЕМУ??? # + trim_sizes = [] trim_sims = [] trim_mults = [1.0, 0.5, 0.2, 0.1, 0.05, 0.02, 0.01, 0.005, 0.002, 0.001] for d in trim_mults: model = trim_ft(big_ft, new_ngrams_size=int(d*n_grams), new_vocab_size=int(d*n_vecs), fp16=True) s = mb(model) q = vecs_similarity(big_ft, model) trim_sizes.append(s) trim_sims.append(q) print('{:6.6} {:6.6} {:6.4}'.format(d, s, q)) model.save(exp_path + 'ft_trim_{}.bin'.format(d)) gc.collect() # - plt.plot(trim_sizes, trim_sims); plt.xlabel('model size, mb'); plt.ylabel('old/new vector similarity'); # # Отбор n-грам # + big_n = big_ft.vectors_ngrams.shape[0] small_n = 10_000 new_to_old_buckets, old_hash_count = compress_fasttext.prune.count_buckets( big_ft, list(big_ft.vocab.keys()) , small_n ) print(big_n) print(len(old_hash_count)) # + trimf_sizes = [] trimf_sims = [] trimf_mults = [1.0, 0.5, 0.2, 0.1, 0.05, 0.02, 0.01, 0.005, 0.002, 0.001] for d in trimf_mults: model = prune_ft_freq(big_ft, new_ngrams_size=int(d*len(old_hash_count)), new_vocab_size=int(d*n_vecs), fp16=True, pq=False) s = mb(model) q = ev(model) trimf_sizes.append(s) trimf_sims.append(q) print('{:6.6} {:6.6} {:6.4}'.format(d, s, q)) model.save(exp_path + 'ft_prune_freq_{}.bin'.format(d)) gc.collect() # - # # Разбираемся в странностях сжатия # # Одной странностью была бага, которую я посадил при пересортировке словаря. # # Другая странность - что однобуквенные слова в словарь обученной модели не входят, и оттого совершенно теряются. 
# ft = big_ft gc.collect() model = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_trim_{}.bin'.format(1.0)) from compress_fasttext.evaluation import cosine, vocabulary_from_files tv = vocabulary_from_files('../data/toy_corpora/') len(tv) lost = set() for w in tv: c = cosine(ft[w], model[w]) if c < 0.95 and w not in lost: lost.add(w) print('{:20} {:+4.4} '.format(w, c)) # Из 2 миллионов строк используются, кажется, только 330 тысяч. Вот это расточительность! # + big_n = big_ft.vectors_ngrams.shape[0] small_n = 10_000 new_to_old_buckets, old_hash_count = compress_fasttext.prune.count_buckets( big_ft, list(big_ft.vocab.keys()) + ['и', 'а', 'у', 'в'], small_n ) print(big_n) print(len(old_hash_count)) # - import pandas as pd cvs = pd.Series(list(old_hash_count.values())).sort_values(ascending=False) cvs.quantile([0.5, 0.75, 0.9, 0.95, 0.99]) plt.plot(cvs.values.cumsum()); # # Гибридная моделька ft_prune_01 = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_trim_{}.bin'.format(0.1)) print(mb(ft_prune_01)) print(vecs_similarity(big_ft, ft_prune_01)) ft_prune_01_pq_100 = quantize_ft(ft_prune_01, qdim=100, centroids=255, sample=None) print(mb(ft_prune_01_pq_100)) print(vecs_similarity(big_ft, ft_prune_01_pq_100)) ft_prune_01_pq_100.save(exp_path + 'ft_prune_0.1_pq_100.bin') # + ft_prune_05 = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_trim_{}.bin'.format(0.5)) print(mb(ft_prune_05)) print(vecs_similarity(big_ft, ft_prune_05)) ft_prune_05_pq_100 = quantize_ft(ft_prune_05, qdim=100, centroids=255, sample=None) print(mb(ft_prune_05_pq_100)) print(vecs_similarity(big_ft, ft_prune_05_pq_100)) # - ft_prune_05_pq_100.save(exp_path + 'ft_prune_0.5_pq_100.bin') ftc2 = prune_ft_freq(big_ft, new_vocab_size=20_000, new_ngrams_size=100_000, pq=True, qdim=100) print(mb(ftc2)) print(ev(ftc2)) ftc2.save(exp_path + 'ft_freqprune_100K_20K_pq_100.bin') # ### Подбор параметров # Делаем целый парад гибридных моделек (это можно было бы на самом 
деле заметно ускорить, копируя словарики, но чё-то мне лень). # # 4 * 3 * 4 = 48 моделек придётся сейчас положить мне на диск. Но ладно.

from compress_fasttext.prune import prune_ngrams, prune_vocab, count_buckets, RowSparseMatrix


def prune_ft_freq(ft, new_vocab_size=20_000, new_ngrams_size=100_000, fp16=True, pq=True, qdim=100, centroids=255):
    # Shrink a fasttext model by keeping only the most frequently hit n-gram
    # buckets and the top of the vocabulary, optionally compressing the kept
    # vectors with product quantization (pq) or float16.
    #
    # ft: gensim FastTextKeyedVectors-like model to compress
    # new_vocab_size / new_ngrams_size: how many word vectors / n-gram buckets to keep
    # fp16: if True (and pq disabled or the slice is empty), store kept vectors as float16
    # pq / qdim / centroids: product-quantization settings (see quantize_ft above)
    # returns: a new, smaller fasttext model built by make_new_fasttext_model
    #
    # NOTE(review): new_to_old_buckets is unused here; only the hit counts matter.
    new_to_old_buckets, old_hash_count = count_buckets(ft, list(ft.vocab.keys()), new_ngrams_size=new_ngrams_size)
    # rank n-gram buckets by how often the vocabulary hashes into them,
    # then keep the new_ngrams_size most used ones
    id_and_count = sorted(old_hash_count.items(), key=lambda x: x[1], reverse=True)
    ids = [x[0] for x in id_and_count[:new_ngrams_size]]
    top_ngram_vecs = ft.vectors_ngrams[ids]
    if pq and len(top_ngram_vecs) > 0:
        top_ngram_vecs = quantize(top_ngram_vecs, qdim=qdim, centroids=centroids)
    elif fp16:
        top_ngram_vecs = top_ngram_vecs.astype(np.float16)
    # keep the original row indices so hashed n-gram lookups still resolve
    rsm = RowSparseMatrix.from_small(ids, top_ngram_vecs, nrows=ft.vectors_ngrams.shape[0])
    top_voc, top_vec = prune_vocab(ft, new_vocab_size=new_vocab_size)
    if pq and len(top_vec) > 0:
        top_vec = quantize(top_vec, qdim=qdim, centroids=centroids)
    elif fp16:
        top_vec = top_vec.astype(np.float16)
    return make_new_fasttext_model(ft, top_vec, rsm, new_vocab=top_voc)

# grid search over quantization dim, n-gram count and vocabulary size;
# combinations whose model file already exists on disk are skipped
for qdim in [300, 100, 50]:
    for new_ngrams_size in [400_000, 100_000, 50_000]:
        for new_vocab_size in [100_000, 20_000, 5_000, 0]:
            mn = 'ft_freqprune_{}K_{}K_pq_{}.bin'.format(int(new_ngrams_size/1000), int(new_vocab_size/1000), qdim)
            print(mn)
            if os.path.exists(exp_path + mn):
                continue
            ftc = prune_ft_freq(big_ft, new_vocab_size=new_vocab_size, new_ngrams_size=new_ngrams_size, pq=True, qdim=qdim)
            ftc.save(exp_path + mn)

# ## read the results back from disk and compare
# # Moved this section to evaluate_all.ipynb

ft_prune_01_pq_100 = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_prune_0.1_pq_100.bin')

ft_freqprune_100K_20K_pq_100 = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_freqprune_100K_20K_pq_100.bin')

format3 = '{:6.6} {:6.6} {:6.4}'

# +
dimred_sizes = []
dimred_sims = []
dimred_dims =
[150, 100, 50, 25, 10, 5] for d in dimred_dims: model = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_dimred_{}.bin'.format(d)) s = mb(model) q = ev(model) dimred_sizes.append(s) dimred_sims.append(q) print(format3.format(d*1.0, s, q)) gc.collect() # - 300 976.6052942276001 0.9934673549560236 150 518.0685205459595 0.9764971832428561 100 365.2229471206665 0.949259066389255 50 212.37737369537354 0.85697155924806 25 135.95458698272705 0.7258812834258547 10 90.10091495513916 0.5838264818986315 5 74.81635761260986 0.5129448006057942 # + pq_sizes = [] pq_sims = [] pq_dims = [300, 150, 100, 50, 25, 10, 5] for d in pq_dims: model = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_pq_{}.bin'.format(d)) s = mb(model) q = ev(model) pq_sizes.append(s) pq_sims.append(q) print(format3.format(d*1.0, s, q)) gc.collect() # - # ``` # исходно было вообще что-то бешеное # # 1.0 1280.6667785644531 0.7343834324890214 # 0.5 640.8182830810547 0.7475458227979501 # 0.2 257.0719566345215 0.7804176247791937 # 0.1 128.47385025024414 0.7923524743459548 # 0.05 64.23728561401367 0.8217848257624584 # 0.02 25.72348403930664 0.7655165270039631 # 0.01 12.862011909484863 0.6882214727696405 # 0.005 6.431332588195801 0.6139219773191233 # 0.002 2.5676469802856445 0.5112279759786497 # 0.001 1.2841386795043945 0.4415964335883225 # ``` # + trim_sizes = [] trim_sims = [] trim_mults = [1.0, 0.5, 0.2, 0.1, 0.05, 0.02, 0.01, 0.005, 0.002, 0.001] for d in trim_mults: model = gensim.models.fasttext.FastTextKeyedVectors.load(exp_path + 'ft_trim_{}.bin'.format(d)) s = mb(model) q = ev(model) trim_sizes.append(s) trim_sims.append(q) print(format3.format(d, s, q)) gc.collect() # + plt.figure(figsize=(8,4)) plt.plot(dimred_sizes, dimred_sims) plt.plot(pq_sizes, pq_sims) plt.plot(trim_sizes, trim_sims) plt.plot(trimf_sizes, trimf_sims) plt.scatter([original_size], [1]) plt.scatter([fp16_size], [fp16_sim]) # plt.scatter([mb(ft_prune_01_pq_100),], [ev(ft_prune_01_pq_100),]) plt.scatter( 
[mb(ft_freqprune_100K_20K_pq_100)], [ev(ft_freqprune_100K_20K_pq_100)] ) plt.legend(['svd', 'pq', 'prune', 'prune freq', 'original', 'fp16', 'prune freq + pq'], loc='lower right') plt.xscale('log') plt.xlabel('model size, mb') plt.ylabel('cosine similarity') plt.title('Representation accuracy vs model compression');
experiments/compressions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- from ctapipe import utils from matplotlib import pyplot as plt import numpy as np from pkg_resources import resource_filename from ctapipe.visualization import CameraDisplay import os # %matplotlib inline # <h3> Import reader module from ctapipe.io import EventSource # <h3> loop and print one event # + # you can use wildchards in the name (change the file name to test) example_file_path = '/home/foffano/cta/lst/LST-1.Run0030.0000.fits.fz' inputfile_reader = EventSource( input_url=example_file_path, max_events=10 ) print("--> NUMBER OF FILES", inputfile_reader.multi_file.num_inputs()) # - # check how many modules are on for i, event in enumerate(inputfile_reader): all_modules_on=np.sum(event.lst.tel[0].evt.module_status) if(all_modules_on > 1): print(f"Event {event.lst.tel[0].evt.event_id}, num modules on: {all_modules_on}") # <h3> Event containers event # <h3> Instrument container subarray=event.inst.subarray event.inst.subarray.info() event.inst.subarray subarray.peek() plt.xlim(-100,100) plt.ylim(-100,100) subarray.to_table() tel=event.inst.subarray.tels[0] # contains camera and optics print(tel.camera) print(tel.optics) tel.camera tel.camera.pix_y tel.camera.to_table() tel.optics tel.optics.mirror_area disp = CameraDisplay(tel.camera) # <h3>LST container # event.lst event.lst.tel[0] event.lst.tel[0].svc event.lst.tel[0].evt # <h3> Module status event.lst.tel[0].svc.module_ids print(event.lst.tel[0].evt.module_status.shape) print(event.lst.tel[0].evt.event_id) print(event.lst.tel[0].evt.module_status) plt.plot(event.lst.tel[0].evt.module_status) np.sum(event.lst.tel[0].evt.module_status) mod=event.lst.tel[0].evt.module_status no_working_mod_rank=np.where(mod == 0)[0] print(f"No working module ranks: \n {no_working_mod_rank}") 
module_ids=event.lst.tel[0].svc.module_ids no_working_mod=module_ids[no_working_mod_rank] print(f"No working modules \n {no_working_mod}") # <h3> Event status plt.plot(event.lst.tel[0].evt.pixel_status) no_working_pixel_rank=np.where(event.lst.tel[0].evt.pixel_status==0)[0] print(f"No working pixel ranks: \n {no_working_pixel_rank}") pixel_ids=event.lst.tel[0].svc.pixel_ids no_working_pixels=pixel_ids[no_working_pixel_rank] print(f"No working pixels: \n {no_working_pixels}") # <H3> R0 event container event.r0 event.r0.tel[0] # + disp = CameraDisplay(tel.camera) disp.image = event.r0.tel[0].waveform[0,:,20] # display channel 0, sample 10 disp.cmap = plt.cm.coolwarm disp.add_colorbar() disp.set_limits_minmax(0, 455) #plt.xlim(-0.1,0.1) #plt.ylim(-0.1,0.1) # - event.r0.event_id # <H3> Select the high gain waveform of the pixel at the place 12 in the array and plot it event.r0.tel[0].waveform.shape HG=0 LG=1 waveform_HG=event.r0.tel[0].waveform[HG,] # all HG waveforms waveform_HG.shape # + plt.plot(event.r0.tel[0].waveform[HG,:,20]) #plot sample 20 of HG waveforms # + from ipywidgets import interact @interact def view_waveform(chan=0, pix_rank=200): plt.plot(event.r0.tel[0].waveform[chan,pix_rank]) # - for i, mywave in enumerate(waveform_HG) : if mywave.sum() == 0 : print(i, mywave) #print("--> module, pixel ", event.lst.tel[0].svc.module_ids[int(i/7)], ", ", i, "\n waveform ", mywave, " \n" ) print("--> module ", event.lst.tel[0].svc.module_ids[int((i+1)/7)],", pixel id ", event.lst.tel[0].svc.pixel_ids[i]," ",event.lst.tel[0].evt.pixel_status[i], " \n" ) plt.plot(mywave) waveform_LG=event.r0.tel[0].waveform[LG,] waveform_LG.shape plt.plot(event.r0.tel[0].waveform[LG,:]) # + import glob ls = glob.glob("/home/foffano/cta/lst/LST-1.Run0030.*.fits.fz") # - # ls
notebooks/read_LST1_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Assignment 4b: Converting XML to CSV # # ### Due: Tuesday the 4th of December 2018 at 20:00 # # - Please name your notebook with the following naming convention: # ASSIGNMENT_4b_FIRSTNAME_LASTNAME.ipynb # - Please submit your complete assignment (4a + 4b) by compressing all your material (notebooks + python files + additional files) into **a single .zip file** following this naming convention: ASSIGNMENT_4_FIRSTNAME_LASTNAME.zip. # Use [this google form](https://goo.gl/forms/Guqm1p448xMgXo0E3) for submission. # - If you have **questions** about this assignment, please refer to the **forum on the Canvas site**. # # In this second part of Assignment 4, you will be asked to read a type of XML and to convert it to a type of CSV. # ## Introduction # # [NewsReader](http://www.newsreader-project.eu/) needs your help! # Within the project, an English NLP pipeline has been developed and they would like to know how well it performs. # However, in order to run the scorer, they must convert their output format [NAF](http://www.newsreader-project.eu/files/2013/01/techreport.pdf) to CoNLL. In this assignment, you will write the converter! # # The NLP task they've chosen is [Entity Linking](https://en.wikipedia.org/wiki/Entity_linking). The goal of Entity Linking is to link an expression to the identity of an entity. For example, in the sentence *Ford sells cars*, the goal of Entity Linking would be to link the expression *Ford* to the [Wikipedia page of Ford Motor Company](https://en.wikipedia.org/wiki/Ford_Motor_Company). This is a challenging task, since *Ford* has many meanings; for example, it can also mean the actor [Harrison Ford](https://en.wikipedia.org/wiki/Harrison_Ford). # # The output from the NewsReader pipeline is not a text file, nor CSV/TSV.
No, it's a type of XML. Instead of going through a file line by line, you search for specific elements or attributes of elements. from lxml import etree # ## NE (Named Entities) # Please observe the following element. Try to understand which elements are children/parents from which elements. # # ```xml # <entity id="e3" type="PERSON"> # <references> # <!--<NAME>--> # <span> # <target id="t42" /> # <target id="t43" /> # </span> # </references> # <externalReferences> # <externalRef resource="spotlight_v1" reference="http://dbpedia.org/resource/Craig_Wood_(golfer)" confidence="1.0" reftype="en" /> # <externalRef resource="spotlight_v1" reference="http://dbpedia.org/resource/Craig_Wood_(NASCAR)" confidence="9.0210754E-36" reftype="en" /> # <externalRef resource="spotlight_v1" reference="http://dbpedia.org/resource/Craig_Wood_(film_editor)" confidence="1.739528E-36" reftype="en" /> # </externalReferences> # </entity> # # ``` # # Above, you see an example of the Newsreader output in NAF. # * the `entity` element is the main element. # * the `entity` element contains information about its id and the entity type (attributes `id`, and `type`, respectively.) # * the first child of the entity element is the `references` element. This element provides us with the information that the entity is '<NAME>' and that the term 'Craig' is the 42nd term in the document and 'Wood' the 43rd. # * the second child of the entity element is the `externalReferences` element. This shows the output from the system 'spotlight_v1', which tries to link the entity '<NAME>' to Dbpedia (structured Wikipedia). The system has a `confidence` of 1.0 (the highest possible value) that the entity refers to http://dbpedia.org/resource/Craig_Wood_(golfer). # # # Our goal is to extract the following information from this element: # * entity type: 'PERSON', 'ORGANISATION' or 'LOCATION'. This can be found in the attribute `type` of element `entity`. 
# * the dbpedia link with the highest confidence (see `externalReferences/externalRef`). # * finally, we want to know for each term (t_42 and t_43), which position they have in the entity. t_42 ('Craig') is the first term in the entity, and t_43 ('Wood') is the last term in the entity. # We want to convert this entity element to a format called CoNLL. Using the entity element as input, it should output the following: # # ... # ``` # 41 from _ _ # 42 Craig (PERSON http://dbpedia.org/resource/Craig_Wood_(golfer) # 43 Wood PERSON) http://dbpedia.org/resource/Craig_Wood_(golfer) # 44 's _ _ # ``` # ... # # Note that it also includes the tokens that are not annotated as an entity. # ## Goal of this assignment # The goal of this assignment is to complete the code snippet below, which means that you will convert one NAF file to CoNLL. The assignment can be roughly divided into the following steps: # * **Step 1:** complete the helper functions below # * **Step 2:** call the helper functions in the for-loop # * **Step 3:** move the code to python files (`converter.py` and `utils.py`) # This is the code snippet that you will have to complete and run: # + doc = etree.parse('../Data/xml_data/naf.xml') t_id2info = dict() entity_els = doc.findall('entities/entity') for entity_el in entity_els: # determine entity type (default is _) entity_type = type_of_entity(entity_el) # extract dbpedia link with highest confidence (default is _) chosen_dbpedia_link = dbpedia_link_with_highest_confidence(entity_el) # determine the position of t_ids in the entity t_ids_positions = t_ids_with_position(entity_el) #loop over t_ids and their positions for t_id, position in t_ids_positions: #get position of t_id #HINT: use the indicate_position_of_tid function entity_type_with_position = indicate_position_of_tid(entity_type, position) #update dictionary t_id2info[t_id] = {'entity_type_with_position': entity_type_with_position, 'dbpedia_link': chosen_dbpedia_link} print(t_id, t_id2info[t_id]) 
input('continue?') # only here for debugging # the input here allows you to inspect the output one entity element at a time # load the mapping of term identifier to lemmas tid2lemma = load_mapping_tid2token(doc) # use the information from t_id2info and tid2lemma to create the conll # T_ID TAB LEMMA TAB ENTITY_TYPE_WITH_POSITION TAB CHOSEN_DBPEDIA_LINK NEWLINE # HINT if a t_id does not have annotation both ENTITY_TYPE_WITH_POSITION and CHOSEN_DBPEDIA_LINK are '_' with open('../Data/xml_data/naf.conll', 'w') as outfile: for t_id, lemma in sorted(tid2lemma.items()): # your code here # - # We will first load one entity element as XML in order to help us develop our program that will run on many entity elements. # + #load the element as XML element. entity = ''' <entity id="e3" type="PERSON"> <references> <!--<NAME>--> <span> <target id="t42" /> <target id="t43" /> </span> </references> <externalReferences> <externalRef resource="spotlight_v1" reference="http://dbpedia.org/resource/Craig_Wood_(golfer)" confidence="1.0" reftype="en" /> <externalRef resource="spotlight_v1" reference="http://dbpedia.org/resource/Craig_Wood_(NASCAR)" confidence="9.0210754E-36" reftype="en" /> <externalRef resource="spotlight_v1" reference="http://dbpedia.org/resource/Craig_Wood_(film_editor)" confidence="1.739528E-36" reftype="en" /> </externalReferences> </entity>''' entity_el = etree.fromstring(entity) # - # ### Step 1: Help functions # In order extract all the relevant information from the entity element, we are going to write a lot of small functions to help us. # #### 1a. Get the entity type # Create a function `type_of_entity()` that takes one parameter: `entity_el` (positional parameter). It should return the `entity_type` of the entity element (access the value of the attribute `type`). If the value is an empty string, or the attribute does not exist, return the string `'_'`. 
# +
def type_of_entity(entity_el):
    '''
    Given an entity element, return the entity type.

    :param entity_el: an <entity> element (lxml)
    :return: value of the 'type' attribute (e.g. 'PERSON'), or '_' when
             the attribute is missing or empty
    '''
    entity_type = entity_el.get('type')
    # an absent attribute yields None; an empty string is also invalid
    if not entity_type:
        return '_'
    return entity_type

entity_type = type_of_entity(entity_el)
print(entity_type)
# -

# #### 1b. Get the DBpedia link with the highest confidence
# Create a function `dbpedia_link_with_highest_confidence()` that takes one parameter: `entity_el` (positional parameter). It should return the dbpedia link with the highest confidence, or return the string `'_'` if there are no dbpedia links in the `externalReferences` element.
#
# To do this, create a list of tuples with dbpedia links with their corresponding confidences:
# ```
# [(1.0, 'http://dbpedia.org/resource/Craig_Wood_(golfer)'),
# (9.0210754E-36, 'http://dbpedia.org/resource/Craig_Wood_(NASCAR)'),
# (1.739528E-36, 'http://dbpedia.org/resource/Craig_Wood_(film_editor)')]
# ```
#
# **HINT:** do not forget to change the confidence to float (it's now a string).

# +
def dbpedia_link_with_highest_confidence(entity_el):
    '''
    Given an entity element, return the dbpedia link with the highest
    confidence.

    :param entity_el: an <entity> element (lxml)
    :return: the 'reference' attribute of the externalRef child with the
             highest 'confidence', or '_' when there are no externalRef
             elements
    '''
    confidence_and_link = []
    for ext_ref_el in entity_el.findall('externalReferences/externalRef'):
        # confidence is stored as a string in the XML; compare as float
        confidence = float(ext_ref_el.get('confidence'))
        link = ext_ref_el.get('reference')
        confidence_and_link.append((confidence, link))
    if not confidence_and_link:
        return '_'
    # max() over (confidence, link) tuples compares confidences first
    highest_confidence, best_link = max(confidence_and_link)
    return best_link

result = dbpedia_link_with_highest_confidence(entity_el)
print(result)
# -

# #### 1c. Find the positions of terms in entity
# Create a function called `t_ids_with_position()` that takes one parameter: `entity_el` (positional parameter). It should loop over the `/references/span/target` elements and return a list of tuples (`term id`, `position_in_entity`).
# Possible values for `position_in_entity` are: `"start"`, `"middle"`, `"end"`, `"start_and_end"`
#
# Example of output for the example `entity_el`:
# ```
# [(42, 'start'), (43, 'end')]
# ```
#
# **HINT:** return an empty list if there are no target elements.
# +
def t_ids_with_position(entity_el):
    '''
    Given an entity element, return the position of each term id in that
    entity as a list of (term id, position) tuples,
    e.g. [(42, 'start'), (43, 'end')].
    Possible positions: 'start', 'middle', 'end', 'start_and_end'.
    Returns an empty list when the entity has no target elements.
    '''
    term_positions = []
    # find all 'span/target elements' elements and determine the number of children
    target_els = entity_el.findall('references/span/target')
    len_target_els = len(target_els)
    #if there is only one element, the position is 'start_and_end'
    if len_target_els == 1:
        # 'id' attributes look like 't42'; strip the leading 't' and cast to int
        t_id = int(target_els[0].get('id').lstrip('t'))
        term_positions.append((t_id, 'start_and_end'))
    #if there are 0 children or two or more children, loop over the target elements.
    else:
        for index, target_el in enumerate(target_els):
            t_id = int(target_el.get('id').lstrip('t'))
            if index == 0:
                position = 'start'
            elif index == len_target_els - 1:
                position = 'end'
            else:
                position = 'middle'
            term_positions.append((t_id, position))
    return term_positions

t_ids_with_position(entity_el)
# -

# #### 1d. Get the entity_type with the position
#
# Create a function `indicate_position_of_tid()` that returns the entity_type with parantheses that indicate the position of a term id.
#
# For example, if the entity type is 'ORG', the function should work in the following way:
# ```
# -position = 'start': '(ORG'
# -position = 'middle': 'ORG'
# -position = 'end': 'ORG)'
# -position = 'start_and_end': '(ORG)'
# ```
#
# If there is no entity type (`entity_type == '_'`), return the string `'_'`.

# +
#### return entity type with parentheses indicating position
def indicate_position_of_tid(entity_type, position):
    '''
    Return the entity_type with parentheses indicating the position of a
    term id. For example, if the entity type is 'ORG':
      -position = 'start': '(ORG'
      -position = 'middle': 'ORG'
      -position = 'end': 'ORG)'
      -position = 'start_and_end': '(ORG)'
    If there is no entity type (entity_type == '_'), return '_'.
    '''
    if entity_type == '_':
        return '_'
    result = entity_type
    # an opening parenthesis marks the first term of the entity
    if position in ('start', 'start_and_end'):
        result = '(' + result
    # a closing parenthesis marks the last term of the entity
    if position in ('end', 'start_and_end'):
        result = result + ')'
    return result
# -

# #### 1e. Mapping the t_ids to their corresponding lemmas
#
# Create a function `load_mapping_tid2token()` that takes one parameter: `doc` (positional parameter), which represents a loaded XML file of type `lxml.etree._ElementTree`. It should return a dictionary mapping all t_ids to their corresponding lemmas.
# # For example, for this element:
# ```
# <term id="t1" type="open" lemma="accord" pos="V" morphofeat="VBG">
# <span>
# <target id="w1" />
# </span>
# <externalReferences>
# <externalRef resource="wn30g.bin64" reference="ili-30-02700104-v" confidence="0.732195" />
# <externalRef resource="wn30g.bin64" reference="ili-30-02255268-v" confidence="0.267805" />
# <externalRef resource="WordNet-3.0" reference="ili-30-02700104-v" confidence="0.59329313" />
# <externalRef resource="WordNet-3.0" reference="ili-30-02255268-v" confidence="0.40670687" />
# </externalReferences>
# </term>
# ```
# the dictionary would be updated with the:
# a) KEY: 1 (integer)
# b) VALUE: 'accord'

# +
def load_mapping_tid2token(doc):
    """
    given a loaded xml file (doc) of type lxml.etree._ElementTree
    create a dictionary mapping all t_ids to their corresponding lemmas
    (keys are integers, values are the 'lemma' attribute strings)
    """
    tid2lemma = dict()
    # NAF stores the term layer under <terms>; each <term> carries its lemma
    for term_el in doc.findall('terms/term'):
        # 'id' attributes look like 't1'; strip the leading 't' and cast to int
        t_id = int(term_el.get('id').lstrip('t'))
        tid2lemma[t_id] = term_el.get('lemma')
    return tid2lemma

tid2lemma = load_mapping_tid2token(doc)
# -

# ### Step 2: Calling the helper functions
# Now call the helper functions in the for-loop at the top of this notebook. Complete this code by writing the output to a CoNLL file.
#
#
# ### Step 3: Moving the code to Python files
# Create two Python files:
# - `utils.py`: should contain all your helper functions
# - `converter.py`: should contain the main code (and import the helper functions)
Assignments/ASSIGNMENT-4b.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="QctwEeqJ8XmR" outputId="0c0a5144-3d9d-448f-fe73-8381babe8e40" # !pip install numpy==1.16.1 # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="RxEFd5r07aIw" outputId="da0353d8-b510-4ea3-cf04-6abdbec3b41f" import numpy as np from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error import matplotlib.pyplot as plt import keras from keras.models import Model, Sequential from keras.layers import Dense, GlobalAveragePooling2D, Input, Embedding, Bidirectional, LSTM, Flatten, concatenate, Dropout, Conv1D, MaxPool1D, BatchNormalization, LeakyReLU, GRU, UpSampling1D, BatchNormalization from keras import optimizers from keras.callbacks import LearningRateScheduler, ModelCheckpoint from keras import regularizers from keras.losses import mean_squared_error import tensorflow as tf # + [markdown] colab_type="text" id="RXWJTHD-5Nw_" # # Loading dataset # + colab={} colab_type="code" id="MbJaJXzD8CLL" matches_X = np.load("matches_X_final.npy") players_X = np.load("players_X_final.npy") #Y = np.load("Y_final.npy") # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="etByhqmG8Qcm" outputId="51655c3c-b9a3-42ba-b944-d9b4f9909ce0" print(matches_X.shape) print(players_X.shape) #print(Y.shape) # + colab={} colab_type="code" id="E-XAUmCCFX4Z" matches_X_new = np.reshape(matches_X, (matches_X.shape[0], matches_X.shape[1]*matches_X.shape[2])) players_X_new = np.reshape(players_X, (players_X.shape[0], players_X.shape[1]*players_X.shape[2])) # + colab={} colab_type="code" id="rP-2uFYAenbx" matches_X_new_1 = np.reshape(matches_X, (matches_X.shape[0], 
matches_X.shape[1]*matches_X.shape[2], 1)) players_X_new_1 = np.reshape(players_X, (players_X.shape[0], players_X.shape[1]*players_X.shape[2], 1)) # + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="feqQpXXVFsWn" outputId="607b1299-9b37-462c-a319-6fcbf8c1a93c" print(matches_X_new.shape) print(players_X_new.shape) print(matches_X_new_1.shape) print(players_X_new_1.shape) # + colab={} colab_type="code" id="ud1qRf4y_hpk" matches_X_train, matches_X_val, players_X_train, players_X_val = train_test_split(matches_X_new, players_X_new, shuffle=True, test_size=0.1) # + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="98OSmqPt_0u-" outputId="27362a87-97d2-476d-e8cb-016db8733505" print(matches_X_train.shape) print(players_X_train.shape) #print(Y_train.shape) # + [markdown] colab_type="text" id="uHNeOOkU2dko" # # Original Model # + colab={} colab_type="code" id="njCVAnfs9Hfr" input_1 = Input(shape=(124, 1)) input_2 = Input(shape=(360, 1)) match = Conv1D(216, 3, activation='relu')(input_1) match = Dropout(0.5)(match) match = Flatten()(match) match = Dense(216, activation='relu', kernel_initializer='normal')(match) match = Dropout(0.5)(match) match = Dense(128, activation='relu')(match) match = Dropout(0.5)(match) player = Conv1D(216, 3, activation='relu')(input_2) player = Dropout(0.5)(player) player = Flatten()(player) player = Dense(216, activation='relu', kernel_initializer='normal')(player) player = Dropout(0.5)(player) player = Dense(128, activation='relu')(player) player = Dropout(0.5)(player) concat = concatenate([match, player]) concat = Dense(128, activation='relu')(concat) concat = Dropout(0.5)(concat) out = Dense(297, activation='relu')(concat) model = Model(inputs=[input_1, input_2], outputs=[out]) model.compile(optimizer='adam', loss='mse', metrics=[tf.keras.metrics.RootMeanSquaredError()]) # + colab={} colab_type="code" id="ZH-kFZ4pEvIx" model.summary() # + colab={} colab_type="code" 
id="0eK-aQZaEypX" history = model.fit(x=[matches_X_new_1, players_X_new_1], y=Y, validation_split=0.1, batch_size=10, epochs=100) # history = model.fit(x=[matches_X_train, players_X_train],y=Y_train, batch_size= 10,epochs=20) # + [markdown] colab_type="text" id="ilpQ2nsb2jre" # # Autoencoder Model # Not being used # + colab={} colab_type="code" id="cg-y01w_UE8k" input_m = Input(shape=(124,1)) x = Conv1D(216, 3, activation='relu', padding='same')(input_m) x = Dropout(0.5)(x) x = MaxPool1D(2, padding='same')(x) x = Conv1D(128, 3, activation='relu', padding='same')(x) x = Dropout(0.5)(x) x = MaxPool1D(2, padding='same')(x) x = Conv1D(64, 3, activation='relu', padding='same')(x) encoded = MaxPool1D(2, padding='same')(x) x = Conv1D(64, 3, activation='relu', padding='same')(encoded) x = UpSampling1D(2)(x) x = Conv1D(128, 2, activation='relu')(x) x = Dropout(0.5)(x) x = UpSampling1D(2)(x) x = Conv1D(216, 3, activation='relu', padding='same')(x) x = Dropout(0.5)(x) x = UpSampling1D(2)(x) decoded = Conv1D(1, 3, activation='relu',padding='same')(x) autoencoder_matches = Model(input_m, decoded) autoencoder_matches.compile(optimizer='adam', loss='mse',metrics=[tf.keras.metrics.RootMeanSquaredError()]) # + colab={"base_uri": "https://localhost:8080/", "height": 765} colab_type="code" id="JjgkKbf0wnKP" outputId="f04f65f5-7f8d-4d58-a79b-1c6c952223a1" autoencoder_matches.summary() # + colab={} colab_type="code" id="InjX9eiEuwJV" input_m = Input(shape=(360,1)) x = Conv1D(216, 3, activation='relu', padding='same')(input_m) x = Dropout(0.5)(x) x = MaxPool1D(2, padding='same')(x) x = Conv1D(128, 3, activation='relu', padding='same')(x) x = Dropout(0.5)(x) x = MaxPool1D(2, padding='same')(x) x = Conv1D(64, 3, activation='relu', padding='same')(x) encoded = MaxPool1D(2, padding='same')(x) x = Conv1D(64, 3, activation='relu', padding='same')(encoded) x = UpSampling1D(2)(x) x = Conv1D(128, 3, activation='relu', padding='same')(x) x = Dropout(0.5)(x) x = UpSampling1D(2)(x) x = Conv1D(216, 
3, activation='relu', padding='same')(x) x = Dropout(0.5)(x) x = UpSampling1D(2)(x) decoded = Conv1D(1, 3, activation='relu', padding='same')(x) autoencoder_player = Model(input_m, decoded) autoencoder_player.compile(optimizer='adam', loss='mse',metrics=[tf.keras.metrics.RootMeanSquaredError()]) # + colab={"base_uri": "https://localhost:8080/", "height": 765} colab_type="code" id="x883lhZG3v8L" outputId="050ff330-63a6-43df-8430-66e5b56f5f07" autoencoder_player.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="8eVhDYFgV7DD" outputId="c3817a95-2b30-4448-e4ad-895f25e4105d" history = autoencoder_matches.fit(x=matches_X_train, y=matches_X_train, validation_data=(matches_X_val,matches_X_val), batch_size=10, epochs=100) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="tfRX0hUt8kA1" outputId="9fdc0533-aa7b-419d-ba2d-267ecfd87a7c" history_players = autoencoder_player.fit(x=players_X_train, y=players_X_train, validation_data=(players_X_val,players_X_val), batch_size=10, epochs=100) # + colab={} colab_type="code" id="r2CcXcus4eTd" autoencoder_matches.save("autoencoder_matches.h5") autoencoder_player.save("autoencoder_player.h5") # + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="lgqPDgzwKRQ8" outputId="6c080bb7-974b-4c7d-8d80-85fffa8408fa" plt.plot(history.history['val_root_mean_squared_error']) plt.title('team model RMSE') plt.ylabel('RMSE') plt.xlabel('epoch') plt.legend(['val'], loc='upper left') plt.show() # + colab={} colab_type="code" id="uMpTWlDSUE1F" plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'val'], loc='upper left') plt.show() # + colab={} colab_type="code" id="qjn5MJghZhr6" pred = model.predict(x=[matches_X_val, players_X_val]) # + [markdown] colab_type="text" id="1xhU4gdQX6YF" # # Deep Autoencoder # Current model # + 
colab={"base_uri": "https://localhost:8080/", "height": 391} colab_type="code" id="FSzamE2yA_F2" outputId="d500c30b-4256-4b94-a5a5-09721c4771ee" input_m = Input(shape=(46,)) encoded = Dense(128, activation='relu')(input_m) encoded = Dropout(0.3)(encoded) encoded = Dense(64, activation='relu')(encoded) encoded = Dense(32, activation='relu')(encoded) decoded = Dense(64, activation='relu')(encoded) decoded = Dropout(0.3)(decoded) decoded = Dense(128, activation='relu')(decoded) decoded = Dense(46, activation='relu')(decoded) autoencoder_matches = Model(input_m, decoded) autoencoder_matches.compile(optimizer='adam', loss='mse',metrics=[tf.keras.metrics.RootMeanSquaredError()]) autoencoder_matches.summary() # + colab={"base_uri": "https://localhost:8080/", "height": 527} colab_type="code" id="YSOcQkj5ZRVv" outputId="9d11bd40-bec5-4a74-a034-a27976cb1505" input_m = Input(shape=(360,)) encoded = Dense(128, activation='relu')(input_m) encoded = Dropout(0.3)(encoded) encoded = Dense(64, activation='relu')(encoded) encoded = Dropout(0.3)(encoded) encoded = Dense(32, activation='relu')(encoded) encoded = Dense(16, activation='relu')(encoded) decoded = Dense(32, activation='relu')(encoded) decoded = Dropout(0.3)(decoded) decoded = Dense(64, activation='relu')(decoded) decoded = Dense(128, activation='relu')(decoded) decoded = Dropout(0.3)(decoded) decoded = Dense(360, activation='relu')(decoded) autoencoder_players = Model(input_m, decoded) autoencoder_players.compile(optimizer='adam', loss='mse',metrics=[tf.keras.metrics.RootMeanSquaredError()]) autoencoder_players.summary() # + colab={} colab_type="code" id="mT_CY68gfc-E" # + colab={} colab_type="code" id="cdvBg5Vetzdy" import random noisy_match_x = np.zeros((141,46)) for i in range(141): for j in range(46): if j % 100 == 0: noisy_match_x[i][j] = matches_X_train[i][j] + random.random() else: noisy_match_x[i][j] = matches_X_train[i][j] noisy_player_x = np.zeros((141,360)) for i in range(141): for j in range(360): if j % 300 == 
0: noisy_player_x[i][j] = players_X_train[i][j] + random.random() else: noisy_player_x[i][j] = players_X_train[i][j] # + colab={} colab_type="code" id="5uhJEMevf-VH" # + colab={} colab_type="code" id="th7yObOAumU8" noisy_val_match_x = np.zeros((16,46)) for i in range(16): for j in range(46): if j % 100 == 0: noisy_val_match_x[i][j] = matches_X_val[i][j] + random.random() else: noisy_val_match_x[i][j] = matches_X_val[i][j] noisy_val_player_x = np.zeros((16,360)) for i in range(16): for j in range(360): if j % 300 == 0: noisy_val_player_x[i][j] = players_X_val[i][j] + random.random() else: noisy_val_player_x[i][j] = players_X_val[i][j] # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="Sgqp6XI6Y3VI" outputId="2647fd36-3542-49e8-b6bd-7763c27e0aeb" history = autoencoder_matches.fit(x=noisy_match_x, y=matches_X_train, validation_data=(noisy_val_match_x,matches_X_val), batch_size=5, epochs=100) # + colab={"base_uri": "https://localhost:8080/", "height": 282} colab_type="code" id="WLD14m8zxC-_" outputId="daa9aead-12b6-4bb6-df66-5916f8d25554" plt.plot(history.history["val_root_mean_squared_error"]) plt.plot(history.history["root_mean_squared_error"]) # + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="f1ZJt0MWZEnL" outputId="363e61d9-a094-4509-b17c-8f16f8e9d3a1" history_players = autoencoder_players.fit(x=noisy_player_x, y=players_X_train, validation_data=(noisy_val_player_x,players_X_val), batch_size=5, epochs=100) # + colab={} colab_type="code" id="ilcF8BeuZfp5" autoencoder_matches.save("auto_matches_dense.h5") autoencoder_players.save("auto_players_dense.h5") # + [markdown] colab_type="text" id="9ow7MxRN5Ugl" # # Test Data # + colab={} colab_type="code" id="6matPFhCO_Aa" def test_data(match,players): matches_X = np.load(match) players_X = np.load(players) matches_X = np.expand_dims(matches_X, axis=0) players_X = np.expand_dims(players_X, axis=0) matches_X = np.reshape(matches_X, 
(matches_X.shape[0], matches_X.shape[1]*matches_X.shape[2])) players_X = np.reshape(players_X, (players_X.shape[0], players_X.shape[1]*players_X.shape[2]*players_X.shape[3])) print(matches_X.shape,players_X.shape) return matches_X,players_X # + colab={} colab_type="code" id="A1HXcglc-YgS" def test_pred(fixtures,autoencoder_matches,autoencoder_players): pred_all_matches = [] pred_all_players = [] teams = [] player_teams = [] for fix in fixtures: print(fix[0].split('_')[:2]) teams.extend(fix[0].split('_')[:2]) matches_X,players_X = test_data(fix[0],fix[1]) pred_match = autoencoder_matches.predict(matches_X) pred_players = autoencoder_players.predict(players_X) pred_all_matches.append(pred_match) pred_all_players.append(pred_players) pred_all_matches = np.vstack(pred_all_matches) pred_all_players = np.vstack(pred_all_players) temp = [[team]*18 for team in teams] for i in range(len(teams)): player_teams += temp[i] print(pred_all_matches.shape,pred_all_players.shape) return pred_all_matches,pred_all_players,teams,player_teams # + [markdown] colab_type="text" id="CsYBZ8t7C8Oq" # ## Matches from sim 1 and 2 # + [markdown] colab_type="text" id="Ae7MnxnliDMI" # ### FINALS # + colab={} colab_type="code" id="Jqm1AjugiEvg" matches_X_final,players_X_final = test_data("Juventus_Atalanta_teams.npy","Juventus_Atalanta_players.npy") pred_finals_match = autoencoder_matches.predict(matches_X_final) pred_finals_players = autoencoder_players.predict(players_X_final) pred_all_matches_final = pred_finals_match pred_all_players_final = pred_finals_players pred_all_matches_final.shape,pred_all_players_final.shape # + [markdown] colab_type="text" id="Q4iH3gsudJl2" # ### Semi finals # + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="OMbQRqjYdMG8" outputId="ba0de2df-94d1-47ce-eedd-30a274f9d365" matches_X_bayern2,players_X_bayern2 = test_data("Atalanta_Barcelona_teams.npy","Atalanta_Barcelona_players.npy") matches_X_barca2,players_X_barca2 = 
test_data("Paris_Juventus_teams.npy","Paris_Juventus_players.npy") pred_bayern2_match = autoencoder_matches.predict(matches_X_bayern2) pred_bayern2_players = autoencoder_players.predict(players_X_bayern2) pred_barca2_match = autoencoder_matches.predict(matches_X_barca2) pred_barca2_players = autoencoder_players.predict(players_X_barca2) pred_all_matches_s = np.vstack((pred_bayern2_match,pred_barca2_match)) pred_all_players_s = np.vstack((pred_bayern2_players,pred_barca2_players)) pred_all_matches_s.shape,pred_all_players_s.shape # + [markdown] colab_type="text" id="f8JC_jvsdGf2" # ### Q finals # + colab={"base_uri": "https://localhost:8080/", "height": 102} colab_type="code" id="bNhS6yzeQBH4" outputId="3c4bba5b-f598-436d-de0a-5485536071fc" matches_X_atlanta,players_X_atlanta = test_data("Juventus_Atlético_teams.npy","Juventus_Atlético_players.npy") matches_X_barca2,players_X_barca2 = test_data("Atalanta_Bayern_teams.npy","Atalanta_Bayern_players.npy") matches_X_juve2,players_X_juve2 = test_data("Barcelona_Leipzig_teams.npy","Barcelona_Leipzig_players.npy") matches_X_leip,players_X_leip = test_data("Paris_Man. City_teams.npy","Paris_Man. 
City_players.npy") pred_atlanta_match = autoencoder_matches.predict(matches_X_atlanta) pred_atlanta_players = autoencoder_players.predict(players_X_atlanta) pred_barca2_match = autoencoder_matches.predict(matches_X_barca2) pred_barca2_players = autoencoder_players.predict(players_X_barca2) pred_juve2_match = autoencoder_matches.predict(matches_X_juve2) pred_juve2_players = autoencoder_players.predict(players_X_juve2) pred_leip_match = autoencoder_matches.predict(matches_X_leip) pred_leip_players = autoencoder_players.predict(players_X_leip) pred_all_matches_q = np.vstack((pred_atlanta_match,pred_barca2_match,pred_juve2_match,pred_leip_match)) pred_all_players_q = np.vstack((pred_atlanta_players,pred_barca2_players,pred_juve2_players,pred_leip_players)) pred_all_matches_q.shape,pred_all_players_q.shape # + [markdown] colab_type="text" id="I_JpBe1jcOnw" # ### Round of 16 # + colab={} colab_type="code" id="5j-zK3xiNkk0" matches_X_barca = np.load("Barca_Napoli_teams.npy") players_X_barca = np.load("Barca_Napoli_players.npy") matches_X_barca = np.expand_dims(matches_X_barca, axis=0) matches_X_barca = np.reshape(matches_X_barca, (matches_X_barca.shape[0], matches_X_barca.shape[1]*matches_X_barca.shape[2], 1)) players_X_barca = np.expand_dims(players_X_barca, axis=0) players_X_barca = np.reshape(players_X_barca, (players_X_barca.shape[0], players_X_barca.shape[1]*players_X_barca.shape[2]*players_X_barca.shape[3], 1)) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="1H_sUdG8OHFj" outputId="853501ea-b924-46d7-baac-ce94b4726340" matches_X_barca.shape,players_X_barca.shape # + colab={} colab_type="code" id="xr5VRtPlPGh7" #pred_barca = model.predict(x=[matches_X_barca, players_X_barca]) pred_barca_match = autoencoder_matches.predict(matches_X_barca) pred_barca_players = autoencoder_players.predict(players_X_barca) # + colab={} colab_type="code" id="Pf7tQbw0QJZ6" matches_X_bayern = np.load("Bayern_Chelsea_teams.npy") players_X_bayern = 
np.load("Bayern_Chelsea_players.npy") matches_X_bayern = np.expand_dims(matches_X_bayern, axis=0) matches_X_bayern = np.reshape(matches_X_bayern, (matches_X_bayern.shape[0], matches_X_bayern.shape[1]*matches_X_bayern.shape[2], 1)) players_X_bayern = np.expand_dims(players_X_bayern, axis=0) players_X_bayern = np.reshape(players_X_bayern, (players_X_bayern.shape[0], players_X_bayern.shape[1]*players_X_bayern.shape[2]*players_X_bayern.shape[3], 1)) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="RCM0kxgTQhwE" outputId="051a65f1-2a9d-4b67-a9a6-655d02733b49" matches_X_bayern.shape # + colab={} colab_type="code" id="AbK7m0yQjaF0" #pred_bayern = model.predict(x=[matches_X_bayern, players_X_bayern]) pred_bayern_match = autoencoder_matches.predict(matches_X_bayern) pred_bayern_players = autoencoder_players.predict(players_X_bayern) # + colab={} colab_type="code" id="NFwbf4_njfYv" matches_X_city = np.load("City_Madrid_teams.npy") players_X_city = np.load("City_Madrid_players.npy") matches_X_city = np.expand_dims(matches_X_city, axis=0) matches_X_city = np.reshape(matches_X_city, (matches_X_city.shape[0], matches_X_city.shape[1]*matches_X_city.shape[2], 1)) players_X_city = np.expand_dims(players_X_city, axis=0) players_X_city = np.reshape(players_X_city, (players_X_city.shape[0], players_X_city.shape[1]*players_X_city.shape[2]*players_X_city.shape[3], 1)) # + colab={} colab_type="code" id="kPkaue-5j7tO" #pred_city = model.predict(x=[matches_X_city, players_X_city]) pred_city_match = autoencoder_matches.predict(matches_X_city) pred_city_players = autoencoder_players.predict(players_X_city) # + colab={} colab_type="code" id="U2MNWn4ekBsG" matches_X_juve = np.load("Juventus_Lyon_teams.npy") players_X_juve = np.load("Juventus_Lyon_players.npy") matches_X_juve = np.expand_dims(matches_X_juve, axis=0) matches_X_juve = np.reshape(matches_X_juve, (matches_X_juve.shape[0], matches_X_juve.shape[1]*matches_X_juve.shape[2], 1)) players_X_juve = 
np.expand_dims(players_X_juve, axis=0) players_X_juve = np.reshape(players_X_juve, (players_X_juve.shape[0], players_X_juve.shape[1]*players_X_juve.shape[2]*players_X_juve.shape[3], 1)) # + colab={} colab_type="code" id="yk-EpDk-kUyg" #pred_juve = model.predict(x=[matches_X_juve, players_X_juve]) pred_juve_match = autoencoder_matches.predict(matches_X_juve) pred_juve_players = autoencoder_players.predict(players_X_juve) # + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="vPeGDuC0-poV" outputId="dc21765b-301d-484e-cf30-e71f0c0ae16d" pred_all_matches = np.vstack((pred_barca_match,pred_bayern_match,pred_city_match,pred_juve_match)) pred_all_players = np.vstack((pred_barca_players,pred_bayern_players,pred_city_players,pred_juve_players)) pred_all_matches.shape,pred_all_players.shape # + [markdown] colab_type="text" id="x_8Xd4RAWP2Z" # # Autoencoder prediction test # + [markdown] colab_type="text" id="JtUYH0VDCl-8" # ## Loading complete CSVs # + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" id="miFXw2EFKsP0" outputId="ae3dd85b-d9a5-438f-9c58-ce6535d94498" # !wget https://raw.githubusercontent.com/ashwinvaswani/whatif/master/Data/match_info_merged.csv?token=<KEY> # + colab={"base_uri": "https://localhost:8080/", "height": 224} colab_type="code" id="2H3lUDV_Kt5u" outputId="8064ff04-bfff-48c8-fe42-3fb40b84e701" # !wget https://raw.githubusercontent.com/ashwinvaswani/whatif/master/Data/player_info_merged.csv?token=<KEY> # + colab={} colab_type="code" id="h-DPjLttkd3M" import pandas as pd player_csv = pd.read_csv("player_info_merged.csv?token=<KEY>") team_csv = pd.read_csv("match_info_merged.csv?token=<KEY>") # + [markdown] colab_type="text" id="r3f-pPkmCryQ" # ## Prediction results # + colab={} colab_type="code" id="amQzo8nUcRdC" val_pred_matches = autoencoder_matches.predict(noisy_val_match_x) val_pred_players = autoencoder_players.predict(noisy_val_player_x) # + colab={} colab_type="code" id="rcCLHwLPkla8" 
def denorm(val, maxim, minim):
    """Map a min-max normalised value back onto its original [minim, maxim] scale."""
    return val * (maxim - minim) + minim

# + colab={} colab_type="code" id="9v6LdTTB13R0"
def auto_pl_denorm(arr,pl_csv,is_deep=False):
    """De-normalise a flat player prediction vector of length 360.

    The vector holds 36 player slots of width 10; within each slot the
    stats used here sit at fixed offsets (0 goals, 1 total shots,
    2 shots on target, 3 assists, 4 interceptions, 5 crosses, 6 fouls,
    8 minutes played).  Slots whose leading value is <= 0 are skipped
    (treated as padding / absent players).

    arr: prediction row; scalar entries when is_deep is truthy, otherwise
         each entry is a length-1 array and element [0] is read.
    pl_csv: reference player DataFrame supplying per-stat min/max ranges.
    Returns eight lists (one per stat), aligned by retained slot.
    """
    def span(column):
        # (max, min) range of one stat column in the reference CSV
        return pl_csv[column].max(), pl_csv[column].min()

    goals_max, goals_min = span("Goals scored")
    assists_max, assists_min = span("Assists")
    fouls_max, fouls_min = span("Fouls committed")
    total_shots_max, total_shots_min = span("Total shots")
    shots_target_max, shots_target_min = span("Shots on Target")
    interceptions_max, interceptions_min = span("Interceptions")
    crosses_max, crosses_min = span("Crosses")
    time_played_max, time_played_min = span("Time Played")

    goals, total_shots, shots_target, assists = [], [], [], []
    fouls, interceptions, crosses, time = [], [], [], []

    # Deep predictions are plain scalars; shallow ones carry a trailing
    # singleton axis, so element [0] must be unwrapped first.
    pick = (lambda v: v) if is_deep else (lambda v: v[0])
    for base in np.arange(0, 360, 10):
        lead = pick(arr[base])
        if lead > 0:
            print(lead)
            goals.append(denorm(lead, goals_max, goals_min))
            total_shots.append(denorm(pick(arr[base + 1]), total_shots_max, total_shots_min))
            shots_target.append(denorm(pick(arr[base + 2]), shots_target_max, shots_target_min))
            assists.append(denorm(pick(arr[base + 3]), assists_max, assists_min))
            fouls.append(denorm(pick(arr[base + 6]), fouls_max, fouls_min))
            interceptions.append(denorm(pick(arr[base + 4]), interceptions_max, interceptions_min))
            crosses.append(denorm(pick(arr[base + 5]), crosses_max, crosses_min))
            time.append(denorm(pick(arr[base + 8]), time_played_max, time_played_min))
    return goals,total_shots,shots_target,assists,fouls,interceptions,crosses,time

def auto_mat_denorm(arr,match_csv,is_deep=False):
    """De-normalise a flat match prediction vector.

    The away team's copy of each stat sits 23 positions after the home
    copy; stats used here are at offsets 0 goals, 4 blocks, 6 corners,
    8 possession, 9 passes, 10 passing accuracy.

    arr: prediction row (scalar entries when is_deep is truthy, otherwise
         length-1 arrays); match_csv supplies per-stat min/max ranges.
    Returns six [home, away] pairs.
    """
    def span(column):
        return match_csv[column].max(), match_csv[column].min()

    Goals_max, Goals_min = span("Goals")
    Block_max, Block_min = span("Blocks")
    Possession_max, Possession_min = span("Possession")
    Passes_max, Passes_min = span("Passes")
    passing_acc_max, passing_acc_min = span("Passing Accuracy")
    corners_max, corners_min = span("Corners")

    pick = (lambda v: v) if is_deep else (lambda v: v[0])
    print("goals pred:", pick(arr[0]), pick(arr[0 + 23]))
    print("Shotarget pred:", pick(arr[2]), pick(arr[2 + 23]))

    def pair(offset, hi, lo):
        # [home, away] values for the stat at the given home offset
        return [denorm(pick(arr[offset]), hi, lo), denorm(pick(arr[offset + 23]), hi, lo)]

    goals = pair(0, Goals_max, Goals_min)
    blocks = pair(4, Block_max, Block_min)
    poss = pair(8, Possession_max, Possession_min)
    passes = pair(9, Passes_max, Passes_min)
    pass_acc = pair(10, passing_acc_max, passing_acc_min)
    corners = pair(6, corners_max, corners_min)
    return goals,blocks,poss,passes,pass_acc,corners
# Team orderings used in earlier simulation rounds (kept for reference):
# teams = ["Barca","Napoli","Bayern","Chelsea","City","Madrid","Juventus","Lyon"]
# teams_q = ["Atlético","Juventus","Bayern","Atalanta","Leipzig","Barca","City","Paris"]
# teams_q2 = ["Juventus","Atlético","Atalanta","Bayern","Barca","Leipzig","Paris","City",]
# teams_s1 = ["Barca","Atalanta","Juventus","Paris"]
# teams_s2 = ["Atalanta","Barca","Paris","Juventus"]
# teams_finals = ["Juventus","Atalanta"]
def fixture_prediction(pred_all_matches,pred_all_players,teams,player_teams,team_csv,player_csv,csv_file_name):
    """De-normalise stacked match/player predictions and write them out.

    pred_all_matches / pred_all_players: row-stacked autoencoder outputs
        (one row per fixture), as returned by test_pred.
    teams / player_teams: team labels per fixture row resp. per player row.
    team_csv / player_csv: reference CSVs supplying min/max ranges.
    csv_file_name: suffix for the "CSV/matches_*.csv" / "CSV/players_*.csv"
        output files (the "CSV" directory must already exist).
    Returns the match-level DataFrame; player rows are only written to disk.
    """
    # Flatten each fixture's [home, away] stat pairs into team-ordered lists.
    goals_match = []
    blocks = []
    poss = []
    passes = []
    pass_acc = []
    corners = []
    for p in pred_all_matches:
        g,b,po,pa,pacc,cor = auto_mat_denorm(p,team_csv,is_deep=1)
        goals_match.extend(g)
        blocks.extend(b)
        poss.extend(po)
        passes.extend(pa)
        pass_acc.extend(pacc)
        corners.extend(cor)
    goals = []
    total_shots = []
    shots_target=[]
    assists=[]
    fouls=[]
    interceptions=[]
    crosses=[]
    time = []
    # Generic slot labels "Player 1".."Player 18", repeated per team and
    # trimmed to the number of player rows actually present.
    players = []
    for i in range(18):
        players.append("Player {}".format(i+1))
    players = players*8
    players = players[:len(player_teams)]
    #players_finals = players[:36]
    # players_teams = ['Barca']*18+['Napoli']*18+['Bayern']*18+['Chelsea']*18+['City']*18+['Madrid']*18+['Juventus']*18+['Lyon']*18
    # players_teams_q = ['Atlético']*18+['Juventus']*18+['Bayern']*18+['Atalanta']*18+['Leipzig']*18+['Barca']*18+['City']*18+['Paris']*18
    # players_teams_q2 = ['Juventus']*18+['Atlético']*18+['Atalanta']*18+['Bayern']*18+['Barca']*18+['Leipzig']*18+['Paris']*18+['City']*18
    # players_teams_s1 = ['Barca']*18+['Atalanta']*18+['Juventus']*18+['Paris']*18
    # players_teams_s2 = ['Atalanta']*18+['Barca']*18+['Paris']*18+['Juventus']*18
    # players_teams_finals = ['Juventus']*18+['Atalanta']*18
    for m in pred_all_players:
        g,t,tar,ass,f,intr,crs,ti = auto_pl_denorm(m,player_csv,is_deep=1)
        goals.extend(g)
        total_shots.extend(t)
        shots_target.extend(tar)
        assists.extend(ass)
        fouls.extend(f)
        interceptions.extend(intr)
        crosses.extend(crs)
        time.extend(ti)
    # Match-level stats are rounded to whole numbers; player-level stats
    # are kept as floats.
    matches_auto_df = pd.DataFrame(columns=["Teams","Goals","Blocks","Possession","Passes","Passing Accuracy","Corners"])
    matches_auto_df["Teams"] = teams
    matches_auto_df["Goals"] = [round(g) for g in goals_match]
    matches_auto_df["Blocks"] = [round(g) for g in blocks]
    matches_auto_df["Possession"] = [round(g) for g in poss]
    matches_auto_df["Passes"] = [round(g) for g in passes]
    matches_auto_df["Passing Accuracy"] = [round(g) for g in pass_acc]
    matches_auto_df["Corners"] = [round(g) for g in corners]
    #print(matches_auto_df)
    players_auto_df = pd.DataFrame(columns=["Player","Team","Goals","Shots","Shots_Target","Assists","Fouls","Interceptions","Crosses","Time"])
    players_auto_df["Player"] = players
    players_auto_df["Team"] = player_teams
    players_auto_df["Goals"] = goals
    players_auto_df["Shots"] = total_shots
    players_auto_df["Shots_Target"] = shots_target
    players_auto_df["Assists"] = assists
    players_auto_df["Fouls"] = fouls
    players_auto_df["Interceptions"] = interceptions
    players_auto_df["Crosses"] = crosses
    players_auto_df["Time"] = time
    print(players_auto_df.head())
    matches_auto_df.to_csv("CSV/matches_"+csv_file_name+".csv",index=False)
    players_auto_df.to_csv("CSV/players_"+csv_file_name+".csv",index=False)
    print("CSVs saved...")
    return matches_auto_df

# + colab={"base_uri": "https://localhost:8080/", "height": 451} colab_type="code" id="ypbeIyYmKeRk" outputId="a8a1fd76-e957-40ef-a500-9ab3ef21caa3"
# Run the whole pipeline for the simulated final fixture.
fixtures = [("Bayern_Atalanta_teams.npy","Bayern_Atalanta_players.npy") ]
pred_all_matches,pred_all_players,teams,player_teams = test_pred(fixtures,autoencoder_matches,autoencoder_players)
fixture_prediction(pred_all_matches,pred_all_players,teams,player_teams,team_csv,player_csv,"final")

# + [markdown] colab_type="text" id="JYRf609d5O8O"
# # Denorm Validation Original

# + colab={} colab_type="code" id="S6Y5Y-sbCYKo"
# De-normalise the match-level part of each prediction row.
# NOTE(review): `pred` is not defined anywhere in this notebook's visible
# cells (possibly produced by the commented-out conv "model") -- confirm
# where it comes from before re-running.
blocks_home = []
blocks_away = []
poss = []
pass_home = []
pass_away = []
pass_acc_home = []
pass_acc_away = []
corners_home = []
corners_away = []
#"Possession","Passes Home","Passes Away", "Passing Accuracy Home", "Passing Accuracy Away","Corners Home", "Corners Away"
for val in pred:
    val = np.reshape(val,(1,val.shape[0]))
    denorm_match,denorm_player = return_denormed(val,player_csv,team_csv)
    blocks_home.append(denorm_match[0][0])
    blocks_away.append(denorm_match[0][1])
    poss.append(denorm_match[0][2])
    pass_home.append(denorm_match[0][3])
    pass_away.append(denorm_match[0][4])
    pass_acc_home.append(denorm_match[0][5])
    pass_acc_away.append(denorm_match[0][6])
    corners_home.append(denorm_match[0][7])
    corners_away.append(denorm_match[0][8])

# + colab={} colab_type="code" id="gncsPQ6HCYH7"
val_matches_df = pd.DataFrame(columns = ["Blocks Home", "Blocks Away", "Possession", "Passes Home","Passes Away", "Passing Accuracy Home", "Passing Accuracy Away","Corners Home", "Corners Away"])
val_matches_df["Blocks Home"] = blocks_home
val_matches_df["Blocks Away"] = blocks_away
val_matches_df["Possession"] = poss
val_matches_df["Passes Home"] = pass_home
val_matches_df["Passes Away"] = pass_away
val_matches_df["Passing Accuracy Home"] = pass_acc_home
val_matches_df["Passing Accuracy Away"] = pass_acc_away
val_matches_df["Corners Home"] = corners_home
val_matches_df["Corners Away"] = corners_away

# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="mAnT02FRCYFY" outputId="52487f6e-df2e-4a61-aa1f-ee5560ab8e4e"
val_matches_df.head()
outputId="3f9c0efb-26d3-4e67-c758-d6792106dcba" blocks_home = [] blocks_away = [] poss = [] pass_home = [] pass_away = [] pass_acc_home = [] pass_acc_away = [] corners_home = [] corners_away = [] #"Possession","Passes Home","Passes Away", "Passing Accuracy Home", "Passing Accuracy Away","Corners Home", "Corners Away" for val in Y_val: val = np.reshape(val,(1,val.shape[0])) denorm_match,denorm_player = return_denormed(val,player_csv,team_csv) blocks_home.append(denorm_match[0][0]) blocks_away.append(denorm_match[0][1]) poss.append(denorm_match[0][2]) pass_home.append(denorm_match[0][3]) pass_away.append(denorm_match[0][4]) pass_acc_home.append(denorm_match[0][5]) pass_acc_away.append(denorm_match[0][6]) corners_home.append(denorm_match[0][7]) corners_away.append(denorm_match[0][8]) val_matches_df = pd.DataFrame(columns = ["Blocks Home", "Blocks Away", "Possession", "Passes Home","Passes Away", "Passing Accuracy Home", "Passing Accuracy Away","Corners Home", "Corners Away"]) val_matches_df["Blocks Home"] = blocks_home val_matches_df["Blocks Away"] = blocks_away val_matches_df["Possession"] = poss val_matches_df["Passes Home"] = pass_home val_matches_df["Passes Away"] = pass_away val_matches_df["Passing Accuracy Home"] = pass_acc_home val_matches_df["Passing Accuracy Away"] = pass_acc_away val_matches_df["Corners Home"] = corners_home val_matches_df["Corners Away"] = corners_away val_matches_df.head() # + colab={} colab_type="code" id="MIr6bRxNC-Yh" val_matches_df.to_csv("val_matches.csv",index=False) # + [markdown] colab_type="text" id="b41hfAzJYIuf" # # Denorm Function for original # + colab={} colab_type="code" id="9OGEJPbGknIk" def return_denormed(arr, pl_csv, match_csv): Block_max = match_csv["Blocks"].max() Block_min = match_csv["Blocks"].min() Possession_max = match_csv["Possession"].max() Possession_min = match_csv["Possession"].min() Passes_max = match_csv["Passes"].max() Passes_min = match_csv["Passes"].min() passing_acc_max = match_csv["Passing 
Accuracy"].max() passing_acc_min = match_csv["Passing Accuracy"].min() corners_max = match_csv["Corners"].max() corners_min = match_csv["Corners"].min() goals_max = pl_csv["Goals scored"].max() goals_min = pl_csv["Goals scored"].min() assists_max = pl_csv["Assists"].max() assists_min = pl_csv["Assists"].min() fouls_max = pl_csv["Fouls committed"].max() fouls_min = pl_csv["Fouls committed"].min() total_shots_max = pl_csv["Total shots"].max() total_shots_min = pl_csv["Total shots"].min() shots_target_max = pl_csv["Shots on Target"].max() shots_target_min = pl_csv["Shots on Target"].min() interceptions_max = pl_csv["Interceptions"].max() interceptions_min = pl_csv["Interceptions"].min() crosses_max = pl_csv["Crosses"].max() crosses_min = pl_csv["Crosses"].min() time_played_max = pl_csv["Time Played"].max() time_played_min = pl_csv["Time Played"].min() y_unnormalized = [] y_unnormalized_individual = [] for e,i in enumerate(arr): temp = [] temp.append(denorm(arr[e][0], Block_max, Block_min)) temp.append(denorm(arr[e][1], Block_max, Block_min)) temp.append(denorm(arr[e][2], Possession_max, Possession_min)) temp.append(denorm(arr[e][3], Passes_max, Passes_min)) temp.append(denorm(arr[e][4], Passes_max, Passes_min)) temp.append(denorm(arr[e][5], passing_acc_max, passing_acc_min)) temp.append(denorm(arr[e][6], passing_acc_max, passing_acc_min)) temp.append(denorm(arr[e][7], corners_max, corners_min)) temp.append(denorm(arr[e][8], corners_max, corners_min)) y_unnormalized.append(temp) temp_players = [] for m in range(36): temp_players.append(denorm(arr[e][m+9], goals_max, goals_min)) temp_players.append(denorm(arr[e][m+10], assists_max, assists_min)) temp_players.append(denorm(arr[e][m+11], fouls_max, fouls_min)) temp_players.append(denorm(arr[e][m+12], total_shots_max, total_shots_min)) temp_players.append(denorm(arr[e][m+13], shots_target_max, shots_target_min)) temp_players.append(denorm(arr[e][m+14], interceptions_max, interceptions_min)) 
temp_players.append(denorm(arr[e][m+15], crosses_max, crosses_min)) temp_players.append(denorm(arr[e][m+16], int(time_played_max), int(time_played_min))) y_unnormalized_individual.append(temp_players) return y_unnormalized, y_unnormalized_individual # + colab={} colab_type="code" id="cCm9QoY5RFKL" gt_norm, gt_pl_norm = return_denormed(Y,player_csv,team_csv) # + [markdown] colab_type="text" id="LDdvgwXO6-7h" # # Denorm Original Test # + colab={} colab_type="code" id="OwEZKUX6mJqb" barca_norm, barca_norm_pl = return_denormed(pred_barca, player_csv, team_csv) bayern_norm, bayern_norm_pl = return_denormed(pred_bayern, player_csv, team_csv) city_norm, city_norm_pl = return_denormed(pred_city, player_csv, team_csv) juve_norm, juve_norm_pl = return_denormed(pred_juve, player_csv, team_csv) # + colab={} colab_type="code" id="Ic2YnbiQmLqK" matches_df = pd.DataFrame(columns = ["Game", "Blocks Home", "Blocks Away", "Possession","Passes Home","Passes Away", "Passing Accuracy Home", "Passing Accuracy Away","Corners Home", "Corners Away"]) matches_df["Game"] = ["Barca vs Napoli", "Bayern vs Chelsea","City vs Madrid", "Juventus vs Lyon"] matches_df["Blocks Home"] = [barca_norm[0][0], bayern_norm[0][0], city_norm[0][0], juve_norm[0][0]] matches_df["Blocks Away"] = [barca_norm[0][1], bayern_norm[0][1], city_norm[0][1], juve_norm[0][1]] matches_df["Possession"] = [barca_norm[0][2], bayern_norm[0][2], city_norm[0][2], juve_norm[0][2]] matches_df["Passes Home"] = [barca_norm[0][3], bayern_norm[0][3], city_norm[0][3], juve_norm[0][3]] matches_df["Passes Away"] = [barca_norm[0][4], bayern_norm[0][4], city_norm[0][4], juve_norm[0][4]] matches_df["Passing Accuracy Home"] = [barca_norm[0][5], bayern_norm[0][5], city_norm[0][5], juve_norm[0][5]] matches_df["Passing Accuracy Away"] = [barca_norm[0][6], bayern_norm[0][6], city_norm[0][6], juve_norm[0][6]] matches_df["Corners Home"] = [barca_norm[0][7], bayern_norm[0][7], city_norm[0][7], juve_norm[0][7]] matches_df["Corners Away"] = 
# + colab={"base_uri": "https://localhost:8080/", "height": 207} colab_type="code" id="d2bwbdGbnDTd" outputId="d433aaf9-e9bd-4bf0-b5e5-65b90d3baec0"
matches_df.head()

# + colab={} colab_type="code" id="wpsXDvwNp_a8"
matches_df.to_csv("Match_data.csv",index=False)

# + colab={} colab_type="code" id="d3AuyDpGnEy7"
# Player-level output table (filled in the cells below).
players_df = pd.DataFrame(columns = ["Player", "Team", "Goals", "Assists","Fouls","Total shots", "Shots on target", "Interceptions","Crosses", "Time played"])

# + colab={} colab_type="code" id="gVkDiivNtGsy"
# Generic "Player 1".."Player 18" labels, one 18-slot block per team.
players = []
for i in range(18):
    players.append("Player {}".format(i+1))
players = players*8
players_teams = ['Barca']*18+['Napoli']*18+['Bayern']*18+['Chelsea']*18+['City']*18+['Madrid']*18+['Juventus']*18+['Lyon']*18
players_df["Player"] = players
players_df["Team"] = players_teams

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="qItpGGFY-qIV" outputId="9d961dd2-fc6c-4946-de1e-28471d07fae7"
all_norm_pl = barca_norm_pl[0]+bayern_norm_pl[0]+city_norm_pl[0]+juve_norm_pl[0]
len(all_norm_pl)

# + colab={} colab_type="code" id="GJvsNNO7UH0l"
# Ground-truth player stats for the first validation row.
gt_mat_norm,gt_pl_norm = return_denormed(np.reshape(Y_val[0],(1,Y_val[0].shape[0])),player_csv,team_csv)

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="zeYkhS7kFm7Q" outputId="e1c6a176-eef4-4b8e-afbb-f6f171f396ec"
Y_val[0].shape

# + colab={} colab_type="code" id="oXTWuetjGMcP"
# Keep only the (single) row's flat stat list.
gt_pl_norm = gt_pl_norm[0]

# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="AnviJohL9eS-" outputId="995a2c31-5dc5-47e0-b9e4-e350f740ff06"
# Split the flat list into per-stat columns; each player contributes 8
# consecutive values (goals, assists, fouls, total shots, shots on target,
# interceptions, crosses, time), hence the stride-8 walk.
# NOTE(review): players_df was built with 144 rows above; confirm
# len(gt_pl_norm)//8 matches that before the assignments below.
goals = []
assists = []
fouls = []
total_shots = []
shot_target = []
inter = []
crosses = []
time = []
for i in np.arange(0,len(gt_pl_norm),8):
    goals.append(gt_pl_norm[i])
    assists.append(gt_pl_norm[i+1])
    fouls.append(gt_pl_norm[i+2])
    total_shots.append(gt_pl_norm[i+3])
    shot_target.append(gt_pl_norm[i+4])
    inter.append(gt_pl_norm[i+5])
    crosses.append(gt_pl_norm[i+6])
    time.append(gt_pl_norm[i+7])
print(len(goals),len(assists),len(fouls),len(total_shots),len(shot_target),len(inter),len(crosses),len(time))

# + colab={} colab_type="code" id="7o_xwttQ-RYC"
players_df["Goals"] = goals
players_df["Assists"] = assists
players_df["Fouls"] = fouls
players_df["Total shots"] = total_shots
players_df["Shots on target"] = shot_target
players_df["Interceptions"] = inter
players_df["Crosses"] = crosses
players_df["Time played"] = time

# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="7afeunpXAq4i" outputId="09c7b169-6049-4096-d481-1705e73423f4"
players_df.head()

# + colab={} colab_type="code" id="TJKIKqfYAtCX"
players_df.to_csv("Player_data.csv",index=False)

# + colab={"base_uri": "https://localhost:8080/", "height": 111} colab_type="code" id="0KVmzHlYy2iO" outputId="b63b94ce-d888-494d-9c15-531949059990"
# Min/max ranges pulled once at module level for auto_denorm below.
Goals_max = team_csv["Goals"].max()
Goals_min = team_csv["Goals"].min()
Block_max = team_csv["Blocks"].max()
Block_min = team_csv["Blocks"].min()
Possession_max = team_csv["Possession"].max()
Possession_min = team_csv["Possession"].min()
Passes_max = team_csv["Passes"].max()
Passes_min = team_csv["Passes"].min()
passing_acc_max = team_csv["Passing Accuracy"].max()
passing_acc_min = team_csv["Passing Accuracy"].min()
corners_max = team_csv["Corners"].max()
corners_min = team_csv["Corners"].min()
def auto_denorm(pred_arr):
    """De-normalise one (1, n, 1)-shaped match prediction into [home, away]
    pairs for goals/blocks/possession/passes/passing accuracy/corners.

    Here the away-team copy of each stat sits 62 positions after the home
    copy (unlike the 23-offset layout in auto_mat_denorm) -- presumably the
    wider per-team block of the earlier conv model; confirm against the
    feature builder.  Reads the module-level *_max/*_min ranges above.
    """
    goals = [denorm(pred_arr[0][0][0],Goals_max,Goals_min),denorm(pred_arr[0][62][0],Goals_max,Goals_min)]
    blocks = [denorm(pred_arr[0][4][0],Block_max,Block_min),denorm(pred_arr[0][4+62][0],Block_max,Block_min)]
    possession = [denorm(pred_arr[0][8][0],Possession_max,Possession_min),denorm(pred_arr[0][8+62][0],Possession_max,Possession_min)]
    passes = [denorm(pred_arr[0][9][0],Passes_max,Passes_min),denorm(pred_arr[0][9+62][0],Passes_max,Passes_min)]
    pass_acc = [denorm(pred_arr[0][10][0],passing_acc_max,passing_acc_min),denorm(pred_arr[0][10+62][0],passing_acc_max,passing_acc_min)]
    corners = [denorm(pred_arr[0][6][0],corners_max,corners_min),denorm(pred_arr[0][6+62][0],corners_max,corners_min)]
    return goals,blocks,possession,passes,pass_acc,corners
goals,blocks,possession,passes,pass_acc,corners = auto_denorm(pred_bayern)
# Note: "Goals" is not in the columns list but is added by assignment, so
# it ends up as the LAST column of the frame.
matches_df = pd.DataFrame(columns = ["Team", "Blocks","Possession","Passes","Passing Accuracy", "Corners"])
matches_df["Team"] = ["Bayern","Chelsea"]
matches_df["Goals"] = goals
matches_df["Blocks"] = blocks
matches_df["Possession"] = possession
matches_df["Passes"] = passes
matches_df["Passing Accuracy"] = pass_acc
matches_df["Corners"] = corners
matches_df.head()
Data/model inputs/Copy_of_whatif_model.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.8 64-bit (''Qiskit_Ocean'': conda)'
#     name: python388jvsc74a57bd0173f2ffbce537830dc37cc01123f1dcc118f483457eee13e6b392e68a5e39cc7
# ---

# +
import io

import numpy as np
from numpy import pi
import pydot
from PIL import Image
import retworkx as rx

from qiskit import IBMQ, QuantumRegister, ClassicalRegister, QuantumCircuit, transpile
from qiskit.test.mock import FakeManhattan


# -

def create_qaoa_circuit() -> QuantumCircuit:
    """Build a fixed 3-qubit QAOA-style circuit (angles hard-coded).

    taken from IBMQ Experience
    """
    qreg_q = QuantumRegister(3, 'q')
    circuit = QuantumCircuit(qreg_q)
    circuit.h(qreg_q[0])
    circuit.h(qreg_q[1])
    circuit.cx(qreg_q[1], qreg_q[0])
    circuit.rz(-0.06942544294642758, qreg_q[0])
    circuit.cx(qreg_q[1], qreg_q[0])
    circuit.rz(-0.20827632883928274, qreg_q[0])
    circuit.rx(0.7530122052855767, qreg_q[0])
    circuit.h(qreg_q[2])
    circuit.cx(qreg_q[2], qreg_q[1])
    circuit.rz(1.2496579730356965, qreg_q[1])
    circuit.cx(qreg_q[2], qreg_q[1])
    circuit.rz(-0.20827632883928274, qreg_q[1])
    circuit.rx(0.7530122052855767, qreg_q[1])
    circuit.rx(0.7530122052855767, qreg_q[2])
    return circuit


# transpile circuit for ibmq_manhattan
# FakeManhattan mimics the 65-qubit ibmq_manhattan device (basis gates +
# coupling map) without needing an IBMQ account.
device = FakeManhattan()
conf = device.configuration()
print(conf.basis_gates)
qc = create_qaoa_circuit()
circuit_transpiled = transpile(qc, backend=device, optimization_level=3)
circuit_transpiled.draw(output='mpl')

# +
# Visualise where the transpiled circuit landed on the device: build the
# coupling graph and colour the qubits the transpiler actually used.
graph = rx.PyGraph(multigraph=False)
coupling_map = [tuple(edge) for edge in conf.coupling_map]
graph.add_nodes_from([i for i in range(65)])
graph.add_edges_from_no_data(coupling_map)

# NOTE(review): these node lists are hard-coded from one transpilation
# run; optimization_level=3 layout is not deterministic across versions,
# so re-check them after re-running the cell above.
circuit_nodes = [19, 20, 21]
neighbor_nodes = [12, 18, 22, 25]

def node_attr(node):
    """Graphviz attributes per qubit: green = used by the circuit,
    light blue = direct neighbour, dark blue = unused (label hidden)."""
    kwargs = dict(style='filled', shape="circle", fontsize="14", fixedsize="true", width="0.4", height="0.4")
    if node in circuit_nodes:
        kwargs.update(fillcolor='green')
    elif node in neighbor_nodes:
        kwargs.update(fillcolor='lightblue', label="")
    else:
        kwargs.update(fillcolor='darkblue', label="")
    return kwargs

# Render via graphviz 'neato' and display inline.
dot_str = graph.to_dot(node_attr)
dot = pydot.graph_from_dot_data(dot_str)[0]
png = dot.create_png(prog='neato')
Image.open(io.BytesIO(png))
# -
comparison/Qiskit/LinearProgramming/QAOA/Example/three_var_problem/Example_Circuit_Transpiled_Manhattan.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [conda env:esl] # language: python # name: conda-env-esl-py # --- # ### Principal Component Regression # # The SVD of the centered matrix $\mathbf{X}$ is another way of expressing the *principal components* of the variables in $\mathbf{X}$ where the sample covariance matrix is given by $\mathbf{S} = \mathbf{X}^\top\mathbf{X} / N$. Using the SVD of $\mathbf{X}$ we obtain: # # $$\mathbf{X}^\top\mathbf{X} = \mathbf{V}\mathbf{D}^2\mathbf{V}^\top$$ # # which is the eigen decomposition of $\mathbf{X}^\top\mathbf{X}$ and of $\mathbf{S}$ up to a factor $N$. # # The columns of $\mathbf{V}$ (the eigenvectors $v_j$) are also called the principal compinents directions of $\mathbf{X}$. # # The first principal component direction $v_1$ has the property that $\mathbf{z}_1 = \mathbf{X}v_1 = \mathbf{u}_1 d_1$ has the largest sample variance amongs all normalized linear combinations of $\mathbf{X}$, and the last principle component direction $v_p$ has the property that $\mathbf{z}_p = \mathbf{X}v_p = \mathbf{u}_p d_p$ has the smallest sample variance amongs all normalized linear combinations of $\mathbf{X}$. # # **Note** :The variance of $z$ is given as: # # $$\text{Var}(z_j) = \text{Var}(\mathbf{X}v_j) = \frac{d_j^2}{N}$$ # # Principal component regression forms the derived input columns $\mathbf{z}_m = \mathbf{X}v_m$, and then regresses $\mathbf{y}$ on $\mathbf{z}_1, \mathbf{z}_2, \cdots, \mathbf{z}_M$ for some $M \leq p$. 
Since the $\mathbf{z}_M$ are orthogonal, this regression is just a sum of univariate regressions: # # $$\mathbf{\hat y}_{M}^{\text{pcr}} = \bar y\mathbf{1} + \sum_{m=1}^M \hat\theta_m\mathbf{z}_m$$ # # where $\hat\theta_m = \langle\mathbf{z}_m, y\rangle / \langle\mathbf{z}_m, \mathbf{z}_m\rangle$, To find the coefficients of PCR $\hat{\beta}^{\text{pcr}}(M)$, we can simply replace $\mathbf{z}_M$ with $\mathbf{X}v_m$: # # $$\begin{aligned} # \mathbf{\hat y}_{M}^{\text{pcr}} &= \bar y\mathbf{1} + \sum_{m=1}^M \hat\theta_m\mathbf{X}v_m\\ # &=\bar y\mathbf{1} + \mathbf{X}\underbrace{\sum_{m=1}^M \hat\theta_m v_m}_{\hat{\beta}^{\text{pcr}}(M)} # \end{aligned}$$ # # So, the value of $\hat{\beta}^{\text{pcr}}(M)$ which can be used for future predictions is given by: # # $$\hat{\beta}^{\text{pcr}}(M) = \sum_{m=1}^M \hat\theta_m v_m$$ # # The overall steps of PCR algorithm is given as follows: # # * Standardize the input vector $\mathbf{X}$ to have zero mean and unit variance and subtract the mean from the response $y$. # * Compute the eigendecomposition of $\mathbf{X}^\top\mathbf{X}$ as # # $$\mathbf{X}^\top\mathbf{X} = \mathbf{V}\mathbf{D}^2\mathbf{V}^\top$$ # # * Compute the vectors $\mathbf{z}_m = \mathbf{X}v_m$ # * Compute the regression coefficients $\hat\theta_m$ # # $$\hat\theta_m = \langle\mathbf{z}_m, y\rangle / \langle\mathbf{z}_m, \mathbf{z}_m\rangle$$ # # where $\langle\mathbf{z}_m, \mathbf{z}_m\rangle = d_m^2$, and $d_m^2$ is the $m^{th}$ diagonal element of $\mathbf{D}$ # # * For $0\leq M\leq p$, the PCR estimate of $\mathbf{y}$ is given by: # $$\mathbf{\hat y}_{M}^{\text{pcr}} = \bar y\mathbf{1} + \sum_{m=1}^M \hat\theta_m\mathbf{z}_m$$ # # and $\hat{\beta}^{\text{pcr}}(M) = \sum_{m=1}^M \hat\theta_m v_m$ # # ### Python Implementation # # We use *Prostate Cancer* data to test our implementation of the PCR algorithm. 
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from ml_datasets.esl import ProstateCancer
from sklearn import preprocessing
from esl.chapter03 import kfold, LeastSquaresRegression, RidgeRegression

prostate_cancer = ProstateCancer(verbose=0)

# +
# Split on the predefined train/test flag, standardise the inputs using
# training statistics only, and centre BOTH responses with the training mean.
df = prostate_cancer.load()
train_test = prostate_cancer.train_test
df_x = df[prostate_cancer.meta[:-1]]
df_y = df[prostate_cancer.meta[-1]]

scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(df_x[train_test=='T'].values)
x_test = scaler.transform(df_x[train_test=='F'].values)

y_train_temp = df_y[train_test=='T'].copy().values
y_train = y_train_temp - np.mean(y_train_temp)
y_test_temp = df_y[train_test=='F'].copy().values
y_test = y_test_temp - np.mean(y_train_temp)
# -

# Next, we build the `PrincipalComponentRegression` class based on the mentioned PCR algorithm.
#
# This class contains two methods:
#
# * `fit`: to fit the model using the PCR algorithm. Here, we'll obtain the path of $\hat\beta_{pcr}$.
# * `predict`: to generate prediction given the computed $\hat\beta_{pcr}$.
#
# The following is the `PrincipalComponentRegression` class:

class PrincipalComponentRegression:
    """Principal component regression on standardised inputs / centred response."""

    def __init__(self, **kwargs):
        # Number of principal directions M to regress on (default 1).
        # NOTE(review): other keyword arguments are silently ignored.
        self.num_components = kwargs.get('num_components', 1)

    def fit(self, x, y):
        """Compute beta_hat = sum_m theta_m * v_m over the top `num_components` directions."""
        gram = x.T @ x
        # x'x is symmetric PSD, so the left singular vectors are its
        # eigenvectors v_m and the singular values its eigenvalues d_m^2.
        eigvecs, eigvals, _ = np.linalg.svd(gram)
        coef = np.zeros(x.shape[1])
        for m in range(self.num_components):
            direction = eigvecs[:, m]
            z_m = x @ direction
            theta_m = np.inner(z_m, y) / eigvals[m]
            coef = coef + theta_m * direction
        self.beta_hat = coef

    def predict(self, x):
        """Return predictions of the (centred) response for `x`."""
        return x @ self.beta_hat

    def error(self, x, y):
        """Return the pair (RSS, MSE) of the predictions on (x, y)."""
        residual = self.predict(x) - y
        rss = residual.T @ residual
        return rss, rss / len(residual)

# #### Cross-Validation to find the best number of components/directions
#
# In the book, to find the best number of components/directions, we need to perform cross validation on the training data. The following is the main loop to perform cross validation.
For every number of components, we perform training on the 10 folds and record the MSE: # + x_dict, y_dict = kfold(x_train, y_train, nfold=10, seed=2) num_components = np.arange(x_train.shape[1] + 1) RSS = dict() MRSS = dict() for m in num_components: RSS_temp = [] MRSS_temp = [] for fold in x_dict.keys(): if m == 0: y_train_temp = y_dict[fold]['train'].copy() y_val_temp = y_dict[fold]['val'].copy() error = (y_val_temp).T @ (y_val_temp) merror = error / len(y_val_temp) else: x_train_temp = x_dict[fold]['train'].copy() x_val_temp = x_dict[fold]['val'].copy() y_train_temp = y_dict[fold]['train'].copy() y_val_temp = y_dict[fold]['val'].copy() pcr = PrincipalComponentRegression(num_components=m, intercept=False, mean_y=np.mean(y_train)) pcr.fit(x_train_temp, y_train_temp) error, merror = pcr.error(x_val_temp, y_val_temp) RSS_temp.append(error) MRSS_temp.append(merror) RSS[m] = RSS_temp MRSS[m] = MRSS_temp # - # **Note**: The cross-validation algorithm requires randomly generating data indices; therefore, the performance of the training depends on the randomly partitioned data. # # We use the *one-standard error* rule to choose the most parsimonious model whose error is no more than one standard error above the error of the best model. 
The code below does this: # + x_cv = np.array(list(RSS.keys())) # calculate the mean of MSE from the 10-fold cross validation y_cv_mean = np.array([np.mean(MRSS[key]) for key in MRSS.keys()]) # calculate the standard error y_cv_std = np.array([np.std(MRSS[key]) / (10 ** 0.5) for key in MRSS.keys()]) # find the most parsimonious model upper_bound = np.min(y_cv_mean) + y_cv_std[np.argmin(y_cv_mean)] best_dir = np.min(np.where(y_cv_mean <= upper_bound)[0]) # - # Now, we plot the cross validation error on every number of components (Figure 3.7 - Middle Right): # + plt.figure(figsize=(8, 6)) plt.errorbar(x_cv, y_cv_mean, yerr=y_cv_std, capsize=5, ecolor='C0', color='C1', elinewidth=1, linewidth=1, marker='o', markersize=5) x_cvv = np.linspace(-0.4, 8.4, 20) y_cvv = np.linspace(0.4, 1.8, 20) # this is to plot the purple horizontal line plt.plot(x_cvv, upper_bound * np.ones(len(x_cvv)), '--', color='purple', linewidth=1) # this is to plot the purple vertical line plt.plot(best_dir * np.ones(len(y_cvv)), y_cvv, '--', color='purple', linewidth=1) plt.xlim([-0.4, 8.4]) plt.ylim([0.45, 1.81]) plt.xlabel('Number of Directions') plt.ylabel('CV Error') plt.show() # - # The following part generates the PCR column of Table 3.3. # # Since we know that from the cross-validation the best result is obtained when the number of components equals to 7, we retrain the model using this number of components. Subsequently, we generate the mean-squared and standard errors of the test data: pcr = PrincipalComponentRegression(num_components=best_dir) pcr.fit(x_train, y_train) # + # include intercept in beta beta_0 = np.mean(df_y[train_test=='T'].copy().values) beta = np.append(beta_0, pcr.beta_hat) # calculate the mse and std. 
error y_hat = x_test @ pcr.beta_hat error = (y_test - y_hat).T @ (y_test - y_hat) mse = error / len(y_test) std_error = np.sqrt(np.var((y_test - y_hat) ** 2) / (len(y_test) - 1)) pcr_dframe = pd.DataFrame({'PCR': np.append(beta, np.array([mse, std_error]))}) pcr_dframe['Term'] = ['Intercept'] + list(prostate_cancer.meta[:-1]) + ['Test Error', 'Std Error'] cols = pcr_dframe.columns[::-1] pcr_dframe = pcr_dframe[cols].replace(0, '') pcr_dframe.style.\ hide_index().\ set_caption('Estimated Coefficients').\ set_precision(3).\ apply(lambda x: ['font-weight: bold' if x['Term'] in ['Test Error', 'Std Error'] else '' for i in x], axis=1) # -
Notebooks/Chapter 03 - Linear Methods for Regression/3.5.1 Principal Components Regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import numpy as np
import os
import keras.backend as K

# Force CPU execution; the parameter/memory statistics below do not need a GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
# -

# +
from utils.model import calc_memory_usage, plot_parameter_statistic, count_parameters
from tbpp_model import TBPP512, TBPP512_dense, TBPP512_dense_separable

# Compare parameter count and memory footprint of the three TBPP variants.
# Iterate over the class objects directly instead of eval()-ing their names:
# identical output (the printed name is cls.__name__) without string evaluation.
for model_cls in [
    TBPP512,
    TBPP512_dense,
    TBPP512_dense_separable,
]:
    K.clear_session()  # drop the previous model's graph before building the next one
    model = model_cls()
    print(model_cls.__name__)
    #print(model.count_params())
    count_parameters(model)
    calc_memory_usage(model)
    plot_parameter_statistic(model, layer_types=['Dense', 'Conv2D', 'SeparableConv2D', 'DepthwiseConv2D'])

# + active=""
# TBPP512 (VGG) 35,763,078
# TBPP512_dense 23,477,798
# TBPP512_dense_separable 2,226,374
#
# 2226374 / 35763078 = 0.0622534223704123, 0.9377465776295877 less parameters
# -
TBPP_models.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + from __future__ import absolute_import from __future__ import division from __future__ import print_function import _init_paths import os import cv2 from opts import opts from detectors.detector_factory import detector_factory image_ext = ['jpg', 'jpeg', 'png', 'webp'] video_ext = ['mp4', 'mov', 'avi', 'mkv'] time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge'] def demo(opt): opt.demo == "/data1/wjb/Centernet_mod/images/17790319373_bd19b24cfc_k.jpg" opt.load_model == "/data1/wjb/Centernet_mod/models/ctdet_coco_dla_2x.pth" os.environ['CUDA_VISIBLE_DEVICES'] = '2' opt.debug = max(opt.debug, 1) Detector = detector_factory[opt.task] detector = Detector(opt) if opt.demo == 'webcam' or \ opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext: cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo) detector.pause = False while True: _, img = cam.read() #cv2.imshow('input', img) ret = detector.run(img) time_str = '' for stat in time_stats: time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat]) print(time_str) if cv2.waitKey(1) == 27: return # esc to quit else: if os.path.isdir(opt.demo): image_names = [] ls = os.listdir(opt.demo) for file_name in sorted(ls): ext = file_name[file_name.rfind('.') + 1:].lower() if ext in image_ext: image_names.append(os.path.join(opt.demo, file_name)) else: image_names = [opt.demo] for (image_name) in image_names: ret = detector.run(image_name) time_str = '' for stat in time_stats: time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat]) print(time_str) if __name__ == '__main__': opt = opts().init() demo(opt) # -
src/.ipynb_checkpoints/Untitled-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## Create Table
# Source: https://stackoverflow.com/questions/35160256

# + pycharm={"name": "#%%\n"}
from IPython.display import HTML, display
import tabulate

# Body name, radius (km), mass (x 10^21 kg).
rows = [
    ["Sun", 696000, 1989100000],
    ["Earth", 6371, 5973.6],
    ["Moon", 1737, 73.5],
    ["Mars", 3390, 641.85],
]

# Render the rows as an HTML table inside the notebook.
html_table = tabulate.tabulate(rows, tablefmt='html')
display(HTML(html_table))
notebooks/my_notebook.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd

# %matplotlib inline
# -

# Look for data in a project sub-folder
potholes = pd.read_excel("potholes/2007-2017 POTHOLE.xls")

# Inspect column types and overall size.
potholes.dtypes

potholes.shape

# +
# There was a limit on rows for old excel files
# When you see 65534 or 65535 or 65536, get suspicious. The data is probably cropped arbitrarily.
# -

potholes.head()

potholes.tail()

# Most frequently reported streets.
potholes["Street"].value_counts().head(20)
06-potholes.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# # Problem 62
# ## Cubic permutations
#
# The cube, $41063625$ $(345^3)$, can be permuted to produce two other cubes: $56623104$ $(384^3)$ and $66430125$ $(405^3)$. In fact, $41063625$ is the smallest cube which has exactly three permutations of its digits which are also cube.
#
# Find the smallest cube for which exactly five permutations of its digits are cube.
#
# ## Solution

# + pycharm={"name": "#%%\n"}
def compute(n: int) -> int:
    """Return the smallest cube whose digits permute into exactly n cubes.

    Cubes that are digit permutations of one another share the same
    sorted-digit signature, so we bucket cubes by signature while scanning
    bases in increasing order and stop as soon as one bucket collects n
    members; the first member of that bucket is the smallest such cube.
    """
    buckets = {}
    for base in range(10 ** n):
        cube = base ** 3
        signature = ''.join(sorted(str(cube)))
        bucket = buckets.setdefault(signature, [])
        bucket.append(cube)
        if len(bucket) == n:
            return bucket[0]


# + pycharm={"name": "#%%\n"}
compute(3)

# + pycharm={"name": "#%%\n"}
compute(5)

# + pycharm={"name": "#%%\n"}
# %timeit -n 100 -r 1 -p 6 compute(5)
problems/0062/solution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: PythonData # language: python # name: pythondata # --- # Add Matplotlib inline magic command # %matplotlib inline import matplotlib.pyplot as plt import pandas as pd city_data_to_load = "Resources/city_data.csv" ride_data_to_load = "Resources/ride_data.csv" #Read the city data file and store it in a pandas DataFrame city_data_df = pd.read_csv(city_data_to_load) city_data_df.head(10) #Read the ride data file and store it in a pandas DataFrame ride_data_df = pd.read_csv(ride_data_to_load) ride_data_df.head(10) # missing value, get the columns and the rows that are not null. city_data_df.count() #Get the columns and the rows that are not null city_data_df.isnull().sum() #Get the data types of each columns city_data_df.dtypes #Get the unique values of the type of city city_data_df["type"].unique() #Get the number of data points from the Urban cities. sum(city_data_df["type"] == "Urban") sum(city_data_df["type"] == "Rural") #Get the columns and the rows that are not null ride_data_df.count() # Get the columns and the rows that are not null. ride_data_df.isnull().sum() # Get the data types of each column. ride_data_df.dtypes # + # Combine the data into a single dataset pyber_data_df = pd.merge(ride_data_df, city_data_df, how="left", on=["city", "city"]) # Display the DataFrame pyber_data_df.head() # - # Create the Urban city DataFrame. urban_cities_df = pyber_data_df[pyber_data_df["type"] == "Urban"] urban_cities_df.head() # Create the Suburban and Rural city DataFrames. suburban_cities_df = pyber_data_df[pyber_data_df["type"] == "Suburban"] rural_cities_df = pyber_data_df[pyber_data_df["type"] == "Rural"] rural_cities_df # Get the number of rides for urban cities. 
urban_ride_count = urban_cities_df.groupby(["city"]).count()["ride_id"] urban_ride_count.head() # + # Create the suburban and rural ride count. suburban_ride_count = suburban_cities_df.groupby(["city"]).count()["ride_id"] rural_ride_count = rural_cities_df.groupby(["city"]).count()["ride_id"] # - # Get average fare for each city in the urban cities. urban_avg_fare = urban_cities_df.groupby(["city"]).mean()["fare"] urban_avg_fare.head() # Get average fare for each city in the suburban and rural cities. suburban_avg_fare = suburban_cities_df.groupby(["city"]).mean()["fare"] rural_avg_fare = rural_cities_df.groupby(["city"]).mean()["fare"] # Get the average number of drivers for each urban city. urban_driver_count = urban_cities_df.groupby(["city"]).mean()["driver_count"] urban_driver_count.head() # Get the average number of drivers for each city for the suburban and rural cities. suburban_driver_count = suburban_cities_df.groupby(["city"]).mean()["driver_count"] rural_driver_count = rural_cities_df.groupby(["city"]).mean()["driver_count"] # Build the scatter plots for urban cities. plt.scatter(urban_ride_count, urban_avg_fare) # Build the scatter plots for urban cities. plt.scatter(urban_ride_count, urban_avg_fare, s=urban_driver_count); #Build the scatter plots for urban cities. plt.scatter(urban_ride_count, urban_avg_fare, s=10*urban_driver_count, c="coral", edgecolor = "black", linewidths = 1, alpha = 0.7, label = "Urban") plt.title("PyBer Ride-Sharing Data (2019)") plt.ylabel("Average Fare ($)") plt.xlabel("Total Number of Rides(Per City)") plt.grid(True) plt.legend(); # Build the scatter plots for suburban cities. plt.scatter(suburban_ride_count, suburban_avg_fare, s=10*suburban_driver_count, c="skyblue", edgecolor="black", linewidths=1, alpha=0.8, label="Suburban") plt.title("PyBer Ride-Sharing Data (2019)") plt.ylabel("Average Fare ($)") plt.xlabel("Total Number of Rides (Per City)") plt.grid(True) # Add the legend. 
plt.legend() # Build the scatter plots for rural cities. plt.scatter(rural_ride_count, rural_avg_fare, s=10*rural_driver_count, c="gold", edgecolor="black", linewidths=1, alpha=0.8, label="Rural") plt.title("PyBer Ride-Sharing Data (2019)") plt.ylabel("Average Fare ($)") plt.xlabel("Total Number of Rides (Per City)") plt.grid(True) # Add the legend. plt.legend() # ### Create a Bubble Chart for All Cities # + # Add the scatter charts for each type of city. plt.scatter(urban_ride_count, urban_avg_fare, s=10*urban_driver_count, c="coral", edgecolor="black", linewidths=1, alpha=0.8, label="Urban") plt.scatter(suburban_ride_count, suburban_avg_fare, s=10*suburban_driver_count, c="skyblue", edgecolor="black", linewidths=1, alpha=0.8, label="Suburban") plt.scatter(rural_ride_count, rural_avg_fare, s=10*rural_driver_count, c="gold", edgecolor="black", linewidths=1, alpha=0.8, label="Rural") # Incorporate the other graph properties plt.title("PyBer Ride-Sharing Data (2019)", fontsize=20) plt.ylabel("Average Fare ($)", fontsize=12) plt.xlabel("Total Number of Rides (Per City)", fontsize=12) plt.grid(True) # Add the legend. plt.legend() # Create a legend lgnd = plt.legend(fontsize="12", mode="Expanded", scatterpoints=1, loc="best", title="City Types") lgnd.legendHandles[0]._sizes = [75] lgnd.legendHandles[1]._sizes = [75] lgnd.legendHandles[2]._sizes = [75] lgnd.get_title().set_fontsize(12) # Incorporate a text label about circle size. plt.text(42, 35, "Note:\nCircle size correlates\nwith driver count per city.", fontsize="12") # Save the figure. plt.savefig("Resources/Fig1.png") # Show the plot plt.show() # - # Get summary statistics. urban_cities_df.describe() # Get summary statistics. suburban_ride_count.describe() # Calculate the mean of the ride count for each city type. round(urban_ride_count.mean(),2), round(suburban_ride_count.mean(),2), round(rural_ride_count.mean(),2) # Calculate the mode of the ride count for the urban cities. 
urban_ride_count.mode() # Import NumPy and the stats module from SciPy. import numpy as np import scipy.stats as sts # + # Calculate the measures of central tendency for the ride count for the urban cities. mean_urban_ride_count = np.mean(urban_ride_count) print(f"The mean for the ride counts for urban trips is {mean_urban_ride_count:.2f}.") median_urban_ride_count = np.median(urban_ride_count) print(f"The median for the ride counts for urban trips is {median_urban_ride_count}.") mode_urban_ride_count = sts.mode(urban_ride_count) print(f"The mode for the ride counts for urban trips is {mode_urban_ride_count}.") # - # Get the fares for the urban cities. urban_fares = urban_cities_df["fare"] urban_fares.head() # + # Calculate the measures of central tendency for the average fare for the urban cities. mean_urban_fares = np.mean(urban_fares) print(f"The mean fare price for urban trips is ${mean_urban_fares:.2f}.") median_urban_fares = np.median(urban_fares) print(f"The median fare price for urban trips is ${median_urban_fares:.2f}.") mode_urban_fares = sts.mode(urban_fares) print(f"The mode fare price for urban trips is {mode_urban_fares}.") # - # Get the driver count data from the urban cities. urban_drivers = urban_cities_df['driver_count'] urban_drivers.head() # Create a box-and-whisker plot for the urban cities ride count. x_labels = ["Urban"] fig, ax = plt.subplots() ax.boxplot(urban_ride_count, labels=x_labels) # Add the title, y-axis label and grid. ax.set_title('Ride Count Data (2019)') ax.set_ylabel('Number of Rides') ax.set_yticks(np.arange(10, 41, step=2.0)) ax.grid() plt.show() # Add all ride count box-and-whisker plots to the same graph. 
x_labels = ["Urban", "Suburban","Rural"] ride_count_data = [urban_ride_count, suburban_ride_count, rural_ride_count] fig, ax = plt.subplots(figsize=(10, 6)) ax.set_title('Ride Count Data (2019)',fontsize=20) ax.set_ylabel('Number of Rides',fontsize=14) ax.set_xlabel("City Types",fontsize=14) ax.boxplot(ride_count_data, labels=x_labels) ax.set_yticks(np.arange(0, 45, step=3.0)) ax.grid() # Save the figure. plt.savefig("Resources/Fig2.png") plt.show() # Create a box-and-whisker plot for the urban fare data. x_labels = ["Urban"] fig, ax = plt.subplots() ax.boxplot(urban_fares, labels=x_labels) # Add the title, y-axis label and grid. ax.set_title('Ride Fare Data (2019)') ax.set_ylabel('Fare($USD)') ax.set_yticks(np.arange(0, 51, step=5.0)) ax.grid() plt.show() print("Summary Statistics") urban_fares.describe() # Create the box-and-whisker plot for the urban driver count data. x_labels = ["Urban"] fig, ax = plt.subplots() ax.boxplot(urban_drivers,labels=x_labels) # Add the title, y-axis label and grid. ax.set_title('Driver Count Data (2019)') ax.set_ylabel('Number of Drivers)') ax.set_yticks(np.arange(0, 90, step=5.0)) ax.grid() plt.show() print("Summary Statistics") urban_drivers.describe() # Get the sum of the fares for each city type. sum_fares_by_type = pyber_data_df.groupby(["type"]).sum()["fare"] sum_fares_by_type # Get the sum of all the fares. total_fares = pyber_data_df["fare"].sum() total_fares # Calculate the percentage of fare for each city type. type_percents = 100 * sum_fares_by_type / total_fares type_percents # Calculate the percentage of fare for each city type. type_percents = 100 * pyber_data_df.groupby(["type"]).sum()["fare"] / pyber_data_df["fare"].sum() type_percents # Build the percentage of fares by city type pie chart. plt.pie(type_percents, labels=["Rural", "Suburban", "Urban"]) plt.show() # Build the percentage of fares by city type pie chart. 
plt.pie(type_percents, labels=["Rural", "Suburban", "Urban"], colors=["gold", "lightskyblue", "lightcoral"], explode=[0, 0, 0.1], autopct='%1.1f%%', shadow=True, startangle=150) plt.title("% of Total Fares by City Type") # Show Figure plt.show() # Import mpl to change the plot configurations using rcParams. import matplotlib as mpl # Build Pie Chart plt.subplots(figsize=(10, 6)) plt.pie(type_percents, labels=["Rural", "Suburban", "Urban"], colors=["gold", "lightskyblue", "lightcoral"], explode=[0, 0, 0.1], autopct='%1.1f%%', shadow=True, startangle=150) plt.title("% of Total Fares by City Type") # Change the default font size from 10 to 14. mpl.rcParams['font.size'] = 14 # Save Figure plt.savefig("Resources/Fig5.png") # Show Figure plt.show() pyber_data_df.groupby(["type"])
PyBer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline

# # Spine Placement Demo
#
#
# Adjusting the location and appearance of axis spines.
#

import numpy as np
import matplotlib.pyplot as plt

# +
fig = plt.figure()

x = np.linspace(-np.pi, np.pi, 100)
y = 2 * np.sin(x)

# One entry per panel: (subplot index, title, left-spine position,
# bottom-spine position).  All four panels share the remaining setup.
panels = [
    (1, 'centered spines', 'center', 'center'),
    (2, 'zeroed spines', 'zero', 'zero'),
    (3, 'spines at axes (0.6, 0.1)', ('axes', 0.6), ('axes', 0.1)),
    (4, 'spines at data (1, 2)', ('data', 1), ('data', 2)),
]

for index, title, left_pos, bottom_pos in panels:
    ax = fig.add_subplot(2, 2, index)
    ax.set_title(title)
    ax.plot(x, y)
    ax.spines['left'].set_position(left_pos)
    ax.spines['right'].set_color('none')
    ax.spines['bottom'].set_position(bottom_pos)
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
# -

# Define a method that adjusts the location of the axis spines
#
#


def adjust_spines(ax, spines):
    """Keep only the named spines, pushed 10 points outward; hide the rest.

    Ticks are shown only on the sides whose spine is kept.
    """
    for name, spine in ax.spines.items():
        if name in spines:
            spine.set_position(('outward', 10))  # outward by 10 points
        else:
            spine.set_color('none')  # don't draw spine

    # Turn off ticks on sides with no spine.
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])

    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])


# Create another figure using our new ``adjust_spines`` method
#
#

# +
fig = plt.figure()

x = np.linspace(0, 2 * np.pi, 100)
y = 2 * np.sin(x)

# Same curve four times, each panel keeping a different subset of spines.
spine_sets = [['left'], [], ['left', 'bottom'], ['bottom']]
for index, kept in enumerate(spine_sets, start=1):
    ax = fig.add_subplot(2, 2, index)
    ax.plot(x, y, clip_on=False)
    adjust_spines(ax, kept)

plt.show()
# -
matplotlib/gallery_jupyter/ticks_and_spines/spine_placement_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Yandex DataSphere Kernel
#     language: python
#     name: python3
# ---

# + [markdown] cellId="io6jozpdhcggy2u84jb7j"
# ## Install dependencies

# + cellId="sqtr4u9rj7ftgkb51at9l"
# #!s
# %pip install fastai~=2.1

# + [markdown] cellId="na8tpbu229at3nbosrlmk"
# ### Start here, go to update fastai only if you don't have v2

# + cellId="wrshs7sf16cayravbtpbc6"
# #!s
# %pip show fastai

# + cellId="0untly1t5sp8c6e9emv0n4t"
# #!s
from fastai.vision.all import *

# + [markdown] cellId="ktv28ge2zidqalo3c9w0e"
# ## Prepare dataset

# + cellId="jj03b0pzlasw7l8m54hbvo"
# #!s
# %cd yc-fastai-test

# + cellId="8lbwg7tuys6h7tl4cdien"
# #!s
import os

# Reuse a previously extracted copy of the Pets dataset if present,
# otherwise download and unpack it into the current directory.
path = Path('oxford-iiit-pet/images')
if not os.path.isdir(path):
    path = untar_data(URLs.PETS, dest='.')/'images'

# + [markdown] cellId="1ndip9e7n3ipg3mmht5qah"
# ## Learn

# + cellId="l0n7gm8zzcfv0aiw6svkm"
# #!L
## For some awkward reason current directory doesn't survive instance type change
# %cd yc-fastai-test

# + cellId="zg2hdn0msqco0zntq6065b"
# #!L
def is_cat(x):
    """Label via the dataset's filename convention: cat breeds are capitalised."""
    return 'cat' if x[0].isupper() else 'dog'

dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(224))

learn = cnn_learner(dls, resnet34, metrics=error_rate)

# + cellId="bcyncf7fdgf1r179n4oqln"
# #!L
learn.fine_tune(1)

# + [markdown] cellId="hb8p7dpcugsjcbumovhs1r"
# ## Show some results

# + cellId="rchqhubbkg9lw214vb83xp"
# #!L
learn.recorder.plot_loss()  # NOTE: learn doesn't survive instance change!!!

# + cellId="oyle9wisu6vy0nqfzmwve"
# #!L
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()

# + cellId="o3qwupve22fs8b1h140bxh"
# #!L
interp.plot_top_losses(15, nrows=3)
yc-fastai-test.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from logicqubit.logic import *


# +
def _qft_ladder(qr, sign):
    """Shared rotation ladder used by both qft and iqft.

    For each qubit i, apply controlled phase rotations of angle
    sign * pi / 2**(i - j) controlled by every earlier qubit j,
    then a Hadamard on qubit i.  sign is +1 forward, -1 inverse.
    """
    for i in range(len(qr)):
        for j in range(i):
            qr[i].CU1(qr[j], sign * pi / 2 ** (i - j))
        qr[i].H()


def qft(qr):
    """Quantum Fourier transform over the qubit list `qr`."""
    _qft_ladder(qr, 1)


def iqft(qr):
    """Inverse quantum Fourier transform over the qubit list `qr`."""
    _qft_ladder(qr, -1)


def swap(s1, s2):
    """Swap two qubits using the standard three-CNOT construction."""
    s2.CX(s1)
    s1.CX(s2)
    s2.CX(s1)


# +
logicQuBit = LogicQuBit(4, first_left=True)

x1 = Qubit()
x2 = Qubit()
x3 = Qubit()
x4 = Qubit()

#x1.X()
#x2.X()
#x3.X()
x4.X()

iqft([x1, x2, x3, x4])  # first qubit 'x1' left
swap(x1, x4)
swap(x2, x3)

# +
# 360/16 = 22.5, so the qubit phase angles are all multiples of 22.5 degrees.
x1.getPsiAtAngles(degree=True)

# +
qft([x1, x2, x3, x4])
swap(x1, x4)
swap(x2, x3)

logicQuBit.Measure([x1, x2, x3, x4], True)
logicQuBit.Plot()
# -
qft 4q.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Redundancy and KDE/PG-means # # Is the redundancy high enough on the Radio Galaxy Zoo website? Do PG-means or KDE handle low redundancy cases differently? How often does taking the mean give an accurate or useful result (based on Norris labels)? If PG-means and KDE disagree, what do the disagreements look like? If there are similar results for both, which is faster? # # *Bonus question:* Can we model the redundancy needed for a given radio object? # + import itertools import logging import sys from astropy.coordinates import SkyCoord import h5py import matplotlib.pyplot as plt import numpy import sklearn sys.path.insert(1, '..') # Needs MongoDB running. import crowdastro.rgz_analysis.consensus as consensus, crowdastro.rgz_data as data from crowdastro.consensuses import pg_means ARCMIN = 1 / 60 CROWDASTRO_H5_PATH = '../data/crowdastro.h5' IMAGE_SIZE = 200 * 200 NORRIS_DAT_PATH = '../data/norris_2006_atlas_classifications_ra_dec_only.dat' TRAINING_H5_PATH = '../data/training.h5' # - # ## How often does KDE fail? 
# + n_kde_success = 0 n_kde_failure_not_caught = 0 n_kde_failure_caught = 0 n_kde_total = 0 for subject in data.get_all_subjects(survey='atlas', field='cdfs').limit(2000): c = consensus.consensus(subject['zooniverse_id'], None) for answer in c['answer'].values(): if 'ir_peak' in answer and answer['peak_data']['npeaks'] < 10: n_kde_success += 1 elif 'ir_peak' in answer: n_kde_failure_not_caught += 1 elif 'ir' in answer: n_kde_failure_caught += 1 n_kde_total += 1 # - print('KDE succeeded: {:.02%}'.format(n_kde_success / n_kde_total)) print('KDE failed (caught, mean used): {:.02%}'.format(n_kde_failure_caught / n_kde_total)) print('KDE failed (not caught): {:.02%}'.format(n_kde_failure_not_caught / n_kde_total)) # ## How often does PG-means fail? # + n_pg_success = 0 n_pg_failure = 0 n_pg_total = 0 # Taken mostly from crowdastro.consensuses.find_consensuses. with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5: class_positions = f_h5['/atlas/cdfs/classification_positions'] class_combinations = f_h5['/atlas/cdfs/classification_combinations'] ir_coords = f_h5['/{}/cdfs/numeric'.format('wise')][:, :2] pos_groups = itertools.groupby(class_positions, key=lambda z: z[0]) com_groups = itertools.groupby(class_combinations, key=lambda z: z['index']) for (i, pos_group), (j, com_group) in zip(pos_groups, com_groups): assert i == j com_group = list(com_group) pos_group = list(pos_group) total_classifications = 0 radio_counts = {} for _, full_com, _ in com_group: count = radio_counts.get(full_com, 0) count += 1 / (full_com.count(b'|') + 1) radio_counts[full_com] = count total_classifications += 1 / (full_com.count(b'|') + 1) radio_consensus = max(radio_counts, key=radio_counts.get) for radio_signature in radio_consensus.split(b'|'): n_pg_total += 1 percentage_consensus = (radio_counts[radio_consensus] / total_classifications) locations = [] for (_, x, y), (_, full, radio) in zip(pos_group, com_group): if full == radio_consensus and radio == radio_signature: locations.append((x, y)) 
            locations = numpy.array(locations)
            # Drop rows where both coordinates are NaN (missing clicks).
            locations = locations[~numpy.all(numpy.isnan(locations), axis=1)]

            (x, y), success = pg_means(locations)
            if not success:
                n_pg_failure += 1
            else:
                n_pg_success += 1

            # NOTE(review): this guard is the last statement of the loop body,
            # so the `continue` is a no-op — presumably left over from copied
            # code that did more work after this point.
            if numpy.isnan(x) or numpy.isnan(y):
                continue
# -

print('PG-means succeeded: {:.02%}'.format(n_pg_success / n_pg_total))
print('PG-means failed (caught): {:.02%}'.format(n_pg_failure / n_pg_total))

# That's a surprisingly high number of failures. Most likely, the problem is a parameter problem, but instead of fiddling with parameters, let's just use a simpler method.
#
# ## Lowest BIC GMM

def lowest_bic(locations):
    """Pick the most likely click location by Gaussian mixture modelling.

    Fits GMMs with an increasing number of components and keeps the one
    with the lowest BIC; the mean of the heaviest-weighted component is
    taken as the consensus location.

    Returns a tuple ``(location, success, reason)`` where ``reason`` is
    ``'mean'`` (no GMM could be fitted, plain mean returned),
    ``'low_redundancy'`` (several components tie for the largest weight),
    or ``''`` on success.
    """
    min_bic = float('inf')
    min_gmm = None
    # Try k = 1..4 mixture components.  NOTE(review): the original comment
    # said "assume no more than 5 candidate objects", but range(1, 5) stops
    # at k = 4 — confirm which was intended.  Probably reasonable given
    # ~20 clicks max.
    for k in range(1, 5):
        # sklearn.mixture.GMM is the pre-0.18 scikit-learn API.
        gmm = sklearn.mixture.GMM(n_components=k, covariance_type='full')
        try:
            gmm.fit(locations)
        except ValueError:
            # Too few points to fit k components; stop growing k.
            break

        bic = gmm.bic(locations)
        if bic < min_bic:
            min_bic = bic
            min_gmm = gmm

    # min_gmm is None only when no fit at all succeeded (fitted models are
    # truthy); fall back to the plain mean of the clicks.
    if not min_gmm:
        return locations.mean(axis=0), False, 'mean'

    # More than one component tying for the maximum weight means the peak is
    # ambiguous — flag as a low-redundancy failure, but still return the
    # heaviest component's mean below as a best guess.
    if sum(w == max(min_gmm.weights_) for w in min_gmm.weights_) > 1:
        success = False
        reason = 'low_redundancy'
    else:
        success = True
        reason = ''

    return min_gmm.means_[min_gmm.weights_.argmax()], success, reason

# +
n_bic_success = 0
n_bic_failure_mean = 0
n_bic_failure_tie = 0
n_bic_total = 0

# Taken mostly from crowdastro.consensuses.find_consensuses.
with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5: class_positions = f_h5['/atlas/cdfs/classification_positions'] class_combinations = f_h5['/atlas/cdfs/classification_combinations'] ir_coords = f_h5['/{}/cdfs/numeric'.format('wise')][:, :2] pos_groups = itertools.groupby(class_positions, key=lambda z: z[0]) com_groups = itertools.groupby(class_combinations, key=lambda z: z['index']) for (i, pos_group), (j, com_group) in zip(pos_groups, com_groups): assert i == j com_group = list(com_group) pos_group = list(pos_group) total_classifications = 0 radio_counts = {} for _, full_com, _ in com_group: count = radio_counts.get(full_com, 0) count += 1 / (full_com.count(b'|') + 1) radio_counts[full_com] = count total_classifications += 1 / (full_com.count(b'|') + 1) radio_consensus = max(radio_counts, key=radio_counts.get) for radio_signature in radio_consensus.split(b'|'): n_bic_total += 1 percentage_consensus = (radio_counts[radio_consensus] / total_classifications) locations = [] for (_, x, y), (_, full, radio) in zip(pos_group, com_group): if full == radio_consensus and radio == radio_signature: locations.append((x, y)) locations = numpy.array(locations) locations = locations[~numpy.all(numpy.isnan(locations), axis=1)] (x, y), success, reason = lowest_bic(locations) if not success and reason == 'mean': n_bic_failure_mean += 1 elif not success and reason == 'low_redundancy': n_bic_failure_tie += 1 elif not success and reason: raise ValueError('Unknown failure reason: {}'.format(reason)) else: n_bic_success += 1 if numpy.isnan(x) or numpy.isnan(y): continue # - print('BIC succeeded: {:.02%}'.format(n_bic_success / n_bic_total)) print('BIC failed (caught, mean used): {:.02%}'.format(n_bic_failure_mean / n_bic_total)) print('BIC failed (caught, best guess): {:.02%}'.format(n_bic_failure_tie / n_bic_total)) # This is a *little* misleading. 
The BIC method, since it uses Gaussian fitting, may fit a single Gaussian to a few points if it's the best fit it can find, reducing it to the mean. Let's compare the labels to Norris et al. from crowdastro.consensuses import lowest_bic_gmm with h5py.File(CROWDASTRO_H5_PATH, 'r') as f_h5: # Copied from crowdastro.consensuses. class_positions = f_h5['/atlas/cdfs/classification_positions'] class_combinations = f_h5['/atlas/cdfs/classification_combinations'] assert len(class_positions) == len(class_combinations) logging.debug('Finding consensuses for %d classifications.', len(class_combinations)) # Pre-build the IR tree. ir_coords = f_h5['/{}/cdfs/numeric'.format('swire')][:, :2] ir_tree = sklearn.neighbors.KDTree(ir_coords) cons_positions = [] cons_combinations = [] # Data integrity and assumptions checks. assert numpy.array_equal(class_positions[:, 0], class_combinations['index']) assert numpy.array_equal(class_positions[:, 0], sorted(class_positions[:, 0])) pos_groups = itertools.groupby(class_positions, key=lambda z: z[0]) com_groups = itertools.groupby(class_combinations, key=lambda z: z['index']) for (i, pos_group), (j, com_group) in zip(pos_groups, com_groups): assert i == j com_group = list(com_group) # For multiple iterations. pos_group = list(pos_group) total_classifications = 0 # Find the radio consensus. Be wary when counting: If there are multiple # AGNs identified in one subject, *that classification will appear # multiple times*. I'm going to deal with this by dividing the weight of # each classification by how many pipes it contains plus one. radio_counts = {} # Radio signature -> Count for _, full_com, _ in com_group: count = radio_counts.get(full_com, 0) count += 1 / (full_com.count(b'|') + 1) radio_counts[full_com] = count total_classifications += 1 / (full_com.count(b'|') + 1) for count in radio_counts.values(): # Despite the divisions, we should end up with integers overall. 
assert numpy.isclose(round(count), count) assert numpy.isclose(round(total_classifications), total_classifications) radio_consensus = max(radio_counts, key=radio_counts.get) # Find the location consensus. For each radio combination, run a # location consensus function on the positions associated with that # combination. for radio_signature in radio_consensus.split(b'|'): percentage_consensus = (radio_counts[radio_consensus] / total_classifications) locations = [] for (_, x, y), (_, full, radio) in zip(pos_group, com_group): if full == radio_consensus and radio == radio_signature: locations.append((x, y)) locations = numpy.array(locations) locations = locations[~numpy.all(numpy.isnan(locations), axis=1)] (x, y), success = lowest_bic_gmm(locations) if numpy.isnan(x) or numpy.isnan(y): logging.debug('Skipping NaN PG-means output.') continue # Match the (x, y) position to an IR object. dist, ind = ir_tree.query([(x, y)]) cons_positions.append((i, ind[0][0], success)) cons_combinations.append((i, radio_signature, percentage_consensus)) logging.debug('Found %d consensuses (before duplicate removal).', len(cons_positions)) # Remove duplicates. For training data, I don't really care if radio # combinations overlap (though I need to care if I generate a catalogue!) so # just take duplicated locations and pick the one with the highest radio # consensus that has success. cons_objects = {} # Maps IR index to (ATLAS index, success, # percentage_consensus) for (atlas_i, ir_i, success), (atlas_j, radio, percentage) in zip( cons_positions, cons_combinations): assert atlas_i == atlas_j if ir_i not in cons_objects: cons_objects[ir_i] = (atlas_i, success, percentage) continue if cons_objects[ir_i][1] and not success: # Preference successful KDE/PG-means. continue if not cons_objects[ir_i][1] and success: # Preference successful KDE/PG-means. cons_objects[ir_i] = (atlas_i, success, percentage) continue # If we get this far, we have the same success state. 
Choose based on # radio consensus. if percentage > cons_objects[ir_i][2]: cons_objects[ir_i] = (atlas_i, success, percentage) continue logging.debug('Found %d consensuses.', int(len(cons_objects))) cons_objects = numpy.array([(atlas_i, ir_i, success, percentage) for ir_i, (atlas_i, success, percentage) in sorted(cons_objects.items())]) # + # Load Norris labels. with h5py.File(TRAINING_H5_PATH, 'r') as training_h5: crowdsourced_labels = training_h5['labels'].value with h5py.File(CROWDASTRO_H5_PATH, 'r') as crowdastro_h5: ir_names = crowdastro_h5['/swire/cdfs/string'].value ir_positions = crowdastro_h5['/swire/cdfs/numeric'].value[:, :2] ir_tree = sklearn.neighbors.KDTree(ir_positions) with open(NORRIS_DAT_PATH, 'r') as norris_dat: norris_coords = [r.strip().split('|') for r in norris_dat] norris_labels = numpy.zeros((len(ir_positions))) for ra, dec in norris_coords: # Find a neighbour. skycoord = SkyCoord(ra=ra, dec=dec, unit=('hourangle', 'deg')) ra = skycoord.ra.degree dec = skycoord.dec.degree ((dist,),), ((ir,),) = ir_tree.query([(ra, dec)]) if dist < 0.1: norris_labels[ir] = 1 # - # Convert radio labels into IR labels. bic_labels = numpy.zeros(norris_labels.shape) for _, ir_i, _, _ in cons_objects: bic_labels[ir_i] = 1 sklearn.metrics.confusion_matrix(bic_labels, norris_labels) sklearn.metrics.confusion_matrix(crowdsourced_labels, norris_labels)
notebooks/38_redundancy.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] nbsphinx="hidden" # This notebook is part of the `nbsphinx` documentation: https://nbsphinx.readthedocs.io/. # - # # Controlling Notebook Execution # # Notebooks with no outputs are automatically executed during the Sphinx build process. # If, however, there is at least one output cell present, the notebook is not evaluated and included as is. # # The following notebooks show how this default behavior can be used and customized. # + [markdown] nbsphinx-toctree={} # * [Pre-Executing Notebooks](pre-executed.ipynb) # * [Explicitly Dis-/Enabling Notebook Execution](never-execute.ipynb) # * [Ignoring Errors](allow-errors.ipynb) # * [Ignoring Errors on a Per-Cell Basis](allow-errors-per-cell.ipynb) # * [Cell Execution Timeout](timeout.ipynb)
doc/executing-notebooks.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Domi_LabHQ # language: python # name: domi_labhq # --- # # PhD Bot - Reading documents # requirements.txt textract nltk beautifulsoup4 # ! pip install textract # ! pip install nltk # ! pip install beautifulsoup4 import pandas as pd import textract from collections import Counter # ## Extract text from .pdf or .txt # + text = textract.process('literature/Domi_paper_4_2020.pdf') # - webpage_txt = textract.process('literature/NYUSH.txt') webpage_txt text words = text.split() word_counts = Counter(words) df = pd.DataFrame.from_dict(word_counts, orient='index') df.columns=['count'] sorted_df = df.sort_values('count',ascending=False) sorted_df[len(sorted_df) - 100:] sorted_df[len(sorted_df) - 50:].plot(kind='bar') sorted_df[14:50].plot(kind='bar') df.plot(kind='bar') # ## Read sentence by sentence # from nltk.tokenize import sent_tokenize import nltk nltk.download('punkt') mytext = text.decode("utf-8") shorttext = text[:1000].decode("utf-8") supershorttext = text[:100].decode("utf-8") sent_tokenize(supershorttext) sent_list = sent_tokenize(shorttext) my_sentences = sent_tokenize(mytext) len(my_sentences) my_sentences[5] # ## Extract text in webpage # import requests from bs4 import BeautifulSoup url = 'https://wp.nyu.edu/xiangsun/publications/' res = requests.get(url) html_page = res.content html_page soup = BeautifulSoup(html_page, 'html.parser') html_text = soup.find_all(text=True) html_text
notebook/Reading.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

import os

import numpy as np

from datetime import datetime
from dateutil.relativedelta import relativedelta

from data_generator import (
    generate_customers,
    generate_products,
    generate_transactions,
)

if __name__ == "__main__":
    # Fixed seed so repeated runs regenerate identical synthetic data.
    np.random.seed(seed=42)

    # Product catalogue: category name -> list of product names.
    products_data = {
        "house": [
            "detergent",
            "kitchen roll",
            "bin liners",
            "shower gel",
            "scented candles",
            "fabric softener",
            "cling film",
            "aluminium foil",
            "toilet paper",
            "kitchen knife",
            "dishwasher tablets",
            "ice pack",
        ],
        "clothes": [
            "men's dark green trousers",
            "women's shoes",
            "jumper",
            "men's belt",
            "women's black socks",
            "men's striped socks",
            "men's trainers",
            "women's blouse",
            "women's red dress",
        ],
        "fruit_veg": [
            "avocado",
            "cherries",
            "scotch bonnets",
            "peppers",
            "broccoli",
            "potatoes",
            "grapes",
            "easy peeler",
            "mango",
            "lemon grass",
            "onions",
            "apples",
            "raspberries",
        ],
        "sweets": [
            "carrot cake",
            "salted caramel dark chocolate",
            "gummy bears",
            "kombucha",
            "ice cream",
            "irn bru",
        ],
        "food": [
            "steak",
            "chicken",
            "mince beef",
            "milk",
            "hummus",
            "activated charcoal croissant",
            "whole chicken",
            "tuna",
            "smoked salmon",
            "camembert",
            "pizza",
            "oats",
            "peanut butter",
            "almond milk",
            "lentil soup",
            "greek yoghurt",
            "parmesan",
            "coconut water",
            "chicken stock",
            "water",
        ],
    }

    # Relative sampling weights per category for transaction generation.
    # NOTE(review): the weights sum to 90, not 100, because the "bws"
    # category is commented out — confirm whether that is intentional.
    products_cats_frequency = (
        ["house"] * 15
        + ["clothes"] * 5
        + ["fruit_veg"] * 25
        + ["sweets"] * 20
        + ["food"] * 25
        # + ["bws"] * 10
    )

    # Output folder for this generation run.
    gen_id = "starter"
    output_location = f"./input_data/{gen_id}"
    os.makedirs(output_location, exist_ok=True)

    # Generate 137 synthetic customers and the product/ID lookup.
    gen_customers = generate_customers(output_location, 137)
    product_id_lookup = generate_products(output_location, products_data)

    # Three months of transactions ending today.
    end_date = datetime.today()
    delta = relativedelta(months=3)
    start_date = end_date - delta

    generate_transactions(
        output_location,
        gen_customers,
        products_data,
        product_id_lookup,
        products_cats_frequency,
        start_date,
        end_date,
    )
input_data_generator/QA notebook - main_data_generator.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import suspect
import nibabel
import numpy
from matplotlib import pyplot
# %matplotlib inline

# Load a Siemens DICOM MRI volume with suspect and write it out as NIfTI so
# that FSL tooling (which expects NIfTI input) can operate on it.
img = suspect.image.load_dicom_volume("/home/jovyan/suspect/tests/test_data/siemens/mri/T1.0001.IMA")

nimg = nibabel.nifti1.Nifti1Image(img["volume"].T, img["transform"])
nimg.to_filename("raw.nii.gz")

def plot_slice(fname, z_idx=5):
    """Plot one axial slice of a NIfTI image, ``z_idx`` slices above centre.

    Parameters
    ----------
    fname : str
        Path to a NIfTI file readable by nibabel.
    z_idx : int, optional
        Offset (in slices) from the volume centre along the third axis.
    """
    # Load the image and collect the data and orientation information.
    # NOTE(review): get_data()/get_affine() are deprecated in newer nibabel;
    # get_fdata() would return float data, which would defeat the int16
    # dtype check below — confirm before migrating.
    img = nibabel.load(fname)
    data = img.get_data()
    aff = img.get_affine()
    # Find the centre of the brain matrix: map the world origin (0, 0, 0)
    # back into voxel coordinates.  Round to int so it can be used as an
    # array index — fractional indices raise a TypeError in modern numpy.
    ctr = numpy.dot(numpy.linalg.inv(aff), [0, 0, 0, 1])[:3]
    z_ctr = int(round(ctr[2]))
    # Plot the data.  vmin/vmax were previously computed but never passed
    # to imshow, so the intended display window was silently ignored; pass
    # them explicitly.  (The (0, 1) window for int16 data is kept as-is —
    # presumably a mask convention; TODO confirm.)
    vmin, vmax = (0, 1) if data.dtype == numpy.int16 else (30, 150)
    pyplot.imshow(numpy.rot90(data[:, :, z_ctr + z_idx]), cmap="gray",
                  vmin=vmin, vmax=vmax)
    pyplot.gca().set_axis_off()

plot_slice("raw.nii.gz")

# Run FSL's brain extraction tool (BET) on the converted volume via nipype.
from nipype.interfaces.fsl import BET
skullstrip = BET()
skullstrip.inputs.in_file = "raw.nii.gz"
skullstrip.inputs.out_file = "brain.nii.gz"
skullstrip.run()
examples/FSL.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Taller lentejas # ### <NAME> - <NAME> - <NAME> import scipy.stats as ss import numpy as np # ## Toma de los datos # + # Tamaño de la muestra de lentejas n_lentejas = 35 # Medida del ancho de las lentejas en centímetros (cm) medida_lentejas = [0.7, 0.63, 0.64, 0.62, 0.60, 0.69, 0.67, 0.66, 0.65, 0.65, 0.61, 0.67, 0.66, 0.69, 0.61, 0.68, 0.62, 0.61, 0.63, 0.67, 0.60, 0.61, 0.60, 0.59, 0.70, 0.61, 0.67, 0.55, 0.60, 0.71, 0.61, 0.7, 0.62, 0.62, 0.63] # Medida del peso de las lentejas en gramos (g) peso_lentejas = [0.073, 0.0692, 0.0635, 0.0616, 0.0620, 0.0747, 0.0723, 0.0716, 0.0641, 0.0553, 0.0603, 0.0771, 0.0724, 0.0725, 0.0637, 0.0616, 0.0698, 0.0669, 0.0569, 0.0640, 0.0688, 0.0698, 0.0688, 0.0650, 0.0623, 0.0648, 0.0722, 0.0706, 0.0758, 0.0640, 0.0684, 0.0711, 0.0783, 0.0601, 0.0603] # - # ## Medidas de tendencia central # + # Promedio muestral de la medida del ancho de las lentejas en centímetros (cm) x__medida_lentejas = sum(medida_lentejas) / n_lentejas # Promedio muestral de la medida del peso de las lentejas en gramos (g) x__peso_lentejas = sum(peso_lentejas) / n_lentejas # Desviación estándar de la medida del ancho de las lentejas en centímetros (cm) s_medida_lentejas = (np.var(medida_lentejas))**0.5 # Desviación estándar de la medida del peso de las lentejas en gramos (g) s_peso_lentejas = (np.var(peso_lentejas))**0.5 # - print("Promedio muestral de la medida del ancho de las lentejas: {} cm".format(x__medida_lentejas)) print("Promedio muestral de la medida del peso de las lentejas: {} g".format(x__peso_lentejas)) print("Desviación estándar de la medida del ancho de las lentejas: {} cm".format(s_medida_lentejas)) print("Desviación estándar de la medida del peso de las lentejas: {} g".format(s_peso_lentejas)) # <hr/> # <hr/> # ## 1. 
Determine el tamaño de la muestra # La profesora Luisa, nos pide, como profesora de Estadística aplicada, que estimemos el peso promedio de una lenteja. Nos advierte que debe mantener el error dentro de 0.003 gramos, y mantener un nivel de confianza del 95%. # ¿Cuántas lentejas debemos pesar si la desviación estándar es 0.0056 gramos? alpha_lentejas_n = 0.95 s_lentejas_n = 0.0056 error_lentejas_n = 0.003 def n_avg(alpha, s, error): """ """ z_value = ss.norm.interval(alpha)[1] n = ((z_value**2)*(s**2)) / (error**2) return round(n) n_avg(alpha_lentejas_n, s_lentejas_n, error_lentejas_n) # **R/** Deberíamos tomar una muestra de **13** lentejas, para que de esta manera podamos estimar el peso promedio de las lentejas con un nivel de confianza del 95% y teniendo en cuenta un error menor de 0.003 gramos. # <hr/> # <hr/> # ## 2. Hipótesis 1 población muestra grande, 1% significancia # <NAME>, CEO de <NAME> le afirma a sus inversionistas que las lentejas de su compañía pesan en promedio 0.07 gramos, los inversionistas sacan una muestra de 35 lentejas con una media de 0.06722285714285714 gramos, y una desviación estándar de 0.005695165153219789. Con un nivel de signifancia del 1%, ¿Se puede respaldar la afirmación de <NAME>?. 
mu_lentejas_h = 0.07
n_lentejas_h = 35
x__lentejas_h = 0.06722285714285714
s_lentejas_h = 0.005695165153219789
alpha_lentejas_h = 0.01

# <u> **Paso 1**</u>: Plantear hipótesis

# **Ho:** μ = 0.07
#
# **Ha:** μ ≠ 0.07

# <u> **Paso 2**</u>: Nivel de significancia

alpha_lentejas_h

# <u> **Paso 3**</u>: Valores críticos

def crit_val_norm(sign, tail):
    """Return the critical value(s) of the standard normal distribution.

    Parameters
    ----------
    sign : float
        Significance level (e.g. ``0.01``).
    tail : str
        ``'two'``, ``'left'`` or ``'right'``.

    Returns
    -------
    tuple or float
        ``(lower, upper)`` for a two-tailed test, a single float otherwise.

    Raises
    ------
    ValueError
        If *tail* is not one of the accepted strings.  (Previously an
        invalid tail only printed a message and returned ``None``, which
        failed silently downstream.)
    """
    if tail == 'two':
        # The two critical values are symmetric about zero:
        # ppf(sign / 2) == -ppf(1 - sign / 2).
        upper = ss.norm.ppf(1 - sign / 2)
        return -upper, upper
    if tail == 'left':
        return ss.norm.ppf(sign)
    if tail == 'right':
        return ss.norm.ppf(1 - sign)
    raise ValueError("You must input a valid tail ('two', 'left' or 'right')")

crit_lentejas_h = crit_val_norm(alpha_lentejas_h, 'two')
crit_lentejas_h

# <u> **Paso 4**</u>: Estadístico de prueba (Z)

def get_z(x, mu, sd, **kwargs):
    """Return the standard (z) test statistic.

    Parameters
    ----------
    x : float
        Observed value, or the sample mean when ``n`` is given.
    mu : float
        Hypothesised population mean.
    sd : float
        Standard deviation (used directly, or as ``sd / sqrt(n)`` when
        ``n`` is given).
    **kwargs
        Optional ``n`` (int): sample size; must be greater than 30 for
        the large-sample normal approximation.

    Returns
    -------
    float
        The z statistic.

    Raises
    ------
    ValueError
        If ``n`` is given and is not greater than 30.  (Previously this
        case only printed a message and returned ``None``.)
    """
    n = kwargs.get('n', None)
    if n is None:
        # No sample size: plain standardisation of a single value.
        return (x - mu) / sd
    if n <= 30:
        raise ValueError("The sample size must be greater than 30.")
    return (x - mu) / (sd / n ** 0.5)

z_lentejas_h = get_z(x__lentejas_h, mu_lentejas_h, s_lentejas_h, n=n_lentejas_h)
z_lentejas_h

# <u> **Paso 5**</u>: Decisión

# **SI RECHAZAMOS LA HIPÓTESIS NULA**, pues el estadístico de prueba *-2.884867825586016* es menor o mayor que los valores críticos obtenidos *-2.5758293035489004, 2.5758293035489004*

# <u>**Paso 6**</u>: Conclusión

# Con un nivel de significancia del 1%, se puede concluir que el valor promedio de las lentejas es **diferente** a 0.07 gramos.
#
# Por lo tanto, no hay evidencias suficientes para respaldar la afirmación de <NAME>.

# <hr/>
# <hr/>

# ## 3. Intervalo 2 poblaciones muestras grandes, 95% confianza

# Los inversionistas de Lentejas S.A, darán un apoyo especial al sembrado que produzca las lentejas de menor medida, para que de esta manera produzcan lentejas de mayor calidad. De las dos plantaciones existentes, se tomaron los siguientes datos: de la primera se midieron 35 lentejas, con una media de 0.6394285714285715, y una desviación estándar de 0.038243046586862006. De la segunda plantación se midieron 34 lentejas, con una media de 0.64, y una desviación estándar de 0.037. Determine un intervalo de confianza del 95% para la diferencia en la medida promedio de las lentejas en estas 2 plantaciones.

# +
n_lentejas_i1 = 35
x__lentejas_i1 = 0.6394285714285715
s_lentejas_i1 = 0.038243046586862006

n_lentejas_i2 = 34
x__lentejas_i2 = 0.64
s_lentejas_i2 = 0.037

alpha_lentejas_i = 0.95
# -

def norm_2p(n1, n2, x_1, x_2, sd1, sd2, alpha):
    """Large-sample confidence interval for the difference of two means.

    Parameters
    ----------
    n1, n2 : int
        Sample sizes; both must be greater than 30.
    x_1, x_2 : float
        Sample means.
    sd1, sd2 : float
        Sample standard deviations.
    alpha : float
        Confidence level (e.g. ``0.95``).

    Returns
    -------
    tuple
        ``(minimum, maximum)`` bounds for ``mu1 - mu2``.

    Raises
    ------
    ValueError
        If either sample size is not greater than 30.  (Previously this
        case only printed a message and returned ``None``.)
    """
    if n1 <= 30 or n2 <= 30:
        raise ValueError("The sample sizes must be greater than 30.")
    # Standard error of the difference of the two sample means.
    sd_error = ((sd1**2 / n1) + (sd2**2 / n2))**0.5
    # Two-sided critical value for the requested confidence level.
    z_value = ss.norm.interval(alpha)[1]
    minimum = (x_1 - x_2) - (z_value * sd_error)
    maximum = (x_1 - x_2) + (z_value * sd_error)
    return minimum, maximum

lentejas_i = norm_2p(n_lentejas_i1, n_lentejas_i2, x__lentejas_i1, x__lentejas_i2,
                     s_lentejas_i1, s_lentejas_i2, alpha_lentejas_i)
lentejas_i

# **R/** Con un nivel de confianza del 95%, podemos observar que la diferencia entre las medidas promedio de las plantaciones está entre **-0.018325207610630334, 0.017182350467773222** centímetros.
#
# Entonces, como en el intervalo está incluído el valor de **cero**, podemos concluir que las medidas de las lentejas de las 2 plantaciones no tienen una diferencia significativa, por lo tanto no hay suficientes evidencias para demostrar que una plantación sea peor que la otra. Los inversionistas no deberían invertir en apoyo para ninguna de las 2 plantaciones en especial.
exercises/Taller Lentejas.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <table> # <tr><td> # <a href="https://nbviewer.jupyter.org/github/panayiotiska/Jupyter-Sentiment-Analysis-Video-games-reviews/blob/master/[Data_Exploration]Word-Clouds.ipynb"> # <img alt="start" src="figures/button_previous.jpg" width= 70% height= 70%> # </td><td> # <a href="https://nbviewer.jupyter.org/github/panayiotiska/Jupyter-Sentiment-Analysis-Video-games-reviews/blob/master/Index.ipynb"> # <img alt="start" src="figures/button_table-of-contents.jpg" width= 70% height= 70%> # </td><td> # <a href="https://nbviewer.jupyter.org/github/panayiotiska/Jupyter-Sentiment-Analysis-Video-games-reviews/blob/master/Vectorization.ipynb"> # <img alt="start" src="figures/button_next.jpg" width= 70% height= 70%> # </td></tr> # </table> # # Basic code structure # In this notebook the form and code structure of the sentiment analysis part is illustrated and explained in order to better understand the following notebooks. # # # The basic structure consists of the following steps: # # - Import data from a json file into a pandas dataframe. # - Reduce the number of classes to three classes. # - Transform the text/corpora to a cleaner version. # - Seperate the dataset to training and test set. # - Transform text to a numerical form by performing vectorization. # - Perform classification, predict and print the results/reports. 
# # + import numpy as np import matplotlib.pyplot as plt import pandas as pd from nltk.tokenize import word_tokenize from nltk import pos_tag from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from sklearn.preprocessing import LabelEncoder from collections import defaultdict from nltk.corpus import wordnet as wn from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import model_selection, naive_bayes, svm from sklearn.metrics import accuracy_score from collections import Counter #[1] Importing dataset dataset = pd.read_json(r"C:\Users\Panos\Desktop\Dissert\Code\Sample_Video_Games_5.json", lines=True, encoding='latin-1') dataset = dataset[['reviewText','overall']] #[2] Reduce number of classes ratings = [] for index,entry in enumerate(dataset['overall']): if entry == 1.0 or entry == 2.0: ratings.append(-1) elif entry == 3.0: ratings.append(0) elif entry == 4.0 or entry == 5.0: ratings.append(1) # - # The code above at first, reads the json file and and stores the data in a pandas(library) dataframe. The dataframe consists of two columns "reviewText" and "overall" as shown in the print area above. The reviewText column includes the string writen by the customer reviewing an item. The overall column contains an integer with a value from 1 to 5, representing the rating-score left by the customer for the corresponding item. # # The next step in the code reduces the range of the overall integer value from 5 (1 to 5) to 3 (-1 to 1) by saving the new values in the 'ratings' list. In other words the number of classes is reduced by the following procedure: the ratings rated with 1 or 2 stars are negative (-1), ratings rated with 3 stars are neutral (0) and ratings rated with 4 or 5 stars are positive, as shown below. 
# # &#11088; &#8594; Negative (-1) # # &#11088; &#11088; &#8594; Negative (-1) # # &#11088; &#11088; &#11088; &#8594; Neutral (0) # # &#11088; &#11088; &#11088; &#11088; &#8594; Positive (1) # # &#11088; &#11088; &#11088; &#11088; &#11088; &#8594; Positive (1) # + #[3] Cleaning the text import re import nltk nltk.download('stopwords') from nltk.corpus import stopwords corpus = [] for i in range(0, len(dataset)): review = re.sub('[^a-zA-Z]', ' ', dataset['reviewText'][i]) review = review.lower() review = review.split() review = [word for word in review if not word in set(stopwords.words('english'))] review = ' '.join(review) corpus.append(review) # - # In this code section, different cleaning/pre-processing techniques are applied in order to make the text more machine-friendly, remove unwanted tokens and match together identical tokens. # # In this example, all characters that do not belong to the english alphabet (from a to z and A to Z) are being removed. Also, all capital letters are transformed to small letters and stopwords are being removed using the nltk library. Stop-words usually refer to the most common words in a language which do not affect the meaning of a sentence but are mostly "auxiliary". # # In the next notebooks different pre-processing methods will be examined in order to achieve a better final accuracy as this is a critical part for sentiment analysis. Specifically, different stemming methods are being tested and regular expressions(regex) are being experimented in the final section of the project. # + #[4] Prepare Train and Test Data sets Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(corpus,ratings,test_size=0.3) # - # In this part, the dataset is split to the training and test set in order to be able to later calculate the accuracy and decide whether the model is favorable using the test-set. The test-set is a small part of the dataset left un-trained. 
In this example the test set consists of the 30% of the entire dataset. # + #[5] Encoding Encoder = LabelEncoder() Train_Y = Encoder.fit_transform(Train_Y) Test_Y = Encoder.fit_transform(Test_Y) # + #[6] Word Vectorization Tfidf_vect = TfidfVectorizer(max_features=10000) Tfidf_vect.fit(corpus) Train_X_Tfidf = Tfidf_vect.transform(Train_X) Test_X_Tfidf = Tfidf_vect.transform(Test_X) #the vocabulary that it has learned from the corpus print(Tfidf_vect.vocabulary_) #the vectorized data print(Train_X_Tfidf) # - # **A sample of the vocabulary that it has learned from the corpus:** # # {'installing': 846, 'game': 663, 'struggle': 1615, 'games': 669, 'windows': 1864, 'live': 957, 'bugs': 179} # # **A sample of the vectorized data:** # # (0, 1602) 0.2521459788527613 <br> # (0, 1536) 0.44611689308144253 <br> # (0, 1499) 0.3630663061451349 <br> # (0, 1221) 0.2521459788527613 <br> # (0, 971) 0.3097906090161147 <br> # (0, 949) 0.203737910991826 <br> # (0, 895) 0.44611689308144253 <br> # (0, 312) 0.24206473066707476 <br> # (0, 101) 0.38329154936001714 <br> # (1, 1829) 0.17313823592091127 <br> # (1, 1782) 0.2029133806474433 <br> # (1, 1702) 0.12345297644789534 <br> # (1, 1519) 0.14399237108946988 <br> # (1, 1476) 0.21421702520406033 <br> # (1, 1147) 0.11048831816230155 <br> # # Vectorization is being performed in this part to help represent text data in a multidimensional space using float values for the machine to be able to recognize and manipulate. In this example, the TD-IDF algorithm is used performing two steps, first counting the frequency of each token and then calculating the inverse document frequency and compining those together to extract the final vectorized values. # # In this project two vectorization algorithms are being tested, TF-IDF Vectorizer and Hashing Vectorizer. Both vectorizers are being explained in a latter notebook in more depth. 
# + #[7] Use the Naive Bayes Algorithms to Predict the outcome # fit the training dataset on the NB classifier Naive = naive_bayes.MultinomialNB() Naive.fit(Train_X_Tfidf,Train_Y) # predict the labels on validation dataset predictions_NB = Naive.predict(Test_X_Tfidf) # Use accuracy_score function to get the accuracy print("-----------------------Naive Bayes------------------------\n") print("Naive Bayes Accuracy Score -> ",accuracy_score(predictions_NB, Test_Y)*100) # Making the confusion matrix from sklearn.metrics import confusion_matrix cm = confusion_matrix(Test_Y, predictions_NB) print("\n",cm,"\n") # Printing a classification report of different metrics from sklearn.metrics import classification_report my_tags = ['Positive','Neutral','Negative'] print(classification_report(Test_Y, predictions_NB,target_names=my_tags)) # Export reports to files for later visualizations report_NB = classification_report(Test_Y, predictions_NB,target_names=my_tags, output_dict=True) report_NB_df = pd.DataFrame(report_NB).transpose() report_NB_df.to_csv(r'NB_report_TFIDFVect.csv', index = True, float_format="%.3f") # + #[8] Use the Support Vector Machine Algorithms to Predict the outcome # Classifier - Algorithm - SVM # fit the training dataset on the classifier SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto') SVM.fit(Train_X_Tfidf,Train_Y) # predict the labels on validation dataset predictions_SVM = SVM.predict(Test_X_Tfidf) # Use accuracy_score function to get the accuracy print("-----------------Support Vector Machine CM------------------\n") print("Accuracy Score -> ",accuracy_score(predictions_SVM, Test_Y)*100) cm = confusion_matrix(Test_Y, predictions_SVM) # Making the confusion matrix print("\n",cm,"\n") # Printing a classification report of different metrics print(classification_report(Test_Y, predictions_SVM,target_names=my_tags)) # Export reports to files for later visualizations report_SVM = classification_report(Test_Y, 
predictions_SVM,target_names=my_tags, output_dict=True) report_SVM_df = pd.DataFrame(report_SVM).transpose() report_SVM_df.to_csv(r'SVM_report_TFIDFVect.csv', index = True, float_format="%.3f") # - # In the final step classification algorithms are used to Predict the outcome. First of all the training set is fit on the classifier, then the labels are predicted on validation dataset and being stored in the predictions_SVM variable which is then used to calculate the accuracy, the confusion matrix and the classification report which have been discussed in a previous notebook. # In the very end, the reports are being exported in csv format files in order to be used for visualising the conclusion of the whole project and examine the course of the accuracy scores in the Results & Conclusion notebook. # <a href="https://nbviewer.jupyter.org/github/panayiotiska/Jupyter-Sentiment-Analysis-Video-games-reviews/blob/master/Vectorization.ipynb"> # <img alt="start" src="figures/button_next.jpg">
Code_Structure.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Summary: Python Script # ### Last Modified: 11/08/2020 # + # Import the libraries import pandas as pd import matplotlib.pyplot as plt # %matplotlib inline import seaborn as sns sns.set() import json import csv # + # Read datasets SheetA = pd.read_csv('C:\\Users\\Leo\\Desktop\\Lodestone\\SheetA.csv') SheetB = pd.read_csv('C:\\Users\\Leo\\Desktop\\Lodestone\\SheetB.csv') SheetC = pd.read_csv('C:\\Users\\Leo\\Desktop\\Lodestone\\SheetC.csv') # + # Examining SheetA SheetA.head() # - SheetA.describe().T # + # Examining SheetB SheetB.head() # - SheetB.describe().T SheetB.info() # + # Examining SheetC SheetC.head() # - SheetC.describe().T SheetC.info() # ### Question 1: Name each sheet/table so they are meaningful standalone. # # ### Answer: Please see the below information for your reference. 
# + # Rename header for SheetA and create a dataframe SheetA_header=['Grill Type', 'Pre-Heat Time (In Seconds)', 'Fuel Cost Per Meal', 'Market Share', 'Ease Of Use', 'Life Span (In Years)', 'Initial Investment'] SheetA = pd.read_csv("C:\\Users\\Leo\\Desktop\\Lodestone\\SheetA.csv", header=None, skiprows=1, names=SheetA_header) SheetA = pd.DataFrame(SheetA) SheetA # SheetA has 1 categorical variable and 6 numerical variables # + # Check for NaN values valA=SheetA.isnull().values.any() if valA==True: print("Missing values present : ", SheetA.isnull().values.sum()) SheetA=SheetA.dropna() else: print("SheetA has no NaN value") # + len(SheetA.index) # SheetA has a total of 2 observations # + # SheetA details SheetA.info() # + # Rename header for SheetB and create a dataframe SheetB_header=['Material', 'Device ID', 'Grill Type', 'Satisfaction Level', 'Fuel Cost'] SheetB = pd.read_csv("C:\\Users\\Leo\\Desktop\\Lodestone\\SheetB.csv", header=None, skiprows=1, names=SheetB_header) SheetB = pd.DataFrame(SheetB) del SheetB['Device ID'] SheetB # Delete "Device ID" column due to it won't contribute to the analysis. SheetB now has 2 categorical variables and 2 numerical variables. # + # Check for NaN values valB=SheetB.isnull().values.any() if valB==True: print("Missing values present : ", SheetB.isnull().values.sum()) SheetB=SheetB.dropna() else: print("SheetB has no NaN value") # + len(SheetB.index) # SheetB has a total of 24 observations # + # SheetB details SheetB.info() # + # Count the amount of each material in SheetB SheetB.Material.value_counts() # + # Rename header for SheetC and create a dataframe SheetC_header=['Sample Index','Material', 'Frozen', 'Grill Type', 'Thumbs Up Score', 'Guess Grill Type Correct?'] SheetC = pd.read_csv("C:\\Users\\Leo\\Desktop\\Lodestone\\SheetC.csv", header=None, skiprows=1, names=SheetC_header) SheetC = pd.DataFrame(SheetC) del SheetC['Sample Index'] SheetC # Delete "Sample Index" column due to it won't contribute to the analysis. 
SheetC now has 4 categorical variables and 1 numerical variable # + # Check for NaN values valC=SheetC.isnull().values.any() if valC==True: print("Missing values present : ", SheetC.isnull().values.sum()) SheetC=SheetC.dropna() else: print("SheetC has no NaN value") # + len(SheetC.index) # SheetC has a total of 720 observations # + # SheetC details SheetC.info() # + # Count the amount of each material in SheetC SheetC.Material.value_counts() # - # ### Question 2: Which grill type is more fuel efficient based on sheet A? # # ### Answer: As per the bar plot, Charcoal costs more fuel per meal than propane, thus less efficient. Please see the below information for your reference. # + # Bar Plot (matplotlib package) plt.bar(SheetA['Grill Type'], SheetA['Fuel Cost Per Meal'], color=['red', 'blue'], align='center') plt.title('Fuel Efficiency Comparison') plt.xlabel('Grill Type') plt.ylabel('Fuel Cost Per Meal') # + # Bar Plot (seaborn package) sns.barplot(x='Grill Type', y='Fuel Cost Per Meal', data=SheetA, palette=('Set1')).set_title('Fuel Efficiency Comparison') # - # ### Question 3: Which grill type has more market share? # # ### Answer: Propane has a higher market share than charcoal by 20%. Please see the below information for your reference. # + # Bar Plot (matplotlib package) plt.bar(SheetA['Grill Type'], SheetA['Market Share'], color=['red', 'blue'], align='center') plt.title('Market Share Comparison') plt.xlabel('Grill Type') plt.ylabel('Market Share') # + # Bar Plot (seaborn package) sns.barplot(x='Grill Type', y='Market Share', data=SheetA, palette=('Set1')).set_title('Market Share Comparison') # - # ### Question 4: Based on the cookoff data which grill type cost more fuel on a long run? # # ### Answer: To get the answer, we need to use the SheetB data and take the total fuel cost used by the grill type. By taking the sum of fuel consumption per grill type, we can see that charcoal has much higher fuel consumption. 
Please see the below information for your reference. # + # Use groupby to get sum SheetB.groupby('Grill Type')['Fuel Cost'].sum() # - # ### Question 5: Considering that the average American grill owner buys a new grill every three years, which grill type would cost more based on the fuel cost and initial investment? # # ### Answer: By the plot, we can see that charcoal cost higher in the total cost than propane despite the low initial investment. Please see the below information for your reference. # + # Considering 3 meals per day i.e. for 3 years 3*3*365 total meals in 3 years. total_meals = 3*3*365 # Total cost = fuel cost*total_meals + initial investment SheetA['Total Cost'] = SheetA['Fuel Cost Per Meal']*total_meals + SheetA['Initial Investment'] # + # Adding the new column "Total Cost" into the dataframe SheetA['Total Cost'] SheetA # + # Bar Plot (matplotlib package) plt.bar(SheetA['Grill Type'], SheetA['Total Cost'], color=['red', 'blue'], align='center') plt.title('Total cost Comparison') plt.xlabel('Grill Type') plt.ylabel('Total Cost') # + # Bar Plot (seaborn package) sns.barplot(x='Grill Type', y='Total Cost', data=SheetA, palette=('Set1')).set_title('Total Cost Comparison') # - # ### Question 6: Which grill type is easier to use based on the user satisfaction score? Based on data, which grill is preferred? What factors might play a role? # # ### Answer: Based on the table, average user satisfaction is higher in the case of propane rather than charcoal, this can be attributed to lower fuel consumption and easiness to use. Please see the below information for your reference. # + # To get the answer, we need to use the SheetB data and take the average of Satisfaction Level by Grill Type SheetB.groupby('Grill Type')['Satisfaction Level'].mean() # - # ### Question 7: Please generate an aggregated dataset to present your conclusions. # # ### Answer: Please see the below information for your reference. 
# + # This is the previous answer to the question regarding which grill type cost more fuel on a long run SheetB.groupby('Grill Type')['Fuel Cost'].sum() # + # This is the aggregated dataset, I also add the mean, min, max, and std along with the sum to further prove the point SheetB.groupby('Grill Type').agg({'Fuel Cost': ['sum', 'mean', 'min', 'max', 'std']}) # + # This is the previous answer regarding user satisfaction scores between propane and charcoal SheetB.groupby('Grill Type')['Satisfaction Level'].mean() # + # This is the aggregated dataset, I also add the min, max, and std along with the mean to further prove the point SheetB.groupby('Grill Type').agg({'Satisfaction Level': ['mean', 'min', 'max', 'std']}) # + # Let's also make an aggregated dataset from SheetC to explore the relationship between Thumbs Up Score and Grill Type as a reference SheetC.groupby('Grill Type').agg({'Thumbs Up Score': ['sum', 'mean', 'min', 'max', 'std']}) # - # ### Question 8: Are your recommendations to the manufacturer different to recommendations you’d make to the user? # # ### Answer: For the manufacturer, I recommend producing more propane-based grill as they have a higher initial cost but also higher customer ratings in all aspects. For the customer, if the usage is low, a charcoal-based grill could also be a good choice due to its low initial investment. # # ### Let's explore different aspects by different forms of plots to get a clear picture of this case. Please see the below information for your reference. # + # Box Plot - Checking outliers for SheetB sns.boxplot(data = SheetB, x='Satisfaction Level') # The numerical variable "Satisfaction Level" looks fine. # + # Box Plot - Checking outliers for SheetB sns.boxplot(data = SheetB, x='Fuel Cost') # The numerical variable "Fuel Cost" looks fine. # + # Box Plot - Checking outliers for SheetC sns.boxplot(data = SheetC, x='Thumbs Up Score') # The numerical variable "Thumbs Up Score" looks fine. 
# + # Density Plot - Satisfaction Level by Grill Type sns.displot(SheetB, x='Satisfaction Level', hue='Grill Type', kind='kde', fill=True, palette=('Set1')) # We can see propane has a relatively higher customer satisfaction level than charcoal among those 3 materials. We can also get the idea of most customers' satisfaction ratings by the shape of the graph. # + # Density Plot - Fuel Cost by Grill Type sns.displot(SheetB, x='Fuel Cost', hue='Grill Type', kind='kde', fill=True, palette=('Set1')) # We can easily tell charcoal has a higher fuel cost than propane among those 3 materials # + # Density Plot - Satisfaction Level by Material sns.displot(SheetB, x='Satisfaction Level', hue='Material', kind='kde', palette=('Set1')) # Seems like most people tend to like hotdog among 2 other materials... # + # Bivariate Distribution and Joint Marginal Distribution - Satisfaction Level by Fuel Cost based on Grill Type sns.displot(SheetB, x='Satisfaction Level', y='Fuel Cost', hue='Grill Type', kind='kde', palette=('Set1')) # This plot can easily interpret that most people have great satisfaction levels when using propane plus it is low fuel cost, quite clear. 
sns.jointplot(data=SheetB, x="Satisfaction Level", y="Fuel Cost", hue='Grill Type', palette=('Set1')) # Joint plot with marginal distribution to use as a reference # + # Scatter Plot - Satisfaction Level by Fuel Cost based on Grill Type and Material sns.relplot(data=SheetB, x='Satisfaction Level', y='Fuel Cost', hue='Grill Type', style='Material', palette=('Set1')) # Using scatter plot to visualize categorical variables to use as a reference # + # Box Plot - Material by Satisfaction Level based on Grill Type sns.catplot(data = SheetB, x='Material', y='Satisfaction Level', hue='Grill Type', kind="box", palette=('Set1')) # According to the box plot, we can see the propane has a higher user satisfaction in all 3 materials # + # Violin Plot - Material by Fuel Cost based on Grill Type sns.violinplot(data=SheetB, x='Material', y='Fuel Cost', hue='Grill Type', palette=('Set1')) # We also can have a clear picture regarding fuel costs based on propane and charcoal among all 3 materials # + # Violin Plot - Grill Type by Thumbs Up Score based on Frozen sns.violinplot(data = SheetC, x='Grill Type', y='Thumbs Up Score', hue='Frozen', palette=('mako')) # It seems propane and charcoal have similar thumbs up scores despite the frozen status of the materials # + # Box Plot - Material by Thumbs Up Score based on Frozen sns.catplot(data = SheetC, x='Material', y='Thumbs Up Score', hue='Frozen', kind="box", palette=('mako')) # It seems hotdog and ground beef patty are very popular to most people with high thumbs up scores no matter they are frozen or not. 
# + # Box Plot - Material by Thumbs Up Score based on Grill Type sns.catplot(data = SheetC, x='Material', y='Thumbs Up Score', hue='Grill Type', kind="box", palette=('Set2')) # It seems charcoal hotdog and propane ground beef patty have high thumbs up scores in taste among most people # + # Count Plot - Grill Type by Guess Grill Type Correct sns.countplot(data=SheetC, x='Grill Type', hue='Guess Grill Type Correct?', palette=('mako')) # It seems most people can easily tell the difference between cooking in propane and charcoal # + # Pair Plot - Guess Grill Type Correct based on Frozen and Thumbs Up Score sns.pairplot(SheetC, hue = 'Guess Grill Type Correct?') # We can also have a general idea about how these variables interact with each other # - # ### Question: (Bonus) Transform SheetC.json to SheetC.csv # # ### Answer: Please see the below information for your reference. # + # Load SheetC.json to a variable raw_data with open('SheetC.json') as f: raw_data = json.load(f) # + # Now raw_data is a list type(raw_data) # + # Transform raw_data to a dictionary raw_dict = raw_data[0] # + #Now raw_dict is a dictionary type(raw_dict) # + # We have to get the value from key test_result for i in raw_dict: new_dict = raw_dict[i] # + # Now new_dict is a string type(new_dict) # + # Transform new_dict to a dictionary data = eval(new_dict) # + # Now data is a dictionary type(data) # + # Get the value from meta_data and survey_result, put them in dataframes df_meta_data= pd.DataFrame() df_survey_result= pd.DataFrame() for k1 in data: data1=data[k1] df1 = pd.DataFrame.from_dict(data1['meta_data'], orient='index').T df_meta_data = df_meta_data.append(df1) for k2 in data: data2=data[k2] df2 = pd.DataFrame.from_dict(data2['survey_result'], orient='index').T df_survey_result = df_survey_result.append(df2) # + # Get the value from sample_item_index, put it in dataframe df_sample_item_index=pd.DataFrame() for k3 in data: data3=data[k3] del data3['meta_data'] del data3['survey_result'] df3 
= pd.DataFrame.from_dict(data3, orient='index').T df_sample_item_index = df_sample_item_index.append(df3) # + # Use concat to combine all three dataframes and change column order SheetC_temp=pd.concat([df_meta_data,df_survey_result,df_sample_item_index], axis=1) SheetC = SheetC_temp[['sample_item_index', 'item_material','is_frozen','grill_type','thumbs_up_score', 'guess_grill_correct']] SheetC # + # Export SheetC dataframe to SheetC.csv SheetC.to_csv(r'SheetC.csv', index = False)
Lodestone Interview Case Study Project/Case Study Grill Test in Python.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Running Sum and First Difference Algorithms # ### Calculus-like Operations # Convolution can change discrete signals in ways that resemble integration and differentiation. Since the terms "derivative" and "integral" specifically refer to operations on continuous signals, other names are given to their discrete counterparts. The discrete operation that mimics the first derivative is called the first difference. Likewise, the discrete form of the integral is called the running sum. It is also common to hear these operations called the discrete derivative and the discrete integral, although mathematicians frown when they hear these informal terms used. # + import sys sys.path.insert(0, '../../../') import numpy as np import matplotlib.pyplot as plt from Common import common_plots cplots = common_plots.Plot() # + file = {'x':'Signals/InputSignal_f32_1kHz_15kHz.dat'} x = np.loadtxt(file['x']) N,M = x.shape x = x.reshape(N*M, 1) # - cplots.plot_single(x.T, title='x[n]', style='line') # ## First Difference # This is the discrete version of the first derivative. Each sample in the output signal is equal to the difference between adjacent samples in the input signal. In other words, the output signal is the slope of the input signal. # # $$ y[n] = x[n] - x[n-1]$$ def first_difference(x): """ Function that calculates the first difference of an input signal x using the recursive equation y[n]=x[n]-x[n-1]. Parameters: x (numpy array): Array of numbers representing the input signal. Returns: numpy array: Returns first difference of input signal x. 
""" pass x_diff = first_difference(x) # + plt.rcParams["figure.figsize"] = (15,5) plt.subplot(1,2,1) cplots.plot_single(x.T, title='Input Signal', style='line') plt.subplot(1,2,2) cplots.plot_single(x_diff.T, title='First Difference', style='line') # - # ## Running Sum # This is the discrete version of the integral. Each sample in the output signal is equal to the sum of all samples in the input signal to the left. # # $$ y[n] = x[n] + y[n-1]$$ def running_sum(x): """ Function that calculates the running sum of an input signal x using the recursive equation y[n]=x[n]+y[n-1]. Parameters: x (numpy array): Array of numbers representing the input signal. Returns: numpy array: Returns running sum of input signal x. """ pass x_sum = running_sum(x) # + plt.rcParams["figure.figsize"] = (15,5) plt.subplot(1,2,1) cplots.plot_single(x.T, title='Input Signal', style='line') plt.subplot(1,2,2) cplots.plot_single(x_sum.T, title='First Difference', style='line') # - # ## Exercise: Add your functions to your Convolve class # As an exercise you will add your `first_difference` and `running_sum` functions to the class `Convolve`. from Common import convolution convolve = convolution.Convolve() cplots.plot_single(convolve.first_difference(x).T, title='First Difference', style='line') cplots.plot_single(convolve.running_sum(x).T, title='Running Sum', style='line')
06_Convolution/Student/02_Running_Sum_and_First_Difference/.ipynb_checkpoints/Running_Sum_First_Difference_Student-checkpoint.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: '' # language: python # name: '' # --- # + # Import most generic modules import importlib import pathlib import os import sys from datetime import datetime from IPython.display import display, Markdown import warnings warnings.filterwarnings("ignore") module_path = os.path.abspath(os.path.join("../..")) if module_path not in sys.path: sys.path.append(module_path) # + tags=["parameters"] report_name = f"{datetime.now().strftime('%Y%m%d_%H%M%S')}_econ_data" # - display(Markdown(f"# Economy Data - {datetime.now().strftime('%Y/%m/%d %H:%M:%S')}")) # ## <NAME> - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^DJI", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=False, ) # - # ## <NAME> - 1 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^DJI", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=360, low_trend=True, ) # - # ## S&P 500 - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^GSPC", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=False, ) # - # ## Market Volatility (VIX) - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^VIX", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=False, time_delta=720, high_trend=False, low_trend=False, ) # - # ## Market Volatility (VIX) - 1 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^VIX", "", line_type="line", draw_mas=(20, 50, 200), 
draw_volume=False, time_delta=360, high_trend=False, low_trend=False, ) # - # ## Yield on 5-Year US Treasury Notes - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^FVX", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=False, time_delta=720, low_trend=False, high_trend=False, ) # - # ## Yield on 10-Year US Treasury Notes - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^TNX", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=False, time_delta=720, low_trend=False, high_trend=False, ) # - # ## Yield on 30-Year US Treasury Notes - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "^TYX", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=False, time_delta=720, low_trend=False, high_trend=False, ) # - # ## Gold - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "GLD", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, ) # - # ## Silver - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "SLV", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=False, ) # - # ## Euro vs US Dollar - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "FXE", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=True, ) # - # ## Yen vs US Dollar - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "FXY", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=True, ) # - # 
## Energy - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "XLE", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, ) # - # ## Crude Oil - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "CL=F", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=False, ) # - # ## Real Estate - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "XLRE", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=False, ) # - # ## Real Estate - 1 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "VNQ", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=360, low_trend=True, ) # - # ## Financials - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "XLF", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=False, ) # - # ## Financials - 1 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "XLF", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=360, low_trend=True, ) # - # ## Materials - 2 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "XLB", "", line_type="line", draw_mas=(20, 50, 200), draw_volume=True, time_delta=720, low_trend=False, ) # - # ## Materials - 1 year trend with 20, 50 and 200 day SMA # + from gamestonk_terminal.economy import econ_data_helper econ_data_helper.draw_graph( "XLB", "", line_type="line", draw_mas=(20, 50, 200), 
draw_volume=True, time_delta=360, low_trend=True, ) # - # !jupyter nbconvert {report_name + ".ipynb"} --to html --no-input
gamestonk_terminal/jupyter/reports/econ_data.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 选择 # ## 布尔类型、数值和表达式 # ![](../Photo/33.png) # - 注意:比较运算符的相等是两个等号,一个等到代表赋值 # - 在Python中可以用整型0来代表False,其他数字来代表True # - 后面还会讲到 is 在判断语句中的用发 # + a = id(1) b = id(1) print(a,b) # 因为a和b并不是同一个对象 a is b # - a = id(1) b = a a is b a = True b = False id(True) a == b a is b # ## 字符串的比较使用ASCII值 a = "jokar" b = "jokar" a > b # ## Markdown # - https://github.com/younghz/Markdown # 肯定会给发数据划分啊 # $\sum_{j=1}^{N}x_{j}$ # ## EP: # - <img src="../Photo/34.png"></img> # - 输入一个数字,判断其实奇数还是偶数 # ## 产生随机数字 # - 函数random.randint(a,b) 可以用来产生一个a和b之间且包括a和b的随机整数 import random random.randint(0,1) if condition: do someething else: other for iter_ in xxx: do something age = 10 Joker = eval(input('Name')) print(Joker) # 产生一个随机数,你去输入,如果你输入的数大于随机数,那么就告诉你太大了,反之,太小了, # 然后你一直输入,知道它满意为止 number =random.randint(0,5) for i in range(5): input_ = eval(input('>>')) if input_ > number: print('太大啦') if input_ < number: print('太小啦') if number == input_: print('正好') break for i in range(5): print(i) # ## 其他random方法 # - random.random 返回0.0到1.0之间前闭后开区间的随机浮点 # - random.randrange(a,b) 前闭后开 random.random() import matplotlib.pyplot as plt image=plt.imread('/Users/huwang/Downloads/cat.jpeg') print(image*random.random()) plt.imshow(image) # ## EP: # - 产生两个随机整数number1和number2,然后显示给用户,使用户输入数字的和,并判定其是否正确 # - 进阶:写一个随机序号点名程序 # + number_1 = random.randrange(0,10) number_2 = random.randrange(0,10) while 1: sum_ = eval(input('>>')) if sum_ == (number_1 + number_2): print('Congratulations! 
Correct~') else: print('Sorry~SB.') # - # ## if语句 # - 如果条件正确就执行一个单向if语句,亦即当条件为真的时候才执行if内部的语句 # - Python有很多选择语句: # > - 单向if # - 双向if-else # - 嵌套if # - 多向if-elif-else # # - 注意:当语句含有子语句的时候,那么一定至少要有一个缩进,也就是说如果有儿子存在,那么一定要缩进 # - 切记不可tab键和space混用,单用tab 或者 space # - 当你输出的结果是无论if是否为真时都需要显示时,语句应该与if对齐 input_ = eval(input('>>')) if input_ > number: print('太大啦') if input_ < number: print('太小啦') if number == input_: print('正好') print('不要灰心') # 李文浩相亲测试树 # # 年龄 # 老 年轻 # 拜拜 # 帅 # 否 是 # 考虑一下 老婆 # 没有 有 # 马上结婚 回家的诱惑 # # 代码写不出来的立马分手,从此社会上有多出一个渣男/渣女. age = input('年轻嘛[y/n]') if age == 'y': handsome = input('帅否[y/n]') if handsome == 'y': wife = input('有没有老婆[y/n]') if wife == 'y': print('回家的诱惑') else: print('立马结婚') else: print('考虑一下') else: print('拜拜~') # ## EP: # - 用户输入一个数字,判断其实奇数还是偶数 # - 进阶:可以查看下4.5实例研究猜生日 # ## 双向if-else 语句 # - 如果条件为真,那么走if内部语句,否则走else内部语句 # ## EP: # - 产生两个随机整数number1和number2,然后显示给用户,使用户输入数字,并判定其是否正确,如果正确打印“you‘re correct”,否则打印正确错误 # ## 嵌套if 和多向if-elif-else # ![](../Photo/35.png) if score >= 80: gread = 'B' elif score>=90: gread = 'A' # ## EP: # - 提示用户输入一个年份,然后显示表示这一年的动物 # ![](../Photo/36.png) # - 计算身体质量指数的程序 # - BMI = 以千克为单位的体重除以以米为单位的身高的平方 # ![](../Photo/37.png) tizhong = eval(input('体重')) shengao = eval(input('身高')) BMI = tizhong / shengao ** 2 if BMI<18.5 : print('超清') elif 18.5<=BMI<25 : print('标准') elif 25<=BMI<30 : print('超重') else: print('超级无敌胖') # ## 逻辑运算符 # ![](../Photo/38.png) # ![](../Photo/39.png) # ![](../Photo/40.png) # ## EP: # - 判定闰年:一个年份如果能被4整除但不能被100整除,或者能被400整除,那么这个年份就是闰年 # - 提示用户输入一个年份,并返回是否是闰年 # - 提示用户输入一个数字,判断其是否为水仙花数 # ## 实例研究:彩票 # ![](../Photo/41.png) import random # + number = random.randint(10,99) print(number) N = input('>>') number_shi = number // 10 number_ge = number % 10 if N[0] == '0': N_shi = 0 else: N_shi = int(N) // 10 N_ge = int(N) % 10 if number == int(N): print('10000') # elif (number_shi == N_shi or number_shi==N_ge) and (number_ge == N_shi or number_ge==N_ge): elif number_shi + number_ge == N_shi + N_ge: print('3000') elif 
(number_ge ==N_ge or number_ge == N_shi) or (number_shi == N_ge or number_shi == N_shi): print('1000') # - a = "05" a[0] 05 // 10 # + Number = eval(input('>>')) bai = Number // 100 shi = Number // 10 % 10 ge = Number % 10 if bai**3 + shi **3 + ge **3 == Number: print('水仙花') else: print('不是水仙花') # - 223 // 10 # # Homework # - 1 # ![](../Photo/42.png) import math for i in range(3): a,b,c = map(float,input('Enter a, b, c: ').split(',')) d = b ** 2 - 4 * a * c if d > 0: r1 = (-b + math.sqrt(d)) / 2 * a r2 = (-b - math.sqrt(d)) / 2 * a print('The roots are %.6f and %.5f'%(r1,r2)) elif d == 0: r3 = (-b) / 2 * a print('The root is %d'%r3) else : print('The equation has no real roots') # - 2 # ![](../Photo/43.png) import random sun = int(input('输入两个整数的和:')); a = random.randint(0,100) b = random.randint(0,100) if sum == a + b: print('真') else : print('假,应该为{}'.format(a + b)) # - 3 # ![](../Photo/44.png) for i in range(2): jin = int(input('Enter today is day: ')) hou = int(input('Enter the number od days elapsed since today: ')) day = (hou + jin) % 7 if jin == 1: a = 'Monday' elif jin == 2: a = 'Tuesday' elif jin == 3: a = 'Wednesday' elif jin == 4: a = 'Thursday' elif jin == 5: a = 'Friday' elif jin == 6: a = 'Saturday' else: a = 'Sunday' if day == 1: print('Today is {} and the future day is Monday '.format(a)) elif day == 2: print('Today is {} and the future day is Tuesday '.format(a)) elif day == 3: print('Today is {} and the future day is Wednesday '.format(a)) elif day == 4: print('Today is {} and the future day is Thursday '.format(a)) elif day == 5: print('Today is {} and the future day is Friday '.format(a)) elif day == 6: print('Today is {} and the future day is Saturday '.format(a)) else: print('Today is {} and the future day is Sunday '.format(a)) # - 4 # ![](../Photo/45.png) a,b,c = map(int,input('输入三个整数').split(',')) d = 0; if a<b: d=b b=a a=d if a<c: d=c c=a a=d if b<c: d=c c=b b=d print('%d %d %d' %(c,b,a)) # - 5 # ![](../Photo/46.png) k,m = 
map(float,input('Enter weight and price for package 1: ').split(',')) k1,m1 = map(float,input('Enter weight and price for package 2: ').split(',')) a = k * m b = k1 * m1 if a > b: print('Package 2 has the better price.') elif a < b: print('Package 1 has the better price.') else: print('Package 1 the same to Package 2.') # - 6 # ![](../Photo/47.png) yue,nian = map(int,input('请输入月份及年份').split(',')) if nian % 4 == 0 and nian % 100 != 0 or nian % 400 == 0: if yue == '2': print('%d年%s月份有29天'%(nian,yue)) elif yue in (1,3,5,7,8,10,12): print('%d年%s月份有31天'%(nian,yue)) else: print('%d年%s月份有30天'%(nian,yue)) else: if yue == '2': print('%d年%s月份有28天'%(nian,yue)) elif yue in (1,3,5,7,8,10,12): print('%d年%s月份有31天'%(nian,yue)) else: print('%d年%s月份有30天'%(nian,yue)) # - 7 # ![](../Photo/48.png) import numpy as yb zf = yb.random.choice(['正面','反面']) cai=str(input('请输入正面,或者反面:')) if zf == cai: print('true') else: print('false') # - 8 # ![](../Photo/49.png) import numpy as np for i in range(2): dian = np.random.choice(['0','1','2']) me=str(input('scissor (0), rock(1), paper(2):')) if dian == '0': if me == '0': print('The computer is scissor. You are scissor too. It is a draw.') elif me == '1': print('The computer is scissor. You are rock. You won.') else: print('The computer is scissor. You are paper.You lose.') elif dian == '1': if me == '0': print('The computer is rock. You are scissor. You lose.') elif me == '1': print('The computer is rock. You are rock too. It is a drow.') else: print('The computer is rock. You are paper. You won.') else: if me == '0': print('The computer is paper. You are scissor. You won.') elif me == '1': print('The computer is paper. You are rock. You lose.') else: print('The computer is paper. You are paper too. 
It is a draw.') # - 9 # ![](../Photo/50.png) for i in range(2): year = int(input('Enter year: (e.g., 2008): ')) month = int(input('Enter month: 1-12: ')) day = int(input('Enter the day of the month: 1-31: ')) def zeleyizhi(q,m,k,j): h = (q + (26*(m + 1) // 10) + k + (k // 4) + (j // 4) + 5*j) % 7 return h def ny(): j = year // 100 if month == 1: m = 13 k = year % 100 - 1 elif month == 2: m = 14 k = year % 100 - 1 else: m = month k = year % 100 q = day return q, m, k, j,day,month,year if __name__ == '__main__': q,m,k,j,day,month,year = ny() h = zeleyizhi(q, m, k, j) if h == 0: week = 'Saturday' elif h == 1: week = 'Sunday' elif h == 2: week = 'Monday' elif h == 3: week = 'Tuesday' elif h == 4: week = 'Wednesday' elif h == 5: week = 'Thursday' elif h == 6: week = 'Friday' print('Day of the week is {}'.format(week)) # - 10 # ![](../Photo/51.png) import numpy as xp hs = xp.random.choice(['梅花','红桃','方块','黑桃']) dx = xp.random.choice(['Ace','2','3','4','5','6','7','8','9','Jack','Queen','King']) print('The card you picked is the {} of {}'.format(dx,hs)) # - 11 # ![](../Photo/52.png) for i in range(2): def palindrome(num): a = num b = 0 while a > 0: b = b * 10 + a % 10 a //= 10 return b == num num = int(input('Enter a three-digit interger : ')) a = palindrome(num) if a == True: print('%d is a palindrome' %num) else: print('%d is not a palindrome' %num) # - 12 # ![](../Photo/53.png) import math a,b,c = map(int,input('Enter three edges: ').split(',')) if a + b > c and a + c > b and b + c > a: l = a + b + c print('The perimeter is %d' % (a + b + c)) else: print('Error')
7.18.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- def reward(params): """ Available option: all_wheels_on_track (bool) True if car is on track, False otherwise x (float) x coordinate in meters y (float) y coordinate in meters distance_from_center (float) distance from car center to track center in meters is_left_of_center (bool) True if car is left of track cener, False otherwise heading (float) range of [0,360), this is the angle in degrees between the car's direction and the x-axis progress (float) range of [0,100], this is the percentage of the track completed steps (int) number of steps taken in the environment. This resets every time a new episode begins, and currently the maximum episode length is 200 speed (float) current speed of car in meters per second steering_angle (float) range of about [-30,30], this is the angle at which the wheels are turning track_width (float) the track width in meters """ if params['all_wheels_on_track']: return 1.0 else: return 0.0 from train import ModelBuilder, ModelTester # + model_builder = ModelBuilder() model_builder.set_reward(reward) model_builder.run_builder_tool() # - model_tester = ModelTester() model_tester._env.update_reward_func(reward) model_tester.run_test_tool()
TrainingDemo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/krishnaaxo/Drug_Discovery_AI/blob/main/DATA_Preparation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="o-4IOizard4P" # ## **Download PaDEL-Descriptor** # + colab={"base_uri": "https://localhost:8080/"} id="H0mjQ2PcrSe5" outputId="a993ff5e-56d2-4cee-e546-e2f05022edad" # ! wget https://raw.githubusercontent.com/krishnaaxo/Drug_Discovery_AI/main/padel.zip # ! wget https://raw.githubusercontent.com/krishnaaxo/Drug_Discovery_AI/main/padel.sh # + colab={"base_uri": "https://localhost:8080/"} id="64HnTL4tS-nA" outputId="6b00f6af-7d04-487d-d3d1-35bfeef521b7" # ! unzip padel.zip # + [markdown] id="QmxXXFa4wTNG" # ## **Load bioactivity data** # + [markdown] id="fcBvxkPWKFRV" # Download the curated ChEMBL bioactivity data that has been pre-processed from Parts 1 and 2 of this Bioinformatics Project series. Here we will be using the **bioactivity_data_3class_pIC50.csv** file that essentially contain the pIC50 values that we will be using for building a regression model. # + colab={"base_uri": "https://localhost:8080/"} id="JBs7QdJoubWC" outputId="104f8a95-2283-4737-d08e-8930fd2f1922" # ! 
wget https://raw.githubusercontent.com/krishnaaxo/Drug_Discovery_AI/main/acetylcholinesterase_04_bioactivity_data_3class_pIC50.csv # + id="Fpu5C7HlwV9s" import pandas as pd # + id="GCcE8J5XwjtB" df3 = pd.read_csv('acetylcholinesterase_04_bioactivity_data_3class_pIC50.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 419} id="60z_N6egNiSJ" outputId="e473dae9-e568-49be-ebb6-6bf7f6781376" df3 # + id="BJDJkY43R-9F" selection = ['canonical_smiles','molecule_chembl_id'] df3_selection = df3[selection] df3_selection.to_csv('molecule.smi', sep='\t', index=False, header=False) # + colab={"base_uri": "https://localhost:8080/"} id="nRSCoPVDSkf5" outputId="d9358b94-02bf-4b83-8b28-b337418173f4" # ! cat molecule.smi | head -5 # + colab={"base_uri": "https://localhost:8080/"} id="GlYaJ9pzUGjS" outputId="ab5e88da-592e-4002-e553-28a48cf1e1aa" # ! cat molecule.smi | wc -l # + [markdown] id="YzN_S4Quro5S" # ## **Calculate fingerprint descriptors** # # + [markdown] id="JsgTV-ByxdMa" # ### **Calculate PaDEL descriptors** # + colab={"base_uri": "https://localhost:8080/"} id="hSCopQvEiSMj" outputId="a2e4c3e6-c690-4201-aedc-5ab62204c36a" # ! cat padel.sh # + colab={"base_uri": "https://localhost:8080/"} id="6kN9jrGpS5nE" outputId="0f3222ea-8bc0-49e8-b9b0-59529a58d9af" # ! bash padel.sh # + colab={"base_uri": "https://localhost:8080/"} id="2p7rAVy_k_hH" outputId="53e5c99d-f920-4a78-dae6-0b6522cd1255" # ! 
ls -l # + [markdown] id="gUMlPfFrxicj" # ## **Preparing the X and Y Data Matrices** # + [markdown] id="30aa4WP4ZA8M" # ### **X data matrix** # + id="3g319qxVl7tY" df3_X = pd.read_csv('descriptors_output.csv') # + colab={"base_uri": "https://localhost:8080/", "height": 507} id="hBp1PTObFQDd" outputId="fca46710-4891-495e-a322-2ac753577b3e" df3_X # + colab={"base_uri": "https://localhost:8080/", "height": 507} id="dTJsDPBKq_hr" outputId="2c1f9bfc-2a24-47cd-ca81-d6c77768b0ad" df3_X = df3_X.drop(columns=['Name']) df3_X # + [markdown] id="6ePu814KZNBG" # ## **Y variable** # + [markdown] id="9ssfxFCdr7ua" # ### **Convert IC50 to pIC50** # + colab={"base_uri": "https://localhost:8080/"} id="FlhsSEW5FXTG" outputId="616dd965-a4d9-49d5-f171-ca812917baab" df3_Y = df3['pIC50'] df3_Y # + [markdown] id="eET6iZ1Aw3oe" # ## **Combining X and Y variable** # + colab={"base_uri": "https://localhost:8080/", "height": 439} id="FRBfBP3QxFJp" outputId="cc9226c1-3d55-4c03-d945-9d2e6b484755" dataset3 = pd.concat([df3_X,df3_Y], axis=1) dataset3 # + id="75npGyvhae0e" dataset3.to_csv('acetylcholinesterase_06_bioactivity_data_3class_pIC50_pubchem_fp.csv', index=False)
DATA_Preparation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] id="iFXsrSxJlzCW" # #Introduction # # ..... # # + [markdown] id="oIxLDLnF9g4B" # Check to see if jupyter lab uses the correct python interpreter with '!which python'. # It should be something like '/opt/anaconda3/envs/[environment name]/bin/python' (on Mac). # If not, try this: https://github.com/jupyter/notebook/issues/3146#issuecomment-352718675 # + colab={"base_uri": "https://localhost:8080/"} id="4ZwEkMzu9g4C" outputId="93606012-efdf-44b6-f697-8a5354bffa5f" # !which python # !python --version # + colab={"base_uri": "https://localhost:8080/"} id="2cwaNQyr9oDn" outputId="0400564b-750d-4a80-92d7-1b28c3680250" # !pip install html2text # + [markdown] id="J6aqBT6G9g4D" # # Install dependencies: # + id="H0q7BTlbiCWW" install_packages = False if install_packages: # !conda install tensorflow=2 -y # !conda install -c anaconda pandas -y # !conda install -c conda-forge tensorflow-hub -y # !conda install -c akode html2text -y # !conda install -c conda-forge tqdm -y # !conda install -c anaconda scikit-learn -y # !conda install -c conda-forge matplotlib -y # !conda install -c anaconda seaborn -y # + [markdown] id="g9saS2P09g4D" # # Imports # + id="xD4HKjy9TC-g" #imports import pandas as pd import numpy as np import os import time import tensorflow as tf import tensorflow_hub as hub import zipfile from html2text import HTML2Text from tqdm import tqdm import re from sklearn.metrics import pairwise_distances from sklearn.preprocessing import normalize import matplotlib.pyplot as plt import seaborn as sns # + [markdown] id="bLrw4ilF9g4E" # # Set pandas print options # This will improve readability of printed pandas dataframe. 
# # + id="SEH28FctS6SP" pd.set_option('display.max_rows', None) pd.set_option('display.max_columns', None) pd.set_option('display.width', None) pd.set_option('display.max_colwidth', None) # + [markdown] id="VatRSTkJ9g4E" # ## Set global Parameters # Set your parameters here: # # data_path: In this path put the data you have downloaded with YouTube Data Tools. # output_path: Tghe files generated in this notebook will be saved here. # # url_dict: URLs to models on Tensorflow hub are saved here. Other models are available there. # model_type: Define which model you would like to use. Choose one from url_dict # # new_embeddings: If this is true, new embeddings will be generated and saved at output_path. Otherwise, embeddings are loaded from Disc. # # # # + id="TL7Rvq_TD086" data_path = './data/comments_climate_change_trump.tab' output_path = "./output/" new_embeddings = True url_dict = { 'Transformer' : "https://tfhub.dev/google/universal-sentence-encoder-large/5", 'DAN' : "https://tfhub.dev/google/universal-sentence-encoder/4", 'Transformer_Multilingual': "https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3" } model_type = 'Transformer' #@param ['DAN','Transformer','Transformer_Multilingual'] # + [markdown] id="ZFHn_DIo9g4G" # ## Create output directory # Try to create the directory defined by output_path # + colab={"base_uri": "https://localhost:8080/"} id="NuvhsxjlZFhu" outputId="a7d08c54-4e4e-4f07-aad3-628478b3fdff" try: os.mkdir(output_path) except OSError: print ("Creation of the directory %s failed" % output_path) else: print ("Successfully created the directory %s " % output_path) # + [markdown] id="VO9QOLP_9DxJ" # # Load Data # Load you data as a pandas dataframe # + id="XHqOeot79g4G" if new_embeddings: data = pd.read_csv(data_path,sep='\t',header=(0)) data.head() # + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="FzU4myRQEOfw" outputId="a9c78f9d-d637-430d-b37d-bba7a01709a7" data.head() # + [markdown] id="_IUWISh1M5nz" # 
# Preprocessing # Preprocess your data: # - Drop empty rows # - Drop unused columns # + id="gbU3mk83MBIH" if new_embeddings: data = data.dropna(subset=['text', 'authorName']) # drop rows with no content data=data.drop(['id', 'replyCount','likeCount','authorChannelUrl','authorChannelId','isReplyTo','isReplyToName'],axis=1) # drop unused columns data.head() # + [markdown] id="qVqAAc3s9g4G" # - remove HTML-tags, links and usernames # + id="vIgxEvzcM39J" colab={"base_uri": "https://localhost:8080/"} outputId="c2928ed3-cca9-4243-ee23-f0af5a496671" if new_embeddings: # Remove HTML tags tqdm.pandas() h = HTML2Text() h.ignore_links = True data['cleaned'] = data['text'].progress_apply(lambda x: h.handle(x)) print( "Removed HTML Tags.") # Remove links http_link_pattern = r'http\S+' bitly_link_pattern = r'bit.ly/\S+' data['cleaned'] = data['cleaned'].str.replace(http_link_pattern, '') data['cleaned'] = data['cleaned'].str.replace(bitly_link_pattern, '') print( "Removed Links.") # Remove user names keep_names = ["earth", "Tide", "Geologist", "A Person", "Titanic", "adventure", "Sun", "The United States Of America"] # user names we want to keep user_names = [name for name in data['authorName'].unique() if (len(name)> 3 and name not in keep_names)] data['cleaned'] = data['cleaned'].str.replace('|'.join(map(re.escape, user_names)), '') print( "Removed user names.") # + [markdown] id="N0ODyI2A9g4H" # # Save or Load preprocessed data # # Save your data afte preprocessing, or load preprocessed data from disc. 
# + colab={"base_uri": "https://localhost:8080/", "height": 449} id="W0uP3D5IXvyT" outputId="e3fa173f-6059-4d45-fab3-b5cd22f91a6b" if new_embeddings: data.to_pickle(output_path+'data_preprocessed'+'.pkl') else: data = pd.read_pickle(output_path+'data_preprocessed'+'.pkl') data.head() # + [markdown] id="5YxXP5PtAFyV" # # Produce Text Embeddings with Universal Sentence Encoder # + [markdown] id="d_sStH0bXhFD" # ## Load Model # Load the model from TF-hub # + id="ugb89Xy7XdqR" colab={"base_uri": "https://localhost:8080/"} outputId="47c637db-ef2b-4c10-bf9d-e99144987d8d" hub_url = url_dict[model_type] if new_embeddings: print("Loading model. This will take some time...") embed = hub.load(hub_url) # + [markdown] id="smVQb22E9g4I" # ## Embed Documents # Produce embeddings of your documents. # + id="Yz3Qlca7YGEN" colab={"base_uri": "https://localhost:8080/"} outputId="834ead23-b28f-46f5-cd4c-744ff2c1da65" if new_embeddings: for k,g in data.groupby(np.arange(len(data))//200): if k == 0: embeddings = embed(g['cleaned']) else: embeddings_new = embed(g['cleaned']) embeddings = tf.concat(values=[embeddings,embeddings_new],axis = 0) print(k , end =" ") print("The embeddings vector is of fixed length {}".format(embeddings.shape[1])) np.save(output_path+'/embeddings'+model_type+'.npy', embeddings, allow_pickle=True, fix_imports=True) else: embeddings = np.load(output_path+'/embeddings'+model_type+'.npy', mmap_mode=None, allow_pickle=False, fix_imports=True, encoding='ASCII') embeddings.shape # + [markdown] id="oFTi_5lUAnJF" # ## Calculate Similarity Matrix with angular distance # # 'Following Cer et al. (2018), we first compute # the sentence embeddings u, v for an STS sentence # pair, and then score the sentence pair similarity # based on the angular distance between the two # embedding vectors d = − arccos (uv/||u|| ||v||).' 
# + colab={"base_uri": "https://localhost:8080/"} id="ZTMjSNyc9g4J" outputId="0c641819-2212-46c0-a62c-9fe0d0ca0cb4" from sklearn.metrics.pairwise import cosine_similarity def cos_sim(input_vectors): similarity = cosine_similarity(input_vectors) return similarity cosine_similarity_matrix = cos_sim(np.array(embeddings)) print(cosine_similarity_matrix) # + [markdown] id="U6JBhQDC9g4J" # # Plots Similarity # Plot and print a heat map showing the semantic contextual similarity between comments. # + colab={"base_uri": "https://localhost:8080/", "height": 362} id="Vl7_jFBa9g4K" outputId="2ad6b881-87e4-4854-d4ee-01a06bcf1d83" import seaborn as sns def plot_similarity(labels, features, rotation): corr = np.inner(features, features) sns.set(font_scale=1.2) g = sns.heatmap( corr, xticklabels=labels, yticklabels=labels, vmin=0, vmax=1, cmap="YlOrRd") g.set_xticklabels(labels, rotation=rotation) g.set_title("Semantic Textual Similarity") num_samples = 5 off_set = 10000 plot_similarity(data.iloc[off_set:off_set+num_samples]['cleaned'], embeddings[off_set:off_set+num_samples], 90) # + [markdown] id="RvfMRLZa9g4K" # # Show neighbours of a comment # Define which comment to analyze # + id="tweEdE9A9g4K" comment_index = 527 #324 comment = data["cleaned"][comment_index] comment_list = data["cleaned"].tolist() print(comment) # + [markdown] id="taGyiM5c9g4L" # Print similar comments. 
# + colab={"base_uri": "https://localhost:8080/"} id="pTmkn6Zt9g4L" outputId="b4c1e588-a245-4180-eeb9-f10c64e4198f"
def get_top_similar(sentence, sentence_list, similarity_matrix, topN):
    """Return the topN entries of sentence_list most similar to `sentence`.

    Parameters:
        sentence:          the query sentence; must occur in sentence_list
                           (only its first occurrence is used).
        sentence_list:     list of all sentences, in the same order as the
                           rows/columns of similarity_matrix.
        similarity_matrix: square matrix where entry [i, j] is the
                           similarity between sentences i and j.
        topN:              number of results to return.

    Note: the query itself is included in the result — its self-similarity
    is maximal, so the first entry is normally `sentence` itself.
    """
    # Index of the query sentence (first occurrence) in the list.
    index = sentence_list.index(sentence)
    # The corresponding row holds the query's similarity to every sentence.
    similarity_row = np.array(similarity_matrix[index, :])
    # Indices of the topN largest similarities, in descending order.
    indices = similarity_row.argsort()[-topN:][::-1]
    return [sentence_list[i] for i in indices]


# Guarded so the helper above can be imported without re-running the demo
# (which depends on `comment`, `comment_list` and `cosine_similarity_matrix`
# defined by earlier cells). In a notebook __name__ == '__main__', so the
# behavior there is unchanged.
if __name__ == '__main__':
    for i, value in enumerate(get_top_similar(comment, comment_list, cosine_similarity_matrix, 20)):
        print("Top similar comment {}: {}".format(i+1, value))
Pipeline/florian_mies_assignment4/Assignment_4_Language_model.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # --- # + [markdown] origin_pos=0 # # 层和块 # :label:`sec_model_construction` # # 之前首次介绍神经网络时,我们关注的是具有单一输出的线性模型。 # 在这里,整个模型只有一个输出。 # 注意,单个神经网络 # (1)接受一些输入; # (2)生成相应的标量输出; # (3)具有一组相关 *参数*(parameters),更新这些参数可以优化某目标函数。 # # 然后,当考虑具有多个输出的网络时, # 我们利用矢量化算法来描述整层神经元。 # 像单个神经元一样,层(1)接受一组输入, # (2)生成相应的输出, # (3)由一组可调整参数描述。 # 当我们使用softmax回归时,一个单层本身就是模型。 # 然而,即使我们随后引入了多层感知机,我们仍然可以认为该模型保留了上面所说的基本架构。 # # 对于多层感知机而言,整个模型及其组成层都是这种架构。 # 整个模型接受原始输入(特征),生成输出(预测), # 并包含一些参数(所有组成层的参数集合)。 # 同样,每个单独的层接收输入(由前一层提供), # 生成输出(到下一层的输入),并且具有一组可调参数, # 这些参数根据从下一层反向传播的信号进行更新。 # # 事实证明,研究讨论“比单个层大”但“比整个模型小”的组件更有价值。 # 例如,在计算机视觉中广泛流行的ResNet-152架构就有数百层, # 这些层是由*层组*(groups of layers)的重复模式组成。 # 这个ResNet架构赢得了2015年ImageNet和COCO计算机视觉比赛 # 的识别和检测任务 :cite:`He.Zhang.Ren.ea.2016`。 # 目前ResNet架构仍然是许多视觉任务的首选架构。 # 在其他的领域,如自然语言处理和语音, # 层组以各种重复模式排列的类似架构现在也是普遍存在。 # # 为了实现这些复杂的网络,我们引入了神经网络*块*的概念。 # *块*(block)可以描述单个层、由多个层组成的组件或整个模型本身。 # 使用块进行抽象的一个好处是可以将一些块组合成更大的组件, # 这一过程通常是递归的,如 :numref:`fig_blocks`所示。 # 通过定义代码来按需生成任意复杂度的块, # 我们可以通过简洁的代码实现复杂的神经网络。 # # ![多个层被组合成块,形成更大的模型](../img/blocks.svg) # :label:`fig_blocks` # # 从编程的角度来看,块由*类*(class)表示。 # 它的任何子类都必须定义一个将其输入转换为输出的前向传播函数, # 并且必须存储任何必需的参数。 # 注意,有些块不需要任何参数。 # 最后,为了计算梯度,块必须具有反向传播函数。 # 在定义我们自己的块时,由于自动微分(在 :numref:`sec_autograd` 中引入) # 提供了一些后端实现,我们只需要考虑前向传播函数和必需的参数。 # # 在构造自定义块之前,(**我们先回顾一下多层感知机**) # ( :numref:`sec_mlp_concise` )的代码。 # 下面的代码生成一个网络,其中包含一个具有256个单元和ReLU激活函数的全连接隐藏层, # 然后是一个具有10个隐藏单元且不带激活函数的全连接输出层。 # # + origin_pos=1 tab=["mxnet"] from mxnet import np, npx from mxnet.gluon import nn npx.set_np() net = nn.Sequential() net.add(nn.Dense(256, activation='relu')) net.add(nn.Dense(10)) net.initialize() X = np.random.uniform(size=(2, 20)) net(X) # + [markdown] origin_pos=4 tab=["mxnet"] # 在这个例子中,我们通过实例化`nn.Sequential`来构建我们的模型, # 返回的对象赋给`net`变量。 # 接下来,我们反复调用`net`变量的`add`函数,按照想要执行的顺序添加层。 # 简而言之,`nn.Sequential`定义了一种特殊类型的`Block`, # 
即在Gluon中表示块的类,它维护`Block`的有序列表。 # `add`函数方便将每个连续的`Block`添加到列表中。 # 请注意,每层都是`Dense`类的一个实例,`Dense`类本身就是`Block`的子类。 # 到目前为止,我们一直在通过`net(X)`调用我们的模型来获得模型的输出。 # 这实际上是`net.forward(X)`的简写, # 这是通过`Block`类的`__call__`函数实现的一个Python技巧。 # 前向传播(`forward`)函数非常简单:它将列表中的每个`Block`连接在一起, # 将每个`Block`的输出作为输入传递给下一层。 # # + [markdown] origin_pos=7 # ## [**自定义块**] # # 要想直观地了解块是如何工作的,最简单的方法就是自己实现一个。 # 在实现我们自定义块之前,我们简要总结一下每个块必须提供的基本功能: # # + [markdown] origin_pos=8 tab=["mxnet"] # 1. 将输入数据作为其前向传播函数的参数。 # 1. 通过前向传播函数来生成输出。请注意,输出的形状可能与输入的形状不同。例如,我们上面模型中的第一个全连接的层接收任意维的输入,但是返回一个维度256的输出。 # 1. 计算其输出关于输入的梯度,可通过其反向传播函数进行访问。通常这是自动发生的。 # 1. 存储和访问前向传播计算所需的参数。 # 1. 根据需要初始化模型参数。 # # + [markdown] origin_pos=10 # 在下面的代码片段中,我们从零开始编写一个块。 # 它包含一个多层感知机,其具有256个隐藏单元的隐藏层和一个10维输出层。 # 注意,下面的`MLP`类继承了表示块的类。 # 我们的实现只需要提供我们自己的构造函数(Python中的`__init__`函数)和前向传播函数。 # # + origin_pos=11 tab=["mxnet"] class MLP(nn.Block): # 用模型参数声明层。这里,我们声明两个全连接的层 def __init__(self, **kwargs): # 调用MLP的父类Block的构造函数来执行必要的初始化。 # 这样,在类实例化时也可以指定其他函数参数,例如模型参数params(稍后将介绍) super().__init__(**kwargs) self.hidden = nn.Dense(256, activation='relu') # 隐藏层 self.out = nn.Dense(10) # 输出层 # 定义模型的前向传播,即如何根据输入X返回所需的模型输出 def forward(self, X): return self.out(self.hidden(X)) # + [markdown] origin_pos=14 # 我们首先看一下前向传播函数,它以`X`作为输入, # 计算带有激活函数的隐藏表示,并输出其未规范化的输出值。 # 在这个`MLP`实现中,两个层都是实例变量。 # 要了解这为什么是合理的,可以想象实例化两个多层感知机(`net1`和`net2`), # 并根据不同的数据对它们进行训练。 # 当然,我们希望它们学到两种不同的模型。 # # 接着我们[**实例化多层感知机的层,然后在每次调用前向传播函数时调用这些层**]。 # 注意一些关键细节: # 首先,我们定制的`__init__`函数通过`super().__init__()` # 调用父类的`__init__`函数, # 省去了重复编写模版代码的痛苦。 # 然后,我们实例化两个全连接层, # 分别为`self.hidden`和`self.out`。 # 注意,除非我们实现一个新的运算符, # 否则我们不必担心反向传播函数或参数初始化, # 系统将自动生成这些。 # # 我们来试一下这个函数: # # + origin_pos=15 tab=["mxnet"] net = MLP() net.initialize() net(X) # + [markdown] origin_pos=18 # 块的一个主要优点是它的多功能性。 # 我们可以子类化块以创建层(如全连接层的类)、 # 整个模型(如上面的`MLP`类)或具有中等复杂度的各种组件。 # 我们在接下来的章节中充分利用了这种多功能性, # 比如在处理卷积神经网络时。 # # ## [**顺序块**] # # 现在我们可以更仔细地看看`Sequential`类是如何工作的, # 回想一下`Sequential`的设计是为了把其他模块串起来。 # 为了构建我们自己的简化的`MySequential`, # 
我们只需要定义两个关键函数: # # 1. 一种将块逐个追加到列表中的函数。 # 1. 一种前向传播函数,用于将输入按追加块的顺序传递给块组成的“链条”。 # # 下面的`MySequential`类提供了与默认`Sequential`类相同的功能。 # # + origin_pos=19 tab=["mxnet"] class MySequential(nn.Block): def add(self, block): # 这里,block是Block子类的一个实例,我们假设它有一个唯一的名称。我们把它 # 保存在'Block'类的成员变量_children中。block的类型是OrderedDict。 # 当MySequential实例调用initialize函数时,系统会自动初始化_children # 的所有成员 self._children[block.name] = block def forward(self, X): # OrderedDict保证了按照成员添加的顺序遍历它们 for block in self._children.values(): X = block(X) return X # + [markdown] origin_pos=22 tab=["mxnet"] # `add`函数向有序字典`_children`添加一个块。 # 你可能会好奇为什么每个Gluon中的`Block`都有一个`_children`属性? # 以及为什么我们使用它而不是自己定义一个Python列表? # 简而言之,`_children`的主要优点是: # 在块的参数初始化过程中, # Gluon知道在`_children`字典中查找需要初始化参数的子块。 # # + [markdown] origin_pos=24 # 当`MySequential`的前向传播函数被调用时, # 每个添加的块都按照它们被添加的顺序执行。 # 现在可以使用我们的`MySequential`类重新实现多层感知机。 # # + origin_pos=25 tab=["mxnet"] net = MySequential() net.add(nn.Dense(256, activation='relu')) net.add(nn.Dense(10)) net.initialize() net(X) # + [markdown] origin_pos=28 # 请注意,`MySequential`的用法与之前为`Sequential`类编写的代码相同 # (如 :numref:`sec_mlp_concise` 中所述)。 # # ## [**在前向传播函数中执行代码**] # # `Sequential`类使模型构造变得简单, # 允许我们组合新的架构,而不必定义自己的类。 # 然而,并不是所有的架构都是简单的顺序架构。 # 当需要更强的灵活性时,我们需要定义自己的块。 # 例如,我们可能希望在前向传播函数中执行Python的控制流。 # 此外,我们可能希望执行任意的数学运算, # 而不是简单地依赖预定义的神经网络层。 # # 到目前为止, # 我们网络中的所有操作都对网络的激活值及网络的参数起作用。 # 然而,有时我们可能希望合并既不是上一层的结果也不是可更新参数的项, # 我们称之为*常数参数*(constant parameter)。 # 例如,我们需要一个计算函数 # $f(\mathbf{x},\mathbf{w}) = c \cdot \mathbf{w}^\top \mathbf{x}$的层, # 其中$\mathbf{x}$是输入, # $\mathbf{w}$是参数, # $c$是某个在优化过程中没有更新的指定常量。 # 因此我们实现了一个`FixedHiddenMLP`类,如下所示: # # + origin_pos=29 tab=["mxnet"] class FixedHiddenMLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) # 使用get_constant函数创建的随机权重参数在训练期间不会更新(即为常量参数) self.rand_weight = self.params.get_constant( 'rand_weight', np.random.uniform(size=(20, 20))) self.dense = nn.Dense(20, activation='relu') def forward(self, X): X = self.dense(X) # 使用创建的常量参数以及relu和dot函数 X = 
npx.relu(np.dot(X, self.rand_weight.data()) + 1) # 复用全连接层。这相当于两个全连接层共享参数 X = self.dense(X) # 控制流 while np.abs(X).sum() > 1: X /= 2 return X.sum() # + [markdown] origin_pos=32 # 在这个`FixedHiddenMLP`模型中,我们实现了一个隐藏层, # 其权重(`self.rand_weight`)在实例化时被随机初始化,之后为常量。 # 这个权重不是一个模型参数,因此它永远不会被反向传播更新。 # 然后,神经网络将这个固定层的输出通过一个全连接层。 # # 注意,在返回输出之前,模型做了一些不寻常的事情: # 它运行了一个while循环,在$L_1$范数大于$1$的条件下, # 将输出向量除以$2$,直到它满足条件为止。 # 最后,模型返回了`X`中所有项的和。 # 注意,此操作可能不会常用于在任何实际任务中, # 我们只是向你展示如何将任意代码集成到神经网络计算的流程中。 # # + origin_pos=33 tab=["mxnet"] net = FixedHiddenMLP() net.initialize() net(X) # + [markdown] origin_pos=35 # 我们可以[**混合搭配各种组合块的方法**]。 # 在下面的例子中,我们以一些想到的方法嵌套块。 # # + origin_pos=36 tab=["mxnet"] class NestMLP(nn.Block): def __init__(self, **kwargs): super().__init__(**kwargs) self.net = nn.Sequential() self.net.add(nn.Dense(64, activation='relu'), nn.Dense(32, activation='relu')) self.dense = nn.Dense(16, activation='relu') def forward(self, X): return self.dense(self.net(X)) chimera = nn.Sequential() chimera.add(NestMLP(), nn.Dense(20), FixedHiddenMLP()) chimera.initialize() chimera(X) # + [markdown] origin_pos=39 # ## 效率 # # + [markdown] origin_pos=40 tab=["mxnet"] # 你可能会开始担心操作效率的问题。 # 毕竟,我们在一个高性能的深度学习库中进行了大量的字典查找、 # 代码执行和许多其他的Python代码。 # Python的问题[全局解释器锁](https://wiki.python.org/moin/GlobalInterpreterLock) # 是众所周知的。 # 在深度学习环境中,我们担心速度极快的GPU可能要等到CPU运行Python代码后才能运行另一个作业。 # # 提高Python速度的最好方法是完全避免使用Python。 # Gluon这样做的一个方法是允许*混合式编程*(hybridization),这将在后面描述。 # Python解释器在第一次调用块时执行它。 # Gluon运行时记录正在发生的事情,以及下一次它将对Python调用加速。 # 在某些情况下,这可以大大加快运行速度, # 但当控制流(如上所述)在不同的网络通路上引导不同的分支时,需要格外小心。 # 我们建议感兴趣的读者在读完本章后,阅读混合式编程部分( :numref:`sec_hybridize` )来了解编译。 # # + [markdown] origin_pos=43 # ## 小结 # # * 一个块可以由许多层组成;一个块可以由许多块组成。 # * 块可以包含代码。 # * 块负责大量的内部处理,包括参数初始化和反向传播。 # * 层和块的顺序连接由`Sequential`块处理。 # # ## 练习 # # 1. 如果将`MySequential`中存储块的方式更改为Python列表,会出现什么样的问题? # 1. 实现一个块,它以两个块为参数,例如`net1`和`net2`,并返回前向传播中两个网络的串联输出。这也被称为平行块。 # 1. 
假设你想要连接同一网络的多个实例。实现一个函数,该函数生成同一个块的多个实例,并在此基础上构建更大的网络。 # # + [markdown] origin_pos=44 tab=["mxnet"] # [Discussions](https://discuss.d2l.ai/t/1828) #
submodules/resource/d2l-zh/mxnet/chapter_deep-learning-computation/model-construction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Self-Driving Car Engineer Nanodegree # # ## Deep Learning # # ## Project: Build a Traffic Sign Recognition Classifier # # In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary. # # > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n", # "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. # # In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project. # # The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. 
If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file. # # # >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode. # --- # ## Step 0: Load The Data # + # Load pickled data import pickle # TODO: Fill this in based on where you saved the training and testing data training_file = '../data/train.p' validation_file= '../data/valid.p' testing_file = '../data/test.p' with open(training_file, mode='rb') as f: train = pickle.load(f) with open(validation_file, mode='rb') as f: valid = pickle.load(f) with open(testing_file, mode='rb') as f: test = pickle.load(f) X_train, y_train = train['features'], train['labels'] X_valid, y_valid = valid['features'], valid['labels'] X_test, y_test = test['features'], test['labels'] assert(len(X_train) == len(y_train)) assert(len(X_valid) == len(y_valid)) assert(len(X_test) == len(y_test)) print() print("Image Shape: {}".format(X_train[0].shape)) print() print("Training Set: {} samples".format(len(X_train))) print("Validation Set: {} samples".format(len(X_valid))) print("Test Set: {} samples".format(len(X_test))) # - # --- # # ## Step 1: Dataset Summary & Exploration # # The pickled data is a dictionary with 4 key/value pairs: # # - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels). # - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id. # - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image. # - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. 
THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES** # # Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. # ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas # + ### Replace each question mark with the appropriate value. ### Use python, pandas or numpy methods rather than hard coding the results import numpy as np import csv # TODO: Number of training examples n_train = len(X_train) # TODO: Number of validation examples n_validation = len(X_valid) # TODO: Number of testing examples. n_test = len(X_test) # TODO: What's the shape of an traffic sign image? image_shape = X_train[0].shape # TODO: How many unique classes/labels there are in the dataset. n_classes = len(np.unique(y_train)) #List of traffic sign labels names with open('signnames.csv', mode='r') as infile: reader = csv.reader(infile) next(reader, infile) traffic_sign_labels = [rows[1] for rows in reader] print("Number of training examples =", n_train) print("Number of testing examples =", n_test) print("Validation Set: {} samples".format(len(X_valid))) print("Image data shape =", image_shape) print("Number of classes =", n_classes) # - # ### Include an exploratory visualization of the dataset # Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. # # The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python. # # **NOTE:** It's recommended you start with something simple first. 
If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others? # + ### Data exploration visualization code goes here. ### Feel free to use as many code cells as needed. import random import matplotlib.pyplot as plt # Visualizations will be shown in the notebook. # %matplotlib inline index = random.randint(0, len(X_train)) image = X_train[index].squeeze() plt.figure(figsize=(1,1)) plt.imshow(image, cmap="gray") print(y_train[index]) # + from collections import Counter #Determine count of each label in splits train_set = Counter(y_train) valid_set = Counter(y_valid) test_set = Counter(y_test) #Plot histogram of labels in each set #print("Visualize the reprentation of each label in all three splits") plt.figure(figsize=(12,6)) plt.title('Data Distribution') train_bar = plt.bar(range(len(train_set)), train_set.values(), color = 'g') valid_bar = plt.bar(range(len(valid_set)), valid_set.values(), color = 'y', bottom = list(train_set.values())) test_bar = plt.bar(range(len(test_set)), test_set.values(), color = 'r', bottom = list((train_set + valid_set).values())) plt.xticks(range(len(train_set)), train_set.keys()) plt.xlabel('Label') plt.ylabel('Count of Label Occurence') plt.legend([train_bar, valid_bar, test_bar], ['Training Data', 'Validation Data', 'Test Data']) #plt.savefig('.\\graphics\\datahistogram.png') plt.show() # - # ---- # # ## Step 2: Design and Test a Model Architecture # # Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset). 
# # The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! # # With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. # # There are various aspects to consider when thinking about this problem: # # - Neural network architecture (is the network over or underfitting?) # - Play around preprocessing techniques (normalization, rgb to grayscale, etc) # - Number of examples per label (some have more than others). # - Generate fake data. # # Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. # ### Pre-process the Data Set (normalization, grayscale, etc.) # Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. # # Other pre-processing steps are optional. You can try different techniques to see if it improves performance. # # Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. # + ### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include ### converting to grayscale, etc. 
### Feel free to use as many code cells as needed. # Convert to grayscale X_train_rgb = X_train X_train_gry = np.sum(X_train/3, axis=3, keepdims=True) X_valid_rgb = X_valid X_valid_gry = np.sum(X_valid/3, axis=3, keepdims=True) X_test_rgb = X_test X_test_gry = np.sum(X_test/3, axis=3, keepdims=True) X_train = X_train_gry X_valid = X_valid_gry X_test = X_test_gry print('RGB shape:', X_train_rgb.shape) print('Grayscale shape:', X_train_gry.shape) ## Normalize the train and test datasets to (-1,1) X_train_normalized = (X_train - 128)/128 X_valid_normalized = (X_valid - 128)/128 X_test_normalized = (X_test - 128)/128 X_train = X_train_normalized X_valid = X_valid_normalized X_test = X_test_normalized print() print(np.mean(X_train_normalized)) print(np.mean(X_valid_normalized)) print(np.mean(X_test_normalized)) # - """ Commenting this (augmentation) code as the accuracy of the model was reduced after augmentation #Declare pre-processing functions #References: #https://github.com/shazraz/Traffic-Sign-Classifier from tqdm import tqdm from skimage import transform import cv2 #Covert RGB image to grayscale def grayscale(img_set): gray_img_set = [] for img in img_set: gray_img_set.append(cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)) return gray_img_set #Perform histogram equalization using basic algorithm on grayscale images def equalize(img_set): equal_img_set = [] for img in img_set: equal_img_set.append(np.expand_dims(cv2.equalizeHist(img), axis = 2)) return equal_img_set #Perform histogram equalization using CLAHE algorithm on grayscale images def clahe_equalize(img_set): clahe_img_set = [] clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4)) for img in img_set: clahe_img_set.append(np.expand_dims(clahe.apply(img), axis = 2)) return clahe_img_set #Perform histogram equalization using CLAHE algorithm on RGB->YUV->RGB images def clahe_equalize_RGB(img_set): clahe_RGB_img_set = [] clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(4,4)) for img in img_set: y, u, v = 
cv2.split(cv2.cvtColor(img, cv2.COLOR_RGB2YUV)) y = clahe.apply(y) img = cv2.merge((y,u,v)) img = cv2.cvtColor(img, cv2.COLOR_YUV2RGB) clahe_RGB_img_set.append(img) return clahe_RGB_img_set #Merge RGB and gray channels to create image set of depth 4 def merge_channels(img_gray_set, img_RGB_set): merge_set = [] assert(len(img_RGB_set) == len(img_gray_set)) for img1, img2 in zip(img_gray_set, img_RGB_set): img = cv2.merge((img1, img2)) merge_set.append(img) return merge_set #Normalize image set using mean/std of entire image set def normalize(img_set, mean, std): norm_img_set = [] for img in tqdm(img_set, total = len(img_set)): norm_img_set.append((img - mean)/std) return norm_img_set """ """ Commenting this (augmentation) code as the accuracy of the model was reduced after augmentation import math def transform_image(img, rt_range, xlate_range): #Define Transformations - rotation and translation pixels_x, pixels_y, channels = img.shape rt_angle = np.random.uniform(rt_range) - rt_range/2 xlate_x = np.random.uniform(xlate_range) - xlate_range/2 xlate_y = np.random.uniform(xlate_range) - xlate_range/2 M_rot = cv2.getRotationMatrix2D((pixels_y/2, pixels_x/2), rt_angle, 1) M_xlate = np.float32([[1,0,xlate_x],[0,1,xlate_y]]) #Transform image img = cv2.warpAffine(img, M_xlate, (pixels_y, pixels_x)) img = cv2.warpAffine(img, M_rot, (pixels_y, pixels_x)) return img def augment_set(X_data, y_data, labels, threshold, rt_range, xlate_range): #Declare all variables images_dict = {} n_imgs = 0 #total # of images that will be perturbed pixels_x, pixels_y, channels = X_data.shape[1:4] n_aug_imgs = {} #holds # of images that need to be created for each label #Determine indices of images that need to be augmented and their corresponding labels for label in labels: images_dict[label] = X_data[np.where(y_data == label)] n_aug_imgs[label] = threshold - images_dict[label].shape[0] n_imgs += images_dict[label].shape[0] #Determine how many perturbed images need to be created for each label 
to meet the threshold print ("# of images available to be perturbed:", n_imgs) print("# of images to be created per label", n_aug_imgs) print("Total # of images to be created:", sum(n_aug_imgs.values())) #Create empty array to hold perturbed images new_images = np.empty([sum(n_aug_imgs.values()), pixels_x, pixels_y, channels], dtype = np.uint8) new_labels = np.empty([sum(n_aug_imgs.values())], dtype = int) #print("The shapes of the new arrays are:", new_images.shape, new_labels.shape) #Loop through labels/image arrays, determine # of perturbations for each image in label then perturb image offset = 0 for label, img_array in tqdm(images_dict.items(), total = len(labels)): #print("The image counter has been reset, starting label", label) new_img_count = 0 n_pertubations = math.ceil(n_aug_imgs[label]/images_dict[label].shape[0]) #print("Labels {:d} has {:d} images and {:d} new images need to be created" #.format(label, images_dict[label].shape[0], n_aug_imgs[label])) for img_index, img in enumerate(img_array): #print("We will create {:d} pertubations for this image.".format(n_pertubations)) #create the desired number of pertubations for i in range(n_pertubations): #Create the new image and label new_images[i+img_index*n_pertubations+offset] = transform_image(img, rt_range, xlate_range) #print("The new label is", label, "at index", i+img_index*n_pertubations+offset) new_labels[i+img_index*n_pertubations+offset] = label new_img_count += 1 if (new_img_count == n_aug_imgs[label]): break if (new_img_count == n_aug_imgs[label]): break offset += n_aug_imgs[label] #print("{:d} new images created for label {:d}".format(new_img_count, label)) #print("The offset is now {:d}\n".format(offset)) aug_X_data = np.concatenate((X_data, new_images), 0) aug_y_data = np.concatenate((y_data, new_labels), 0) return (aug_X_data, aug_y_data) #Do image mirroring and rotations on labels that are invariant to the operation def basic_augment(X_data, y_data, xflip, yflip, rot120): #Define 
variables and dictionaries xflip_dict = {} yflip_dict = {} rot_dict = {} n_xflip_imgs, n_yflip_imgs, n_rot_imgs = 0, 0, 0 #Setup up rotation matrices pixels_x, pixels_y, channels = X_data.shape[1:4] M1 = cv2.getRotationMatrix2D((pixels_y/2,pixels_x/2), 120, 1) M2 = cv2.getRotationMatrix2D((pixels_y/2,pixels_x/2), 240, 1) #Populate dictonary with images to be operated on with key = label for old_label, new_label in xflip: xflip_dict[old_label] = X_data[np.where(y_data == old_label)] n_xflip_imgs += xflip_dict[old_label].shape[0] for old_label, new_label in yflip: yflip_dict[old_label] = X_data[np.where(y_data == old_label)] n_yflip_imgs += yflip_dict[old_label].shape[0] for old_label, new_label in rot120: rot_dict[old_label] = X_data[np.where(y_data == old_label)] n_rot_imgs += rot_dict[old_label].shape[0] #Create empty arrays to hold new images and their corresponding labels xflip_images = np.empty([n_xflip_imgs, pixels_x, pixels_y, channels], dtype = np.uint8) xflip_labels = np.empty([n_xflip_imgs], dtype = int) yflip_images = np.empty([n_yflip_imgs, pixels_x, pixels_y, channels], dtype = np.uint8) yflip_labels = np.empty([n_yflip_imgs], dtype = int) rot_images = np.empty([2*n_rot_imgs, pixels_x, pixels_y, channels], dtype = np.uint8) rot_labels = np.empty([2*n_rot_imgs], dtype = int) #Perform xflips, yflips and 120deg, 240deg rotations offset = 0 for label, img_array in xflip_dict.items(): if img_array.shape[0] != 0: for index, img in enumerate(img_array): xflip_images[index+offset] = cv2.flip(img,0) xflip_labels[index+offset] = label offset += img_array.shape[0] offset = 0 for label, img_array in yflip_dict.items(): if img_array.shape[0] != 0: for index, img in enumerate(img_array): yflip_images[index+offset] = cv2.flip(img,1) yflip_labels[index+offset] = yflip[np.where(yflip[:,0] == label),1][0][0] offset += img_array.shape[0] offset = 0 for label, img_array in rot_dict.items(): if img_array.shape[0] != 0: for index, img in enumerate(img_array): 
rot_images[index+offset] = cv2.warpAffine(img, M1, (pixels_y,pixels_x)) rot_images[index+n_rot_imgs+offset] = cv2.warpAffine(img, M2, (pixels_y,pixels_x)) rot_labels[index+offset] = label rot_labels[index+n_rot_imgs+offset] = label offset += img_array.shape[0] #Append arrays and return new training data set aug_X_data = np.concatenate((X_data, xflip_images, yflip_images, rot_images), 0) aug_y_data = np.concatenate((y_data, xflip_labels, yflip_labels, rot_labels), 0) print("Number of x-flipped images:", xflip_images.shape[0]) print("Number of y-flipped images:", yflip_images.shape[0]) print("Number of rotated images (2x the input):", rot_images.shape[0]) print("Total # of appended images:", xflip_images.shape[0] + yflip_images.shape[0] + rot_images.shape[0]) print("Original training data set had {:d} images".format(X_data.shape[0])) print("Augmented training data set has {:d} images\n".format(aug_X_data.shape[0])) return (aug_X_data, aug_y_data) """ """ Commenting this (augmentation) code as the accuracy of the model was reduced after augmentation #Do Preliminary Augmentation on Dataset (i.e. image mirroring and simple rotations) #Identify which labels can be flipped horizontally/vertically and determine the new label if changed [oldlabel, new label]. #This is not an exhaustive list since not all labels that can be flipped require augmentation labels_x_flip = np.array([[17,17]]) labels_y_flip = np.array([[19,20], [20,19], [22,22], [26,26], [30,30], [33,34], [34,33], [36,37], [37,36], [38,39]]) labels_rot_120 = np.array([[40,40]]) X_train_aug, y_train_aug = basic_augment(X_train, y_train, labels_x_flip, labels_y_flip, labels_rot_120) print("Lets visualize the new augmented data set. 
Some of the lables have been augmented but" " clearly, more augmentation is required:") #Plot histogram of labels in each set aug_train_set = Counter(y_train_aug) valid_set = Counter(y_valid) test_set = Counter(y_test) plt.figure(figsize=(12,5)) plt.title(' Augmented Data Distribution') aug_train_bar = plt.bar(range(len(aug_train_set)), aug_train_set.values(), color = 'g') valid_bar = plt.bar(range(len(valid_set)), valid_set.values(), color = 'y', bottom = list(aug_train_set.values())) test_bar = plt.bar(range(len(test_set)), test_set.values(), color = 'r', bottom = list((aug_train_set + valid_set).values())) plt.xticks(range(len(aug_train_set)), aug_train_set.keys()) plt.xlabel('Label') plt.ylabel('Count of Label Occurence') plt.legend([aug_train_bar, valid_bar, test_bar], ['Augmented Training Data', 'Validation Data', 'Test Data']) #plt.savefig('.\\graphics\\AugDataHistogram.png') plt.show() #Identify which labels need to be augmented labels_to_augment = [] n_threshold = 1500 for key in aug_train_set.keys(): if(aug_train_set[key] < n_threshold): labels_to_augment.append(key) print("The following {:d} labels have less than {:d} training samples and need to be augmented:\n" .format(len(labels_to_augment), n_threshold)) print(labels_to_augment) #Perform additional augmentation to get under-represented labels in the training set up to the threshold rt_range = 15 xlate_range = 5 X_train_aug2, y_train_aug2 = augment_set(X_train_aug, y_train_aug, labels_to_augment, n_threshold, rt_range, xlate_range) print("The final augmented training set has {:d} images".format(X_train_aug2.shape[0]))""" """ Commenting this (augmentation) code as the accuracy of the model was reduced after augmentation print("Let's plot that histogram again...") aug_train_set = Counter(y_train_aug2) valid_set = Counter(y_valid) test_set = Counter(y_test) plt.figure(figsize=(12,5)) plt.title(' Augmented Data Distribution') aug_train_bar = plt.bar(range(len(aug_train_set)), aug_train_set.values(), color 
= 'g') valid_bar = plt.bar(range(len(valid_set)), valid_set.values(), color = 'y', bottom = list(aug_train_set.values())) test_bar = plt.bar(range(len(test_set)), test_set.values(), color = 'r', bottom = list((aug_train_set + valid_set).values())) plt.xticks(range(len(aug_train_set)), aug_train_set.keys()) plt.xlabel('Label') plt.ylabel('Count of Label Occurence') plt.legend([aug_train_bar, valid_bar, test_bar], ['Augmented Training Data', 'Validation Data', 'Test Data']) #plt.savefig('.\\graphics\\FinalDataHistogram.png') plt.show() print("The data distribution is much more balanced. Let's take a look at some of the newly transformed images:") index = random.randint(34799, len(X_train_aug2)) n_images = 10 image = X_train_aug2[index:index+n_images] image_labels = y_train_aug2[index:index+n_images] plt.figure(figsize=(15,3)) for i in range(n_images): plt.subplot(1,n_images,i+1) plt.imshow(image[i]) plt.savefig('.\\graphics\\augmentation.png') plt.show() """ """ Commenting this (augmentation) code as the accuracy of the model was reduced after augmentation ### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include ### converting to grayscale, etc. ### Feel free to use as many code cells as needed. 
# Convert to grayscale X_train_rgb = X_train_aug2 X_train_gry = np.sum(X_train_aug2/3, axis=3, keepdims=True) X_valid_rgb = X_valid X_valid_gry = np.sum(X_valid/3, axis=3, keepdims=True) X_test_rgb = X_test X_test_gry = np.sum(X_test/3, axis=3, keepdims=True) X_train = X_train_gry X_valid = X_valid_gry X_test = X_test_gry print('RGB shape:', X_train_rgb.shape) print('Grayscale shape:', X_train_gry.shape) ## Normalize the train and test datasets to (-1,1) X_train_normalized = (X_train - 128)/128 X_valid_normalized = (X_valid - 128)/128 X_test_normalized = (X_test - 128)/128 X_train = X_train_normalized X_valid = X_valid_normalized X_test = X_test_normalized y_train = y_train_aug2 #from the new code print() print(np.mean(X_train_normalized)) print(np.mean(X_valid_normalized)) print(np.mean(X_test_normalized)) """ # ### Question 1 # Describe how you preprocessed the image data. What techniques were chosen and why did you choose these techniques? # ### Answer # My dataset preprocessing included converting to grayscale and normalizing the image data to the range (-1,1). This was done using the line of code X_train_normalized = (X_train - 128)/128. The resulting dataset mean wasn't exactly zero, but it was reduced from around 82 to roughly -0.35. This technique was suggested in the lessons. # + from sklearn.utils import shuffle X_train, y_train = shuffle(X_train, y_train) # - # ## Setup TensorFlow # The `EPOCH` and `BATCH_SIZE` values affect the training speed and model accuracy. # # You do not need to modify this section. # + import tensorflow as tf EPOCHS = 60 BATCH_SIZE = 100 # - # ### Model Architecture # #### LeNet-5 # [LeNet-5](http://yann.lecun.com/exdb/lenet/) neural network architecture. # # ![LeNet Architecture](lenet.png) # Source: Yan LeCun # # ### Input # The LeNet architecture accepts a 32x32xC image as input, where C is the number of color channels. Since we have converted the colored images are grayscale, C is 1 in this case. 
#
# ### Architecture
# **Layer 1: Convolutional.** The output shape is 28x28x6.
#
# **Activation.** Activation function.
#
# **Pooling.** The output shape is 14x14x6.
#
# **Layer 2: Convolutional.** The output shape is 10x10x16.
#
# **Activation.** Another activation function.
#
# **Pooling.** The output shape is 5x5x16.
#
# **Flatten.** Flattening the output shape of the final pooling layer such that it's 1D instead of 3D. The easiest way to do this is by using `tf.contrib.layers.flatten`, which has been imported.
#
# **Layer 3: Fully Connected.** This should have 120 outputs.
#
# **Activation.** Your choice of activation function.
#
# **Layer 4: Fully Connected.** This should have 84 outputs.
#
# **Activation.** Your choice of activation function.
#
# **Layer 5: Fully Connected (Logits).** This should have 43 outputs.
#
# ### Output
# Return the result of the 2nd fully connected layer.

# +
### Define your architecture here.
### Feel free to use as many code cells as needed.
from tensorflow.contrib.layers import flatten

def LeNet(x):
    """Build the original LeNet-5 graph.

    Args:
        x: 4-D float tensor of shape (batch, 32, 32, 1) -- the normalized
           grayscale images produced by the preprocessing cell above.

    Returns:
        Logits tensor of shape (batch, 43), one raw score per sign class.
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # Activation.
    conv1 = tf.nn.relu(conv1)

    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b

    # Activation.
    conv2 = tf.nn.relu(conv2)

    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.add(tf.matmul(fc0, fc1_W), fc1_b)

    # Activation.
    fc1 = tf.nn.relu(fc1)

    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.add(tf.matmul(fc1, fc2_W), fc2_b)

    # Activation.
    fc2 = tf.nn.relu(fc2)

    # Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.add(tf.matmul(fc2, fc3_W), fc3_b)

    return logits
# -

# ### Modified LeNet Model Architecture
#
# The LeNet architecture has been adapted from the Sermanet/LeCun traffic sign classification journal article

def LeNet2(x):
    """Build the Sermanet/LeCun-style multi-scale network.

    Unlike LeNet, the flattened outputs of the second pooling stage and of a
    third 1x1x400 convolutional stage are concatenated (400 + 400 = 800)
    before the final fully connected layer, so the classifier sees features
    from two scales.

    NOTE(review): relies on the module-level `keep_prob` placeholder defined
    below for the dropout layer.

    Args:
        x: 4-D float tensor of shape (batch, 32, 32, 1).

    Returns:
        Tuple (logits, layer1, layer2, layer3, fc1); the intermediate
        activations are kept so they can be visualized later (Step 4).
    """
    # Hyperparameters
    mu = 0
    sigma = 0.1

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    W1 = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
    x = tf.nn.conv2d(x, W1, strides=[1, 1, 1, 1], padding='VALID')
    b1 = tf.Variable(tf.zeros(6))
    x = tf.nn.bias_add(x, b1)
    print("layer 1 shape:",x.get_shape())

    # Activation.
    x = tf.nn.relu(x)

    # Pooling. Input = 28x28x6. Output = 14x14x6.
    x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    layer1 = x

    # Layer 2: Convolutional. Output = 10x10x16.
    W2 = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    x = tf.nn.conv2d(x, W2, strides=[1, 1, 1, 1], padding='VALID')
    b2 = tf.Variable(tf.zeros(16))
    x = tf.nn.bias_add(x, b2)

    # Activation.
    x = tf.nn.relu(x)

    # Pooling. Input = 10x10x16. Output = 5x5x16.
    x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    layer2 = x

    # Layer 3: Convolutional. Output = 1x1x400.
    W3 = tf.Variable(tf.truncated_normal(shape=(5, 5, 16, 400), mean = mu, stddev = sigma))
    x = tf.nn.conv2d(x, W3, strides=[1, 1, 1, 1], padding='VALID')
    b3 = tf.Variable(tf.zeros(400))
    x = tf.nn.bias_add(x, b3)

    # Activation.
    x = tf.nn.relu(x)
    layer3 = x

    # Flatten layer2. Input = 5x5x16. Output = 400.
    layer2flat = flatten(layer2)
    print("layer2flat shape:",layer2flat.get_shape())

    # Flatten x. Input = 1x1x400. Output = 400.
    xflat = flatten(x)
    print("xflat shape:",xflat.get_shape())

    # Concat layer2flat and x. Input = 400 + 400. Output = 800
    x = tf.concat([xflat, layer2flat], 1)
    print("x shape:",x.get_shape())

    # Dropout (rate controlled by the `keep_prob` placeholder at run time)
    x = tf.nn.dropout(x, keep_prob)
    fc1 = x

    # Layer 4: Fully Connected. Input = 800. Output = 43.
    W4 = tf.Variable(tf.truncated_normal(shape=(800, 43), mean = mu, stddev = sigma))
    b4 = tf.Variable(tf.zeros(43))
    logits = tf.add(tf.matmul(x, W4), b4)

    #return logits
    return logits, layer1, layer2, layer3, fc1

# Graph inputs: image batch, integer labels, one-hot labels, dropout control.
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
keep_prob = tf.placeholder(tf.float32) # probability to keep units

# ### Question 2
# Describe what your final model architecture looks like (including model type, layers, layer sizes, connectivity, etc.)

# ### Answer
# I first implemented the same architecture from the LeNet Lab, with no changes since my dataset is in grayscale. This model worked quite well but then I also implemented the Sermanet/LeCun model from their traffic sign classifier paper and saw an immediate improvement. The layers are set up as below:
#
# 1. 5x5 convolution (32x32x1 in, 28x28x6 out)
# 2. ReLU
# 3. 2x2 max pool (28x28x6 in, 14x14x6 out)
# 4. 5x5 convolution (14x14x6 in, 10x10x16 out)
# 5. ReLU
# 6. 2x2 max pool (10x10x16 in, 5x5x16 out)
# 7. 5x5 convolution (5x5x16 in, 1x1x400 out)
# 8. ReLU
# 9. Flatten layers from numbers 8 (1x1x400 -> 400) and 6 (5x5x16 -> 400)
# 10. Concatenate flattened layers to a single size-800 layer
# 11. Dropout layer
# 12. Fully connected layer (800 in, 43 out)

# ### Train, Validate and Test the Model

# A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
# sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.

# +
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.

rate = 0.0009

# Build the loss/optimizer side of the graph on top of LeNet2's logits.
logits, layer1, layer2, layer3, fc1 = LeNet2(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
# -

# ## Model Evaluation
# Evaluate how well the loss and accuracy of the model for a given dataset.
#
# You do not need to modify this section.

# +
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()

def evaluate(X_data, y_data):
    """Return the mean accuracy of the model over (X_data, y_data).

    Iterates in batches of BATCH_SIZE with dropout disabled
    (keep_prob=1.0) and uses the default session established by the
    caller's `with tf.Session()` block. Accuracy is weighted by batch
    length so a short final batch does not skew the average.
    """
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
# -

# ## Train the Model
# Run the training data through the training pipeline to train the model.
#
# Before each epoch, shuffle the training set.
# # After each epoch, measure the loss and accuracy of the validation set. # # Save the model after training. # # You do not need to modify this section. with tf.Session() as sess: sess.run(tf.global_variables_initializer()) num_examples = len(X_train) print("Training...") print() for i in range(EPOCHS): X_train, y_train = shuffle(X_train, y_train) for offset in range(0, num_examples, BATCH_SIZE): end = offset + BATCH_SIZE batch_x, batch_y = X_train[offset:end], y_train[offset:end] sess.run(training_operation, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5}) validation_accuracy = evaluate(X_valid, y_valid) print("EPOCH {} ...".format(i+1)) print("Validation Accuracy = {:.3f}".format(validation_accuracy)) print() saver.save(sess, './lenet') print("Model saved") # ### Question 3 # Describe how you trained your model. # ### Answer # Training of the model was inspired from the LeNet labs but below are the parameters/hyperparameters that were used during the training:- # - Type of optimiizer user - AdamOptimizer # - Batch size - 100 # - Number of Epochs - 60 # - Learning Rate - 0.0009 # - Keep Probability - 0.5 # ## Evaluate the Model # Once you are completely satisfied with your model, evaluate the performance of the model on the test set. # # Be sure to only do this once! # # If you were to measure the performance of your trained model on the test set, then improve your model, and then measure the performance of your model on the test set again, that would invalidate your test results. You wouldn't get a true measure of how well your model would perform against real data. # # You do not need to modify this section. with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('.')) test_accuracy = evaluate(X_test, y_test) print("Test Accuracy = {:.3f}".format(test_accuracy)) # ### Question 4 # Describe the approach taken for finding a solution and getting the validation set accuracy to be at least 0.93. 
# ### Answer # - Validation set accuracy of 95.5% # - Test set accuracy of 94.8% # # Results on the training data produced promising ~95% accuracy as you can see in the above 4 part of the code. # The approach included the below steps: # 1. Load the data and splitting in into training, testing and validation sets for images and labels both # 2. Explore the data to ensure it has been loaded correctly and the sizing aligns with the requirements of the model # 3. Pre-process the image data using techniques like grayscaling and normalizing # 4. Design and train the model which further included creating the models (in this case LeNet and LeNet2), training the model, validating the accuracy and storing the model results # 5. Test/Evaluating the model on the test data # --- # # ## Step 3: Test a Model on New Images # # To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type. # # You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name. # ### Load and Output the Images # + # Reinitialize and re-import if starting a new kernel here import matplotlib.pyplot as plt # %matplotlib inline import tensorflow as tf import numpy as np import cv2 # + ### Load the images and plot them here. ### Feel free to use as many code cells as needed. 
#reading in an image import glob import matplotlib.image as mpimg fig, axs = plt.subplots(2,4, figsize=(4, 2)) fig.subplots_adjust(hspace = .2, wspace=.001) axs = axs.ravel() my_images = [] for i, img in enumerate(glob.glob('user-sample-images/*.png')): image = cv2.imread(img) axs[i].axis('off') axs[i].imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) my_images.append(image) my_images = np.asarray(my_images) my_images_gry = np.sum(my_images/3, axis=3, keepdims=True) my_images_normalized = (my_images_gry - 128)/128 print(my_images_normalized.shape) # - # ### Question 1 # Choose five German traffic signs found on the web and provide them in the report. For each image, discuss what quality or qualities might be difficult to classify. # ### Answer # I would think that darker images would be difficult to classify as compared to brighter ones. Also, the images with complex shapes would need more layers to classify as compared to an image with simple shapes such as squares or circles # ### Predict the Sign Type for Each Image # + ### Run the predictions here and use the model to output the prediction for each image. ### Make sure to pre-process the images with the same pre-processing pipeline used earlier. ### Feel free to use as many code cells as needed. my_labels = [34, 3, 18, 12, 38, 11, 25, 1] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver3 = tf.train.import_meta_graph('./lenet.meta') saver3.restore(sess, "./lenet") my_accuracy = evaluate(my_images_normalized, my_labels) print("Test Set Accuracy = {:.3f}".format(my_accuracy)) # - # ### Question 2 # Discuss the model's predictions on these new traffic signs and compare the results to predicting on the test set. # ### Answer # The model was able to correctly predict the test images for German traffic signs # 1. Turn left ahead # 2. Speed limit (60km/h) # 3. General caution # 4. Priority road # 5. Keep right # 6. Right-of-way at the next intersection # 7. Road work # 8. 
Speed limit (30km/h) # ### Analyze Performance # + ### Calculate the accuracy for these 5 new images. ### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images. ### General function to plot images def plot_figures(figures, nrows = 1, ncols=1, labels=None): fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=(12, 14)) axs = axs.ravel() for index, title in zip(range(len(figures)), figures): #print(len(figures[title].shape)) if(len(figures[title].shape) == 3): axs[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB)) else: axs[index].imshow(figures[title], plt.gray()) #axs[index].imshow(cv2.cvtColor(figures[title], cv2.COLOR_BGR2RGB)) if(labels != None): axs[index].set_title(labels[index]) else: axs[index].set_title(title) axs[index].set_axis_off() plt.tight_layout() k_size = 5 softmax_logits = tf.nn.softmax(logits) top_k = tf.nn.top_k(softmax_logits, k=k_size) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.import_meta_graph('./lenet.meta') saver.restore(sess, "./lenet") my_softmax_logits = sess.run(softmax_logits, feed_dict={x: my_images_normalized, keep_prob: 1.0}) my_top_k = sess.run(top_k, feed_dict={x: my_images_normalized, keep_prob: 1.0}) for i in range(8): figures = {} labels = {} figures[0] = my_images[i] labels[0] = "Original" for j in range(k_size): # print('Guess {} : ({:.0f}%)'.format(j+1, 100*my_top_k[0][i][j])) labels[j+1] = 'Guess {} : ({:.0f}%)'.format(j+1, 100*my_top_k[0][i][j]) figures[j+1] = X_valid[np.argwhere(y_valid == my_top_k[1][i][j])[0]].squeeze() # print() plot_figures(figures, 1, 6, labels) # - # ### Output Top 5 Softmax Probabilities For Each Image Found on the Web # For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). 
[`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here. # # The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image. # # `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids. # # Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability: # # ``` # # (5, 6) array # a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, # 0.12789202], # [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, # 0.15899337], # [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , # 0.23892179], # [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , # 0.16505091], # [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, # 0.09155967]]) # ``` # # Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces: # # ``` # TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], # [ 0.28086119, 0.27569815, 0.18063401], # [ 0.26076848, 0.23892179, 0.23664738], # [ 0.29198961, 0.26234032, 0.16505091], # [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], # [0, 1, 4], # [0, 5, 1], # [1, 3, 5], # [1, 4, 3]], dtype=int32)) # ``` # # Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices. # + ### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web. ### Feel free to use as many code cells as needed. 
fig, axs = plt.subplots(8,2, figsize=(9, 19)) axs = axs.ravel() for i in range(len(my_softmax_logits)*2): if i%2 == 0: axs[i].axis('off') axs[i].imshow(cv2.cvtColor(my_images[i//2], cv2.COLOR_BGR2RGB)) else: axs[i].bar(np.arange(n_classes), my_softmax_logits[(i-1)//2]) axs[i].set_ylabel('Softmax probability') # - # ### Question 3 # Describe how certain the model is when predicting on each of the five new images by looking at the softmax probabilities for each prediction. Provide the top 5 softmax probabilities for each image along with the sign type of each probability. # ### Answer # For all the test images, the model was able to accurately predict the traffic sign with a probability of 100%. The top five soft max probabilities were 100% as can seen in the image above. # ### Project Writeup # # Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file. # > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n", # "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. # --- # # ## Step 4 (Optional): Visualize the Neural Network's State with Test Images # # This Section is not required to complete but acts as an additional excersise for understaning the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. 
After successfully training your neural network you can see what it's feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol. # # Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable. # # For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image. 
#
# <figure>
#  <img src="visualize_cnn.png" width="380" alt="Combined Image" />
#  <figcaption>
#  <p></p>
#  <p style="text-align: center;"> Your output should look something like this (above)</p>
#  </figcaption>
# </figure>
#  <p></p>
#

# +
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.

# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry


def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
    """Plot every feature map of `tf_activation` for a single input image.

    Relies on the module-level TensorFlow session `sess` and placeholder `x`.
    `activation_min`/`activation_max` of -1 mean "use matplotlib's default
    contrast"; any other value pins vmin/vmax respectively.
    """
    # Here make sure to preprocess your image_input in a way your network expects
    # with size, normalization, etc. if needed
    # Note: x should be the same name as your network's tensorflow data placeholder variable
    # If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
    featuremaps = activation.shape[3]  # number of output channels in the layer
    plt.figure(plt_num, figsize=(15, 15))
    for featuremap in range(featuremaps):
        plt.subplot(6, 8, featuremap+1)  # sets the number of feature maps to show on each row and column
        plt.title('FeatureMap ' + str(featuremap))  # displays the feature map number
        # BUG FIX: the original test used `activation_min != -1 & activation_max != -1`.
        # `&` binds tighter than `!=`, so that parsed as
        # `activation_min != (-1 & activation_max) != -1` — not the intended
        # "both bounds were supplied". Use boolean `and` instead.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray")
# -

print("Let's take a look at an unprocessed image")
plt.figure(figsize=(1, 1))
# plt.imshow(my_images[0])
plt.imshow(cv2.cvtColor(my_images[0], cv2.COLOR_BGR2RGB))
plt.show()

# Reshape the processed image to add a leading batch axis:
# (1, 32, 32, C) — C is the channel count of my_images_normalized[0].
image_input = np.expand_dims(my_images_normalized[0], 0)

# Visualize feature maps for 1st conv layer. Depth is 6.
with tf.Session() as sess:
    saver.restore(sess, "./lenet")
    outputFeatureMap(image_input, layer1)

# Visualize feature maps for 2nd conv layer. Depth is 16.
with tf.Session() as sess:
    saver.restore(sess, "./lenet")
    outputFeatureMap(image_input, layer2)
Traffic_Sign_Classifier.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## MIDI util functions

# +
import rtmidi
from time import sleep


def open_midi_out_virtual_port(api=rtmidi.RtMidiOut.UNIX_JACK, clientName="PyMIDI"):
    """Open and return a virtual MIDI output port.

    Args:
        api: rtmidi backend API to use (default: JACK on Unix).
        clientName: name the port is registered under.
    """
    # Initialize midi out port.
    # BUG FIX: the api/clientName arguments were previously ignored in favor
    # of hard-coded duplicates of the defaults.
    midiout = rtmidi.RtMidiOut(api=api, clientName=clientName)

    # Print some debug info: list the names of all available ports
    ports = range(midiout.getPortCount())
    if ports:
        for i in ports:
            print(midiout.getPortName(i))

    # Open the virtual port
    print("Opening port 0!")
    midiout.openVirtualPort()

    return midiout


def program_change(midiout, channel, program):
    """Change the MIDI program (virtual instrument)."""
    # BUG FIX: the `program` argument was previously ignored and a constant
    # program number 1 was always sent.
    msg = rtmidi.MidiMessage.programChange(channel, program)
    midiout.sendMessage(msg)


def controller_event(midiout, channel, controller, level):
    """Change controller to have level."""
    msg = rtmidi.MidiMessage.controllerEvent(channel, controller, level)
    midiout.sendMessage(msg)


def volume_change(midiout, channel, level):
    """Change volume to level (controller 7 is channel volume)."""
    controller_event(midiout, channel, 7, level)


def play_note_on(midiout, channel, note, velocity):
    """Play note with id note with given velocity until shutoff."""
    msg = rtmidi.MidiMessage.noteOn(channel, note, velocity)
    midiout.sendMessage(msg)


def play_note_off(midiout, channel, note):
    """Shutoff note with id note."""
    msg = rtmidi.MidiMessage.noteOff(channel, note)
    midiout.sendMessage(msg)


def play_note(midiout, channel, note, velocity, time=1):
    """Play note with id note with given velocity for time seconds."""
    play_note_on(midiout, channel, note, velocity)
    sleep(time)
    play_note_off(midiout, channel, note)
# -

# ## Open the MIDI port

midiout = open_midi_out_virtual_port()
channel = 1

# ## Play some notes

# +
# Sound out a series of MIDI messages
program_change(midiout, channel, 1)

# Control change (volume)
volume_change(midiout, channel, 100)

# Play note 64 (E4) a few times
for _ in range(3):
    play_note(midiout, channel, note=64, velocity=127, time=1)
# -

# ## Close the port

midiout.closePort()

# TODO: Doesn't seem to work?
midi.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/RYCMDNT/CPEN-21A-CPE-1-2/blob/main/Operations%20and%20Expressions.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + colab={"base_uri": "https://localhost:8080/"} id="Lr6yOIJfiHWf" outputId="c208df7d-45c6-4f50-bd02-f69da2c7e184" #booleans represents one of two values: True or False print(20>9) print(20==20) print(2>10) # + colab={"base_uri": "https://localhost:8080/"} id="OXCMPsnEiuoC" outputId="29d33ef5-e90a-44a2-dd13-ffeb888b84ed" a=5 b=15 print(a==b) print(a>b) print(a<b) # + colab={"base_uri": "https://localhost:8080/"} id="vTK4mn0ZjS97" outputId="9f3bacd3-6425-49be-f1b6-d88be0805f0a" print(bool("Hello")) print(bool(15)) # + colab={"base_uri": "https://localhost:8080/"} id="kOJyqkiQjfwh" outputId="6c9a8032-316b-4a88-aab6-fa961e9c378d" print(bool(False)) print(bool("")) print(bool(0)) print(bool(None)) # + colab={"base_uri": "https://localhost:8080/"} id="fOviZTHqj_xh" outputId="1c6e417d-1a39-4799-bce8-4aeb652c77b7" def myFunction():return True print(myFunction()) # + id="mywy6MY4ks_i" outputId="dfab0585-6a6a-419d-a0ea-c5f07fbcaa57" colab={"base_uri": "https://localhost:8080/"} def myFunction():return True print(myFunction()) if myFunction(): print("YES") else: print("NO") # + id="F6rMehjllO9R" outputId="fd9288fb-0e06-4cbd-9d4c-fa65799f9791" colab={"base_uri": "https://localhost:8080/"} print(10+5) print(10-5) print(10/5) print(10*5)
Operations and Expressions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: ve_sales_prediction
#     language: python
#     name: ve_sales_prediction
# ---

# # <font color='#002726'> Data Science in Production </font>
#
# =-=- ROSSMANN - STORE SALES PREDICTION -=-=

# # <font color='#3F0094'> 0. Imports </font>

# +
# general use
import numpy as np
import pandas as pd

# helper function
import inflection

# feature engineering and data analysis
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import Image
from datetime import datetime, timedelta

# machine learning
# -

# ## <font color='#200CF'> 0.1. Helper Functions </font>

# +
# Notebook Setups
sns.set_style('darkgrid')
sns.set_context('talk')
sns.set_palette('Set2')


# Functions
def snakecase(list_of_names: list) -> list:
    """Return the given names converted to snake_case.

    Uses inflection.underscore, which lower-cases CamelCase names and
    separates the words with underscores (e.g. 'StoreType' -> 'store_type').
    """
    new_list = list(map(inflection.underscore, list_of_names))
    return new_list


# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 0.2. Loading Data </font>

# + hidden=true
# loading historical data - including Sales
df_sales_raw = pd.read_csv('../raw_data/train.csv', low_memory=False)

# loading information about the stores
df_store_raw = pd.read_csv('../raw_data/store.csv', low_memory=False)

# merging dataframes (left join keeps every sales record even if the store
# metadata is missing)
df_raw = pd.merge(df_sales_raw, df_store_raw, how='left', on='Store')

# + [markdown] hidden=true
# ### <font color='#F37126'> Data Fields </font>

# + [markdown] hidden=true
# **Most of the fields are self-explanatory. The following are descriptions for those that aren't.**
#
# - **Id** - an Id that represents a (Store, Date) duple within the test set;
# - **Store** - a unique Id for each store;
# - **Sales** - the turnover for any given day (this is what you are predicting);
# - **Customers** - the number of customers on a given day;
# - **Open** - an indicator for whether the store was open: 0 = closed, 1 = open;
# - **StateHoliday** - indicates a state holiday. Normally all stores, with few exceptions, are closed on state holidays. Note that all schools are closed on public holidays and weekends. a = public holiday, b = Easter holiday, c = Christmas, 0 = None;
# - **SchoolHoliday** - indicates if the (Store, Date) was affected by the closure of public schools;
# - **StoreType** - differentiates between 4 different store models: a, b, c, d;
# - **Assortment** - describes an assortment level: a = basic, b = extra, c = extended;
# - **CompetitionDistance** - distance in meters to the nearest competitor store;
# - **CompetitionOpenSince[Month/Year]** - gives the approximate year and month of the time the nearest competitor was opened;
# - **Promo** - indicates whether a store is running a promo on that day;
# - **Promo2** - Promo2 is a continuing and consecutive promotion for some stores: 0 = store is not participating, 1 = store is participating;
# - **Promo2Since[Year/Week]** - describes the year and calendar week when the store started participating in Promo2;
# - **PromoInterval** - describes the consecutive intervals Promo2 is started, naming the months the promotion is started anew. E.g. "Feb,May,Aug,Nov" means each round starts in February, May, August, November of any given year for that store.
# -

# # <font color='#3F0094'> 1. Descriptive Data Analysis </font>

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 1.0. Dataframe in Progress Backup </font>

# + hidden=true
df1 = df_raw.copy()

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 1.1. Column Renaming </font>

# + hidden=true
# renaming df1 column names
df1.columns = snakecase(df1.columns)

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 1.2. Data Dimension </font>

# + hidden=true
print(f'Store Dataframe - Number of Rows: {df1.shape[0]}. \nStore Dataframe - Number of Columns: {df1.shape[1]}.')

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 1.3. Data Types </font>

# + hidden=true
# dataframe data types
df1.dtypes

# + hidden=true
# setting date column as datetime type
df1['date'] = pd.to_datetime(df1['date'])

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 1.4. NA Check </font>

# + hidden=true
# checking NA - All NA values came from store.csv
df1.isna().sum()

# + hidden=true
# checking NA using info()
df1.info()

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 1.5. Filling in Missing/Null Values </font>

# + [markdown] hidden=true
# **Number of NA Values**
#
# competition_distance              2642
#
# competition_open_since_month    323348
# competition_open_since_year     323348
#
# promo2_since_week               508031
# promo2_since_year               508031
# promo_interval                  508031

# + hidden=true
# competition_distance

# maximum distance x 2
max_dist_x_2 = df1['competition_distance'].max() * 2

# assuming competitors are twice as far away as the greatest distance found
df1['competition_distance'] = df1['competition_distance'].apply(lambda x: max_dist_x_2 if np.isnan(x) else x)

# competition_open_since_year

# frequency per year of existing competition_open_since_year data
frequency = df1['competition_open_since_year'].value_counts(
    normalize=True).reset_index().rename(
    columns={'index': 'year', 'competition_open_since_year': 'percent'})

# True/False missing/Null Series
missing = df1['competition_open_since_year'].isna()

# Using Numpy's random.choice to fill out missing data based on the frequency of existing info
# NOTE(review): np.random.choice is unseeded here, so the imputation is not
# reproducible between runs — consider fixing a seed.
df1.loc[missing,'competition_open_since_year'] = np.random.choice(frequency.year,
                                                                  size=len(df1[missing]),
                                                                  p=frequency.percent)

# competition_open_since_month

# frequency per month of existing competition_open_since_month data
frequency = df1['competition_open_since_month'].value_counts(
    normalize=True).reset_index().rename(
    columns={'index': 'month', 'competition_open_since_month': 'percent'})

# True/False missing/Null Series
missing = df1['competition_open_since_month'].isna()

# Using Numpy's random.choice to fill out missing data based on the frequency of existing info
df1.loc[missing,'competition_open_since_month'] = np.random.choice(frequency.month,
                                                                   size=len(df1[missing]),
                                                                   p=frequency.percent)

# promo2_since_week AND promo2_since_year
# the same date of sale will be used as a reference to fill in the NA values
# then a new timedelta column will be created (promo2_duration)

# promo2_since_week
# NOTE(review): Timestamp.week is deprecated in recent pandas; consider
# x['date'].isocalendar()[1] — confirm against the pinned pandas version.
df1['promo2_since_week'] = df1[['date', 'promo2_since_week']].apply(lambda x: x['date'].week if np.isnan(x['promo2_since_week']) else x['promo2_since_week'], axis=1)

# promo2_since_year
df1['promo2_since_year'] = df1[['date', 'promo2_since_year']].apply(lambda x: x['date'].year if np.isnan(x['promo2_since_year']) else x['promo2_since_year'], axis=1)

# promo_interval
# filling in NA with 'none'
df1['promo_interval'].fillna(value='none', inplace=True)

# creating a column with current month (abbreviated name, e.g. 'Feb')
df1['curr_month'] = df1['date'].dt.strftime('%b')

# creating a column to indicate whether promo2 is active:
# the sale month must be in promo_interval AND the sale date must be on or
# after the Monday of the ISO week in which the store joined promo2
df1['promo2_active'] = df1.apply(lambda x: 1 if ((
    x['curr_month'] in x['promo_interval'].split(',')) and (
    x['date'] >= datetime.fromisocalendar(int(x['promo2_since_year']), int(x['promo2_since_week']), 1))) else 0, axis=1)

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 1.6. Changing Data Types </font>

# + hidden=true
df1.dtypes

# + hidden=true
# Changing DTypes from float to integer
df1['competition_distance'] = df1['competition_distance'].astype(int)
df1['competition_open_since_month'] = df1['competition_open_since_month'].astype(int)
df1['competition_open_since_year'] = df1['competition_open_since_year'].astype(int)
df1['promo2_since_week'] = df1['promo2_since_week'].astype(int)
df1['promo2_since_year'] = df1['promo2_since_year'].astype(int)
# -

# ## <font color='#200CF'> 1.7. Descriptive Statistics </font>

# + [markdown] heading_collapsed=true
# ### <font color='#2365FF'> 1.7.0. Numeric vs Categorical - Attributes Split </font>

# + hidden=true
# dataframe - numeric attributes
df_numeric = df1.select_dtypes(include=['int64', 'float64'])

# dataframe - categorical attributes
df_categorical = df1.select_dtypes(exclude=['int64', 'float64', 'datetime64[ns]'])

# + [markdown] heading_collapsed=true
# ### <font color='#2365FF'> 1.7.1. Numeric Attributes </font>

# + hidden=true
# using DF describe() method
df1.describe().T

# + hidden=true
# central tendency metrics - mean, median
ct_mean = df_numeric.apply(np.mean)
ct_median = df_numeric.apply(np.median)

# dispersion metrics - std, min, max, range, skew, kurtosis
d_std = df_numeric.apply(np.std)
d_min = df_numeric.apply(min)
d_max = df_numeric.apply(max)
d_range = df_numeric.apply(lambda x: x.max() - x.min())
d_skew = df_numeric.apply(lambda x: x.skew())
d_kurtosis = df_numeric.apply(lambda x: x.kurtosis())

# one summary row per numeric attribute
metrics = pd.DataFrame({
    'min': d_min,
    'max': d_max,
    'range': d_range,
    'mean': ct_mean,
    'median': ct_median,
    'std': d_std,
    'skew': d_skew,
    'kurtosis': d_kurtosis
})

metrics

# + [markdown] hidden=true
# **competition_distance**
# - Skew: highly skewed data, high positive value means that the right-hand tail is much longer than the left-hand tail.
# - Kurtosis: increases as the tails become heavier, the high positive value indicates a very peaked curve.
#
# **competition_open_since_year**
# - Skew: highly skewed data, high negative value means that the left-hand tail is longer than the right-hand tail.
# - Kurtosis: increases as the tails become heavier, the high positive value indicates a very peaked curve.
#
# **sales**
# - Skewness is close to zero, indicating that the data is not too skewed

# + hidden=true
# sales histogram - not considering when sales is zero
ax = sns.histplot(data=df_numeric[df_numeric['sales'] > 0], x='sales', stat='proportion', bins=100)
ax.figure.set_size_inches(14, 7)
ax.set_title('Sales Histogram', fontsize=20, pad=10)
median = np.median(df_numeric['sales'])
ax.vlines(x=5744, ymin=0, ymax=0.07, linestyles='dashed', label='median', colors='firebrick')
ax.annotate(f'median = {median}', xy=(7000, 0.061), fontsize=14, color='firebrick')
ax;

# + [markdown] heading_collapsed=true
# ### <font color='#2365FF'> 1.7.2. Categorical Attributes </font>

# + hidden=true
# verifying unique values for each categorical attribute
df_categorical.apply(lambda x: len(x.unique()))

# + [markdown] hidden=true
# **BOXPLOT OF CATEGORICAL ATTRIBUTES**

# + hidden=true
# Boxplot - Categorical Attributes
# not considering when: sales = 0
aux = df1[df1['sales'] > 0]

plt.figure(figsize=(24,10))

plt.subplot(1, 3, 1)
ax1 = sns.boxplot(x='state_holiday', y='sales', data=aux)
ax1.set_title('Boxplot - state_holiday', fontsize=18, pad=10)
ax1.set_xticklabels(labels=['None', 'Public', 'Easter', 'Christmas'])

plt.subplot(1, 3, 2)
ax2 = sns.boxplot(x='store_type', y='sales', data=aux)
ax2.set_title('Boxplot - store_type', fontsize=18, pad=10)

plt.subplot(1, 3, 3)
ax3 = sns.boxplot(x='assortment', y='sales', data=aux)
ax3.set_title('Boxplot - assortment', fontsize=18, pad=10)

plt.show()

# + [markdown] hidden=true
# **BOXPLOT OF BINARY CATEGORICAL ATTRIBUTES**

# + hidden=true
# Boxplot - Binary Categorical Attributes
plt.figure(figsize=(24,10))

plt.subplot(1, 3, 1)
ax1 = sns.boxplot(x='promo', y='sales', data=df1)
ax1.set_title('Boxplot - promo', fontsize=18, pad=10)

plt.subplot(1, 3, 2)
ax2 = sns.boxplot(x='promo2_active', y='sales', data=df1)
ax2.set_title('Boxplot - promo2_active', fontsize=18, pad=10)

plt.subplot(1, 3, 3)
ax3 = sns.boxplot(x='school_holiday', y='sales', data=df1)
ax3.set_title('Boxplot - school_holiday', fontsize=18, pad=10)

plt.show()
# -

# # <font color='#3F0094'> 2. Feature Engineering </font>

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 2.0. Dataframe in Progress Backup </font>

# + hidden=true
df2 = df1.copy()

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 2.1. Mind Map </font>

# + hidden=true
# made on coggle.it
Image('../img/mind_map01.png')

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 2.2. Hypothesis </font>

# + [markdown] hidden=true
# **Based on Descriptive Statistics and on Mind Map**

# + [markdown] hidden=true
# ### <font color='#2365FF'> 2.1.1. Store-related Hypothesis </font>

# + [markdown] hidden=true
# **a1.** The median sales of stores with the largest assortment should be the highest.
#
# **a2.** The average value of sales for a specific type of store is higher than the average value for other types (store types: a, b, c, d).
#
# **a3.** The average sales value of stores with competitors whose distance is less than 1000 meters is lower than the average value of other stores.
#
# **a4.** The average sales values of stores whose competitors opened less than 18 months ago are lower than the average values of other stores.

# + [markdown] hidden=true
# ### <font color='#2365FF'> 2.1.2. Product-related Hypothesis </font>

# + [markdown] hidden=true
# **b1.** The median sales value of stores should be greater when running a promo (promo) than when not running a promo.
#
# **b2.** The median sales value of stores with continuing and consecutive promotion (promo2) should be greater than those that do not have extended promotion.
#
# **b3.** The median sales value of stores on sale (promo2) for a longer period of time should be higher than stores on sale for a shorter period of time.

# + [markdown] hidden=true
# ### <font color='#2365FF'> 2.1.3. Time-related Hypothesis </font>

# + [markdown] hidden=true
# **c1.** The average ticket per customer should be lower during holiday periods.
#
# **c2.** Stores affected by the closure of public schools on school holidays should sell less, on average.
#
# **c3.** The revenue in the second half should be higher than in the first half.
#
# **c4.** Average sales during the winter should be higher than during the rest of the year.
#
# **c5.** Average sales during the weekend should be lower than during the rest of the week.

# + [markdown] heading_collapsed=true
# ## <font color='#200CF'> 2.3. Feature Engineering </font>

# + hidden=true
# year
df2['year'] = df2['date'].dt.year

# month
df2['month'] = df2['date'].dt.month

# day
df2['day'] = df2['date'].dt.day

# week_of_year
df2['week_of_year'] = df2['date'].dt.isocalendar().week.astype('int64')

# competition_months_old
# calculating the competition period, extracting the days and dividing by 30 to get the period in months
df2['competition_months_old'] = df2.apply(lambda x: (
    x['date'] - datetime(year=x['competition_open_since_year'], month=x['competition_open_since_month'], day=1)).days / 30, axis=1).astype(int)

# promo2_months_old
# calculation method: zero(0) if promo2_active is zero(0) else (actual_date - promo2_starting_date) >> timedelta format
# >> then use .days and divide by 30 to extract the number of months >> as integer
df2['promo2_months_old'] = df2.apply(lambda x: 0 if x['promo2_active'] == 0 else (
    x['date'] - datetime.fromisocalendar(x['promo2_since_year'], x['promo2_since_week'], 1)).days / 30, axis=1).astype(int)

# assortment: map single-letter codes to readable labels
df2['assortment'] = df2['assortment'].map({'a': 'basic', 'b': 'extra', 'c': 'extended'})

# state_holiday: map single-letter codes to readable labels
df2['state_holiday'] = df2['state_holiday'].map({'0': 'none',
                                                 'a': 'public', 'b': 'easter', 'c': 'christmas'})

# =-=-=-=- WARNING: EDA USE ONLY -=-=-=-=
# customer_avg_ticket: sales per customer; NaN (division by zero customers)
# is replaced with 0
df2['customers_avg_ticket'] = (df2['sales'] / df2['customers'])
df2['customers_avg_ticket'].fillna(value=0, inplace=True)
# -

# # <font color='#3F0094'> 3. Feature Filtering </font>

# ## <font color='#200CF'> 3.0. Dataframe in Progress Backup </font>

df3 = df2.copy()

# ## <font color='#200CF'> 3.1. Filtering Rows </font>

# eliminating all records where stores are closed and sales are zero
df3 = df3[(df3['open'] != 0) & (df3['sales'] > 0)]

# ## <font color='#200CF'> 3.2. Filtering Columns </font>

# **customers:** the number of customers will not be available to be used in the model prediction, as it is an unknown and variable value in the future.
#
# **open:** the open column has record 1 only.
#
# **promo_interval, curr_month:** auxiliary columns already used in the feature engineering step.
#
# **Important Warning:** the column **customers_avg_ticket** will only be used during EDA and will be discarded later.

# list of columns to be dropped
cols_drop = ['customers', 'open', 'promo_interval', 'curr_month']
df3.drop(cols_drop, axis=1, inplace=True)

df3.shape
notebooks/stg03_feature_filtering.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Dtype # ## Data Type Object # Let's look into how you might generate positions from signals. To do that, we first need to know about `dtype` or data type objects in Numpy. # # A [data type object](https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.dtypes.html) is a class that represents the data. It's similar to a [data type](data type), but contains [more information](https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.dtypes.html) about the data. Let's see an example of a data type object in Numpy using the array `array`. # + import numpy as np array = np.arange(10) print(array) print(type(array)) print(array.dtype) # - # From this, we see `array` is a `numpy.ndarray` with the data `[0 1 2 3 4 5 6 7 8 9]` represented as `int64` (64-bit integer). # # Let's see what happens when we divide the data by 2 to generate not integer data. # + float_arr = array / 2 print(float_arr) print(type(float_arr)) print(float_arr.dtype) # - # The array returned has the values `[ 0. 0.5 1. 1.5 2. 2.5 3. 3.5 4. 4.5]`, which is what you would expect for divinding by 2. However, since this data can't be represeted by integers, the array is now represented as `float64` (64-bit float). # # How would we convert this back to `int64`? We'll use the [`ndarray.astype`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.astype.html) function to cast it from it's current type to the type of `int64` (`np.int64`). # + int_arr = float_arr.astype(np.int64) print(int_arr) print(type(int_arr)) print(int_arr.dtype) # - # This casts the data to `int64`, but all also changes the data. Since fractions can't be represented as integers, the decimal place is dropped. 
#
# ## Signals to Positions
# Now that you've seen how a [data type object](https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.dtypes.html) is used in Numpy, let's see how to use it to generate positions from signals. Let's use `prices` array to represent the prices in dollars over time for a single stock.

# +
prices = np.array([1, 3, -2, 9, 5, 7, 2])

prices
# -

# For the positions, let's say we want to buy one share of stock when the price is above 2 dollars and then buy 3 more shares when it's above 4 dollars. We'll first need to generate the signal for these two positions.

# +
signal_one = prices > 2
signal_three = prices > 4

print(signal_one)
print(signal_three)
# -

# This gives us the points in time for the signals above 2 dollars and above 4 dollars. To turn this into positions, we need to multiply each array by the respective amount to invest. We first need to turn each signal into an integer using the [`ndarray.astype`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.astype.html) function.

# +
# BUG FIX: the `np.int` alias (an alias for the builtin `int`) was deprecated
# in NumPy 1.20 and removed in NumPy 1.24, so `astype(np.int)` crashes on
# modern NumPy. Cast to the explicit 64-bit integer dtype instead.
signal_one = signal_one.astype(np.int64)
signal_three = signal_three.astype(np.int64)

print(signal_one)
print(signal_three)
# -

# Now we multiply each array by the respective amount to invest.

# +
pos_one = 1 * signal_one
pos_three = 3 * signal_three

print(pos_one)
print(pos_three)
# -

# If we add them together, we have the final position of the stock over time.
# +
long_pos = pos_one + pos_three

print(long_pos)
# -

# ## Quiz
# Using this information, implement `generate_positions` using Pandas's [`df.astype`](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DataFrame.astype.html) function to convert `prices` to final positions using the following signals:
# - Long 30 share of stock when the price is above 50 dollars
# - Short 10 shares of stock when it's below 20 dollars

# +
import project_tests


def generate_positions(prices):
    """
    Generate the following signals:
     - Long 30 share of stock when the price is above 50 dollars
     - Short 10 shares when it's below 20 dollars

    Parameters
    ----------
    prices : DataFrame
        Prices for each ticker and date

    Returns
    -------
    final_positions : DataFrame
        Final positions for each ticker and date
    """
    # BUG FIX: `np.int` was removed in NumPy 1.24; `np.int64` keeps the
    # integer position values while working on modern NumPy.
    return 30 * (prices > 50).astype(np.int64) - 10 * (prices < 20).astype(np.int64)


project_tests.test_generate_positions(generate_positions)
# -

# ## Quiz Solution
# If you're having trouble, you can check out the quiz solution [here](dtype_solution.ipynb).
module01/lesson08_momentum_trading/05_dtype/dtype.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline

# Load the heart-disease dataset; 'target' is the label column.
data = pd.read_csv('heart.csv')

data.head()

data.info()

data.isnull().sum()

data.describe()

# +
import seaborn as sns

# Correlation heatmap over all numeric columns.
corr = data.corr()
plt.figure(figsize = (15,15))
sns.heatmap(corr, annot = True)
# -

corr

# Class balance of the target.
sns.set_style('whitegrid')
sns.countplot(x = 'target', data = data)

# +
# dataset = pd.get_dummies(data, columns = ['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'ca', 'thal'])
# -

dataset = data.copy()

dataset.head()

# Split features/label.
X = dataset.drop(['target'], axis = 1)
y = dataset['target']

X.columns

# +
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
# -

from sklearn.ensemble import RandomForestClassifier

# Baseline (untuned) model.
model = RandomForestClassifier(n_estimators=20)
model.fit(X_train, y_train)

pred = model.predict(X_test)
pred[:10]

from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, pred)

from sklearn.metrics import accuracy_score
print(f"Accuracy of model is {round(accuracy_score(y_test, pred)*100, 2)}%")

# ## Hyperparameter Tuning

from sklearn.model_selection import RandomizedSearchCV

classifier = RandomForestClassifier(n_jobs = -1)

from scipy.stats import randint
# Search space for the randomized search.
param_dist={'max_depth':[3,5,10,None],
            'n_estimators':[10,100,200,300,400,500],
            'max_features':randint(1,31),
            'criterion':['gini','entropy'],
            'bootstrap':[True,False],
            'min_samples_leaf':randint(1,31),
           }

search_clfr = RandomizedSearchCV(classifier, param_distributions = param_dist, n_jobs=-1, n_iter = 40, cv = 9)

search_clfr.fit(X_train, y_train)

params = search_clfr.best_params_
score = search_clfr.best_score_
print(params)
print(score)

# Refit a model with the tuned hyperparameters.
# BUG FIX: this assignment was previously misspelled as `claasifier`, so the
# tuned model was never used — the following fit() retrained the *untuned*
# RandomForestClassifier(n_jobs=-1), silently discarding the search results.
classifier = RandomForestClassifier(n_jobs=-1, n_estimators=400, bootstrap=False,
                                    criterion='gini', max_depth=5, max_features=3,
                                    min_samples_leaf=7)

classifier.fit(X_train, y_train)

confusion_matrix(y_test, classifier.predict(X_test))

print(f"Accuracy is {round(accuracy_score(y_test, classifier.predict(X_test))*100,2)}%")

import pickle
# Persist the tuned model. BUG FIX: use a context manager so the file handle
# is closed (and the pickle flushed) deterministically.
with open('heart.pkl', 'wb') as f:
    pickle.dump(classifier, f)
Notebooks/Heart_Disease_Prediction.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import cv2 img = cv2.imread('data/src/lena.jpg') print(type(img)) print(img.shape) img_flip_ud = cv2.flip(img, 0) cv2.imwrite('data/dst/lena_cv_flip_ud.jpg', img_flip_ud) img_flip_lr = cv2.flip(img, 1) cv2.imwrite('data/dst/lena_cv_flip_lr.jpg', img_flip_lr) img_flip_ud_lr = cv2.flip(img, -1) cv2.imwrite('data/dst/lena_cv_flip_ud_lr.jpg', img_flip_ud_lr)
notebook/opencv_flip.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Any learning algorithm will always have strengths and weaknesses: a single model is unlikely to fit every possible scenario. Ensembles combine multiple models to achieve higher generalization performance than any of the constituent models is capable of. How do we assemble the weak learners? We can use some sequential heuristics. For instance, given the current collection of models, we can add one more based on where that particular model performs well. Alternatively, we can look at all the correlations of the predictions between all models, and optimize for the most uncorrelated predictors. Since the latter is a global approach, it naturally maps to a quantum computer. But first, let's take a closer look at loss functions and regularization, two key concepts in machine learning.

# # Loss Functions and Regularization
#
# If you can solve a problem by a classical computer -- be it a laptop or a massive GPU cluster -- there is little value in solving it by a quantum computer that costs ten million dollars. The interesting question in quantum machine learning is whether there are problems in machine learning and AI that fit quantum computers naturally, but are challenging on classical hardware. This, however, requires a good understanding of both machine learning and contemporary quantum computers.
#
# In this course, we primarily focus on the second aspect, since there is no shortage of educational material on classical machine learning. However, it is worth spending a few minutes on going through some basics.
#
# Let us take a look at the easiest possible problem: the data points split into two, easily distinguishable sets.
We randomly generate this data set: # + import matplotlib.pyplot as plt import numpy as np # %matplotlib inline c1 = np.random.rand(50, 2)/5 c2 = (-0.6, 0.5) + np.random.rand(50, 2)/5 data = np.concatenate((c1, c2)) labels = np.array([0] * 50 + [1] *50) plt.figure(figsize=(6, 6)) plt.subplot(111, xticks=[], yticks=[]) plt.scatter(data[:50, 0], data[:50, 1], color='navy') plt.scatter(data[50:, 0], data[50:, 1], color='c'); # - # Let's shuffle the data set into a training set that we are going to optimize over (2/3 of the data), and a test set where we estimate our generalization performance. idx = np.arange(len(labels)) np.random.shuffle(idx) # train on a random 2/3 and test on the remaining 1/3 idx_train = idx[:2*len(idx)//3] idx_test = idx[2*len(idx)//3:] X_train = data[idx_train] X_test = data[idx_test] y_train = labels[idx_train] y_test = labels[idx_test] # We will use the package `scikit-learn` to train various machine learning models. import sklearn import sklearn.metrics metric = sklearn.metrics.accuracy_score # Let's train a perceptron, which has a linear loss function $\frac{1}{N}\sum_{i=1}^N |h(x_i)-y_i)|$: from sklearn.linear_model import Perceptron model_1 = Perceptron() model_1.fit(X_train, y_train) print('accuracy (train): %5.2f'%(metric(y_train, model_1.predict(X_train)))) print('accuracy (test): %5.2f'%(metric(y_test, model_1.predict(X_test)))) # It does a great job. It is a linear model, meaning its decision surface is a plane. Our dataset is separable by a plane, so let's try another linear model, but this time a support vector machine. If you eyeball our dataset, you will see that to define the separation between the two classes, actually only a few points close to the margin are relevant. These are called support vectors and support vector machines aim to find them. 
Its objective function measures The $C$ hyperparameter controls a regularization term that penalizes the objective for the number of support vectors: from sklearn.svm import SVC model_2 = SVC(kernel='linear', C=1) model_2.fit(X_train, y_train) print('accuracy (train): %5.2f'%(metric(y_train, model_2.predict(X_train)))) print('accuracy (test): %5.2f'%(metric(y_test, model_2.predict(X_test)))) print('Number of support vectors:', sum(model_2.n_support_)) # It picks only a few datapoints out of the hundred. Let's change the hyperparameter to reduce the penalty: model_2 = SVC(kernel='linear', C=0.01) model_2.fit(X_train, y_train) print('accuracy (train): %5.2f'%(metric(y_train, model_2.predict(X_train)))) print('accuracy (test): %5.2f'%(metric(y_test, model_2.predict(X_test)))) print('Number of support vectors:', sum(model_2.n_support_)) # You can see that the model gets confused by using two many datapoints in the final classifier. This is one example where regularization helps. # # Ensemble methods # # Ensembles yield better results when there is considerable diversity among the base classifiers. If diversity is sufficient, base classifiers make different errors, and a strategic combination may reduce the total error, ideally improving generalization performance. A constituent model in an ensemble is also called a base classifier or weak learner, and the composite model a strong learner. # # The generic procedure of ensemble methods has two steps. First, develop a set of base classifiers from the training data. Second, combine them to form the ensemble. In the simplest combination, the base learners vote, and the label prediction is based on majority. More involved methods weigh the votes of the base learners. # # Let us import some packages and define our figure of merit as accuracy in a balanced dataset. 
# + import matplotlib.pyplot as plt import numpy as np import sklearn import sklearn.datasets import sklearn.metrics # %matplotlib inline metric = sklearn.metrics.accuracy_score # - # We generate a random dataset of two classes that form concentric circles: # + np.random.seed(0) data, labels = sklearn.datasets.make_circles() idx = np.arange(len(labels)) np.random.shuffle(idx) # train on a random 2/3 and test on the remaining 1/3 idx_train = idx[:2*len(idx)//3] idx_test = idx[2*len(idx)//3:] X_train = data[idx_train] X_test = data[idx_test] y_train = 2 * labels[idx_train] - 1 # binary -> spin y_test = 2 * labels[idx_test] - 1 scaler = sklearn.preprocessing.StandardScaler() normalizer = sklearn.preprocessing.Normalizer() X_train = scaler.fit_transform(X_train) X_train = normalizer.fit_transform(X_train) X_test = scaler.fit_transform(X_test) X_test = normalizer.fit_transform(X_test) plt.figure(figsize=(6, 6)) plt.subplot(111, xticks=[], yticks=[]) plt.scatter(data[labels == 0, 0], data[labels == 0, 1], color='navy') plt.scatter(data[labels == 1, 0], data[labels == 1, 1], color='c'); # - # Let's train a perceptron: from sklearn.linear_model import Perceptron model_1 = Perceptron() model_1.fit(X_train, y_train) print('accuracy (train): %5.2f'%(metric(y_train, model_1.predict(X_train)))) print('accuracy (test): %5.2f'%(metric(y_test, model_1.predict(X_test)))) # Since its decision surface is linear, we get a poor accuracy. Would a support vector machine with a nonlinear kernel fare better? from sklearn.svm import SVC model_2 = SVC(kernel='rbf') model_2.fit(X_train, y_train) print('accuracy (train): %5.2f'%(metric(y_train, model_2.predict(X_train)))) print('accuracy (test): %5.2f'%(metric(y_test, model_2.predict(X_test)))) # It performs better on the training set, but at the cost of extremely poor generalization. # # Boosting is an ensemble method that explicitly seeks models that complement one another. 
The variation between boosting algorithms is how they combine weak learners. Adaptive boosting (AdaBoost) is a popular method that combines the weak learners in a sequential manner based on their individual accuracies. It has a convex objective function that does not penalize for complexity: it is likely to include all available weak learners in the final ensemble. Let's train AdaBoost with a few weak learners: from sklearn.ensemble import AdaBoostClassifier model_3 = AdaBoostClassifier(n_estimators=3) model_3.fit(X_train, y_train) print('accuracy (train): %5.2f'%(metric(y_train, model_3.predict(X_train)))) print('accuracy (test): %5.2f'%(metric(y_test, model_3.predict(X_test)))) # Its performance is marginally better than that of the SVM. # # QBoost # # The idea of Qboost is that optimization on a quantum computer is not constrained to convex objective functions, therefore we can add arbitrary penalty terms and rephrase our objective [[1](#1)]. Qboost solves the following problem: # # $$ # \mathrm{argmin}_{w} \left(\frac{1}{N}\sum_{i=1}^{N}\left(\sum_{k=1}^{K}w_kh_k(x_i)- # y_i\right)^2+\lambda\|w\|_0\right), # $$ # # where $h_k(x_i)$ is the prediction of the weak learner $k$ for a training instance $k$. The weights in this formulation are binary, so this objective function is already maps to an Ising model. The regularization in the $l_0$ norm ensures sparsity, and it is not the kind of regularization we would consider classically: it is hard to optimize with this term on a digital computer. # # Let us expand the quadratic part of the objective: # # $$ # \mathrm{argmin}_{w} \left(\frac{1}{N}\sum_{i=1}^{N} # \left( \left(\sum_{k=1}^{K} w_k h_k(x_i)\right)^{2} - # 2\sum_{k=1}^{K} w_k h_k(\mathbf{x}_i)y_i + y_i^{2}\right) + \lambda \|w\|_{0} # \right). 
# $$
#
# Since $y_i^{2}$ is just a constant offset, the optimization reduces to
#
# $$
# \mathrm{argmin}_{w} \left(
# \frac{1}{N}\sum_{k=1}^{K}\sum_{l=1}^{K} w_k w_l
# \left(\sum_{i=1}^{N}h_k(x_i)h_l(x_i)\right) -
# \frac{2}{N}\sum_{k=1}^{K}w_k\sum_{i=1}^{N} h_k(x_i)y_i +
# \lambda \|w\|_{0} \right).
# $$
#
# This form shows that we consider all correlations between the predictions of the weak learners: there is a summation of $h_k(x_i)h_l(x_i)$. Since this term has a positive sign, we penalize for correlations. On the other hand, the correlation with the true label, $h_k(x_i)y_i$, has a negative sign. The regularization term remains unchanged.
#
# Let us consider all three models from the previous section as weak learners.

models = [model_1, model_2, model_3]

# We calculate their predictions and set $\lambda$ to 1. The predictions are scaled to reflect the averaging in the objective.

# +
n_models = len(models)

predictions = np.array([h.predict(X_train) for h in models], dtype=np.float64)
# scale hij to [-1/N, 1/N]
predictions *= 1/n_models

λ = 1
# -

# We create the quadratic binary optimization of the objective function as we expanded above:

# Off-diagonal entries of w are the pairwise correlations of the (scaled)
# weak-learner predictions; the diagonal collects the self-correlation term,
# the l0 penalty λ, and the (negative) correlation with the true labels,
# matching the expanded objective above.
w = np.dot(predictions, predictions.T)
wii = len(X_train) / (n_models ** 2) + λ - 2 * np.dot(predictions, y_train)
w[np.diag_indices_from(w)] = wii
# w is symmetric, so only the upper triangle is passed to the QUBO sampler.
W = {}
for i in range(n_models):
    for j in range(i, n_models):
        W[(i, j)] = w[i, j]

# We solve the quadratic binary optimization with simulated annealing and read out the optimal weights:

import dimod
sampler = dimod.SimulatedAnnealingSampler()
response = sampler.sample_qubo(W, num_reads=10)
weights = list(response.first.sample.values())

# We define a prediction function to help with measuring accuracy:

def predict(models, weights, X):
    """Return the ensemble's spin-valued (±1) predictions for the rows of X,
    combining the weak learners with the given binary weights."""
    n_data = len(X)
    T = 0
    y = np.zeros(n_data)
    for i, h in enumerate(models):
        y0 = weights[i] * h.predict(X)  # prediction of weak classifier
        y += y0
        T += np.sum(y0)
    # Centre the weighted vote by its grand mean before taking the sign,
    # since the labels are spin-valued (±1).
    y = np.sign(y - T / (n_data*len(models)))
    return y

print('accuracy (train): %5.2f'%(metric(y_train, predict(models, weights, X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, predict(models, weights, X_test))))

# The accuracy coincides with that of our strongest weak learner, the AdaBoost model. Looking at the optimal weights, this is apparent:

weights

# Only AdaBoost made it to the final ensemble. The first two models perform poorly and their predictions are correlated. Yet, if you remove regularization by setting $\lambda=0$ above, the second model also enters the ensemble, decreasing overall performance. This shows that the regularization is in fact important.
#
# # Solving by QAOA
#
# Since eventually our problem is just an Ising model, we can also solve it on a gate-model quantum computer by QAOA. Let us explicitly map the binary optimization to the Ising model:

h, J, offset = dimod.qubo_to_ising(W)

# We have to translate the Ising couplings to be suitable for solving by the QAOA routine:

# +
from pyquil import Program, api
from pyquil.paulis import PauliSum, PauliTerm
from scipy.optimize import fmin_bfgs
from grove.pyqaoa.qaoa import QAOA
from forest_tools import *

qvm_server, quilc_server, fc = init_qvm_and_quilc('/home/local/bin/qvm', '/home/local/bin/quilc')
qvm = api.QVMConnection(endpoint=fc.sync_endpoint, compiler_endpoint=fc.compiler_endpoint)

num_nodes = w.shape[0]
ising_model = []
for i in range(num_nodes):
    # Single-qubit Z terms carry the linear (h) coefficients ...
    ising_model.append(PauliSum([PauliTerm("Z", i, h[i])]))
    for j in range(i+1, num_nodes):
        # ... and two-qubit ZZ terms carry the quadratic (J) couplings.
        ising_model.append(PauliSum([PauliTerm("Z", i, J[i, j]) * PauliTerm("Z", j, 1.0)]))
# -

# Next we run the optimization:

p = 1
Hm = [PauliSum([PauliTerm("X", i, 1.0)]) for i in range(num_nodes)]
qaoa = QAOA(qvm, qubits=range(num_nodes), steps=p, ref_ham=Hm, cost_ham=ising_model,
            store_basis=True, minimizer=fmin_bfgs, minimizer_kwargs={'maxiter': 50})
ν, γ = qaoa.get_angles()
program = qaoa.get_parameterized_program()(np.hstack((ν, γ)))
measures = qvm.run_and_measure(program, range(num_nodes), trials=100)
measures = np.array(measures)

# Let's look at the solutions found:

hist = plt.hist([str(m) for m in measures])

# Finally, we extract the most likely solution:

count = np.unique(measures, return_counts=True, axis=0)
weights = count[0][np.argmax(count[1])]

# Let's see the weights found by QAOA:

weights

# And the final accuracy:

print('accuracy (train): %5.2f'%(metric(y_train, predict(models, weights, X_train))))
print('accuracy (test): %5.2f'%(metric(y_test, predict(models, weights, X_test))))

# # References
#
# [1] <NAME>., <NAME>., <NAME>., <NAME>. (2008). [Training a binary classifier with the quantum adiabatic algorithm](https://arxiv.org/abs/0811.0416). *arXiv:0811.0416*. <a id='1'></a>
09_Discrete_Optimization_and_Ensemble_Learning-forest.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # 📃 Solution for Exercise 01
#
# The goal is to write an exhaustive search to find the best parameters
# combination maximizing the model's statistical performance.
#
# Here we use a small subset of the Adult Census dataset to make the code
# fast to execute. Once your code works on the small subset, try to
# change `train_size` to a larger value (e.g. 0.8 for 80% instead of
# 20%).

# +
import pandas as pd

from sklearn.model_selection import train_test_split

adult_census = pd.read_csv("../datasets/adult-census.csv")

target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])

data_train, data_test, target_train, target_test = train_test_split(
    data, target, train_size=0.2, random_state=42)

# +
from sklearn.compose import ColumnTransformer
from sklearn.compose import make_column_selector as selector
from sklearn.preprocessing import OrdinalEncoder

categorical_preprocessor = OrdinalEncoder(handle_unknown="use_encoded_value",
                                          unknown_value=-1)
preprocessor = ColumnTransformer(
    [('cat-preprocessor', categorical_preprocessor,
      selector(dtype_include=object))],
    remainder='passthrough', sparse_threshold=0)

# This line is currently required to import HistGradientBoostingClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from sklearn.pipeline import Pipeline

model = Pipeline([
    ("preprocessor", preprocessor),
    ("classifier", HistGradientBoostingClassifier(random_state=42))
])
# -

# Use the previously defined model (called `model`) and using two nested `for`
# loops, make a search of the best combinations of the `learning_rate` and
# `max_leaf_nodes` parameters. In this regard, you will need to train and test
# the model by setting the parameters. The evaluation of the model should be
# performed using `cross_val_score`. We will use the following parameters
# search:
# - `learning_rate` for the values 0.05, 0.1, 0.5, 1 and 5. This parameter
#   controls the ability of a new tree to correct the error of the previous
#   sequence of trees
# - `max_leaf_nodes` for the values 3, 10, 30 and 100. This parameter controls
#   the depth of each tree.

# +
from sklearn.model_selection import cross_val_score

learning_rate = [0.05, 0.1, 0.5, 1, 5]
max_leaf_nodes = [3, 10, 30, 100]

# Track the best cross-validated score seen so far and the parameter
# combination that produced it.
best_score = 0
best_params = {}
for lr in learning_rate:
    for mln in max_leaf_nodes:
        print(f"Evaluating model with learning rate {lr:.3f}"
              f" and max leaf nodes {mln}... ", end="")
        model.set_params(
            classifier__learning_rate=lr,
            classifier__max_leaf_nodes=mln
        )
        # cv=2: only two folds, to keep the exhaustive search quick on this
        # small training subset.
        scores = cross_val_score(model, data_train, target_train, cv=2)
        mean_score = scores.mean()
        print(f"score: {mean_score:.3f}")
        if mean_score > best_score:
            best_score = mean_score
            best_params = {'learning-rate': lr, 'max leaf nodes': mln}
            print(f"Found new best model with score {best_score:.3f}!")

print(f"The best accuracy obtained is {best_score:.3f}")
print(f"The best parameters found are:\n {best_params}")
notebooks/parameter_tuning_sol_02.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="CebbOFtOMv6X"
# ![JohnSnowLabs](https://nlp.johnsnowlabs.com/assets/images/logo.png)
#
#
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/colab/component_examples/sequence2sequence/T5_question_answering.ipynb)
#
#
# # `Open book` and `Closed book` question answering with Google's T5
# With the latest NLU release and Google's T5 you can answer **general knowledge based questions given no context** and in addition answer **questions on text databases**.
# These questions can be asked in natural human language and answered in just 1 line with NLU!
#
#
#
#
# ## What is an `open book question`?
# You can imagine an `open book` question similar to an exam where you are allowed to bring in text documents or cheat sheets that help you answer questions in the exam. Kind of like bringing a history book to a history exam.
#
# In `T5's` terms, this means the model is given a `question` and an **additional piece of textual information** or so called `context`.
#
# This enables the `T5` model to answer questions on textual datasets like `medical records`, `newsarticles`, `wiki-databases`, `stories` and `movie scripts`, `product descriptions`, `legal documents` and many more.
#
# You can answer an `open book question` in 1 line of code, leveraging the latest NLU release and Google's T5.
# All it takes is :
#
# ```python
# nlu.load('answer_question').predict("""
# Where did Jebe die?
# context: Ghenkis Khan recalled Subtai back to Mongolia soon afterwards, # and Jebe died on the road back to Samarkand""") # >>> Output: Samarkand # ``` # # Example for answering medical questions based on medical context # ``` python # question =''' # What does increased oxygen concentrations in the patient’s lungs displace? # context: Hyperbaric (high-pressure) medicine uses special oxygen chambers to increase the partial pressure of O 2 around the patient and, when needed, the medical staff. # Carbon monoxide poisoning, gas gangrene, and decompression sickness (the ’bends’) are sometimes treated using these devices. Increased O 2 concentration in the lungs helps to displace carbon monoxide from the heme group of hemoglobin. # Oxygen gas is poisonous to the anaerobic bacteria that cause gas gangrene, so increasing its partial pressure helps kill them. Decompression sickness occurs in divers who decompress too quickly after a dive, resulting in bubbles of inert gas, mostly nitrogen and helium, forming in their blood. Increasing the pressure of O 2 as soon as possible is part of the treatment. # ''' # # # #Predict on text data with T5 # nlu.load('answer_question').predict(question) # >>> Output: carbon monoxide # ``` # # Take a look at this example on a recent news article snippet : # ```python # question1 = 'Who is <NAME>?' # question2 = 'Who is founder of Alibaba Group?' # question3 = 'When did <NAME> re-appear?' # question4 = 'How did Alibaba stocks react?' # question5 = 'Whom did <NAME> meet?' # question6 = 'Who did <NAME> hide from?' # # # from https://www.bbc.com/news/business-55728338 # news_article_snippet = """ context: # Alibaba Group founder <NAME> has made his first appearance since Chinese regulators cracked down on his business empire. # His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. 
# The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. # Alibaba shares surged 5% on Hong Kong's stock exchange on the news. # """ # # join question with context, works with Pandas DF aswell! # questions = [ # question1+ news_article_snippet, # question2+ news_article_snippet, # question3+ news_article_snippet, # question4+ news_article_snippet, # question5+ news_article_snippet, # question6+ news_article_snippet,] # nlu.load('answer_question').predict(questions) # ``` # This will output a Pandas Dataframe similar to this : # # |Answer|Question| # |-----|---------| # Alibaba Group founder| Who is <NAME>? | # |<NAME> |Who is founder of Alibaba Group? | # Wednesday | When did <NAME> re-appear? | # surged 5% | How did Alibaba stocks react? | # 100 rural teachers | Whom did <NAME> meet? | # Chinese regulators |Who did <NAME> hide from?| # # # # ## What is a `closed book question`? # A `closed book question` is the exact opposite of a `open book question`. In an examen scenario, you are only allowed to use what you have memorized in your brain and nothing else. # In `T5's` terms this means that T5 can only use it's stored weights to answer a `question` and is given **no aditional context**. # `T5` was pre-trained on the [C4 dataset](https://commoncrawl.org/) which contains **petabytes of web crawling data** collected over the last 8 years, including Wikipedia in every language. # # # This gives `T5` the broad knowledge of the internet stored in it's weights to answer various `closed book questions` # # You can answer `closed book question` in 1 line of code, leveraging the latest NLU release and Google's T5. # You need to pass one string to NLU, which starts which a `question` and is followed by a `context:` tag and then the actual context contents. 
# All it takes is : # # # ```python # nlu.load('en.t5').predict('Who is president of Nigeria?') # >>> <NAME> # ``` # # # ```python # nlu.load('en.t5').predict('What is the most spoken language in India?') # >>> Hindi # ``` # # # ```python # nlu.load('en.t5').predict('What is the capital of Germany?') # >>> Berlin # ``` # # # + id="s6p3BcAQYeBl" import os # ! apt-get update -qq > /dev/null # Install java # ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"] # ! pip install nlu pyspark==2.4.7 > /dev/null import nlu # + [markdown] id="CqI-ovPLjzH7" # # Closed book question answering example # + colab={"base_uri": "https://localhost:8080/"} id="FYZQHT4FYjlQ" outputId="33390dc2-02f7-4f18-dd0f-2ffde860d787" t5_closed_book = nlu.load('en.t5') # + id="uHK91QxwYn6y" colab={"base_uri": "https://localhost:8080/", "height": 106} outputId="f8b3c8fe-1f6a-4482-ba46-c64a47cec5a5" t5_closed_book.predict('What is the capital of Germany?') # + id="4IugHdKcZMTW" colab={"base_uri": "https://localhost:8080/", "height": 106} outputId="18f851dc-036e-4cee-bd21-68e850856b85" t5_closed_book.predict('Who is president of Nigeria?') # + id="dZfMDsyXqvZ0" colab={"base_uri": "https://localhost:8080/", "height": 106} outputId="813c4c72-747b-43f7-94ca-d6d421f92f29" t5_closed_book.predict('What is the most spoken language in India?') # + [markdown] id="3Bu-Beo7ZNps" # # Open Book question examples # # **Your context must be prefixed with `context:`** # # + id="886fxf0iZO5A" colab={"base_uri": "https://localhost:8080/"} outputId="49df08e1-983d-4409-98dd-efe0e3fb6190" t5_open_book = nlu.load('answer_question') # + [markdown] id="7SqPPK1mtmyz" # ## The context : # `<NAME> recalled Subtai back to Mongolia soon afterwards, and Jebe died on the road back to Samarkand` # + id="5koR8GOOZqUN" colab={"base_uri": "https://localhost:8080/", "height": 106} 
outputId="204d5010-0fae-4b3a-8544-0d116b79e142" t5_open_book.predict("""Where did Jebe die? context: Gh<NAME> recalled Subtai back to Mongolia soon afterwards, and Jebe died on the road back to Samarkand""" ) # + [markdown] id="-9wDlwfLigQl" # ## Todo Tesla Bitcoin News article!? # + [markdown] id="-OlZcBoGjPTS" # ## Open book question example on news article # + id="JSSQz8jxa4Bg" colab={"base_uri": "https://localhost:8080/", "height": 254} outputId="9f547e41-3639-4aff-e6a6-746e3362c756" question1 = 'Who is <NAME>?' question2 = 'Who is founder of Alibaba Group?' question3 = 'When did <NAME> re-appear?' question4 = 'How did Alibaba stocks react?' question5 = 'Whom did <NAME> meet?' question6 = 'Who did <NAME> hide from?' # from https://www.bbc.com/news/business-55728338 news_article_snippet = """ context: Alibaba Group founder <NAME> has made his first appearance since Chinese regulators cracked down on his business empire. His absence had fuelled speculation over his whereabouts amid increasing official scrutiny of his businesses. The billionaire met 100 rural teachers in China via a video meeting on Wednesday, according to local government media. Alibaba shares surged 5% on Hong Kong's stock exchange on the news. """ questions = [ question1+ news_article_snippet, question2+ news_article_snippet, question3+ news_article_snippet, question4+ news_article_snippet, question5+ news_article_snippet, question6+ news_article_snippet,] t5_open_book.predict(questions) # + id="vlpHM1m8ixDL" colab={"base_uri": "https://localhost:8080/", "height": 106} outputId="6e7b7334-3fe8-48e2-d777-e7ffcb431a74" # define Data, add additional context tag between sentence question =''' What does increased oxygen concentrations in the patient’s lungs displace? context: Hyperbaric (high-pressure) medicine uses special oxygen chambers to increase the partial pressure of O 2 around the patient and, when needed, the medical staff. 
Carbon monoxide poisoning, gas gangrene, and decompression sickness (the ’bends’) are sometimes treated using these devices. Increased O 2 concentration in the lungs helps to displace carbon monoxide from the heme group of hemoglobin. Oxygen gas is poisonous to the anaerobic bacteria that cause gas gangrene, so increasing its partial pressure helps kill them. Decompression sickness occurs in divers who decompress too quickly after a dive, resulting in bubbles of inert gas, mostly nitrogen and helium, forming in their blood. Increasing the pressure of O 2 as soon as possible is part of the treatment. ''' #Predict on text data with T5 t5_open_book.predict(question) # + [markdown] id="jbmPc4o5iSS-" # # Summarize # + colab={"base_uri": "https://localhost:8080/"} id="gHgQpl1Jc07e" outputId="fa9b17a7-a466-4ced-b945-a11be7f835f2" t5_sum = nlu.load('en.t5.base') # + colab={"base_uri": "https://localhost:8080/", "height": 136} id="zj-FbNaScofh" outputId="681b92d0-ae5b-441b-df3a-58caf88f220b" # Set the task on T5 t5_sum['t5'].setTask('summarize ') # define Data, add additional tags between sentences data = [ ''' The belgian duo took to the dance floor on monday night with some friends . manchester united face newcastle in the premier league on wednesday . red devils will be looking for just their second league away win in seven . louis van gaal’s side currently sit two points clear of liverpool in fourth . ''', ''' Calculus, originally called infinitesimal calculus or "the calculus of infinitesimals", is the mathematical study of continuous change, in the same way that geometry is the study of shape and algebra is the study of generalizations of arithmetic operations. It has two major branches, differential calculus and integral calculus; the former concerns instantaneous rates of change, and the slopes of curves, while integral calculus concerns accumulation of quantities, and areas under or between curves. 
These two branches are related to each other by the fundamental theorem of calculus, and they make use of the fundamental notions of convergence of infinite sequences and infinite series to a well-defined limit.[1] Infinitesimal calculus was developed independently in the late 17th century by <NAME> and <NAME>.[2][3] Today, calculus has widespread uses in science, engineering, and economics.[4] In mathematics education, calculus denotes courses of elementary mathematical analysis, which are mainly devoted to the study of functions and limits. The word calculus (plural calculi) is a Latin word, meaning originally "small pebble" (this meaning is kept in medicine – see Calculus (medicine)). Because such pebbles were used for calculation, the meaning of the word has evolved and today usually means a method of computation. It is therefore used for naming specific methods of calculation and related theories, such as propositional calculus, Ricci calculus, calculus of variations, lambda calculus, and process calculus.''' ] #Predict on text data with T5 # + colab={"base_uri": "https://localhost:8080/", "height": 106} id="UH7PqsbPfLem" outputId="95882722-fd21-4b9e-ab82-4bb5a719313e" text = """(Reuters) - Mastercard Inc said on Wednesday it was planning to offer support for some cryptocurrencies on its network this year, joining a string of big-ticket firms that have pledged similar support. The credit-card giant’s announcement comes days after Elon Musk’s Tesla Inc revealed it had purchased $1.5 billion of bitcoin and would soon accept it as a form of payment. Asset manager BlackRock Inc and payments companies Square and PayPal have also recently backed cryptocurrencies. Mastercard already offers customers cards that allow people to transact using their cryptocurrencies, although without going through its network. "Doing this work will create a lot more possibilities for shoppers and merchants, allowing them to transact in an entirely new form of payment. 
This change may open merchants up to new customers who are already flocking to digital assets," Mastercard said. (mstr.cd/3tLaPZM) Mastercard specified that not all cryptocurrencies will be supported on its network, adding that many of the hundreds of digital assets in circulation still need to tighten their compliance measures. Many cryptocurrencies have struggled to win the trust of mainstream investors and the general public due to their speculative nature and potential for money laundering. """ short = t5_sum.predict(text) short # + colab={"base_uri": "https://localhost:8080/", "height": 69} id="ActCNiVClRcT" outputId="91dbd046-29c7-4de8-e2ea-b19dff282dd3" short.T5.iloc[0] # + colab={"base_uri": "https://localhost:8080/"} id="vIDmj_JkmPmd" outputId="ccdad8c7-a4e8-45e3-846d-2d32682c8063" len('mastercard said on Wednesday it was planning to offer support for some cryptocurrencies on its network this year . the credit-card giant’s announcement comes days after Elon Musk’s Tesla Inc revealed it had purchased $1.5 billion of bitcoin . asset manager blackrock and payments companies Square and PayPal have also recently backed cryptocurrencies .') # + colab={"base_uri": "https://localhost:8080/"} id="geQxsepZmTbS" outputId="6728fb43-35f3-413e-811d-622e11b614f9" len(text) # + id="gkepJVvQiCod"
nlu/webinars_conferences_etc/NYC_DC_NLP_MEETUP/3_T5_question_answering_and_Text_summarization.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .jl # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Julia 1.6.3 # language: julia # name: julia-1.6 # --- # # Appendix A.2: Computational Cost # This notebook reproduces the analyses in the Appendix, Section A.2 of the manuscript, including Tables 1-3. # # > **Note:** By default, the computational cost experiments are run in parallel on 6 cores. If you have less cores available on your system, you should change that number according to your resources: `addprocs(5)` means that 5 processes on separates cores are added to the one being used already, so if you want to use, e.g., 4 cores, change that to `addprocs(3)`. If you do not want to run the code in parallel or have no sufficient resources to do to, you can comment out the line altogether and the code will run without further changes on a single core. # ## Setup # First, we set up the parallel computing: using Distributed addprocs(5) # Next, we load the Julia environment specified in the `Project.toml` and `Manifest.toml` files: First, we activate this environment, then install all dependencies (if some packages are not yet installed), and print out which packages and versions are currently in our environment. To make it available on all used processes, we use the `@everywhere` macro. # + @everywhere using Pkg; # all paths are relative to the `notebook` subfolder main folder, i.e., assuming `pwd()` outputs # ".../DeepDynamicodelingWithJust2TimePoints/notebooks" @everywhere Pkg.activate("../.") Pkg.instantiate() # - # Next, we load and precompile the necessary packages (in the versions specified by the `*.toml` files). 
@everywhere using BenchmarkTools
@everywhere using DataFrames
@everywhere using Distributed
@everywhere using Distributions
@everywhere using Random
@everywhere using Flux
@everywhere using DiffEqFlux
@everywhere using OrdinaryDiffEq
@everywhere using SharedArrays

# Additionally, we import some user-defined functions, with different files for separate functionality, also using `@everywhere` to define them on all procs.

@everywhere include("../src/simulation.jl") # for simulating data
@everywhere include("../src/model.jl") # for initializing and training the model
@everywhere include("../src/benchmarking.jl") # for running the benchmark experiments

# ## Define ground truth developments

# First, we define the ground-truth developments as solutions of the underlying two-dimensional linear ODE system with two distinct sets of parameters, corresponding to two groups of individuals with two distinct underlying development patterns.

# +
# define initial condition
@everywhere true_u0 = Float32[2, 1]
# define time span on which to solve the ODE
@everywhere tspan = (0.0f0, 10.0f0)
# define parameters for the two distinct groups
@everywhere true_odeparams_group1 = Float32[-0.2, 0.00, 0.00, -0.2]
@everywhere true_odeparams_group2 = Float32[-0.2, 0.00, 0.00, 0.2]
# define corresponding ODE problems for the two groups
@everywhere prob1 = ODEProblem(linear_2d_system,true_u0,tspan,true_odeparams_group1)
@everywhere prob2 = ODEProblem(linear_2d_system,true_u0,tspan,true_odeparams_group2)
# solve ODE systems to obtain "true" underlying trajectory in each group
@everywhere dt=0.1
@everywhere sol_group1 = solve(prob1, Tsit5(), saveat = dt);
@everywhere sol_group2 = solve(prob2, Tsit5(), saveat = dt);
# -

# ## Train model using benchmarking

# Now, we train the model on varying numbers of individuals, time-dependent variables and baseline variables. We save all benchmark results, i.e., runtime, memory, and allocations, in a specific `SharedArray`, an array that allows for being used simultaneously by different processes while preventing them from getting in the way of each other.

# ### Setup

# +
# define number of observations, variables and baseline variables to try
@everywhere n_obs = [50, 100, 250, 500, 1000, 2000, 5000]
@everywhere n_vars = [10, 20, 50, 100, 200]
@everywhere n_baselinevars = [10, 20, 50, 100, 200]
@everywhere lenobs, lenvars, lenbvars = length(n_obs), length(n_vars), length(n_baselinevars)

# construct dataframe: n, p, q, time, memory, allocations
# (each scenario varies one of n/p/q and fixes the other two at n=100, p=10, q=50)
benchmarkdf = DataFrame(n_obs = cat(n_obs, fill(100, lenvars + lenbvars), dims=1),
                n_vars = cat(fill(10, lenobs), n_vars, fill(10, lenbvars), dims=1),
                n_baselinevars = cat(fill(50, lenobs + lenvars), n_baselinevars, dims=1),
                time = fill(0.0, lenobs+lenvars+lenbvars),
                gctime = fill(0.0, lenobs+lenvars+lenbvars),
                memory = fill(0, lenobs+lenvars+lenbvars),
                allocs = fill(0, lenobs+lenvars+lenbvars)
)

# get it to a shared array for distributed computing
benchmarkarray = SharedArrays.SharedMatrix{Float64}(size(Matrix(benchmarkdf)));
benchmarkarray[:,1:3] = Matrix(benchmarkdf)[:,1:3];
# NOTE(review): this interpolates the array's value on each worker but does not bind it
# to a name there; the loops below rely on the captured `benchmarkarray` being a
# SharedArray accessible from all procs — confirm this line is actually needed.
@everywhere eval($benchmarkarray);
# -

# ### Scenario 1: Fixed number of time-dep and baseline variables, varying number of observations

@sync @distributed for n_ind in 1:lenobs
    # warmup (the first run takes longer because of precompilation times and shouldn't be included)
    n_warmup, p_warmup, q_warmup, q_info_warmup = 100, 10, 10, 10
    xs, x_baseline, tvals, group1, group2 = generate_all(n_warmup, p_warmup, q_warmup, q_info_warmup);
    trainingdata = zip(xs, x_baseline, tvals);
    zdim = nODEparams = 2
    m = init_vae(p_warmup, q_warmup, zdim, nODEparams, prob1)
    L = loss_wrapper(m)
    ps = getparams(m)
    opt = ADAM(0.0005)
    for epoch in 1:35
        Flux.train!(L, ps, trainingdata, opt)
    end
    println("warmup done")
    # now start for real
    n, p, q = n_obs[n_ind], 10, 50
    println("n=$n, p=$p, q=$q")
    q_info = Int(q/5)
    xs, x_baseline, tvals, group1, group2 = generate_all(n, p, q, q_info);
    trainingdata = zip(xs, x_baseline, tvals);
    zdim = nODEparams = 2
    m = init_vae(p, q, zdim, nODEparams, prob1)
    # run exactly one timed training run (samples=1, evals=1)
    b = @benchmark run_benchmark($trainingdata, $m) samples=1 evals=1
    println("training done")
    row = n_ind
    benchmarkarray[row,4] = b.times[1] # times
    benchmarkarray[row,5] = b.gctimes[1] # gctimes
    benchmarkarray[row,6] = b.memory # memory
    benchmarkarray[row,7] = b.allocs # allocations
end

# ### Scenario 2: Fixed number of observations and baseline variables, varying number of time-dependent variables

# (no explicit warmup here — workers are assumed to be compiled from Scenario 1; confirm if run standalone)
@sync @distributed for p_ind in 1:lenvars
    n, p, q = 100, n_vars[p_ind], 50
    println("n=$n, p=$p, q=$q")
    q_info = Int(q/5)
    xs, x_baseline, tvals, group1, group2 = generate_all(n, p, q, q_info);
    trainingdata = zip(xs, x_baseline, tvals);
    zdim = nODEparams = 2
    m = init_vae(p, q, zdim, nODEparams, prob1)
    b = @benchmark run_benchmark($trainingdata, $m) samples=1 evals=1
    println("training done")
    row = lenobs + p_ind
    benchmarkarray[row,4] = b.times[1] # times
    benchmarkarray[row,5] = b.gctimes[1] # gctimes
    benchmarkarray[row,6] = b.memory # memory
    benchmarkarray[row,7] = b.allocs # allocations
end

# ### Scenario 3: Fixed number of observations and time-dependent variables, varying number of baseline variables

@sync @distributed for q_ind in 1:lenbvars
    n, p, q = 100, 10, n_baselinevars[q_ind]
    println("n=$n, p=$p, q=$q")
    q_info = Int(q/5)
    xs, x_baseline, tvals, group1, group2 = generate_all(n, p, q, q_info);
    trainingdata = zip(xs, x_baseline, tvals);
    zdim = nODEparams = 2
    m = init_vae(p, q, zdim, nODEparams, prob1)
    b = @benchmark run_benchmark($trainingdata, $m) samples=1 evals=1
    println("training done")
    row = lenobs + lenvars + q_ind
    benchmarkarray[row,4] = b.times[1] # times
    benchmarkarray[row,5] = b.gctimes[1] # gctimes
    benchmarkarray[row,6] = b.memory # memory
    benchmarkarray[row,7] = b.allocs # allocations
end

# ## Save results

# First, we can optionally save the Julia object as `JLD2` file:

# if desired: save as JLD2 file
using JLD2
JLD2.@save "../benchmarkresults.jld2" benchmarkarray
# and re-load from saved
JLD2.@load "../benchmarkresults.jld2"
benchmarkarray = eval(:benchmarkarray)

# Now, we copy back the information from the `SharedArray` object to the benchmark dataframe, to export that to CSV format.

# copy back to dataframe, to be saved later as CSV
benchmarkdf[:,:time] = benchmarkarray[:,4]
benchmarkdf[:,:gctime] = benchmarkarray[:,5]
benchmarkdf[:,:memory] = benchmarkarray[:,6]
benchmarkdf[:,:allocs] = benchmarkarray[:,7]
benchmarkdf

# Additionally, we turn the time and memory information into human-readable format and units:

# +
# nanoseconds -> seconds
benchmarkdf[:,:time_in_seconds] = round.(benchmarkarray[:,4] .* 1e-9, digits=3)
# turn memory into human-readable format (taken from BenchmarkTools.jl source code)
benchmarkdf[:,:prettymemory] = prettymemory.(benchmarkarray[:,6])
benchmarkdf
# -

# Finally, we can export to CSV; re-creating Tables 1-3 from the manuscript.

# +
# save entire dataframe as CSV
# NOTE(review): this file is written to the current directory while the per-table
# files below go to the parent directory — confirm this difference is intended.
using CSV
CSV.write("benchmarkresults.csv", benchmarkdf)

# extract tables as in manuscript appendix and save as CSV files
# different number of observations for fixed p (10) and q (50)
benchmark_obs = benchmarkdf[1:7, [:n_obs, :time_in_seconds, :prettymemory]]
@show(benchmark_obs)
benchmark_vars = benchmarkdf[8:12, [:n_vars, :time_in_seconds, :prettymemory]]
@show(benchmark_vars)
benchmark_bvars = benchmarkdf[13:end, [:n_baselinevars, :time_in_seconds, :prettymemory]];
@show(benchmark_bvars)
# -

# We can now save the dataframes individually as `.csv` files.

CSV.write("../benchmarkresults_baselinevars.csv", benchmark_bvars)
CSV.write("../benchmarkresults_obs.csv", benchmark_obs)
CSV.write("../benchmarkresults_vars.csv", benchmark_vars)
notebooks/SecA2_ComputationalCost.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # How do I make my custom QRenderer # # This notebook demonstrates how to create a user-defined QRenderer. These steps are needed when you intend to configure Qiskit Metal to interface with your favorite (and presently not supported) external tool/simulator. # # To execute all the steps in this notebook, you will be modifying the core code. Therefore, we assume that you have installed qiskit-metal from the Github repository, using the README instructions, which will install qiskit-metal in 'editable' mode. # # ### Preparations # To get started, enable [automatic reloading of modules] (https://ipython.readthedocs.io/en/stable/config/extensions/autoreload.html?highlight=autoreload). This will allow to modify the source code and immediately observe the effects of the changes in the notebook, without the need for reinitiating the kernel or reinstalling the package. # + tags=[] # %load_ext autoreload # %autoreload 2 # - # Pre-load all the Qiskit Metal libraries that are needed for the rest of this notebook. # + import qiskit_metal as metal from qiskit_metal import designs, draw from qiskit_metal import MetalGUI, Dict, Headings from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket from qiskit_metal.qlibrary.qubits.transmon_cross import TransmonCross from qiskit_metal.renderers.renderer_gds.gds_renderer import QGDSRenderer # - # ## Integrating the user-defined renderer with the rest of Qiskit Metal # ### Architectural insights # This section will give you the architectural overview of how Qiskit Metal manages renderers, and how you can add your own. 
#
# We will refer to your custom renderer as the `skeleton` renderer, since we will not code tool-specific methods/classes, but only worry about how to bootstrap one without functionality.
#
# Note that all renderers (existing `gds`, `hfss` and `q3d` as well as the newly created `skeleton`) have to be identified in the config.py file. Therefore, you will be required to modify the `qiskit_metal/config.py` file.
#
# The following image describes how the QRenderer (superclass of all renderers) interacts with the rest of Qiskit Metal. The key take-away is that creating a QDesign class object instantiates all the QRenderer subclass objects as well. Specifically, the `QDesign.__init__()` method reads the `renderers_to_load` dictionary from the config.py file, which enumerates which QRenderer subclasses need to be instantiated. After instantiating the renderer objects, the `QDesign.__init__()` registers them in the `QDesign._renderers` dictionary for later reference.
# ![QDesign Data Flow_skeleton_660.jpg](attachment:68e1e214-00fe-404e-ad9c-869a18d226f8.jpg)

# ### QRenderer inheritance and subclass management
#
# Presently, the config.py file references three QRenderer subclasses, which handle the `gds`, `hfss` and `q3d` interfaces. Explicitly, QGDSRenderer is a subclass of QRenderer. Both QHFSSRenderer and QQ3DRenderer subclass from QAnsysRenderer. The class QAnsysRenderer is a subclass of QRenderer.
#
# The `renderers_to_load` dictionary in the config.py file needs to be updated to inform Qiskit Metal about the new renderer `skeleton` you are going to create. `renderers_to_load` stores the explicit path and class name so that Qiskit Metal will load into memory by default only those specified renderers. This happens during the `QDesign.__init__()`.
#
# For this notebook, we created a sample class named QSkeletonRenderer in `tutorials/resources/skeleton_renderer`. This class is your skeleton to develop a new QRenderer subclass. Feel free to edit the class content at will.
If you change the path to the file, please reflect that in the remainder of this notebook. Presently, you can find the production QRenderer subclasses in the package directory `qiskit_metal.renderers`.

# ### TODO: Let's tell Qiskit Metal where to find your new custom renderer
# As the first step, please locate and open the file config.py in the qiskit-metal package and edit the `renderers_to_load` dictionary to add the new renderer `skeleton`, like so:
#
# `renderers_to_load = Dict(
#     hfss=Dict(path_name='qiskit_metal.renderers.renderer_ansys.hfss_renderer',
#               class_name='QHFSSRenderer'),
#     q3d=Dict(path_name='qiskit_metal.renderers.renderer_ansys.q3d_renderer',
#              class_name='QQ3DRenderer'),
#     gds=Dict(path_name='qiskit_metal.renderers.renderer_gds.gds_renderer',
#              class_name='QGDSRenderer'),
#     skeleton=Dict(path_name='tutorials.resources.skeleton_renderer',
#                   class_name='QSkeletonRenderer'),
# )`

# ### Confirm QDesign is able to load your renderer
# Create a QDesign instance.

# Instantiating the design also instantiates and registers every renderer
# listed in `renderers_to_load` (including our new `skeleton`).
design = designs.DesignPlanar()

# If you modified the config.py correctly, the previous command should have instantiated and registered the `skeleton` renderer. Verify that by inspecting the renderers dictionary property of the QDesign instance.
#
# If executing the next cell does not show the `skeleton` renderer in the list, please make sure you correctly updated the `config.py` file; next, you could try resetting the jupyter notebook kernel, or restarting jupyter notebook.

design.renderers.keys()

# For convenience, let's create a short-handle alias to refer to the renderer during the remainder of this notebook.

a_skeleton = design.renderers.skeleton
<br>
# `default_options = Dict(
#     number_of_bones='206')
# `
#
# The instance `a_skeleton` will contain a dictionary `options` that is initialized from the `default_options`. (This works similarly to `options` for QComponents, which has been introduced in the jupyter notebooks found in the folder: `tutorials/2 Front End User`.)
#
# You can access and modify the options in the QSkeletonRenderer class instance as follows. For example, let's update the skeleton from that of a human to that of a dog (319 bones).

a_skeleton.options.number_of_bones = '319'
a_skeleton.options

# Original (template) values will continue being accessible like so:

a_skeleton.get_template_options(design)

# ### Populate a sample QDesign to demonstrate interaction with the renderer
# This portion is described in notebooks within directory `tutorials/2 Front End User`. Some of the options have been made distinctly different to show what can be done, i.e. fillet value, fillet='25um', varies for each cpw. However, that may not be what a user will implement for their design.
# +
gui = MetalGUI(design)
design.overwrite_enabled = True

from qiskit_metal.qlibrary.qubits.transmon_pocket import TransmonPocket
from qiskit_metal.qlibrary.tlines.meandered import RouteMeander

# +
## Custom options for all the transmons
options = dict(
    pad_width = '425 um',
    pad_gap = '80 um',
    pocket_height = '650um',
    # Adding 4 connectors (see below for defaults)
    connection_pads=dict(
        a = dict(loc_W=+1,loc_H=+1),
        b = dict(loc_W=-1,loc_H=+1, pad_height='30um'),
        c = dict(loc_W=+1,loc_H=-1, pad_width='200um'),
        d = dict(loc_W=-1,loc_H=-1, pad_height='50um')
    )
)

## Create 4 TransmonPockets
q1 = TransmonPocket(design, 'Q1', options = dict(
    pos_x='+2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_02', **options))
q2 = TransmonPocket(design, 'Q2', options = dict(
    pos_x='+0.0mm', pos_y='-0.9mm', orientation = '90', gds_cell_name='FakeJunction_02', **options))
q3 = TransmonPocket(design, 'Q3', options = dict(
    pos_x='-2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_01',**options))
q4 = TransmonPocket(design, 'Q4', options = dict(
    pos_x='+0.0mm', pos_y='+0.9mm', orientation = '90', gds_cell_name='my_other_junction', **options))

# NOTE: `options` is re-bound here — from this point on it holds the shared
# meander defaults consumed by `connect()` below, not the transmon options above.
options = Dict(
    meander=Dict(
        lead_start='0.1mm',
        lead_end='0.1mm',
        asymmetry='0 um')
)

def connect(component_name: str, component1: str, pin1: str,
            component2: str, pin2: str,
            length: str,
            asymmetry='0 um', flip=False, fillet='50um'):
    """Connect two pins with a CPW.

    Builds a RouteMeander between `component1.pin1` and `component2.pin2`
    with the given total `length`, merging in the module-level meander
    `options` and applying the per-route `asymmetry`, `flip` and `fillet`.
    Returns the created RouteMeander component.
    """
    myoptions = Dict(
        fillet=fillet,
        pin_inputs=Dict(
            start_pin=Dict(
                component=component1,
                pin=pin1),
            end_pin=Dict(
                component=component2,
                pin=pin2)),
        lead=Dict(
            start_straight='0.13mm',
            end_straight='0.13mm'
        ),
        total_length=length)
    myoptions.update(options)
    myoptions.meander.asymmetry = asymmetry
    myoptions.meander.lead_direction_inverted = 'true' if flip else 'false'
    return RouteMeander(design, component_name, myoptions)

asym = 90
cpw1 = connect('cpw1', 'Q1', 'd', 'Q2', 'c', '5.7 mm', f'+{asym}um', fillet='25um')
cpw2 = connect('cpw2', 'Q3', 'c', 'Q2', 'a', '5.6 mm', f'-{asym}um', flip=True, fillet='100um')
cpw3 = connect('cpw3', 'Q3', 'a', 'Q4', 'b', '5.5 mm', f'+{asym}um', fillet='75um')
cpw4 = connect('cpw4', 'Q1', 'b', 'Q4', 'd', '5.8 mm', f'-{asym}um', flip=True)

gui.rebuild()
gui.autoscale()
# -

# ### Export list of the design QGeometries to file using your custom QSkeletonRenderer
# The QSkeletonRenderer class contains several sample methods. Let's use one intended to print out the name of the QGeometry tables to a text file (Remember: QGeometry contains the list of the raw layout shapes that compose the design, which we have created in the previous cell).

a_skeleton.write_qgeometry_table_names_to_file('./simple_output.txt')

# Here is another example where we sub select a single QComponent instance (`cpw1`) of type RouteMeander. This will only export the name of tables containing shapes related to that instance, which in this case is only paths, and not junctions or poly.

a_skeleton.write_qgeometry_table_names_to_file('./simple_output_cpw1.txt',highlight_qcomponents=['cpw1'])

# ## What if my new tool requires additional parameters that Qiskit Metal does not natively support?

# ### QRenderers can request special tool parameters from the user
#
# External tools, such as Ansys, might require special parameters to be able to render (interpret) correctly the QGeometries that Qiskit Metal wants to pass (render) to them. Every tool might need a different set of special parameters, thus we architected a solution that allows individual QRenderer's to communicate to qiskit-metal what additional parameters their associated tool requires.
#
# The implementation consists of enabling the QRenderers to add new columns (parameters) and tables (geometry types) to the QGeometry table collection. The QRenderer should also specify what is the default values to use to populate those columns/tables.
The user can then update them to a value different than the default by editing them at run-time, which can happen through the QComponent options (or directly, for advanced users). Note that older QComponents remain valid also for newer QRenderer's, thanks to the defaults provided by the QRenderer.
#
# Our QSkeletonRenderer class for example is designed to add a `a_column_name` column to the `junction` table, with default value `a_default_value`. This is implemented by creating the following class parameter:
# <br>`element_table_data = dict(junction=dict(a_column_name='a_default_value'))`
#
# Note that the final column name will be `skeleton_a_column_name` because the provided column name is prefixed with the renderer name (`QSkeletonRenderer.name`).
#
# The method that executes the magic described above is `QRenderer.load()`, which is called from the `QSkeletonRenderer.__init__()`.

# ### Let's observe and update the additional properties that our QSkeletonRenderer needs
# First, make sure that the registration of the QRenderer added the additional parameter as expected. Search for the column `skeleton_a_column_name` in the qgeometry table `junction`

design.qgeometry.tables['junction']

# If you cannot locate the new column (might need to scroll to the far right), then something must be amiss, so please start over this notebook and execute all of the cells.
#
# Once you can locate the new column, and observe the set default value, let's now try to update the value in the column by modifying the design of the correspondent QComponent.
All we need to do is pass a different set of options to the component, like so:

# +
q1.options.skeleton_a_column_name = 'q1 skeleton'
q2.options.skeleton_a_column_name = 'q2 skeleton'
q3.options.skeleton_a_column_name = 'q3 skeleton'
q4.options.skeleton_a_column_name = 'q4 skeleton'

gui.rebuild()
gui.autoscale()

design.qgeometry.tables['junction']
# -

# You can also create the components by directly passing the options you know the renderer will require, like so:

# +
q1.delete()
q2.delete()
q3.delete()
q4.delete()

# NOTE(review): at this point `options` was re-bound (in an earlier cell) to the
# meander Dict used by `connect()`, so `**options` here spreads meander settings
# into TransmonPocket rather than the original transmon options — confirm intended.
q1 = TransmonPocket(design, 'Q1', options = dict(
    pos_x='+2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_02',
    skeleton_a_column_name='q1 skeleton 2', **options))
q2 = TransmonPocket(design, 'Q2', options = dict(
    pos_x='+0.0mm', pos_y='-0.9mm', orientation = '90', gds_cell_name='FakeJunction_02',
    skeleton_a_column_name='q2 skeleton 2', **options))
q3 = TransmonPocket(design, 'Q3', options = dict(
    pos_x='-2.55mm', pos_y='+0.0mm', gds_cell_name='FakeJunction_01',
    skeleton_a_column_name='q3 skeleton 2', **options))
q4 = TransmonPocket(design, 'Q4', options = dict(
    pos_x='+0.0mm', pos_y='+0.9mm', orientation = '90', gds_cell_name='my_other_junction',
    skeleton_a_column_name='q4 skeleton 2', **options))

design.qgeometry.tables['junction']
# -

# ## Can my user-defined renderer change/interact with the design?

# ### Accessing information and methods
# It is possible that the result of a rendering action, or analysis requires a design update back to qiskit-metal. This can be achieved without user intervention by simply controlling the QDesign instance from within the QRenderer.
#
# Just as an example, the next three cells inspect the current design QComponent, QGeometry table, and QRenderer names.

a_skeleton.design.components.keys()

a_skeleton.design.qgeometry.tables.keys()

a_skeleton.design.renderers.keys()

# The base QRenderer class comes with useful methods to more easily access some of the information.
You will find more methods described in the QRenderer documentation. The example below for example returns the QComponent's IDs.

# NOTE(review): 'Q1' appears twice in this list while 'Q2'/'Q3' are absent — confirm intended.
a_skeleton.get_unique_component_ids(highlight_qcomponents = ['Q1', 'Q1', 'Q4', 'cpw1', 'cpw2', 'cpw3', 'cpw4'])

# The following instead shows three ways to access the same QGeometry table.

a_skeleton.design.components['Q1'].qgeometry_table('junction') # via QComponent name

a_skeleton.design._components[9].qgeometry_table('junction') # via QComponent ID

q1.qgeometry_table('junction') # via the QComponent instance

# The method `QSkeletonRenderer.get_qgeometry_tables_for_skeleton()` exemplifies how to iterate through chips and tables.

from tutorials.resources.skeleton_renderer import QSkeletonRenderer
# ?QSkeletonRenderer.get_qgeometry_tables_for_skeleton

# ### Communicate state
# We can also interact with any other method of the QDesign instance, for example we can generate a warning into the logger as shown in the next cell. This is particularly useful to document problems with the user-defined QRenderer execution

# Purposefully generates a warning message.
a_skeleton.logger.warning('Show a warning message for plugin developer.')

# ## Qiskit Metal Version

# + tags=[]
metal.about();
# +
# This command below is if the user wants to close the Metal GUI.

# gui.main_window.close()
tutorials/3 Renderers/3.4 How do I make my custom QRenderer.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from pathlib import Path

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Circle, Rectangle

mpl.rc_file("matplotlibrc")

Path("plots").mkdir(exist_ok=True)


# +
def fi(l, a, offset):
    # Parabolic bump: proportional to (a - x)(a + x) for x = l - offset in
    # [-a, a] (the heaviside factors zero it elsewhere), divided by the
    # analytic integral 4*a^3/3 so the bump integrates to 1 over [-a, a].
    x = l - offset
    return (
        np.heaviside(x + a, 0)
        * np.heaviside(a - x, 0)
        * (a - x)
        * (x + a)
        / (4.0 * a ** 3 / 3.0)
    )


xlim = (-1, 1)
titles = ("No-go Boundary", "Stop-go Boundary", "Reflecting Boundary")

xs_full = np.linspace(-0.5, 1.5, 200)
dx_full = xs_full[1] - xs_full[0]

xs_domain = np.linspace(0, 1, 200)
dx_domain = xs_domain[1] - xs_domain[0]

delta = 0.75
offset = 0.4

# Riemann-sum normalization of fi restricted to the [0, 1] domain.
normz = np.sum([fi(x, delta, offset) for x in xs_domain]) * dx_domain

fig, axes = plt.subplots(2, 3, figsize=(6.3, 3.8), sharex=True, sharey="row")

# Visualise the boundary conditions.
# Rectangle height.
rect_h = 1.5
# Rectangle width.
w = 0.6
# Circle centre.
centre = [0.5, 0.3]
# Circle radius.
r = 0.1
# Dotted arrow length.
l = 1.05

# Top row: common geometry (wall rectangle, boundary line, particle circle,
# dashed intended-path arrow) drawn on every axis.
for ax in axes[0]:
    ax.add_patch(Rectangle([-w, 0.0], w, rect_h, ec="none", fc="0.9"))
    ax.vlines(0, 0, rect_h, colors="0.5", lw=1.3)
    ax.add_patch(Circle(centre, r, ec="0.2", fc="none"))
    direction = (1 / 2 ** 0.5) * np.array([-1, 1])
    xytext = np.asarray(centre) + r * direction
    xy = xytext + l * direction
    ax.annotate(
        "", xy=xy, xytext=xytext, arrowprops=dict(arrowstyle="->", ls="--", lw=1)
    )
    # Plot a small solid arrow towards the end to avoid gaps in the arrowhead.
    ax.annotate(
        "",
        xy=xy,
        xytext=xytext + (l - 0.1) * direction,
        arrowprops=dict(arrowstyle="->", ls="-", lw=1),
    )

# Per-boundary-condition arrows (note: `xytext`/`direction` leak out of the
# loop above and are reused here).
angle = 10 * np.pi / 180
direction2 = np.array([-np.sin(angle), np.cos(angle)])
xytext2 = np.asarray(centre) + r * direction2
xy2 = xytext2 + l * direction2
axes[0][0].annotate(
    "", xy=xy2, xytext=xytext2, arrowprops=dict(arrowstyle="->", ls="-", lw=1.1)
)
axes[0][1].annotate(
    "", xy=[0, 0.8], xytext=xytext, arrowprops=dict(arrowstyle="->", ls="-", lw=1.1)
)
direction3 = (1 / 2 ** 0.5) * np.array([1, 1])
xytext3 = np.array([0, 0.8])
# Draw the reflected arrow - just as long as the part of the dotted arrow within the
# boundary
xy3 = xytext3 + (l - np.linalg.norm(xytext - np.array([0, 0.8]))) * direction3
axes[0][2].annotate(
    "", xy=xy3, xytext=xytext3, arrowprops=dict(arrowstyle="->", ls="-", lw=1.1)
)
axes[0][2].annotate(
    "", xy=[0, 0.8], xytext=xytext, arrowprops=dict(ls="-", arrowstyle="-", lw=1.1)
)

# Plot the original and transformed distributions.
# (The comprehension variable `x` shadows the shifted axis defined here only
# inside the comprehension; the outer `x` is unchanged afterwards.)
x = xs_full - offset
y = [fi(x, delta, offset) for x in xs_full]
x_d = xs_domain - offset
y_domain_data = [
    [fi(x, delta, offset) / normz for x in xs_domain],
    [fi(x, delta, offset) for x in xs_domain],
    [
        fi(x, delta, offset) + fi(-x, delta, offset) + fi(x, delta, 2 - offset)
        for x in xs_domain
    ],
]
line_c = "#154ed4"
for ax, y_d in zip(axes[1], y_domain_data):
    ax.plot(x, y, c="k", linestyle="--")
    ax.plot(x_d, y_d, c=line_c)
    ax.fill_between(x_d, y_d, 0, alpha=0.2)

# Plot the Dirac deltas (stop-go case: point masses at the domain edges).
for i in (0, -1):
    axes[1][1].vlines(x_d[i], y_domain_data[1][i], 2, colors=line_c)

for ax in axes[1]:
    ax.set_xlabel("$\ell$")
axes[1][0].set_ylabel("$P(\ell|x)$")

for ax in axes.flatten():
    ax.set_xlim(*xlim)

for ax in axes[0]:
    ax.axis("off")
    ax.axis("scaled")
    ax.set_ylim(0, rect_h * 1.1)

for ax in axes[1]:
    ax.set_ylim(0, 1.5)
    # Add f_i label.
    ax.text(0.78, 0.15, r"$f_i(\ell)$", fontsize=11)

for ax, title in zip(axes[0], titles):
    ax.set_title(title)

fig.tight_layout()
fig.savefig((Path("plots") / "pdf_bc_case").with_suffix(".pdf"))
examples/pdf_bc_cases.ipynb