repo_name
stringlengths
6
77
path
stringlengths
8
215
license
stringclasses
15 values
content
stringlengths
335
154k
ffmmjj/desafio-dados-2016
experiments/Diplomas x Desempenho.ipynb
apache-2.0
quest_professor_df = pd.read_csv('/Users/bonifacio/projects/desafio-dados-2016/dados/microdados_saeb_2011/Dados/TS_QUEST_PROFESSOR.csv', sep=';') pedagogia = quest_professor_df['TX_RESP_Q004'] == 'D' matematica = quest_professor_df['TX_RESP_Q004'] == 'E' letras = quest_professor_df['TX_RESP_Q004'] == 'F' normal = quest_professor_df['TX_RESP_Q004'] == 'G' outros = quest_professor_df['TX_RESP_Q004'] == 'H' index_diplomados = pedagogia | matematica | letras | normal | outros quest_professor_df['DIPLOMADO'] = index_diplomados quest_professor_df['CONTAGEM'] = 1 diplomados_por_escola = quest_professor_df[index_diplomados][['DIPLOMADO', 'ID_ESCOLA']].groupby('ID_ESCOLA', as_index=False).sum() professores_por_escola = quest_professor_df[['CONTAGEM', 'ID_ESCOLA']].groupby('ID_ESCOLA', as_index=False).sum() professores_df = pd.merge(diplomados_por_escola, professores_por_escola, on='ID_ESCOLA') professores_df['TAXA_DIPLOMADOS'] = professores_df['DIPLOMADO'] / professores_df['CONTAGEM'] resultado_escola = pd.read_csv('/Users/bonifacio/projects/desafio-dados-2016/dados/microdados_saeb_2011/Dados/TS_RESULTADO_ESCOLA.csv', sep=';') resultado_escola['MEDIA_LP'] = resultado_escola['MEDIA_LP'].str.strip().str.replace(',', '.').str.replace('^$', '-1').astype(float) resultado_escola['MEDIA_MT'] = resultado_escola['MEDIA_MT'].str.strip().str.replace(',', '.').str.replace('^$', '-1').astype(float) merged_df = pd.merge(resultado_escola, professores_df, on='ID_ESCOLA') plt.scatter(merged_df['TAXA_DIPLOMADOS'], merged_df['MEDIA_LP']) plt.ylabel('Media LP escola') plt.xlabel('Professores com diploma') plt.show() plt.scatter(merged_df['TAXA_DIPLOMADOS'], merged_df['MEDIA_MT']) plt.ylabel('Media MT escola') plt.xlabel('Professores com diploma') plt.show() """ Explanation: Cruzando quantidade de diplomas com notas dos alunos End of explanation """ quest_professor_df['DIPLOMA_LETRAS'] = letras diplomas_letras_por_escola = quest_professor_df[letras][['DIPLOMA_LETRAS', 
'ID_ESCOLA']].groupby('ID_ESCOLA', as_index=False).sum() professores_df = pd.merge(diplomas_letras_por_escola, professores_por_escola, on='ID_ESCOLA') professores_df['TAXA_DIPLOMADOS_LETRAS'] = professores_df['DIPLOMA_LETRAS'] / professores_df['CONTAGEM'] letras_df = pd.merge(resultado_escola, professores_df, on='ID_ESCOLA') plt.scatter(letras_df['TAXA_DIPLOMADOS_LETRAS'], letras_df['MEDIA_LP']) plt.ylabel('Media LP escola') plt.xlabel('Professores com diploma de letras') plt.show() """ Explanation: Cruzando diplomas de letras com notas de língua portuguesa End of explanation """ quest_professor_df['DIPLOMA_MATEMATICA'] = letras diplomas_matematica_por_escola = quest_professor_df[letras][['DIPLOMA_MATEMATICA', 'ID_ESCOLA']].groupby('ID_ESCOLA', as_index=False).sum() professores_df = pd.merge(diplomas_matematica_por_escola, professores_por_escola, on='ID_ESCOLA') professores_df['TAXA_DIPLOMADOS_MATEMATICA'] = professores_df['DIPLOMA_MATEMATICA'] / professores_df['CONTAGEM'] matematica_df = pd.merge(resultado_escola, professores_df, on='ID_ESCOLA') plt.scatter(matematica_df['TAXA_DIPLOMADOS_MATEMATICA'], matematica_df['MEDIA_MT']) plt.ylabel('Media MT escola') plt.xlim(0,1.1) plt.ylim(-10,400) plt.xlabel('Professores com diploma de matematica') plt.show() """ Explanation: Cruzando diplomas de matemática com notas de matemática End of explanation """
andersonamaral/Sao-Paulo-Crime-Study
Sao_Paulo_Homicidios_Dolosos.ipynb
apache-2.0
list = ['Homicídio qualificado (art. 121, §2o.)'] list df.head() """ Explanation: Vou selecionar homicídio qualificado, Lesão Corporal seguida de morte, que são os 2 crimes com dolo que resultam em morte. End of explanation """ for i in list: df = df[df['RUBRICA']==i] df.head(3) df['DATA_OCORRENCIA_BO'] = pd.to_datetime(df['DATA_OCORRENCIA_BO']) """ Explanation: Abaixo faço um for loop pra construir um dataframe apenas desses dois crimes: End of explanation """ import matplotlib.pyplot as plt import seaborn as sns %matplotlib inline sns.set_style('darkgrid') df['dia_da_semana'] = df['DATA_OCORRENCIA_BO'].dt.weekday_name df['mes'] = df['DATA_OCORRENCIA_BO'].dt.month semana = pd.DataFrame(df['dia_da_semana'].value_counts()) semana semana.plot(title = 'Distribuição dos Homicídios por dia da Semana' , kind='barh',color = 'red', use_index=True, legend=True, sort_columns=True) mes = pd.DataFrame(df['mes'].value_counts()) mes.head() mes.plot(title = 'Distribuição dos Homicídios por dia mês' , kind='barh',color = 'blue', use_index=True, legend=True, sort_columns=True) top_10_horario_crimes = pd.DataFrame(df['HORA_OCORRENCIA_BO'].value_counts().head(10)) top_10_horario_crimes top_10_horario_crimes.plot(title = 'Top 10 de horarios de maior ocorrencia de crimes' , kind='barh',color = 'purple', use_index=True, legend=False, sort_columns=True) top_10_logradouros_mais_homicidios = pd.DataFrame(df['LOGRADOURO'].value_counts().head(10)) top_10_logradouros_mais_homicidios top_10_logradouros_mais_homicidios.plot(title = 'Top 10 logradouros com maior ocorrencia de homicídios' , kind='bar',color = 'green', use_index=True, legend=False, sort_columns=True) df.info() motivacao_homicidios = pd.DataFrame(df['DESDOBRAMENTO'].value_counts()) motivacao_homicidios.plot(title = 'Motivação dos homicídios' , kind='bar',color = 'pink', use_index=True, legend=True) top_10_idades_vitimas = pd.DataFrame(df['IDADE_PESSOA'].value_counts().head(10)) top_10_idades_vitimas top_10_idades_vitimas.index.name 
= 'Idades' top_10_idades_vitimas.plot(title = 'As idades mais comuns das vítimas' , kind='bar',color = 'orange', use_index=True, legend=False) top_10_idades_vitimas.index.name = 'Idades' import pandas as pd values = [[1,2], [2,5]] df = pd.DataFrame(values, columns=['Type A', 'Type B'], index=['Index 1','Index 2']) df.columns.name = 'Type' df.index.name = 'Index' df.plot(lw=2, colormap='jet', marker='.', markersize=10, title='Video streaming dropout by category') sexo = pd.DataFrame(df['SEXO_PESSOA'].value_counts()) sexo nd : str ‘line’ : line plot (default) ‘bar’ : vertical bar plot ‘barh’ : horizontal bar plot ‘hist’ : histogram ‘box’ : boxplot ‘kde’ : Kernel Density Estimation plot ‘density’ : same as ‘kde’ ‘area’ : area plot ‘pie’ : pie plot ‘scatter’ : scatter plot ‘hexbin’ : hexbin plot sexo.plot(title = 'Sexo das vítimas' , kind='pie', use_index=True, legend=False , subplots = True) delegacia = pd.DataFrame(df['NOME_DELEGACIA'].value_counts()) delegacia.head() top10_delegacias_homicidios = delegacia.head(10) top10_delegacias_homicidios top10_delegacias_homicidios.plot(title = 'As 10 delegacias com maior registro de homicidios' , kind='pie', use_index=True, legend=False , subplots = True) """ Explanation: Quais os meses, dias da semana e horários mais comuns de ocorrências de homicídio qualificado ? 
End of explanation """ df['NUM_BO'].plot(legend=True,figsize=(10,4)) timestamp = df[['DATA_OCORRENCIA_BO','HORA_OCORRENCIA_BO']] #allDays = pd.DataFrame(df.set_index('timestamp').groupby(pd.TimeGrouper('1H')).sum()) timestamp.head() timestamp['DATA_OCORRENCIA_BO'] = timestamp['DATA_OCORRENCIA_BO'].astype(str) timestamp.info() datetime = timestamp['DATA_OCORRENCIA_BO'] + ' ' + timestamp['HORA_OCORRENCIA_BO'] datetime = pd.DataFrame(datetime) datetime.head() datetime = pd.to_datetime(datetime[0]) datetime.head() datetime = pd.DataFrame(datetime) datetime = datetime.rename(columns={0: 'data_e_hora'}) datetime df = pd.merge(df, datetime, left_index=True, right_index=True, how='outer') df.info() df """ Explanation: Agora, os estudos usando Time Series End of explanation """ df[['HORA_OCORRENCIA_BO','data_e_hora']].head() df.set_index(keys = 'indices', append=True, inplace=True) df.sort(columns='DATA_OCORRENCIA_BO', axis=0, ascending=True, inplace=False) df.isnull().sum() df.info() def count(num): if num > 0: return 1 else: return 'nan' df['quantidade'] = df['mes'].apply(count) df df_agrupado_por_dia = pd.DataFrame(df.set_index('data_e_hora').groupby(pd.TimeGrouper('24H')).sum()) df_agrupado_por_dia df['data_e_hora'].max() df_agrupado_por_dia['quantidade'].plot(legend=True,figsize=(16,8)) """ Explanation: Abaixo, só confirmando o datetime criado! End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/ncc/cmip6/models/noresm2-lmec/atmoschem.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-lmec', 'atmoschem') """ Explanation: ES-DOC CMIP6 Model Properties - Atmoschem MIP Era: CMIP6 Institute: NCC Source ID: NORESM2-LMEC Topic: Atmoschem Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry. Properties: 84 (39 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:24 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --> Software Properties 3. Key Properties --> Timestep Framework 4. Key Properties --> Timestep Framework --> Split Operator Order 5. Key Properties --> Tuning Applied 6. Grid 7. Grid --> Resolution 8. Transport 9. Emissions Concentrations 10. Emissions Concentrations --> Surface Emissions 11. Emissions Concentrations --> Atmospheric Emissions 12. Emissions Concentrations --> Concentrations 13. Gas Phase Chemistry 14. Stratospheric Heterogeneous Chemistry 15. 
Tropospheric Heterogeneous Chemistry 16. Photo Chemistry 17. Photo Chemistry --> Photolysis 1. Key Properties Key properties of the atmospheric chemistry 1.1. Model Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview of atmospheric chemistry model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE    Type: STRING    Cardinality: 1.1 Name of atmospheric chemistry model code. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "troposhere" # "stratosphere" # "mesosphere" # "mesosphere" # "whole atmosphere" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.3. Chemistry Scheme Scope Is Required: TRUE    Type: ENUM    Cardinality: 1.N Atmospheric domains covered by the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.4. Basic Approximations Is Required: TRUE    Type: STRING    Cardinality: 1.1 Basic approximations made in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "3D mass/mixing ratio for gas" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 1.5. Prognostic Variables Form Is Required: TRUE    Type: ENUM    Cardinality: 1.N Form of prognostic variables in the atmospheric chemistry component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 1.6. Number Of Tracers Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Number of advected tracers in the atmospheric chemistry model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.family_approach') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.7. Family Approach Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Atmospheric chemistry calculations (not advection) generalized into families of species? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 1.8. Coupling With Chemical Reactivity Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --> Software Properties Software properties of aerosol code 2.1. Repository Is Required: FALSE    Type: STRING    Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.2. Code Version Is Required: FALSE    Type: STRING    Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2.3. Code Languages Is Required: FALSE    Type: STRING    Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Operator splitting" # "Integrated" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3. Key Properties --> Timestep Framework Timestepping in the atmospheric chemistry model 3.1. Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Mathematical method deployed to solve the evolution of a given variable End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.2. Split Operator Advection Timestep Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Timestep for chemical species advection (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.3. Split Operator Physical Timestep Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Timestep for physics (in seconds). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.4. Split Operator Chemistry Timestep Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Timestep for chemistry (in seconds). 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 3.5. Split Operator Alternate Order Is Required: FALSE    Type: BOOLEAN    Cardinality: 0.1 ? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 3.6. Integrated Timestep Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 Timestep for the atmospheric chemistry model (in seconds) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Explicit" # "Implicit" # "Semi-implicit" # "Semi-analytic" # "Impact solver" # "Back Euler" # "Newton Raphson" # "Rosenbrock" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 3.7. Integrated Scheme Type Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Specify the type of timestep scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order ** 4.1. Turbulence Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.2. Convection Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.3. Precipitation Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.4. Emissions Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.5. Deposition Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.6. Gas Phase Chemistry Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.9. 
Photo Chemistry Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 4.10. Aerosols Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --> Tuning Applied Tuning methodology for atmospheric chemistry component 5.1. Description Is Required: TRUE    Type: STRING    Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.2. Global Mean Metrics Used Is Required: FALSE    Type: STRING    Cardinality: 0.N List set of metrics of the global mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.3. Regional Metrics Used Is Required: FALSE    Type: STRING    Cardinality: 0.N List of regional metrics of mean state used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5.4. Trend Metrics Used Is Required: FALSE    Type: STRING    Cardinality: 0.N List observed trend metrics used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Grid Atmospheric chemistry grid 6.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Describe the general structure of the atmopsheric chemistry grid End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 6.2. Matches Atmosphere Grid Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 * Does the atmospheric chemistry grid match the atmosphere grid?* End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Grid --> Resolution Resolution in the atmospheric chemistry grid 7.1. Name Is Required: TRUE    Type: STRING    Cardinality: 1.1 This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc. 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Canonical Horizontal Resolution Is Required: FALSE    Type: STRING    Cardinality: 0.1 Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.3. Number Of Horizontal Gridpoints Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Total number of horizontal (XY) points (or degrees of freedom) on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 7.4. Number Of Vertical Levels Is Required: FALSE    Type: INTEGER    Cardinality: 0.1 Number of vertical levels resolved on computational grid. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 7.5. Is Adaptive Grid Is Required: FALSE    Type: BOOLEAN    Cardinality: 0.1 Default is False. Set true if grid resolution changes during execution. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. Transport Atmospheric chemistry transport 8.1. 
Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 General overview of transport implementation End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 8.2. Use Atmospheric Transport Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is transport handled by the atmosphere, rather than within atmospheric cehmistry? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.transport.transport_details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Transport Details Is Required: FALSE    Type: STRING    Cardinality: 0.1 If transport is handled within the atmospheric chemistry scheme, describe it. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Emissions Concentrations Atmospheric chemistry emissions 9.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview atmospheric chemistry emissions End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Vegetation" # "Soil" # "Sea surface" # "Anthropogenic" # "Biomass burning" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10. Emissions Concentrations --> Surface Emissions ** 10.1. Sources Is Required: FALSE    Type: ENUM    Cardinality: 0.N Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 10.2. Method Is Required: FALSE    Type: ENUM    Cardinality: 0.N Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.3. Prescribed Climatology Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted at the surface and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.5. 
Interactive Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted at the surface and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10.6. Other Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted at the surface and specified via any other method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Aircraft" # "Biomass burning" # "Lightning" # "Volcanos" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11. Emissions Concentrations --> Atmospheric Emissions TO DO 11.1. Sources Is Required: FALSE    Type: ENUM    Cardinality: 0.N Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Climatology" # "Spatially uniform mixing ratio" # "Spatially uniform concentration" # "Interactive" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 11.2. Method Is Required: FALSE    Type: ENUM    Cardinality: 0.N Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.3. Prescribed Climatology Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant)) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.4. Prescribed Spatially Uniform Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted in the atmosphere and prescribed as spatially uniform End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.5. Interactive Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an interactive method End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11.6. Other Emitted Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of chemical species emitted in the atmosphere and specified via an "other method" End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12. Emissions Concentrations --> Concentrations TO DO 12.1. Prescribed Lower Boundary Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of species prescribed at the lower boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Prescribed Upper Boundary Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of species prescribed at the upper boundary. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13. Gas Phase Chemistry Atmospheric chemistry transport 13.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview gas phase atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "HOx" # "NOy" # "Ox" # "Cly" # "HSOx" # "Bry" # "VOCs" # "isoprene" # "H2O" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13.2. Species Is Required: FALSE    Type: ENUM    Cardinality: 0.N Species included in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.3. 
Number Of Bimolecular Reactions Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of bi-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.4. Number Of Termolecular Reactions Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of ter-molecular reactions in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of reactions in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of reactions in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.7. Number Of Advected Species Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of advected species in the gas phase chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 13.8. Number Of Steady State Species Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.9. Interactive Dry Deposition Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.10. Wet Deposition Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 13.11. Wet Oxidation Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14. Stratospheric Heterogeneous Chemistry Atmospheric chemistry startospheric heterogeneous chemistry 14.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview stratospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Cly" # "Bry" # "NOy" # TODO - please enter value(s) """ Explanation: 14.2. Gas Phase Species Is Required: FALSE    Type: ENUM    Cardinality: 0.N Gas phase species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Polar stratospheric ice" # "NAT (Nitric acid trihydrate)" # "NAD (Nitric acid dihydrate)" # "STS (supercooled ternary solution aerosol particule))" # TODO - please enter value(s) """ Explanation: 14.3. Aerosol Species Is Required: FALSE    Type: ENUM    Cardinality: 0.N Aerosol species included in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 14.4. Number Of Steady State Species Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of steady state species in the stratospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.5. Sedimentation Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is sedimentation is included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 14.6. Coagulation Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is coagulation is included in the stratospheric heterogeneous chemistry scheme or not? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15. Tropospheric Heterogeneous Chemistry Atmospheric chemistry tropospheric heterogeneous chemistry 15.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview tropospheric heterogenous atmospheric chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Gas Phase Species Is Required: FALSE    Type: STRING    Cardinality: 0.1 List of gas phase species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "Sulphate" # "Nitrate" # "Sea salt" # "Dust" # "Ice" # "Organic" # "Black carbon/soot" # "Polar stratospheric ice" # "Secondary organic aerosols" # "Particulate organic matter" # TODO - please enter value(s) """ Explanation: 15.3. Aerosol Species Is Required: FALSE    Type: ENUM    Cardinality: 0.N Aerosol species included in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 15.4. Number Of Steady State Species Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of steady state species in the tropospheric heterogeneous chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.5. Interactive Dry Deposition Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 15.6. Coagulation Is Required: TRUE    Type: BOOLEAN    Cardinality: 1.1 Is coagulation is included in the tropospheric heterogeneous chemistry scheme or not? 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16. Photo Chemistry Atmospheric chemistry photo chemistry 16.1. Overview Is Required: TRUE    Type: STRING    Cardinality: 1.1 Overview atmospheric photo chemistry End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # TODO - please enter value(s) """ Explanation: 16.2. Number Of Reactions Is Required: TRUE    Type: INTEGER    Cardinality: 1.1 The number of reactions in the photo-chemistry scheme. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Offline (clear sky)" # "Offline (with clouds)" # "Online" # TODO - please enter value(s) """ Explanation: 17. Photo Chemistry --> Photolysis Photolysis scheme 17.1. Method Is Required: TRUE    Type: ENUM    Cardinality: 1.1 Photolysis scheme End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.2. Environmental Conditions Is Required: FALSE    Type: STRING    Cardinality: 0.1 Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.) End of explanation """
afronski/playground-notes
introduction-to-big-data-with-apache-spark/solutions/lab1_word_count_student.ipynb
mit
wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat'] wordsRDD = sc.parallelize(wordsList, 4) # Print out the type of wordsRDD print type(wordsRDD) """ Explanation: + Word Count Lab: Building a word count application This lab will build on the techniques covered in the Spark tutorial to develop a simple word count application. The volume of unstructured text in existence is growing dramatically, and Spark is an excellent tool for analyzing this type of data. In this lab, we will write code that calculates the most common words in the Complete Works of William Shakespeare retrieved from Project Gutenberg. This could also be scaled to find the most common words on the Internet. During this lab we will cover: Part 1: Creating a base RDD and pair RDDs Part 2: Counting with pair RDDs Part 3: Finding unique words and a mean value Part 4: Apply word count to a file Note that, for reference, you can look up the details of the relevant methods in Spark's Python API Part 1: Creating a base RDD and pair RDDs In this part of the lab, we will explore creating a base RDD with parallelize and using pair RDDs to count words. (1a) Create a base RDD We'll start by generating a base RDD by using a Python list and the sc.parallelize method. Then we'll print out the type of the base RDD. End of explanation """ def makePlural(word): """Adds an 's' to `word`. Note: This is a simple function that only adds an 's'. No attempt is made to follow proper pluralization rules. Args: word (str): A string. Returns: str: A string with 's' added to it. 
""" return word + 's' print makePlural('cat') # One way of completing the function def makePlural(word): return word + 's' print makePlural('cat') # Load in the testing code and check to see if your answer is correct # If incorrect it will report back '1 test failed' for each failed test # Make sure to rerun any cell you change before trying the test again from test_helper import Test # TEST Pluralize and test (1b) Test.assertEquals(makePlural('rat'), 'rats', 'incorrect result: makePlural does not add an s') """ Explanation: (1b) Pluralize and test Let's use a map() transformation to add the letter 's' to each string in the base RDD we just created. We'll define a Python function that returns the word with an 's' at the end of the word. Please replace &lt;FILL IN&gt; with your solution. If you have trouble, the next cell has the solution. After you have defined makePlural you can run the third cell which contains a test. If you implementation is correct it will print 1 test passed. This is the general form that exercises will take, except that no example solution will be provided. Exercises will include an explanation of what is expected, followed by code cells where one cell will have one or more &lt;FILL IN&gt; sections. The cell that needs to be modified will have # TODO: Replace &lt;FILL IN&gt; with appropriate code on its first line. Once the &lt;FILL IN&gt; sections are updated and the code is run, the test cell can then be run to verify the correctness of your solution. The last code cell before the next markdown section will contain the tests. 
End of explanation """ pluralRDD = wordsRDD.map(makePlural) print pluralRDD.collect() # TEST Apply makePlural to the base RDD(1c) Test.assertEquals(pluralRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'], 'incorrect values for pluralRDD') """ Explanation: (1c) Apply makePlural to the base RDD Now pass each item in the base RDD into a map() transformation that applies the makePlural() function to each element. And then call the collect() action to see the transformed RDD. End of explanation """ pluralLambdaRDD = wordsRDD.map(lambda w: w + 's') print pluralLambdaRDD.collect() # TEST Pass a lambda function to map (1d) Test.assertEquals(pluralLambdaRDD.collect(), ['cats', 'elephants', 'rats', 'rats', 'cats'], 'incorrect values for pluralLambdaRDD (1d)') """ Explanation: (1d) Pass a lambda function to map Let's create the same RDD using a lambda function. End of explanation """ pluralLengths = (pluralRDD .map(lambda w: len(w)) .collect()) print pluralLengths # TEST Length of each word (1e) Test.assertEquals(pluralLengths, [4, 9, 4, 4, 4], 'incorrect values for pluralLengths') """ Explanation: (1e) Length of each word Now use map() and a lambda function to return the number of characters in each word. We'll collect this result directly into a variable. End of explanation """ wordPairs = wordsRDD.map(lambda w: (w, 1)) print wordPairs.collect() # TEST Pair RDDs (1f) Test.assertEquals(wordPairs.collect(), [('cat', 1), ('elephant', 1), ('rat', 1), ('rat', 1), ('cat', 1)], 'incorrect value for wordPairs') """ Explanation: (1f) Pair RDDs The next step in writing our word counting program is to create a new type of RDD, called a pair RDD. A pair RDD is an RDD where each element is a pair tuple (k, v) where k is the key and v is the value. In this example, we will create a pair consisting of ('&lt;word&gt;', 1) for each word element in the RDD. We can create the pair RDD using the map() transformation with a lambda() function to create a new RDD. 
End of explanation """ wordsGrouped = wordPairs.groupByKey() for key, value in wordsGrouped.collect(): print '{0}: {1}'.format(key, list(value)) # TEST groupByKey() approach (2a) Test.assertEquals(sorted(wordsGrouped.mapValues(lambda x: list(x)).collect()), [('cat', [1, 1]), ('elephant', [1]), ('rat', [1, 1])], 'incorrect value for wordsGrouped') """ Explanation: Part 2: Counting with pair RDDs Now, let's count the number of times a particular word appears in the RDD. There are multiple ways to perform the counting, but some are much less efficient than others. A naive approach would be to collect() all of the elements and count them in the driver program. While this approach could work for small datasets, we want an approach that will work for any size dataset including terabyte- or petabyte-sized datasets. In addition, performing all of the work in the driver program is slower than performing it in parallel in the workers. For these reasons, we will use data parallel operations. (2a) groupByKey() approach An approach you might first consider (we'll see shortly that there are better ways) is based on using the groupByKey() transformation. As the name implies, the groupByKey() transformation groups all the elements of the RDD with the same key into a single list in one of the partitions. There are two problems with using groupByKey(): The operation requires a lot of data movement to move all the values into the appropriate partitions. The lists can be very large. Consider a word count of English Wikipedia: the lists for common words (e.g., the, a, etc.) would be huge and could exhaust the available memory in a worker. Use groupByKey() to generate a pair RDD of type ('word', iterator). 
End of explanation """ import itertools wordCountsGrouped = wordsGrouped.map(lambda (k, v): (k, sum(list(v)))) print wordCountsGrouped.collect() # TEST Use groupByKey() to obtain the counts (2b) Test.assertEquals(sorted(wordCountsGrouped.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect value for wordCountsGrouped') """ Explanation: (2b) Use groupByKey() to obtain the counts Using the groupByKey() transformation creates an RDD containing 3 elements, each of which is a pair of a word and a Python iterator. Now sum the iterator using a map() transformation. The result should be a pair RDD consisting of (word, count) pairs. End of explanation """ wordCounts = wordPairs.reduceByKey(lambda a,b: a + b) print wordCounts.collect() # TEST Counting using reduceByKey (2c) Test.assertEquals(sorted(wordCounts.collect()), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect value for wordCounts') """ Explanation: (2c) Counting using reduceByKey A better approach is to start from the pair RDD and then use the reduceByKey() transformation to create a new pair RDD. The reduceByKey() transformation gathers together pairs that have the same key and applies the function provided to two values at a time, iteratively reducing all of the values to a single value. reduceByKey() operates by applying the function first within each partition on a per-key basis and then across the partitions, allowing it to scale efficiently to large datasets. End of explanation """ # TODO: Replace <FILL IN> with appropriate code wordCountsCollected = (wordsRDD .map(lambda w: (w, 1)) .reduceByKey(lambda a,b: a + b) .collect()) print wordCountsCollected # TEST All together (2d) Test.assertEquals(sorted(wordCountsCollected), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect value for wordCountsCollected') """ Explanation: (2d) All together The expert version of the code performs the map() to pair RDD, reduceByKey() transformation, and collect in one statement. 
End of explanation """ # TODO: Replace <FILL IN> with appropriate code uniqueWords = wordsRDD.distinct().count() print uniqueWords # TEST Unique words (3a) Test.assertEquals(uniqueWords, 3, 'incorrect count of uniqueWords') """ Explanation: Part 3: Finding unique words and a mean value (3a) Unique words Calculate the number of unique words in wordsRDD. You can use other RDDs that you have already created to make this easier. End of explanation """ from operator import add totalCount = (wordCounts .map(lambda (w, c): c) .reduce(add)) average = totalCount / float(uniqueWords) print totalCount print round(average, 2) # TEST Mean using reduce (3b) Test.assertEquals(round(average, 2), 1.67, 'incorrect value of average') """ Explanation: (3b) Mean using reduce Find the mean number of words per unique word in wordCounts. Use a reduce() action to sum the counts in wordCounts and then divide by the number of unique words. First map() the pair RDD wordCounts, which consists of (key, value) pairs, to an RDD of values. End of explanation """ from operator import add def wordCount(wordListRDD): """Creates a pair RDD with word counts from an RDD of words. Args: wordListRDD (RDD of str): An RDD consisting of words. Returns: RDD of (str, int): An RDD consisting of (word, count) tuples. """ return wordListRDD.map(lambda w: (w, 1)).reduceByKey(add) print wordCount(wordsRDD).collect() # TEST wordCount function (4a) Test.assertEquals(sorted(wordCount(wordsRDD).collect()), [('cat', 2), ('elephant', 1), ('rat', 2)], 'incorrect definition for wordCount function') """ Explanation: Part 4: Apply word count to a file In this section we will finish developing our word count application. We'll have to build the wordCount function, deal with real world problems like capitalization and punctuation, load in our data source, and compute the word count on the new data. (4a) wordCount function First, define a function for word counting. 
You should reuse the techniques that have been covered in earlier parts of this lab. This function should take in an RDD that is a list of words like wordsRDD and return a pair RDD that has all of the words and their associated counts. End of explanation """ import re def removePunctuation(text): """Removes punctuation, changes to lower case, and strips leading and trailing spaces. Note: Only spaces, letters, and numbers should be retained. Other characters should should be eliminated (e.g. it's becomes its). Leading and trailing spaces should be removed after punctuation is removed. Args: text (str): A string. Returns: str: The cleaned up string. """ pattern = re.compile("[^a-z0-9 ]") return pattern.sub('', text.strip().lower()) print removePunctuation('Hi, you!') print removePunctuation(' No under_score!') # TEST Capitalization and punctuation (4b) Test.assertEquals(removePunctuation(" The Elephant's 4 cats. "), 'the elephants 4 cats', 'incorrect definition for removePunctuation function') """ Explanation: (4b) Capitalization and punctuation Real world files are more complicated than the data we have been using in this lab. Some of the issues we have to address are: Words should be counted independent of their capitialization (e.g., Spark and spark should be counted as the same word). All punctuation should be removed. Any leading or trailing spaces on a line should be removed. Define the function removePunctuation that converts all text to lower case, removes any punctuation, and removes leading and trailing spaces. Use the Python re module to remove any text that is not a letter, number, or space. Reading help(re.sub) might be useful. 
End of explanation """ # Just run this code import os.path baseDir = os.path.join('data') inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt') fileName = os.path.join(baseDir, inputPath) shakespeareRDD = (sc .textFile(fileName, 8) .map(removePunctuation)) print '\n'.join(shakespeareRDD .zipWithIndex() # to (line, lineNum) .map(lambda (l, num): '{0}: {1}'.format(num, l)) # to 'lineNum: line' .take(15)) """ Explanation: (4c) Load a text file For the next part of this lab, we will use the Complete Works of William Shakespeare from Project Gutenberg. To convert a text file into an RDD, we use the SparkContext.textFile() method. We also apply the recently defined removePunctuation() function using a map() transformation to strip out the punctuation and change all text to lowercase. Since the file is large we use take(15), so that we only print 15 lines. End of explanation """ shakespeareWordsRDD = shakespeareRDD.flatMap(lambda l: l.split(' ')) shakespeareWordCount = shakespeareWordsRDD.count() print shakespeareWordsRDD.top(5) print shakespeareWordCount # TEST Words from lines (4d) # This test allows for leading spaces to be removed either before or after # punctuation is removed. Test.assertTrue(shakespeareWordCount == 927631 or shakespeareWordCount == 928908, 'incorrect value for shakespeareWordCount') Test.assertEquals(shakespeareWordsRDD.top(5), [u'zwaggerd', u'zounds', u'zounds', u'zounds', u'zounds'], 'incorrect value for shakespeareWordsRDD') """ Explanation: (4d) Words from lines Before we can use the wordcount() function, we have to address two issues with the format of the RDD: The first issue is that that we need to split each line by its spaces. The second issue is we need to filter out empty lines. Apply a transformation that will split each element of the RDD by its spaces. For each element of the RDD, you should apply Python's string split() function. 
You might think that a map() transformation is the way to do this, but think about what the result of the split() function will be. End of explanation """ shakeWordsRDD = shakespeareWordsRDD.filter(lambda w: w != '') shakeWordCount = shakeWordsRDD.count() print shakeWordCount # TEST Remove empty elements (4e) Test.assertEquals(shakeWordCount, 882996, 'incorrect value for shakeWordCount') """ Explanation: (4e) Remove empty elements The next step is to filter out the empty elements. Remove all entries where the word is ''. End of explanation """ # TODO: Replace <FILL IN> with appropriate code top15WordsAndCounts = wordCount(shakeWordsRDD).takeOrdered(15, lambda (w, c): c * -1) print '\n'.join(map(lambda (w, c): '{0}: {1}'.format(w, c), top15WordsAndCounts)) # TEST Count the words (4f) Test.assertEquals(top15WordsAndCounts, [(u'the', 27361), (u'and', 26028), (u'i', 20681), (u'to', 19150), (u'of', 17463), (u'a', 14593), (u'you', 13615), (u'my', 12481), (u'in', 10956), (u'that', 10890), (u'is', 9134), (u'not', 8497), (u'with', 7771), (u'me', 7769), (u'it', 7678)], 'incorrect value for top15WordsAndCounts') """ Explanation: (4f) Count the words We now have an RDD that is only words. Next, let's apply the wordCount() function to produce a list of word counts. We can view the top 15 words by using the takeOrdered() action; however, since the elements of the RDD are pairs, we need a custom sort function that sorts using the value part of the pair. You'll notice that many of the words are common English words. These are called stopwords. In a later lab, we will see how to eliminate them from the results. Use the wordCount() function and takeOrdered() to obtain the fifteen most common words and their counts. End of explanation """
pycrystem/pycrystem
doc/demos/09 Angular Correlations of Amorphous Materials.ipynb
gpl-3.0
data_path = "data/09/PdNiP_test.hspy" %matplotlib inline import pyxem as pxm import hyperspy.api as hs pxm.__version__ data = hs.load("./data/09/PdNiP_test.hspy") """ Explanation: Angular Correlations of Amorphous Materials This notebook demonstrates caclulating the angular correlation of diffraction patterns recorded from an amorphous (or crystalline) material. The dataset used for this demonstration is a 4-D STEM dataset of a PdNiP deposited thin film glass aquired using a DE-16 Camera and a 200keV FEI-Titan electron microscopt at 100 fps. The probe size was ~2-nm and step size was .365 nm so there there is singificant probe overlap in the probe positions. This functionality has been checked to run with pyxem-0.13.2 (May 2021). Bugs are always possible, do not trust the code blindly, and if you experience any issues please report them here: https://github.com/pyxem/pyxem-demos/issues Background Angular Correlations are a very natural extension to variance type studies. They offer more insight into the symmetry of the strucutures being studied as well as offering the ability to be studied spatially. Mathmatically, the Angular correlation is the angular-autocorrelation of some polar unwrapped diffraction pattern I(k). <p style="text-align: center;"> $ C(k,\phi) = \frac{<I(k, \theta)*I(k, \theta+\phi)>_\theta - <I(k,\theta)>^2_\theta }{<I(k, \theta)>^2_\theta} $ </p> This is simlar to the radial ("r") variance often calculated in Fluctuation Electron Microscopy. Contents <a href='#loa'> Importing & Visualization</a> <a href='#s2'> Polar Reprojection</a> <a href='#s3'> Angular Correlation</a> <a href='#s4'> Power Spectrum and Correlation Maps</a> <a id='s1'></a> 1 - Importing and Visualization This section goes over loading the data from the data folder and visualizing the data for further use. 
End of explanation """ data.set_signal_type("electron_diffraction") data.beam_energy=200 data.unit = "k_nm^-1" mask =data.get_direct_beam_mask(20) # Affine correction from fitting an ellipse import numpy as np center=(31.2,31.7) affine=np.array([[ 1.03725511, -0.02662789, 0. ], [-0.02662789, 1.01903215, 0. ], [ 0. , 0. , 1. ]]) data.set_ai(center=center) rad = data.get_azimuthal_integral2d(npt=100) """ Explanation: <a id='s2'></a> 2 - Polar Reprojection This section deals with converting the signal to a polar signal. This is probably the most important and difficult part of the analysis. Even small distortions in the pattern or misinterpertation of the center of the diffraction pattern will negitively affect the ability to determine correlations. There is still some ongoing development on methods for identifying and correcting for these distortions but a good check is always to perform the correct and make sure that the first amorphous ring is a line after the polar reprojection. In general your eye should be very good at identifying that. Another thing to notice is that after the correlation if you have small splititing in all of your peaks(especially the self correlation) then most likely your center isn't completely correct. End of explanation """ rad.sum().plot() """ Explanation: Note: This isn't perfect, as you can see there is still some distortion that an affine transformation could fix, but for the purposes of this demo this it will suffice End of explanation """ summed = rad.sum() mask = ((summed>4e6)+(summed<3e5)) mask.plot() rad.plot(vmax=4000) cor = rad.get_angular_correlation(mask=mask) cor.plot() cor = rad.map(pxm.utils.correlation_utils._correlation, inplace=False, axis=1, normalize=True) cor.isig[:].plot(vmax=1, vmin=-1) """ Explanation: <a id='s3'></a> 3 - Angular Correlations This section deals with converting the signal to a correlation signal. The most important part here is to properly mask the data. 
This is important for example if you have a beam stop End of explanation """ power = cor.get_angular_power() import matplotlib.pyplot as plt f = plt.figure(figsize=(15,10)) power.plot_symmetries(k_region = [3.,4.5],fig=f) """ Explanation: <a id='s4'></a> 4 - Power Spectrum and Correlation Maps This section deals with visualization of the correlations as correlation maps. These are spatial maps of the strucutre in some material. End of explanation """
NuGrid/NuPyCEE
regression_tests/temp/RTS_plot_functions.ipynb
bsd-3-clause
# Regression driver for every SYGMA plotting routine; calls each plot first
# with defaults, then with user-specified arguments (see Explanation below).
#from imp import *
#s=load_source('sygma','/home/nugrid/nugrid/SYGMA/SYGMA_online/SYGMA_dev/sygma.py')
#import mpld3
#mpld3.enable_notebook()
import sygma as s
reload(s)
import matplotlib.pyplot as plt
%matplotlib inline
# Baseline run: solar metallicity, 1e7 yr steps to 2e7 yr.
s1=s.sygma(iniZ=0.02,dt=1e7,tend=2e7)
"""
Explanation: Regression test suite: Test of all plotting functions
Plotting functions are called in the order as they appear in the code. Each field calls first the function with default input and then with user-specified input.
You can find the documentation <a href="doc/sygma.html">here</a>.
$\odot$ Plotting functions tests
End of explanation
"""
s1.plot_yield_input() #[1,3,5,12][Fe/H]
s1.plot_yield_input(fig=2,xaxis='mini',yaxis='[Fe/H]',iniZ=0.0001,masses=[1,3,12,25],marker='s',color='r',shape='-')
s1.plot_yield_input(fig=3,xaxis='[C/H]',yaxis='[Fe/H]',iniZ=0.0001,masses=[1,3,12,25],marker='x',color='b',shape='--')
"""
Explanation: plot_yield_input
To plot the yield data
End of explanation
"""
s1.plot_mass()
s1.plot_mass(specie='N',shape='--',marker='x')
#s1.plot_mass_multi()
#s1.plot_mass_multi(fig=1,specie=['C','N'],ylims=[],source='all',norm=False,label=[],shape=['-','--'],marker=['o','D'],color=['r','b'],markevery=20)
#plt.legend()
"""
Explanation: The following commands plot the ISM metallicity in spectroscopic notation. 
s1.plot_mass
End of explanation
"""
s1.plot_massfrac()
s1.plot_massfrac(yaxis='He-4',shape='--',marker='x')
"""
Explanation: s1.plot_massfrac
End of explanation
"""
s1.plot_spectro()
s1.plot_spectro(yaxis='[O/Fe]',marker='x',shape='--')
"""
Explanation: s1.plot_spectro
End of explanation
"""
s1.plot_totmasses()
s1.plot_totmasses(source='agb',shape='--',marker='x')
s1.plot_totmasses(mass='stars',shape=':',marker='^')
"""
Explanation: s1.plot_totmasses
End of explanation
"""
# Fresh simulation with a custom Salpeter IMF and fixed-Z yield tables, used
# to exercise the supernova-distribution plots below.
import sygma as s
reload(s)
s1=s.sygma(iolevel=0,mgal=1e11,dt=1e7,tend=1.3e10,imf_type='salpeter',imf_bdys=[1,30],special_timesteps=-1,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=True, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn',pop3_table='yield_tables/popIII_h1.txt')
#s1.plot_sn_distr(rate=True,label1='SN1a, rate',label2='SNII, rate',marker1='o',marker2='s')
s1.plot_sn_distr(fig=4,rate=False,label1='SN1a, number',label2='SNII number',marker1='d',marker2='p')
##plt.xlim(1e6,1e10)
#plt.ylabel('Number/Rate')
s1.plot_sn_distr()
s1.plot_sn_distr(fig=5,rate=True,rate_only='',xaxis='time',label1='SN1a',label2='SN2',shape1=':',shape2='--',marker1='o',marker2='s',color1='k',color2='b',markevery=20)
"""
Explanation: Test of SNIa and SNII rate plots
End of explanation
"""
#s1=s.sygma(iolevel=0,mgal=1e11,dt=1e6,tend=1.3e10,imf_type='salpeter',imf_bdys=[1,30],special_timesteps=-1,iniZ=-1,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_h1.txt',sn1a_on=True, sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn',pop3_table='yield_tables/popIII_h1.txt')
#s1.plot_sn_distr(rate=True,label1='SN1a, rate',label2='SNII, rate',marker1='o',marker2='s')
#s1.plot_sn_distr(rate=False,label1='SN1a, number',label2='SNII number',marker1='d',marker2='p')
#plt.xlim(1e6,1e10)
#plt.ylabel('Number/Rate')
#s1=s.sygma(iniZ=0.0001,dt=1e9,tend=2e9)
#s2=s.sygma(iniZ=0.02)#,dt=1e7,tend=2e9)
reload(s)
s1=s.sygma(iolevel=0,iniZ=0.02,dt=1e8,tend=1e9) # standard run - not working
#s2=s.sygma(iniZ=0.02,dt=1e8,tend=1e10)
"""
Explanation: One point at the beginning for only 1 starburst
End of explanation
"""
s1.plot_mass_range_contributions()
s1.plot_mass_range_contributions(fig=7,specie='O',rebin=0.5,label='',shape='-',marker='o',color='b',markevery=20,extralabel=False,log=False)
#s1.plot_mass_range_contributions(fig=7,specie='O',prodfac=True,rebin=0.5,label='',shape='-',marker='o',color='r',markevery=20,extralabel=False,log=False)
"""
Explanation: plot_mass_range_contributions
End of explanation
"""
# Two instantaneous starbursts (starbursts=[0.1,0.1]) with a Kroupa IMF and
# SNIa disabled; checks star-formation-rate and per-burst contribution plots.
import sygma as s
reload(s)
ssp1=s.sygma(iolevel=0,dt=1e8,mgal=1e11,starbursts=[0.1,0.1],tend=1e9,special_timesteps=-1,imf_type='kroupa',imf_bdys=[0.1,100],sn1a_on=False,hardsetZ=0.0001,table='yield_tables/isotope_yield_table_h1.txt', sn1a_table='yield_tables/sn1a_h1.txt', iniabu_table='yield_tables/iniabu/iniab1.0E-04GN93_alpha_h1.ppn')
ssp1.plot_star_formation_rate()
ssp1.plot_star_formation_rate(fig=6,marker='o',shape=':')
ssp1.plot_mass_range_contributions(fig=7,specie='H',prodfac=False,rebin=-1,time=-1,label='Total burst',shape='-',marker='o',color='r',markevery=20,extralabel=False,log=False)
ssp1.plot_mass_range_contributions(fig=7,specie='H',prodfac=False,rebin=-1,time=1e8,label='Burst at 1e8',shape='-',marker='o',color='b',markevery=20,extralabel=False,log=False)
"""
Explanation: Tests with two starbursts
End of explanation
"""
#s1.write_evol_table(elements=['H','He','C'])
s1.write_evol_table(elements=['H'],isotopes=['H-1'],table_name='gce_table.txt',interact=False)
"""
Explanation: write_evol_table
End of explanation
"""
roebius/deeplearning_keras2
nbs2/seq2seq-translation.ipynb
apache-2.0
import unicodedata, string, re, random, time, math, torch, torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import keras, numpy as np
from keras.preprocessing import sequence
"""
Explanation: Requirements
End of explanation
"""
# Special vocabulary indices for the start-of-sentence and end-of-sentence
# markers; every Lang instance reserves slots 0 and 1 for them.
SOS_token = 0
EOS_token = 1

class Lang:
    """Vocabulary for one language: word<->index maps plus per-word counts."""

    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # Count SOS and EOS

    def addSentence(self, sentence):
        # Sentences are pre-normalized downstream, so a plain space split
        # is sufficient tokenization here.
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        # Assign the next free index to unseen words; otherwise just bump
        # the frequency count (word2count can later drive rare-word pruning).
        if word not in self.word2index:
            self.word2index[word] = self.n_words
            self.word2count[word] = 1
            self.index2word[self.n_words] = word
            self.n_words += 1
        else:
            self.word2count[word] += 1
"""
Explanation: Loading data files
The data for this project is a set of many thousands of English to French translation pairs.
This question on Open Data Stack Exchange pointed me to the open translation site http://tatoeba.org/ which has downloads available at http://tatoeba.org/eng/downloads - and better yet, someone did the extra work of splitting language pairs into individual text files here: http://www.manythings.org/anki/
The English to French pairs are too big to include in the repo, so download to data/fra.txt before continuing. The file is a tab separated list of translation pairs:
I am cold.    Je suis froid.
We'll need a unique index per word to use as the inputs and targets of the networks later. To keep track of all this we will use a helper class called Lang which has word &rarr; index (word2index) and index &rarr; word (index2word) dictionaries, as well as a count of each word word2count to use to later replace rare words.
End of explanation
"""
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    # NFD-decompose, then drop combining marks (category 'Mn'), which
    # strips accents: "déjà" -> "deja".
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )

# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
    s = unicodeToAscii(s.lower().strip())
    # Separate sentence-ending punctuation from the word before it.
    s = re.sub(r"([.!?])", r" \1", s)
    # Collapse everything else that is not a letter or .!? into one space.
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    return s
"""
Explanation: The files are all in Unicode, to simplify we will turn Unicode characters to ASCII, make everything lowercase, and trim most punctuation.
End of explanation
"""
def readLangs(lang1, lang2, pairs_file, reverse=False):
    # Parse the tab-separated corpus into normalized sentence pairs and
    # return empty Lang vocabularies for both sides (filled by prepareData).
    print("Reading lines...")

    # Read the file and split into lines
    lines = open('data/%s' % (pairs_file)).read().strip().split('\n')

    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]

    # Reverse pairs, make Lang instances
    if reverse:
        pairs = [list(reversed(p)) for p in pairs]
        input_lang = Lang(lang2)
        output_lang = Lang(lang1)
    else:
        input_lang = Lang(lang1)
        output_lang = Lang(lang2)

    return input_lang, output_lang, pairs
"""
Explanation: To read the data file we will split the file into lines, and then split lines into pairs. The files are all English &rarr; Other Language, so if we want to translate from Other Language &rarr; English I added the reverse flag to reverse the pairs.
End of explanation
"""
# Keep only pairs whose sentences are shorter than MAX_LENGTH tokens and
# whose English side starts with one of the simple-subject prefixes below.
MAX_LENGTH = 10

eng_prefixes = (
    "i am ", "i m ",
    "he is", "he s ",
    "she is", "she s",
    "you are", "you re ",
    "we are", "we re ",
    "they are", "they re "
)

def filterPair(p):
    # p is [source_sentence, english_sentence]; both length limits apply.
    return len(p[0].split(' ')) < MAX_LENGTH and \
        len(p[1].split(' ')) < MAX_LENGTH and \
        p[1].startswith(eng_prefixes)

def filterPairs(pairs):
    return [pair for pair in pairs if filterPair(pair)]
"""
Explanation: Since there are a lot of example sentences and we want to train something quickly, we'll trim the data set to only relatively short and simple sentences.
Here the maximum length is 10 words (that includes ending punctuation) and we're filtering to sentences that translate to the form "I am" or "He is" etc. (accounting for apostrophes replaced earlier).
End of explanation
"""
def prepareData(lang1, lang2, pairs_file, reverse=False):
    # Full pipeline: read + normalize, filter by length/prefix, then build
    # both vocabularies from the surviving pairs.
    input_lang, output_lang, pairs = readLangs(lang1, lang2, pairs_file, reverse)
    print("Read %s sentence pairs" % len(pairs))
    pairs = filterPairs(pairs)
    print("Trimmed to %s sentence pairs" % len(pairs))
    print("Counting words...")
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    print("Counted words:")
    print(input_lang.name, input_lang.n_words)
    print(output_lang.name, output_lang.n_words)
    return input_lang, output_lang, pairs

# reverse=True, so the task is French -> English.
input_lang, output_lang, pairs = prepareData('eng', 'fra', 'fra.txt', True)
print(random.choice(pairs))

def indexesFromSentence(lang, sentence):
    # Map each word to its vocabulary index and append the EOS marker.
    return [lang.word2index[word] for word in sentence.split(' ')]+[EOS_token]

def variableFromSentence(lang, sentence):
    # Shape (1, seq_len): a single-sentence batch.
    indexes = indexesFromSentence(lang, sentence)
    return Variable(torch.LongTensor(indexes).unsqueeze(0))

def variablesFromPair(pair):
    input_variable = variableFromSentence(input_lang, pair[0])
    target_variable = variableFromSentence(output_lang, pair[1])
    return (input_variable, target_variable)

def index_and_pad(lang, dat):
    # Index every sentence, then zero-pad on the right so all rows share the
    # length of the longest sentence (Keras pad_sequences, padding='post').
    return sequence.pad_sequences([indexesFromSentence(lang, s) for s in dat],
                                  padding='post').astype(np.int64)

fra, eng = list(zip(*pairs))
fra = index_and_pad(input_lang, fra)
eng = index_and_pad(output_lang, eng)

def get_batch(x, y, batch_size=16):
    # Sample a random mini-batch of aligned source/target rows.
    idxs = np.random.permutation(len(x))[:batch_size]
    return x[idxs], y[idxs]
"""
Explanation: The full process for preparing the data is:
Read text file and split into lines, split lines into pairs
Normalize text, filter by length and content
Make word lists from sentences in pairs
End of explanation
"""
class EncoderRNN(nn.Module):
    def __init__(self, input_size, hidden_size, n_layers=1):
        super(EncoderRNN, self).__init__()
        self.hidden_size = hidden_size
        # Embedding dim equals the GRU hidden size, so embeddings feed the
        # GRU directly without a projection.
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True,
                          num_layers=n_layers)

    def forward(self, input, hidden):
        # input: (batch, seq) word indices; returns per-step outputs and the
        # final hidden state.
        output, hidden = self.gru(self.embedding(input), hidden)
        return output, hidden

    # TODO: other inits
    def initHidden(self, batch_size):
        # Zero initial hidden state, shape (num_layers=1, batch, hidden).
        return Variable(torch.zeros(1, batch_size, self.hidden_size))
"""
Explanation: The Encoder
The encoder of a seq2seq network is a RNN that outputs some value for every word from the input sentence. For every input word the encoder outputs a vector and a hidden state, and uses the hidden state for the next input word.
End of explanation
"""
class DecoderRNN(nn.Module):
    """Plain (attention-free) decoder: embeds the previous token, runs one
    GRU step, and projects to log-probabilities over the output vocabulary."""

    def __init__(self, hidden_size, output_size, n_layers=1):
        super(DecoderRNN, self).__init__()
        self.embedding = nn.Embedding(output_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True,
                          num_layers=n_layers)
        # TODO use transpose of embedding
        self.out = nn.Linear(hidden_size, output_size)
        self.sm = nn.LogSoftmax()

    def forward(self, input, hidden):
        # input: (batch,) previous-token indices -> (batch, 1, hidden) emb.
        emb = self.embedding(input).unsqueeze(1)
        # NB: Removed relu
        res, hidden = self.gru(emb, hidden)
        output = self.sm(self.out(res[:,0]))
        return output, hidden
"""
Explanation: Simple Decoder
In the simplest seq2seq decoder we use only last output of the encoder. This last output is sometimes called the context vector as it encodes context from the entire sequence. This context vector is used as the initial hidden state of the decoder.
At every step of decoding, the decoder is given an input token and hidden state. The initial input token is the start-of-string &lt;SOS&gt; token, and the first hidden state is the context vector (the encoder's last hidden state).
End of explanation
"""
class AttnDecoderRNN(nn.Module):
    """Decoder with additive attention over the encoder outputs (capped at
    max_length input positions; shorter inputs use a prefix of the weights)."""

    def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1, max_length=MAX_LENGTH):
        super(AttnDecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout_p = dropout_p
        self.max_length = max_length

        self.embedding = nn.Embedding(self.output_size, self.hidden_size)
        # attn scores each of the max_length encoder positions from the
        # concatenated (embedded input, hidden state) pair.
        self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
        self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
        self.dropout = nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_output, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        # Attention weights from the current input embedding + hidden state.
        attn_weights = F.softmax(self.attn(torch.cat((embedded[0], hidden[0]), 1)))
        # Weighted sum of encoder outputs (batched matrix multiply).
        attn_applied = torch.bmm(attn_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        for i in range(self.n_layers):
            output = F.relu(output)
            output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]))
        return output, hidden, attn_weights

    def initHidden(self):
        return Variable(torch.zeros(1, 1, self.hidden_size))
"""
Explanation: Attention Decoder
If only the context vector is passed betweeen the encoder and decoder, that single vector carries the burden of encoding the entire sentence.
Attention allows the decoder network to "focus" on a different part of the encoder's outputs for every step of the decoder's own outputs. First we calculate a set of attention weights. These will be multiplied by the encoder output vectors to create a weighted combination. The result (called attn_applied in the code) should contain information about that specific part of the input sequence, and thus help the decoder choose the right output words.
Calculating the attention weights is done with another feed-forward layer attn, using the decoder's input and hidden state as inputs. Because there are sentences of all sizes in the training data, to actually create and train this layer we have to choose a maximum sentence length (input length, for encoder outputs) that it can apply to. Sentences of the maximum length will use all the attention weights, while shorter sentences will only use the first few.
End of explanation
"""
def train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    # One optimization step on one mini-batch, always teacher-forced:
    # the ground-truth token is fed as the next decoder input.
    batch_size, input_length = input_variable.size()
    target_length = target_variable.size()[1]
    encoder_hidden = encoder.initHidden(batch_size).cuda()
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    loss = 0

    encoder_output, encoder_hidden = encoder(input_variable, encoder_hidden)

    # Start decoding from SOS, seeded with the encoder's final hidden state.
    decoder_input = Variable(torch.LongTensor([SOS_token]*batch_size)).cuda()
    decoder_hidden = encoder_hidden

    for di in range(target_length):
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)  #, encoder_output, encoder_outputs)
        targ = target_variable[:, di]
        # print(decoder_output.size(), targ.size(), target_variable.size())
        loss += criterion(decoder_output, targ)
        decoder_input = targ

    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    # Average per-step loss for logging.
    return loss.data[0] / target_length

def asMinutes(s):
    # Format a duration in seconds as "Xm Ys".
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)

def timeSince(since, percent):
    # Elapsed time plus an ETA extrapolated from the completed fraction.
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))

def trainEpochs(encoder, decoder, n_epochs, print_every=1000, plot_every=100, learning_rate=0.01):
    # Main training loop: one random mini-batch per "epoch", with periodic
    # console logging and loss-curve plotting at the end.
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.RMSprop(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.RMSprop(decoder.parameters(),
                                      lr=learning_rate)
    criterion = nn.NLLLoss().cuda()

    for epoch in range(1, n_epochs + 1):
        training_batch = get_batch(fra, eng)
        input_variable = Variable(torch.LongTensor(training_batch[0])).cuda()
        target_variable = Variable(torch.LongTensor(training_batch[1])).cuda()

        loss = train(input_variable, target_variable, encoder,
                     decoder, encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if epoch % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, epoch / n_epochs),
                                         epoch, epoch / n_epochs * 100, print_loss_avg))

        if epoch % plot_every == 0:
            plot_loss_avg = plot_loss_total / plot_every
            plot_losses.append(plot_loss_avg)
            plot_loss_total = 0

    showPlot(plot_losses)
"""
Explanation: Note: There are other forms of attention that work around the length limitation by using a relative position approach. Read about "local attention" in Effective Approaches to Attention-based Neural Machine Translation.
Training
To train we run the input sentence through the encoder, and keep track of every output and the latest hidden state. Then the decoder is given the &lt;SOS&gt; token as its first input, and the last hidden state of the decoder as its first hidden state.
"Teacher forcing" is the concept of using the real target outputs as each next input, instead of using the decoder's guess as the next input. Using teacher forcing causes it to converge faster but when the trained network is exploited, it may exhibit instability.
End of explanation
"""
# TODO: Make this change during training
teacher_forcing_ratio = 0.5

def attn_train(input_variable, target_variable, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    # Single-sentence training step for the attention decoder; randomly
    # mixes teacher forcing with free-running decoding per sentence.
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_variable.size()[0]
    target_length = target_variable.size()[0]

    # Collect one encoder output per input position for the attention layer.
    encoder_outputs = Variable(torch.zeros(max_length, encoder.hidden_size))

    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_variable[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0][0]

    decoder_input = Variable(torch.LongTensor([[SOS_token]]))
    decoder_hidden = encoder_hidden

    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            loss += criterion(decoder_output[0], target_variable[di])
            decoder_input = target_variable[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_output, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            ni = topi[0][0]

            decoder_input = Variable(torch.LongTensor([[ni]]))

            loss += criterion(decoder_output[0], target_variable[di])
            # Stop early once the model emits end-of-sentence.
            if ni == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.data[0] / target_length
"""
Explanation: Attention
End of explanation
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
%matplotlib inline

def showPlot(points):
    # Plot the running training-loss averages collected by trainEpochs.
    plt.figure()
    fig, ax = plt.subplots()
    loc = ticker.MultipleLocator(base=0.2)  # this locator puts ticks at regular intervals
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
"""
Explanation: Plotting results
Plotting is done with matplotlib, using the array of loss values plot_losses saved while training.
End of explanation
"""
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    # Greedy decoding of a single source sentence: feed the decoder's own
    # argmax prediction back as the next input, up to max_length tokens.
    input_variable = variableFromSentence(input_lang, sentence).cuda()
    input_length = input_variable.size()[0]
    encoder_hidden = encoder.initHidden(1).cuda()

    encoder_output, encoder_hidden = encoder(input_variable, encoder_hidden)

    decoder_input = Variable(torch.LongTensor([SOS_token])).cuda()
    decoder_hidden = encoder_hidden

    decoded_words = []
#    decoder_attentions = torch.zeros(max_length, max_length)

    for di in range(max_length):
#        decoder_output, decoder_hidden, decoder_attention = decoder(
        decoder_output, decoder_hidden = decoder(decoder_input, decoder_hidden)  #, encoder_output, encoder_outputs)
#        decoder_attentions[di] = decoder_attention.data
        topv, topi = decoder_output.data.topk(1)
        ni = topi[0][0]
        if ni == EOS_token:
            decoded_words.append('<EOS>')
            break
        else:
            decoded_words.append(output_lang.index2word[ni])

        decoder_input = Variable(torch.LongTensor([ni])).cuda()

    # Second element is a placeholder for the (disabled) attention matrix.
    return decoded_words,0#, decoder_attentions[:di+1]

def evaluateRandomly(encoder, decoder, n=10):
    # Print n random corpus pairs: source (>), reference (=), prediction (<).
    for i in range(n):
        pair = random.choice(pairs)
        print('>', pair[0])
        print('=', pair[1])
        output_words, attentions = evaluate(encoder, decoder, pair[0])
        output_sentence = ' '.join(output_words)
        print('<', output_sentence)
        print('')
"""
Explanation: Evaluation
Evaluation is mostly the same as training, but there are no targets so we simply feed the decoder's predictions back to itself for each step. Every time it predicts a word we add it to the output string, and if it predicts the EOS token we stop there. We also store the decoder's attention outputs for display later.
End of explanation
"""
#TODO:
# - Test set
# - random teacher forcing
# - attention
# - multi layers
# - bidirectional encoding
# Build the models on the GPU and run the full training + spot-check loop.
# Note: attn_decoder1 is actually the plain DecoderRNN, not AttnDecoderRNN.
hidden_size = 256
encoder1 = EncoderRNN(input_lang.n_words, hidden_size).cuda()
attn_decoder1 = DecoderRNN(hidden_size, output_lang.n_words).cuda()

trainEpochs(encoder1, attn_decoder1, 15000, print_every=500, learning_rate=0.005)

evaluateRandomly(encoder1, attn_decoder1)
"""
Explanation: Training and Evaluating
Note: If you run this notebook you can train, interrupt the kernel, evaluate, and continue training later. Comment out the lines where the encoder and decoder are initialized and run trainEpochs again.
End of explanation
"""
output_words, attentions = evaluate(encoder1, attn_decoder1, "je suis trop froid .")
plt.matshow(attentions.numpy())
"""
Explanation: Visualizing Attention
A useful property of the attention mechanism is its highly interpretable outputs. Because it is used to weight specific encoder outputs of the input sequence, we can imagine looking where the network is focused most at each time step.
You could simply run plt.matshow(attentions) to see attention output displayed as a matrix, with the columns being input steps and rows being output steps:
NOTE: This only works when using the attentional decoder, if you've been following the notebook to this point you are using the standard decoder.
End of explanation
"""
def showAttention(input_sentence, output_words, attentions):
    # Render the attention matrix with input tokens on the x-axis and
    # generated tokens on the y-axis.
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)

    # Set up axes
    ax.set_xticklabels([''] + input_sentence.split(' ') + ['<EOS>'], rotation=90)
    ax.set_yticklabels([''] + output_words)

    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))

    plt.show()

def evaluateAndShowAttention(input_sentence):
    # Translate one sentence, print the result, and plot its attention map.
    output_words, attentions = evaluate(encoder1, attn_decoder1, input_sentence)
    print('input =', input_sentence)
    print('output =', ' '.join(output_words))
    showAttention(input_sentence, output_words, attentions)

evaluateAndShowAttention("elle a cinq ans de moins que moi .")

evaluateAndShowAttention("elle est trop petit .")

evaluateAndShowAttention("je ne crains pas de mourir .")

evaluateAndShowAttention("c est un jeune directeur plein de talent .")
"""
Explanation: For a better viewing experience we will do the extra work of adding axes and labels:
End of explanation
"""
FunOnTheUpfield/DataVicGovAuPTDataCleaner
TramBoardingAlighting_DataCleaner.ipynb
gpl-3.0
import pandas as pd
# Source workbook; ID-like columns are forced to str so leading zeros and
# grid coordinates are not mangled by numeric coercion.
rawtram = './raw/Tram Boardings and Alightings 2011 - data.XLS'
df = pd.read_excel(rawtram,sheetname='Data', header=0,converters={'Route_Number':str,'Tram_Tracker_ID':str, 'Metlink_Stop_ID':str, 'VicgridX':str, 'VicgridY':str})
df
"""
Explanation: Tram Boarding and Alighting Data Cleaner
Data Source
https://www.data.vic.gov.au/data/dataset/tram-boardings-and-alightings-at-tram-stops-2015
Data Coverage period
01/01/2011 to 31/12/2011 (7:00am to 7:00pm weighted observations)
Data faults and an assumption that is made to work around the fault
Data appears to have a the missing column 'Day Type' column, a categorical variable classifying boarding alighting result as either "Weekday", "Saturday" or "Sunday". This column is mentioned in the data definition but missing from the data.
Most of the tram boarding / alighting data appears to be grouped into collections of three rows where the first nine columns are identical. Data that does not match this pattern has only a single row.
It is assumed that the first instance of a row with nine identical columns is "Weekday", The second and third (when they exist) are assumed to be "Saturday" and "Sunday" respectively.
For the purposes of comparison with bus boarding alighting data, only Weekday columns are of interest.
Step 1: Download raw tram boarding data, save a local copy in ./raw directory
Download Tram boardings and alightings xls file manually. The web page has a 'I consent to terms and conditions / I am not a robot' button that prevents automated downloading (or at least makes it harder than I expected).
Save file to './raw' directory
End of explanation
"""
# Keep only the first row per stop: per the assumption above, that row is
# the "Weekday" observation (Saturday/Sunday duplicates are dropped).
bystop = df.groupby('Metlink_Stop_ID').first()
"""
Explanation: Step 2: Subset out the weekday data:
The first nine columns (the non - 'boarding, alighting' values) are repeated three times. This appears to be a fault relating to missing 'Day Type' column. The first instance is assumed to be the "weekday" total (so comparable to the bus data.
End of explanation
"""
# Group by stop, populate NA fields with zeros
# wk7am7pm = total weekday 7am-7pm activity (boardings + alightings).
bystop['wk7am7pm'] = bystop.Boardings.fillna(0) + bystop.Alightings.fillna(0)

# Write to CSV file
bystop.to_csv('./clean/TramStopTraffic.csv')
bystop
"""
Explanation: Step 3: Create a .csv file with boarding and alighting data for each stop
This script groups all the reported tram boardings and alightings for a given stop
If multiple routes use the same stop the results from multiple routes will be combined into a single "boarding" value and a single "alighting" value.
Results are saved as './clean/TramStopTraffic.csv'
End of explanation
"""
mne-tools/mne-tools.github.io
0.22/_downloads/d0650bb5ca9f8c789ed4763f3c3f895e/plot_linear_model_patterns.ipynb
bsd-3-clause
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Romain Trachel <trachelr@gmail.com>
#          Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)

import mne
from mne import io, EvokedArray
from mne.datasets import sample
from mne.decoding import Vectorizer, get_coef

from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline

# import a linear classifier from mne.decoding
from mne.decoding import LinearModel

print(__doc__)

data_path = sample.data_path()
sample_path = data_path + '/MEG/sample'
"""
Explanation: Linear classifier on sensor data with plot patterns and filters
Here decoding, a.k.a MVPA or supervised machine learning, is applied to M/EEG data in sensor space. Fit a linear classifier with the LinearModel object providing topographical patterns which are more neurophysiologically interpretable :footcite:HaufeEtAl2014 than the classifier filters (weight vectors). The patterns explain how the MEG and EEG data were generated from the discriminant neural sources which are extracted by the filters. Note patterns/filters in MEG data are more similar than EEG data because the noise is less spatially correlated in MEG than EEG.
End of explanation
"""
raw_fname = sample_path + '/sample_audvis_filt-0-40_raw.fif'
event_fname = sample_path + '/sample_audvis_filt-0-40_raw-eve.fif'
# Epoch window (s) around each event; auditory-left vs. visual-left.
tmin, tmax = -0.1, 0.4
event_id = dict(aud_l=1, vis_l=3)

# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(.5, 25, fir_design='firwin')
events = mne.read_events(event_fname)

# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    decim=2, baseline=None, preload=True)
del raw

labels = epochs.events[:, -1]

# get MEG and EEG data
# Flatten each epoch to a single feature vector (channels x times).
meg_epochs = epochs.copy().pick_types(meg=True, eeg=False)
meg_data = meg_epochs.get_data().reshape(len(labels), -1)
"""
Explanation: Set parameters
End of explanation
"""
clf = LogisticRegression(solver='lbfgs')
scaler = StandardScaler()

# create a linear model with LogisticRegression
model = LinearModel(clf)

# fit the classifier on MEG data
X = scaler.fit_transform(meg_data)
model.fit(X, labels)

# Extract and plot spatial filters and spatial patterns
for name, coef in (('patterns', model.patterns_), ('filters', model.filters_)):
    # We fitted the linear model onto Z-scored data. To make the filters
    # interpretable, we must reverse this normalization step
    coef = scaler.inverse_transform([coef])[0]

    # The data was vectorized to fit a single model across all time points and
    # all channels. We thus reshape it:
    coef = coef.reshape(len(meg_epochs.ch_names), -1)

    # Plot
    evoked = EvokedArray(coef, meg_epochs.info, tmin=epochs.tmin)
    evoked.plot_topomap(title='MEG %s' % name, time_unit='s')
"""
Explanation: Decoding in sensor space using a LogisticRegression classifier
End of explanation
"""
X = epochs.pick_types(meg=False, eeg=True)
y = epochs.events[:, 2]

# Define a unique pipeline to sequentially:
clf = make_pipeline(
    Vectorizer(),                       # 1) vectorize across time and channels
    StandardScaler(),                   # 2) normalize features across trials
    LinearModel(
        LogisticRegression(solver='lbfgs')))  # 3) fits a logistic regression
clf.fit(X, y)

# Extract and plot patterns and filters
for name in ('patterns_', 'filters_'):
    # The `inverse_transform` parameter will call this method on any estimator
    # contained in the pipeline, in reverse order.
    coef = get_coef(clf, name, inverse_transform=True)
    evoked = EvokedArray(coef, epochs.info, tmin=epochs.tmin)
    evoked.plot_topomap(title='EEG %s' % name[:-1], time_unit='s')
"""
Explanation: Let's do the same on EEG data using a scikit-learn pipeline
End of explanation
"""
GoogleCloudPlatform/vertex-ai-samples
notebooks/community/sdk/sdk_automl_image_classification_online_export_edge.ipynb
apache-2.0
import os # Google Cloud Notebook if os.path.exists("/opt/deeplearning/metadata/env_version"): USER_FLAG = "--user" else: USER_FLAG = "" ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG """ Explanation: Vertex SDK: AutoML training image classification model for export to edge <table align="left"> <td> <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_online_export_edge.ipynb"> <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab </a> </td> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_online_export_edge.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> <td> <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_image_classification_online_export_edge.ipynb"> Open in Google Cloud Notebooks </a> </td> </table> <br/><br/><br/> Overview This tutorial demonstrates how to use the Vertex SDK to create image classification models to export as an Edge model using a Google Cloud AutoML model. Dataset The dataset used for this tutorial is the Flowers dataset from TensorFlow Datasets. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of flower an image is from a class of five flowers: daisy, dandelion, rose, sunflower, or tulip. Objective In this tutorial, you create a AutoML image classification model from a Python script using the Vertex SDK, and then export the model as an Edge model in TFLite format. 
You can alternatively create models with AutoML using the gcloud command-line tool or online using the Cloud Console. The steps performed include: Create a Vertex Dataset resource. Train the model. Export the Edge model from the Model resource to Cloud Storage. Download the model locally. Make a local prediction. Costs This tutorial uses billable components of Google Cloud: Vertex AI Cloud Storage Learn about Vertex AI pricing and Cloud Storage pricing, and use the Pricing Calculator to generate a cost estimate based on your projected usage. Set up your local development environment If you are using Colab or Google Cloud Notebooks, your environment already meets all the requirements to run this notebook. You can skip this step. Otherwise, make sure your environment meets this notebook's requirements. You need the following: The Cloud Storage SDK Git Python 3 virtualenv Jupyter notebook running in a virtual environment with Python 3 The Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions: Install and initialize the SDK. Install Python 3. Install virtualenv and create a virtual environment that uses Python 3. Activate the virtual environment. To install Jupyter, run pip3 install jupyter on the command-line in a terminal shell. To launch Jupyter, run jupyter notebook on the command-line in a terminal shell. Open this notebook in the Jupyter Notebook Dashboard. Installation Install the latest version of Vertex SDK for Python. End of explanation """ ! pip3 install -U google-cloud-storage $USER_FLAG if os.environ["IS_TESTING"]: ! pip3 install --upgrade tensorflow $USER_FLAG """ Explanation: Install the latest GA version of google-cloud-storage library as well. 
End of explanation """ import os if not os.getenv("IS_TESTING"): # Automatically restart kernel after installs import IPython app = IPython.Application.instance() app.kernel.do_shutdown(True) """ Explanation: Restart the kernel Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages. End of explanation """ PROJECT_ID = "[your-project-id]" # @param {type:"string"} if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]": # Get your GCP project id from gcloud shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null PROJECT_ID = shell_output[0] print("Project ID:", PROJECT_ID) ! gcloud config set project $PROJECT_ID """ Explanation: Before you begin GPU runtime This tutorial does not require a GPU runtime. Set up your Google Cloud project The following steps are required, regardless of your notebook environment. Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs. Make sure that billing is enabled for your project. Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage. If you are running this notebook locally, you will need to install the Cloud SDK. Enter your project ID in the cell below. Then run the cell to make sure the Cloud SDK uses the right project for all the commands in this notebook. Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $. End of explanation """ REGION = "us-central1" # @param {type: "string"} """ Explanation: Region You can also change the REGION variable, which is used for operations throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you. Americas: us-central1 Europe: europe-west4 Asia Pacific: asia-east1 You may not use a multi-regional bucket for training with Vertex AI. 
Not all regions provide support for all Vertex AI services. Learn more about Vertex AI regions End of explanation """ from datetime import datetime TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S") """ Explanation: Timestamp If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial. End of explanation """ # If you are running this notebook in Colab, run this cell and follow the # instructions to authenticate your GCP account. This provides access to your # Cloud Storage bucket and lets you submit training jobs and prediction # requests. import os import sys # If on Google Cloud Notebook, then don't execute this code if not os.path.exists("/opt/deeplearning/metadata/env_version"): if "google.colab" in sys.modules: from google.colab import auth as google_auth google_auth.authenticate_user() # If you are running this notebook locally, replace the string below with the # path to your service account key and run this cell to authenticate your GCP # account. elif not os.getenv("IS_TESTING"): %env GOOGLE_APPLICATION_CREDENTIALS '' """ Explanation: Authenticate your Google Cloud account If you are using Google Cloud Notebooks, your environment is already authenticated. Skip this step. If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth. Otherwise, follow these steps: In the Cloud Console, go to the Create service account key page. Click Create service account. In the Service account name field, enter a name, and click Create. In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin. Click Create. 
A JSON file that contains your key downloads to your local environment. Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell. End of explanation """ BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"} if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]": BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP """ Explanation: Create a Cloud Storage bucket The following steps are required, regardless of your notebook environment. When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions. Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization. End of explanation """ ! gsutil mb -l $REGION $BUCKET_NAME """ Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket. End of explanation """ ! gsutil ls -al $BUCKET_NAME """ Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents: End of explanation """ import google.cloud.aiplatform as aip """ Explanation: Set up variables Next, set up some variables used throughout the tutorial. Import libraries and define constants End of explanation """ aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME) """ Explanation: Initialize Vertex SDK for Python Initialize the Vertex SDK for Python for your project and corresponding bucket. End of explanation """ IMPORT_FILE = ( "gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv" ) """ Explanation: Tutorial Now you are ready to start creating your own AutoML image classification model. Location of Cloud Storage training data. 
Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage. End of explanation """ if "IMPORT_FILES" in globals(): FILE = IMPORT_FILES[0] else: FILE = IMPORT_FILE count = ! gsutil cat $FILE | wc -l print("Number of Examples", int(count[0])) print("First 10 rows") ! gsutil cat $FILE | head """ Explanation: Quick peek at your data This tutorial uses a version of the Flowers dataset that is stored in a public Cloud Storage bucket, using a CSV index file. Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows. End of explanation """ dataset = aip.ImageDataset.create( display_name="Flowers" + "_" + TIMESTAMP, gcs_source=[IMPORT_FILE], import_schema_uri=aip.schema.dataset.ioformat.image.single_label_classification, ) print(dataset.resource_name) """ Explanation: Create the Dataset Next, create the Dataset resource using the create method for the ImageDataset class, which takes the following parameters: display_name: The human readable name for the Dataset resource. gcs_source: A list of one or more dataset index files to import the data items into the Dataset resource. import_schema_uri: The data labeling schema for the data items. This operation may take several minutes. End of explanation """ dag = aip.AutoMLImageTrainingJob( display_name="flowers_" + TIMESTAMP, prediction_type="classification", multi_label=False, model_type="MOBILE_TF_LOW_LATENCY_1", base_model=None, ) print(dag) """ Explanation: Create and run training pipeline To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline. Create training pipeline An AutoML training pipeline is created with the AutoMLImageTrainingJob class, with the following parameters: display_name: The human readable name for the TrainingJob resource. prediction_type: The type task to train the model for. 
classification: An image classification model. object_detection: An image object detection model. multi_label: If a classification task, whether single (False) or multi-labeled (True). model_type: The type of model for deployment. CLOUD: Deployment on Google Cloud CLOUD_HIGH_ACCURACY_1: Optimized for accuracy over latency for deployment on Google Cloud. CLOUD_LOW_LATENCY_: Optimized for latency over accuracy for deployment on Google Cloud. MOBILE_TF_VERSATILE_1: Deployment on an edge device. MOBILE_TF_HIGH_ACCURACY_1:Optimized for accuracy over latency for deployment on an edge device. MOBILE_TF_LOW_LATENCY_1: Optimized for latency over accuracy for deployment on an edge device. base_model: (optional) Transfer learning from existing Model resource -- supported for image classification only. The instantiated object is the DAG (directed acyclic graph) for the training job. End of explanation """ model = dag.run( dataset=dataset, model_display_name="flowers_" + TIMESTAMP, training_fraction_split=0.8, validation_fraction_split=0.1, test_fraction_split=0.1, budget_milli_node_hours=8000, disable_early_stopping=False, ) """ Explanation: Run the training pipeline Next, you run the DAG to start the training job by invoking the method run, with the following parameters: dataset: The Dataset resource to train the model. model_display_name: The human readable name for the trained model. training_fraction_split: The percentage of the dataset to use for training. test_fraction_split: The percentage of the dataset to use for test (holdout data). validation_fraction_split: The percentage of the dataset to use for validation. budget_milli_node_hours: (optional) Maximum training time specified in unit of millihours (1000 = hour). disable_early_stopping: If True, training maybe completed before using the entire budget if the service believes it cannot further improve on the model objective measurements. The run method when completed returns the Model resource. 
The execution of the training pipeline will take upto 20 minutes. End of explanation """ # Get model resource ID models = aip.Model.list(filter="display_name=flowers_" + TIMESTAMP) # Get a reference to the Model Service client client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"} model_service_client = aip.gapic.ModelServiceClient(client_options=client_options) model_evaluations = model_service_client.list_model_evaluations( parent=models[0].resource_name ) model_evaluation = list(model_evaluations)[0] print(model_evaluation) """ Explanation: Review model evaluation scores After your model has finished training, you can review the evaluation scores for it. First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when you deployed the model or you can list all of the models in your project. End of explanation """ response = model.export_model( artifact_destination=BUCKET_NAME, export_format_id="tflite", sync=True ) model_package = response["artifactOutputUri"] """ Explanation: Export as Edge model You can export an AutoML image classification model as a Edge model which you can then custom deploy to an edge device or download locally. Use the method export_model() to export the model to Cloud Storage, which takes the following parameters: artifact_destination: The Cloud Storage location to store the SavedFormat model artifacts to. export_format_id: The format to save the model format as. For AutoML image classification there is just one option: tf-saved-model: TensorFlow SavedFormat for deployment to a container. tflite: TensorFlow Lite for deployment to an edge or mobile device. edgetpu-tflite: TensorFlow Lite for TPU tf-js: TensorFlow for web client coral-ml: for Coral devices sync: Whether to perform operational sychronously or asynchronously. End of explanation """ ! gsutil ls $model_package # Download the model artifacts ! 
gsutil cp -r $model_package tflite tflite_path = "tflite/model.tflite" """ Explanation: Download the TFLite model artifacts Now that you have an exported TFLite version of your model, you can test the exported model locally, but first downloading it from Cloud Storage. End of explanation """ import tensorflow as tf interpreter = tf.lite.Interpreter(model_path=tflite_path) interpreter.allocate_tensors() input_details = interpreter.get_input_details() output_details = interpreter.get_output_details() input_shape = input_details[0]["shape"] print("input tensor shape", input_shape) """ Explanation: Instantiate a TFLite interpreter The TFLite version of the model is not a TensorFlow SavedModel format. You cannot directly use methods like predict(). Instead, one uses the TFLite interpreter. You must first setup the interpreter for the TFLite model as follows: Instantiate an TFLite interpreter for the TFLite model. Instruct the interpreter to allocate input and output tensors for the model. Get detail information about the models input and output tensors that will need to be known for prediction. End of explanation """ test_items = ! gsutil cat $IMPORT_FILE | head -n1 test_item = test_items[0].split(",")[0] with tf.io.gfile.GFile(test_item, "rb") as f: content = f.read() test_image = tf.io.decode_jpeg(content) print("test image shape", test_image.shape) test_image = tf.image.resize(test_image, (224, 224)) print("test image shape", test_image.shape, test_image.dtype) test_image = tf.cast(test_image, dtype=tf.uint8).numpy() """ Explanation: Get test item You will use an arbitrary example out of the dataset as a test item. Don't be concerned that the example was likely used in training the model -- we just want to demonstrate how to make a prediction. 
End of explanation """ import numpy as np data = np.expand_dims(test_image, axis=0) interpreter.set_tensor(input_details[0]["index"], data) interpreter.invoke() softmax = interpreter.get_tensor(output_details[0]["index"]) label = np.argmax(softmax) print(label) """ Explanation: Make a prediction with TFLite model Finally, you do a prediction using your TFLite model, as follows: Convert the test image into a batch of a single image (np.expand_dims) Set the input tensor for the interpreter to your batch of a single image (data). Invoke the interpreter. Retrieve the softmax probabilities for the prediction (get_tensor). Determine which label had the highest probability (np.argmax). End of explanation """ delete_all = True if delete_all: # Delete the dataset using the Vertex dataset object try: if "dataset" in globals(): dataset.delete() except Exception as e: print(e) # Delete the model using the Vertex model object try: if "model" in globals(): model.delete() except Exception as e: print(e) # Delete the endpoint using the Vertex endpoint object try: if "endpoint" in globals(): endpoint.delete() except Exception as e: print(e) # Delete the AutoML or Pipeline trainig job try: if "dag" in globals(): dag.delete() except Exception as e: print(e) # Delete the custom trainig job try: if "job" in globals(): job.delete() except Exception as e: print(e) # Delete the batch prediction job using the Vertex batch prediction object try: if "batch_predict_job" in globals(): batch_predict_job.delete() except Exception as e: print(e) # Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object try: if "hpt_job" in globals(): hpt_job.delete() except Exception as e: print(e) if "BUCKET_NAME" in globals(): ! gsutil rm -r $BUCKET_NAME """ Explanation: Cleaning up To clean up all Google Cloud resources used in this project, you can delete the Google Cloud project you used for the tutorial. 
Otherwise, you can delete the individual resources you created in this tutorial: Dataset Pipeline Model Endpoint AutoML Training Job Batch Job Custom Job Hyperparameter Tuning Job Cloud Storage Bucket End of explanation """
dipanjank/ml
data_analysis/acute_inflammations.ipynb
gpl-3.0
import numpy as np import pandas as pd %pylab inline pylab.style.use('ggplot') import seaborn as sns data_df = pd.read_csv('diagnosis.csv', sep='\t', decimal=',', header=None) data_df.head() """ Explanation: Acute Inflammations Dataset - UCI Analysis of the UCI dataset https://archive.ics.uci.edu/ml/datasets/Acute+Inflammations. End of explanation """ data_df.columns = ['temp', 'nausea', 'lumber_pain', 'urine_pushing', 'micturiation_pain', 'burning', 'inflammation', 'nephritis'] for c in data_df.columns[1:]: data_df.loc[:, c] = data_df.loc[:, c].map(lambda v : 1.0 if v == 'yes' else 0.0) data_df.head() feature_df = data_df.loc[:, data_df.columns[:-2]] target_df = data_df.loc[:, data_df.columns[-2:]] feature_df.head() target_df.head() """ Explanation: Attribute Information a1 Temperature of patient { 35C-42C } a2 Occurrence of nausea { yes, no } a3 Lumbar pain { yes, no } a4 Urine pushing (continuous need for urination) { yes, no } a5 Micturition pains { yes, no } a6 Burning of urethra, itch, swelling of urethra outlet { yes, no } d1 decision: Inflammation of urinary bladder { yes, no } d2 decision: Nephritis of renal pelvis origin { yes, no } End of explanation """ sns.lmplot(data=data_df, x='temp', hue='inflammation', y='inflammation', fit_reg=False) for fname in feature_df.columns[1:]: pylab.figure() sns.countplot(y=fname, hue="inflammation", data=data_df) """ Explanation: Bivariate Analysis - Inflammation End of explanation """ import statsmodels.formula.api as sm inflammation_model = sm.logit( formula='inflammation ~ lumber_pain + urine_pushing + micturiation_pain', data=data_df ) inflammation_result = inflammation_model.fit(method='lbfgs') inflammation_result.summary() """ Explanation: The Logistic Regression Model for Inflammation End of explanation """ sns.lmplot(data=data_df, x='temp', hue='nephritis', y='nephritis', fit_reg=False) for fname in feature_df.columns[1:]: pylab.figure() sns.countplot(y=fname, hue="nephritis", data=data_df) """ Explanation: 
Bivariate Analysis - Nephritis End of explanation """ nephritis_model = sm.ols( formula='nephritis ~ temp + nausea + urine_pushing + lumber_pain + burning', data=data_df ) nephritis_result = nephritis_model.fit() nephritis_result.summary() """ Explanation: The Logistic Regression Model for Nephritis End of explanation """
AllenDowney/ThinkBayes2
examples/double_dice.ipynb
mit
# Configure Jupyter so figures appear in the notebook %matplotlib inline # Configure Jupyter to display the assigned value after an assignment %config InteractiveShell.ast_node_interactivity='last_expr_or_assign' import numpy as np import pandas as pd from fractions import Fraction """ Explanation: The double dice problem This notebook demonstrates a way of doing simple Bayesian updates using the table method, with a Pandas DataFrame as the table. Copyright 2018 Allen Downey MIT License: https://opensource.org/licenses/MIT End of explanation """ class BayesTable(pd.DataFrame): def __init__(self, hypo, prior=1, **options): columns = ['hypo', 'prior', 'likelihood', 'unnorm', 'posterior'] super().__init__(columns=columns, **options) self.hypo = hypo self.prior = prior def mult(self): self.unnorm = self.prior * self.likelihood def norm(self): nc = np.sum(self.unnorm) self.posterior = self.unnorm / nc return nc def update(self): self.mult() return self.norm() def reset(self): return BayesTable(self.hypo, self.posterior) """ Explanation: The BayesTable class Here's the class that represents a Bayesian table. End of explanation """ hypo = [Fraction(sides) for sides in [4, 6, 8, 12]] table = BayesTable(hypo) """ Explanation: The double dice problem Suppose I have a box that contains one each of 4-sided, 6-sided, 8-sided, and 12-sided dice. I choose a die at random, and roll it twice without letting you see the die or the outcome. I report that I got the same outcome on both rolls. 1) What is the posterior probability that I rolled each of the dice? 2) If I roll the same die again, what is the probability that I get the same outcome a third time? Solution Here's a BayesTable that represents the four hypothetical dice. End of explanation """ table.likelihood = 1/table.hypo table """ Explanation: Since we didn't specify prior probabilities, the default value is equal priors for all hypotheses. 
They don't have to be normalized, because we have to normalize the posteriors anyway. Now we can specify the likelihoods: if a die has n sides, the chance of getting the same outcome twice is 1/n. So the likelihoods are: End of explanation """ table.update() table table.posterior.astype(float) """ Explanation: Now we can use update to compute the posterior probabilities: End of explanation """ total = 0 for _, row in table.iterrows(): total += row.posterior / row.hypo total """ Explanation: The 4-sided die is most likely because you are more likely to get doubles on a 4-sided die than on a 6-, 8-, or 12- sided die. Part two The second part of the problem asks for the (posterior predictive) probability of getting the same outcome a third time, if we roll the same die again. If the die has n sides, the probability of getting the same value again is 1/n, which should look familiar. To get the total probability of getting the same outcome, we have to add up the conditional probabilities: P(n | data) * P(same outcome | n) The first term is the posterior probability; the second term is 1/n. End of explanation """ table2 = table.reset() table2.likelihood = 1/table.hypo table2 table2.update() table2 """ Explanation: This calculation is similar to the first step of the update, so we can also compute it by 1) Creating a new table with the posteriors from table. 2) Adding the likelihood of getting the same outcome a third time. 3) Computing the normalizing constant. End of explanation """
arcyfelix/Courses
17-09-17-Python-for-Financial-Analysis-and-Algorithmic-Trading/04-Visualization-Matplotlib-Pandas/04a-Matplotlib/01 - Matplotlib Concepts Lecture.ipynb
apache-2.0
import matplotlib.pyplot as plt """ Explanation: <a href='http://www.pieriandata.com'> <img src='../../Pierian_Data_Logo.png' /></a> Matplotlib Overview Lecture Introduction Matplotlib is the "grandfather" library of data visualization with Python. It was created by John Hunter. He created it to try to replicate MatLab's (another programming language) plotting capabilities in Python. So if you happen to be familiar with matlab, matplotlib will feel natural to you. It is an excellent 2D and 3D graphics library for generating scientific figures. Some of the major Pros of Matplotlib are: Generally easy to get started for simple plots Support for custom labels and texts Great control of every element in a figure High-quality output in many formats Very customizable in general Matplotlib allows you to create reproducible figures programmatically. Let's learn how to use it! Before continuing this lecture, I encourage you just to explore the official Matplotlib web page: http://matplotlib.org/ Installation You'll need to install matplotlib first with either: conda install matplotlib or pip install matplotlib Importing Import the matplotlib.pyplot module under the name plt (the tidy way): End of explanation """ %matplotlib inline """ Explanation: You'll also need to use this line to see plots in the notebook: End of explanation """ import numpy as np x = np.linspace(0, 5, 11) y = x ** 2 x y """ Explanation: That line is only for jupyter notebooks, if you are using another editor, you'll use: plt.show() at the end of all your plotting commands to have the figure pop up in another window. Basic Example Let's walk through a very simple example using two numpy arrays: Example Let's walk through a very simple example using two numpy arrays. You can also use lists, but most likely you'll be passing numpy arrays or pandas columns (which essentially also behave like arrays). 
The data we want to plot: End of explanation """ plt.plot(x, y, 'r') # 'r' is the color red plt.xlabel('X Axis Title Here') plt.ylabel('Y Axis Title Here') plt.title('String Title Here') plt.show() """ Explanation: Basic Matplotlib Commands We can create a very simple line plot using the following ( I encourage you to pause and use Shift+Tab along the way to check out the document strings for the functions we are using). End of explanation """ # plt.subplot(nrows, ncols, plot_number) plt.subplot(1, 2, 1) plt.plot(x, y, 'r--') # More on color options later plt.subplot(1, 2, 2) plt.plot(y, x, 'g*-'); """ Explanation: Creating Multiplots on Same Canvas End of explanation """ # Create Figure (empty canvas) fig = plt.figure() # Add set of axes to figure axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # left, bottom, width, height (range 0 to 1) # Plot on that set of axes axes.plot(x, y, 'b') axes.set_xlabel('Set X Label') # Notice the use of set_ to begin methods axes.set_ylabel('Set y Label') axes.set_title('Set Title') """ Explanation: Matplotlib Object Oriented Method Now that we've seen the basics, let's break it all down with a more formal introduction of Matplotlib's Object Oriented API. This means we will instantiate figure objects and then call methods or attributes from that object. Introduction to the Object Oriented Method The main idea in using the more formal Object Oriented method is to create figure objects and then just call methods or attributes off of that object. This approach is nicer when dealing with a canvas that has multiple plots on it. To begin we create a figure instance. 
Then we can add axes to that figure: End of explanation """ # Creates blank canvas fig = plt.figure() axes1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # main axes axes2 = fig.add_axes([0.2, 0.5, 0.4, 0.3]) # inset axes # Larger Figure Axes 1 axes1.plot(x, y, 'b') axes1.set_xlabel('X_label_axes2') axes1.set_ylabel('Y_label_axes2') axes1.set_title('Axes 2 Title') # Insert Figure Axes 2 axes2.plot(y, x, 'r') axes2.set_xlabel('X_label_axes2') axes2.set_ylabel('Y_label_axes2') axes2.set_title('Axes 2 Title'); """ Explanation: Code is a little more complicated, but the advantage is that we now have full control of where the plot axes are placed, and we can easily add more than one axis to the figure: End of explanation """ # Use similar to plt.figure() except use tuple unpacking to grab fig and axes fig, axes = plt.subplots() # Now use the axes object to add stuff to plot axes.plot(x, y, 'r') axes.set_xlabel('x') axes.set_ylabel('y') axes.set_title('title'); """ Explanation: subplots() The plt.subplots() object will act as a more automatic axis manager. Basic use cases: End of explanation """ # Empty canvas of 1 by 2 subplots fig, axes = plt.subplots(nrows = 1, ncols = 2) # Axes is an array of axes to plot on axes """ Explanation: Then you can specify the number of rows and columns when creating the subplots() object: End of explanation """ for ax in axes: ax.plot(x, y, 'b') ax.set_xlabel('x') ax.set_ylabel('y') ax.set_title('title') # Display the figure object fig """ Explanation: We can iterate through this array: End of explanation """ fig, axes = plt.subplots(nrows=1, ncols=2) for ax in axes: ax.plot(x, y, 'g') ax.set_xlabel('x') ax.set_ylabel('y') ax.set_title('title') fig plt.tight_layout() """ Explanation: A common issue with matplolib is overlapping subplots or figures. 
We ca use fig.tight_layout() or plt.tight_layout() method, which automatically adjusts the positions of the axes on the figure canvas so that there is no overlapping content: End of explanation """ fig = plt.figure(figsize = (8, 4), dpi = 100) """ Explanation: Figure size, aspect ratio and DPI Matplotlib allows the aspect ratio, DPI and figure size to be specified when the Figure object is created. You can use the figsize and dpi keyword arguments. * figsize is a tuple of the width and height of the figure in inches * dpi is the dots-per-inch (pixel per inch). For example: End of explanation """ fig, axes = plt.subplots(figsize=(12,3)) axes.plot(x, y, 'r') axes.set_xlabel('x') axes.set_ylabel('y') axes.set_title('title'); """ Explanation: The same arguments can also be passed to layout managers, such as the subplots function: End of explanation """ fig.savefig("filename.png") """ Explanation: Saving figures Matplotlib can generate high-quality output in a number formats, including PNG, JPG, EPS, SVG, PGF and PDF. To save a figure to a file we can use the savefig method in the Figure class: End of explanation """ fig.savefig("filename.png", dpi=200) """ Explanation: Here we can also optionally specify the DPI and choose between different output formats: End of explanation """ ax.set_title("title"); """ Explanation: Legends, labels and titles Now that we have covered the basics of how to create a figure canvas and add axes instances to the canvas, let's look at how decorate a figure with titles, axis labels, and legends. Figure titles A title can be added to each axis instance in a figure. 
To set the title, use the set_title method in the axes instance: End of explanation """ ax.set_xlabel("x") ax.set_ylabel("y"); """ Explanation: Axis labels Similarly, with the methods set_xlabel and set_ylabel, we can set the labels of the X and Y axes: End of explanation """ fig = plt.figure() ax = fig.add_axes([0,0,1,1]) ax.plot(x, x ** 2, label = "x**2") ax.plot(x, x ** 3, label= "x**3") ax.legend() """ Explanation: Legends You can use the label="label text" keyword argument when plots or other objects are added to the figure, and then using the legend method without arguments to add the legend to the figure: End of explanation """ # Lots of options.... ax.legend(loc = 1) # upper right corner ax.legend(loc = 2) # upper left corner ax.legend(loc = 3) # lower left corner ax.legend(loc = 4) # lower right corner # .. many more options are available # Most common to choose ax.legend(loc = 0) # let matplotlib decide the optimal location fig """ Explanation: Notice how are legend overlaps some of the actual plot! The legend function takes an optional keyword argument loc that can be used to specify where in the figure the legend is to be drawn. The allowed values of loc are numerical codes for the various places the legend can be drawn. See the documentation page for details. Some of the most common loc values are: End of explanation """ # MATLAB style line color and style fig, ax = plt.subplots() ax.plot(x, x ** 2, 'b.-') # blue line with dots ax.plot(x, x ** 3, 'g--') # green dashed line """ Explanation: Setting colors, linewidths, linetypes Matplotlib gives you a lot of options for customizing colors, linewidths, and linetypes. There is the basic MATLAB like syntax (which I would suggest you avoid using for more clairty sake: Colors with MatLab like syntax With matplotlib, we can define the colors of lines and other graphical elements in a number of ways. First of all, we can use the MATLAB-like syntax where 'b' means blue, 'g' means green, etc. 
The MATLAB API for selecting line styles are also supported: where, for example, 'b.-' means a blue line with dots: End of explanation """ fig, ax = plt.subplots() ax.plot(x, x + 1, color = "blue", alpha=0.5) # half-transparant ax.plot(x, x + 2, color = "#8B008B") # RGB hex code ax.plot(x, x + 3, color = "#FF8C00") # RGB hex code """ Explanation: Colors with the color= parameter We can also define colors by their names or RGB hex codes and optionally provide an alpha value using the color and alpha keyword arguments. Alpha indicates opacity. End of explanation """ fig, ax = plt.subplots(figsize=(12,6)) ax.plot(x, x + 1, color = "red", linewidth = 0.25) ax.plot(x, x + 2, color = "red", linewidth = 0.50) ax.plot(x, x + 3, color = "red", linewidth = 1.00) ax.plot(x, x + 4, color = "red", linewidth = 2.00) # possible linestype options ‘-‘, ‘–’, ‘-.’, ‘:’, ‘steps’ ax.plot(x, x + 5, color = "green", lw = 3, linestyle = '-') ax.plot(x, x + 6, color = "green", lw = 3, ls = '-.') ax.plot(x, x + 7, color = "green", lw = 3, ls = ':') # custom dash line, = ax.plot(x, x + 8, color = "black", lw = 1.50) line.set_dashes([5, 10, 15, 10]) # format: line length, space length, ... # possible marker symbols: marker = '+', 'o', '*', 's', ',', '.', '1', '2', '3', '4', ... 
ax.plot(x, x + 9, color = "blue", lw = 3, ls = '-', marker = '+') ax.plot(x, x + 10, color = "blue", lw = 3, ls = '--', marker = 'o') ax.plot(x, x + 11, color = "blue", lw = 3, ls = '-', marker = 's') ax.plot(x, x + 12, color = "blue", lw = 3, ls = '--', marker = '1') # marker size and color ax.plot(x, x + 13, color = "purple", lw = 1, ls = '-', marker = 'o', markersize = 2) ax.plot(x, x + 14, color = "purple", lw = 1, ls = '-', marker = 'o', markersize = 4) ax.plot(x, x + 15, color = "purple", lw = 1, ls = '-', marker = 'o', markersize = 8, markerfacecolor = "red") ax.plot(x, x + 16, color = "purple", lw = 1, ls = '-', marker = 's', markersize = 8, markerfacecolor = "yellow", markeredgewidth = 3, markeredgecolor = "green"); """ Explanation: Line and marker styles To change the line width, we can use the linewidth or lw keyword argument. The line style can be selected using the linestyle or ls keyword arguments: End of explanation """ fig, axes = plt.subplots(1, 3, figsize = (12, 4)) axes[0].plot(x, x ** 2, x, x ** 3) axes[0].set_title("default axes ranges") axes[1].plot(x, x ** 2, x, x ** 3) axes[1].axis('tight') axes[1].set_title("tight axes") axes[2].plot(x, x ** 2, x, x ** 3) axes[2].set_ylim([0, 60]) axes[2].set_xlim([2, 5]) axes[2].set_title("custom axes range"); """ Explanation: Control over axis appearance In this section we will look at controlling axis sizing properties in a matplotlib figure. 
Plot range We can configure the ranges of the axes using the set_ylim and set_xlim methods in the axis object, or axis('tight') for automatically getting "tightly fitted" axes ranges: End of explanation """ plt.scatter(x, y) from random import sample data = sample(range(1, 1000), 100) plt.hist(data) data = [np.random.normal(0, std, 100) for std in range(1, 4)] # rectangular box plot plt.boxplot(data, vert = True, patch_artist = True); """ Explanation: Special Plot Types There are many specialized plots we can create, such as barplots, histograms, scatter plots, and much more. Most of these type of plots we will actually create using pandas. But here are a few examples of these type of plots: End of explanation """
slundberg/shap
notebooks/api_examples/plots/text.ipynb
mit
import shap import transformers import nlp import torch import numpy as np import scipy as sp # load a BERT sentiment analysis model tokenizer = transformers.DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased") model = transformers.DistilBertForSequenceClassification.from_pretrained( "distilbert-base-uncased-finetuned-sst-2-english" ).cuda() # define a prediction function def f(x): tv = torch.tensor([tokenizer.encode(v, padding='max_length', max_length=500, truncation=True) for v in x]).cuda() outputs = model(tv)[0].detach().cpu().numpy() scores = (np.exp(outputs).T / np.exp(outputs).sum(-1)).T val = sp.special.logit(scores[:,1]) # use one vs rest logit units return val # build an explainer using a token masker explainer = shap.Explainer(f, tokenizer) # explain the model's predictions on IMDB reviews imdb_train = nlp.load_dataset("imdb")["train"] shap_values = explainer(imdb_train[:10], fixed_context=1) """ Explanation: text plot This notebook is designed to demonstrate (and so document) how to use the shap.plots.text function. It uses a distilled PyTorch BERT model from the transformers package to do sentiment analysis of IMDB movie reviews. Note that the prediction function we define takes a list of strings and returns a logit value for the positive class. End of explanation """ # plot the first sentence's explanation shap.plots.text(shap_values[3]) """ Explanation: Single instance text plot When we pass a single instance to the text plot we get the importance of each token overlayed on the original text that corresponds to that token. Red regions correspond to parts of the text that increase the output of the model when they are included, while blue regions decrease the output of the model when they are included. In the context of the sentiment analysis model here red corresponds to a more positive review and blue a more negative review. 
Note that importance values returned for text models are often hierarchical and follow the structure of the text. Nonlinear interactions between groups of tokens are often saved and can be used during the plotting process. If the Explanation object passed to the text plot has a .hierarchical_values attribute, then small groups of tokens with strong non-linear effects among them will be auto-merged together to form coherent chunks. When the .hierarchical_values attribute is present it also means that the explainer may not have completely enumerated all possible token perturbations and so has treated chunks of the text as essentially a single unit. This happens since we often want to explain a text model while evaluating it fewer times than the numbers of tokens in the document. Whenever a region of the input text is not split by the explainer, it is show by the text plot as a single unit. The force plot above the text is designed to provide an overview of how all the parts of the text combine to produce the model's output. See the force plot notebook for more details, but the general structure of the plot is positive red features "pushing" the model output higher while negative blue features "push" the model output lower. The force plot provides much more quantitative information than the text coloring. Hovering over a chuck of text will underline the portion of the force plot that corresponds to that chunk of text, and hovering over a portion of the force plot will underline the corresponding chunk of text. Note that clicking on any chunk of text will show the sum of the SHAP values attributed to the tokens in that chunk (clicked again will hide the value). 
End of explanation """ # plot the first sentence's explanation shap.plots.text(shap_values[:3]) """ Explanation: Multiple instance text plot When we pass a multi-row explanation object to the text plot we get the single instance plots for each input instance scaled so they have consistent comparable x-axis and color ranges. End of explanation """ shap.plots.bar(shap_values.abs.sum(0)) """ Explanation: Summarizing text explanations While plotting several instance-level explanations using the text plot can be very informative, sometime you want global summaries of the impact of tokens over the a large set of instances. See the Explanation object documentation for more details, but you can easily summarize the importance of tokens in a dataset by collapsing a multi-row explanation object over all it's rows (in this case by summing). Doing this treats every text input token type as a feature, so the collapsed Explanation object will have as many columns as there were unique tokens in the orignal multi-row explanation object. If there are hierarchical values present in the Explanation object then any large groups are divided up and each token in the gruop is given an equal share of the overall group importance value. End of explanation """ shap.plots.bar(shap_values.abs.max(0)) """ Explanation: Note that how you summarize the importance of features can make a big difference. In the plot above the a token was very importance both because it had an impact on the model, and because it was very common. Below we instead summize the instances using the max function to see the largest impact of a token in any instance. End of explanation """ shap.plots.bar(shap_values[:,"but"]) shap.plots.bar(shap_values[:,"but"]) """ Explanation: You can also slice out a single token from all the instances by using that token as an input name (note that the gray values to the left of the input names are the original text that the token was generated from). 
End of explanation """ import numpy as np from transformers import AutoTokenizer, AutoModelForSeq2SeqLM import shap import torch tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-es") model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-es").cuda() s=["In this picture, there are four persons: my father, my mother, my brother and my sister."] explainer = shap.Explainer(model,tokenizer) shap_values = explainer(s) """ Explanation: Text-To-Text Visualization End of explanation """ shap.plots.text(shap_values) """ Explanation: Text-To-Text Visualization contains the input text to the model on the left side and output text on the right side (in the default layout). On hovering over a token on the right (output) side the importance of each input token is overlayed on it, and is signified by the background color of the token. Red regions correspond to parts of the text that increase the output of the model when they are included, while blue regions decrease the output of the model when they are included. The explanation for a particular output token can be anchored by clickling on the output token (it can be un-anchored by clicking again). Note that similar to the single output plots described above, importance values returned for text models are often hierarchical and follow the structure of the text. Small groups of tokens with strong non-linear effects among them will be auto-merged together to form coherent chunks. Similarly, The explainer may not have completely enumerated all possible token perturbations and so has treated chunks of the text as essentially a single unit. This preprocessing is done for each output token, and the merging behviour can differ for each output token, since the interation effects might be different for each output token. The merged chunks can be viewed by hovering over the input text, once an output token is anchored. All the tokens of a merged chunk are made bold. 
Once the ouput text is anchored the input tokens can be clicked on to view the exact shap value (Hovering over input token also brings up a tooltip with the values). Auto merged tokens show the total values divided over the number of tokens in that chunk. Hovering over the input text shows the SHAP value for that token for each output token. This is again signified by the background color of the output token. This can be anchored by clicking on the input token. Note: The color scaling for all token (input and output) are consistent and the brightest red is assigned to the maximum SHAP value of input tokens for any output token. Note: The layout of the two pieces of text can be changed by using the 'Layout' Drop down menu. End of explanation """
arnoldlu/lisa
ipynb/examples/android/workloads/Android_Recents_Fling.ipynb
apache-2.0
import logging from conf import LisaLogging LisaLogging.setup() %pylab inline import os from time import sleep # Support to access the remote target import devlib from env import TestEnv # Import support for Android devices from android import Screen, Workload from devlib.utils.android import adb_command # Support for trace events analysis from trace import Trace # Suport for FTrace events parsing and visualization import trappy """ Explanation: EAS Testing - Recents Fling on Android The goal of this experiment is to collect frame statistics while swiping up and down tabs of recently opened applications on a Pixel device running Android with an EAS kernel. This process is name Recents Fling. The Analysis phase will consist in comparing EAS with other schedulers, that is comparing sched governor with: - interactive - performance - powersave - ondemand For this experiment it is recommended to open many applications so that we can swipe over more recently opened applications. End of explanation """ import os os.environ['ANDROID_HOME'] = '/ext/android-sdk-linux/' """ Explanation: Test environment setup For more details on this please check out examples/utils/testenv_example.ipynb. devlib requires the ANDROID_HOME environment variable configured to point to your local installation of the Android SDK. If you have not this variable configured in the shell used to start the notebook server, you need to run a cell to define where your Android SDK is installed or specify the ANDROID_HOME in your target configuration. In case more than one Android device are conencted to the host, you must specify the ID of the device you want to target in my_target_conf. Run adb devices on your host to get the ID. 
End of explanation """ # Setup a target configuration my_conf = { # Target platform and board "platform" : 'android', "board" : 'pixel', # Device ID "device" : "HT6670300102", # Android home "ANDROID_HOME" : "/home/vagrant/lisa/tools/android-sdk-linux", # Folder where all the results will be collected "results_dir" : "Android_RecentsFling", # Define devlib modules to load "modules" : [ 'cpufreq' # enable CPUFreq support ], # FTrace events to collect for all the tests configuration which have # the "ftrace" flag enabled "ftrace" : { "events" : [ "sched_switch", "sched_load_avg_cpu", "cpu_frequency", "cpu_capacity" ], "buffsize" : 10 * 1024, }, # Tools required by the experiments "tools" : [ 'trace-cmd' ], } # Initialize a test environment using: te = TestEnv(my_conf) target = te.target """ Explanation: In case more than one Android device are conencted to the host, you must specify the ID of the device you want to target in my_target_conf. Run adb devices on your host to get the ID. End of explanation """ def set_performance(): target.cpufreq.set_all_governors('performance') def set_powersave(): target.cpufreq.set_all_governors('powersave') def set_interactive(): target.cpufreq.set_all_governors('interactive') def set_sched(): target.cpufreq.set_all_governors('sched') def set_ondemand(): target.cpufreq.set_all_governors('ondemand') for cpu in target.list_online_cpus(): tunables = target.cpufreq.get_governor_tunables(cpu) target.cpufreq.set_governor_tunables( cpu, 'ondemand', **{'sampling_rate' : tunables['sampling_rate_min']} ) # CPUFreq configurations to test confs = { 'performance' : { 'label' : 'prf', 'set' : set_performance, }, 'powersave' : { 'label' : 'pws', 'set' : set_powersave, }, 'interactive' : { 'label' : 'int', 'set' : set_interactive, }, 'sched' : { 'label' : 'sch', 'set' : set_sched, }, 'ondemand' : { 'label' : 'odm', 'set' : set_ondemand, } } # The set of results for each comparison test results = {} def open_apps(n): """ Open `n` apps on the device 
:param n: number of apps to open :type n: int """ # Get a list of third-party packages android_version = target.getprop('ro.build.version.release') if android_version >= 'N': packages = target.execute('cmd package list packages | cut -d: -f 2') packages = packages.splitlines() else: packages = target.execute('pm list packages -3 | cut -d: -f 2') packages = packages.splitlines() # As a safe fallback let's use a list of standard Android AOSP apps which are always available if len(packages) < 8: packages = [ 'com.android.messaging', 'com.android.calendar', 'com.android.settings', 'com.android.calculator2', 'com.android.email', 'com.android.music', 'com.android.deskclock', 'com.android.contacts', ] LAUNCH_CMD = 'monkey -p {} -c android.intent.category.LAUNCHER 1 ' if n > len(packages): n = len(packages) logging.info('Trying to open %d apps...', n) started = 0 for app in packages: logging.debug(' Launching %s', app) try: target.execute(LAUNCH_CMD.format(app)) started = started + 1 logging.info(' %2d starting %s...', started, app) except Exception: pass if started >= n: break # Close Recents target.execute('input keyevent KEYCODE_HOME') def recentsfling_run(exp_dir): # Unlock device screen (assume no password required) target.execute('input keyevent 82') # Configure screen to max brightness and no dimming Screen.set_brightness(target, percent=100) Screen.set_dim(target, auto=False) Screen.set_timeout(target, 60*60*10) # 10 hours should be enought for an experiment # Open Recents on the target device target.execute('input keyevent KEYCODE_APP_SWITCH') # Allow the activity to start sleep(5) # Reset framestats collection target.execute('dumpsys gfxinfo --reset') w, h = target.screen_resolution x = w/2 yl = int(0.2*h) yh = int(0.9*h) logging.info('Start Swiping Recents') for i in range(5): # Simulate two fast UP and DOWN swipes target.execute('input swipe {} {} {} {} 50'.format(x, yl, x, yh)) sleep(0.3) target.execute('input swipe {} {} {} {} 50'.format(x, yh, x, yl)) 
sleep(0.7) logging.info('Swiping Recents Completed') # Reset screen brightness and auto dimming Screen.set_defaults(target) # Get frame stats framestats_file = os.path.join(exp_dir, "framestats.txt") adb_command(target.adb_name, 'shell dumpsys gfxinfo com.android.systemui > {}'.format(framestats_file)) # Close Recents target.execute('input keyevent KEYCODE_HOME') return framestats_file def experiment(governor, exp_dir): os.system('mkdir -p {}'.format(exp_dir)); logging.info('------------------------') logging.info('Run workload using %s governor', governor) confs[governor]['set']() # Start FTrace te.ftrace.start() ### Run the benchmark ### framestats_file = recentsfling_run(exp_dir) # Stop FTrace te.ftrace.stop() # Collect and keep track of the trace trace_file = os.path.join(exp_dir, 'trace.dat') te.ftrace.get_trace(trace_file) # Parse trace tr = Trace(te.platform, exp_dir, events=my_conf['ftrace']['events']) # return all the experiment data return { 'dir' : exp_dir, 'framestats_file' : framestats_file, 'trace_file' : trace_file, 'ftrace' : tr.ftrace, 'trace' : tr } """ Explanation: Support Functions This set of support functions will help us running the benchmark using different CPUFreq governors. End of explanation """ N_APPS = 20 open_apps(N_APPS) # Give apps enough time to open sleep(5) """ Explanation: Run Flinger Prepare Environment End of explanation """ # Unlock device screen (assume no password required) target.execute('input keyevent 82') # Run the benchmark in all the configured governors for governor in confs: test_dir = os.path.join(te.res_dir, governor) results[governor] = experiment(governor, test_dir) """ Explanation: Run workload and collect traces End of explanation """ for governor in confs: framestats_file = results[governor]['framestats_file'] print "Frame Statistics for {} governor".format(governor.upper()) !sed '/Stats since/,/99th/!d;/99th/q' $framestats_file print "" """ Explanation: UI Performance Analysis End of explanation """
phoebe-project/phoebe2-docs
2.3/tutorials/latex_repr.ipynb
gpl-3.0
#!pip install -I "phoebe>=2.3,<2.4" import phoebe from phoebe import u # units import numpy as np logger = phoebe.logger() """ Explanation: Advanced: Parameter Latex Representation Setup Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab). End of explanation """ b = phoebe.default_binary() b.add_distribution({'teff@primary': phoebe.gaussian_around(100), 'teff@secondary': phoebe.gaussian_around(150), 'requiv@primary': phoebe.uniform_around(0.2)}) """ Explanation: The latex representations of parameters are mostly used while plotting distributions... so let's just create a few dummy distributions so that we can see how they're labeled when plotting. End of explanation """ _ = b.plot_distribution_collection(show=True) """ Explanation: Default Representation By default, whenever parameters themselves are referenced in plotting (like when calling b.plot_distribution_collection, a latex representation of the parameter name, along with the component or dataset, when applicable, is used. End of explanation """ print(b.filter(qualifier='latex_repr')) """ Explanation: Overriding Component Labels By default, the component labels themselves are used within this latex representation. These labels can be changed internally with b.rename_component. However, sometimes it is convenient to use a different naming convention for the latex representation. For example, let's say that we wanted to keep the python-labels as-is ('primary', 'secondary', and 'binary'), but use 'A', 'B', and 'AB' in the latex representations, respectively. These latex-representations are stored in the latex_repr parameters. End of explanation """ print(b.components) """ Explanation: These are blank (empty string) by default, in which case the actual component labels are used while plotting. 
End of explanation """ b.set_value(qualifier='latex_repr', component='primary', value='A') b.set_value(qualifier='latex_repr', component='secondary', value='B') b.set_value(qualifier='latex_repr', component='binary', value='AB') _ = b.plot_distribution_collection(show=True) """ Explanation: If we set these, then the latex_repr parameters will take precedence over the component labels End of explanation """ b.get_parameter(qualifier='teff', component='primary', context='component') print(b.get_parameter(qualifier='teff', component='primary', context='component').latexfmt) """ Explanation: Overriding Parameter Latex "Templates" Internally each parameter has a "template" for how to represent its name in latex. Let's look at those attributes for the parameters we have been plotting here. End of explanation """ b.get_parameter(qualifier='teff', component='primary', context='component')._latexfmt = 'T_{{ \mathrm{{ {component} }}}}' _ = b.plot_distribution_collection(show=True) """ Explanation: When plotting, the {component} portion of this string is replaced with latex_repr (if not empty) and otherwise the component label itself. Changing this template isn't technically supported (since there are no checks to make sure the string is valid), but if you insist, you can change the underlying string as follows:: End of explanation """
UCBerkeleySETI/blimpy
tutorial/blimpy_voyager_tour.ipynb
bsd-3-clause
%matplotlib inline import blimpy as bl import numpy as np import pylab as plt # For the purposes of illustration, I will assume that the Voyager files have been placed in /opt/voyager_data/. # Here is a link to the web folder holding Voyager files: http://blpd0.ssl.berkeley.edu/Voyager_data/ VOYAGER_DIR = '/opt/voyager_data/' voyager_fil = VOYAGER_DIR + 'Voyager1.single_coarse.fine_res.fil' voyager_h5 = VOYAGER_DIR + 'Voyager1.single_coarse.fine_res.h5' """ Explanation: Welcome to the Blimpy Voyager Tour! We know there should be Voyager telemetry at around 8419 MHz, let's extract it and run some tests to make sure we're doing it right, End of explanation """ ff = bl.Waterfall(voyager_fil, f_start=8419.24, f_stop=8419.35) hf = bl.Waterfall(voyager_h5, f_start=8419.24, f_stop=8419.35) print(ff.data.shape) print(hf.data.shape) print(ff.container.populate_freqs().shape) print(hf.container.populate_freqs().shape) print(ff.data[0].max(), ff.data[0].argmax()) print(hf.data[0].max(), hf.data[0].argmax()) print(ff.data[-1].max(), ff.data[-1].argmax()) print(hf.data[-1].max(), hf.data[-1].argmax()) # Assert data is loaded to the same shape and has same values assert ff.data.shape == hf.data.shape == (16, 1, 39370) assert np.allclose(ff.data, hf.data) # Check the Voyager carrier has the known amplitudes at first and last integration assert np.allclose(ff.data[0].max(), hf.data[0].max(), 3.09333e+11) assert np.allclose(ff.data[-1].max(), hf.data[-1].max(), 2.74257e+11) # Check the tone is in the same bin for both assert ff.data[0].argmax() == hf.data[0].argmax() == 18959 assert ff.data[-1].argmax() == hf.data[-1].argmax() == 18996 plt.subplot(2,1,1) ff.plot_spectrum() plt.subplot(2,1,2) hf.plot_spectrum() plt.tight_layout() """ Explanation: Load ff from a Voyager FIL file and hf from the equivalent Voyager HDF5 file. Perform some assertion testing and visual confirmation. 
End of explanation """ fw = bl.Waterfall(voyager_fil, f_start=8419.24, f_stop=8419.35) hw = bl.Waterfall(voyager_h5, f_start=8419.24, f_stop=8419.35) print(fw.data.shape) print(hw.data.shape) print(hw.data[0].max(), hw.data[0].argmax()) print(fw.data[0].max(), fw.data[0].argmax()) print(hw.data[-1].max(), hw.data[-1].argmax()) print(fw.data[-1].max(), fw.data[-1].argmax()) # Assert data is loaded to the same shape and has same values assert hw.data.shape == fw.data.shape == (16, 1, 39370) assert np.allclose(hw.data, fw.data) # Check the Voyager carrier has the known amplitudes at first and last integration assert np.allclose(hw.data[0].max(), fw.data[0].max(), 3.09333e+11) assert np.allclose(hw.data[-1].max(), fw.data[-1].max(), 2.74257e+11) # Check the tone is in the same bin for both assert hw.data[0].argmax() == fw.data[0].argmax() == 18959 assert hw.data[-1].argmax() == fw.data[-1].argmax() == 18996 # And plot plt.subplot(2,1,1) fw.plot_spectrum() plt.subplot(2,1,2) hw.plot_spectrum() plt.tight_layout() """ Explanation: Find the maximum and minimum values. Perform some assertion testing and visual confirmation. End of explanation """ all_readers = [ff, hf, ff, hf] plt.figure(figsize=(8, 8)) for ii, rr in enumerate(all_readers): plt.subplot(4, 1, ii+1) rr.plot_spectrum() plt.tight_layout() """ Explanation: Sanity check: all spectra are the same? 
End of explanation """ ff = bl.Waterfall(voyager_fil) hf = bl.Waterfall(voyager_h5) all_readers = [ff, hf] plt.figure(figsize=(8, 8)) for ii, rr in enumerate(all_readers): plt.subplot(4, 1, ii+1) rr.plot_spectrum(f_start=8419.24, f_stop=8419.35) plt.tight_layout() plt.figure(figsize=(8, 8)) for ii, rr in enumerate(all_readers): plt.subplot(4, 1, ii+1) rr.plot_spectrum(f_start=8419.29685, f_stop=8419.2972) plt.tight_layout() plt.figure(figsize=(8, 8)) for ii, rr in enumerate(all_readers): plt.subplot(4, 1, ii+1) rr.plot_waterfall(f_start=8419.29685, f_stop=8419.2971) plt.tight_layout() for ii, rr in enumerate(all_readers): f, d = rr.grab_data(f_start=8419.29, f_stop=8419.30) print(f.shape, d.shape) assert f.shape == (3580,) assert d.shape == (16, 3580) for ii, rr in enumerate(all_readers): f, d = rr.grab_data(f_start=8419.29685, f_stop=8419.2971) print(f.shape, d.shape) assert f.shape == (91,) assert d.shape == (16, 91) """ Explanation: Re-load full files and then plot extracted portions and perform more tests. End of explanation """
DavidNorman/tensorflow
tensorflow/lite/g3doc/models/style_transfer/overview.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2019 The TensorFlow Authors. End of explanation """ from __future__ import absolute_import, division, print_function, unicode_literals try: # %tensorflow_version only exists in Colab. import tensorflow.compat.v2 as tf except Exception: pass tf.enable_v2_behavior() import IPython.display as display import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['figure.figsize'] = (12,12) mpl.rcParams['axes.grid'] = False import numpy as np import time import functools """ Explanation: Artistic Style Transfer with TensorFlow Lite <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/lite/models/style_transfer/overview"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/style_transfer/overview.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/g3doc/models/style_transfer/overview.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/tensorflow/tensorflow/lite/g3doc/models/style_transfer/overview.ipynb"><img 
src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> One of the most exciting developments in deep learning to come out recently is artistic style transfer, or the ability to create a new image, known as a pastiche, based on two input images: one representing the artistic style and one representing the content. Using this technique, we can generate beautiful new artworks in a range of styles. This tutorial shows how to use a pre-trained TensorFlow Lite model to apply style transfer on any pair of content and style image. You can use the pre-trained model to add style transfer to your own mobile applications. The model is open-sourced on GitHub. You can retrain the model with different parameters (e.g. increase content layers' weights to make the output image look more like the content image). Understand the model architecture This Artistic Style Transfer model consists of two submodels: 1. Style Prediciton Model: A MobilenetV2-based neural network that takes an input style image to a 100-dimension style bottleneck vector. 1. Style Transform Model: A neural network that takes apply a style bottleneck vector to a content image and creates a stylized image. If your app only needs to support a fixed set of style images, you can compute their style bottleneck vectors in advance, and exclude the Style Prediction Model from your app's binary. Setup Import dependencies. 
End of explanation """ content_path = tf.keras.utils.get_file('belfry.jpg','https://storage.googleapis.com/khanhlvg-public.appspot.com/arbitrary-style-transfer/belfry-2611573_1280.jpg') style_path = tf.keras.utils.get_file('style23.jpg','https://storage.googleapis.com/khanhlvg-public.appspot.com/arbitrary-style-transfer/style23.jpg') style_predict_path = tf.keras.utils.get_file('style_predict.tflite', 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/arbitrary_style_transfer/style_predict_quantized_256.tflite') style_transform_path = tf.keras.utils.get_file('style_transform.tflite', 'https://storage.googleapis.com/download.tensorflow.org/models/tflite/arbitrary_style_transfer/style_transfer_quantized_dynamic.tflite') """ Explanation: Download the content and style images, and the pre-trained TensorFlow Lite models. End of explanation """ # Function to load an image from a file, and add a batch dimension. def load_img(path_to_img): img = tf.io.read_file(path_to_img) img = tf.image.decode_image(img, channels=3) img = tf.image.convert_image_dtype(img, tf.float32) img = img[tf.newaxis, :] return img # Function to pre-process style image input. def preprocess_style_image(style_image): # Resize the image so that the shorter dimension becomes 256px. target_dim = 256 shape = tf.cast(tf.shape(style_image)[1:-1], tf.float32) short_dim = min(shape) scale = target_dim / short_dim new_shape = tf.cast(shape * scale, tf.int32) style_image = tf.image.resize(style_image, new_shape) # Central crop the image. style_image = tf.image.resize_with_crop_or_pad(style_image, target_dim, target_dim) return style_image # Function to pre-process content image input. def preprocess_content_image(content_image): # Central crop the image. shape = tf.shape(content_image)[1:-1] short_dim = min(shape) content_image = tf.image.resize_with_crop_or_pad(content_image, short_dim, short_dim) return content_image # Load the input images. 
content_image = load_img(content_path)
style_image = load_img(style_path)

# Preprocess the input images.
preprocessed_content_image = preprocess_content_image(content_image)
preprocessed_style_image = preprocess_style_image(style_image)

# Fix: each label now reports the shape of the matching image (labels were swapped).
print('Style Image Shape:', preprocessed_style_image.shape)
print('Content Image Shape:', preprocessed_content_image.shape)
""" Explanation: Pre-process the inputs The content image and the style image must be RGB images with pixel values being float32 numbers between [0..1]. The style image size must be (1, 256, 256, 3). We central crop the image and resize it. The content image can be any size. However, as we trained the model using square-cropped data, cropping the content image to a square results in better stylized image. End of explanation """

def imshow(image, title=None):
    """Display an image, squeezing away a leading batch dimension if present."""
    if len(image.shape) > 3:
        image = tf.squeeze(image, axis=0)
    plt.imshow(image)
    if title:
        plt.title(title)

plt.subplot(1, 2, 1)
imshow(preprocessed_content_image, 'Content Image')
plt.subplot(1, 2, 2)
imshow(preprocessed_style_image, 'Style Image')
""" Explanation: Visualize the inputs End of explanation """

# Function to run style prediction on preprocessed style image.
def run_style_predict(preprocessed_style_image):
    """Run the TFLite style-prediction model; return its style bottleneck vector."""
    # Load the model.
    interpreter = tf.lite.Interpreter(model_path=style_predict_path)

    # Set model input.
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    interpreter.set_tensor(input_details[0]["index"], preprocessed_style_image)

    # Calculate style bottleneck.
    interpreter.invoke()
    style_bottleneck = interpreter.tensor(
        interpreter.get_output_details()[0]["index"]
    )()

    return style_bottleneck

# Calculate style bottleneck for the preprocessed style image.
style_bottleneck = run_style_predict(preprocessed_style_image) print('Style Bottleneck Shape:', style_bottleneck.shape) """ Explanation: Run style transfer with TensorFlow Lite Style prediction End of explanation """ # Run style transform on preprocessed style image def run_style_transform(style_bottleneck, preprocessed_content_image): # Load the model. interpreter = tf.lite.Interpreter(model_path=style_transform_path) # Set model input. input_details = interpreter.get_input_details() interpreter.resize_tensor_input(input_details[0]["index"], preprocessed_content_image.shape) interpreter.allocate_tensors() # Set model inputs. interpreter.set_tensor(input_details[0]["index"], preprocessed_content_image) interpreter.set_tensor(input_details[1]["index"], style_bottleneck) interpreter.invoke() # Transform content image. stylized_image = interpreter.tensor( interpreter.get_output_details()[0]["index"] )() return stylized_image # Stylize the content image using the style bottleneck. stylized_image = run_style_transform(style_bottleneck, preprocessed_content_image) # Visualize the output. imshow(stylized_image, 'Stylized Image') """ Explanation: Style transform End of explanation """ # Calculate style bottleneck of the content image. style_bottleneck_content = run_style_predict( preprocess_style_image(content_image) ) # Define content blending ratio between [0..1]. # 0.0: 0% style extracts from content image. # 1.0: 100% style extracted from content image. content_blending_ratio = 0.5 #@param {type:"slider", min:0, max:1, step:0.01} # Blend the style bottleneck of style image and content image style_bottleneck_blended = content_blending_ratio * style_bottleneck_content \ + (1 - content_blending_ratio) * style_bottleneck # Stylize the content image using the style bottleneck. stylized_image_blended = run_style_transform(style_bottleneck_blended, preprocessed_content_image) # Visualize the output. 
imshow(stylized_image_blended, 'Blended Stylized Image') """ Explanation: Style blending We can blend the style of content image into the stylized output, which in turn making the output look more like the content image. End of explanation """
statsmodels/statsmodels.github.io
v0.13.1/examples/notebooks/generated/distributed_estimation.ipynb
bsd-3-clause
import numpy as np from scipy.stats.distributions import norm from statsmodels.base.distributed_estimation import DistributedModel def _exog_gen(exog, partitions): """partitions exog data""" n_exog = exog.shape[0] n_part = np.ceil(n_exog / partitions) ii = 0 while ii < n_exog: jj = int(min(ii + n_part, n_exog)) yield exog[ii:jj, :] ii += int(n_part) def _endog_gen(endog, partitions): """partitions endog data""" n_endog = endog.shape[0] n_part = np.ceil(n_endog / partitions) ii = 0 while ii < n_endog: jj = int(min(ii + n_part, n_endog)) yield endog[ii:jj] ii += int(n_part) """ Explanation: Distributed Estimation This notebook goes through a couple of examples to show how to use distributed_estimation. We import the DistributedModel class and make the exog and endog generators. End of explanation """ X = np.random.normal(size=(1000, 25)) beta = np.random.normal(size=25) beta *= np.random.randint(0, 2, size=25) y = norm.rvs(loc=X.dot(beta)) m = 5 """ Explanation: Next we generate some random data to serve as an example. End of explanation """ debiased_OLS_mod = DistributedModel(m) debiased_OLS_fit = debiased_OLS_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: This is the most basic fit, showing all of the defaults, which are to use OLS as the model class, and the debiasing procedure. End of explanation """ from statsmodels.genmod.generalized_linear_model import GLM from statsmodels.genmod.families import Gaussian debiased_GLM_mod = DistributedModel( m, model_class=GLM, init_kwds={"family": Gaussian()} ) debiased_GLM_fit = debiased_GLM_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: Then we run through a slightly more complicated example which uses the GLM model class. 
End of explanation """ from statsmodels.base.distributed_estimation import _est_regularized_naive, _join_naive naive_OLS_reg_mod = DistributedModel( m, estimation_method=_est_regularized_naive, join_method=_join_naive ) naive_OLS_reg_params = naive_OLS_reg_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: We can also change the estimation_method and the join_method. The below example show how this works for the standard OLS case. Here we using a naive averaging approach instead of the debiasing procedure. End of explanation """ from statsmodels.base.distributed_estimation import ( _est_unregularized_naive, DistributedResults, ) naive_OLS_unreg_mod = DistributedModel( m, estimation_method=_est_unregularized_naive, join_method=_join_naive, results_class=DistributedResults, ) naive_OLS_unreg_params = naive_OLS_unreg_mod.fit( zip(_endog_gen(y, m), _exog_gen(X, m)), fit_kwds={"alpha": 0.2} ) """ Explanation: Finally, we can also change the results_class used. The following example shows how this work for a simple case with an unregularized model and naive averaging. End of explanation """
leoferres/prograUDD
clases/05-Control-Flow.ipynb
mit
x = -15 if x == 0: print(x, "es cero") elif x > 0: print(x, "es positivo") elif x < 0: print(x, "es negativo") else: print(x, "es algo que ni idea...") """ Explanation: Control de flujo de programas Bueno, al fin y al cabo llegamos al punto fundamental de la programación. Sin fors o ifs, los programas no son nada más que órdenes ejecutadas secuencialmente una después de la otra. Con los loops y las condiciones, uno puede ejecutar bloques de código sólo si ciertas condiciones son verdadres o se cumplen, o podemos ejecutar código "para siempre! Hoy vamos a cubrir los "condicionales" (incluyendo if, elif y else), los "loops" (he escuchado varias traducciones de "loop" pero ninguna me gusta), incluyendo for, while y algunos términos asociados como break, continue y pass. Condicionales: if-elif-else Los condicionales, a veces también llamados if-then, permite que ejecutemos sólo ciertas partes de código dependiendo en alguna condición booleana (T o F). Por ejemplo: End of explanation """ for N in [2, 3, 5, 7]: print(N, end=' ') # imprimir todos en una misma linea """ Explanation: Noten en particular los dos puntos (:) y el espacio para definir los bloques de código. Las instrucciones if-then están en casi todos los lenguajes. Python tambien tiene elif que es una contracción de else if. Los bloques else if son opcionales y pueden incluir todos los que quieran. Loops (ciclos??) Los loops en Python son una manera de ejecutar código repetidamente. Supongamos que queremos imprimir todos los elementos de una lista: End of explanation """ for c in "python": print(c) """ Explanation: Fíjense las partes del loop: el for, una variable N para mantener estado, el operador de membresía in y la lista misma. Es casi lenguaje natural. En particular, a la derecha (donde está la lista) puede ser cualquier iterador (un objeto que podamos "recorrer"), ver aqui. 
End of explanation """ for i in range(10): print(i, end=' ') """ Explanation: Uno de los iteradores más comunes de Python es el objeto range(), que genera una secuencia de números. End of explanation """ # range de 5 a 10 list(range(5, 10)) # range de 0 a 10 de a 2 list(range(0, 10, 2)) """ Explanation: Noten que siempre las iteraciones empiezan con 0 y que por convención el último elemento del objeto no se incluye (el número "10" en este caso). El objeto range puede tomar secuencias (rangos, por eso el nombre) más complicados, por ejemplo: End of explanation """ i = 0 while i < 10: print(i, end=' ') i += 1 """ Explanation: El objeto raneg() produce un iterable. Ciclos while El otro tipo de ciclo en Python es el while, que "itera" hasta que alguna condición booleana se cumpla. End of explanation """ for n in range(20): # si el resto es 0, salte lo que queda de la iteración if n % 2 == 0: continue print(n, end=' ') """ Explanation: break y continue break salta fuera del loop completamente continue se salta lo que queda de la iteración y vuelve al for End of explanation """
mne-tools/mne-tools.github.io
0.13/_downloads/plot_compute_covariance.ipynb
bsd-3-clause
import os.path as op import mne from mne.datasets import sample """ Explanation: Computing covariance matrix End of explanation """ data_path = sample.data_path() raw_empty_room_fname = op.join( data_path, 'MEG', 'sample', 'ernoise_raw.fif') raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname, add_eeg_ref=False) raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif') raw = mne.io.read_raw_fif(raw_fname, add_eeg_ref=False) raw.set_eeg_reference() raw.info['bads'] += ['EEG 053'] # bads + 1 more """ Explanation: Source estimation method such as MNE require a noise estimations from the recordings. In this tutorial we cover the basics of noise covariance and construct a noise covariance matrix that can be used when computing the inverse solution. For more information, see BABDEEEB. End of explanation """ noise_cov = mne.compute_raw_covariance(raw_empty_room, tmin=0, tmax=None) """ Explanation: The definition of noise depends on the paradigm. In MEG it is quite common to use empty room measurements for the estimation of sensor noise. However if you are dealing with evoked responses, you might want to also consider resting state brain activity as noise. First we compute the noise using empty room recording. Note that you can also use only a part of the recording with tmin and tmax arguments. That can be useful if you use resting state as a noise baseline. Here we use the whole empty room recording to compute the noise covariance (tmax=None is the same as the end of the recording, see :func:mne.compute_raw_covariance). End of explanation """ events = mne.find_events(raw) epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.0, baseline=(-0.2, 0.0)) """ Explanation: Now that you the covariance matrix in a python object you can save it to a file with :func:mne.write_cov. Later you can read it back to a python object using :func:mne.read_cov. You can also use the pre-stimulus baseline to estimate the noise covariance. 
First we have to construct the epochs. When computing the covariance, you should use baseline correction when constructing the epochs. Otherwise the covariance matrix will be inaccurate. In MNE this is done by default, but just to be sure, we define it here manually. End of explanation """ noise_cov_baseline = mne.compute_covariance(epochs) """ Explanation: Note that this method also attenuates the resting state activity in your source estimates. End of explanation """ noise_cov.plot(raw_empty_room.info, proj=True) noise_cov_baseline.plot(epochs.info) """ Explanation: Plot the covariance matrices Try setting proj to False to see the effect. Notice that the projectors in epochs are already applied, so proj parameter has no effect. End of explanation """ cov = mne.compute_covariance(epochs, tmax=0., method='auto') """ Explanation: How should I regularize the covariance matrix? The estimated covariance can be numerically unstable and tends to induce correlations between estimated source amplitudes and the number of samples available. The MNE manual therefore suggests to regularize the noise covariance matrix (see cov_regularization), especially if only few samples are available. Unfortunately it is not easy to tell the effective number of samples, hence, to choose the appropriate regularization. In MNE-Python, regularization is done using advanced regularization methods described in [1]_. For this the 'auto' option can be used. With this option cross-validation will be used to learn the optimal regularization: End of explanation """ evoked = epochs.average() evoked.plot_white(cov) """ Explanation: This procedure evaluates the noise covariance quantitatively by how well it whitens the data using the negative log-likelihood of unseen data. The final result can also be visually inspected. 
Under the assumption that the baseline does not contain a systematic signal (time-locked to the event of interest), the whitened baseline signal should be follow a multivariate Gaussian distribution, i.e., whitened baseline signals should be between -1.96 and 1.96 at a given time sample. Based on the same reasoning, the expected value for the global field power (GFP) is 1 (calculation of the GFP should take into account the true degrees of freedom, e.g. ddof=3 with 2 active SSP vectors): End of explanation """ covs = mne.compute_covariance(epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True) evoked = epochs.average() evoked.plot_white(covs) """ Explanation: This plot displays both, the whitened evoked signals for each channels and the whitened GFP. The numbers in the GFP panel represent the estimated rank of the data, which amounts to the effective degrees of freedom by which the squared sum across sensors is divided when computing the whitened GFP. The whitened GFP also helps detecting spurious late evoked components which can be the consequence of over- or under-regularization. Note that if data have been processed using signal space separation (SSS) [2], gradiometers and magnetometers will be displayed jointly because both are reconstructed from the same SSS basis vectors with the same numerical rank. This also implies that both sensor types are not any longer statistically independent. These methods for evaluation can be used to assess model violations. Additional introductory materials can be found here &lt;https://goo.gl/ElWrxe&gt;. For expert use cases or debugging the alternative estimators can also be compared: End of explanation """
dsacademybr/PythonFundamentos
Cap08/DesafioDSA_Solucao/Missao2/missao2.ipynb
gpl-3.0
import math class PrimeGenerator(object): def generate_primes(self, max_num): # Implemente aqui sua solução def _cross_off(self, array, prime): # Implemente aqui sua solução def _next_prime(self, array, prime): # Implemente aqui sua solução """ Explanation: <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 7</font> Download: http://github.com/dsacademybr Missão: Gerar uma lista de números primos. Nível de Dificuldade: Médio Premissas É correto que 1 não seja considerado um número primo?      * Sim Podemos assumir que as entradas são válidas?      * Não Podemos supor que isso se encaixa na memória?      * Sim Teste Cases None -> Exception Not an int -> Exception 20 -> [False, False, True, True, False, True, False, True, False, False, False, True, False, True, False, False, False, True, False, True] Algoritmo Para um número ser primo, ele deve ser 2 ou maior e não pode ser divisível por outro número diferente de si mesmo (e 1). Todos os números não-primos são divisíveis por um número primo. Use uma matriz (array) para manter o controle de cada número inteiro até o máximo Comece em 2, termine em sqrt (max)      * Podemos usar o sqrt (max) em vez do max porque:          * Para cada valor que divide o número de entrada uniformemente, há um complemento b onde a * b = n          * Se a> sqrt (n) então b <sqrt (n) porque sqrt (n ^ 2) = n      * "Cross off" todos os números divisíveis por 2, 3, 5, 7, ... 
configurando array [index] para False Animação do Wikipedia: Solução End of explanation """ %%writefile missao2.py from nose.tools import assert_equal, assert_raises class TestMath(object): def test_generate_primes(self): prime_generator = PrimeGenerator() assert_raises(TypeError, prime_generator.generate_primes, None) assert_raises(TypeError, prime_generator.generate_primes, 98.6) assert_equal(prime_generator.generate_primes(20), [False, False, True, True, False, True, False, True, False, False, False, True, False, True, False, False, False, True, False, True]) print('Sua solução foi executada com sucesso! Parabéns!') def main(): test = TestMath() test.test_generate_primes() if __name__ == '__main__': main() %run -i missao2.py """ Explanation: Teste da Solução End of explanation """
par2/lamana
docs/_demo_pinned.ipynb
bsd-3-clause
#------------------------------------------------------------------------------ import pandas as pd import lamana as la #import LamAna as la %matplotlib inline #%matplotlib nbagg # PARAMETERS ------------------------------------------------------------------ # Build dicts of geometric and material parameters load_params = {'R' : 12e-3, # specimen radius 'a' : 7.5e-3, # support ring radius 'r' : 2e-4, # radial distance from center loading 'P_a' : 1, # applied load 'p' : 5, # points/layer } # Quick Form: a dict of lists mat_props = {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33], } # Standard Form: a dict of dicts # mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, # 'Poissons': {'HA': 0.25, 'PSu': 0.33}} # What geometries to test? # Make tuples of desired geometeries to analyze: outer - {inner...-....}_i - middle # Current Style g1 = ('0-0-2000') # Monolith g2 = ('1000-0-0') # Bilayer g3 = ('600-0-800') # Trilayer g4 = ('500-500-0') # 4-ply g5 = ('400-200-800') # Short-hand; <= 5-ply g6 = ('400-200-400S') # Symmetric g7 = ('400-[200]-800') # General convention; 5-ply g8 = ('400-[100,100]-800') # General convention; 7-plys g9 = ('400-[100,100]-400S') # General and Symmetric convention; 7-plys '''Add to test set''' g13 = ('400-[150,50]-800') # Dissimilar inner_is g14 = ('400-[25,125,50]-800') geos_most = [g1, g2, g3, g4, g5] geos_special = [g6, g7, g8, g9] geos_full = [g1, g2, g3, g4, g5, g6, g7, g8, g9] geos_dissimilar = [g13, g14] # Future Style #geos1 = ((400-400-400),(400-200-800),(400-350-500)) # same total thickness #geos2 = ((400-400-400), (400-500-1600), (400-200-800)) # same outer thickness #import pandas as pd pd.set_option('display.max_columns', 10) pd.set_option('precision', 4) """ Explanation: This notebook is pinned. It's should not be run at at time beyond pre-dev phase. This is design to test regression between session through a dev-release cycle. 
Demonstration The following demonstration includes basic and intermediate uses of the LamAna Project library. It is intended to exhaustively reference all API features, therefore some advandced demonstrations will favor technical detail. Tutorial: Basic User Input Startup End of explanation """ case1 = la.distributions.Case(load_params, mat_props) # instantiate a User Input Case Object through distributions case1.apply(['400-200-800']) case1.plot() """ Explanation: Goal: Generate a Plot in 3 Lines of Code End of explanation """ # Original case1.load_params # Series View case1.parameters # Original case1.mat_props # DataFrame View case1.properties # Equivalent Standard Form case1.properties.to_dict() """ Explanation: That's it! The rest of this demonstration showcases API functionality of the LamAna project. Calling Case attributes Passed in arguments are acessible, but can be displayed as pandas Series and DataFrames. End of explanation """ case1.materials = ['PSu', 'HA'] case1.properties """ Explanation: Reset material order. Changes are relfected in the properties view and stacking order. End of explanation """ case1.materials = ['PSu', 'HA', 'HA'] case1.properties case1.materials # get reorderd list of materials case1._materials case1.apply(geos_full) case1.snapshots[-1] '''Need to bypass pandas abc ordering of indicies.''' """ Explanation: Serial resets End of explanation """ mat_props2 = {'HA' : [5.3e10, 0.25], 'PSu' : [2.8e9, 0.33], } case1 = la.distributions.Case(load_params, mat_props2) case1.properties """ Explanation: Reset the parameters End of explanation """ case2 = la.distributions.Case(load_params, mat_props) case2.apply(geos_full) # default model Wilson_LT """ Explanation: apply() Geometries and LaminateModels Construct a laminate using geometric, matrial paramaters and geometries. 
End of explanation """ case2.Geometries # using an attribute, __repr__ print(case2.Geometries) # uses __str__ case2.Geometries[0] # indexing """ Explanation: Access the user input geometries End of explanation """ bilayer = case2.Geometries[1] # (1000.0-[0.0]-0.0) trilayer = case2.Geometries[2] # (600.0-[0.0]-800.0) #bilayer == trilayer bilayer != trilayer """ Explanation: We can compare Geometry objects with builtin Python operators. This process directly compares GeometryTuples in the Geometry class. End of explanation """ case2.middle case2.inner case2.inner[-1] case2.inner[-1][0] # List indexing allowed [first[0] for first in case2.inner] # iterate case2.outer """ Explanation: Get all thicknesses for selected layers. End of explanation """ case2.LMs """ Explanation: A general and very important object is the LaminateModel. End of explanation """ fiveplys = ['400-[200]-800', '350-400-500', '200-100-1400'] oddplys = ['400-200-800', '350-400-500', '400.0-[100.0,100.0]-800.0'] mix = fiveplys + oddplys mix # Non-unique, repeated 5-plys case_ = la.distributions.Case(load_params, mat_props) case_.apply(mix) case_.LMs # Unique case_ = la.distributions.Case(load_params, mat_props) case_.apply(mix, unique=True) case_.LMs """ Explanation: Sometimes might you want to throw in a bunch of geometry strings from different groups. If there are repeated strings in different groups (set intersections), you can tell Case to only give a unique result. For instane, here we combine two groups of geometry strings, 5-plys and odd-plys. Clearly these two groups overlap, and there are some repeated geometries (one with different conventions). Using the unique keyword, Case only operates on a unique set of Geometry objects (independent of convention), resulting in a unique set of LaminateModels. End of explanation """ case2.snapshots[-1] """ Explanation: DataFrame Access You can get a quick view of the stack using the snapshot method. 
This gives access to a Construct - a DataFrame converted stack. End of explanation """ '''Consider head command for frames list''' #case2.frames ##with pd.set_option('display.max_columns', None): # display all columns, within this context manager ## case2.frames[5] case2.frames[5].head() '''Extend laminate attributes''' case3 = la.distributions.Case(load_params, mat_props) case3.apply(geos_dissimilar) #case3.frames """ Explanation: We can easily view entire laminate DataFrames using the frames attribute. This gives access to LaminateModels (DataFrame) objects, which extends the stack view so that laminate theory is applied to each row. End of explanation """ case4 = la.distributions.Case(load_params, mat_props) case4.apply(['400-[100,100,100]-0']) case4.frames[0][['layer', 'matl', 'type']] ; '''Add functionality to customize material type.''' """ Explanation: NOTE, for even plies, the material is set alternate for each layer. Thus outers layers may be different materials. End of explanation """ '''Show Geometry first then case use.''' """ Explanation: Totaling The distributions.Case class has useful properties available for totaling specific layers for a group of laminates as lists. As these properties return lists, these results can be sliced and iterated. End of explanation """ case2.total case2.total_middle case2.total_middle case2.total_inner_i case2.total_outer case2.total_outer[4:-1] # slicing [inner_i[-1]/2.0 for inner_i in case2.total_inner_i] # iterate """ Explanation: .total property End of explanation """ G1 = case2.Geometries[-1] G1 G1.total # laminate thickness (um) G1.total_inner_i # inner_i laminae G1.total_inner_i[0] # inner_i lamina pair sum(G1.total_inner_i) # inner total G1.total_inner # inner total """ Explanation: Geometry Totals The total attribute used in Case actually dervive from attributes for Geometry objects individually. On Geometry objects, they return specific thicknesses instead of lists of thicknesses. 
End of explanation """ case2.LMs[5].Middle case2.LMs[5].Inner_i """ Explanation: LaminateModel Attributes Access the LaminateModel object directly using the LMs attribute. End of explanation """ case2.LMs[5].tensile """ Explanation: Laminates are assumed mirrored at the neutral axis, but dissimilar inner_i thicknesses are allowed. End of explanation """ LM = case2.LMs[4] LM.LMFrame.tail(7) """ Explanation: Separate from the case attributes, Laminates have useful attributes also, such as nplies, p and its own total. End of explanation """ LM.extrema LM.p # number of rows per group LM.nplies # number of plies LM.total # total laminate thickness (m) LM.Geometry '''Overload the min and max special methods.''' LM.max_stress # max interfacial failure stress """ Explanation: Often the extreme stress values (those at the interfaces) are most important. This is equivalent to p=2. End of explanation """ LM.min_stress '''Redo tp return series of bool an index for has_attrs''' LM.has_neutaxis LM.has_discont LM.is_special LM.FeatureInput '''Need to fix FeatureInput and Geometry inside LaminateModel''' """ Explanation: NOTE: this feature gives a different result for p=1 since a single middle cannot report two interfacial values; INDET. End of explanation """ case2 = la.distributions.Case(load_params, mat_props) case2.apply(geos_full) bilayer_LM = case2.LMs[1] trilayer_LM = case2.LMs[2] trilayer_LM == trilayer_LM #bilayer_LM == trilayer_LM bilayer_LM != trilayer_LM """ Explanation: As with Geometry objects, we can compare LaminateModel objects also. ~~This process directly compares two defining components of a LaminateModel object: the LM DataFrame (LMFrame) and FeatureInput. 
If either is False, the equality returns False.~~ End of explanation """ #bilayer_LM.FeatureInput == trilayer_LM.FeatureInput # gives detailed traceback '''Fix FI DataFrame with dict.''' bilayer_LM.FeatureInput #bilayer_LM.LMFrame == trilayer_LM.LMFrame # gives detailed traceback """ Explanation: Use python and pandas native comparison tracebacks that to understand the errors directly by comparing FeatureInput dict and LaminateModel DataFrame. End of explanation """ '''Find a way to remove all but interfacial points.''' """ Explanation: plot() LT Geometries CAVEAT: it is recommended to use at least p=2 for calculating stress. Less than two points for odd plies is indeterminant in middle rows, which can raise exceptions. End of explanation """ from lamana.utils import tools as ut from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() #%matplotlib nbagg # Quick plotting case4 = ut.laminator(dft.geos_standard) for case in case4.values(): for LM in case.LMs: df = LM.LMFrame df.plot(x='stress_f (MPa/N)', y='d(m)', title='Unnormalized Distribution') df.plot(x='stress_f (MPa/N)', y='k', title='Normalized Distribution') """ Explanation: We try to quickly plot simple stress distriubtions with native pandas methods. We have two variants for displaying distributions: - Unnoormalized: plotted by the height (`d_`). Visaully: thicknesses vary, material slopes are constant. - Normalized: plotted by the relative fraction level (`k_`). Visually: thicknesses are constant, material slopes vary. Here we plot with the nbagg matplotlib backend to generatre interactive figures. NOTE: for Normalized plots, slope can vary for a given material. End of explanation """ case3 = la.distributions.Case(load_params, mat_props) case3.apply(['400-200-800'], model='Wilson_LT') case3.plot() """ Explanation: While we get reasonable stress distribution plots rather simply, LamAna offers some plotting methods pertinent to laminates than assisting with visualization. 
Demo - An example illustration of desired plotting of multiple geometries from distributions. This is image of results from legacy code used for comparison. We can plot the stress distribution for a case of a single geometry. End of explanation """ five_plies = ['350-400-500', '400-200-800', '200-200-1200', '200-100-1400', '100-100-1600', '100-200-1400', '300-400-600'] case4 = la.distributions.Case(load_params, mat_props) case4.apply(five_plies, model='Wilson_LT') case4.plot() '''If different plies or patterns, make new caselet (subplot)''' '''[400-200-800, '300-[400,200]-600'] # non-congruent? equi-ply''' '''[400-200-800, '400-200-0'] # odd/even ply''' # currently superimposes plots. Just needs to separate. """ Explanation: We can also plot multiple geometries of similar total thickness. End of explanation """ LM = case4.LMs[0] LM.to_xlsx(delete=True) # or `to_csv()` """ Explanation: Exporting Saving data is critical for future analysis. LamAna offers two formas for exporting your data and parameters. Parameters used to make calculations such as the FeatureInput information are saved as "dashboards" in different forms. - '.xlsx': (default); convient for storing multiple calculationa amd dashboards as se[arate worksheets in a Excel workbook. - '.csv': universal format; separate files for data and dashboard. The lowest level to export data is for a LaminateModel object. End of explanation """ case4.to_xlsx(temp=True, delete=True) # or `to_csv()` """ Explanation: <div class="alert alert-warning">**NOTE** For demonstration purposes, the `temp` and `delete` are activated. This will create temporary files in the OS temp directory and automatically delete them. For practical use, ignore setting these flags.</div> The latter LaminateModel data was saved to an .xlsx file in the default export folder. The filepath is returned (currently suppressed with the ; line). The next level to export data is for a case. This will save all files comprise in a case. 
If exported to csv format, files are saved seperately. In xlsx format, a single file is made where each LaminateModel data and dashboard are saved as seperate worksheets. End of explanation """ #------------------------------------------------------------------------------ import pandas as pd import lamana as la %matplotlib inline #%matplotlib nbagg # PARAMETERS ------------------------------------------------------------------ # Build dicts of loading parameters and and material properties load_params = {'R' : 12e-3, # specimen radius 'a' : 7.5e-3, # support ring radius 'r' : 2e-4, # radial distance from center loading 'P_a' : 1, # applied load 'p' : 5, # points/layer } # # Quick Form: a dict of lists # mat_props = {'HA' : [5.2e10, 0.25], # 'PSu' : [2.7e9, 0.33],} # Standard Form: a dict of dicts mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} # What geometries to test? # Make tuples of desired geometeries to analyze: outer - {inner...-....}_i - middle # Current Style g1 = ('0-0-2000') # Monolith g2 = ('1000-0-0') # Bilayer g3 = ('600-0-800') # Trilayer g4 = ('500-500-0') # 4-ply g5 = ('400-200-800') # Short-hand; <= 5-ply g6 = ('400-200-400S') # Symmetric g7 = ('400-[200]-800') # General convention; 5-ply g8 = ('400-[100,100]-800') # General convention; 7-plys g9 = ('400-[100,100]-400S') # General and Symmetric convention; 7-plys '''Add to test set''' g13 = ('400-[150,50]-800') # Dissimilar inner_is g14 = ('400-[25,125,50]-800') geos_most = [g1, g2, g3, g4, g5] geos_special = [g6, g7, g8, g9] geos_full = [g1, g2, g3, g4, g5, g6, g7, g8, g9] geos_dissimilar = [g13, g14] """ Explanation: Tutorial: Intermediate So far, the barebones objects have been discussed and a lot can be accomplished with the basics. For users who have some experience with Python and Pandas, here are some intermediate techniques to reduce repetitious actions. 
This section dicusses the use of abstract base classes intended for reducing redundant tasks such as multiple case creation and default parameter definitions. Custom model subclassing is also discussed. End of explanation """ # Geometry object la.input_.Geometry('100-200-1600') """ Explanation: Exploring LamAna Objects This is brief introduction to underlying objects in this package. We begin with an input string that is parsed and converted into a Geometry object. This is part of the input_ module. End of explanation """ # FeatureInput FI = { 'Geometry': la.input_.Geometry('400.0-[200.0]-800.0'), 'Materials': ['HA', 'PSu'], 'Model': 'Wilson_LT', 'Parameters': load_params, 'Properties': mat_props, 'Globals': None, } """ Explanation: This object has a number of handy methods. This information is shipped with parameters and properties in FeatureInput. A FeatureInput is simply a dict. This currently does have not an official class but is it import for other objects. End of explanation """ # Stack object la.constructs.Stack(FI) # Laminate object la.constructs.Laminate(FI) # LaminateModel object la.constructs.LaminateModel(FI) """ Explanation: The following objects are serially inherited and part of the constructs module. These construct the DataFrame represention of a laminate. The code to decouple LaminateModel from Laminate was merged in verions 0.4.13. End of explanation """ cases1 = la.distributions.Cases(['400-200-800', '350-400-500', '400-200-0', '1000-0-0'], load_params=load_params, mat_props=mat_props, model= 'Wilson_LT', ps=[3,4,5]) cases1 """ Explanation: The latter cells verify these objects are successfully decoupled. That's all for now. Generating Multiple Cases We've already seen we can generate a case object and plots with three lines of code. However, sometimes it is necessary to generate different cases. These invocations can be tedious with three lines of code per case. Have no fear. A simple way to produce more cases is to instantiate a Cases object. 
Below we will create a Cases which houses multiples cases that: - share similiar loading parameters/material properties and laminate theory model with - different numbers of datapoints, p End of explanation """ # Gettable cases1[0] # normal dict key selection cases1[-1] # negative indices cases1[-2] # negative indicies # Sliceable cases1[0:2] # range of dict keys cases1[0:3] # full range of dict keys cases1[:] # full range cases1[1:] # start:None cases1[:2] # None:stop cases1[:-1] # None:negative index cases1[:-2] # None:negative index #cases1[0:-1:-2] # start:stop:step; NotImplemented #cases1[::-1] # reverse; NotImplemented # Viewable cases1 cases1.LMs # Iterable for i, case in enumerate(cases1): # __iter__ values print(case) #print(case.LMs) # access LaminateModels # Writable #cases1.to_csv() # write to file # Selectable cases1.select(nplies=[2,4]) # by # plies cases1.select(ps=[3,4]) # by points/DataFrame rows cases1.select(nplies=[2,4], ps=[3,4], how='intersection') # by set operations """ Explanation: Cases() accepts a list of geometry strings. Given appropriate default keywords, this lone argument will return a dict-like object of cases with indicies as keys. The model and ps keywords have default values. 
A Cases() object has some interesting characteristics (this is not a dict): if user-defined, tries to import Defaults() to simplify instantiations dict-like storage and access of cases list-like ordering of cases gettable: list-like, get items by index (including negative indicies) sliceable: slices the dict keys of the Cases object viewable: contained LaminateModels iterable: by values (unlike normal dicts, not by keys) writable: write DataFrames to csv files selectable: perform set operations and return unique subsets End of explanation """ set(geos_most).issubset(geos_full) # confirm repeated items mix = geos_full + geos_most # contains repeated items # Repeated Subset cases2 = la.distributions.Cases(mix, load_params=load_params, mat_props=mat_props) cases2.LMs # Unique Subset cases2 = la.distributions.Cases(mix, load_params=load_params, mat_props=mat_props, unique=True) cases2.LMs """ Explanation: LamainateModels can be compared using set theory. Unique subsets of LaminateModels can be returned from a mix of repeated geometry strings. We will use the default model and ps values. End of explanation """ from lamana.input_ import BaseDefaults bdft = BaseDefaults() # geometry String Attributes bdft.geo_inputs # all dict key-values bdft.geos_all # all geo strings bdft.geos_standard # static bdft.geos_sample # active; grows # Geometry Object Attributes; mimics latter bdft.Geo_objects # all dict key-values bdft.Geos_all # all Geo objects # more ... # Custom FeatureInputs #bdft.get_FeatureInput() # quick builds #bdft.get_materials() # convert to std. form """ Explanation: Subclassing Custom Default Parameters We observed the benefits of using implicit, default keywords (models, ps) in simplifying the writing of Cases() instantiations. In general, the user can code explicit defaults for load_params and mat_props by subclassing BaseDefaults() from inputs_. 
While subclassing requires some extra Python knowledge, this is a relatively simple process that reduces a significant amount of redundant code, leading to a more effiencient anaytical setting. The BaseDefaults contains a dict various geometry strings and Geometry objects. Rather than defining examples for various geometry plies, the user can call from all or a groupings of geometries. End of explanation """ # Example Defaults from LamAna.models.Wilson_LT class Defaults(BaseDefaults): '''Return parameters for building distributions cases. Useful for consistent testing. Dimensional defaults are inheirited from utils.BaseDefaults(). Material-specific parameters are defined here by he user. - Default geometric and materials parameters - Default FeatureInputs Examples ======== >>>dft = Defaults() >>>dft.load_params {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,} >>>dft.mat_props {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} >>>dft.FeatureInput {'Geometry' : '400-[200]-800', 'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}, 'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],}, 'Custom' : None, 'Model' : Wilson_LT, } ''' def __init__(self): BaseDefaults.__init__(self) '''DEV: Add defaults first. 
Then adjust attributes.''' # DEFAULTS ------------------------------------------------------------ # Build dicts of geometric and material parameters self.load_params = {'R' : 12e-3, # specimen radius 'a' : 7.5e-3, # support ring radius 'p' : 5, # points/layer 'P_a' : 1, # applied load 'r' : 2e-4, # radial distance from center loading } self.mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} # ATTRIBUTES ---------------------------------------------------------- # FeatureInput self.FeatureInput = self.get_FeatureInput(self.Geo_objects['standard'][0], load_params=self.load_params, mat_props=self.mat_props, ##custom_matls=None, model='Wilson_LT', global_vars=None) '''Use Classic_LT here''' from lamana.distributions import Cases # Auto load_params and mat_params dft = Defaults() cases3 = Cases(dft.geos_full, model='Wilson_LT') #cases3 = la.distributions.Cases(dft.geos_full, model='Wilson_LT') cases3 '''Refine idiom for importing Cases ''' """ Explanation: The latter geometric defaults come out of the box when subclassed from BaseDefaults. If custom geometries are desired, the user can override the geo_inputs dict, which automatically builds the Geo_objects dict. Users can override three categories of defaults parameters: geometric variables loading parameters material properties As mentioned, some geometric variables are provided for general laminate dimensions. The other parameters cannot be predicted and need to be defined by the user. Below is an example of a Defaults() subclass. If a custom model has been implemented (see next section), it is convention to place Defaults() and all other custom code within this module. If a custom model is implemented an located in the models directory, Cases will automatically search will the designated model modules, locate the load_params and mat_props attributes and load them automatically for all Cases instantiations. 
End of explanation """ cases1.plot(extrema=False) """ Explanation: Subclassing Custom Models One of the most powerful feauteres of LamAna is the ability to define customized modifications to the Laminate Theory models. Code for laminate theories (i.e. Classic_LT, Wilson_LT) are are located in the models directory. These models can be simple functions or sublclass from BaseModels in the theories module. Either approach is acceptable (see narrative docs for more details on creating custom models. This ability to add custom code make this library extensibile to use a larger variety of models. Plotting Cases An example of multiple subplots is show below. Using a former case, notice each subplot is indepent, woth separate geometries for each. LamAna treats each subplot as a subset or "caselet": End of explanation """ const_total = ['350-400-500', '400-200-800', '200-200-1200', '200-100-1400', '100-100-1600', '100-200-1400',] const_outer = ['400-550-100', '400-500-200', '400-450-300', '400-400-400', '400-350-500', '400-300-600', '400-250-700', '400-200-800', '400-0.5-1199'] const_inner = ['400-400-400', '350-400-500', '300-400-600', '200-400-700', '200-400-800', '150-400-990', '100-400-1000', '50-400-1100',] const_middle = ['100-700-400', '150-650-400', '200-600-400', '250-550-400', '300-400-500', '350-450-400', '400-400-400', '450-350-400', '750-0.5-400'] case1_ = const_total case2_ = const_outer case3_ = const_inner case4_ = const_middle cases_ = [case1_, case2_, case3_, case4_] cases3 = la.distributions.Cases(cases_, load_params=load_params, mat_props=mat_props, model= 'Wilson_LT', ps=[2,3]) cases3.plot(extrema=False) """ Explanation: Each caselet can also be a separate case, plotting multiple geometries for each as accomplished with Case. End of explanation """ '''Fix importing cases''' from lamana.distributions import Cases """ Explanation: See Demo notebooks for more examples of plotting. 
More on Cases End of explanation """ from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() %matplotlib inline str_caselets = ['350-400-500', '400-200-800', '400-[200]-800'] list_caselets = [['400-400-400', '400-[400]-400'], ['200-100-1400', '100-200-1400',], ['400-400-400', '400-200-800','350-400-500',], ['350-400-500']] case1 = la.distributions.Case(dft.load_params, dft.mat_props) case2 = la.distributions.Case(dft.load_params, dft.mat_props) case3 = la.distributions.Case(dft.load_params, dft.mat_props) case1.apply(['400-200-800', '400-[200]-800']) case2.apply(['350-400-500', '400-200-800']) case3.apply(['350-400-500', '400-200-800', '400-400-400']) case_caselets = [case1, case2, case3] mixed_caselets = [['350-400-500', '400-200-800',], [['400-400-400', '400-[400]-400'], ['200-100-1400', '100-200-1400',]], [case1, case2,] ] dict_caselets = {0: ['350-400-500', '400-200-800', '200-200-1200', '200-100-1400', '100-100-1600', '100-200-1400'], 1: ['400-550-100', '400-500-200', '400-450-300', '400-400-400', '400-350-500', '400-300-600'], 2: ['400-400-400', '350-400-500', '300-400-600', '200-400-700', '200-400-800', '150-400-990'], 3: ['100-700-400', '150-650-400', '200-600-400', '250-550-400', '300-400-500', '350-450-400'], } cases = Cases(str_caselets) #cases = Cases(str_caselets, combine=True) #cases = Cases(list_caselets) #cases = Cases(list_caselets, combine=True) #cases = Cases(case_caselets) #cases = Cases(case_caselets, combine=True) # collapse to one plot #cases = Cases(str_caselets, ps=[2,5]) #cases = Cases(list_caselets, ps=[2,3,5,7]) #cases = Cases(case_caselets, ps=[2,5]) #cases = Cases([], combine=True) # test raises # For next versions #cases = Cases(dict_caselets) #cases = Cases(mixed_caselets) #cases = Cases(mixed_caselets, combine=True) cases cases.LMs '''BUG: Following cell raises an Exception in Python 2. 
Comment to pass nb reg test in pytest.''' cases.caselets '''get out tests from code''' '''run tests''' '''test set seletions''' """ Explanation: Applying caselets The term "caselet" is defined in LPEP 003. Most importantly, the various types a caselet represents is handled by Cases and discussed here. In 0.4.4b3+, caselets are contained in lists. LPEP entertains the idea of containing caselets in dicts. End of explanation """ from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() cases = Cases(dft.geo_inputs['5-ply'], ps=[2,3,4]) len(cases) # test __len__ cases.get(1) # __getitem__ #cases[2] = 'test' # __setitem__; not implemented cases[0] # select cases[0:2] # slice (__getitem__) del cases[1] # __delitem__ cases # test __repr__ print(cases) # test __str__ cases == cases # test __eq__ not cases != cases # test __ne__ for i, case in enumerate(cases): # __iter__ values print(case) #print(case.LMs) cases.LMs # peek inside cases cases.frames # get a list of DataFrames directly cases #cases.to_csv() # write to file """ Explanation: Characteristics End of explanation """ str_caselets = ['350-400-500', '400-200-800', '400-[200]-800'] str_caselets2 = [['350-400-500', '350-[400]-500'], ['400-200-800', '400-[200]-800']] list_caselets = [['400-400-400', '400-[400]-400'], ['200-100-1400', '100-200-1400',], ['400-400-400', '400-200-800','350-400-500',], ['350-400-500']] case1 = la.distributions.Case(dft.load_params, dft.mat_props) case2 = la.distributions.Case(dft.load_params, dft.mat_props) case3 = la.distributions.Case(dft.load_params, dft.mat_props) case1.apply(['400-200-800', '400-[200]-800']) case2.apply(['350-400-500', '400-200-800']) case3.apply(['350-400-500', '400-200-800', '400-400-400']) case_caselets = [case1, case2, case3] """ Explanation: Unique Cases from Intersecting Caselets Cases can check if caselet is unique by comparing the underlying geometry strings. Here we have a non-unique caselets of different types. 
We get unique results within each caselet using the unique keyword. Notice, different caselets could have similar LaminateModels. End of explanation """ #----------------------------------------------------------+ # Iterating Over Cases from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() # Multiple cases, Multiple LMs cases = Cases(dft.geos_full, ps=[2,5]) # two cases (p=2,5) for i, case in enumerate(cases): # iter case values() print('Case #: {}'.format(i)) for LM in case.LMs: print(LM) print("\nYou iterated several cases (ps=[2,5]) comprising many LaminateModels.") # A single case, single LM cases = Cases(['400-[200]-800']) # a single case and LM (manual) for i, case_ in enumerate(cases): # iter i and case for LM in case_.LMs: print(LM) print("\nYou processed a case and LaminateModel w/iteration. (Recommended)\n") # Single case, multiple LMs cases = Cases(dft.geos_full) # auto, default p=5 for case in cases: # iter case values() for LM in case.LMs: print(LM) print("\nYou iterated a single case of many LaminateModels.") """ Explanation: The following cells attempt to print the LM objects. Cases objects unordered and thus print in random orders. It is important to note that once set operations are performed, order is no longer a preserved. This is related to how Python handles hashes. This applies to Cases() in two areas: The unique keyword optionally invoked during instantiation. Any use of set operation via the how keyword within the Cases.select() method. Revamped Idioms Gotcha: Although a Cases instance is a dict, as if 0.4.4b3, it's __iter__ method has been overriden to iterate the values by default (not the keys as in Python). This choice was decided since keys are uninformative integers, while the values (curently cases )are of interest, which saves from typing .items() when interating a Cases instance. python &gt;&gt;&gt; cases = Cases() &gt;&gt;&gt; for i, case in cases.items() # python &gt;&gt;&gt; ... 
print(case) &gt;&gt;&gt; for case in cases: # modified &gt;&gt;&gt; ... print(case) This behavior may change in future versions. End of explanation """ # Iterating Over Cases from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() #geometries = set(dft.geos_symmetric).union(dft.geos_special + dft.geos_standard + dft.geos_dissimilar) #cases = Cases(geometries, ps=[2,3,4]) cases = Cases(dft.geos_special, ps=[2,3,4]) # Reveal the full listdft.geos_specia # for case in cases: # iter case values() # for LM in case.LMs: # print(LM) # Test union of lists #geometries cases '''Right now a case shares p, size. cases share geometries and size.''' cases[0:2] '''Hard to see where these comem from. Use dict?''' cases.LMs cases.LMs[0:6:2] cases.LMs[0:4] """ Explanation: Selecting From cases, subsets of LaminateModels can be chosen. select is a method that performs on and returns sets of LaminateModels. Plotting functions are not implement for this method directly, however the reulsts can be used to make new cases instances from which .plot() is accessible. Example access techniques using Cases. Access all cases : cases Access specific cases : cases[0:2] Access all LaminateModels : cases.LMs Access LaminateModels (within a case) : cases.LMs[0:2] Select a subset of LaminateModels from all cases : cases.select(ps=[3,4]) End of explanation """ cases.select(nplies=[2,4]) cases.select(ps=[2,4]) cases.select(nplies=4) cases.select(ps=3) """ Explanation: Selections from latter cases. End of explanation """ cases.select(nplies=4, ps=3) # union; default cases.select(nplies=4, ps=3, how='intersection') # intersection """ Explanation: Advanced techniques: multiple selections. Set operations have been implemented in the selection method of Cases which enables filtering of unique LaminateModels that meet given conditions for nplies and ps. 
union: all LMs that meet either conditions (or) intersection: LMs that meet both conditions (and) difference: LMs symmetric difference: End of explanation """ cases.select(nplies=4, ps=3, how='difference') # difference cases.select(nplies=4) - cases.select(ps=3) # set difference '''How does this work?''' cases.select(nplies=4, ps=3, how='symm diff') # symm difference cases.select(nplies=[2,4], ps=[3,4], how='union') cases.select(nplies=[2,4], ps=[3,4], how='intersection') cases.select(nplies=[2,4], ps=3, how='difference') cases.select(nplies=4, ps=[3,4], how='symmeric difference') """ Explanation: By default, difference is subtracted as set(ps) - set(nplies). Currently there is no implementation for the converse difference, but set operations still work. End of explanation """ import numpy as np a = [] b = 1 c = np.int64(1) d = [1,2] e = [1,2,3] f = [3,4] test = 1 test in a #test in b #test is a test is c # if test is a or test is c: # True from lamana.utils import tools as ut ut.compare_set(d, e) ut.compare_set(b, d, how='intersection') ut.compare_set(d, b, how='difference') ut.compare_set(e, f, how='symmertric difference') ut.compare_set(d, e, test='issubset') ut.compare_set(e, d, test='issuperset') ut.compare_set(d, f, test='isdisjoint') set(d) ^ set(e) ut.compare_set(d,e, how='symm') g1 = dft.Geo_objects['5-ply'][0] g2 = dft.Geo_objects['5-ply'][1] cases = Cases(dft.geos_full, ps=[2,5]) # two cases (p=2,5) for i, case in enumerate(cases): # iter case values() for LM in case.LMs: print(LM) """ Explanation: Current logic seems to return a union. Enhancing selection algorithms with set operations Need logic to append LM for the following: all, either, neither (and, or, not or) a, b are int a, b are list a, b are mixed b, a are mixed End of explanation """ #PYTEST_VALIDATE_IGNORE_OUTPUT hash('400-200-800') #PYTEST_VALIDATE_IGNORE_OUTPUT hash('400-[200]-800') """ Explanation: In order to compare objects in sets, they must be hashable. 
The simple requirement equality is include whatever makes the hash of a equal to the hash of b. Ideally, we should hash the Geometry object, but the inner values is a list which is unhashable due to its mutability. Conventiently however, strings are not hashable. We can try to hash the geometry input string once they have been converted to General Convention as unique identifiers for the geometry object. This requires some reorganization in Geometry. ~~isolate a converter function _to_gen_convention()~~ privative all functions invisible to the API ~~hash the converted geo_strings~~ ~~privatize _geo_strings. This cannot be altered by the user.~~ Here we see the advantage to using geo_strings as hashables. They are inheirently hashable. UPDATE: decided to make a hashalbe version of the GeometryTuple End of explanation """ #PYTEST_VALIDATE_IGNORE_OUTPUT hash((case.LMs[0].Geometry, case.LMs[0].p)) case.LMs[0] L = [LM for case in cases for LM in case.LMs] L[0] L[8] #PYTEST_VALIDATE_IGNORE_OUTPUT hash((L[0].Geometry, L[0].p)) #PYTEST_VALIDATE_IGNORE_OUTPUT hash((L[1].Geometry, L[1].p)) set([L[0]]) != set([L[8]]) """ Explanation: Need to make Laminate class hashable. Try to use unique identifiers such as Geometry and p. End of explanation """ from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() mix = dft.Geos_full + dft.Geos_all mix set(mix) """ Explanation: Use sets to filter unique geometry objects from Defaults(). End of explanation """ mix = dft.geos_most + dft.geos_standard # 400-[200]-800 common to both cases3a = Cases(mix, combine=True, unique=True) cases3a.LMs load_params['p'] = 5 cases3b5 = la.distributions.Case(load_params, dft.mat_props) cases3b5.apply(mix) cases3b5.LMs[:-1] """ Explanation: Mixing Geometries See above. Looks like comparing the order of these lists give different results. This test has been quarantine from the repo until a solution is found. 
End of explanation """ '''Add how to build Defaults()''' # Case Building from Defaults import lamana as la from lamana.utils import tools as ut from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() ##dft = ut.Defaults() # user-definable case2 = la.distributions.Case(dft.load_params, dft.mat_props) case2.apply(dft.geos_full) # multi plies #LM = case2.LMs[0] #LM.LMFrame print("\nYou have built a case using user-defined defaults to set geometric \ loading and material parameters.") case2 """ Explanation: Idiomatic Case Making As we transition to more automated techniques, tf parameters are to be reused multiple times, it can be helpful to store them as default values. End of explanation """ # Automatic Case Building import lamana as la from lamana.utils import tools as ut #Single Case dft = wlt.Defaults() ##dft = ut.Defaults() case3 = ut.laminator(dft.geos_full) # auto, default p=5 case3 = ut.laminator(dft.geos_full, ps=[5]) # declared #case3 = ut.laminator(dft.geos_full, ps=[1]) # LFrame rollbacks print("\nYou have built a case using higher-level API functions.") case3 # How to get values from a single case (Python 3 compatible) list(case3.values()) """ Explanation: Finally, if building several cases is required for the same parameters, we can use higher-level API tools to help automate the process. Note, for every case that is created, a seperate Case() instantiation and Case.apply() call is required. These techniques obviate such redundancies. End of explanation """ # Multiple Cases cases1 = ut.laminator(dft.geos_full, ps=[2,3,4,5]) # multi ply, multi p print("\nYou have built many cases using higher-level API functions.") cases1 # How to get values from multiple cases (Python 3 compatible) list(cases1.values()) """ Explanation: Cases are differentiated by different ps. 
End of explanation """ # Iterating Over Cases # Latest style case4 = ut.laminator(['400-[200]-800']) # a sinle case and LM for i, case_ in case4.items(): # iter p and case for LM in case_.LMs: print(LM) print("\nYou processed a case and LaminateModel w/iteration. (Recommended)\n") case5 = ut.laminator(dft.geos_full) # auto, default p=5 for i, case in case5.items(): # iter p and case with .items() for LM in case.LMs: print(LM) for case in case5.values(): # iter case only with .values() for LM in case.LMs: print(LM) print("\nYou processed many cases using Case object methods.") # Convert case dict to generator case_gen1 = (LM for p, case in case4.items() for LM in case.LMs) # Generator without keys case_gen2 = (LM for case in case4.values() for LM in case.LMs) print("\nYou have captured a case in a generator for later, one-time use.") """ Explanation: Python 3 no longer returns a list for .values() method, so list used to evalate a the dictionary view. While consuming a case's, dict value view with list() works in Python 2 and 3, iteration with loops and comprehensions is a preferred technique for both single and mutiple case processing. After cases are accessed, iteration can access the contetnts of all cases. Iteration is the preferred technique for processing cases. It is most general, cleaner, Py2/3 compatible out of the box and agrees with The Zen of Python: There should be one-- and preferably only one --obvious way to do it. End of explanation """ # Style Comparisons dft = wlt.Defaults() ##dft = ut.Defaults() case1 = la.distributions.Case(load_params, mat_props) case1.apply(dft.geos_all) cases = ut.laminator(geos=dft.geos_all) case2 = cases # Equivalent calls print(case1) print(case2) print("\nYou have used classic and modern styles to build equivalent cases.") """ Explanation: We will demonstrate comparing two techniques for generating equivalent cases. End of explanation """
yyl/btc-price-analysis
notes/news_prediction.ipynb
gpl-2.0
score_data = pd.read_csv("../data/indico_nyt_bitcoin.csv", index_col='time', parse_dates=[0], date_parser=lambda x: datetime.datetime.strptime(x, time_format)) score_data.head() """ Explanation: Indico.io sentiment score analysis End of explanation """ weekly_score = score_data.resample('w', how='mean').loc['2013':].fillna(0.5) weekly_score.head() weekly_score.plot() """ Explanation: Compute average sentiment score per week make it 0.5 if no news that week. End of explanation """ time_format = "%Y-%m-%dT%H:%M:%S" data = pd.read_csv("../data/price.csv", names=['time', 'price'], index_col='time', parse_dates=[0], date_parser=lambda x: datetime.datetime.strptime(x[:-6], time_format)) bpi = data.resample('w', how='ohlc') bpi.index.name = 'time' bpi = pd.DataFrame(bpi['price']['close']).loc['2013':] bpi.head() trend_bpi = pd.merge(weekly_score, bpi, how='right', left_index=True, right_index=True) trend_bpi.columns = ['sentiment', 'close_price'] trend_bpi.head() """ Explanation: read bitcoin price data End of explanation """ trend_bpi.plot(secondary_y='close_price') trend_bpi.corr() """ Explanation: add news volume data End of explanation """ time_format = "%Y-%m-%dT%H:%M:%SZ" alchemy_data = pd.read_csv("../data/alchemy_nyt_bitcoin.csv" , index_col='time', parse_dates=[0], date_parser=lambda x: datetime.datetime.strptime(x, time_format)) alchemy_data.head() alchemy_data.alchemy_score.plot(kind='hist') alchemy_data.describe() weekly_alchemy = alchemy_data.resample('w', how='mean').loc['2013':].fillna(0.0) weekly_alchemy.head() weekly_alchemy.plot(kind='hist') weekly_alchemy.describe() alchemy_bpi = pd.merge(weekly_alchemy, bpi, how='right', left_index=True, right_index=True) alchemy_bpi.columns = ['sentiment', 'close_price'] alchemy_bpi.head() alchemy_bpi.plot(secondary_y='close_price') merged_data = pd.merge(alchemy_bpi, weekly_score, how='right', left_index=True, right_index=True) merged_data.head() merged_data.plot(secondary_y='close_price') merged_data.corr() """ 
Explanation: AlchemyAPI sentiment score End of explanation """ daily_alchemy = alchemy_data.resample('d', how='mean').loc['2013':].fillna(0.0) daily_alchemy.head() daily_price = data.resample('d', how='ohlc') daily_price.index.name = 'time' daily_price = pd.DataFrame(daily_price['price']['close']).loc['2013':] daily_price.head() daily_data = pd.merge(daily_price, daily_alchemy, how='right', left_index=True, right_index=True) daily_data.head() daily_data.plot(secondary_y='close') """ Explanation: Daily score analysis Previously we only consider average weekly score. As news spread pretty fast, it is possible that its effect is occurring in shorter time. Therefore we try to plot daily score data. End of explanation """ alchemy_bpi['avg_sentiment'] = pd.rolling_mean(alchemy_bpi.sentiment, 1) alchemy_bpi.head() alchemy_bpi['avg_shifted'] = alchemy_bpi['avg_sentiment'].shift(1) alchemy_bpi.head() alchemy_bpi['order']= 'NA' alchemy_bpi['diff'] = alchemy_bpi.sentiment - alchemy_bpi.avg_shifted alchemy_bpi.head() ## SII_diff >= diff => search interest rises this week => price rises next week alchemy_bpi.loc[alchemy_bpi['diff'] > 0,'order'] = False ## SII_diff < diff => search interest falls this week => price falls next week alchemy_bpi.loc[alchemy_bpi['diff'] < 0,'order'] = True alchemy_bpi.head() alchemy_bpi['trend'] = alchemy_bpi.close_price > alchemy_bpi.close_price.shift(1) alchemy_bpi.head() total_predict = alchemy_bpi[alchemy_bpi.order!='NA'].order.count() total_correct = alchemy_bpi[alchemy_bpi.order==alchemy_bpi.trend].order.count() print "TP+TN: %f (%d/%d)" % (total_correct/float(total_predict), total_correct, total_predict) alchemy_bpi.corr() """ Explanation: Prediction Weekly prediction first End of explanation """ daily_data = pd.merge(daily_price, daily_alchemy, how='right', left_index=True, right_index=True) daily_data['avg_sentiment'] = pd.rolling_mean(daily_data.alchemy_score, 1) daily_data.head() daily_data['avg_shifted'] = 
daily_data['avg_sentiment'].shift(3) daily_data.head() daily_data['order']= 'NA' daily_data['diff'] = daily_data.alchemy_score - daily_data.avg_shifted daily_data.head() ## SII_diff >= diff => search interest rises this week => price rises next week daily_data.loc[daily_data['diff'] > 0,'order'] = True ## SII_diff < diff => search interest falls this week => price falls next week daily_data.loc[daily_data['diff'] < 0,'order'] = False daily_data.head() daily_data['trend'] = daily_data.close > daily_data.close.shift(1) daily_data.head() total_predict = daily_data[daily_data.order!='NA'].order.count() total_correct = daily_data[daily_data.order==daily_data.trend].order.count() print "TP+TN: %f (%d/%d)" % (total_correct/float(total_predict), total_correct, total_predict) """ Explanation: Daily prediction End of explanation """
phoebe-project/phoebe2-docs
2.3/tutorials/compute.ipynb
gpl-3.0
#!pip install -I "phoebe>=2.3,<2.4" """ Explanation: Compute Now that we have datasets added to our Bundle, our next step is to run the forward model and compute a synthetic model for each of these datasets. Setup Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab). End of explanation """ import phoebe from phoebe import u # units import numpy as np import matplotlib.pyplot as plt logger = phoebe.logger() b = phoebe.default_binary() """ Explanation: Now we'll import our packages and initialize the default PHOEBE bundle. End of explanation """ b.add_dataset('orb', compute_times=phoebe.linspace(0,10,10), dataset='orb01') b.add_dataset('lc', compute_times=phoebe.linspace(0,1,101), dataset='lc01') """ Explanation: And we'll attach some dummy datasets. See the datasets tutorial for more details. End of explanation """ print(b.computes) print(b.filter(context='compute')) b.set_value(qualifier='irrad_method', value='none') """ Explanation: Default Compute Options Any default Bundle already has a set of default compute options to run the backend for PHOEBE 2. In most cases, you can just edit the options in this default set of compte options. End of explanation """ b.add_compute(phoebe.compute.phoebe, compute='preview', irrad_method='none') print(b.filter(compute='preview', context='compute')) b.add_compute('phoebe', compute='detailed', irrad_method='wilson') print(b.get_compute('detailed')) """ Explanation: Adding Compute Options In other cases, we may want to manually add additional sets of compute options. This syntax should look very familiar by now, it takes a function (or the name of a recognized function in phoebe.parameters.compute) and then any kwargs to set in that ParameterSet, passed to b.add_compute. 
Let's say that we want to create two sets of compute options - in this example, we'll create one called 'preview' which will cut some corners to quickly get us a model, and one called 'detailed' which will get a much more precise model but likely take longer. As with other tags, the string you provide for the compute tag is up to you (so long as it doesn't raise an error because it conflicts with other tags). End of explanation """ print(b.filter(qualifier='enabled', dataset='lc01')) """ Explanation: Editing Compute Options Backend-Specific Compute Options Most of the parameters in the compute options are specific to the backend being used. Here, of course, we're using the PHOEBE 2.0 backend - but for details on other backends see the Advanced: Alternate Backends Tutorial. The PHOEBE compute options are described in the tutorial on their relevant dataset types: Light Curves/Fluxes (lc) Radial Velocities (rv) Line Profiles (lp) Orbits (orb) Meshes (mesh) Enabling/Disabling Datasets By default, synthetic models will be created for all datasets in the Bundle when run_compute is called. But you can disable a dataset to have run_compute ignore that dataset. This is handled by a BoolParameter with the qualifier 'enabled' - and has a copy that lives in each set of compute options Let's say we wanted to compute the orbit but not light curve - so we want to set enabled@lc01: End of explanation """ b.set_value(qualifier='enabled', dataset='lc01', compute='preview', value=False) print(b.filter(qualifier='enabled', dataset='lc01')) """ Explanation: as you can see, there is a copy for both of our compute options ('preview' and 'detailed'). 
If we know which set of compute options we'll be using, or only want to enable/disable for a given set, then we can do that (we could also use b.disable_dataset and b.enable_dataset: End of explanation """ b.set_value_all('enabled@lc01', True) print(b.filter(qualifier='enabled', dataset='lc01')) """ Explanation: or to enable/disable a dataset for all sets of compute options, we can use the set_value_all method: End of explanation """ b.run_compute(compute='preview') print(b.models) """ Explanation: If the enabled parameter is missing for a set of compute options - it is likely that that particular backend does not support that dataset type. Running Compute run_compute takes arguments for the compute tag as well as the model tag for the resulting synthetic model(s). You do not need to provide the compute tag if only 0 or 1 set of compute options exist in the Bundle. If there are no compute options, the default PHOEBE 2 options will be added on your behalf and used. If there is a single set of compute options, those will be assumed. In our case, we have two compute options in the Bundle (with tags 'preview' and 'detailed') so we must provide an argument for compute. If you do not provide a tag for the model, one will be created for you called 'latest'. Note that the 'latest' model will be overwritten without throwing any errors, whereas other named models can only be overwritten if you pass overwrite=True (see the run_compute API docs for details). In general, though, if you want to maintain the results from previous calls to run_compute, you must provide a NEW model tag. 
End of explanation """ b.set_value(qualifier='incl', kind='orbit', value=90) b.run_compute(compute='preview', model='run_with_incl_90') b.set_value(qualifier='incl', kind='orbit', value=85) b.run_compute(compute='preview', model='run_with_incl_85') b.set_value(qualifier='incl', kind='orbit', value=80) b.run_compute(compute='preview', model='run_with_incl_80') """ Explanation: Storing/Tagging Models Now let's compute models for three different 'versions' of parameters. By providing a model tag, we can keep the synthetics for each of these different runs in the bundle - which will be handy later on for plotting and comparing models. End of explanation """ print(b.models) """ Explanation: We will now have three new sets of synthetics which can be compared, plotted, or removed. End of explanation """ b.remove_model('latest') print(b.models) """ Explanation: To remove a model, call remove_model. End of explanation """ b.filter(model='run_with_incl_90') b.filter(component='primary', model='run_with_incl_90') b.get_parameter(qualifier='us', component='primary', model='run_with_incl_90') b.get_value(qualifier='us', dataset='orb01', component='primary', model='run_with_incl_90')[:10] """ Explanation: Accessing Synthetics from Models The synthetics can be accessed by their dataset and model tags. End of explanation """
miky-kr5/Presentations
EVI - 2018/EVI 04/Modulo3.ipynb
cc0-1.0
datos1 = pd.DataFrame([24, np.nan, np.nan, 23,np.nan, 12, np.nan, 17, np.nan, 2 ,5], columns = list('A')) datos1 """ Explanation: Manipulación y Análisis de Datos con Python <br> Pre-procesamiento de datos <br> Manejo de datos faltantes End of explanation """ datos1.dropna(subset=['A'], axis= 0, inplace= True) datos1 """ Explanation: <br> En dropna() el argumento subset considera la etiqueta para seleccionar el conjunto a descartar, axis=0 descarta filas (axis=1 columnas) y inplace= True hace que los cambios se ejecuten directamente en DataFrame. End of explanation """ datos1 = pd.DataFrame([24, np.nan, np.nan, 23,np.nan, 12, np.nan, 17, np.nan, 2 ,5], columns = list('A')) media = datos1['A'].mean() media """ Explanation: <br> La función replace() permite reemplazar valores faltantes en el DataFrame por valores nuevos. En nuestro ejemplo, reemplazaremos con el promedio, que se calcula con la función mean(). End of explanation """ datos1['A'].replace(np.nan, media, inplace = True) datos1 """ Explanation: <br> Ahora usamos la función replace() End of explanation """ import pandas as pd compra_1 = pd.Series({'Nombre': 'Adelis', 'Artículo comprado': 'Libro', 'Costo': 1200}) compra_2 = pd.Series({'Nombre': 'Miguel', 'Artículo comprado': 'Raspberry pi 3', 'Costo': 15000}) compra_3 = pd.Series({'Nombre': 'Jaime', 'Artículo comprado': 'Balón', 'Costo': 5000}) df = pd.DataFrame([compra_1, compra_2, compra_3], index=['Tienda 1', 'Tienda 1', 'Tienda 2']) df """ Explanation: <br> Transformando los datos <br> Mezclando y combinando DataFrames End of explanation """ df['Fecha'] = ['Diciembre 1', 'Febrero 4', 'Mediados de Julio'] df df['Entregado'] = 'Sí' df df['Retroalimentación'] = ['Positiva', None, 'Negativa'] df """ Explanation: <br> Podemos agregar elementos al DataFrame de la siguiente manera: End of explanation """ adf = df.reset_index() adf """ Explanation: <br> Pandas reset_index () es un método para restablecer el índice de un DataFrame. 
El establece como índices una lista de enteros que van desde 0 hasta la longitud de los datos. End of explanation """ empleados_df = pd.DataFrame([{'Nombre': 'Adriana', 'Función': 'Gerente de ventas'}, {'Nombre': 'Andrés', 'Función': 'Vendedor 1'}, {'Nombre': 'Cristóbal', 'Función': 'Gerente de departamento'}]) empleados_df = empleados_df.set_index('Nombre') grado_df = pd.DataFrame([{'Nombre': 'Andrés', 'Grado': 'Nivel 3'}, {'Nombre': 'Cristóbal', 'Grado': 'Nivel 1'}, {'Nombre': 'Adriana', 'Grado': 'Nivel 2'}]) grado_df = grado_df.set_index('Nombre') print(empleados_df.head()) print() print(grado_df.head()) """ Explanation: <br> Podemos tener un par de tablas de datos que nos interese unir o combinar en un mismo DataFrame. End of explanation """ df_info_empleados=pd.merge(empleados_df, grado_df, how='outer', left_index=True, right_index=True) df_info_empleados """ Explanation: <br> pd.merge() conecta filas en el DataFrames basado en una o más teclas. Para los conocedores de SQL esta función hace unión de bases de datos por columnas o índices. End of explanation """ fecha_ingreso_df = pd.DataFrame([{'Nombre': 'Adriana', 'Fecha de Ingreso': '20/06/2013'}, {'Nombre': 'Andrés', 'Fecha de Ingreso': '10/01/2018'}, {'Nombre': 'Cristóbal', 'Fecha de Ingreso': '20/03/2011'}]) fecha_ingreso_df = fecha_ingreso_df.set_index('Nombre') art_vendidos_df = pd.DataFrame([{'Nombre': 'Adriana', 'Art.Vendidos/Total Art.': 123/10000}, {'Nombre': 'Andrés', 'Art.Vendidos/Total Art.': 1450/10000}, {'Nombre': 'Cristóbal', 'Art.Vendidos/Total Art.': 5000/10000}]) art_vendidos_df = art_vendidos_df.set_index('Nombre') print(fecha_ingreso_df.head()) print(art_vendidos_df.head()) """ Explanation: <br> Otros ejemplos de cómo variar el parámetro how se pueden encontrar en el libro Python for Data Analysis - McKinney. <br> Supongamos que tenemos ahora un nuevo DataFrame que coincide en número de filas con el anterior. 
Por ejemplo: End of explanation """ new_data = pd.concat([df_info_empleados, fecha_ingreso_df, art_vendidos_df], axis=1) new_data """ Explanation: <br> pd.concat() pega o apila objetos a lo largo de un eje. End of explanation """ pd.concat([df_info_empleados, fecha_ingreso_df, art_vendidos_df], axis=0) """ Explanation: <br> Hay mucho más que aprender! Por ejemplo: ¿Qué sucede si axis=0? R: pues posiblemente el resultado sea que Pandas pegue todos los valores y sus índices. Como se muestra a continuación: End of explanation """ new_data new_data['Art.Vendidos/Total Art.']= new_data['Art.Vendidos/Total Art.']*100 new_data.rename(columns = {'Art.Vendidos/Total Art.': '% Art. Vendidos'}, inplace = True) new_data """ Explanation: <br> Otra transformación de interés podría ser hacer algún cálculo sobre una columna entera. En nuestro ejemplo, supongamos que deseamos colocar % de artículos vendidos y cambiar la etiqueta de esa columna. End of explanation """ dimension1 = pd.DataFrame([168.7, 170.0, 150.3, 168.7, 145.2, 200.0, 175.4, 163.0, 230.0, 129.6, 178.2], columns = list('L')) dimension1.rename(columns = {'L': 'Largo'}, inplace = True) dimension2 = pd.DataFrame([68.3, 60.2, 65.0, 68.3, 45.9, 70.0, 75.1, 63.5, 65.2, 68.7, 78], columns = list('A')) dimension2.rename(columns = {'A': 'Ancho'}, inplace = True) dimension3 = pd.DataFrame([46.8, 47.0, 45.0, 46.8, 45.3, 40.9, 45.6, 43.8, 46.8, 49.0, 47.2], columns = list('A')) dimension3.rename(columns = {'A': 'Alto'}, inplace = True) dimensiones = pd.concat([dimension1, dimension2, dimension3], axis=1) dimensiones """ Explanation: <br> Normalizando datos <br> Tomemos un DataFrame que representa dimensiones de cajas a ser vendidas en un almacén. 
End of explanation """ dimensiones['Largo'] = dimensiones['Largo']/dimensiones['Largo'].max() dimensiones['Ancho'] = dimensiones['Ancho']/dimensiones['Ancho'].max() dimensiones['Alto'] = dimensiones['Alto']/dimensiones['Alto'].max() dimensiones """ Explanation: <br> Método de "Escala de característica simple": se divide cada valor por el valor máximo para esa característica, $x_{nuevo} = \frac{x_{viejo}}{x_{máximo}}$ End of explanation """ dimensiones['Largo'] = (dimensiones['Largo']-dimensiones['Largo'].min())/(dimensiones['Largo'].max() - dimensiones['Largo'].min()) dimensiones['Ancho'] = (dimensiones['Ancho']-dimensiones['Ancho'].min())/(dimensiones['Ancho'].max() - dimensiones['Ancho'].min()) dimensiones['Alto'] = (dimensiones['Alto']-dimensiones['Alto'].min())/(dimensiones['Alto'].max() - dimensiones['Alto'].min()) dimensiones """ Explanation: <br> Método Mínimo - Máximo: toma cada valor, $x_{viejo}$ le resta el mínimo valor de esa característica y luego se divide por el rango de esa característica, es decir, $x_{nuevo} = \frac{x_{viejo} - x_{mínimo}}{x_{máximo} - x_{mínimo}}$ End of explanation """ dimensiones['Largo'] = (dimensiones['Largo']-dimensiones['Largo'].mean())/(dimensiones['Largo'].std()) dimensiones['Ancho'] = (dimensiones['Ancho']-dimensiones['Ancho'].mean())/(dimensiones['Ancho'].std()) dimensiones['Alto'] = (dimensiones['Alto']-dimensiones['Alto'].mean())/(dimensiones['Alto'].std()) dimensiones """ Explanation: <br> Método Puntaje estándar: End of explanation """ import numpy as np df = pd.read_csv('Automobile_data.csv') df.head() df.describe() """ Explanation: <br> Estadística descriptiva <br> Tabla de resumen estadístico End of explanation """ np.random.seed(1500) #generación aleatoria números dfb = pd.DataFrame(np.random.randn(10,5)) #DataFrame de dimensiones 10x5 dfb.boxplot(return_type='axes') #Grafico de caja de cada categoría. 
dfb.head() """ Explanation: <br> Gráficos de cajas (o Boxplots) <br> Vamos a generar datos aleatoriamente y hacer un gráfico de caja. End of explanation """ x = df['length'] #Variable Largo y = df['width'] #Variable Ancho z =df['height'] #Variable Alto dfbp = pd.DataFrame([x,y,z]).T #Creando un DataFrame con las dimensiones de los autosmóviles dfbp.boxplot(fontsize=13, return_type='axes') #Gráfico de caja de las 3 variables #Tarea!!!!! Normalice estos datos y haga el nuevo gráfico de caja """ Explanation: <br> Tomemos los datos del archivo Automobile_data.csv para crear un gráfico de caja de 3 variables que definen las dimensiones de los automóviles. End of explanation """ np.random.seed(14000) #Generación de números aleatorios pdhist = pd.Series(np.random.randn(1000)) #Serie de números aleatorios pdhist.hist(normed=True) # Muestra las barras pdhist.plot(fontsize=13, kind='kde') #Gráfico de barras (kde = Kernel Density Estimation plot. Haga la prueba con 'hist') """ Explanation: <br> Gráficos de barras (o histogramas) <br> Vamos a generar datos aleatoriamente y hacer un gráfico de barras. End of explanation """ import matplotlib.pyplot as plt p = df['price'] #Seleccionamos la variable price pdf = pd.Series(p) #Convertimos la selección en una serie de Pandas pdf.hist(normed=True) # Muestra las barras pdf.plot(fontsize=11, kind = 'hist') #Gráfico de barras plt.xlabel('Precio',fontsize=13) plt.ylabel('Frecuencia', fontsize=13) """ Explanation: <br> Utilicemos los datos de Automobile_data.csv para hacer un gráfico de barras o histograma de la variable price (precio). End of explanation """ import matplotlib.pyplot as plt x= df['engine-size'] #Variable predictora y= df['price'] #Variable objetivo o que deseamos predecir plt.scatter(x, y) #Gráfico de dispersión en Matplotlib plt.title('Gráfico de dispersión de Tamaño del motor Vs. 
Precio', fontsize=13)#Nombre del gráfico plt.xlabel('Tamaño del motor', fontsize=13)#Etiquetal del eje-x plt.ylabel('Precio', fontsize=13)#Etiqueta del eje-y """ Explanation: <br> Este gráfico de barras nos indica que hay un número alto de automóviles con precio menor a 10000, entre otras cosas .... ¿Qué cosas? ;) <br> Gráfico de dispersión <br> Este gráfico de dispersión muestra la relación entre las variables tamaño del motor y precio. End of explanation """ import matplotlib.pyplot as plt from scipy import stats x=df['engine-size'] #Variable predictora y= df['price'] #Variable objetivo o que deseamos predecir slope, intercept, r_value, p_value, std_err = stats.linregress(x,y) line = slope*x+intercept plt.plot(x,y,'o', x, line) ax = plt.gca() fig = plt.gcf() plt.xlabel('Tamaño del motor', fontsize=9)#Etiquetal del eje-x plt.ylabel('Precio', fontsize=9)#Etiqueta del eje-y plt.title('Gráfico de dispersión de Tamaño del motor Vs. Precio', fontsize=13)#Nombre del gráfico """ Explanation: <br> Correlación entre variables Tomememos las dos variables del ejemplo anterior... End of explanation """ import matplotlib.pyplot as plt from scipy import stats x=df['highway-mpg'] #Variable predictora y= df['price'] #Variable objetivo o que deseamos predecir slope, intercept, r_value, p_value, std_err = stats.linregress(x,y) line = slope*x+intercept plt.plot(x,y,'o', x, line) ax = plt.gca() fig = plt.gcf() plt.xlabel('Millas por galón en autopista', fontsize=9)#Etiquetal del eje-x plt.ylabel('Precio', fontsize=9)#Etiqueta del eje-y plt.title('Gráfico de dispersión de Millas por galón en autopista Vs. Precio', fontsize=13)#Nombre del gráfico """ Explanation: El gráfico de dispersión anterior revela que hay una relación lineal positiva entre el tamaño del motor y el precio del auto. Es decir, a medida que aumenta el tamaño del motor aumenta el precio. 
Este gráfico de dispersión revela que hay una relación lineal negativa entre las millas que recorre el auto por combustible que usa y el precio del mismo. Es decir, mientras más millas por galón el auto es más económico. End of explanation """ from scipy import stats stats.pearsonr(df['horsepower'], df['price']) """ Explanation: Ahora calculemos el coeficiente de correlación y el p-valor entre las variables 'Caballos de Fuerza' y 'Precio' usando 'stats.pearson()' End of explanation """
astro313/REU2017
Exercises.ipynb
mit
# Put your code here pass # only run this cell after you finished writing your code %load beginner_soln.py """ Explanation: Part 2: Demonstration Exercises Here are some sample exercises to work through. They demonstrate many techniques that we use all the time. Beginner Level This exercise is designed for those who are fairly new to python and coding in general. It asks you to read in a list of numbers from a file and to write an algorithm to sort the list. Using the techniques described in the example code above, read in the "beginner.txt" file in this directory and store its contents as a list (f.readlines() will be useful). Print your list. The list you've read in will be a list of strings. Write a for loop that converts each string in the list to an integer (using range(len(list))...). Print your updated list. Next, create a second, empty list to store the sorted data in. Now write a for loop that loops over the list you read in from file and: stores the first entry looks at each successive entry in the list and compares it to the stored entry. If an entry is less than the stored entry, replace the stored entry with this new lowest value. Congratulations, you've now found the lowest value in the list. Take the value stored in your for loop and add it to your second list (using the list.append() method). Use the list.remove(x) method to remove the value you've just added to the second list from the first list. Now repeat the process in steps 4 and 5 for each value in the initial list (do this by embedding steps 4 and 5 in a for loop; the syntax range(len(list)) will be useful here). [Note, you also could use a while statement, but we'll stick with for loops]. Print out your newly sorted list to make sure your algorithm worked. If time permits, add a variable verbose, that when it's true you print out the list at each step of the way. 
If time permits, come up with a more efficient method for sorting the list (there are many: it's fine to use google to see what sorting algorithms are out there. And of course, there's a python sort command - see if you can figure out how it works). End of explanation """ # Put your code here pass # only run this cell after you finished writing your code %load beginner_soln.py """ Explanation: Intermediate Level This exercise is designed for those who are already somewhat comfortable with python and want to learn more about exploiting its capabilities. It asks you to read in a file containing 10 time series, each containing a gaussian radio pulse. Then, using numpy and matplotlib, it asks you to plot the pulse, measure the pulse's signal to noise ratio, and output values in a nicely formatted table. Read in the file "intermediate.txt" in this directory. The file contains 10 rows of comma separated numbers. Each row represents the amount of signal output from a radio antenna as a function of time (in 1 second time intervals). Loop through the lines in the file (f.readlines() will be useful here). For each line, do the following: Convert the line from one long string into a numpy array of floats. Using matplotlib.pyplot make a plot of the data you just read in as a function of time (hint: you'll have to figure out how many time steps are present in the data). Using the capabilities of numpy, find the value of the maximum flux in your time series. Excluding your pulse, (the pulse is in the first half of the time series, so you can cheat and just limit yourself to the second half of the time series) calculate the rms noise in your spectrum. (Recall that the rms is the root mean square - find the mean of the squares of all the points, then take the square root. You might also use np.std() and compare the results (and think about why they are different, if they are different)). Do a simple estimate of the signal to noise ratio of the pulse as peakflux/rms. 
Using a formatted string, print the output signal to noise, peakflux and rms to a descriptive table, rounding each number to two decimal places. If time permits figure out how to display all your time series on top of one another at the end, rather than having the plots pop up one at a time. If time permits mess around with fitting the gaussian pulse and come up with other estimates of the signal to noise ratio. End of explanation """ # put your code here.... """ Explanation: Exercise with APLpy and plotting fits images In this exercise, you will use aplpy ("apple pie") to make an image of a field of ALFALFA data. Read in the fits file "HI1020_21.mapPYM2.5b.fits" in this directory, and plot it in inverted greyscale. Overplot a contour at 0.13 mJy/beam. There are two groups of galaxies in the image. Put a box around each one. Label the lower left group NGC 3227 group, and the upper right group the NGC 3190 group Make your axis labels bold, and give the figure a thick border Save a .png and .eps version of the figure This is a piece of code used to make a figure from Leisman et al. 2016! End of explanation """
google/eng-edu
ml/testing-debugging/testing-debugging-classification.ipynb
apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2018 Google LLC. End of explanation """ # Reset environment for a new run % reset -f # Load Libraries from os.path import join # for joining file pathnames import pandas as pd import tensorflow as tf from tensorflow import keras import numpy as np import matplotlib.pyplot as plt import unittest import sys # Set Pandas display options pd.options.display.max_rows = 10 pd.options.display.float_format = '{:.1f}'.format # Load data mnistDf_backup = pd.read_csv( "https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv", sep=",", header=None) # Shuffle data mnistDf_backup.sample(frac=1).reset_index(drop=True) # Use the first 5000 examples for faster prototyping mnistDf = mnistDf_backup[0:5000] mnistDf.head() """ Explanation: Case Study: Debugging in Classification This Colab quickly demonstrates a few concepts related to debugging classification models. You will explore potential problems in implementing these tasks: Calculating loss for classification problems. Optimizing your model Applying regularization. Following best practices in development and debugging. Please make a copy of this Colab before running it. Click on File, and then click on Save a copy in Drive. Load MNIST Data MNIST is a dataset of images of the numbers 0 to 9. The problem is to classify the images as numbers. Setup libraries and load the MNIST dataset. Display the first few rows to verify that the data loaded. 
You'll explore the data format after the data loads. End of explanation """ showExample = 1000 # @param digitData = np.reshape(mnistDf.iloc[showExample,0:-1],[28,28]) print digitData """ Explanation: Understanding the Data Format Each row represents one labeled example. Column 0 represents the label that a human rater has assigned for one handwritten digit. For example, if Column 0 contains '6', then a human rater interpreted the handwritten character as the digit '6'. The ten digits 0-9 are each represented, with a unique class label for each possible digit. Thus, this is a multi-class classification problem with 10 classes. Columns 1 through 784 contain the feature values, one per pixel for the 28×28=784 pixel values. The pixel values are on a gray scale in which 0 represents white, 255 represents black, and values between 0 and 255 represent shades of gray. Most of the pixel values are 0; you may want to take a minute to confirm that they aren't all 0. Modify the form below and run the code to view data for a given example. End of explanation """ %hide_result # hides result of cell computation # Calculate the number of classes numClasses = mnistDf.iloc[:,0].unique().shape[0] # Plot histogram of class distribution plt.hist(mnistDf.iloc[:,0], bins=range(numClasses+1)) plt.xticks(range(numClasses+1)) """ Explanation: Do you have Imbalanced Classes? As we read in the course, imbalanced classes make classification harder. Let's look at the distribution of classes. Do you have imbalanced classes? End of explanation """ # Shuffle data mnistDf = mnistDf.sample(frac=1).reset_index(drop=True) # Split dataset into data and labels mnistData = mnistDf.iloc[:,1:-1].copy(deep=True) mnistLabels = mnistDf.iloc[:,0].copy(deep=True) """ Explanation: The preceding graph shows that the 10 classes are roughly equally represented. 
Shuffle and Split Dataset As part of Data Debugging best practices, ensure your splits are statistically equivalent by shuffling your data to remove any pre-existing order. End of explanation """ def minMaxScaler(arr): min = np.min(arr) max = np.max(arr) arr = (arr-min)/max return arr for featureIdx in range(mnistData.shape[1]): mnistData.iloc[:,featureIdx] = minMaxScaler(mnistData.iloc[:,featureIdx]) mnistData.describe() """ Explanation: Process Data Scale the data values to [0,1] since the values are bounded to [0,255] and do not contain outliers. Then check that the scaled data values are as expected by generating summary statistics using the DataFrame.describe() function. Run the following cell to scale data and generate statistics. This cell takes a few minutes to run. End of explanation """ # First reload your data mnistData = mnistDf.iloc[:,1:-1].copy(deep=True) # Explore your data """ Explanation: Oh no! Some of your features are all NaN. What do you think the cause is? Hint: While NaNs have many causes, in this case, the NaN values are caused by the properties of your data. Use the next code cell to explore your data. Then check the next cell for the solution. Try to find the solution yourself. Debugging NaNs and exploring your data are important skills. End of explanation """ mnistData.describe() """ Explanation: Solution Start exploring your data by generating a high-level summary using Dataframe.describe(). End of explanation """ # Redefine the scaling function to check for zeros def minMaxScaler(arr): max = np.max(arr) if(max!=0): # avoid /0 min = np.min(arr) arr = (arr-min)/max return arr # Reload data mnistData = mnistDf.iloc[:,1:-1].copy(deep=True) # Scale data for featureIdx in range(mnistData.shape[1]): mnistData.iloc[:,featureIdx] = minMaxScaler(mnistData.iloc[:,featureIdx]) """ Explanation: Because some of the feature columns are all zeros, the scaling function divided by 0 (because np.max returns 0). The division by 0 resulted in NaN values. 
This result shows you how easily NaNs can arise in engineered data. The describe function will not detect every occurrence of NaN (or None). Instead, use the command DataFrame.isnull().any(). Note: Given the maximum value of the feature data is 255, you could simply divide the input by 255 instead of using min-max scaling, and avoid introducing NaNs. However, this example purposely uses min-max scaling to show how NaNs can appear in engineered data. Now let's try scaling the data again. End of explanation """ np.sum(mnistLabels==1)*1.0/mnistLabels.shape[0]*100 """ Explanation: You should follow best practice and prevent this bug from recurring by writing a unit test to check for not having NaN values in your engineered data. Remove All-Zero Features? You might think that getting NaNs and discovering that some features were all-zero is good luck because those features can be discarded. However, your training data and validation data might have different all-zero features. Since you should not use validation data to make modeling decisions, you cannot remove only those features that are all-zero in both. Furthermore, data in the future might have different characteristics. There are pros and cons in either case. This Colab keeps the features since reducing the feature set is not a concern. Establish Baseline Following development best practices, you should establish a baseline. The simplest baseline is predicting the most common class. You saw that the most common class is 1. Let's check the accuracy when always predicting 1. End of explanation """ def showClassificationResults(trainHistory): """Function to: * Print final loss & accuracy. * Plot loss & accuracy curves. 
Args: trainHistory: object returned by model.fit """ # Print final loss and accuracy print("Final training loss: " + str(trainHistory.history['loss'][-1])) print("Final validation loss: " + str(trainHistory.history['val_loss'][-1])) print("Final training accuracy: " + str(trainHistory.history['acc'][-1])) print("Final validation accuracy: " + str(trainHistory.history['val_acc'][-1])) # Plot loss and accuracy curves f = plt.figure(figsize=(10,4)) axLoss = f.add_subplot(121) axAcc = f.add_subplot(122) axLoss.plot(trainHistory.history['loss']) axLoss.plot(trainHistory.history['val_loss']) axLoss.legend(['Training loss', 'Validation loss'], loc='best') axLoss.set_xlabel('Training epochs') axLoss.set_ylabel('Loss') axAcc.plot(trainHistory.history['acc']) axAcc.plot(trainHistory.history['val_acc']) axAcc.legend(['Training accuracy', 'Validation accuracy'], loc='best') axAcc.set_xlabel('Training epochs') axAcc.set_ylabel('Accuracy') """ Explanation: Your baseline accuracy is about 11%. Should be easy to beat, right? Train a Linear Model Let's start nice and easy with a linear model. All we need is an accuracy > 11%. First, let's define a function to plot our loss and accuracy curves. The function will also print the final loss and accuracy. Instead of using verbose=1, you can call the function. End of explanation """ model = None # Define model = keras.Sequential() model.add(keras.layers.Dense(mnistData.shape[1], activation='linear', input_dim=mnistData.shape[1])) model.add(keras.layers.Dense(1, activation='linear')) # Compile model.compile(optimizer="adam", loss='mse', metrics=['accuracy']) # Train trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100, validation_split=0.2, verbose=0) # Plot showClassificationResults(trainHistory) """ Explanation: Now train a linear model with an output layer and a hidden layer. 
End of explanation """ model = None # Define model = keras.Sequential() model.add(keras.layers.Dense(mnistData.shape[1], activation='linear', input_dim = mnistData.shape[1])) model.add(keras.layers.Dense(10, activation='softmax')) # Compile model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Train trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100, validation_split=0.1, verbose=0) # Plot showClassificationResults(trainHistory) """ Explanation: Wow, that accuracy is terrible! What could the cause be? Hint: You followed the same procedure as for the previous regression problem. Do you need an adaptation for a classification problem? Experiment with the code above or skip to the solution below. Solution In regression, the last layer uses a linear activation function. In classification, the last layer cannot use a linear transform. Instead, one option is a softmax transform. Furthermore, in regression, the loss is calculated using MSE while in classification, loss is calculated using crossentropy. Before running your model, if you wrote a test to validate the output values, your test would detect the anomalous output. You'll look at such a test later. Move onto the next section to fix the loss calculation. Fixing Loss Calculation Since your labels are integers instead of one-hot encodings, use sparse_categorical_crossentropy instead of categorical_crossentropy so that you avoid converting the integers to one-hot encoding. Retrain the model with the new loss calculation by running the following cell. Look through the code to note the changes. What do you think of the result? 
End of explanation """ model = None # Define model = keras.Sequential() model.add(keras.layers.Dense(mnistData.shape[1], activation='', # use 'relu' input_dim=mnistData.shape[1])) model.add(keras.layers.Dense(10, activation='softmax')) # Compile model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Train trainHistory = model.fit(mnistData, mnistLabels, epochs=20, batch_size=100, validation_split=0.1, verbose=0) # Plot showClassificationResults(trainHistory) """ Explanation: Your loss curves are much better. Your accuracy has improved too. You're on the right track. Train a Nonlinear Model Switch to a nonlinear model by modifying the code below to use relu activation functions instead of linear activation functions. Run the code. What do you observe? End of explanation """ model = None # Define model = keras.Sequential() model.add(keras.layers.Dense(mnistData.shape[1], activation='relu', input_dim = mnistData.shape[1])) model.add(keras.layers.Dense(mnistData.shape[1], activation='relu')) model.add(keras.layers.Dense(10,activation='softmax')) # Compile model.compile(optimizer="adam", loss='sparse_categorical_crossentropy', metrics=['accuracy']) # Train trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100, validation_split=0.1, verbose=0) # Plot showClassificationResults(trainHistory) """ Explanation: The quality of the nonlinear model is significantly better than of the linear model. Progress! Move onto the next section. Adding a Second Layer Increasing the model's capacity significantly improved your results. Perhaps you can continue this strategy by adding a second relu layer. Run the following code cell to train the model with another relu layer. 
End of explanation """ %hide_result # hides result of cell computation f = plt.figure(figsize=(10,3)) ax = f.add_subplot(1,2,1) plt.hist(mnistLabels[0:len(mnistLabels)*8/10], bins=range(numClasses+1)) plt.xticks(range(numClasses+1)) ax2 = f.add_subplot(1,2,2,) plt.hist(mnistLabels[len(mnistLabels)*8/10:-1], bins=range(numClasses+1)) plt.xticks(range(numClasses+1)) """ Explanation: Guess what. Your previous model had training and validation accuracies of 100% and 95%. You can't do much better than that! So your new accuracy is about the same. How high can you push your accuracy? With this configuration the highest training and validation accuracies appear to be 100% and 96% respectively. Since the neural net returns similar accuracy with 1 or 2 layers, let's use the simpler model with 1 layer. Does your model begin to overfit the training data if you train for long enough? (Your model starts overfitting training data at the point when your validation loss starts increasing.) Check for Training/Validation Data Skew Our validation accuracy is a little worse than our training accuracy. While this result is always expected, you should check for typical errors. The commonest cause is having different distributions of data and labels in training and validation. Confirm that the distribution of classes in training and validation data is similar. 
End of explanation """ from keras import regularizers model = None # Define lambda dropoutLambda = 0.5 #@param # Define model model = keras.Sequential() model.add(keras.layers.Dense(mnistData.shape[1], input_dim=mnistData.shape[1], activation='relu')) model.add(keras.layers.Dropout(dropoutLambda, noise_shape=(1, mnistData.shape[1]))) model.add(keras.layers.Dense(10, activation='softmax')) # Compile model.compile(optimizer = "adam", loss = 'sparse_categorical_crossentropy', metrics = ['accuracy']) # Train trainHistory = model.fit(mnistData, mnistLabels, epochs=30, batch_size=500, validation_split=0.1, verbose=0) # Plot showClassificationResults(trainHistory) """ Explanation: Apply Dropout Regularization Dropout regularization is a common regularization method that removes a random selection of a fixed number of units in a network layer for a single gradient step. Typically, dropout will improve generalization at a dropout rate of between 10% and 50% of neurons. Try to reduce the divergence between training and validation loss by using dropout regularization with values between 0.1 and 0.5. Dropout does not improve the results in this case. However, at a dropout of 0.5, the difference in loss decreases, though both training and validation loss decrease in absolute terms. End of explanation """ from sklearn.metrics import classification_report mnistPred = model.predict_classes(x = mnistData) print(classification_report(mnistLabels, mnistPred)) """ Explanation: Sample results using dropout regularization after 30 epochs: Lambda | Training Loss | Validation Loss ------- | ------------------------------------------------------ 0.1 | 0.99 | 0.95 0.2 | 0.99 | 0.95 0.3 | 0.99 | 0.95 0.5 | 0.97 | 0.94 Check Accuracy for Data Slices For classification problems, you should always check the metrics by class to ensure your model predicts well across all classes. 
Check accuracy on the 10 classes by running the next cell by using the function sklearn.metrics.classification_report from the scikit-learn library. In the output, the rows with indices 0 to 9 correspond to the classes for the labels 0 to 9. The columns "Precision", "Recall", and "F1-Score" correspond to the respective classification metrics for each class. "Support" is the number of examples for the class in question. For example, for the label "4", when predicting on 464 examples labelled "4", the model has a precision of 0.98, a recall of 0.97, and a F1 score of 0.98. The classification metrics are very uniform across all classes, which is perfect. In your classification problem, in case any metric is lower for a class, then you should investigate why the model has lower-quality predictions for that class. End of explanation """ print("Mean of actual labels: " + str(np.mean(mnistLabels))) print("Standard deviation of actual labels: " + str(np.std(mnistLabels))) """ Explanation: Testing for Anomalous Values In the section Train a Linear Model, you debugged an incorrect calculation of loss. Before running your model, if you wrote a test to validate the output values, your test would detect the anomalous output. For example, you could test whether the distribution of predicted labels on the training dataset is similar to the actual distribution of training labels. A simple statistical implementation of this concept is to compare the standard deviation and mean of the predicted and actual labels. First, check the standard deviation and mean of the actual labels. End of explanation """ class mlTest(unittest.TestCase): '''Class to test statistics of predicted output on training data against statistics of labels to validate that model predictions are in the] expected range. ''' def testStd(self): y = model.predict(mnistData) yStd = np.std(y) yStdActual = np.std(mnistLabels) deltaStd = 0.05 errorMsg = 'Std. dev. 
of predicted values ' + str(yStd) + \ ' and actual values ' + str(yStdActual) + \ ' differs by >' + str(deltaStd) + '.' self.assertAlmostEqual(yStd, yStdActual, delta=deltaStd, msg=errorMsg) def testMean(self): y = model.predict(mnistData) yMean = np.mean(y) yMeanActual = np.mean(mnistLabels) deltaMean = 0.05 errorMsg = 'Mean of predicted values ' + str(yMean) + \ ' and actual values ' + str(yMeanActual) + \ ' differs by >' + str(deltaMean) + '.' self.assertAlmostEqual(yMean, yMeanActual, delta=deltaMean, msg=errorMsg) """ Explanation: Write tests to check if the mean and standard deviation of the predicted labels falls within the expected range. The expected range defined in the tests below is somewhat arbitrary. In practice, you will tune the range thresholds to accommodate natural variation in predictions. End of explanation """ #@title Train model and run tests model = None # Define model = keras.Sequential() model.add(keras.layers.Dense(mnistData.shape[1], activation='linear', input_dim=mnistData.shape[1])) model.add(keras.layers.Dense(1, activation='linear')) # Compile model.compile(optimizer="adam", loss='mse', metrics=['accuracy']) # Train trainHistory = model.fit(mnistData, mnistLabels, epochs=10, batch_size=100, validation_split=0.1, verbose=0) suite = unittest.TestLoader().loadTestsFromTestCase(mlTest) unittest.TextTestRunner(verbosity=1, stream=sys.stderr).run(suite) """ Explanation: Run the following cell to train a model with the wrong loss calculation and execute the tests. The tests should fail. End of explanation """ yPred = model.predict(mnistData) plt.hist(yPred, bins=range(11)) """ Explanation: Since the tests fail, check the data distribution of predicted labels for anomalies. End of explanation """
jonathanmorgan/msu_phd_work
data/article_loading/proquest_hnp/ChristianScienceMonitor/proquest_hnp-article_loading-ChristianScienceMonitor.ipynb
lgpl-3.0
debug_flag = False """ Explanation: <h1>Table of Contents<span class="tocSkip"></span></h1> <div class="toc"><ul class="toc-item"><li><span><a href="#Introduction" data-toc-modified-id="Introduction-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Introduction</a></span></li><li><span><a href="#Setup" data-toc-modified-id="Setup-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Setup</a></span><ul class="toc-item"><li><span><a href="#Setup---Debug" data-toc-modified-id="Setup---Debug-2.1"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Setup - Debug</a></span></li><li><span><a href="#Setup---Imports" data-toc-modified-id="Setup---Imports-2.2"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>Setup - Imports</a></span></li><li><span><a href="#Setup---working-folder-paths" data-toc-modified-id="Setup---working-folder-paths-2.3"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Setup - working folder paths</a></span></li><li><span><a href="#Setup---logging" data-toc-modified-id="Setup---logging-2.4"><span class="toc-item-num">2.4&nbsp;&nbsp;</span>Setup - logging</a></span></li><li><span><a href="#Setup---virtualenv-jupyter-kernel" data-toc-modified-id="Setup---virtualenv-jupyter-kernel-2.5"><span class="toc-item-num">2.5&nbsp;&nbsp;</span>Setup - virtualenv jupyter kernel</a></span></li><li><span><a href="#Setup---Initialize-Django" data-toc-modified-id="Setup---Initialize-Django-2.6"><span class="toc-item-num">2.6&nbsp;&nbsp;</span>Setup - Initialize Django</a></span></li><li><span><a href="#Setup---Initialize-LoggingHelper" data-toc-modified-id="Setup---Initialize-LoggingHelper-2.7"><span class="toc-item-num">2.7&nbsp;&nbsp;</span>Setup - Initialize LoggingHelper</a></span></li><li><span><a href="#Setup---initialize-ProquestHNPNewspaper" data-toc-modified-id="Setup---initialize-ProquestHNPNewspaper-2.8"><span class="toc-item-num">2.8&nbsp;&nbsp;</span>Setup - initialize ProquestHNPNewspaper</a></span><ul class="toc-item"><li><span><a href="#load-from-database" 
data-toc-modified-id="load-from-database-2.8.1"><span class="toc-item-num">2.8.1&nbsp;&nbsp;</span>load from database</a></span></li><li><span><a href="#set-up-manually" data-toc-modified-id="set-up-manually-2.8.2"><span class="toc-item-num">2.8.2&nbsp;&nbsp;</span>set up manually</a></span></li></ul></li></ul></li><li><span><a href="#Find-articles-to-be-loaded" data-toc-modified-id="Find-articles-to-be-loaded-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Find articles to be loaded</a></span><ul class="toc-item"><li><span><a href="#Uncompress-files" data-toc-modified-id="Uncompress-files-3.1"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Uncompress files</a></span></li><li><span><a href="#Work-with-uncompressed-files" data-toc-modified-id="Work-with-uncompressed-files-3.2"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Work with uncompressed files</a></span></li><li><span><a href="#parse-and-load-XML-files" data-toc-modified-id="parse-and-load-XML-files-3.3"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>parse and load XML files</a></span></li><li><span><a href="#build-list-of-all-ObjectTypes" data-toc-modified-id="build-list-of-all-ObjectTypes-3.4"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>build list of all ObjectTypes</a></span></li><li><span><a href="#map-files-to-types" data-toc-modified-id="map-files-to-types-3.5"><span class="toc-item-num">3.5&nbsp;&nbsp;</span>map files to types</a></span><ul class="toc-item"><li><span><a href="#explore-all-known-object-types" data-toc-modified-id="explore-all-known-object-types-3.5.1"><span class="toc-item-num">3.5.1&nbsp;&nbsp;</span>explore all known object types</a></span></li><li><span><a href="#files-in-archive-CSM_20170929191926_00001---1994" data-toc-modified-id="files-in-archive-CSM_20170929191926_00001---1994-3.5.2"><span class="toc-item-num">3.5.2&nbsp;&nbsp;</span>files in archive CSM_20170929191926_00001 - 1994</a></span></li></ul></li></ul></li><li><span><a href="#TODO" 
data-toc-modified-id="TODO-4"><span class="toc-item-num">4&nbsp;&nbsp;</span>TODO</a></span></li></ul></div> Introduction Back to Table of Contents This is a notebook that expands on the OpenCalais code in the file article_coding.py, also in this folder. It includes more sections on selecting publications you want to submit to OpenCalais as an example. It is intended to be copied and re-used. Setup Back to Table of Contents Setup - Debug Back to Table of Contents End of explanation """ import datetime import glob import logging import lxml import os import six import xml import xmltodict import zipfile """ Explanation: Setup - Imports Back to Table of Contents End of explanation """ # paper identifier paper_identifier = "ChristianScienceMonitor" archive_identifier = None # source source_paper_folder = "/mnt/hgfs/projects/phd/proquest_hnp/proquest_hnp/data" source_paper_path = "{}/{}".format( source_paper_folder, paper_identifier ) # uncompressed uncompressed_paper_folder = "/mnt/hgfs/projects/phd/proquest_hnp/uncompressed" uncompressed_paper_path = "{}/{}".format( uncompressed_paper_folder, paper_identifier ) # make sure an identifier is set before you make a path here. if ( ( archive_identifier is not None ) and ( archive_identifier != "" ) ): # identifier is set. source_archive_file = "{}.zip".format( archive_identifier ) source_archive_path = "{}/{}".format( source_paper_path, source_archive_file ) uncompressed_archive_path = "{}/{}".format( uncompressed_paper_path, archive_identifier ) #-- END check to see if archive_identifier present. --# %pwd # current working folder current_working_folder = "/home/jonathanmorgan/work/django/research/work/phd_work/data/article_loading/proquest_hnp/{}".format( paper_identifier ) current_datetime = datetime.datetime.now() current_date_string = current_datetime.strftime( "%Y-%m-%d-%H-%M-%S" ) """ Explanation: Setup - working folder paths Back to Table of Contents What data are we looking at? 
End of explanation """ logging_file_name = "{}/research-data_load-{}-{}.log.txt".format( current_working_folder, paper_identifier, current_date_string ) logging.basicConfig( level = logging.DEBUG, format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s', filename = logging_file_name, filemode = 'w' # set to 'a' if you want to append, rather than overwrite each time. ) """ Explanation: Setup - logging Back to Table of Contents configure logging for this notebook's kernel (If you do not run this cell, you'll get the django application's logging configuration. End of explanation """ # init django django_init_folder = "/home/jonathanmorgan/work/django/research/work/phd_work" django_init_path = "django_init.py" if( ( django_init_folder is not None ) and ( django_init_folder != "" ) ): # add folder to front of path. django_init_path = "{}/{}".format( django_init_folder, django_init_path ) #-- END check to see if django_init folder. --# %run $django_init_path # context_text imports from context_text.article_coding.article_coding import ArticleCoder from context_text.article_coding.article_coding import ArticleCoding from context_text.article_coding.open_calais_v2.open_calais_v2_article_coder import OpenCalaisV2ArticleCoder from context_text.collectors.newsbank.newspapers.GRPB import GRPB from context_text.collectors.newsbank.newspapers.DTNB import DTNB from context_text.models import Article from context_text.models import Article_Subject from context_text.models import Newspaper from context_text.shared.context_text_base import ContextTextBase # context_text_proquest_hnp from context_text_proquest_hnp.proquest_hnp_newspaper_helper import ProquestHNPNewspaperHelper """ Explanation: Setup - virtualenv jupyter kernel Back to Table of Contents If you are using a virtualenv, make sure that you: have installed your virtualenv as a kernel. choose the kernel for your virtualenv as the kernel for your notebook (Kernel --> Change kernel). 
Since I use a virtualenv, need to get that activated somehow inside this notebook. One option is to run ../dev/wsgi.py in this notebook, to configure the python environment manually as if you had activated the sourcenet virtualenv. To do this, you'd make a code cell that contains: %run ../dev/wsgi.py This is sketchy, however, because of the changes it makes to your Python environment within the context of whatever your current kernel is. I'd worry about collisions with the actual Python 3 kernel. Better, one can install their virtualenv as a separate kernel. Steps: activate your virtualenv: workon research in your virtualenv, install the package ipykernel. pip install ipykernel use the ipykernel python program to install the current environment as a kernel: python -m ipykernel install --user --name &lt;env_name&gt; --display-name "&lt;display_name&gt;" sourcenet example: python -m ipykernel install --user --name sourcenet --display-name "research (Python 3)" More details: http://ipython.readthedocs.io/en/stable/install/kernel_install.html Setup - Initialize Django Back to Table of Contents First, initialize my dev django project, so I can run code in this notebook that references my django models and can talk to the database using my project's settings. End of explanation """ # python_utilities from python_utilities.logging.logging_helper import LoggingHelper # init my_logging_helper = LoggingHelper() my_logging_helper.set_logger_name( "proquest_hnp-article-loading-{}".format( paper_identifier ) ) log_message = None """ Explanation: Setup - Initialize LoggingHelper Back to Table of Contents Create a LoggingHelper instance to use to log debug and also print at the same time. Preconditions: Must be run after Django is initialized, since python_utilities is in the django path. 
End of explanation """ my_paper = ProquestHNPNewspaperHelper() paper_instance = my_paper.initialize_from_database( paper_identifier ) my_paper.source_all_papers_folder = source_paper_folder my_paper.destination_all_papers_folder = uncompressed_paper_folder print( my_paper ) print( paper_instance ) """ Explanation: Setup - initialize ProquestHNPNewspaper Back to Table of Contents Create an initialize an instance of ProquestHNPNewspaper for this paper. load from database Back to Table of Contents End of explanation """ my_paper = ProquestHNPNewspaperHelper() my_paper.paper_identifier = paper_identifier my_paper.source_all_papers_folder = source_paper_folder my_paper.source_paper_path = source_paper_path my_paper.destination_all_papers_folder = uncompressed_paper_folder my_paper.destination_paper_path = uncompressed_paper_path my_paper.paper_start_year = 1908 my_paper.paper_end_year = 1994 my_newspaper = Newspaper.objects.get( id = 8 ) my_paper.newspaper = my_newspaper """ Explanation: set up manually Back to Table of Contents End of explanation """ phnp_newspaper_instance = my_paper.create_PHNP_newspaper() print( phnp_newspaper_instance ) """ Explanation: If desired, add to database. End of explanation """ # create folder to hold the results of decompressing paper's zip files. did_uncomp_paper_folder_exist = my_paper.make_dest_paper_folder() """ Explanation: Find articles to be loaded Back to Table of Contents Specify which folder of XML files should be loaded into system, then process all files within the folder. The compressed archives from proquest_hnp just contain publication XML files, no containing folder. To process: uncompresed paper folder ( &lt;paper_folder&gt; ) - make a folder in /mnt/hgfs/projects/phd/proquest_hnp/uncompressed for the paper whose data you are working with, named the same as the paper's folder in /mnt/hgfs/projects/phd/proquest_hnp/proquest_hnp/data. for example, for the Boston Globe, name it "BostonGlobe". 
uncompressed archive folder ( &lt;archive_folder&gt; ) - inside a given paper's folder in uncompressed, for each archive file, create a folder named the same as the archive file, but with no ".zip" at the end. For example, for the file "BG_20171002210239_00001.zip", make a folder named "BG_20171002210239_00001". path should be "&lt;paper_folder&gt;/&lt;archive_name_no_zip&gt;. unzip the archive into this folder: unzip &lt;path_to_zip&gt; -d &lt;archive_folder&gt; Uncompress files Back to Table of Contents See if the uncompressed paper folder exists. If not, set flag and create it. End of explanation """ # decompress the files my_paper.uncompress_paper_zip_files() """ Explanation: For each *.zip file in the paper's source folder: parse file name from path returned by glob. parse the part before ".zip" from the file name. This is referred to subsequently as the "archive identifier". check if folder named the same as the "archive identifier" is present. If no: create it. then, uncompress the archive into it. If yes: output a message. Don't want to uncompress if it was already uncompressed once. End of explanation """ %cd $uncompressed_paper_path %ls """ Explanation: Work with uncompressed files Back to Table of Contents Change working directories to the uncompressed paper path. End of explanation """ # loop over files in the current archive folder path. object_type_to_count_map = my_paper.process_archive_object_types( uncompressed_archive_path ) """ Explanation: parse and load XML files Back to Table of Contents Load one of the files into memory and see what we can do with it. Beautiful Soup? Looks like the root element is "Record", then the high-level type of the article is "ObjectType". ObjectType values: Advertisement ... 
Good options for XML parser: lxml.etree - https://stackoverflow.com/questions/12290091/reading-xml-file-and-fetching-its-attributes-value-in-python xmltodict - https://docs.python-guide.org/scenarios/xml/ beautifulsoup using lxml End of explanation """ xml_folder_list = glob.glob( "{}/*".format( uncompressed_paper_path ) ) print( "folder_list: {}".format( xml_folder_list ) ) # build map of all object types for a paper to the overall counts of each paper_object_type_to_count_map = my_paper.process_paper_object_types() """ Explanation: Processing 5752 files in /mnt/hgfs/projects/phd/proquest_hnp/uncompressed/BostonGlobe/BG_20171002210239_00001 ----&gt; XML file count: 5752 Counters: - Processed 5752 files - No Record: 0 - No ObjectType: 0 - No ObjectType value: 0 ObjectType values and occurrence counts: - A|d|v|e|r|t|i|s|e|m|e|n|t: 1902 - Article|Feature: 1792 - N|e|w|s: 53 - Commentary|Editorial: 36 - G|e|n|e|r|a|l| |I|n|f|o|r|m|a|t|i|o|n: 488 - S|t|o|c|k| |Q|u|o|t|e: 185 - Advertisement|Classified Advertisement: 413 - E|d|i|t|o|r|i|a|l| |C|a|r|t|o|o|n|/|C|o|m|i|c: 31 - Correspondence|Letter to the Editor: 119 - Front Matter|Table of Contents: 193 - O|b|i|t|u|a|r|y: 72 - F|r|o|n|t| |P|a|g|e|/|C|o|v|e|r| |S|t|o|r|y: 107 - I|m|a|g|e|/|P|h|o|t|o|g|r|a|p|h: 84 - Marriage Announcement|News: 6 - I|l|l|u|s|t|r|a|t|i|o|n: 91 - R|e|v|i|e|w: 133 - C|r|e|d|i|t|/|A|c|k|n|o|w|l|e|d|g|e|m|e|n|t: 30 - News|Legal Notice: 17 build list of all ObjectTypes Back to Table of Contents Loop over all folders in the paper path. For each folder, grab all files in the folder. For each file, parse XML, then get the ObjectType value and if it isn't already in map of obect types to counts, add it. Increment count. From command line, in the uncompressed BostonGlobe folder: find . -type f -iname "*.xml" | wc -l resulted in 11,374,500 articles. That is quite a few. 
End of explanation """ news_object_type_list = [] news_object_type_list.append( 'Article|Feature' ) news_object_type_list.append( 'Feature|Article' ) news_object_type_list.append( 'F|r|o|n|t| |P|a|g|e|/|C|o|v|e|r| |S|t|o|r|y' ) """ Explanation: Example output: XML file count: 5752 Counters: - Processed 5752 files - No Record: 0 - No ObjectType: 0 - No ObjectType value: 0 ObjectType values and occurrence counts: - A|d|v|e|r|t|i|s|e|m|e|n|t: 2114224 - Feature|Article: 5271887 - I|m|a|g|e|/|P|h|o|t|o|g|r|a|p|h: 249942 - O|b|i|t|u|a|r|y: 625143 - G|e|n|e|r|a|l| |I|n|f|o|r|m|a|t|i|o|n: 1083164 - S|t|o|c|k| |Q|u|o|t|e: 202776 - N|e|w|s: 140274 - I|l|l|u|s|t|r|a|t|i|o|n: 106925 - F|r|o|n|t| |P|a|g|e|/|C|o|v|e|r| |S|t|o|r|y: 386421 - E|d|i|t|o|r|i|a|l| |C|a|r|t|o|o|n|/|C|o|m|i|c: 78993 - Editorial|Commentary: 156342 - C|r|e|d|i|t|/|A|c|k|n|o|w|l|e|d|g|e|m|e|n|t: 68356 - Classified Advertisement|Advertisement: 291533 - R|e|v|i|e|w: 86889 - Table of Contents|Front Matter: 69798 - Letter to the Editor|Correspondence: 202071 - News|Legal Notice: 24053 - News|Marriage Announcement: 41314 - B|i|r|t|h| |N|o|t|i|c|e: 926 - News|Military/War News: 3 - U|n|d|e|f|i|n|e|d: 5 - Article|Feature: 137526 - Front Matter|Table of Contents: 11195 - Commentary|Editorial: 3386 - Marriage Announcement|News: 683 - Correspondence|Letter to the Editor: 7479 - Legal Notice|News: 1029 - Advertisement|Classified Advertisement: 12163 map files to types Back to Table of Contents Choose a directory, then loop over the files in the directory to build a map of types to lists of file names. End of explanation """ # get list of all object types master_object_type_list = my_paper.get_all_object_types() print( "Object Types: {}".format( master_object_type_list ) ) # directory to work in. 
uncompressed_archive_folder = "CSM_20170929191926_00001" uncompressed_archive_path = "{}/{}".format( uncompressed_paper_path, uncompressed_archive_folder ) print( 'Uncompressed archive folder: {}'.format( uncompressed_archive_path ) ) # build map of file types to lists of files of that type in specified folder. object_type_to_file_path_map = my_paper.map_archive_folder_files_to_types( uncompressed_archive_path ) # which types do we want to preview? #types_to_output = news_object_type_list types_to_output = [ "Advertisement|Classified Advertisement" ] types_to_output = [ "A|d|v|e|r|t|i|s|e|m|e|n|t" ] types_to_output = [ 'Advertisement|Classified Advertisement' ] types_to_output = [ 'Article|Feature' ] types_to_output = [ 'B|i|r|t|h| |N|o|t|i|c|e' ] types_to_output = [ 'Classified Advertisement|Advertisement' ] types_to_output = [ 'Commentary|Editorial' ] types_to_output = [ 'Correspondence|Letter to the Editor' ] types_to_output = [ 'C|r|e|d|i|t|/|A|c|k|n|o|w|l|e|d|g|e|m|e|n|t' ] types_to_output = [ 'E|d|i|t|o|r|i|a|l| |C|a|r|t|o|o|n|/|C|o|m|i|c' ] types_to_output = [ 'Editorial|Commentary' ] types_to_output = [ 'Feature|Article' ] types_to_output = [ 'Front Matter|Table of Contents' ] types_to_output = [ 'F|r|o|n|t| |P|a|g|e|/|C|o|v|e|r| |S|t|o|r|y' ] types_to_output = [ 'G|e|n|e|r|a|l| |I|n|f|o|r|m|a|t|i|o|n' ] types_to_output = [ 'I|l|l|u|s|t|r|a|t|i|o|n' ] types_to_output = [ 'I|m|a|g|e|/|P|h|o|t|o|g|r|a|p|h' ] types_to_output = [ 'Legal Notice|News' ] types_to_output = [ 'Letter to the Editor|Correspondence' ] types_to_output = [ 'Marriage Announcement|News' ] types_to_output = [ 'N|e|w|s' ] types_to_output = [ 'News|Legal Notice' ] types_to_output = [ 'News|Marriage Announcement' ] types_to_output = [ 'News|Military/War News' ] types_to_output = [ 'O|b|i|t|u|a|r|y' ] types_to_output = [ 'R|e|v|i|e|w' ] types_to_output = [ 'S|t|o|c|k| |Q|u|o|t|e' ] types_to_output = [ 'Table of Contents|Front Matter' ] types_to_output = [ 'Table Of Contents|Front Matter' ] 
types_to_output = [ 'U|n|d|e|f|i|n|e|d' ] # declare variables xml_file_path_list = None xml_file_path_count = None xml_file_path_example_list = None xml_file_path = None xml_file = None xml_dict = None xml_string = None # loop over types for object_type in types_to_output: # print type and count xml_file_path_list = object_type_to_file_path_map.get( object_type, [] ) xml_file_path_count = len( xml_file_path_list ) xml_file_path_example_list = xml_file_path_list[ : 10 ] print( "\n- {} - {} files:".format( object_type, xml_file_path_count ) ) for xml_file_path in xml_file_path_example_list: print( "----> {}".format( xml_file_path ) ) # try to parse the file with open( xml_file_path ) as xml_file: # parse XML xml_dict = xmltodict.parse( xml_file.read() ) #-- END with open( xml_file_path ) as xml_file: --# # pretty-print xml_string = xmltodict.unparse( xml_dict, pretty = True ) # output print( xml_string ) #-- END loop over example file paths. --# #-- END loop over object types. --# """ Explanation: explore all known object types Back to Table of Contents Look at all known object types to see which contain actual news content. End of explanation """ # directory to work in. uncompressed_archive_folder = "CSM_20170929191926_00001" uncompressed_archive_path = "{}/{}".format( uncompressed_paper_path, uncompressed_archive_folder ) print( 'Uncompressed archive folder: {}'.format( uncompressed_archive_path ) ) # build map of file types to lists of files of that type in specified folder. object_type_to_file_path_map = my_paper.map_archive_folder_files_to_types( uncompressed_archive_path ) # which types do we want to preview? 
types_to_output = news_object_type_list # declare variables xml_file_path_list = None xml_file_path_count = None xml_file_path_example_list = None xml_file_path = None xml_file = None xml_dict = None xml_string = None # loop over types for object_type in types_to_output: # print type and count xml_file_path_list = object_type_to_file_path_map.get( object_type, [] ) xml_file_path_count = len( xml_file_path_list ) xml_file_path_example_list = xml_file_path_list[ : 10 ] print( "\n- {} - {} files:".format( object_type, xml_file_path_count ) ) for xml_file_path in xml_file_path_example_list: print( "----> {}".format( xml_file_path ) ) # try to parse the file with open( xml_file_path ) as xml_file: # parse XML xml_dict = xmltodict.parse( xml_file.read() ) #-- END with open( xml_file_path ) as xml_file: --# # pretty-print xml_string = xmltodict.unparse( xml_dict, pretty = True ) # output print( xml_string ) #-- END loop over example file paths. --# #-- END loop over object types. --# """ Explanation: files in archive CSM_20170929191926_00001 - 1994 Back to Table of Contents Archive details: ID: 903 Newspaper: 3 - ChristianScienceMonitor - Christian Science Monitor, The archive_identifier: CSM_20170929191926_00001 min_date: 1994-01-03 max_date: 1994-12-30 path: /mnt/hgfs/projects/phd/proquest_hnp/uncompressed/ChristianScienceMonitor/CSM_20170929191926_00001 End of explanation """
mne-tools/mne-tools.github.io
dev/_downloads/c69e0120935518121b8298ecac72eed8/35_dipole_orientations.ipynb
bsd-3-clause
import mne import numpy as np from mne.datasets import sample from mne.minimum_norm import make_inverse_operator, apply_inverse data_path = sample.data_path() meg_path = data_path / 'MEG' / 'sample' evokeds = mne.read_evokeds(meg_path / 'sample_audvis-ave.fif') left_auditory = evokeds[0].apply_baseline() fwd = mne.read_forward_solution( meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif') mne.convert_forward_solution(fwd, surf_ori=True, copy=False) noise_cov = mne.read_cov(meg_path / 'sample_audvis-cov.fif') subject = 'sample' subjects_dir = data_path / 'subjects' trans_fname = meg_path / 'sample_audvis_raw-trans.fif' """ Explanation: The role of dipole orientations in distributed source localization When performing source localization in a distributed manner (i.e., using MNE/dSPM/sLORETA/eLORETA), the source space is defined as a grid of dipoles that spans a large portion of the cortex. These dipoles have both a position and an orientation. In this tutorial, we will look at the various options available to restrict the orientation of the dipoles and the impact on the resulting source estimate. See inverse_orientation_constraints for related information. Load data Load everything we need to perform source localization on the sample dataset. 
End of explanation """ lh = fwd['src'][0] # Visualize the left hemisphere verts = lh['rr'] # The vertices of the source space tris = lh['tris'] # Groups of three vertices that form triangles dip_pos = lh['rr'][lh['vertno']] # The position of the dipoles dip_ori = lh['nn'][lh['vertno']] dip_len = len(dip_pos) dip_times = [0] white = (1.0, 1.0, 1.0) # RGB values for a white color actual_amp = np.ones(dip_len) # misc amp to create Dipole instance actual_gof = np.ones(dip_len) # misc GOF to create Dipole instance dipoles = mne.Dipole(dip_times, dip_pos, actual_amp, dip_ori, actual_gof) trans = mne.read_trans(trans_fname) fig = mne.viz.create_3d_figure(size=(600, 400), bgcolor=white) coord_frame = 'mri' # Plot the cortex mne.viz.plot_alignment( subject=subject, subjects_dir=subjects_dir, trans=trans, surfaces='white', coord_frame=coord_frame, fig=fig) # Mark the position of the dipoles with small red dots mne.viz.plot_dipole_locations( dipoles=dipoles, trans=trans, mode='sphere', subject=subject, subjects_dir=subjects_dir, coord_frame=coord_frame, scale=7e-4, fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=180, distance=0.25) """ Explanation: The source space Let's start by examining the source space as constructed by the :func:mne.setup_source_space function. Dipoles are placed along fixed intervals on the cortex, determined by the spacing parameter. The source space does not define the orientation for these dipoles. 
End of explanation """ fig = mne.viz.create_3d_figure(size=(600, 400)) # Plot the cortex mne.viz.plot_alignment( subject=subject, subjects_dir=subjects_dir, trans=trans, surfaces='white', coord_frame='head', fig=fig) # Show the dipoles as arrows pointing along the surface normal mne.viz.plot_dipole_locations( dipoles=dipoles, trans=trans, mode='arrow', subject=subject, subjects_dir=subjects_dir, coord_frame='head', scale=7e-4, fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=180, distance=0.1) """ Explanation: Fixed dipole orientations While the source space defines the position of the dipoles, the inverse operator defines the possible orientations of them. One of the options is to assign a fixed orientation. Since the neural currents from which MEG and EEG signals originate flows mostly perpendicular to the cortex :footcite:HamalainenEtAl1993, restricting the orientation of the dipoles accordingly places a useful restriction on the source estimate. By specifying fixed=True when calling :func:mne.minimum_norm.make_inverse_operator, the dipole orientations are fixed to be orthogonal to the surface of the cortex, pointing outwards. Let's visualize this: End of explanation """ # Compute the source estimate for the left auditory condition in the sample # dataset. inv = make_inverse_operator(left_auditory.info, fwd, noise_cov, fixed=True) stc = apply_inverse(left_auditory, inv, pick_ori=None) # Visualize it at the moment of peak activity. 
_, time_max = stc.get_peak(hemi='lh') brain_fixed = stc.plot(surface='white', subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400)) mne.viz.set_3d_view(figure=brain_fixed, focalpoint=(0., 0., 50)) """ Explanation: Restricting the dipole orientations in this manner leads to the following source estimate for the sample data: End of explanation """ fig = mne.viz.create_3d_figure(size=(600, 400)) # Plot the cortex mne.viz.plot_alignment( subject=subject, subjects_dir=subjects_dir, trans=trans, surfaces='white', coord_frame='head', fig=fig) # Show the three dipoles defined at each location in the source space mne.viz.plot_alignment( subject=subject, subjects_dir=subjects_dir, trans=trans, fwd=fwd, surfaces='white', coord_frame='head', fig=fig) mne.viz.set_3d_view(figure=fig, azimuth=180, distance=0.1) """ Explanation: The direction of the estimated current is now restricted to two directions: inward and outward. In the plot, blue areas indicate current flowing inwards and red areas indicate current flowing outwards. Given the curvature of the cortex, groups of dipoles tend to point in the same direction: the direction of the electromagnetic field picked up by the sensors. Loose dipole orientations Forcing the source dipoles to be strictly orthogonal to the cortex makes the source estimate sensitive to the spacing of the dipoles along the cortex, since the curvature of the cortex changes within each ~10 square mm patch. Furthermore, misalignment of the MEG/EEG and MRI coordinate frames is more critical when the source dipole orientations are strictly constrained :footcite:LinEtAl2006. To lift the restriction on the orientation of the dipoles, the inverse operator has the ability to place not one, but three dipoles at each location defined by the source space. These three dipoles are placed orthogonally to form a Cartesian coordinate system. 
Let's visualize this: End of explanation """ # Make an inverse operator with loose dipole orientations inv = make_inverse_operator(left_auditory.info, fwd, noise_cov, fixed=False, loose=1.0) # Compute the source estimate, indicate that we want a vector solution stc = apply_inverse(left_auditory, inv, pick_ori='vector') # Visualize it at the moment of peak activity. _, time_max = stc.magnitude().get_peak(hemi='lh') brain_mag = stc.plot(subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400), overlay_alpha=0) mne.viz.set_3d_view(figure=brain_mag, focalpoint=(0., 0., 50)) """ Explanation: When computing the source estimate, the activity at each of the three dipoles is collapsed into the XYZ components of a single vector, which leads to the following source estimate for the sample data: End of explanation """ # Set loose to 0.2, the default value inv = make_inverse_operator(left_auditory.info, fwd, noise_cov, fixed=False, loose=0.2) stc = apply_inverse(left_auditory, inv, pick_ori='vector') # Visualize it at the moment of peak activity. _, time_max = stc.magnitude().get_peak(hemi='lh') brain_loose = stc.plot(subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400), overlay_alpha=0) mne.viz.set_3d_view(figure=brain_loose, focalpoint=(0., 0., 50)) """ Explanation: Limiting orientations, but not fixing them Often, the best results will be obtained by allowing the dipoles to have somewhat free orientation, but not stray too far from a orientation that is perpendicular to the cortex. The loose parameter of the :func:mne.minimum_norm.make_inverse_operator allows you to specify a value between 0 (fixed) and 1 (unrestricted or "free") to indicate the amount the orientation is allowed to deviate from the surface normal. 
End of explanation """ # Only retain vector magnitudes stc = apply_inverse(left_auditory, inv, pick_ori=None) # Visualize it at the moment of peak activity _, time_max = stc.get_peak(hemi='lh') brain = stc.plot(surface='white', subjects_dir=subjects_dir, initial_time=time_max, time_unit='s', size=(600, 400)) mne.viz.set_3d_view(figure=brain, focalpoint=(0., 0., 50)) """ Explanation: Discarding dipole orientation information Often, further analysis of the data does not need information about the orientation of the dipoles, but rather their magnitudes. The pick_ori parameter of the :func:mne.minimum_norm.apply_inverse function allows you to specify whether to return the full vector solution ('vector') or rather the magnitude of the vectors (None, the default) or only the activity in the direction perpendicular to the cortex ('normal'). End of explanation """
oasis-open/cti-python-stix2
docs/guide/creating.ipynb
bsd-3-clause
# Build a minimal STIX Indicator SDO: 'name' is free text, 'pattern' is a STIX
# patterning expression (here: match a file by its MD5 hash), and
# 'pattern_type' tells consumers which pattern language the pattern uses.
from stix2 import Indicator

indicator = Indicator(name="File hash for malware variant",
                      pattern="[file:hashes.md5 = 'd41d8cd98f00b204e9800998ecf8427e']",
                      pattern_type="stix")

# serialize(pretty=True) renders the object as indented JSON for display.
print(indicator.serialize(pretty=True))
"""
Explanation: Creating STIX Content
Creating STIX Domain Objects
To create a STIX object, provide keyword arguments to the type's constructor:
End of explanation
"""
Properties can be accessed using the standard Python dictionary syntax: End of explanation """ indicator.name """ Explanation: Or access properties using the standard Python attribute syntax: End of explanation """ indicator['name'] = "This is a revised name" indicator.name = "This is a revised name" """ Explanation: <div class="alert alert-warning"> **Warning** Note that there are several attributes on these objects used for method names. Accessing those will return a bound method, not the attribute value. </div> Attempting to modify any attributes will raise an error: End of explanation """ from stix2 import Malware malware = Malware(name="Poison Ivy", is_family=False) print(malware.serialize(pretty=True)) """ Explanation: To update the properties of an object, see the Versioning section. Creating a Malware object follows the same pattern: End of explanation """ from stix2 import Relationship relationship = Relationship(relationship_type='indicates', source_ref=indicator.id, target_ref=malware.id) print(relationship.serialize(pretty=True)) """ Explanation: As with indicators, the type, id, created, and modified properties will be set automatically if not provided. For Malware objects, the is_family property must be provided. You can see the full list of SDO classes here. Creating Relationships STIX 2 Relationships are separate objects, not properties of the object on either side of the relationship. They are constructed similarly to other STIX objects. The type, id, created, and modified properties are added automatically if not provided. Callers must provide the relationship_type, source_ref, and target_ref properties. End of explanation """ relationship2 = Relationship(indicator, 'indicates', malware) print(relationship2.serialize(pretty=True)) """ Explanation: The source_ref and target_ref properties can be either the ID's of other STIX objects, or the STIX objects themselves. 
For readability, Relationship objects can also be constructed with the source_ref, relationship_type, and target_ref as positional (non-keyword) arguments: End of explanation """ from stix2 import Bundle bundle = Bundle(indicator, malware, relationship) print(bundle.serialize(pretty=True)) """ Explanation: Creating Bundles STIX Bundles can be created by passing objects as arguments to the Bundle constructor. All required properties (type, id, and spec_version) will be set automatically if not provided, or can be provided as keyword arguments: End of explanation """ from stix2 import IPv4Address ip4 = IPv4Address( value="177.60.40.7", resolves_to_refs=["mac-addr--43f380fd-37c6-476d-8643-60849bf9240e"] ) print(ip4.serialize(pretty=True)) """ Explanation: Creating Cyber Observable References Cyber Observable Objects have properties that can reference other Cyber Observable Objects. In order to create those references, either supply the ID string of the object being referenced, or pass in the object itself. For example, the IPv4Address object has a resolves_to_refs property which must hold a list of references to MACAddress objects. We could specify the id string: End of explanation """ from stix2 import MACAddress mac_addr_a = MACAddress(value="a1:b2:c3:d4:e5:f6") mac_addr_b = MACAddress(value="a7:b8:c9:d0:e1:f2") ip4_valid_refs = IPv4Address( value="177.60.40.7", resolves_to_refs=[mac_addr_a.id, mac_addr_b.id] ) print(ip4_valid_refs.serialize(pretty=True)) """ Explanation: Or we could create the MACAddress object(s) beforehand and then pass them in: End of explanation """
zhiyzuo/python-scopus
Quick-Start.ipynb
mit
import pyscopus pyscopus.__version__ from pyscopus import Scopus key = 'YOUR_OWN_API' scopus = Scopus(key) """ Explanation: PyScopus: Quick Start PyScopus is a Python wrapper of Elsevier Scopus API. More details of this Python package can be found here. <hr> Import Scopus class and initialize with your own API Key End of explanation """ search_df = scopus.search("KEY(topic modeling)", count=30) print(search_df.head(10)) """ Explanation: <hr> General Search End of explanation """ full_text_link_arr = search_df.full_text.values full_text_link_arr """ Explanation: Full text link End of explanation """ full_text = scopus.retrieve_full_text(full_text_link_arr[2]) start = 39500 full_text[start:start+10000] """ Explanation: For those with full text links, you are able to get all the text by calling scopus.retrieve_full_text() End of explanation """ author_result_df = scopus.search_author("AUTHLASTNAME(Zuo) and AUTHFIRST(Zhiya) and AFFIL(Iowa)") print(author_result_df) """ Explanation: <hr> Search for a specific author End of explanation """ zuo_info_dict = scopus.retrieve_author('57189222659') zuo_info_dict.keys() print('\n'.join(zuo_info_dict['affiliation-history'].name.values)) """ Explanation: Then we can retrieve more detailed info about the author we are looking for using his/her author_id: End of explanation """ zuo_pub_df = scopus.search_author_publication('57189222659') zuo_pub_df[['title', 'cover_date', 'publication_name', 'scopus_id']].sort_values('cover_date').reset_index(drop=True) """ Explanation: Search for his publications explicitly End of explanation """ pub_info = scopus.retrieve_abstract('85049552190', './') pub_info cat 85049552190.json """ Explanation: Abstract retrieval If the 2nd argument download_path is not given, the JSON response would not be saved End of explanation """ pub_citations_df = scopus.retrieve_citation(scopus_id_array=['85049552190', '85004154180'], year_range=[2016, 2018]) print(pub_citations_df) """ Explanation: <hr> Note that 
Searching for articles in specific journals (venues) is not supported anymore since this can be easily done by general search. <hr> Citation count retrieval Note that the use of citation overview API needs to be approved by Elsevier. End of explanation """ meta_df, citescore_df, sj_rank_df = scopus.search_serial('informetrics') meta_df """ Explanation: Serial Title Metadata If interested in meta information and metrics at publication venue level (e.g., journal/conference), we can now use search_serial or retrieve_serial Search by title End of explanation """ citescore_df """ Explanation: See more about CiteScore End of explanation """ sj_rank_df.head(2) """ Explanation: The last dataframe below lists the rank/percentile of this serial in each subject area it is assigned to across years - More about subject area code in Scopus link End of explanation """ meta_df, citescore_df, sj_rank_df = scopus.retrieve_serial('2330-1643') meta_df citescore_df sj_rank_df.head(2) """ Explanation: Retrieve by ISSN Given a ISSN, we can use retrieve_serial: End of explanation """ uiowa = scopus.retrieve_affiliation('60024324') uiowa """ Explanation: Affiliation End of explanation """
greenelab/GCB535
30_ML-III/ML_3_Inclass_Homework.ipynb
bsd-3-clause
# numpy provides python tools to easily load comma separated files. import numpy as np # use numpy to load disease #1 data d1 = np.loadtxt(open("../30_Data_ML-III/D1.csv", "rb"), delimiter=",") # features are all rows for columns before 200 # The canonical way to name this is that X is our matrix of # examples by features. X1 = d1[:,:200] # labels are in all rows at the 200th column # The canonical way to name this is that y is our vector of # labels. y1 = d1[:,200] # use numpy to load disease #2 data d2 = np.loadtxt(open("../30_Data_ML-III/D2.csv", "rb"), delimiter=",") # features are all rows for columns before 200 X2 = d2[:,:200] # labels are in all rows at the 200th column y2 = d2[:,200] """ Explanation: Discussion (20 mins) Discuss your thoughts about the pre-lab reading material with your table. As a group, come up with specific concerns, if any, that you have with the approaches used or the criticisms about the approaches. Game time! (40 mins) We have machine learning at our fingertips, and we've seen some of the dangers. Now we're going to spend this week on a game. In this game, we have two goals: 1) We want to build the best predictor that we can, but 2)at all times we want to have an accurate idea of how well the predictor works. For this game, we've managed to get our hands on some data about two diseases (D1 and D2). Each of these datasets has features in columns and examples in rows. Each feature represents a clinical measurement, while each row represents a person. We want to be able to predict whether or not a person has a disease (the last column). We'll supply you with four datasets for each disease throughout the week. For the first day, we've given you two of them. We also provide example code to read the data. From there, the path that you take is up to you. We do not know the best predictor or even what the maximum achievable accuracy for these data! This is a chance to experiment and find out what best captures disease status. 
We can use anything in the scikit-learn toolkit. It's a powerful set of tools. We use it regularly in our own lab, so this exercise is hands on with the real thing. First, let's get loading both datasets out of the way: End of explanation """ # First we need to import svms from sklearn from sklearn.svm import SVC """ Explanation: Implement an SVM! We've already learned about support vector machines. Now we're going to implement one. We need to find out how to use this thing! We ran some code in the previous notebook that did this for us, but now we need to make things work on our own. Googling for "svm sklearn classifier" gets us to this page. This page has documentation for the package. Partway down the page, we see: "SVC, NuSVC and LinearSVC are classes capable of performing multi-class classification on a dataset." As we keep reading, we see that SVC provides an implementation. Let's try that! We get to the documentation for SVC and it says many things. At the top, there's a box that says: class sklearn.svm.SVC(C=1.0, kernel='rbf', degree=3, gamma='auto', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape=None, random_state=None) How should we interpret all of this? The first part tells us where a function lives, so the SVC function lives in sklearn.svm. It seems we're going to need to import it from there. End of explanation """ # Get an SVC with default parameters as our algorithm classifier = SVC() # Fit the classifier to our datasets classifier.fit(X1, y1) # Apply the classifier back to our data and get an accuracy measure train_score = classifier.score(X1, y1) # Print the accuracy print(train_score) """ Explanation: The parts inside the parentheses give us the ability to set or change parameters. Anything with an equals sign after it has a default parameter set. In this case, the default C is set to 1.0. 
There's also a box that gives some description of what each parameter is (only a few of them may make sense to us right now). If we scroll to the bottom of the box, we'll get some examples provided by the helpful sklearn team, though they don't know about the names of our datasets. They'll often use the standard name X for features and y for labels. Let's go ahead and run an SVM using all the defaults on our data End of explanation """ # Get an SVC with a high C classifier = SVC(C = 100) # Fit the classifier to our datasets classifier.fit(X1, y1) # Apply the classifier back to our data and get an accuracy measure train_score = classifier.score(X1, y1) # Print the accuracy print(train_score) import sklearn """ Explanation: Ouch! Only about 50% accuracy. That's painful! We learned that we could modify C to make the algorithm try to fit the data we show it better. Let's ramp up C and see what happens! End of explanation """ # Import the function to split our data: from sklearn.cross_validation import train_test_split # Split things into training and testing - let's have 30% of our data end up as testing X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=.33) """ Explanation: Nice! 100% accuracy. This seems like we're on the right track. What we'd really like to do is figure out how we do on held out testing data though. Fortunately, sklearn provides a helper function to make holding out some of the data easy. This function is called train_test_split and we can find its documentation. If we weren't sure where to go, the sklearn documentation has a full section on cross validation. Note: Software changes over time. The current release of sklearn on CoCalc is 0.17. There's a new version, 0.18, also available. There are also minor version numbers (e.g. the final 1 in 0.17.1). These don't change functionality. Between the two major versions the location of the train_test_split function changed. 
If you ever want to know what version of sklearn you're working with, you can create a code block and run this code: import sklearn print(sklearn.__version__) Make sure that when you look at the documentation, you choose the version that matches what you're working with. Let's go ahead and split our data into training and testing portions. End of explanation """ # Get an SVC again using C = 100 classifier = SVC(C = 100) # Fit the classifier to the training data: classifier.fit(X1_train, y1_train) # Now we're going to apply it to the training labels first: train_score = classifier.score(X1_train, y1_train) # We're also going to applying it to the testing labels: test_score = classifier.score(X1_test, y1_test) print("Training Accuracy: " + str(train_score)) print("Testing Accuracy: " + str(test_score)) """ Explanation: Now let's go ahead and train our classifier on the training data and test it on some held out test data End of explanation """ # First, we need to import the classifier from sklearn.tree import DecisionTreeClassifier # Now we're going to get a decision tree classifier with the default parameters classifier = DecisionTreeClassifier() # The 'fit' syntax is the same classifier.fit(X1_train, y1_train) # As is the 'score' syntax train_score = classifier.score(X1_train, y1_train) test_score = classifier.score(X1_test, y1_test) print("Training Accuracy: " + str(train_score)) print("Testing Accuracy: " + str(test_score)) """ Explanation: Nice! Now we can see that while our training accuracy is very high, our testing accuracy is much lower. We could say that our model has "overfit" to the data. We learned about overfitting before. You'll get a chance to play with this SVM a bit more below. Before we move to that though, we want to show you how easy it is to use a different classifier. You might imagine that a classifier could be composed of a cascading series of rules. If this is true, then consider that. Otherwise, consider this other thing. 
This type of algorithm is called a decision tree, and we're going to rain one now. sklearn has a handy decision tree classifier that we can use. By using the SVM classifier, we've already learned most of what we need to know to use it. End of explanation """ # Now we're going to get a decision tree classifier with selected parameters classifier = DecisionTreeClassifier(max_features=8, max_depth=3) # The 'fit' syntax is the same classifier.fit(X1_train, y1_train) # As is the 'score' syntax train_score = classifier.score(X1_train, y1_train) test_score = classifier.score(X1_test, y1_test) print("Training Accuracy: " + str(train_score)) print("Testing Accuracy: " + str(test_score)) """ Explanation: Oof! That's pretty overfit! We're perfect on the training data but basically flipping a coin on the held out data. A DecisionTreeClassifier has two parameters max_features and max_depth that can really help us prevent overfitting. Let's train a very small tree (no more than 8 features) that's very short (no more than 3 deep). End of explanation """
dkirkby/quantum-demo
jupyter/WavePacket.ipynb
mit
%pylab inline import matplotlib.animation from IPython.display import HTML """ Explanation: Wave Packets End of explanation """ def solve(k0=10., sigmax=0.25, V0=0., mass=1., tmax=0.25, nwave=15, nx=500, nt=10): """ Solve for the evolution of a 1D Gaussian wave packet. Parameters ---------- k0 : float Central wavenumber, which determines the group velocity of the wave packet and can be negative, zero, or positive. sigmax : float Initial wave packet sigma. Smaller values lead to faster spreading. V0 : float Constant potential in units of hbar. Changes the (unphysical) phase velocities but not the group velocity. mass : float Particle mass in units of hbar. tmax : float Amount of time to simulate on a uniform grid in arbitrary units. nwave : int Wave packet is approximated by 2*nwave+1 plane waves centered on k0. nx : int Number of grid points to use in x. nt : int Number of grid points to use in t. """ t = np.linspace(0, tmax, nt).reshape(-1, 1) # Calculate the group velocity at k0. vgroup0 = k0 / mass # Calculate the distance traveled by the wave packet. dist = np.abs(vgroup0) * tmax # Calculate the spreading of the wave packet. spread = np.sqrt((sigmax ** 4 + (t / mass) ** 2) / sigmax ** 2) # Calculate an x-range that keeps the packet visible during tmax. nsigmas = 1.5 tails = nsigmas * (spread[0] + spread[-1]) xrange = max(tails + dist, 2 * tails) x0 = nsigmas * spread[0] + 0.5 * (xrange - tails) - 0.5 * vgroup0 * tmax - 0.5 * xrange x = np.linspace(-0.5 * xrange, 0.5 * xrange, nx) - x0 # Build grid of k values to use, centered on k0. nsigmas = 2.0 sigmak = 1. / sigmax k = k0 + sigmak * np.linspace(-nsigmas, +nsigmas, 2 * nwave + 1).reshape(-1, 1, 1) # Calculate coefficients c(k). ck = np.exp(-0.5 * (k - k0) ** 2 * sigmax ** 2) # Calculate omega(k) omega = k ** 2 / (2 * mass) + V0 # Calculate the (un-normalized) evolution of each wavenumber. psi = ck * np.cos(k * x - omega * t) # Calculate the (x,y) coordinates of a tracer for each wavenumber. 
xtrace = np.zeros((nt, 2 * nwave + 1)) nz = k != 0 xtrace[:, nz.ravel()] = ((k[nz] / (2 * mass) + V0 / k[nz]) * t) ytrace = ck.reshape(-1) # Calculate the motion of the center of the wave packet. xcenter = vgroup0 * t return x, psi, xtrace, ytrace, xcenter """ Explanation: A particle with total energy $E$ in a region of constant potential $V_0$ has a wave number $$ k = \pm \frac{2m}{\hbar^2}(E - V_0) $$ and dispersion relation $$ \omega(k) = \frac{E(k)}{\hbar} = \frac{\hbar k^2}{2m} + \frac{V_0}{\hbar} $$ leading to phase and group velocities $$ v_p(k) = \frac{\omega(k)}{k} = \frac{\hbar k}{2 m} + \frac{V_0}{\hbar k} \quad, \quad v_g(k) = \frac{d\omega}{dk}(k) = \frac{\hbar k}{m} \; . $$ Consider an initial state at $t=0$ that is a normalized Gaussian wavepacket $$ \Psi(x,0) = \pi^{-1/4} \sigma_x^{-1/2} e^{i k_0 x}\, \exp\left(-\frac{1}{2} \frac{x^2}{\sigma_x^2}\right) $$ with a central group velocity $v_g = \hbar k_0 / m$. We can expand this state's time evolution in plane waves as $$ \Psi(x,t) = \frac{1}{\sqrt{2\pi}} \int_{-\infty}^{+\infty} c(k) \exp\left[ i (k x - \omega(k) t)\right]\, dk $$ with coefficients $$ c(k) = \frac{1}{\sqrt{2\pi}}\,\int_{-\infty}^{+\infty} \Psi(x,0)\, e^{-i k x} dx = \pi^{-1/4} \sigma_x^{1/2}\, \exp\left( -\frac{1}{2} (k - k_0)^2 \sigma_x^2\right) \; . $$ Approximate the integral over $k$ with a discrete sum of values $k_i$ centered on $k_0$ then the (un-normalized) real part of the wave function is $$ \text{Re}\Psi(x,t) \simeq \sum_{i=-N}^{+N} c(k_i) \cos\left[ k x_i - \left(\frac{\hbar k_i^2}{2m} + \frac{V_0}{\hbar}\right) t \right] \; . $$ End of explanation """ def animate(k0=10., sigmax=0.25, V0=0., mass=1., nt=30, save=None, height=480, width=720): x, psi, xt, yt, xc = solve(k0, sigmax, V0, mass, nt=nt) nw, nt, nx = psi.shape nwave = (nw - 1) // 2 psi_sum = np.sum(psi, axis=0) ymax = 1.02 * np.max(np.abs(psi_sum)) dy = 0.95 * ymax * np.arange(-nwave, nwave + 1) / nwave assert len(dy) == nw artists = [] dpi = 100. 
dot = 0.006 * height figure = plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi, frameon=False) ax = figure.add_axes([0, 0, 1, 1]) plt.axis('off') for i in range(2 * nwave + 1): artists += ax.plot(x, psi[i, 0] + dy[i], lw=2 * dot, c='b', alpha=0.2) artists.append(ax.axvline(xc[0], c='r', ls='-', lw=dot, alpha=0.4)) artists += ax.plot(xt[0], yt + dy, 'b.', ms=2.5 * dot, alpha=0.4, lw=0) artists += ax.plot(x, psi_sum[0], 'r-', lw=2.5 * dot, alpha=0.5) ax.set_xlim(x[0], x[-1]) ax.set_ylim(-ymax, +ymax) def init(): return artists def update(j): for i in range(2 * nwave + 1): artists[i].set_ydata(psi[i, j] + dy[i]) artists[-3].set_xdata([xc[j], xc[j]]) artists[-2].set_xdata(xt[j]) artists[-1].set_ydata(psi_sum[j]) return artists animation = matplotlib.animation.FuncAnimation( figure, update, init_func=init, frames=nt, repeat=True, blit=True) if save: meta = dict( title='Gaussian quantum wavepacket superposition in 1D', artist='David Kirkby <dkirkby@uci.edu>', comment='https://dkirkby.github.io/quantum-demo/', copyright='Copyright (c) 2018 David Kirkby') engine = 'imagemagick' if save.endswith('.gif') else 'ffmpeg' writer = matplotlib.animation.writers[engine](fps=30, metadata=meta) animation.save(save, writer=writer) return animation """ Explanation: Build an animation using the solver above. Each frame shows: - The real part of the component plane waves in blue, with increasing $k$ (decreasing $\lambda$) moving up the plot. Each component is vertically offset and has an amplitude of $c(k)$. - The sum of each plane is shown in red and represents the real part of the wave packet wave function. - Blue dots trace the motion of each plane from the initial wave packet center, each traveling at their phase velocity $v_p(k)$. - A red vertical line moves with the central group velocity $v_g(k_0)$. 
The main points to note are: - In the intial frame, the blue plane waves all interfere constructively at the center of the wave packet, but become progressively more incoherent moving away from the peak. - Each blue plane wave propagates at a different phase velocity $v_p(k)$, causing the blue tracer dots to separate horizontally over time. - Changes in the constant potential $V_0$ lead to different phase velocities but an identical combined red wave function and group velocity. - The center of the wave packet travels at the central group velocity $v_g(k_0)$ indicated by the vertical red line. - The red wave packet spreads as it propagates. End of explanation """ animate(V0=-50, nt=300, save='wavepacket0.mp4'); animate(V0=0, nt=300, height=480, width=720, save='wavepacket1.mp4'); animate(V0=50, nt=300, height=480, width=720, save='wavepacket2.mp4'); """ Explanation: Simulate with different values of the constant potential: $$ \begin{aligned} V_0 &= -50 \quad &\Rightarrow \quad v_p(k_0) &= 0 \ V_0 &= 0 \quad &\Rightarrow \quad v_p(k_0) &= v_g(k_0) / 2 \ V_0 &= +50 \quad &\Rightarrow \quad v_p(k_0) &= v_g(k_0) \ \end{aligned} $$ End of explanation """ #HTML(animate().to_html5_video()) """ Explanation: Uncomment and run the line below to display an animation inline: End of explanation """ animate(V0=0, nt=100, height=150, width=200, save='wavepacket1.gif'); """ Explanation: Convert video to the open-source Theora format using, e.g. ffmpeg -i wavepacket0.mp4 -codec:v libtheora -qscale:v 7 wavepacket0.ogv Produce a smaller GIF animation for wikimedia. Note that this file is slightly larger (~1Mb) than the MP4 files, despite the lower quality. End of explanation """
adukic/nd101
tv-script-generation/dlnd_tv_script_generation.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper data_dir = './data/simpsons/moes_tavern_lines.txt' text = helper.load_data(data_dir) # Ignore notice, since we don't use it for analysing the data text = text[81:] """ Explanation: TV Script Generation In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern. Get the Data The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc.. End of explanation """ view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) scenes = text.split('\n\n') print('Number of scenes: {}'.format(len(scenes))) sentence_count_scene = [scene.count('\n') for scene in scenes] print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene))) sentences = [sentence for scene in scenes for sentence in scene.split('\n')] print('Number of lines: {}'.format(len(sentences))) word_count_sentence = [len(sentence.split()) for sentence in sentences] print('Average number of words in each line: {}'.format(np.average(word_count_sentence))) print() print('The sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) """ Explanation: Explore the Data Play around with view_sentence_range to view different parts of the data. 
End of explanation """ import numpy as np import problem_unittests as tests def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ vocab = set(text) vocab_to_int = {c: i for i, c in enumerate(vocab)} int_to_vocab = dict(enumerate(vocab)) return vocab_to_int, int_to_vocab """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) """ Explanation: Implement Preprocessing Functions The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below: - Lookup Table - Tokenize Punctuation Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call vocab_to_int - Dictionary to go from the id to word, we'll call int_to_vocab Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab) End of explanation """ def token_lookup(): """ Generate a dict to turn punctuation into a token. :return: Tokenize dictionary where the key is the punctuation and the value is the token """ punctuation_={ ".": "||period||", ",": "||comma||", '"': "||quotation_mark||", ";": "||semicolon||", "!": "||exclamation_mark||", "?": "||question_mark||", "(": "||left_parentheses", ")": "||right_parentheses", "--": "||dash||", "\n": "||return||" } return punctuation_ """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) """ Explanation: Tokenize Punctuation We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!". Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" 
into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token: - Period ( . ) - Comma ( , ) - Quotation Mark ( " ) - Semicolon ( ; ) - Exclamation mark ( ! ) - Question mark ( ? ) - Left Parentheses ( ( ) - Right Parentheses ( ) ) - Dash ( -- ) - Return ( \n ) This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||". End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) """ Explanation: Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import numpy as np import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. 
Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) """ Explanation: Build the Neural Network You'll build the components necessary to build a RNN by implementing the following functions below: - get_inputs - get_init_cell - get_embed - build_rnn - build_nn - get_batches Check the Version of TensorFlow and Access to GPU End of explanation """ def get_inputs(): """ Create TF Placeholders for input, targets, and learning rate. :return: Tuple (input, targets, learning rate) """ input_ = tf.placeholder(tf.int32, [None, None], name="input") targets = tf.placeholder(tf.int32, [None, None], name="targets") learning_rate = tf.placeholder(tf.float32, None, name="lr") return input_, targets, learning_rate """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_inputs(get_inputs) """ Explanation: Input Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: - Input text placeholder named "input" using the TF Placeholder name parameter. - Targets placeholder - Learning Rate placeholder Return the placeholders in the following the tuple (Input, Targets, LearingRate) End of explanation """ n_rnn_layers = 1 def get_init_cell(batch_size, rnn_size): """ Create an RNN Cell and initialize it. :param batch_size: Size of batches :param rnn_size: Size of RNNs :return: Tuple (cell, initialize state) """ lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=0.5) cell = tf.contrib.rnn.MultiRNNCell([drop] * n_rnn_layers) initial_state = tf.identity(cell.zero_state(batch_size, tf.float32), name="initial_state") return cell, initial_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_init_cell(get_init_cell) """ Explanation: Build RNN Cell and Initialize Stack one or more BasicLSTMCells in a MultiRNNCell. 
- The Rnn size should be set using rnn_size - Initalize Cell State using the MultiRNNCell's zero_state() function - Apply the name "initial_state" to the initial state using tf.identity() Return the cell and initial state in the following tuple (Cell, InitialState) End of explanation """ def get_embed(input_data, vocab_size, embed_dim): """ Create embedding for <input_data>. :param input_data: TF placeholder for text input. :param vocab_size: Number of words in vocabulary. :param embed_dim: Number of embedding dimensions :return: Embedded input. """ embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1), dtype=tf.float32) embedding = tf.nn.embedding_lookup(embedding, input_data) return embedding """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_embed(get_embed) """ Explanation: Word Embedding Apply embedding to input_data using TensorFlow. Return the embedded sequence. End of explanation """ def build_rnn(cell, inputs): """ Create a RNN using a RNN Cell :param cell: RNN Cell :param inputs: Input text data :return: Tuple (Outputs, Final State) """ outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) return outputs, tf.identity(final_state, name="final_state") """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_rnn(build_rnn) """ Explanation: Build RNN You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN. 
- Build the RNN using the tf.nn.dynamic_rnn() - Apply the name "final_state" to the final state using tf.identity() Return the outputs and final_state state in the following tuple (Outputs, FinalState) End of explanation """ def build_nn(cell, rnn_size, input_data, vocab_size): """ Build part of the neural network :param cell: RNN cell :param rnn_size: Size of rnns :param input_data: Input data :param vocab_size: Vocabulary size :return: Tuple (Logits, FinalState) """ embedding = get_embed(input_data, vocab_size, rnn_size) outputs, final_state = build_rnn(cell, embedding) logits = tf.contrib.layers.fully_connected(outputs, num_outputs=vocab_size, activation_fn=None, weights_initializer = tf.truncated_normal_initializer(0.1), biases_initializer=tf.zeros_initializer()) return logits, final_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_nn(build_nn) """ Explanation: Build the Neural Network Apply the functions you implemented above to: - Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function. - Build RNN using cell and your build_rnn(cell, inputs) function. - Apply a fully connected layer with a linear activation and vocab_size as the number of outputs. 
Return the logits and final state in the following tuple (Logits, FinalState) End of explanation """ def get_batches(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: Batches as a Numpy array """ n_batches = int(len(int_text) / (batch_size * seq_length)) xdata = np.array(int_text[: n_batches * batch_size * seq_length]) ydata = np.array(int_text[1: n_batches * batch_size * seq_length + 1]) x_batches = np.split(xdata.reshape(batch_size, -1), n_batches, 1) y_batches = np.split(ydata.reshape(batch_size, -1), n_batches, 1) return np.asarray(list(zip(x_batches, y_batches))) def get_batches2(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: Batches as a Numpy array """ n_batches = len(int_text) // (batch_size * seq_length) int_text = int_text[:n_batches * (batch_size * seq_length)] batches = [] for i in range(0, len(int_text), (batch_size * seq_length)): x_batches = [] y_batches = [] x = int_text[i:i + (batch_size * seq_length)] y = int_text[i + 1:i + (batch_size * seq_length) + 1] for ii in range(batch_size): x_batches.append(x[ii:ii+seq_length]) y_batches.append(y[ii:ii+seq_length]) batches.append([x_batches, y_batches]) return np.array(batches) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_batches(get_batches) """ Explanation: Batches Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). 
Each batch contains two elements: - The first element is a single batch of input with the shape [batch size, sequence length] - The second element is a single batch of targets with the shape [batch size, sequence length] If you can't fill the last batch with enough data, drop the last batch. For exmple, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) would return a Numpy array of the following: ``` [ # First Batch [ # Batch of Input [[ 1 2 3], [ 7 8 9]], # Batch of targets [[ 2 3 4], [ 8 9 10]] ], # Second Batch [ # Batch of Input [[ 4 5 6], [10 11 12]], # Batch of targets [[ 5 6 7], [11 12 13]] ] ] ``` End of explanation """ # Number of Epochs num_epochs = 100 # Batch Size batch_size = 256 # RNN Size rnn_size = 512 # Sequence Length seq_length = 50 # Learning Rate learning_rate = 0.01 # Show stats for every n number of batches show_every_n_batches = 10 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ save_dir = './save' """ Explanation: Neural Network Training Hyperparameters Tune the following parameters: Set num_epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set seq_length to the length of sequence. Set learning_rate to the learning rate. Set show_every_n_batches to the number of batches the neural network should print progress. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from tensorflow.contrib import seq2seq train_graph = tf.Graph() with train_graph.as_default(): vocab_size = len(int_to_vocab) input_text, targets, lr = get_inputs() input_data_shape = tf.shape(input_text) cell, initial_state = get_init_cell(input_data_shape[0], rnn_size) logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function cost = seq2seq.sequence_loss( logits, targets, tf.ones([input_data_shape[0], input_data_shape[1]])) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients] train_op = optimizer.apply_gradients(capped_gradients) """ Explanation: Build the Graph Build the graph using the neural network you implemented. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ batches = get_batches(int_text, batch_size, seq_length) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(num_epochs): state = sess.run(initial_state, {input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { input_text: x, targets: y, initial_state: state, lr: learning_rate} train_loss, state, _ = sess.run([cost, final_state, train_op], feed) # Show every <show_every_n_batches> batches if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0: print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format( epoch_i, batch_i, len(batches), train_loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_dir) print('Model Trained and Saved') """ Explanation: Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params((seq_length, save_dir)) """ Explanation: Save Parameters Save seq_length and save_dir for generating a new TV script. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() seq_length, load_dir = helper.load_params() """ Explanation: Checkpoint End of explanation """ def get_tensors(loaded_graph): """ Get input, initial state, final state, and probabilities tensor from <loaded_graph> :param loaded_graph: TensorFlow graph loaded from file :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) """ input_ = loaded_graph.get_tensor_by_name("input:0") initial_state_ = loaded_graph.get_tensor_by_name("initial_state:0") final_state_ = loaded_graph.get_tensor_by_name("final_state:0") probs_ = loaded_graph.get_tensor_by_name("probs:0") return input_, initial_state_, final_state_, probs_ """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_tensors(get_tensors) """ Explanation: Implement Generate Functions Get Tensors Get tensors from loaded_graph using the function get_tensor_by_name(). 
Get the tensors using the following names: - "input:0" - "initial_state:0" - "final_state:0" - "probs:0" Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) End of explanation """ def pick_word(probabilities, int_to_vocab): """ Pick the next word in the generated text :param probabilities: Probabilites of the next word :param int_to_vocab: Dictionary of word ids as the keys and words as the values :return: String of the predicted word """ return int_to_vocab[np.argmax(probabilities)] """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_pick_word(pick_word) """ Explanation: Choose Word Implement the pick_word() function to select the next word using probabilities. End of explanation """ gen_length = 200 # homer_simpson, moe_szyslak, or Barney_Gumble prime_word = 'moe_szyslak' """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_dir + '.meta') loader.restore(sess, load_dir) # Get Tensors from loaded model input_text, initial_state, final_state, probs = get_tensors(loaded_graph) # Sentences generation setup gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {input_text: np.array([[1]])}) # Generate sentences for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [probs, final_state], {input_text: dyn_input, initial_state: prev_state}) pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending = ' ' if key in ['\n', '(', '"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\n ', '\n') tv_script 
= tv_script.replace('( ', '(') print(tv_script) """ Explanation: Generate TV Script This will generate the TV script for you. Set gen_length to the length of TV script you want to generate. End of explanation """
apryor6/apryor6.github.io
visualizations/seaborn/.ipynb_checkpoints/colors-checkpoint.ipynb
mit
%matplotlib inline import pandas as pd import matplotlib.pyplot as plt import seaborn as sns plt.rcParams['figure.figsize'] = (20.0, 10.0) df = pd.read_csv('../../datasets/movie_metadata.csv') df.head() """ Explanation: seaborn.countplot Bar graphs are useful for displaying relationships between categorical data and at least one numerical variable. seaborn.countplot is a barplot where the dependent variable is the number of instances of each instance of the independent variable. dataset: IMDB 5000 Movie Dataset End of explanation """ # split each movie's genre list, then form a set from the unwrapped list of all genres categories = set([s for genre_list in df.genres.unique() for s in genre_list.split("|")]) # one-hot encode each movie's classification for cat in categories: df[cat] = df.genres.transform(lambda s: int(cat in s)) # drop other columns df = df[['director_name','genres','duration'] + list(categories)] df.head() # convert from wide to long format and remove null classificaitons df = pd.melt(df, id_vars=['duration'], value_vars = list(categories), var_name = 'Category', value_name = 'Count') df = df.loc[df.Count>0] # add an indicator whether a movie is short or long, split at 100 minutes runtime df['islong'] = df.duration.transform(lambda x: int(x > 100)) # sort in descending order #df = df.loc[df.groupby('Category').transform(sum).sort_values('Count', ascending=False).index] df.head() """ Explanation: For the bar plot, let's look at the number of movies in each category, allowing each movie to be counted more than once. 
End of explanation """ p = sns.countplot(data=df, x = 'Category') """ Explanation: Basic plot End of explanation """ p = sns.countplot(data=df, x = 'Category', hue = 'islong') """ Explanation: color by a category End of explanation """ p = sns.countplot(data=df, y = 'Category', hue = 'islong') """ Explanation: make plot horizontal End of explanation """ p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.5) """ Explanation: Saturation End of explanation """ p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = 'deep') p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = 'muted') p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = 'pastel') p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = 'bright') p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = 'dark') p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = 'colorblind') p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = ((50/255, 132/255.0, 191/255.0), (255/255.0, 232/255.0, 0/255.0))) p = sns.countplot(data=df, y = 'Category', hue = 'islong', saturation=.9, palette = 'Dark2') help(sns.color_palette) help(sns.countplot) df.dtypes p.get_figure().savefig('../figures/barplot.png') """ Explanation: Various palettes End of explanation """
tpin3694/tpin3694.github.io
python/creating_counts_of_items.ipynb
mit
from collections import Counter """ Explanation: Title: Create Counts Of Items Slug: creating_counts_of_items Summary: Create Counts Of Items in Python. Date: 2016-01-23 12:00 Category: Python Tags: Basics Authors: Chris Albon Interesting in learning more? Check out Fluent Python Preliminaries End of explanation """ # Create a counter of the fruits eaten today fruit_eaten = Counter(['Apple', 'Apple', 'Apple', 'Banana', 'Pear', 'Pineapple']) # View counter fruit_eaten """ Explanation: Create A Counter End of explanation """ # Update the count for 'Pineapple' (because you just ate an pineapple) fruit_eaten.update(['Pineapple']) # View the counter fruit_eaten """ Explanation: Update The Count For An Element End of explanation """ # View the items with the top 3 counts fruit_eaten.most_common(3) """ Explanation: View The Items With The Highest Counts End of explanation """
QuantStack/quantstack-talks
2019-06-26-GeoPython/notebooks/vaex.ipynb
bsd-3-clause
import vaex import numpy as np np.warnings.filterwarnings('ignore') dstaxi = vaex.open('src/nyc_taxi2015.hdf5') # mmapped, doesn't cost extra memory dstaxi.plot_widget("pickup_longitude", "pickup_latitude", f="log", backend="ipyleaflet", shape=600) dstaxi.plot_widget("dropoff_longitude", "dropoff_latitude", f="log", backend="ipyleaflet", z="dropoff_hour", type="slice", z_shape=24, shape=400, z_relative=True, limits=[None, None, (-0.5, 23.5)]) ds = vaex.datasets.helmi_de_zeeuw.fetch() ds.plot_widget("x", "y", f="log", limits=[-20, 20]) ds.plot_widget("Lz", "E", f="log") """ Explanation: Ipyleaflet with vaex Repository: https://github.com/vaexio/vaex Installation: conda install -c conda-forge vaex End of explanation """ import ipyvolume as ipv import numpy as np np.warnings.filterwarnings('ignore') ipv.example_ylm(); N = 1000 x, y, z = np.random.random((3, N)) fig = ipv.figure() scatter = ipv.scatter(x, y, z, marker='box') ipv.show() scatter.x = scatter.x + 0.1 scatter.color = "green" scatter.size = 5 scatter.color = np.random.random((N,3)) scatter.size = 2 ipv.figure() ipv.style.use('dark') quiver = ipv.quiver(*ipv.datasets.animated_stream.fetch().data[:,::,::4], size=5) ipv.animation_control(quiver, interval=200) ipv.show() ipv.style.use('light') quiver.size = np.random.random(quiver.x.shape) * 10 quiver.color = np.random.random(quiver.x.shape + (3,)) quiver.geo = "cat" # stereo quiver.geo = "arrow" N = 1000*1000 x, y, z = np.random.random((3, N)).astype('f4') ipv.figure() s = ipv.scatter(x, y, z, size=0.2) ipv.show() s.size = 0.1 #ipv.screenshot(width=2048, height=2048) plot3d = ds.plot_widget("x", "y", "z", vx="vx", vy="vy", vz="vz", backend="ipyvolume", f="log1p", shape=100, smooth_pre=0.5) plot3d.vcount_limits = [50, 100000] plot3d.backend.quiver.color = "red" import ipywidgets as widgets widgets.ColorPicker() widgets.jslink((plot3d.backend.quiver, 'color'), (_, 'value')) ipv.save("kapteyn-lunch-talk-2018.html") !open kapteyn-lunch-talk-2018.html # webrtc demo 
if time permits """ Explanation: ipyvolume 3d plotting for Python in the Jupyter notebook based on IPython widgets using WebGL Glyphs, volume rendering, surfaces/meshes/lines/isosurfaces Live documentation http://ipyvolume.readthedocs.io/en/stable/ Installation $ conda install -c conda-forge ipyvolume $ pip install ipyvolume End of explanation """ import vaex #gaia = vaex.open("ws://gaia:9000/gaia-dr1") gaia = vaex.open('/Users/maartenbreddels/datasets/gaia/gaia-dr1-minimal_f4.hdf5') %matplotlib inline f"{len(gaia):,}" ra_dec_limits = [[0, 360], [-90, 90]] gaia.set_active_fraction(0.01) gaia.plot_widget("ra", "dec", limits=ra_dec_limits) gaia.mean("phot_g_mean_mag", selection=True) gaia.plot1d("phot_g_mean_mag", selection=False, n=True, limits=[10, 22]) gaia.plot1d("phot_g_mean_mag", selection=True, show=True, n=True, limits=[10, 22]) """ Explanation: A Billion stars in the Jupyter notebook End of explanation """
ES-DOC/esdoc-jupyterhub
notebooks/fio-ronm/cmip6/models/sandbox-2/toplevel.ipynb
gpl-3.0
# DO NOT EDIT ! from pyesdoc.ipython.model_topic import NotebookOutput # DO NOT EDIT ! DOC = NotebookOutput('cmip6', 'fio-ronm', 'sandbox-2', 'toplevel') """ Explanation: ES-DOC CMIP6 Model Properties - Toplevel MIP Era: CMIP6 Institute: FIO-RONM Source ID: SANDBOX-2 Sub-Topics: Radiative Forcings. Properties: 85 (42 required) Model descriptions: Model description details Initialized From: -- Notebook Help: Goto notebook help page Notebook Initialised: 2018-02-15 16:54:01 Document Setup IMPORTANT: to be executed each time you run the notebook End of explanation """ # Set as follows: DOC.set_author("name", "email") # TODO - please enter value(s) """ Explanation: Document Authors Set document authors End of explanation """ # Set as follows: DOC.set_contributor("name", "email") # TODO - please enter value(s) """ Explanation: Document Contributors Specify document contributors End of explanation """ # Set publication status: # 0=do not publish, 1=publish. DOC.set_publication_status(0) """ Explanation: Document Publication Specify document publication status End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: Document Table of Contents 1. Key Properties 2. Key Properties --&gt; Flux Correction 3. Key Properties --&gt; Genealogy 4. Key Properties --&gt; Software Properties 5. Key Properties --&gt; Coupling 6. Key Properties --&gt; Tuning Applied 7. Key Properties --&gt; Conservation --&gt; Heat 8. Key Properties --&gt; Conservation --&gt; Fresh Water 9. Key Properties --&gt; Conservation --&gt; Salt 10. Key Properties --&gt; Conservation --&gt; Momentum 11. Radiative Forcings 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O 15. 
Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect 24. Radiative Forcings --&gt; Aerosols --&gt; Dust 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt 28. Radiative Forcings --&gt; Other --&gt; Land Use 29. Radiative Forcings --&gt; Other --&gt; Solar 1. Key Properties Key properties of the model 1.1. Model Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Top level overview of coupled model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.model_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 1.2. Model Name Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Name of coupled model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 2. Key Properties --&gt; Flux Correction Flux correction properties of the model 2.1. Details Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how flux corrections are applied in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3. Key Properties --&gt; Genealogy Genealogy and history of the model 3.1. Year Released Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Year the model was released End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.2. CMIP3 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP3 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.3. CMIP5 Parent Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 CMIP5 parent if any End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 3.4. Previous Name Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Previously known as End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4. Key Properties --&gt; Software Properties Software properties of model 4.1. Repository Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Location of code for this component. End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.2. Code Version Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Code version identifier. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.3. Code Languages Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N Code language(s). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 4.4. Components Structure Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "OASIS" # "OASIS3-MCT" # "ESMF" # "NUOPC" # "Bespoke" # "Unknown" # "None" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 4.5. Coupler Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Overarching coupling framework for model. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 5. Key Properties --&gt; Coupling ** 5.1. 
Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of coupling in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.2. Atmosphere Double Flux Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Atmosphere grid" # "Ocean grid" # "Specific coupler grid" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 5.3. Atmosphere Fluxes Calculation Grid Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Where are the air-sea fluxes calculated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 5.4. Atmosphere Relative Winds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6. Key Properties --&gt; Tuning Applied Tuning methodology for model 6.1. 
Description Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency. End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.2. Global Mean Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List set of metrics/diagnostics of the global mean state used in tuning model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.3. Regional Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.4. Trend Metrics Used Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.N List observed trend metrics/diagnostics used in tuning model/component (such as 20th century) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.5. Energy Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 6.6. Fresh Water Balance Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7. Key Properties --&gt; Conservation --&gt; Heat Global heat convervation properties of the model 7.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how heat is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.5. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 7.6. Land Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how heat is conserved at the land/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8. 
Key Properties --&gt; Conservation --&gt; Fresh Water Global fresh water convervation properties of the model 8.1. Global Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh_water is conserved globally End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.2. Atmos Ocean Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.3. Atmos Land Interface Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Describe if/how fresh water is conserved at the atmosphere/land coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.4. Atmos Sea-ice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.5. 
Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.6. Runoff Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how runoff is distributed and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.7. Iceberg Calving Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how iceberg calving is modeled and conserved End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.8. Endoreic Basins Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how endoreic basins (no ocean access) are treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 8.9. Snow Accumulation Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe how snow accumulation over land and over sea-ice is treated End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 9. Key Properties --&gt; Conservation --&gt; Salt Global salt convervation properties of the model 9.1. Ocean Seaice Interface Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how salt is conserved at the ocean/sea-ice coupling interface End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 10. Key Properties --&gt; Conservation --&gt; Momentum Global momentum convervation properties of the model 10.1. Details Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Describe if/how momentum is conserved in the model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.overview') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 11. Radiative Forcings Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5) 11.1. Overview Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Overview of radiative forcings (GHG and aerosols) implementation in model End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 12. Radiative Forcings --&gt; Greenhouse Gases --&gt; CO2 Carbon dioxide forcing 12.1. 
Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 12.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 13. Radiative Forcings --&gt; Greenhouse Gases --&gt; CH4 Methane forcing 13.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 13.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). 
End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 14. Radiative Forcings --&gt; Greenhouse Gases --&gt; N2O Nitrous oxide forcing 14.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 14.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 15. Radiative Forcings --&gt; Greenhouse Gases --&gt; Tropospheric O3 Troposheric ozone forcing 15.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 15.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 16. Radiative Forcings --&gt; Greenhouse Gases --&gt; Stratospheric O3 Stratospheric ozone forcing 16.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 16.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17. Radiative Forcings --&gt; Greenhouse Gases --&gt; CFC Ozone-depleting and non-ozone-depleting fluorinated gases forcing 17.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "Option 1" # "Option 2" # "Option 3" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 17.2. Equivalence Concentration Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Details of any equivalence concentrations used End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 17.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 18. Radiative Forcings --&gt; Aerosols --&gt; SO4 SO4 aerosol forcing 18.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 18.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 19. Radiative Forcings --&gt; Aerosols --&gt; Black Carbon Black carbon aerosol forcing 19.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 19.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 20. Radiative Forcings --&gt; Aerosols --&gt; Organic Carbon Organic carbon aerosol forcing 20.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 20.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 21. Radiative Forcings --&gt; Aerosols --&gt; Nitrate Nitrate forcing 21.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 21.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 22. Radiative Forcings --&gt; Aerosols --&gt; Cloud Albedo Effect Cloud albedo effect forcing (RFaci) 22.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 22.2. Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 22.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 23. Radiative Forcings --&gt; Aerosols --&gt; Cloud Lifetime Effect Cloud lifetime effect forcing (ERFaci) 23.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.2. 
Aerosol Effect On Ice Clouds Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative effects of aerosols on ice clouds are represented? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 23.3. RFaci From Sulfate Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Radiative forcing from aerosol cloud interactions from sulfate aerosol only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 23.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 24. Radiative Forcings --&gt; Aerosols --&gt; Dust Dust forcing 24.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 24.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25. Radiative Forcings --&gt; Aerosols --&gt; Tropospheric Volcanic Tropospheric volcanic forcing 25.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 25.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26. Radiative Forcings --&gt; Aerosols --&gt; Stratospheric Volcanic Stratospheric volcanic forcing 26.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in historical simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # Valid Choices: # "Type A" # "Type B" # "Type C" # "Type D" # "Type E" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 How explosive volcanic aerosol is implemented in future simulations End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 26.4. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 27. Radiative Forcings --&gt; Aerosols --&gt; Sea Salt Sea salt forcing 27.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 27.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "M" # "Y" # "E" # "ES" # "C" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 28. Radiative Forcings --&gt; Other --&gt; Land Use Land use forcing 28.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.) End of explanation """ # PROPERTY ID - DO NOT EDIT ! 
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only') # PROPERTY VALUE: # Set as follows: DOC.set_value(value) # Valid Choices: # True # False # TODO - please enter value(s) """ Explanation: 28.2. Crop Change Only Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: BOOLEAN&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.1 Land use change represented via crop change only? End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 28.3. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision') # PROPERTY VALUE(S): # Set as follows: DOC.set_value("value") # Valid Choices: # "N/A" # "irradiance" # "proton" # "electron" # "cosmic ray" # "Other: [Please specify]" # TODO - please enter value(s) """ Explanation: 29. Radiative Forcings --&gt; Other --&gt; Solar Solar forcing 29.1. Provision Is Required: TRUE&nbsp;&nbsp;&nbsp;&nbsp;Type: ENUM&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 1.N How solar forcing is provided End of explanation """ # PROPERTY ID - DO NOT EDIT ! DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information') # PROPERTY VALUE: # Set as follows: DOC.set_value("value") # TODO - please enter value(s) """ Explanation: 29.2. Additional Information Is Required: FALSE&nbsp;&nbsp;&nbsp;&nbsp;Type: STRING&nbsp;&nbsp;&nbsp;&nbsp;Cardinality: 0.1 Additional information relating to the provision and implementation of this forcing agent (e.g. 
citations, use of non-standard datasets, explaining how multiple provisions are used, etc.). End of explanation """
massimo-nocentini/PhD
notebooks/pascal-array-doubly-indexed-unfolding.ipynb
apache-2.0
%run "../src/start_session.py" %run "../src/recurrences.py" import oeis """ Explanation: <p> <img src="http://www.cerm.unifi.it/chianti/images/logo%20unifi_positivo.jpg" alt="UniFI logo" style="float:left;width:20%;height:20%;"> <div align="right"> Massimo Nocentini<br> <small> <br>September 27, 2016: refactoring and sync </small> </div> </p> <div align="center"> <br><b>Abstract</b><br> In this notebook we study the *Pascal triangle*, locking at it recursively: using it's $A$-sequence, we perform a series of unfolding using the main recurrence relation, where subscripts dependends on *two* dimensions, as rewriting rule. This is a natural enhancement to the case of recurrences where subscripts have *one* dimension only, as the *Fibonacci* sequence; on the other hand, Pascal array is a deeply studied and well know triangle, yet simple, toy we can play with. </div> <br> End of explanation """ d = IndexedBase('d') n, k = symbols('n k') pascal_recurrence_spec = recurrence_spec(recurrence_eq=Eq(d[n+1, k+1], d[n, k] + d[n, k+1]), recurrence_symbol=d, variables=[n,k]) pascal_recurrence_spec unfolded = pascal_recurrence_spec.unfold(depth=2) unfolded instantiated = unfolded.instantiate(strategy=raw(substitutions={n:9,k:4})) instantiated known_binomials = {d[n,k]:binomial(n,k) for n in [7] for k in range(2,6)} checked = instantiated.instantiate(strategy=raw(substitutions=known_binomials)) checked checked.subsume() based_recurrence_spec = unfolded.instantiate(strategy=based(arity=doubly_indexed())) based_recurrence_spec based_recurrence_spec.subsume() ipython_latex_description(rec_spec=pascal_recurrence_spec, depths=range(6), arity=doubly_indexed()) """ Explanation: Pascal array $\mathcal{P}$ This notebook studies the Riordan array $\mathcal{P}$, aka the Pascal triangle, defined according to the following definition: $$\mathcal{P}=\left(\frac{1}{1-t}, \frac{t}{1-t}\right)$$ with $A$-sequence $A(t)=1+t$ and $Z$-sequence $Z(t)=1$. 
End of explanation """ s = oeis.oeis_search(id=7318) s() """ Explanation: OEIS content about $\mathcal{P}$ End of explanation """
akhambhati/rs-NMF_CogControl
Analysis_Notebooks/e02-Detect_Dynamic_Subgraphs.ipynb
gpl-3.0
try: %load_ext autoreload %autoreload 2 %reset except: print 'NOT IPYTHON' from __future__ import division import os os.environ['MKL_NUM_THREADS'] = '1' os.environ['NUMEXPR_NUM_THREADS'] = '1' os.environ['OMP_NUM_THREADS'] = '1' import sys import glob import numpy as np import pandas as pd import seaborn as sns import scipy.stats as stats import scipy.io as io import h5py import matplotlib import matplotlib.pyplot as plt from matplotlib import rcParams echobase_path = '/Users/akhambhati/Developer/hoth_research/Echobase' #echobase_path = '/data/jag/akhambhati/hoth_research/Echobase' sys.path.append(echobase_path) import Echobase convert_conn_vec_to_adj_matr = Echobase.Network.Transforms.configuration.convert_conn_vec_to_adj_matr convert_adj_matr_to_cfg_matr = Echobase.Network.Transforms.configuration.convert_adj_matr_to_cfg_matr subgraph = Echobase.Network.Partitioning.Subgraph rcParams = Echobase.Plotting.fig_format.update_rcparams(rcParams) path_Remotes = '/Users/akhambhati/Remotes' #path_Remotes = '/data/jag/bassett-lab/akhambhati' path_CoreData = path_Remotes + '/CORE.fMRI_cogcontrol.medaglia' path_PeriphData = path_Remotes + '/RSRCH.NMF_CogControl' path_InpData = path_PeriphData + '/e01-FuncNetw' path_ExpData = path_PeriphData + '/e02-FuncSubg' path_Figures = './e02-Figures' for path in [path_CoreData, path_PeriphData, path_ExpData, path_Figures]: if not os.path.exists(path): print('Path: {}, does not exist'.format(path)) os.makedirs(path) """ Explanation: Table of Contents <p><div class="lev1 toc-item"><a href="#Initialize-Environment" data-toc-modified-id="Initialize-Environment-1"><span class="toc-item-num">1&nbsp;&nbsp;</span>Initialize Environment</a></div><div class="lev1 toc-item"><a href="#Optimize-Dynamic-Subgraphs" data-toc-modified-id="Optimize-Dynamic-Subgraphs-2"><span class="toc-item-num">2&nbsp;&nbsp;</span>Optimize Dynamic Subgraphs</a></div><div class="lev2 toc-item"><a href="#Generate-Cross-Validation-Parameter-Sets" 
data-toc-modified-id="Generate-Cross-Validation-Parameter-Sets-21"><span class="toc-item-num">2.1&nbsp;&nbsp;</span>Generate Cross-Validation Parameter Sets</a></div><div class="lev2 toc-item"><a href="#SGE-Helper-Script-for-NMF-Cross-Validation" data-toc-modified-id="SGE-Helper-Script-for-NMF-Cross-Validation-22"><span class="toc-item-num">2.2&nbsp;&nbsp;</span>SGE Helper Script for NMF Cross-Validation</a></div><div class="lev2 toc-item"><a href="#Quality-Measures-in-Parameter-Space" data-toc-modified-id="Quality-Measures-in-Parameter-Space-23"><span class="toc-item-num">2.3&nbsp;&nbsp;</span>Quality Measures in Parameter Space</a></div><div class="lev1 toc-item"><a href="#Detect-Dynamic-Subgraphs" data-toc-modified-id="Detect-Dynamic-Subgraphs-3"><span class="toc-item-num">3&nbsp;&nbsp;</span>Detect Dynamic Subgraphs</a></div><div class="lev2 toc-item"><a href="#Map-NMF-Consensus-to-Identify-Seed-Subgraphs" data-toc-modified-id="Map-NMF-Consensus-to-Identify-Seed-Subgraphs-31"><span class="toc-item-num">3.1&nbsp;&nbsp;</span>Map NMF Consensus to Identify Seed Subgraphs</a></div><div class="lev2 toc-item"><a href="#Reduce-Seed-Subgraphs-to-Consensus-Subgraphs" data-toc-modified-id="Reduce-Seed-Subgraphs-to-Consensus-Subgraphs-32"><span class="toc-item-num">3.2&nbsp;&nbsp;</span>Reduce Seed Subgraphs to Consensus Subgraphs</a></div><div class="lev2 toc-item"><a href="#Generate-Surrogate-Subgraphs-using-Consensus-Subgraphs" data-toc-modified-id="Generate-Surrogate-Subgraphs-using-Consensus-Subgraphs-33"><span class="toc-item-num">3.3&nbsp;&nbsp;</span>Generate Surrogate Subgraphs using Consensus Subgraphs</a></div><div class="lev2 toc-item"><a href="#Plot-Subgraphs" data-toc-modified-id="Plot-Subgraphs-34"><span class="toc-item-num">3.4&nbsp;&nbsp;</span>Plot Subgraphs</a></div><div class="lev1 toc-item"><a href="#Detect-Dynamic-Subgraphs-(Split-half-validation)" data-toc-modified-id="Detect-Dynamic-Subgraphs-(Split-half-validation)-4"><span 
class="toc-item-num">4&nbsp;&nbsp;</span>Detect Dynamic Subgraphs (Split-half validation)</a></div><div class="lev2 toc-item"><a href="#Map-NMF-Consensus-to-Identify-Seed-Subgraphs" data-toc-modified-id="Map-NMF-Consensus-to-Identify-Seed-Subgraphs-41"><span class="toc-item-num">4.1&nbsp;&nbsp;</span>Map NMF Consensus to Identify Seed Subgraphs</a></div><div class="lev2 toc-item"><a href="#Reduce-Seed-Subgraphs-to-Consensus-Subgraphs" data-toc-modified-id="Reduce-Seed-Subgraphs-to-Consensus-Subgraphs-42"><span class="toc-item-num">4.2&nbsp;&nbsp;</span>Reduce Seed Subgraphs to Consensus Subgraphs</a></div><div class="lev2 toc-item"><a href="#Test-Retest-Reliability" data-toc-modified-id="Test-Retest-Reliability-43"><span class="toc-item-num">4.3&nbsp;&nbsp;</span>Test-Retest Reliability</a></div><div class="lev1 toc-item"><a href="#Subgraphs-of-Brain-Systems" data-toc-modified-id="Subgraphs-of-Brain-Systems-5"><span class="toc-item-num">5&nbsp;&nbsp;</span>Subgraphs of Brain Systems</a></div><div class="lev2 toc-item"><a href="#Load-Subgraphs-and-Expression" data-toc-modified-id="Load-Subgraphs-and-Expression-51"><span class="toc-item-num">5.1&nbsp;&nbsp;</span>Load Subgraphs and Expression</a></div><div class="lev2 toc-item"><a href="#Compute-a-Core-Periphery-Score" data-toc-modified-id="Compute-a-Core-Periphery-Score-52"><span class="toc-item-num">5.2&nbsp;&nbsp;</span>Compute a Core-Periphery Score</a></div><div class="lev2 toc-item"><a href="#Plot-Subgraph-Adjacency-Matrices" data-toc-modified-id="Plot-Subgraph-Adjacency-Matrices-53"><span class="toc-item-num">5.3&nbsp;&nbsp;</span>Plot Subgraph Adjacency Matrices</a></div><div class="lev1 toc-item"><a href="#Visualize-Subgraphs" data-toc-modified-id="Visualize-Subgraphs-6"><span class="toc-item-num">6&nbsp;&nbsp;</span>Visualize Subgraphs</a></div><div class="lev2 toc-item"><a href="#Surface-render-brain-systems" data-toc-modified-id="Surface-render-brain-systems-61"><span 
class="toc-item-num">6.1&nbsp;&nbsp;</span>Surface render brain systems</a></div><div class="lev3 toc-item"><a href="#Plot-brain-systems" data-toc-modified-id="Plot-brain-systems-611"><span class="toc-item-num">6.1.1&nbsp;&nbsp;</span>Plot brain systems</a></div><div class="lev2 toc-item"><a href="#Circle-Plot" data-toc-modified-id="Circle-Plot-62"><span class="toc-item-num">6.2&nbsp;&nbsp;</span>Circle Plot</a></div> # Initialize Environment End of explanation """ # Load configuration matrix df = np.load('{}/Population.Configuration_Matrix.npz'.format(path_InpData)) cfg_matr = df['cfg_matr'] cfg_obs_lut = df['cfg_obs_lut'] n_subj = len(df['cfg_key_label'][()]['Subject_ID']) # Generate folds n_fold = 4 n_obs_per_fold = np.size(cfg_obs_lut) / n_fold assert n_obs_per_fold.is_integer() n_obs_per_fold = int(n_obs_per_fold) rand_obs = np.random.permutation(np.size(cfg_obs_lut)) fold_obs = rand_obs.reshape(n_fold, n_obs_per_fold) fold_list = [list(ff) for ff in fold_obs] # Cross-Validation Progress str_path = '{}/NMF_CrossValidation.Progress.txt'.format(path_ExpData) if os.path.exists(str_path): os.remove(str_path) # Get parameter search space param_list = Echobase.Network.Partitioning.Subgraph.optimize_nmf.gen_random_sampling_paramset( rank_range=(3, 51), alpha_range=(1e-2, 5.0), beta_range=(1e-2, 5.0), n_param=1000, fold_list=fold_list, str_path=str_path) # Save param_list for sge run np.savez('{}/NMF_CrossValidation.Param_List.npz'.format(path_ExpData), param_list=param_list) """ Explanation: Optimize Dynamic Subgraphs Generate Cross-Validation Parameter Sets End of explanation """ # Map NMF xval to all the parameter sets job_str = './NMF_xval.py {} {} {}'.format(echobase_path, path_InpData, path_ExpData) qsub_str = 'qsub -t 1-{} {}'.format(len(param_list), job_str) os.chdir('./e02-SGE_Scripts/') !sh {qsub_str} os.chdir('../') # Reduce NMF xval output to a qmeas_list path_xval_out = glob.glob('{}/NMF_CrossValidation.Param.*.npz'.format(path_ExpData)) qmeas_list = 
[np.load(pth)['qmeas_dict'][()] for pth in path_xval_out] """ Explanation: SGE Helper Script for NMF Cross-Validation End of explanation """ param_list = np.load('{}/NMF_CrossValidation.Param_List.npz'.format(path_ExpData))['param_list'] all_param, opt_param = Echobase.Network.Partitioning.Subgraph.optimize_nmf.find_optimum_xval_paramset(param_list, qmeas_list, search_pct=5.0) n_bin = 100 srch_pct = 25.0 for param in ['rank', 'alpha', 'beta']: err_mean = stats.binned_statistic(all_param[param], all_param[qmeas], statistic=np.nanmean, bins=n_bin) err_std = stats.binned_statistic(all_param[param], all_param[qmeas], statistic=np.nanstd, bins=n_bin) mean_ix = np.flatnonzero(err_mean[0] < np.nanpercentile(err_mean[0], srch_pct)) std_ix = np.flatnonzero(err_std[0] < np.nanpercentile(err_std[0], srch_pct)) opt_param[param] = np.nanmean(err_mean[1][np.intersect1d(mean_ix, std_ix)]) if param == 'rank': opt_param[param] = int(np.round(opt_param[param])) print print('Optimal Rank: {}'.format(opt_param['rank'])) print('Optimal Alpha: {}'.format(opt_param['alpha'])) print('Optimal Beta: {}'.format(opt_param['beta'])) np.savez('{}/NMF_CrossValidation.Optimal_Param.npz'.format(path_ExpData), opt_param=opt_param, all_param=all_param) ### Amass all parameters and compute optima based on discussion in (Ng (1997). 
ICML) opt_dict = np.load('{}/NMF_CrossValidation.Optimal_Param.npz'.format(path_ExpData)) opt_param = opt_dict['opt_param'][()] all_param = opt_dict['all_param'][()] # Generate quality measure plots for qmeas in ['test_error']: for param in ['rank', 'alpha', 'beta']: ax_jp = sns.jointplot(all_param[param], all_param[qmeas], kind='kde', space=0, n_levels=60, shade_lowest=False) ax = ax_jp.ax_joint ax.plot([opt_param[param], opt_param[param]], [ax.get_ylim()[0], ax.get_ylim()[1]], lw=1.0, alpha=0.75, linestyle='--') ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') ax.set_xlabel(param) ax.set_ylabel(qmeas) plt.savefig('{}/NMF_Optimization.{}.{}.svg'.format(path_Figures, param, qmeas)) plt.show() plt.close() """ Explanation: Quality Measures in Parameter Space End of explanation """ # Map NMF consensus to all the parameter sets n_opt = 1000 job_str = './NMF_consensus_map.py {} {} {}'.format(echobase_path, path_InpData, path_ExpData) qsub_str = 'qsub -t 1-{} {}'.format(n_opt, job_str) os.chdir('./e02-SGE_Scripts/') !sh {qsub_str} os.chdir('../') """ Explanation: Detect Dynamic Subgraphs Map NMF Consensus to Identify Seed Subgraphs WARNING: Will Delete Existing Output End of explanation """ # Reduce NMF consensus from all seed subgraphs job_str = './NMF_consensus_reduce.py {} {} {}'.format(echobase_path, path_InpData, path_ExpData) qsub_str = 'qsub {}'.format(job_str) os.chdir('./e02-SGE_Scripts/') !sh {qsub_str} os.chdir('../') """ Explanation: Reduce Seed Subgraphs to Consensus Subgraphs WARNING: Will Delete Existing Output End of explanation """ # Map NMF surrogate n_opt = 1000 job_str = './NMF_surrogate_map.py {} {} {}'.format(echobase_path, path_InpData, path_ExpData) qsub_str = 'qsub -t 1-{} {}'.format(n_opt, job_str) os.chdir('./e02-SGE_Scripts/') !sh {qsub_str} os.chdir('../') """ for nn in xrange(n_opt): if not os.path.exists('{}/NMF_Surrogate.Param.{}.npz'.format(path_ExpData, nn)): qsub_str = 'qsub -t {}-{} {}'.format(nn+1, nn+1, 
job_str) os.chdir('./e02-SGE_Scripts/') !sh {qsub_str} os.chdir('../') """ """ Explanation: Generate Surrogate Subgraphs using Consensus Subgraphs End of explanation """ %matplotlib inline # Load the consensus data data = np.load("{}/NMF_Consensus.Param.All.npz".format(path_ExpData), mmap_mode='r') #data = np.load("{}/NMF_Surrogate.Param.50.npz".format(path_ExpData), mmap_mode='r') fac_subnet = data['fac_subnet'] fac_coef = data['fac_coef'] n_fac = fac_subnet.shape[0] n_conn = fac_subnet.shape[1] n_win = fac_coef.shape[1] # Plot subgraph matrix plt.figure() ax = plt.subplot(111) mat = ax.matshow(fac_subnet.T, aspect=float(n_fac)/n_conn, cmap='rainbow') #plt.colorbar(mat, ax=ax) ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') #ax.set_xticks(np.linspace(0, 80, 5)) ax.set_ylabel('Functional Interactions') ax.set_xlabel('Subgraphs') plt.savefig('{}/Subgraph-Cfg_Matrix.svg'.format(path_Figures)) plt.show() plt.close() # Plot subgraph adjacency plt.figure() n_row = np.floor(np.sqrt(n_fac)) n_col = np.ceil(n_fac / n_row) for ii, subg in enumerate(fac_subnet): adj = convert_conn_vec_to_adj_matr(subg) ax = plt.subplot(n_row, n_col, ii+1) mat = ax.matshow(adj, cmap='rainbow') #, vmin=0, vmax=1) #plt.colorbar(mat, ax=ax) ax.set_axis_off() plt.savefig('{}/Subgraph-Adj_Matrices.svg'.format(path_Figures)) plt.show() plt.close() # Plot Coefficients plt.figure() ax = plt.subplot(111) fac_coef = fac_coef.T norm_fac = fac_coef - fac_coef.mean(axis=0) for ff in xrange(n_fac): ax.plot(ff + norm_fac[:, ff] / (3*np.std(norm_fac[:, ff])), color=[66/256., 152/256., 221./256]) # Axis Settings ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') ax.set_ylim([-1, n_fac+1]) ax.set_xlim([0, int(n_win/28*3)]) ax.set_ylabel('Subgraphs') ax.set_xlabel('Time Windows') plt.savefig('{}/Subgraph-Coefs.svg'.format(path_Figures)) plt.show() plt.close() """ Explanation: Plot Subgraphs End of explanation """ # Map NMF consensus to all the parameter sets 
n_opt = 1000 job_str = './NMF_consensus_map-split_half.py {} {} {}'.format(echobase_path, path_InpData, path_ExpData) qsub_str = 'qsub -t 1-{} {}'.format(n_opt, job_str) os.chdir('./e02-SGE_Scripts/') !sh {qsub_str} os.chdir('../') """ Explanation: Detect Dynamic Subgraphs (Split-half validation) Map NMF Consensus to Identify Seed Subgraphs WARNING: Will Delete Existing Output End of explanation """ # Reduce NMF consensus from all seed subgraphs job_str = './NMF_consensus_reduce-split_half.py {} {} {}'.format(echobase_path, path_InpData, path_ExpData) qsub_str = 'qsub {}'.format(job_str) os.chdir('./e02-SGE_Scripts/') !sh {qsub_str} os.chdir('../') """ Explanation: Reduce Seed Subgraphs to Consensus Subgraphs WARNING: Will Delete Existing Output End of explanation """ import scipy.optimize as sciopt data_A = np.load('{}/NMF_Consensus.Param.All.npz'.format(path_ExpData), mmap_mode='r') fac_subnet_A = data_A['fac_subnet'] data_B = np.load('{}/NMF_Consensus.Param.A.All.npz'.format(path_ExpData), mmap_mode='r') fac_subnet_B = data_B['fac_subnet'] n_fac = fac_subnet_A.shape[0] cost_matrix = np.zeros((n_fac, n_fac)) for fac_ii in xrange(fac_subnet_A.shape[0]): for fac_jj in xrange(fac_subnet_B.shape[0]): cost_matrix[fac_ii, fac_jj] = np.linalg.norm(fac_subnet_A[fac_ii] - fac_subnet_B[fac_jj]) old_A_ii, new_A_ii = sciopt.linear_sum_assignment(cost_matrix) fac_subnet_A[new_A_ii, :] = fac_subnet_A[old_A_ii, :] true_rho = np.array([stats.pearsonr(fac_subnet_A[fac_i, :], fac_subnet_B[fac_i, :]) for fac_i in xrange(n_fac)]) true_rho, true_pv = true_rho[:, 0], true_rho[:, 1] null_rho = [] for fac_i in xrange(n_fac): for fac_j in xrange(n_fac): if fac_i == fac_j: continue null_rho.append(stats.pearsonr(fac_subnet_A[fac_i, :], fac_subnet_B[fac_j, :])[0]) null_rho = np.array(null_rho) ### Plot Test-Retest Reliability plt.figure(dpi=300) ax = plt.subplot(111) clr = [] all_pv = [] for rho in np.sort(true_rho)[::-1]: all_pv.append(np.mean(null_rho > rho)) for is_sig in 
Echobase.Statistics.FDR.fdr.bhp(all_pv): if is_sig: clr.append('b') else: clr.append('k') ax.bar(xrange(len(true_rho)), np.sort(true_rho)[::-1], color=clr, lw=0) ax.hlines(np.percentile(null_rho, 95), -1, n_fac, color='r', lw=1.0, linestyle='--'); ax.hlines(np.percentile(null_rho, 5), -1, n_fac, color='r', lw=1.0, linestyle='--'); ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') ax.set_xlabel('Subgraph Pairs') ax.set_ylabel('Pearson r Between Subgraphs') ax.set_xlim([-1, n_fac]) ax.set_ylim([-0.5, 1.0]) plt.savefig('{}/Test_Retest.svg'.format(path_Figures)) plt.show() """ Explanation: Test-Retest Reliability End of explanation """ # Grab the subgraphs and expression from consensus NMF df_nmf = np.load("{}/NMF_Consensus.Param.All.npz".format(path_ExpData), mmap_mode='r') fac_subnet = df_nmf['fac_subnet'] fac_coef = df_nmf['fac_coef'] n_fac = fac_subnet.shape[0] n_conn = fac_subnet.shape[1] n_node = np.int(np.ceil(np.sqrt(n_conn*2))) n_obs = fac_coef.shape[1] # Retrieve the configuration matrix df_cfg = np.load('{}/Population.Configuration_Matrix.npz'.format(path_InpData)) cfg_obs_lut = np.array(df_cfg['cfg_obs_lut'], dtype=np.int) # Retrieve the Yeo Systems Assignments for Lausanne 125 df_to_yeo = np.load('{}/Lausanne125_to_Yeo.npz'.format(path_InpData)) n_laus = len(df_to_yeo['yeo_lbl']) n_yeo = len(df_to_yeo['yeo_name']) """ Explanation: Subgraphs of Brain Systems Load Subgraphs and Expression End of explanation """ sys_subgraph_path = '{}/Yeo_Subgraph.All.npz'.format(path_ExpData) if os.path.exists(sys_subgraph_path): df_subg = np.load('{}/Yeo_Subgraph.All.npz'.format(path_ExpData)) else: system_subgraph = [] for fac_ii in xrange(n_fac): print('Processing: {} of {}'.format(fac_ii+1, n_fac)) sel_subnet = fac_subnet[fac_ii, :] sel_coef = fac_coef[fac_ii, :] adj_roi = convert_conn_vec_to_adj_matr(sel_subnet) # Compute Brain System Adjacency Matrices n_yeo = len(df_to_yeo['yeo_name']) adj_yeo = np.zeros((n_yeo, n_yeo)) # Permutation n_perm = 
10000 alpha = 0.05 / len(df_to_yeo['yeo_triu'][0]) sel_subnet_null = np.array([np.random.permutation(sel_subnet) for n_ii in xrange(n_perm)]) adj_yeo_null = np.zeros((n_perm, n_yeo, n_yeo)) # Mean inter/intra system edge wt for sys_ix, sys_iy in zip(*df_to_yeo['yeo_triu']): sys1 = df_to_yeo['yeo_name'][sys_ix] sys2 = df_to_yeo['yeo_name'][sys_iy] sys1_ix = np.flatnonzero(df_to_yeo['yeo_lbl'][df_to_yeo['laus_triu'][0]] == sys1) sys1_iy = np.flatnonzero(df_to_yeo['yeo_lbl'][df_to_yeo['laus_triu'][1]] == sys1) sys2_ix = np.flatnonzero(df_to_yeo['yeo_lbl'][df_to_yeo['laus_triu'][0]] == sys2) sys2_iy = np.flatnonzero(df_to_yeo['yeo_lbl'][df_to_yeo['laus_triu'][1]] == sys2) inter_sys_ii = np.unique(np.concatenate((np.intersect1d(sys1_ix, sys2_iy), np.intersect1d(sys1_iy, sys2_ix)))) # Populate a full adjacency matrix adj_yeo[sys_ix, sys_iy] = np.mean(sel_subnet[inter_sys_ii]) adj_yeo[sys_iy, sys_ix] = np.mean(sel_subnet[inter_sys_ii]) adj_yeo_null[:, sys_ix, sys_iy] = np.mean(sel_subnet_null[:, inter_sys_ii], axis=1) adj_yeo_null[:, sys_iy, sys_ix] = np.mean(sel_subnet_null[:, inter_sys_ii], axis=1) # Compute core-periphery scores intra_sys = np.diag(adj_yeo) inter_sys = np.sum(np.triu(adj_yeo, k=1) + np.triu(adj_yeo, k=1).T, axis=1) / (n_yeo-1) # Compute core-periphery null scores null_intra_sys = np.array([np.diag(aa) for aa in adj_yeo_null]) null_inter_sys = np.array([np.sum(np.triu(aa, k=1) + np.triu(aa, k=1).T, axis=1) / (n_yeo-1) for aa in adj_yeo_null]) # Threshold adj_yeo[(np.mean(adj_yeo_null > adj_yeo, axis=0) >= alpha)] = 0 # Generate subgraph dictionary system_subgraph.append({'Subgraph_ID': fac_ii+1, 'subnet_yeo': adj_yeo, 'subnet_roi': adj_roi[df_to_yeo['sort_laus_to_yeo'], :][:, df_to_yeo['sort_laus_to_yeo']], 'intra_sys': intra_sys, 'inter_sys': inter_sys, 'null_intra_sys': null_intra_sys, 'null_inter_sys': null_inter_sys, 'expr_coef': sel_coef}) np.savez('{}/Yeo_Subgraph.All.npz'.format(path_ExpData), system_subgraph=system_subgraph) """ Explanation: 
Compute a Core-Periphery Score End of explanation """ # Plot the subgraphs plt.figure(figsize=(5,5), dpi=300); for ii, fac_ii in enumerate(xrange(n_fac)): #enumerate(sort_fac): sel_fac_subnet = fac_subnet_B[fac_ii, :] adj = convert_conn_vec_to_adj_matr(sel_fac_subnet) adj_yeo = adj[df_to_yeo['sort_laus_to_yeo'], :][:, df_to_yeo['sort_laus_to_yeo']] # Plot ax = plt.subplot(4, 4, ii+1) mat = ax.matshow(adj_yeo, cmap='magma', vmin=1.1*sel_fac_subnet.min()); #plt.colorbar(mat, ax=ax, fraction=0.046, pad=0.04) for xx in df_to_yeo['yeo_adj_demarc']: ax.vlines(xx, 0, n_laus, color='w', lw=0.25) ax.hlines(xx, 0, n_laus, color='w', lw=0.25) ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_tick_params(width=0) ax.xaxis.set_tick_params(width=0) ax.grid(False) ax.tick_params(axis='both', which='major', pad=-3) ax.set_xticks((df_to_yeo['yeo_adj_demarc'][:-1] + (np.diff(df_to_yeo['yeo_adj_demarc']) * 0.5))); ax.set_xticklabels(df_to_yeo['yeo_name'], fontsize=3.0, rotation=45) ax.set_yticks((df_to_yeo['yeo_adj_demarc'][:-1] + (np.diff(df_to_yeo['yeo_adj_demarc']) * 0.5))); ax.set_yticklabels(df_to_yeo['yeo_name'], fontsize=3.0, rotation=45) plt.show() """ Explanation: Plot Subgraph Adjacency Matrices End of explanation """ pixmap_path = '{}/brain_system_pixmap.npz'.format(path_Figures) if os.path.exists(pixmap_path): brain_system_pixmap = np.load(pixmap_path) else: from mayavi import mlab import nibabel as nib brain_system_pixmap = {} brain_system_rgbcol = {} sys_scalar = [15, 18, 6, 0, 3, 4, 2, 8, 10] view_angle = {'Sag_PA': [0.0, 90.0], 'Sag_AP': [180.0, 90.0]} # Get the pial surface recons pial_hemi = {'LH': {}, 'RH': {}} pial_hemi['LH']['vert'], pial_hemi['LH']['tria'] = nib.freesurfer.io.read_geometry('{}/BrainRenderSubject15/surf/lh.pial'.format(path_CoreData)) pial_hemi['RH']['vert'], pial_hemi['RH']['tria'] = nib.freesurfer.io.read_geometry('{}/BrainRenderSubject15/surf/rh.pial'.format(path_CoreData)) # Get the Lausanne label files 
for each ROI label_files = [] for roi in df_to_yeo['df_laus_yeo']: laus_lbl = roi[1].lower() hemi = roi[2].lower() # Parse the atlas name and find the label file if it exists lbl_file = '{}.{}.label'.format(hemi, laus_lbl) lbl_file = lbl_file.replace(' ', '') label_files.append('{}/BrainRenderSubject15/label/regenerated_{}_125/{}'.format(path_CoreData, hemi, lbl_file)) # Iterate over hemisphere of the pial surface for hemi in pial_hemi.keys(): n_vert = len(pial_hemi[hemi]['vert']) # Iterate over brain system for sys_id, sys_lbl in enumerate(df_to_yeo['yeo_name']): print(sys_lbl) sys_ix = np.flatnonzero(df_to_yeo['yeo_lbl'] == sys_lbl) # Find the label file for each ROI and get vertices if sys_lbl == 'SUB': pial_scalars = sys_scalar[sys_id]*np.ones(n_vert) else: pial_scalars = 15*np.ones(n_vert) for roi_ix, (roi, lbl_file) in enumerate(zip(df_to_yeo['df_laus_yeo'], label_files)): if roi[2] != hemi: continue if not os.path.exists(lbl_file): continue # Load the file and add scalar to the vertices parc_lbl = nib.freesurfer.io.read_label(lbl_file) if roi_ix in sys_ix: pial_scalars[parc_lbl] = sys_scalar[sys_id] else: pial_scalars[parc_lbl] = 15 # Plot the colored Brain System fig = mlab.figure(bgcolor=(1.0, 1.0, 1.0)) src = mlab.pipeline.triangular_mesh_source(pial_hemi[hemi]['vert'][:,0], pial_hemi[hemi]['vert'][:,1], pial_hemi[hemi]['vert'][:,2], pial_hemi[hemi]['tria'], scalars=pial_scalars, opacity=0.75, figure=fig) norms = mlab.pipeline.poly_data_normals(src, figure=fig) norms.filter.splitting = False surf = mlab.pipeline.surface(norms, figure=fig) surf.parent.scalar_lut_manager.set(lut_mode='Vega20', data_range=[0, 20], use_default_range=False) lut = surf.module_manager.scalar_lut_manager.lut.table.to_array() lut[188:213, 3] = 220 surf.module_manager.scalar_lut_manager.lut.table = lut # Rotate the view and save a screenshot pixmap = {} for ang in view_angle.keys(): mlab.view(azimuth=view_angle[ang][0], elevation=view_angle[ang][1]) pixmap[ang] = 
mlab.screenshot(mode='rgba') mlab.close(all=True) # Save to system pixmap dictionary if not sys_lbl in brain_system_pixmap.keys(): brain_system_pixmap[sys_lbl] = {} if not hemi in brain_system_pixmap[sys_lbl].keys(): brain_system_pixmap[sys_lbl][hemi] = pixmap # Save RGB brain_system_rgbcol[sys_lbl] = lut[(sys_scalar[sys_id]*lut.shape[0]/20.), :] np.savez(pixmap_path, brain_system_pixmap=brain_system_pixmap, brain_system_rgbcol=brain_system_rgbcol) """ Explanation: Visualize Subgraphs Surface render brain systems End of explanation """ sys_pixmap = brain_system_pixmap['brain_system_pixmap'][()] for sys_id in sys_pixmap.keys(): for hemi_id in ['RH']: #sys_pixmap[sys_id].keys(): for plane_id in sys_pixmap[sys_id][hemi_id].keys(): plt.figure(dpi=300.0) ax = plt.subplot(111) ax.imshow(sys_pixmap[sys_id][hemi_id][plane_id]); ax.set_axis_off(); plt.savefig('{}/System_Pixmap.{}.{}.{}.svg'.format(path_Figures, sys_id, hemi_id, plane_id)) plt.close() """ Explanation: Plot brain systems End of explanation """ from matplotlib.offsetbox import AnnotationBbox, OffsetImage, TextArea from scipy import ndimage # Assign color for each circle node based on associated brain system color node_clr = np.array([brain_system_pixmap['brain_system_rgbcol'][()][sys_lbl][:3] / 255 for sys_lbl in df_to_yeo['yeo_lbl'][df_to_yeo['sort_laus_to_yeo']]]) # Arrange nodes around the circle system_pos = [] node_rads = np.linspace(0, 2*np.pi - (2*np.pi/n_laus), n_laus) for sys_ii, sys_nm in enumerate(df_to_yeo['yeo_name']): sys_rad = np.mean(node_rads[df_to_yeo['yeo_lbl'] == sys_nm]) dd = 16 system_pos.append((sys_rad, dd)) # Render the circle plot for fac_ii in xrange(n_fac): sel_subnet = df_subg['system_subgraph'][fac_ii]['subnet_roi'] sys_con = convert_adj_matr_to_cfg_matr(np.expand_dims(sel_subnet, axis=0)).squeeze() fig, ax = Echobase.Plotting.render_circle_connectivity.draw(conn_list=sys_con, conn_pct=[99, 100], conn_cmap='YlGnBu', conn_linewidth=0.5, node_color=node_clr) 
fig.savefig('{}/Circle_Subgraph.{}.svg'.format(path_Figures, fac_ii+1)) plt.close() """ Explanation: Circle Plot End of explanation """
luisvalesilva/digitre
digitre/digitre.ipynb
mit
# Standard library import datetime import time # Third party libraries import numpy as np import matplotlib.pyplot as plt %matplotlib inline # Digitre code import digitre_preprocessing as prep import digitre_model import digitre_classifier # Reload digitre code in the same session (during development) import imp imp.reload(prep) imp.reload(digitre_model) imp.reload(digitre_classifier) # Latest update str(datetime.datetime.now()) """ Explanation: <a id="Top"></a> ___ ___ ___ _____ /\__\ /\ \ /\__\ /::\ \ ___ /:/ _/_ ___ ___ /::\ \ /:/ _/_ /:/\:\ \ /\__\ /:/ /\ \ /\__\ /\__\ /:/\:\__\ /:/ /\__\ /:/ \:\__\ /:/__/ /:/ /::\ \ /:/__/ /:/ / /:/ /:/ / /:/ /:/ _/_ /:/__/ \:|__| /::\ \ /:/__\/\:\__\ /::\ \ /:/__/ /:/_/:/__/___ /:/_/:/ /\__\ \:\ \ /:/ / \/\:\ \__ \:\ \ /:/ / \/\:\ \__ /::\ \ \:\/:::::/ / \:\/:/ /:/ / \:\ /:/ / ~~\:\/\__\ \:\ /:/ / ~~\:\/\__\ /:/\:\ \ \::/~~/~~~~ \::/_/:/ / \:\/:/ / \::/ / \:\/:/ / \::/ / \/__\:\ \ \:\~~\ \:\/:/ / \::/ / /:/ / \::/ / /:/ / \:\__\ \:\__\ \::/ / \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ \/__/ December 2016 Table of contents Build prediction model with tflearn and tensorflow Multilayer convolutional neural network Serialize trained CNN model for serving Classify digit examples from Digitre Example step-by-step preprocessing Classify preprocessed images End of explanation """ # Data loading and preprocessing X, Y, testX, testY = digitre_model.load_data() #X = X.reshape([-1, 28, 28, 1]) #testX = testX.reshape([-1, 28, 28, 1]) # Plot functions def plot_digit(digit, show=True, file_name=None): plt.imshow(digit, cmap = 'Greys', interpolation = 'none') plt.tick_params(axis='both', which='both', bottom='off', top='off', labelbottom='off', right='off', left='off', labelleft='off') if file_name is not None: plt.savefig(file_name) if show: plt.show() def plot_digits(digits, rows, columns): for i, digit in enumerate(digits): plt.subplot(rows, columns, i+1) plot_digit(digit, show=False) plt.show() # Plot a few training examples X_eg = 
X[10:20,:,:,:] X_eg = [digit.reshape(28, 28) for digit in X_eg] plot_digits(X_eg, 2, 5) # Visualization # Used "tensorboard_verbose=0", meaning Loss & Metric # Run "$ tensorboard --logdir='/tmp/tflearn_logs'" ### Fit model using all data (merge training and test data) # Done from command line: # $ python digitre_model.py -f 'cnn_alldata.tflearn' -a -e 20 # Training Step: 20320 | total loss: 0.642990.9401 | val_loss: 0.052 # | Adam | epoch: 020 | loss: 0.64299 - acc: 0.9401 | val_loss: 0.05263 - val_acc: 0.9866 -- iter: 65000/65000 # -- # ----- # Completed training in # 3.5 hr. # ----- # ... Saving trained model as " cnn_alldata.tflearn " """ Explanation: <a id="TF"></a> Build prediction model with tflearn and tensorflow <a id="CNN"></a> Multilayer convolutional neural network Code based on this tflearn example, with CNN architecture modeled after TensorFlow's tutorial Deep MNIST for experts. End of explanation """ with open('b64_2_preprocessing.txt', 'r') as f: eg_2 = f.read() # Preview base64 encoded image print(eg_2[:500]) eg_2 = prep.b64_str_to_np(eg_2) eg_2.shape # Plot the example handwritten digit plot_digit(eg_2, file_name='b64_2_preprocessing_1.png') eg_2 = prep.crop_img(eg_2) plot_digit(eg_2, file_name='b64_2_preprocessing_2.png') eg_2 = prep.center_img(eg_2) plot_digit(eg_2, file_name='b64_2_preprocessing_3.png') eg_2 = prep.resize_img(eg_2) eg_2.shape plot_digit(eg_2, file_name='b64_2_preprocessing_4.png') eg_2 = prep.min_max_scaler(eg_2, final_range=(0, 1)) plot_digit(eg_2) # Plot processed Digitre image together with MNIST example plot_digits([eg_2, X_eg[6]], 1, 2) # Save MNIST example too plot_digit(X_eg[6], file_name='MNIST_2.png') eg_2.max() eg_2.shape """ Explanation: <a id="Digitre"></a> Classify digit examples from Digitre <a id="Prep"></a> Example step-by-step preprocessing Take example base64-encoded handwritten digit images (generated from html canvas element) and preprocess step-by-step to a format ready for classification model. 
Compare with MNIST example. End of explanation """ # Instantiate Classifier (loads the tflearn pre-trained model) model = digitre_classifier.Classifier(file_name='cnn.tflearn') # Classify same example digit with open('b64_2_preprocessing.txt', 'r') as f: eg_2 = f.read() eg_2 = model.preprocess(eg_2) pred = np.around(model.classify(eg_2)[0], 2) pred from altair import Chart, Data, X, Y, Axis, Scale # Plot prediction def prob_distribution_plot(pred): prediction = pred.reshape([10]) data = Data(values=[{'x': i, 'y': value} for i, value in enumerate(pred)]) plot = Chart(data).mark_bar(color='#f6755e').encode( x=X('x:O', axis=Axis(title='Digit', labelAngle=0.5, tickLabelFontSize=15, titleFontSize=15)), y=Y('y:Q', axis=Axis(format='%', title='Probability', tickLabelFontSize=15, titleFontSize=15), scale=Scale(domain=(0, 1)))) return plot prob_distribution_plot(pred) from altair import Chart, Data, X, Y, Axis # Plot prediction def prob_distribution_plot(pred): prediction = pred.reshape([10]) data = Data(values=[{'x': i, 'y': value} for i, value in enumerate(prediction)]) plot = Chart(data).mark_bar(color='#f6755e').encode( x=X('x:O', axis=Axis(title='Digit', labelAngle=0.5, tickLabelFontSize=15, titleFontSize=15)), y=Y('y:Q', axis=Axis(format='%', title='Probability', tickLabelFontSize=15, titleFontSize=15))) return plot.to_json(indent=2) prob_distribution_plot(pred) """ Explanation: <a id="Class"></a> Classify preprocessed images End of explanation """
turbomanage/training-data-analyst
courses/machine_learning/deepdive2/structured/labs/5b_deploy_keras_ai_platform_babyweight.ipynb
apache-2.0
import os """ Explanation: LAB 5b: Deploy and predict with Keras model on Cloud AI Platform. Learning Objectives Setup up the environment Deploy trained Keras model to Cloud AI Platform Online predict from model on Cloud AI Platform Batch predict from model on Cloud AI Platform Introduction In this notebook, we'll deploying our Keras model to Cloud AI Platform and creating predictions. We will set up the environment, deploy a trained Keras model to Cloud AI Platform, online predict from deployed model on Cloud AI Platform, and batch predict from deployed model on Cloud AI Platform. Each learning objective will correspond to a #TODO in this student lab notebook -- try to complete this notebook first and then review the solution notebook. Set up environment variables and load necessary libraries Import necessary libraries. End of explanation """ %%bash PROJECT=$(gcloud config list project --format "value(core.project)") echo "Your current GCP Project Name is: "$PROJECT # Change these to try this notebook out PROJECT = "cloud-training-demos" # TODO: Replace with your PROJECT BUCKET = PROJECT # defaults to PROJECT REGION = "us-central1" # TODO: Replace with your REGION os.environ["BUCKET"] = BUCKET os.environ["REGION"] = REGION os.environ["TFVERSION"] = "2.0" %%bash gcloud config set compute/region $REGION """ Explanation: Lab Task #1: Set environment variables. Set environment variables so that we can use them throughout the entire lab. We will be using our project name for our bucket, so you only need to change your project and region. End of explanation """ %%bash gsutil ls gs://${BUCKET}/babyweight/trained_model %%bash MODEL_LOCATION=$(gsutil ls -ld -- gs://${BUCKET}/babyweight/trained_model/2* \ | tail -1) gsutil ls ${MODEL_LOCATION} """ Explanation: Check our trained model files Let's check the directory structure of our outputs of our trained model in folder we exported the model to in our last lab. 
We'll want to deploy the saved_model.pb within the timestamped directory as well as the variable values in the variables folder. Therefore, we need the path of the timestamped directory so that everything within it can be found by Cloud AI Platform's model deployment service. End of explanation """ %%bash MODEL_NAME="babyweight" MODEL_VERSION="ml_on_gcp" MODEL_LOCATION=# TODO: Add GCS path to saved_model.pb file. echo "Deleting and deploying $MODEL_NAME $MODEL_VERSION from $MODEL_LOCATION" # gcloud ai-platform versions delete ${MODEL_VERSION} --model ${MODEL_NAME} # gcloud ai-platform models delete ${MODEL_NAME} gcloud ai-platform models create ${MODEL_NAME} --regions ${REGION} gcloud ai-platform versions create ${MODEL_VERSION} \ --model=${MODEL_NAME} \ --origin=${MODEL_LOCATION} \ --runtime-version=1.14 \ --python-version=3.5 """ Explanation: Lab Task #2: Deploy trained model. Deploying the trained model to act as a REST web service is a simple gcloud call. Complete #TODO by providing location of saved_model.pb file to Cloud AI Platoform model deployment service. The deployment will take a few minutes. 
End of explanation """ from oauth2client.client import GoogleCredentials import requests import json MODEL_NAME = # TODO: Add model name MODEL_VERSION = # TODO: Add model version token = GoogleCredentials.get_application_default().get_access_token().access_token api = "https://ml.googleapis.com/v1/projects/{}/models/{}/versions/{}:predict" \ .format(PROJECT, MODEL_NAME, MODEL_VERSION) headers = {"Authorization": "Bearer " + token } data = { "instances": [ { "is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39 }, { "is_male": "False", "mother_age": 29.0, "plurality": "Single(1)", "gestation_weeks": 38 }, { "is_male": "True", "mother_age": 26.0, "plurality": "Triplets(3)", "gestation_weeks": 39 }, # TODO: Create another instance ] } response = requests.post(api, json=data, headers=headers) print(response.content) """ Explanation: Lab Task #3: Use model to make online prediction. Complete __#TODO__s for both the Python and gcloud Shell API methods of calling our deployed model on Cloud AI Platform for online prediction. Python API We can use the Python API to send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses are the order of the instances. End of explanation """ %%writefile inputs.json {"is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39} # TODO: Create another instance """ Explanation: The predictions for the four instances were: 5.33, 6.09, 2.50, and 5.86 pounds respectively when I ran it (your results might be different). gcloud shell API Instead we could use the gcloud shell API. Create a newline delimited JSON file with one instance per line and submit using gcloud. 
End of explanation """ %%bash gcloud ai-platform predict \ --model=# TODO: Add model name \ --json-instances=inputs.json \ --version=# TODO: Add model version """ Explanation: Now call gcloud ai-platform predict using the JSON we just created and point to our deployed model and version. End of explanation """ %%bash INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs gsutil cp inputs.json $INPUT gsutil -m rm -rf $OUTPUT gcloud ai-platform jobs submit prediction babypred_$(date -u +%y%m%d_%H%M%S) \ --data-format=TEXT \ --region ${REGION} \ --input-paths=$INPUT \ --output-path=$OUTPUT \ --model=# TODO: Add model name \ --version=# TODO: Add model version """ Explanation: Lab Task #4: Use model to make batch prediction. Batch prediction is commonly used when you have thousands to millions of predictions. It will create an actual Cloud AI Platform job for prediction. Complete __#TODO__s so we can call our deployed model on Cloud AI Platform for batch prediction. End of explanation """
YaniLozanov/Software-University
Python/Jupyter notebook/06.Drawing Figures with Loops/Jupyter notebook/Drawing Figures with Loops.ipynb
mit
asterisk = 10 for i in range(0,10): print("*" * asterisk) """ Explanation: <h1 align="center">Drawing Figures with Loops</h1> <h2>01.Rectangle of 10 x 10 Stars</h2> Problem: Write a program that draws a rectangle of 10 x 10 asterisks on the console. End of explanation """ n = int(input()) for i in range(0, n): print("*" * n) """ Explanation: <h2>02.Rectangle of N x N Stars</h2> Problem: Write a program that introduces a positive integer n and the console rectangle of n * n asterisks. End of explanation """ n = int(input()) for i in range(0, n): print("* " * n) """ Explanation: <h2>03.Square of Stars</h2> Problem: Write a program that reads the console number n and draws a square of n * n asterisks. The Difference with the previous task is that there is a space between every two asterisks. End of explanation """ n = int(input()) for i in range(0, n): print("$ " * (i + 1)) """ Explanation: <h2>04.Triangle of Dollars</h2> Problem: Write a program that enters a number n and triangle prints from dollars. End of explanation """ n = int(input()) minuses = n - 2 print("+ " + "- " * minuses + "+") for i in range(0, n - 2): print("| " + "- " * minuses + "|") print("+ " + "- " * minuses + "+") """ Explanation: <h2>05.Square Frame</h2> Problem: Write a program that introduces a positive integer n and draws the console square frame with size n * n. End of explanation """ n = int(input()) spaces = ' ' asterisk = '*' for i in range(1, (n * 2) ): if i <= n: spaces = n - i asterisk = i print(" " * spaces + "* " * asterisk) else: spaces = i - n asterisk = n * 2 - i print(" " * spaces + "* " * asterisk) """ Explanation: <h2>06.Rhombus of Stars</h2> Problem: Write a program that introduces a positive integer n and prints a diamond-sized nugget. 
End of explanation """ n = int(input()) spaces = " " for i in range(0, n + 1): spaces = n - i print(" " * spaces + "*" * i + " | " + "*" * i + " " * spaces) """ Explanation: <h2>07.Christmas Tree</h2> Problem: Write a program that enters a number n (1 ≤ n ≤ 100) and prints a Christmas tree size n. End of explanation """ n = int(input()) asterisk = "*" slash = "/" spaces = " " asterisk = 2 * n spaces = n print("*" * asterisk + " " * spaces + "*" * asterisk) for i in range(1, (n - 2) + 1): if n % 2 == 0: if i == n / 2 - 1: slash = 2 * n - 2 print("*" + "/" * slash + "*" + "|" * n + "*" + "/" * slash + "*") else: spaces = 2 *n - 2 print("*" + "/" * spaces + "*" + " " * n + "*" + "/" * spaces + "*") else: if i == n // 2: slash = 2 * n - 2 print("*" + "/" * slash + "*" + "|" * n + "*" + "/" * slash + "*") else: spaces = 2 *n - 2 print("*" + "/" * spaces + "*" + " " * n + "*" + "/" * spaces + "*") """ Explanation: <h2>08.Sunglasses</h2> Problem: Write a program that introduces an integer n (3 ≤ n ≤ 100) and print sunglasses of size 5 * n x n. End of explanation """ n = int(input()) asterisk = "*" minuses = "-" for i in range(1, n + 1): if n % 2 == 0 and i % 2 == 0: asterisk = i minuses = (n - i ) // 2 print("-" * minuses + "*" * asterisk + "-" * minuses) elif n % 2 != 0 and i % 2 != 0: asterisk = i minuses = (n - i) // 2 print("-" * minuses + "*" * asterisk + "-" * minuses) for i in range(0 , n // 2): print("|" + "*" * (n -2) + "|") """ Explanation: <h2>09.House</h2> Problem: Write a program that introduces a number n (2 ≤ n ≤ 100) and prints a cot with a size n x n. End of explanation """
raazesh-sainudiin/scalable-data-science
_360-in-525/2018/02/SimonLindgren/MeTooInJupyterIpythonNBAction/Simon_MetooStep3.ipynb
unlicense
import pandas as pd import seaborn as sns import matplotlib.pyplot as plt import matplotlib.ticker as ticker """ Explanation: Simon #metoo step 3 End of explanation """ df = pd.DataFrame.from_csv("topicmodel.csv", index_col=None) df = df.sort_index() df.rename(columns={'Unnamed: 0': 'day'}, inplace=True) df = df.set_index('day') # Converting the index as date df.index = pd.to_datetime(df.index) df.head() """ Explanation: We use the seaborn package to plot how topics are distributed over time in tweets. We are dealing with categorical data, where the category in this case is the point in time. For each point in time, we see any topics with a probability &gt; X as "existing". We need a dataframe with one categorical column (time) and one with a value (topicnumber). End of explanation """ outfile = open("strongtopics.csv", "w") outfile.write("day,topic\n") for row in df.iterrows(): day = str(row[0]) values = row[1].to_dict() for k,v in values.items(): if v > 0.01: outfile.write(day + "," + k + "\n") df2 = pd.DataFrame.from_csv("strongtopics.csv", index_col=None) df2 = df2.sort_values(by="day") """ Explanation: Convert the imported dataframe rows to dicts in a list. End of explanation """ df2.head() """ Explanation: Now, we have a dataframe with all topics that are more probable than 0.3 for each hour. End of explanation """ %matplotlib inline plt.figure(figsize=(10,10)) sns.swarmplot(x="day", y="topic", data=df2) plt.savefig('test.pdf') plt.show() """ Explanation: A SET OF TOPICS ARE TALKED ABOUT THROUGHOUT WHILE OTHERS ARE MORE RANDOM End of explanation """ df3=df2.groupby('day').count().reset_index() df3.columns=['day','strong topics'] df3 = df3.sort_values(by="day") df3 = df3.reset_index(drop=True) df3.head() df3['day'] = df3.index df3 df3.to_csv("df3.csv") plt.figure(figsize=(10,10)) sns.regplot(x="day", y="strong topics", data=df3) plt.savefig('test2.pdf') """ Explanation: THE TOPIC COMPLEXITY IS INCREASING OVER TIME, THE HASHTAG LOSES FOCUS End of explanation """
mroberge/hydrofunctions
docs/notebooks/Data_Catalog.ipynb
mit
import hydrofunctions as hf karthaus = hf.NWIS('01542500', 'iv', period='P1D') """ Explanation: Requesting A Data Catalog Almost every site or 'station' in the NWIS network collects more than one type of data. A simple way to find out what gets collected at a station would be to request everything collected over the past day, like this: End of explanation """ karthaus """ Explanation: You can list what is contained in the request: End of explanation """ output = hf.data_catalog('01585200') """ Explanation: The basic NWIS object will provide a list of every parameter collected at the site, the frequency of observations for that parameter, the name of the parameter, and the units of the observations. It also tells you the date and time of the first and last observation in the request. This is great, but it doesn't tell you when a parameter was first collected, or if a parameter was discontinued. If you leave out the 'period' part of the request, the USGS will give you the most recent value for every parameter, no matter how old, but this still doesn't tell you when observations were first collected. For more detailed information about the parameters collected at a site, request a 'data catalog' using the data_catalog() function. This will return a hydroRDB object containing a table (dataframe) with a row for every parameter that you request, and a header that describes every column in the dataset. 
Some of the most useful information in the data catalog are the: data type code: describes the frequency of observations dv: daily values uv, rt, or iv: 'real time' data collected more frequently than daily sv: site visits conducted irregularly ad: values listed in the USGS Annual Water Reports more information: https://waterservices.usgs.gov/rest/Site-Service.html#outputDataTypeCd parameter code: describes the type of data collected statistic code: describes the statistic used to report the parameter begin date, end date: the first and last observation made for this parameter count_nu: the number of observations made between the start and end dates. More information about the values in the Data Catalog are located in the header, and also from https://waterservices.usgs.gov/rest/Site-Service.html For more information about a site and the data collected at the site, try these sources: To access information about the site itself, use the site_file() function. To access the rating curve at a site (for translating water stage into discharge), use the rating_curve() function. To access field data collected by USGS personnel during site visits, use the field_meas() function. To access the annual peak discharges at a site, use the peaks() function. To access daily, monthly, or annual statistics for data at a site, use the stats() function. Example Usage End of explanation """ print(output.header) # Transposing the table to show all of the columns as rows: output.table.T """ Explanation: Our new 'output' is a hydroRDB object. It has several useful properties, including: .table, which returns a dataframe of the data. Each row corresponds to a different parameter. .header, which is the original descriptive header provided by the USGS. It lists and describes the variables in the dataset. .columns, which is a list of the column names .dtypes, which is a list of the data types and column widths for each variable in the USGS RDB format. 
If you print or evaluate the hydroRDB object, it will return a tuple of the header and dataframe table. End of explanation """
cvxgrp/cvxpylayers
examples/torch/tutorial.ipynb
apache-2.0
import cvxpy as cp import matplotlib.pyplot as plt import numpy as np import torch from cvxpylayers.torch import CvxpyLayer torch.set_default_dtype(torch.double) np.set_printoptions(precision=3, suppress=True) """ Explanation: Cvxpylayers tutorial End of explanation """ n = 7 # Define variables & parameters x = cp.Variable() y = cp.Parameter(n) # Define objective and constraints objective = cp.sum_squares(y - x) constraints = [] # Synthesize problem prob = cp.Problem(cp.Minimize(objective), constraints) # Set parameter values y.value = np.random.randn(n) # Solve problem in one line prob.solve(requires_grad=True) print("solution:", "%.3f" % x.value) print("analytical solution:", "%.3f" % np.mean(y.value)) """ Explanation: Parametrized convex optimization problem $$ \begin{array}{ll} \mbox{minimize} & f_0(x;\theta)\ \mbox{subject to} & f_i(x;\theta) \leq 0, \quad i=1, \ldots, m\ & A(\theta)x=b(\theta), \end{array} $$ with variable $x \in \mathbf{R}^n$ and parameters $\theta\in\Theta\subseteq\mathbf{R}^p$ objective and inequality constraints $f_0, \ldots, f_m$ are convex in $x$ for each $\theta$, i.e., their graphs curve upward equality constraints are linear for a given value of $\theta$, find a value for $x$ that minimizes objective, while satisfying constraints we can efficiently solve these globally with near total reliability Solution map Solution $x^\star$ is an implicit function of $\theta$ When unique, define solution map as function $x^\star = \mathcal S(\theta)$ Need to call numerical solver to evaluate This function is often differentiable In a series of papers we showed how to analytically differentiate this function, using the implicit function theorem Benefits of analytical differentiation: works with nonsmooth objective/constraints, low memory usage, don't compound errors CVXPY High level domain-specific language (DSL) for convex optimization Define variables, parameters, objective and constraints Synthesize into problem object, then call solve method 
We've added derivatives to CVXPY (forward and backward) CVXPYlayers * Convert CVXPY problems into callable, differentiable Pytorch and Tensorflow modules in one line Applications learning convex optimization models (structured prediction): https://stanford.edu/~boyd/papers/learning_copt_models.html learning decision-making policies (reinforcement learning): https://stanford.edu/~boyd/papers/learning_cocps.html machine learning hyper-parameter tuning and feature engineering: https://stanford.edu/~boyd/papers/lsat.html repairing infeasible or unbounded optimization problems: https://stanford.edu/~boyd/papers/auto_repair_cvx.html as protection layers in neural networks: http://physbam.stanford.edu/~fedkiw/papers/stanford2019-10.pdf custom neural network layers (sparsemax, csoftmax, csparsemax, LML): https://locuslab.github.io/2019-10-28-cvxpylayers/ and many more... Average example Find the average of a vector: \begin{equation} \begin{array}{ll} \mbox{minimize} & \sum_{i=1}^n (y_i - x)^2 \end{array} \end{equation} Variable $x$, parameters $y\in\mathbf{R}^n$ The solution map is clearly: $$x=\sum_{i=1}^n y_i / n$$ End of explanation """ # Set gradient wrt x x.gradient = np.array([1.]) # Differentiate in one line prob.backward() print("gradient:", y.gradient) print("analytical gradient:", np.ones(y.size) / n) """ Explanation: The gradient is simply: $$\nabla_y x = (1/n)\mathbf{1}$$ End of explanation """ n = 7 # Define variables & parameters x = cp.Variable() y = cp.Parameter(n) # Define objective and constraints objective = cp.norm1(y - x) constraints = [] # Synthesize problem prob = cp.Problem(cp.Minimize(objective), constraints) # Set parameter values y.value = np.random.randn(n) # Solve problem in one line prob.solve(requires_grad=True) print("solution:", "%.3f" % x.value) print("analytical solution:", "%.3f" % np.median(y.value)) # Set gradient wrt x x.gradient = np.array([1.]) # Differentiate in one line prob.backward() print("gradient:", y.gradient) g = 
np.zeros(y.size) g[y.value == np.median(y.value)] = 1. print("analytical gradient:", g) """ Explanation: Median example Finding the median of a vector: \begin{equation} \begin{array}{ll} \mbox{minimize} & \sum_{i=1}^n |y_i - x|, \end{array} \end{equation} Variable $x$, parameters $y\in\mathbf{R}^n$ Solution: $$x=\mathbf{median}(y)$$ Gradient (no duplicates): $$(\nabla_y x)_i = \begin{cases} 1 & y_i = \mathbf{median}(y) \ 0 & \text{otherwise}. \end{cases}$$ End of explanation """ from sklearn.datasets import make_blobs from sklearn.model_selection import train_test_split torch.manual_seed(0) np.random.seed(0) n = 2 N = 60 X, y = make_blobs(N, n, centers=np.array([[2, 2], [-2, -2]]), cluster_std=3) Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=.5) Xtrain, Xtest, ytrain, ytest = map( torch.from_numpy, [Xtrain, Xtest, ytrain, ytest]) Xtrain.requires_grad_(True) m = Xtrain.shape[0] a = cp.Variable((n, 1)) b = cp.Variable((1, 1)) X = cp.Parameter((m, n)) Y = ytrain.numpy()[:, np.newaxis] log_likelihood = (1. 
/ m) * cp.sum( cp.multiply(Y, X @ a + b) - cp.logistic(X @ a + b) ) regularization = - 0.1 * cp.norm(a, 1) - 0.1 * cp.sum_squares(a) prob = cp.Problem(cp.Maximize(log_likelihood + regularization)) fit_logreg = CvxpyLayer(prob, [X], [a, b]) torch.manual_seed(0) np.random.seed(0) n = 1 N = 60 X = np.random.randn(N, n) theta = np.random.randn(n) y = X @ theta + .5 * np.random.randn(N) Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=.5) Xtrain, Xtest, ytrain, ytest = map( torch.from_numpy, [Xtrain, Xtest, ytrain, ytest]) Xtrain.requires_grad_(True) m = Xtrain.shape[0] # set up variables and parameters a = cp.Variable(n) b = cp.Variable() X = cp.Parameter((m, n)) Y = cp.Parameter(m) lam = cp.Parameter(nonneg=True) alpha = cp.Parameter(nonneg=True) # set up objective loss = (1/m)*cp.sum(cp.square(X @ a + b - Y)) reg = lam * cp.norm1(a) + alpha * cp.sum_squares(a) objective = loss + reg # set up constraints constraints = [] prob = cp.Problem(cp.Minimize(objective), constraints) # convert into pytorch layer in one line fit_lr = CvxpyLayer(prob, [X, Y, lam, alpha], [a, b]) # this object is now callable with pytorch tensors fit_lr(Xtrain, ytrain, torch.zeros(1), torch.zeros(1)) # sweep over values of alpha, holding lambda=0, evaluating the gradient along the way alphas = np.logspace(-3, 2, 200) test_losses = [] grads = [] for alpha_vals in alphas: alpha_tch = torch.tensor([alpha_vals], requires_grad=True) alpha_tch.grad = None a_tch, b_tch = fit_lr(Xtrain, ytrain, torch.zeros(1), alpha_tch) test_loss = (Xtest @ a_tch.flatten() + b_tch - ytest).pow(2).mean() test_loss.backward() test_losses.append(test_loss.item()) grads.append(alpha_tch.grad.item()) plt.semilogx() plt.plot(alphas, test_losses, label='test loss') plt.plot(alphas, grads, label='analytical gradient') plt.plot(alphas[:-1], np.diff(test_losses) / np.diff(alphas), label='numerical gradient', linestyle='--') plt.legend() plt.xlabel("$\\alpha$") plt.show() # sweep over values of lambda, holding 
alpha=0, evaluating the gradient along the way lams = np.logspace(-3, 2, 200) test_losses = [] grads = [] for lam_vals in lams: lam_tch = torch.tensor([lam_vals], requires_grad=True) lam_tch.grad = None a_tch, b_tch = fit_lr(Xtrain, ytrain, lam_tch, torch.zeros(1)) test_loss = (Xtest @ a_tch.flatten() + b_tch - ytest).pow(2).mean() test_loss.backward() test_losses.append(test_loss.item()) grads.append(lam_tch.grad.item()) plt.semilogx() plt.plot(lams, test_losses, label='test loss') plt.plot(lams, grads, label='analytical gradient') plt.plot(lams[:-1], np.diff(test_losses) / np.diff(lams), label='numerical gradient', linestyle='--') plt.legend() plt.xlabel("$\\lambda$") plt.show() # compute the gradient of the test loss wrt all the training data points, and plot plt.figure(figsize=(10, 6)) a_tch, b_tch = fit_lr(Xtrain, ytrain, torch.tensor([.05]), torch.tensor([.05]), solver_args={"eps": 1e-8}) test_loss = (Xtest @ a_tch.flatten() + b_tch - ytest).pow(2).mean() test_loss.backward() a_tch_test, b_tch_test = fit_lr(Xtest, ytest, torch.tensor([0.]), torch.tensor([0.]), solver_args={"eps": 1e-8}) plt.scatter(Xtrain.detach().numpy(), ytrain.numpy(), s=20) plt.plot([-5, 5], [-3*a_tch.item() + b_tch.item(),3*a_tch.item() + b_tch.item()], label='train') plt.plot([-5, 5], [-3*a_tch_test.item() + b_tch_test.item(), 3*a_tch_test.item() + b_tch_test.item()], label='test') Xtrain_np = Xtrain.detach().numpy() Xtrain_grad_np = Xtrain.grad.detach().numpy() ytrain_np = ytrain.numpy() for i in range(Xtrain_np.shape[0]): plt.arrow(Xtrain_np[i], ytrain_np[i], -.1 * Xtrain_grad_np[i][0], 0.) 
plt.legend() plt.show() # move the training data points in the direction of their gradients, and see the train line get closer to the test line plt.figure(figsize=(10, 6)) Xtrain_new = torch.from_numpy(Xtrain_np - .15 * Xtrain_grad_np) a_tch, b_tch = fit_lr(Xtrain_new, ytrain, torch.tensor([.05]), torch.tensor([.05]), solver_args={"eps": 1e-8}) plt.scatter(Xtrain_new.detach().numpy(), ytrain.numpy(), s=20) plt.plot([-5, 5], [-3*a_tch.item() + b_tch.item(),3*a_tch.item() + b_tch.item()], label='train') plt.plot([-5, 5], [-3*a_tch_test.item() + b_tch_test.item(), 3*a_tch_test.item() + b_tch_test.item()], label='test') plt.legend() plt.show() """ Explanation: Elastic-net regression example We are given training data $(x_i, y_i){i=1}^{N}$, where $x_i\in\mathbf{R}$ are inputs and $y_i\in\mathbf{R}$ are outputs. Suppose we fit a model for this regression problem by solving the elastic-net problem \begin{equation} \begin{array}{ll} \mbox{minimize} & \frac{1}{N}\sum{i=1}^N (ax_i + b - y_i)^2 + \lambda |a| + \alpha a^2, \end{array} \label{eq:trainlinear} \end{equation} where $\lambda,\alpha>0$ are hyper-parameters. We hope that the test loss $\mathcal{L}^{\mathrm{test}}(a,b) = \frac{1}{M}\sum_{i=1}^M (a\tilde x_i + b - \tilde y_i)^2$ is small, where $(\tilde x_i, \tilde y_i)_{i=1}^{M}$ is our test set. First, we set up our problem, where ${x_i, y_i}_{i=1}^N$, $\lambda$, and $\alpha$ are our parameters. End of explanation """
iurilarosa/thesis
scritti/slides/tf summary/.ipynb_checkpoints/presentation_template-checkpoint.ipynb
gpl-3.0
<image> <section data-background="img/cover.jpg" data-state="img-transparent no-title-footer"> <div class="intro-body"> <div class="intro_h1"><h1>Title</h1></div> <h3>Subtitle of the Presentation</h3> <p><strong><span class="a">Speaker 1</span></strong> <span class="b"></span> <span>Job Title</span></p> <p><strong><span class="a">Speaker 2</span></strong> <span class="b"></span> <span>Job Title</span></p> <p>&nbsp;</p> <p>&nbsp;</p> </div> </section> </image> """ Explanation: Cover Slide 1 End of explanation """ from IPython.display import Image Image(filename='img/colorcontext.png', width=400) from IPython.display import YouTubeVideo YouTubeVideo('2G8V00SkTvY') def f(x): """a docstring""" return x**2 """ Explanation: Cover Slide 2 Headline Slide End of explanation """ plt.plot([1,2,3,4]) plt.ylabel('some numbers') plt.show() """ Explanation: Headline Subslide End of explanation """
jegibbs/phys202-2015-work
assignments/assignment10/ODEsEx01.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt import numpy as np import seaborn as sns from scipy.integrate import odeint from IPython.html.widgets import interact, fixed """ Explanation: Ordinary Differential Equations Exercise 1 Imports End of explanation """ def derivs(yvec, t, h, f, ): x = yvec[0] y = yvec[1] dx = dy = return np.array([dx, dy]) def solve_euler(derivs, y0, x): """Solve a 1d ODE using Euler's method. Parameters ---------- derivs : function The derivative of the diff-eq with the signature deriv(y,x) where y and x are floats. y0 : float The initial condition y[0] = y(x[0]). x : np.ndarray, list, tuple The array of times at which of solve the diff-eq. Returns ------- y : np.ndarray Array of solutions y[i] = y(x[i]) """ assert np.allclose(solve_euler(lambda y, x: 1, 0, [0,1,2]), [0,1,2]) """ Explanation: Euler's method Euler's method is the simplest numerical approach for solving a first order ODE numerically. Given the differential equation $$ \frac{dy}{dx} = f(y(x), x) $$ with the initial condition: $$ y(x_0)=y_0 $$ Euler's method performs updates using the equations: $$ y_{n+1} = y_n + h f(y_n,x_n) $$ $$ h = x_{n+1} - x_n $$ Write a function solve_euler that implements the Euler method for a 1d ODE and follows the specification described in the docstring: End of explanation """ def solve_midpoint(derivs, y0, x): """Solve a 1d ODE using the Midpoint method. Parameters ---------- derivs : function The derivative of the diff-eq with the signature deriv(y,x) where y and x are floats. y0 : float The initial condition y[0] = y(x[0]). x : np.ndarray, list, tuple The array of times at which of solve the diff-eq. Returns ------- y : np.ndarray Array of solutions y[i] = y(x[i]) """ # YOUR CODE HERE raise NotImplementedError() assert np.allclose(solve_euler(lambda y, x: 1, 0, [0,1,2]), [0,1,2]) """ Explanation: The midpoint method is another numerical method for solving the above differential equation. 
In general it is more accurate than the Euler method. It uses the update equation: $$ y_{n+1} = y_n + h f\left(y_n+\frac{h}{2}f(y_n,x_n),x_n+\frac{h}{2}\right) $$ Write a function solve_midpoint that implements the midpoint method for a 1d ODE and follows the specification described in the docstring: End of explanation """ def solve_exact(x): """compute the exact solution to dy/dx = x + 2y. Parameters ---------- x : np.ndarray Array of x values to compute the solution at. Returns ------- y : np.ndarray Array of solutions at y[i] = y(x[i]). """ # YOUR CODE HERE raise NotImplementedError() assert np.allclose(solve_exact(np.array([0,1,2])),np.array([0., 1.09726402, 12.39953751])) """ Explanation: You are now going to solve the following differential equation: $$ \frac{dy}{dx} = x + 2y $$ which has the analytical solution: $$ y(x) = 0.25 e^{2x} - 0.5 x - 0.25 $$ First, write a solve_exact function that compute the exact solution and follows the specification described in the docstring: End of explanation """ # YOUR CODE HERE raise NotImplementedError() assert True # leave this for grading the plots """ Explanation: In the following cell you are going to solve the above ODE using four different algorithms: Euler's method Midpoint method odeint Exact Here are the details: Generate an array of x values with $N=11$ points over the interval $[0,1]$ ($h=0.1$). Define the derivs function for the above differential equation. Using the solve_euler, solve_midpoint, odeint and solve_exact functions to compute the solutions using the 4 approaches. Visualize the solutions on a sigle figure with two subplots: Plot the $y(x)$ versus $x$ for each of the 4 approaches. Plot $\left|y(x)-y_{exact}(x)\right|$ versus $x$ for each of the 3 numerical approaches. Your visualization should have legends, labeled axes, titles and be customized for beauty and effectiveness. 
While your final plot will use $N=10$ points, first try making $N$ larger and smaller to see how that affects the errors of the different approaches. End of explanation """
fonnesbeck/PyMC3_Oslo
notebooks/3. Theano.ipynb
cc0-1.0
from theano import function, shared from theano import tensor as T import theano x = T.dscalar('x') y = T.dscalar('y') """ Explanation: Theano While most of PyMC3's user-facing features are written in pure Python, it leverages Theano (Bergstra et al., 2010) to transparently transcode models to C and compile them to machine code, thereby boosting performance. Theano is a library that allows expressions to be defined using generalized vector data structures called tensors, which are tightly integrated with the popular NumPy ndarray data structure, and similarly allow for broadcasting and advanced indexing, just as NumPy arrays do. Theano also automatically optimizes the likelihood's computational graph for speed and provides simple GPU integration. Theano is a Python library that allows you to define, optimize, and evaluate mathematical expressions involving multi-dimensional arrays efficiently. Theano features: tight integration with numpy – Use numpy.ndarray in Theano-compiled functions. transparent use of a GPU – Perform data-intensive calculations up to 140x faster than with CPU.(float32 only) efficient symbolic differentiation – Theano does your derivatives for function with one or many inputs. speed and stability optimizations – Get the right answer for log(1+x) even when x is really tiny. dynamic C code generation – Evaluate expressions faster. extensive unit-testing and self-verification – Detect and diagnose errors. Theano is part programming language, part compiler. It is often used to build machine learning, just as packages like TensorFlow are, though it is not in itself a machine learning toolkit; think of it as a mathematical toolkit. Installing Theano The easiest way to install Theano is to build it from source, using pip: bash pip install --upgrade --no-deps git+git://github.com/Theano/Theano.git however, if you have PyMC3 installed, then Theano will already be available. 
Adding Two Scalars To get us started with Theano and get a feel of what we're working with, let's make a simple function: add two numbers together. Here is how you do it: Step 1 - Declaring Variables End of explanation """ type(x) x.type T.dscalar """ Explanation: In Theano, all symbols must be typed. In particular, T.dscalar is the type we assign to "0-dimensional arrays (scalar) of doubles (d)". It is a Theano type. End of explanation """ z = x + y """ Explanation: Step 2 - Symbolic Expressions The second step is to combine x and y into their sum z: End of explanation """ from theano.printing import pp print(pp(z)) """ Explanation: z is yet another Variable which represents the addition of x and y. You can use the pp function to pretty-print out the computation associated to z. End of explanation """ f = function([x, y], z) """ Explanation: Step 3 - Compiling a Function The last step is to create a function taking x and y as inputs and returning z as output: End of explanation """ f(2, 3) f(16.3, 12.1) """ Explanation: The first argument to function() is a list of Variables that will be provided as inputs to the function. The second argument is a single Variable or a list of Variables. For either case, the second argument is what we want to see as output when we apply the function. f may then be used like a normal Python function. Now we can call the function: End of explanation """ from theano import printing printing.pydotprint(f, 'images/f.png') from IPython.display import Image Image('images/f.png', width='80%') """ Explanation: If you are following along and typing into an interpreter, you may have noticed that there was a slight delay in executing the function instruction. Behind the scenes, f was being compiled into C code. Internally, Theano builds a graph structure composed of interconnected Variable nodes, op nodes and apply nodes. An apply node represents the application of an op to some variables. 
It is important to draw the difference between the definition of a computation represented by an op and its application to some actual data which is represented by the apply node. Here is the expression graph corresponding to the addition of x and y: End of explanation """ x = T.dmatrix('x') y = T.dmatrix('y') z = x + y f = function([x, y], z) """ Explanation: A Variable is the main data structure you work with when using Theano. By calling T.dscalar with a string argument, you create a Variable representing a floating-point scalar quantity with the given name. If you provide no argument, the symbol will be unnamed. Names are not required, but they can help debugging. Adding Two Matrices If we want to work with matrices instead of scalars, the only change from the previous example is that you need to instantiate x and y using the matrix Types: End of explanation """ f([[1, 2], [3, 4]], [[10, 20], [30, 40]]) """ Explanation: dmatrix is the Type for matrices of doubles. Then we can use our new function on 2D arrays: End of explanation """ x = T.dmatrix('x') """ Explanation: The following types are available: byte: bscalar, bvector, bmatrix, brow, bcol, btensor3, btensor4 16-bit integers: wscalar, wvector, wmatrix, wrow, wcol, wtensor3, wtensor4 32-bit integers: iscalar, ivector, imatrix, irow, icol, itensor3, itensor4 64-bit integers: lscalar, lvector, lmatrix, lrow, lcol, ltensor3, ltensor4 float: fscalar, fvector, fmatrix, frow, fcol, ftensor3, ftensor4 double: dscalar, dvector, dmatrix, drow, dcol, dtensor3, dtensor4 complex: cscalar, cvector, cmatrix, crow, ccol, ctensor3, ctensor4 An example of a slightly more interesting function is the logistic curve. 
Let's create a matrix, and apply the logistic transformation to it: End of explanation """ s = 1 / (1 + T.exp(-x)) logistic = function([x], s) logistic([[0, 1], [-1, -2]]) """ Explanation: The logistic transformation: End of explanation """ a, b = T.dmatrices('a', 'b') # Operations diff = a - b abs_diff = abs(diff) diff_squared = diff ** 2 """ Explanation: Theano supports functions with multiple outputs. For example, we can compute the elementwise difference, absolute difference, and squared difference between two matrices a and b at the same time. End of explanation """ f = function([a, b], [diff, abs_diff, diff_squared]) f([[1, 1], [1, 1]], [[0, 1], [2, 3]]) """ Explanation: When we use the function f, it returns the three computed results as a list. End of explanation """ from theano import In x, y, w = T.dscalars('x', 'y', 'w') z = (x + y) * w g = function([x, In(y, value=1), In(w, value=2, name='w_by_name')], z) print('g(33) = {}'.format(g(33))) print('g(33, 0, 1) = {}'.format(g(33, 0, 1))) print('g(33, w_by_name=1) = {}'.format(g(33, w_by_name=1))) print('g(33, w_by_name=1, y=0) = {}'.format(g(33, w_by_name=1, y=0))) """ Explanation: Setting a Default Value for an Argument Let's say you want to define a function that adds two numbers, except that if you only provide one number, the other input is assumed to be one. In Python, the default value for parameters achieves this effect. In Theano we make use of the <a href="http://deeplearning.net/software/theano/library/compile/io.html#function-inputs">In</a> class, which allows you to specify properties of your function's parameters with greater detail. Here we give a default value of 1 for y by creating an In instance with its value field set to 1. Inputs with default values must follow inputs without default values (like Python's functions). There can be multiple inputs with default values. These parameters can be set positionally or by name, as in standard Python. 
End of explanation """ state = shared(0) inc = T.iscalar('inc') accumulator = function([inc], state, updates=[(state, state+inc)]) """ Explanation: Maintaining State with Shared Variables It is also possible to make a function with an internal state. For example, let’s say we want to make an accumulator: at the beginning, the state is initialized to zero. Then, on each function call, the state is incremented by the function’s argument. First let’s define the accumulator function. It adds its argument to the internal state, and returns the old state value. End of explanation """ print(state.get_value()) print(accumulator(1)) print(state.get_value()) print(accumulator(300)) print(state.get_value()) """ Explanation: This code introduces a couple of new concepts. The shared function constructs so-called shared variables. state = shared(0) These are hybrid symbolic and non-symbolic variables whose value may be shared between multiple functions. Shared variables can be used in symbolic expressions but they also have an internal value that defines the value taken by this symbolic variable in all the functions that use it. It is called a shared variable because its value is shared between many functions. The value can be accessed and modified by the get_value and set_value methods. The other new thing in this code is the updates parameter of function. updates=[(state, state+inc) updates must be supplied with a list of pairs of the form (shared-variable, new expression). It can also be a dictionary whose keys are shared-variables and values are the new expressions. Here, the accumulator replaces the state‘s value with the sum of state and the increment amount inc. End of explanation """ state.set_value(-1) print(accumulator(3)) print(state.get_value()) """ Explanation: It is possible to reset the state. 
Just use the set_value method: End of explanation """ decrementor = function([inc], state, updates=[(state, state-inc)]) print(decrementor(2)) print(state.get_value()) """ Explanation: As we mentioned above, you can define more than one function to use the same shared variable. These functions can all update the value. End of explanation """ def make_vector(): """ Create and return a new Theano vector. """ pass def make_matrix(): """ Create and return a new Theano matrix. """ pass def elemwise_mul(a, b): """ a: A theano matrix b: A theano matrix Calcuate the elementwise product of a and b and return it """ pass def matrix_vector_mul(a, b): """ a: A theano matrix b: A theano vector Calculate the matrix-vector product of a and b and return it """ pass a = make_vector() b = make_vector() c = elemwise_mul(a, b) d = make_matrix() e = matrix_vector_mul(d, c) f = function([a, b, d], e) import numpy as np rng = np.random.RandomState([1, 2, 3]) a_value = rng.randn(5).astype(a.dtype) b_value = rng.rand(5).astype(b.dtype) c_value = a_value * b_value d_value = rng.randn(5, 5).astype(d.dtype) expected = np.dot(d_value, c_value) actual = f(a_value, b_value, d_value) assert np.allclose(actual, expected) print("SUCCESS!") """ Explanation: You might be wondering why the updates mechanism exists. You can always achieve a similar result by returning the new expressions, and working with them in NumPy as usual. While the updates mechanism can be a syntactic convenience, it is mainly there for efficiency. Updates to shared variables can sometimes be done more quickly using in-place algorithms (e.g. low-rank matrix updates). Also, Theano has more control over where and how shared variables are allocated, which is one of the important elements of getting good performance on the GPU. Exercise: Create and manipulate Theano objects To give you some practice with basic Theano data structures and functions, try making the operations below work by implementing the functions that are needed. 
End of explanation """ import numpy as np rng = np.random dose = np.array([-0.86, -0.3 , -0.05, 0.73]) deaths = np.array([0, 1, 3, 5]) training_steps = 1000 """ Explanation: Example: Logistic regression Here is a non-trivial example, which uses Theano to estimate the parameters of a logistic regression model using gradient information. We will use the bioassay example as a test case: End of explanation """ x = T.vector("x") y = T.vector("y") w = theano.shared(1., name="w") b = theano.shared(0., name="b") print("Initial model:", w.get_value(), b.get_value()) """ Explanation: We first declare Theano symbolic variables: End of explanation """ # Probability that target = 1 p_1 = 1 / (1 + T.exp(-(x*w + b))) # The prediction threshold prediction = p_1 > 0.5 # Cross-entropy loss function xent = -y * T.log(p_1) - (5-y) * T.log(1-p_1) # The cost to minimize cost = xent.mean() # Compute the gradient of the cost gw, gb = T.grad(cost, [w, b]) """ Explanation: ... then construct the expression graph: End of explanation """ step = theano.shared(10., name='step') train = theano.function( inputs=[x, y], outputs=[prediction, xent], updates=((w, w - step * gw), (b, b - step * gb), (step, step * 0.99))) predict = theano.function(inputs=[x], outputs=prediction) """ Explanation: Compile Theano functions: End of explanation """ for i in range(training_steps): pred, err = train(dose, deaths) print("Final model:", w.get_value(), b.get_value()) %matplotlib inline import matplotlib.pyplot as plt logit = lambda x: 1. 
/ (1 + np.exp(-x)) xvals = np.linspace(-1, 1) plt.plot(xvals, logit(7.8*xvals + .85)) plt.plot(dose, deaths/5., 'ro') """ Explanation: Train model: End of explanation """ def grad_sum(x, y, z): """ x: A theano variable y: A theano variable z: A theano expression involving x and y Returns dz / dx + dz / dy """ pass x = T.scalar() y = T.scalar() z = x + y s = grad_sum(x, y, z) assert s.eval({x: 0, y: 0}) == 2 print("SUCCESS!") """ Explanation: Exercises: Gradients and functions Let's try using the Theano automatic gradient system to compute derivatives. End of explanation """ from theano.tensor.shared_randomstreams import RandomStreams srng = RandomStreams(seed=234) rv_u = srng.uniform((2,2)) f = function([], rv_u) f() f() """ Explanation: Random Numbers Because in Theano you first express everything symbolically and afterwards compile this expression to get functions, using pseudo-random numbers is not as straightforward as it is in NumPy. The way to think about putting randomness into Theano’s computations is to put random variables in your graph. Theano will allocate a NumPy RandomStream object (a random number generator) for each such variable, and draw from it as necessary. We will call this sort of sequence of random numbers a random stream. End of explanation """ k = T.iscalar("k") A = T.vector("A") # Symbolic description of the result result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A, outputs_info=T.ones_like(A), non_sequences=A, n_steps=k) # We only care about A**k, but scan has provided us with A**1 through A**k. # Discard the values that we don't care about. Scan is smart enough to # notice this and not waste memory saving them. final_result = result[-1] # compiled function that returns A**k power = theano.function(inputs=[A,k], outputs=final_result, updates=updates) print(power(range(10),2)) print(power(range(10),4)) """ Explanation: Looping in Theano The scan function provides the ability to write loops in Theano. 
We are not able to use Python for loops with Theano because Theano needs to be able to build and optimize the expression graph before compiling it into faster code, and be able to use symbolic differentiation for calculating gradients. Simple loop with accumulation Assume that, given $k$ you want to get $A^k$ using a loop. More precisely, if $A$ is a tensor you want to compute $A^k$ elementwise. The python code might look like: python result = 1 for i in range(k): result = result * A There are three things here that we need to handle: the initial value assigned to result, the accumulation of results in result, and the unchanging variable A. Unchanging variables are passed to scan as non_sequences. Initialization occurs in outputs_info, and the accumulation happens automatically. The equivalent Theano code would be: End of explanation """ coefficients = theano.tensor.vector("coefficients") x = T.scalar("x") # Generate the components of the polynomial components, updates = theano.scan(fn=lambda coefficient, power, val: coefficient * (val ** power), outputs_info=None, sequences=[coefficients, theano.tensor.arange(1000)], non_sequences=x) # Sum them up polynomial = components.sum() # Compile a function calculate_polynomial = theano.function(inputs=[coefficients, x], outputs=polynomial) # Test test_coefficients = np.asarray([1, 0, 2], dtype=np.float32) test_value = 3 print(calculate_polynomial(test_coefficients, test_value)) """ Explanation: Let us go through the example line by line. What we did is first to construct a function (using a lambda expression) that given prior_result and A returns prior_result * A. The order of parameters is fixed by scan: the output of the prior call to fn is the first parameter, followed by all non-sequences. Next we initialize the output as a tensor with same shape and dtype as A, filled with ones. We give A to scan as a non sequence parameter and specify the number of steps k to iterate over our lambda expression. 
Scan returns a tuple containing our result (result) and a dictionary of updates (empty in this case). Note that the result is not a matrix, but a 3D tensor containing the value of $A^k$ for each step. We want the last value (after k steps) so we compile a function to return just that. Note that there is an optimization, that at compile time will detect that you are using just the last value of the result and ensure that scan does not store all the intermediate values that are used. So do not worry if A and k are large. In addition to looping a fixed number of times, scan can iterate over the leading dimension of tensors (similar to Python’s list comprehension for x in a_list). The tensor(s) to be looped over should be provided to scan using the sequence keyword argument. Here’s an example that builds a symbolic calculation of a polynomial from a list of its coefficients: End of explanation """ from pymc3.examples import glm_linear from pymc3 import sample, Metropolis, NUTS with glm_linear.model: trace_metropolis = sample(1000, step=Metropolis()) with glm_linear.model: trace_nuts = sample(1000, step=NUTS()) traceplot(trace_metropolis); traceplot(trace_nuts); """ Explanation: Gradient-based sampling methods in PyMC3 PyMC3 has the standard sampling algorithms like adaptive Metropolis-Hastings and adaptive slice sampling, but PyMC3's most capable step method is the No-U-Turn Sampler. NUTS is especially useful on models that have many continuous parameters, a situation where other MCMC algorithms work very slowly. It takes advantage of information about where regions of higher probability are, based on the gradient of the log posterior-density. This helps it achieve dramatically faster convergence on large problems than traditional sampling methods achieve. PyMC3 relies on Theano to analytically compute model gradients via automatic differentiation of the posterior density. 
NUTS also has several self-tuning strategies for adaptively setting the tunable parameters of Hamiltonian Monte Carlo. For random variables that are undifferentiable (namely, discrete variables) NUTS cannot be used, but it may still be used on the differentiable variables in a model that contains undifferentiable variables. Comparison of Metropolis and NUTS As an informal comparison, we will demonstrate samples generated from a simple statistical model using both the Metropolis and NUTS sampler in PyMC3. The set of examples includes a univariate linear model that is fit to simulated data via the glm module. python with Model() as model: glm.glm('y ~ x', data) The model contains three parameters (intercept, slope and sampling standard deviation), each of which is continuous, so the model can be fit by either algorithm. We will run a short chain for each, and compare the output graphically: End of explanation """ from pymc3 import Exponential, Poisson, switch, log, sum as tsum, exp, Model, Potential disasters_data = np.array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4, 5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, 2, 2, 1, 1, 1, 1, 3, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2, 3, 3, 1, 1, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]) years = len(disasters_data) def log_sum_exp(x): return log(tsum(exp(x))) with Model() as marginal_model: # Priors for pre- and post-switch mean number of disasters early_mean = Exponential('early_mean', lam=1.) late_mean = Exponential('late_mean', lam=1.) 
# Generate all possible combinations of switchpoints and years year_idx = np.arange(years) s, t = np.array([(t,s) for t in year_idx for s in year_idx]).T # Marginalize the switchpoint log_like = log_sum_exp(-log(years) + Poisson.dist(switch(t<s, early_mean, late_mean)) .logp(disasters_data[t]).reshape((years, years)).sum(1)) # Data likelihood disasters = Potential('disasters', log_like) with marginal_model: trace_marginal = sample(2000) from pymc3 import plot_posterior plot_posterior(trace_marginal[1000:]) from pymc3 import effective_n effective_n(trace_marginal[1000:]) """ Explanation: The samples from Metropolis shows very poor mixing during the first 1000 iterations, and has clearly converged. The NUTS samples are more homogeneous, with better mixing and less autocorrelation. Marginalization of discrete parameters Gradient-based MCMC samplers like HMC and NUTS do not support the sampling of discrete parameters because discrete parameters, being non-continuous, do not support the calculation of gradients. In PyMC3, users are left with two options when dealing with models that include discrete random variables: Fit the model using a mixed sampling scheme, with gradient-based samplers for the continuous parameters and Metropolis samplers for the discrete parameters Marginalize the discrete parameters out of the model The advantage of the first option is that it is easy; in fact, PyMC3 will do this for you automatically unless you specify otherwise. The disadvantage is that, at best, you sacrifice the efficiency of gradient-based sampling, since you would still need to run models an order of magnitude longer (or more) to allow the discrete variables to converge. Also, the effect on the gradient-based sampler of having other variables not used in the gradient calculation changing at each iteration is not well-known. 
The advantage of margnialization is that your entire model can then be fit using gradient-based methods, which maximizes sampling efficiency (in terms of effective sample size per iteration). The disadvantage is that it requires some math in order to correctly marginalize the discrete variables. Marginalization is feasible if the discrete variables are bounded. We can try re-formulating the coal mining disasters example from earlier in the course. Recall that the model was described as the following: $$\begin{array}{ccc} (y_t | \tau, \lambda_1, \lambda_2) \sim\text{Poisson}\left(r_t\right), & r_t=\left{ \begin{array}{lll} \lambda_1 &\text{if}& t< \tau\ \lambda_2 &\text{if}& t\ge \tau \end{array}\right.,&t\in[t_l,t_h]\ \tau \sim \text{DiscreteUniform}(t_l, t_h)\ \lambda_1\sim \text{Exponential}(a)\ \lambda_2\sim \text{Exponential}(b) \end{array}$$ We wish to eliminate the discrete changepoint $\tau$ by marginalization. That is, we would like to express the factorization of the joint probability as follows: $$\pi(y, \lambda_1, \lambda_2) = L(y | \lambda_1, \lambda_2) \pi(\lambda_1, \lambda_2)$$ the marginalization proceeds by summing over the values of the discrete parameter (the years in the time series): $$\begin{array}{} L(y | \lambda_1, \lambda_2) &= \sum_{t=0}^{110} L(Y | \lambda_1, \lambda_2, \tau=t) p(\tau=t) \ &= \sum_{t=0}^{110} U(t | 0, 110) \prod_{s=0}^{110} \text{Poisson}(y_{s < t}| \lambda_1) \text{Poisson}(y_{s >= t}| \lambda_2) \end{array}$$ End of explanation """ from pymc3 import DiscreteUniform with Model() as original_model: # Prior for distribution of switchpoint location switchpoint = DiscreteUniform('switchpoint', lower=0, upper=years) # Priors for pre- and post-switch mean number of disasters early_mean = Exponential('early_mean', lam=1.) late_mean = Exponential('late_mean', lam=1.) 
# Allocate appropriate Poisson rates to years before and after current # switchpoint location idx = np.arange(years) rate = switch(switchpoint >= idx, early_mean, late_mean) # Data likelihood disasters = Poisson('disasters', rate, observed=disasters_data) with original_model: trace_original = sample(2000, njobs=2) effective_n(trace_original[1000:]) """ Explanation: Let's compare this with the original formulation: End of explanation """
graphistry/pygraphistry
demos/demos_databases_apis/arango/arango_tutorial.ipynb
bsd-3-clause
!pip install python-arango --user -q from arango import ArangoClient import pandas as pd import graphistry def paths_to_graph(paths, source='_from', destination='_to', node='_id'): nodes_df = pd.DataFrame() edges_df = pd.DataFrame() for graph in paths: nodes_df = pd.concat([ nodes_df, pd.DataFrame(graph['vertices']) ], ignore_index=True) edges_df = pd.concat([ edges_df, pd.DataFrame(graph['edges']) ], ignore_index=True) nodes_df = nodes_df.drop_duplicates([node]) edges_df = edges_df.drop_duplicates([node]) return graphistry.bind(source=source, destination=destination, node=node).nodes(nodes_df).edges(edges_df) def graph_to_graphistry(graph, source='_from', destination='_to', node='_id'): nodes_df = pd.DataFrame() for vc_name in graph.vertex_collections(): nodes_df = pd.concat([nodes_df, pd.DataFrame([x for x in graph.vertex_collection(vc_name)])], ignore_index=True) edges_df = pd.DataFrame() for edge_def in graph.edge_definitions(): edges_df = pd.concat([edges_df, pd.DataFrame([x for x in graph.edge_collection(edge_def['edge_collection'])])], ignore_index=True) return graphistry.bind(source=source, destination=destination, node=node).nodes(nodes_df).edges(edges_df) """ Explanation: ArangoDB with Graphistry We explore Game of Thrones data in ArangoDB to show how Arango's graph support interops with Graphistry pretty quickly. This tutorial shares two sample transforms: * Visualize the full graph * Visualize the result of a traversal query Each runs an AQL query via python-arango, automatically converts to pandas, and plots with graphistry. 
Setup End of explanation """ # To specify Graphistry account & server, use: # graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com') # For more options, see https://github.com/graphistry/pygraphistry#configure client = ArangoClient(protocol='http', host='localhost', port=8529) db = client.db('GoT', username='root', password='1234') """ Explanation: Connect End of explanation """ paths = db.graph('theGraph').traverse( start_vertex='Characters/4814', direction='outbound', strategy='breadthfirst' )['paths'] g = paths_to_graph(paths) g.bind(point_title='name').plot() """ Explanation: Demo 1: Traversal viz Use python-arango's traverse() call to descendants of Ned Stark Convert result paths to pandas and Graphistry Plot, and instead of using raw Arango vertex IDs, use the first name End of explanation """ g = graph_to_graphistry( db.graph('theGraph') ) g.bind(point_title='name').plot() """ Explanation: Demo 2: Full graph Use python-arango on a graph to identify and download the involved vertex/edge collections Convert the results to pandas and Graphistry Plot, and instead of using raw Arango vertex IDs, use the first name End of explanation """
rescu/brainstorm
simple_harmonic_oscillator/simple_harmonic_oscillator.ipynb
mit
%matplotlib inline import numpy as np import matplotlib.pyplot as plt def undamped_oscillator_euler(x0,v0,k,m,tmax,dt): """ Numerically integrate the equation of motion for an undamped harmonic oscillator using a simple euler method. """ # calculate the number of time steps num_time_steps = np.floor(tmax/dt) time = np.linspace(0, tmax, num_time_steps) # define arrays for position and velocity x = np.zeros(num_time_steps) v = np.zeros(num_time_steps) # apply initial conditions x[0] = x0 v[0] = v0 #define constants omega = np.sqrt(k/m) # use F = ma and the euler method to integrate the equation of motion for i in range(1,len(time)): a = -k/m * x[i-1] x[i] = v[i-1]*dt + 0.5 * a * dt**2 + x[i-1] v[i] = a * dt + v[i-1] return (x,v,time) def undamped_oscillator_exact_pos(A,omega,phi,t): return A*np.cos(omega*t + phi) def undamped_oscillator_exact_vel(A,omega,phi,t): return -A*omega*np.sin(omega*t+phi) # initial conditions for the simulation x0 = 1 v0 = 0 k = 10 m = 1 tmax = 10 dt = .1 # results we derived from our analytical analysis above omega = np.sqrt(k/m) phi = np.arctan(-v0 / (omega*x0)) A = np.sqrt(x0**2+v0**2/omega**2) # generate numerical trajectory given initial conditions x,v,t = undamped_oscillator_euler(x0,v0,k,m,tmax,dt) ax1 = plt.subplot(211) ax2 = plt.subplot(212) ax1.plot(t,x) ax1.plot(t,undamped_oscillator_exact_pos(A,omega,phi,t),linestyle='--') ax1.set_ylabel('x(t)') ax2.plot(t,v) ax2.set_ylabel('v(t)') ax2.set_xlabel('t') ax2.plot(t,undamped_oscillator_exact_vel(A,omega,phi,t),linestyle='--') plt.tight_layout() """ Explanation: Classical Harmonic Oscillator Many problems in physics come down to this simple relation: $$ \ddot{x} = -\omega^2 x $$ where $x$ can be any quantity and $\omega$ can be any combination of relevant constants. The resultant motion is known as "simple harmonic motion", i.e. 
$$ x(t) = A \cos(\omega t + \phi) $$ Where $A$ is the amplitude of the motion, $\omega$ is the collection of various constants from before, and $\phi$ is a phase that is set by the initial conditions of the problem. It is traditional to consider a spring-mass system where a spring with rest length $x_0$ and spring constant $k$ is attached to a mass $m$. If we write down our expression for $\ddot{x}$ using Newton's second law we find $$ m \ddot{x} = -k x $$ which reduces to $$ \ddot{x} = -\omega^2 x $$ where $$ \omega = \sqrt{\frac{k}{m}} $$ We can see that the oscillation frequency of the spring mass system is determined by the stiffness, $k$, of the spring and mass, $m$, we have attached to it. The Analytical Solution Given a spring-mass system with mass $m$ and spring constant $k$ we can derive the motion of the system analytically and compare our results from a numerical simulation. We know the solution has form $$ x(t) = A \cos(\omega t + \phi)\ v(t) = -A \omega \sin(\omega t + \phi) $$ so we can apply our initial conditions $$ x(0) = x_0 = A \cos(\phi) \ v(0) = v_0 = -A \omega \sin(\phi) $$ Dividing one equation by another we get $$ \tan(\phi) = \frac{ -v_0 }{ \omega x_0 } $$ or $$ \phi = \tan^{-1}\left( \frac{-v_0}{\omega x_0} \right) $$ and we can plug this result back into our initial condition for position and find that $$ A = \sqrt{ x_0^2 + \left(\frac{v_0}{\omega}\right)^2} $$ End of explanation """ def oscillator_energy(x,v,k,m): return 0.5*m*v**2 + 0.5*k*x**2 ax = plt.subplot(111) ax.plot(t, oscillator_energy(x,v,k,m)) ax.plot(t, oscillator_energy( undamped_oscillator_exact_pos(A,omega,phi,t), undamped_oscillator_exact_vel(A,omega,phi,t), k, m )) """ Explanation: We notice that after a few oscillations our numerical solution does not agree so well with our analytical result. We can quantify this by looking at the deviation in the energy as a function of time. 
The kinetic energy of the system is $$ T = \frac{1}{2} m \dot{x}^2 $$ and the potential energy is $$ V = \frac{1}{2} k x^2 $$ There is no external work being done on the spring-mass system, so the total energy of the system is conserved, i.e. $$ E_{tot} = T + V = \mathrm{const} $$ Exercise: Show that $ T + V = \frac{1}{2} k A^2 $ End of explanation """ # initial conditions for the simulation x0 = 1 v0 = 0 k = 10 m = 1 tmax = 10 # results we derived from our analytical analysis above omega = np.sqrt(k/m) phi = np.arctan(-v0 / (omega*x0)) A = np.sqrt(x0**2+v0**2/omega**2) exact_Etot = 0.5 * k * A**2 # list of dts to step through dts = np.logspace(-1,-5,30) errors = np.zeros(dts.size) for i,dt in enumerate(dts): x,v,t = undamped_oscillator_euler(x0,v0,k,m,tmax,dt) errors[i] = 100 * (oscillator_energy(x[-1],v[-1],k,m) - exact_Etot) / exact_Etot ax = plt.subplot(111) plt.loglog(dts,errors,'bo--') ax.set_xlabel('dt') ax.set_ylabel('Energy Error (%)') """ Explanation: Error vs time step study End of explanation """
vitojph/2016progpln
notebooks/12-word2vec.ipynb
mit
import gensim, logging, os logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) """ Explanation: Ejemplo de word2vec con gensim En la siguiente celda, importamos las librerías necesarias y configuramos los mensajes de los logs. End of explanation """ class Corpus(object): '''Clase Corpus que permite leer de manera secuencial un directorio de documentos de texto''' def __init__(self, directorio): self.directory = directorio def __iter__(self): for fichero in os.listdir(self.directory): for linea in open(os.path.join(self.directory, fichero)): yield linea.split() """ Explanation: Entrenamiento de un modelo Implemento una clase Corpus con un iterador sobre un directorio que contiene ficheros de texto. Utilizaré una instancia de Corpus para poder procesar de manera más eficiente una colección, sin necesidad de cargarlo previamente en memoria. End of explanation """ CORPUSDIR = '/opt/textos/efe/txt/' oraciones = Corpus(CORPUSDIR) #model = gensim.models.Word2Vec(oraciones, min_count=10, size=150, workers=2) # el modelo puede entrenarse en dos pasos sucesivos pero por separado #model = gensim.models.Word2Vec() # modelo vacío #model.build_vocab(oraciones) # primera pasada para crear la lista de vocabulario #model.train(other_sentences) # segunda pasada para calcula vectores """ Explanation: CORPUSDIR contiene una colección de noticias en español (normalizada previamente a minúsculas y sin signos de puntuación) con alrededor de 150 millones de palabras. Entrenamos un modelo en un solo paso, ignorando aquellos tokens que aparecen menos de 10 veces, para descartar erratas. End of explanation """ #model.save('/opt/textos/efe/efe.model.w2v') """ Explanation: Una vez completado el entrenamiento (después de casi 30 minutos), guardamos el modelo en disco. 
End of explanation """ model = gensim.models.Word2Vec.load('/opt/textos/efe/efe.model.w2v') """ Explanation: En el futuro, podremos utilizar este modelo cargándolo en memoria con la instrucción: End of explanation """ print(model.corpus_count) """ Explanation: Probando nuestro modelo El objeto model contiene una enorme matriz de números: una tabla, donde cada fila es uno de los términos del vocabulario reconocido y cada columna es una de las características que permiten modelar el significado de dicho término. En nuestro modelo, tal y como está entrenado, tenemos más de 26 millones de términos: End of explanation """ print(model['azul'], '\n') print(model['verde'], '\n') print(model['microsoft']) """ Explanation: Cada término del vocabulario está representado como un vector con 150 dimensiones: 105 características. Podemos acceder al vector de un término concreto: End of explanation """ print('hombre - mujer', model.similarity('hombre', 'mujer')) print('madrid - parís', model.similarity('madrid', 'parís')) print('perro - gato', model.similarity('perro', 'gato')) print('gato - periódico', model.similarity('gato', 'periódico')) """ Explanation: Estos vectores no nos dicen mucho, salvo que contienen números muy pequeños :-/ El mismo objeto model permite acceder a una serie de funcionalidades ya implementadas que nos van a permitir evaluar formal e informalmente el modelo. Por el momento, nos contentamos con los segundo: vamos a revisar visualmente los significados que nuestro modelo ha aprendido por su cuenta. 
Podemos calcular la similitud semántica entre dos términos usando el método similarity, que nos devuelve un número entre 0 y 1: End of explanation """ lista1 = 'madrid barcelona gonzález washington'.split() print('en la lista', ' '.join(lista1), 'sobra:', model.doesnt_match(lista1)) lista2 = 'psoe pp ciu epi'.split() print('en la lista', ' '.join(lista2), 'sobra:', model.doesnt_match(lista2)) lista3 = 'publicaron declararon soy negaron'.split() print('en la lista', ' '.join(lista3), 'sobra:', model.doesnt_match(lista3)) lista3 = 'homero saturno cervantes shakespeare cela'.split() print('en la lista', ' '.join(lista3), 'sobra:', model.doesnt_match(lista3)) """ Explanation: Podemos seleccionar el término que no encaja a partir de una determinada lista de términos usando el método doesnt_match: End of explanation """ terminos = 'psoe chicago sevilla aznar podemos estuvieron'.split() for t in terminos: print(t, '==>', model.most_similar(t), '\n') """ Explanation: Podemos buscar los términos más similares usando el método most_similar de nuestro modelo: End of explanation """ print('==> alcalde + mujer - hombre') most_similar = model.most_similar(positive=['alcalde', 'mujer'], negative=['hombre'], topn=3) for item in most_similar: print(item) print('==> madrid + filipinas - españa') most_similar = model.most_similar(positive=['madrid', 'filipinas'], negative=['españa'], topn=3) for item in most_similar: print(item) print('==> michel + fútbol + argentina - españa') most_similar = model.most_similar(positive=['michel', 'fútbol', 'argentina'], negative=['españa'], topn=3) for item in most_similar: print(item) """ Explanation: Con el mismo método most_similar podemos combinar vectores de palabras tratando de jugar con los rasgos semánticos de cada una de ellas para descubrir nuevas relaciones. End of explanation """
JungeAlexander/dl
chapter_9_cnn.ipynb
mit
import matplotlib.cm as cm import matplotlib.pyplot as plt import tensorflow.contrib.keras as keras %matplotlib inline """ Explanation: Convolutional neural networks (CNNs) in keras Lots of keras examples, some including CNNs available here: https://github.com/fchollet/keras/tree/master/examples Specifically, this notebook is based on the following example training a CNN on the MNIST dataset of hand-written digits: https://github.com/fchollet/keras/blob/master/examples/mnist_cnn.py End of explanation """ # Dataset of 60,000 28x28 grayscale images of the 10 digits, along with a test set of 10,000 images. (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() # input image dimensions and class counts img_rows, img_cols = 28, 28 num_classes = 10 x_train[0].shape y_train[0] plt.imshow(x_train[0], cmap=cm.binary) # images are expected as 3D tensors with the third dimension containing different image channels; reshape x to a # 3D tensore with single color channel, the grayscale channel x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1) input_shape = (img_rows, img_cols, 1) x_train[0].shape # convert X to [0,1] x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') y_train[:5] # convert to a one hot encoding of the class labels y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) y_train[:5] """ Explanation: Dataset pre-processing End of explanation """ batch_size = 128 epochs = 7 # increasing this would probably make sense but takes longer to compute # houses a linear stack of layers model = keras.models.Sequential() # add layers to the sequential model model.add(keras.layers.Conv2D(32, # 32 filters/kernels kernel_size=(3, 3), # filter size of 3x3 pixels 
activation='relu', input_shape=input_shape)) model.add(keras.layers.Conv2D(64, (3, 3), activation='relu')) model.add(keras.layers.MaxPooling2D(pool_size=(2, 2))) model.add(keras.layers.Dropout(0.25)) model.add(keras.layers.Flatten()) model.add(keras.layers.Dense(128, activation='relu')) model.add(keras.layers.Dropout(0.5)) model.add(keras.layers.Dense(num_classes, activation='softmax')) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy']) """ Explanation: Specifying the CNN model End of explanation """ keras.utils.plot_model(model, to_file='chapter_9_cnn.png', show_shapes=True) """ Explanation: The model can be visualized as follows: End of explanation """ model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test)) """ Explanation: A convolutional layer 'Conv2D' lookse like this: A max pooling layer 'MaxPooling2D' lookse like this: Training the model End of explanation """ score = model.evaluate(x_test, y_test, verbose=0) print('Test loss:', score[0]) print('Test accuracy:', score[1]) """ Explanation: Testing the model End of explanation """
gcgruen/homework
foundations-homework/05/homework-05-gruen-spotify.ipynb
mit
import requests lil_response = requests.get ('https://api.spotify.com/v1/search?query=Lil&type=artist&country=US&limit=50') lil_data = lil_response.json() print(type(lil_data)) lil_data.keys() lil_data['artists'].keys() lil_artists = lil_data['artists']['items'] #check on what elements are in that list: #print (lil_artists[0]) """ Explanation: Homework 05 Spotify Gianna-Carina Gruen 2016-06-07 End of explanation """ for artist in lil_artists: print(artist['name'], "has a popularity score of", artist['popularity']) """ Explanation: 1) With "Lil Wayne" and "Lil Kim" there are a lot of "Lil" musicians. Do a search and print a list of 50 that are playable in the USA (or the country of your choice), along with their popularity score. End of explanation """ #http://stackoverflow.com/questions/2600191/how-can-i-count-the-occurrences-of-a-list-item-in-python from collections import Counter genre_list = [] for genre in lil_artists: if genre['genres'] != []: genre_list = genre['genres'] + genre_list c = Counter(genre_list) print("These are the counts for each genre:", c) #https://docs.python.org/2/library/collections.html most_common = Counter(genre_list).most_common(1) print("The most common genre is:",most_common) for artist in lil_artists: if artist['genres'] == []: print(artist['name'], "has a popularity score of", artist['popularity'], "But there are no genres listed for this artist.") else: artist_genres = artist['genres'] print(artist['name'], "has a popularity score of", artist['popularity'], "This artist is associated with", ', '.join(artist_genres)) # http://stackoverflow.com/questions/5850986/joining-elements-of-a-list-python """ Explanation: 2) What genres are most represented in the search results? Edit your previous printout to also display a list of their genres in the format "GENRE_1, GENRE_2, GENRE_3". If there are no genres, print "No genres listed". 
Tip: "how to join a list Python" might be a helpful search End of explanation """ most_popular_score = 0 most_popular_name = [] for artist in lil_artists: if artist['popularity'] > most_popular_score: most_popular_name = artist['name'] most_popular_score = artist['popularity'] print(most_popular_name, "is the most popular, with a rating of", most_popular_score) second_max_popular = 0 for artist in lil_artists: if artist['popularity'] >= second_max_popular and artist['popularity'] < most_popular_score: second_max_popular = artist['popularity'] print(artist['name'], "is the second most popular with a popularity rating of",artist['popularity'], "compared to", most_popular_name, "who has a rating of", most_popular_score) """ Explanation: 3) Use a for loop to determine who BESIDES Lil Wayne has the highest popularity rating. End of explanation """ most_followers = 0 for artist in lil_artists: if artist['followers']['total'] > most_followers: most_followers = artist['followers']['total'] print(artist['name'], "has the largest number followers:", artist['followers']['total']) print("The second most popular Lils have the following amount of followers:") second_most_followers = 0 for artist in lil_artists: if artist['popularity'] >= second_max_popular and artist['popularity'] < 86: second_max_popular = artist['popularity'] if artist['followers']['total'] > second_most_followers: second_most_followers = artist['followers']['total'] print(artist['name'], artist['followers']['total']) """ Explanation: Is it the same artist who has the largest number of followers? End of explanation """ kim_popularity = 0 for artist in lil_artists: if artist['name'] == "Lil' Kim": kim_popularity = (artist['popularity']) for artist in lil_artists: if artist['popularity'] > kim_popularity: print(artist['name'], "has a popularity of", artist['popularity'], "which is higher than that of Lil' Kim.") """ Explanation: 4) Print a list of Lil's that are more popular than Lil' Kim. 
End of explanation """ #for artist in lil_artists: #print(artist['name'], artist['id']) #Lil Dicky 1tqhsYv8yBBdwANFNzHtcr toptracks_Dicky_response = requests.get('https://api.spotify.com/v1/artists/1tqhsYv8yBBdwANFNzHtcr/top-tracks?country=US') toptracks_Dicky_data = toptracks_Dicky_response.json() tracks_Dicky = toptracks_Dicky_data['tracks'] print("THESE ARE THE TOP TRACKS OF LIL DICKY:") for track in tracks_Dicky: print(track['name']) #Lil Jon 7sfl4Xt5KmfyDs2T3SVSMK toptracks_Jon_response = requests.get('https://api.spotify.com/v1/artists/7sfl4Xt5KmfyDs2T3SVSMK/top-tracks?country=US') toptracks_Jon_data = toptracks_Jon_response.json() tracks_Jon = toptracks_Jon_data['tracks'] print("THESE ARE THE TOP TRACKS OF LIL JON:") for track in tracks_Jon: print(track['name']) """ Explanation: 5) Pick two of your favorite Lils to fight it out, and use their IDs to print out their top tracks. Tip: You're going to be making two separate requests, be sure you DO NOT save them into the same variable. End of explanation """ print(tracks_Dicky[0].keys()) """ Explanation: 6) Will the world explode if a musicians swears? Get an average popularity for their explicit songs vs. their non-explicit songs. 
End of explanation """ explicit_Dicky_count = 0 non_explicit_Dicky_count = 0 explicit_popularity_Dicky_sum = 0 non_explicit_popularity_Dicky_sum = 0 for track in tracks_Dicky: if track['explicit'] == True: explicit_Dicky_count = explicit_Dicky_count + 1 explicit_popularity_Dicky_sum = explicit_popularity_Dicky_sum + track['popularity'] else: non_explicit_Dicky_count = non_explicit_Dicky_count + 1 non_explicit_popularity_Dicky_sum = non_explicit_popularity_Dicky_sum + track['popularity'] print("The average popularity of explicit Lil Dicky songs is", explicit_popularity_Dicky_sum / explicit_Dicky_count) if non_explicit_Dicky_count == 0: print("There are no non-explicit Lil Dicky songs.") else: print("The average popularity of non-explicit Lil Dicky songs is:", non_explicit_popularity_Dicky_sum / non_explicit_Dicky_count) explicit_Jon_count = 0 non_explicit_Jon_count = 0 explicit_popularity_Jon_sum = 0 non_explicit_popularity_Jon_sum = 0 for track in tracks_Jon: if track['explicit'] == True: explicit_Jon_count = explicit_Jon_count + 1 explicit_popularity_Jon_sum = explicit_popularity_Jon_sum + track['popularity'] else: non_explicit_Jon_count = non_explicit_Jon_count + 1 non_explicit_popularity_Jon_sum = non_explicit_popularity_Jon_sum + track['popularity'] print("The average popularity of explicit Lil Jon songs is", explicit_popularity_Jon_sum / explicit_Jon_count) if non_explicit_Jon_count == 0: print("There are no non-explicit Lil Jon songs.") else: print("The average popularity of non-explicit Lil Jon songs is:", non_explicit_popularity_Jon_sum / non_explicit_Jon_count) """ Explanation: First solution -- this felt like a lot of repeating and as if there was a more efficient way to do it. Turns out, there is! With some explanation from Soma first -- see below. 
End of explanation """ #function writing def add(a, b): value = a + b print("the sum of", a, "and", b, "is", value) add(5, 7) add(1, 2) add(4, 55) """ Explanation: Soma explaining how to write functions in 30 seconds of Lab: End of explanation """ def average_popularity(a, b): explicit_count = 0 non_explicit_count = 0 explicit_popularity_sum = 0 non_explicit_popularity_sum = 0 for track in a: if track['explicit'] == True: explicit_count = explicit_count + 1 explicit_popularity_sum = explicit_popularity_sum + track['popularity'] else: non_explicit_count = non_explicit_count + 1 non_explicit_popularity_sum = non_explicit_popularity_sum + track['popularity'] if explicit_count == 0: print("There are no explicit songs by", b) else: print("The average popularity of explicit songs by", b, "is", explicit_popularity_sum / explicit_count) if non_explicit_count == 0: print("There are no non-explicit songs by", b) else: print("The average popularity of non-explicit songs by", b, "is", non_explicit_popularity_sum / non_explicit_count) average_popularity(tracks_Dicky, "Lil Dicky") average_popularity(tracks_Jon, "Lil Jon") """ Explanation: Based on that, I re-wrote my above code using a function End of explanation """ def explicit_minutes(a, b): explicit_milliseconds = 0 non_explicit_milliseconds = 0 for track in a: if track['explicit'] == True: explicit_milliseconds = explicit_milliseconds + track['duration_ms'] else: non_explicit_milliseconds = non_explicit_milliseconds + track['duration_ms'] if explicit_milliseconds !=0: print(b, "has", explicit_milliseconds / 6000 , "minutes of explicit music.") if non_explicit_milliseconds !=0: print(b, "has", non_explicit_milliseconds / 6000, "minutes of non-explicit music.") else: print(b, "has", "has no non-explicit music.") explicit_minutes(tracks_Dicky, "Lil Dicky") explicit_minutes(tracks_Jon, "Lil Jon") """ Explanation: How many minutes of explicit songs do they have? Non-explicit? 
End of explanation """ import requests biggieT_response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist&limit=50') biggieT_data = biggieT_response.json() biggieT_artists = biggieT_data['artists']['items'] artist_count = 0 for artist in biggieT_artists: artist_count = artist_count + 1 print("There are in total", artist_count, "Biggies.") import requests import math offset_valueB = 0 biggieT_response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist&limit=50&offset=' + str(offset_valueB) + '') biggieT_data = biggieT_response.json() biggieT_artists = biggieT_data['artists']['items'] offset_limitB = biggieT_data['artists']['total'] offset_valueL = 0 lilT_response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist&limit=50&offset=' + str(offset_valueL) + '') lilT_data = lilT_response.json() lilT_artists = lilT_data['artists']['items'] offset_limitL = lilT_data['artists']['total'] page_countB = math.ceil(offset_limitB/ 50) print("The page count for all the Biggies is:", page_countB) page_countL = math.ceil(offset_limitL/ 50) print("The page count for all the Lils is:", page_countL) print("If you made 1 request every 5 seconds, it will take", page_countL * 5, "seconds for all the Lils requests to process. 
Whereas for the Biggies it's", page_countB* 5, ", so the total amount of time is", page_countB*5 + page_countL*5, "seconds.") artist_count = 0 offset_value = 0 for page in range(0, 1): biggieT_response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist&limit=50&offset=' + str(offset_valueB) + '') biggieT_data = biggieT_response.json() biggieT_artists = biggieT_data['artists']['items'] for artist in lilT_artists: artist_count = artist_count + 1 offset_value = offset_value + 50 print("There are in total", artist_count, "Biggies.") artist_count = 0 offset_value = 0 for page in range(0, 91): lilT_response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist&limit=50&offset=' + str(offset_value) + '') lilT_data = lilT_response.json() lilT_artists = lilT_data['artists']['items'] for artist in lilT_artists: artist_count = artist_count + 1 offset_value = offset_value + 50 print("There are in total", artist_count, "Lils.") """ Explanation: 7) Since we're talking about Lils, what about Biggies? How many total "Biggie" artists are there? How many total "Lil"s? If you made 1 request every 5 seconds, how long would it take to download information on all the Lils vs the Biggies? End of explanation """ # tried to solve it with a function as well, but didn't work out, gave an error message. So back to the old way. 
biggie50_response = requests.get('https://api.spotify.com/v1/search?query=biggie&type=artist&limit=50') biggie50_data = biggie50_response.json() biggie50_artists = biggie50_data['artists']['items'] popularity_biggie50 = 0 for artist in biggie50_artists: popularity_biggie50 = popularity_biggie50 + artist['popularity'] print("The average popularity of the top50 Biggies is", popularity_biggie50 / 50) lil50_response = requests.get('https://api.spotify.com/v1/search?query=lil&type=artist&limit=50') lil50_data = lil50_response.json() lil50_artists = lil50_data['artists']['items'] popularity_lil50 = 0 for artist in lil50_artists: popularity_lil50 = popularity_lil50 + artist['popularity'] print("The average popularity of the top50 Lils is", popularity_lil50 / 50) if popularity_biggie50 > popularity_lil50: print("The top50 Biggies are on average more popular than the top50 Lils.") if popularity_biggie50 == popularity_lil50: print("The top50 Biggies are on average as popular as the top50 Lils.") else: print("The top50 Lils are on average more popular than the top50 Biggies.") """ Explanation: 8) Out of the top 50 "Lil"s and the top 50 "Biggie"s, who is more popular on average? End of explanation """
CPernet/LanguageDecision
notebooks/exploratory/2017-07-16-ddm-mixed-data.ipynb
gpl-3.0
%matplotlib inline
%cd ..
import warnings; warnings.filterwarnings('ignore')
""" Explanation: Mixed Data DDM DDM using both patient and matched control data End of explanation """
# Parse the raw .mat recordings for the matched controls and compile the
# per-subject results into a single CSV file of trials.
from utils import matparser, data_compiler
import glob
data_dir = 'data/controls/'
matparser.parse_dir(data_dir)
out_dir = "data/controls.csv"
data_compiler.compile_dir(data_dir, out_dir)
""" Explanation: Start by parsing the .mat files from the matched controls End of explanation """
!cat data/controls.csv | grep -v nan > data/controls_clean.csv
""" Explanation: Clean up - remove nan entries as these cause hddm to fail End of explanation """
# Tag every row with its subject type ("patient"/"control") in the 5th
# column, strip each file's header line (sed 1d) and remove carriage returns.
!cat data/patients_clean.csv | sed 1d | awk -v d="patient" -F"," 'BEGIN { OFS = "," } {$5=d; print}' | tr -d $'\r' > data/patients_tagged.csv
!cat data/controls_clean.csv | sed 1d | awk -v d="control" -F"," 'BEGIN { OFS = "," } {$5=d; print}' | tr -d $'\r' > data/controls_tagged.csv
# Write a fresh header, then append both tagged files.
!echo "response,rt,subj_idx,stim,subj_type" > data/combined_clean.csv
# NOTE(review): controls_tagged.csv already had its header removed by the
# `sed 1d` above, so the extra `sed 1d` here looks like it drops the first
# control data row - confirm whether that is intended.
!cat data/patients_tagged.csv >> data/combined_clean.csv; sed 1d data/controls_tagged.csv >> data/combined_clean.csv
""" Explanation: Merge patient and matched control data to single .csv file First, create tagged versions of the csv files - include "tag" column to differentiate between patient and control End of explanation """
# Fit a hierarchical drift-diffusion model: drift rate v may depend on both
# stimulus and subject type, decision threshold a on subject type only.
import hddm
data = hddm.load_csv('data/combined_clean.csv')
model = hddm.HDDM(data, depends_on={'v': ['stim', 'subj_type'], 'a': 'subj_type'})
model.find_starting_values()
model.sample(6000, burn=20)
""" Explanation: Build HDDM model End of explanation """
gdementen/larray
doc/source/tutorial/tutorial_sessions.ipynb
gpl-3.0
%xmode Minimal from larray import * """ Explanation: Working With Sessions Import the LArray library: End of explanation """ # define some scalars, axes and arrays variant = 'baseline' country = Axis('country=Belgium,France,Germany') gender = Axis('gender=Male,Female') time = Axis('time=2013..2017') population = zeros([country, gender, time]) births = zeros([country, gender, time]) deaths = zeros([country, gender, time]) # create an empty session and objects one by one after s = Session() s.variant = variant s.country = country s.gender = gender s.time = time s.population = population s.births = births s.deaths = deaths print(s.summary()) # or create a session in one step by passing all objects to the constructor s = Session(variant=variant, country=country, gender=gender, time=time, population=population, births=births, deaths=deaths) print(s.summary()) """ Explanation: Three Kinds Of Sessions They are three ways to group objects in LArray: Session: is an ordered dict-like container with special I/O methods. Although the autocomplete* feature on the objects stored in the session is available in the larray-editor, it is not available in development tools like PyCharm making it cumbersome to use. CheckedSession: provides the same methods as Session objects but are defined in a completely different way (see example below). The autocomplete* feature is both available in the larray-editor and in development tools (PyCharm). In addition, the type of each stored object is protected. Optionally, it is possible to constrain the axes and dtype of arrays using CheckedArray. CheckedParameters: is a special version of CheckedSession in which the value of all stored objects (parameters) is frozen after initialization. * Autocomplete is the feature in which development tools try to predict the variable or function a user intends to enter after only a few characters have been typed (like word completion in cell phones). 
Creating Sessions Session Create a session: End of explanation """ class Demography(CheckedSession): # (convention is to declare parameters (read-only objects) in capital letters) # Declare 'VARIANT' parameter as of type string. # 'VARIANT' will be initialized when a 'Demography' session will be created VARIANT: str # declare variables with an initialization value. # Their type is deduced from their initialization value. COUNTRY = Axis('country=Belgium,France,Germany') GENDER = Axis('gender=Male,Female') TIME = Axis('time=2013..2017') population = zeros([COUNTRY, GENDER, TIME], dtype=int) births = zeros([COUNTRY, GENDER, TIME], dtype=int) # declare 'deaths' with constrained axes and dtype. # Its type (Array), axes and dtype are not modifiable. # It will be initialized with 0 deaths: CheckedArray([COUNTRY, GENDER, TIME], int) = 0 d = Demography(VARIANT='baseline') print(d.summary()) """ Explanation: CheckedSession The syntax to define a checked-session is a bit specific: python class MySession(CheckedSession): # Variables can be declared in two ways: # a) by specifying only the type of the variable (to be initialized later) var1: Type # b) by giving an initialization value. 
# In that case, the type is deduced from the initialization value var2 = initialization value # Additionally, axes and dtype of Array variables can be constrained # using the special type CheckedArray arr1: CheckedArray([list, of, axes], dtype) = initialization value Check the example below: End of explanation """ # create a new Session object and load all arrays, axes, groups and metadata # from all CSV files located in the passed directory csv_dir = get_example_filepath('demography_eurostat') s = Session(csv_dir) # create a new Session object and load all arrays, axes, groups and metadata # stored in the passed Excel file filepath_excel = get_example_filepath('demography_eurostat.xlsx') s = Session(filepath_excel) # create a new Session object and load all arrays, axes, groups and metadata # stored in the passed HDF5 file filepath_hdf = get_example_filepath('demography_eurostat.h5') s = Session(filepath_hdf) print(s.summary()) """ Explanation: Loading and Dumping Sessions One of the main advantages of grouping arrays, axes and groups in session objects is that you can load and save all of them in one shot. Like arrays, it is possible to associate metadata to a session. These can be saved and loaded in all file formats. 
Loading Sessions (CSV, Excel, HDF5) To load the items of a session, you have two options: 1) Instantiate a new session and pass the path to the Excel/HDF5 file or to the directory containing CSV files to the Session constructor: End of explanation """ # create a session containing 3 axes, 2 groups and one array 'population' filepath = get_example_filepath('population_only.xlsx') s = Session(filepath) print(s.summary()) # call the load method on the previous session and add the 'births' and 'deaths' arrays to it filepath = get_example_filepath('births_and_deaths.xlsx') s.load(filepath) print(s.summary()) """ Explanation: 2) Call the load method on an existing session and pass the path to the Excel/HDF5 file or to the directory containing CSV files as first argument: End of explanation """ births_and_deaths_session = Session() # use the names argument to only load births and deaths arrays births_and_deaths_session.load(filepath_hdf, names=['births', 'deaths']) print(births_and_deaths_session.summary()) """ Explanation: The load method offers some options: 1) Using the names argument, you can specify which items to load: End of explanation """ s = Session() # with display=True, the load method will print a message # each time a new item is loaded s.load(filepath_hdf, display=True) """ Explanation: 2) Setting the display argument to True, the load method will print a message each time a new item is loaded: End of explanation """ # save items of a session in CSV files. # Here, the save method will create a 'demography' directory in which CSV files will be written s.save('demography') # save the session to an HDF5 file s.save('demography.h5') # save the session to an Excel file s.save('demography.xlsx') """ Explanation: Dumping Sessions (CSV, Excel, HDF5) To save a session, you need to call the save method. 
The first argument is the path to a Excel/HDF5 file or to a directory if items are saved to CSV files: End of explanation """ # use the names argument to only save births and deaths arrays s.save('demography.h5', names=['births', 'deaths']) # load session saved in 'demography.h5' to see its content Session('demography.h5').names """ Explanation: <div class="alert alert-info"> Note: Concerning the CSV and Excel formats, the metadata is saved in one Excel sheet (CSV file) named `__metadata__(.csv)`. This sheet (CSV file) name cannot be changed. </div> The save method has several arguments: 1) Using the names argument, you can specify which items to save: End of explanation """ population = read_csv('./demography/population.csv') pop_ses = Session([('population', population)]) # by setting overwrite to False, the destination file is updated instead of overwritten. # The items already stored in the file but not present in the session are left intact. # On the contrary, the items that exist in both the file and the session are completely overwritten. pop_ses.save('demography.h5', overwrite=False) # load session saved in 'demography.h5' to see its content Session('demography.h5').names """ Explanation: 2) By default, dumping a session to an Excel or HDF5 file will overwrite it. 
By setting the overwrite argument to False, you can choose to update the existing Excel or HDF5 file: End of explanation """ # with display=True, the save method will print a message # each time an item is dumped s.save('demography.h5', display=True) """ Explanation: 3) Setting the display argument to True, the save method will print a message each time an item is dumped: End of explanation """ # load a session representing the results of a demographic model filepath_hdf = get_example_filepath('demography_eurostat.h5') s = Session(filepath_hdf) # print the content of the session print(s.names) """ Explanation: Exploring Content To get the list of items names of a session, use the names shortcut (be careful that the list is sorted alphabetically and does not follow the internal order!): End of explanation """ # print the content of the session print(s.summary()) """ Explanation: To get more information of items of a session, the summary will provide not only the names of items but also the list of labels in the case of axes or groups and the list of axes, the shape and the dtype in the case of arrays: End of explanation """ s['population'] """ Explanation: Selecting And Filtering Items Session objects work like ordinary dict Python objects. To select an item, use the usual syntax &lt;session_var&gt;['&lt;item_name&gt;']: End of explanation """ s.population """ Explanation: A simpler way consists in the use the syntax &lt;session_var&gt;.&lt;item_name&gt;: End of explanation """ s_selected = s['population', 'births', 'deaths'] s_selected.names """ Explanation: <div class="alert alert-warning"> **Warning:** The syntax ``session_var.item_name`` will work as long as you don't use any special character like ``, ; :`` in the item's name. 
</div> To return a new session with selected items, use the syntax &lt;session_var&gt;[list, of, item, names]: End of explanation """ d_selected = d['births', 'deaths'] # test if v_selected is a checked-session print('is still a check-session?', isinstance(d_selected, CheckedSession)) #test if v_selected is a normal session print('is now a normal session?', isinstance(d_selected, Session)) """ Explanation: <div class="alert alert-warning"> **Warning:** The same selection as above can be applied on a checked-session **but the returned object is a normal session and NOT a checked-session**. This means that you will loose all the benefits (autocomplete, protection on type, axes and dtype) of checked-sessions. </div> End of explanation """ # select only arrays of a session s.filter(kind=Array) # selection all items with a name starting with a letter between a and k s.filter(pattern='[a-k]*') """ Explanation: The filter method allows you to select all items of the same kind (i.e. all axes, or groups or arrays) or all items with names satisfying a given pattern: End of explanation """ d_filtered = d.filter(pattern='[a-k]*') # test if v_selected is a checked-session print('is still a check-session?', isinstance(d_filtered, CheckedSession)) #test if v_selected is a normal session print('is now a normal session?', isinstance(d_filtered, Session)) """ Explanation: <div class="alert alert-warning"> **Warning:** Using the *filter()* method on a checked-session **will return a normal session and NOT a checked-session**. This means that you will loose all the benefits (autocomplete, protection on type, axes and dtype) of checked-sessions. 
</div> End of explanation """ # iterate over item names for key in s.keys(): print(key) # iterate over items for value in s.values(): if isinstance(value, Array): print(value.info) else: print(repr(value)) print() # iterate over names and items for key, value in s.items(): if isinstance(value, Array): print(key, ':') print(value.info) else: print(key, ':', repr(value)) print() """ Explanation: Iterating over Items Like the built-in Python dict objects, Session objects provide methods to iterate over items: End of explanation """ class Demography(CheckedSession): COUNTRY = Axis('country=Belgium,France,Germany') GENDER = Axis('gender=Male,Female') TIME = Axis('time=2013..2017') population = zeros([COUNTRY, GENDER, TIME], dtype=int) # declare the deaths array with constrained axes and dtype deaths: CheckedArray([COUNTRY, GENDER, TIME], int) = 0 d = Demography() print(d.summary()) """ Explanation: Manipulating Checked Sessions Note: this section only concerns objects declared in checked-sessions. Let's create a simplified version of the Demography checked-session we have defined above: End of explanation """ # The population variable was initialized with the zeros() function which returns an Array object. # The declared type of the population variable is Array and is protected d.population = Axis('population=child,teenager,adult,elderly') """ Explanation: One of the specificities of checked-sessions is that the type of the contained objects is protected (it cannot change). Any attempt to assign a value of different type will raise an error: End of explanation """ AGE = Axis('age=0..100') d.deaths = zeros([d.COUNTRY, AGE, d.GENDER, d.TIME]) """ Explanation: The death array has been declared as a CheckedArray. As a consequence, its axes are protected. Trying to assign a value with incompatible axes raises an error: End of explanation """ d.deaths = 1.2 d.deaths """ Explanation: The deaths array is also constrained by its declared dtype int. 
This means that if you try to assign a value of type float instead of int, the value will be converted to int if possible: End of explanation """ d.deaths = 'undead' """ Explanation: or raise an error: End of explanation """ # misspell population (forgot the 'a') d.popultion = 0 """ Explanation: It is possible to add a new variable after the checked-session has been initialized but in that case, a warning message is printed (in case you misspelled the name of variable while trying to modify it): End of explanation """ # get population, births and deaths in millions s_div = s / 1e6 s_div.population """ Explanation: Arithmetic Operations On Sessions Session objects accept binary operations with a scalar: End of explanation """ from larray import random random_increment = random.choice([-1, 0, 1], p=[0.3, 0.4, 0.3], axes=s.population.axes) * 1000 random_increment # add some variables of a session by a common array s_rand = s['population', 'births', 'deaths'] + random_increment s_rand.population """ Explanation: with an array (please read the documentation of the random.choice function first if you don't know it): End of explanation """ # compute the difference between each array of the two sessions s_diff = s - s_rand s_diff.births """ Explanation: with another session: End of explanation """ # add the next year to all arrays def add_next_year(array): if 'time' in array.axes.names: last_year = array.time.i[-1] return array.append('time', 0, last_year + 1) else: return array s_with_next_year = s.apply(add_next_year) print('population array before calling apply:') print(s.population) print() print('population array after calling apply:') print(s_with_next_year.population) """ Explanation: Applying Functions On All Arrays In addition to the classical arithmetic operations, the apply method can be used to apply the same function on all arrays. 
This function should take a single element argument and return a single value: End of explanation """ # add the next year to all arrays. # Use the 'copy_values_from_last_year flag' to indicate # whether or not to copy values from the last year def add_next_year(array, copy_values_from_last_year): if 'time' in array.axes.names: last_year = array.time.i[-1] value = array[last_year] if copy_values_from_last_year else 0 return array.append('time', value, last_year + 1) else: return array s_with_next_year = s.apply(add_next_year, True) print('population array before calling apply:') print(s.population) print() print('population array after calling apply:') print(s_with_next_year.population) """ Explanation: It is possible to pass a function with additional arguments: End of explanation """ # load a session representing the results of a demographic model filepath_hdf = get_example_filepath('demography_eurostat.h5') s = Session(filepath_hdf) # create a copy of the original session s_copy = s.copy() # 'element_equals' compare arrays one by one s.element_equals(s_copy) # 'equals' returns True if all items of the two sessions have exactly the same items s.equals(s_copy) # slightly modify the 'population' array for some labels combination s_copy.population += random_increment # the 'population' array is different between the two sessions s.element_equals(s_copy) # 'equals' returns False if at least one item of the two sessions are different in values or axes s.equals(s_copy) # reset the 'copy' session as a copy of the original session s_copy = s.copy() # add an array to the 'copy' session s_copy.gender_ratio = s_copy.population.ratio('gender') # the 'gender_ratio' array is not present in the original session s.element_equals(s_copy) # 'equals' returns False if at least one item is not present in the two sessions s.equals(s_copy) """ Explanation: It is also possible to apply a function on non-Array objects of a session. Please refer the documentation of the apply method. 
Comparing Sessions Being able to compare two sessions may be useful when you want to compare two different models expected to give the same results or when you have updated your model and want to see what are the consequences of the recent changes. Session objects provide the two methods to compare two sessions: equals and element_equals: The equals method will return True if all items from both sessions are identical, False otherwise. The element_equals method will compare items of two sessions one by one and return an array of boolean values. End of explanation """ # reset the 'copy' session as a copy of the original session s_copy = s.copy() # slightly modify the 'population' array for some labels combination s_copy.population += random_increment s_check_same_values = s == s_copy s_check_same_values.population """ Explanation: The == operator return a new session with boolean arrays with elements compared element-wise: End of explanation """ s_check_same_values.time """ Explanation: This also works for axes and groups: End of explanation """ s_check_different_values = s != s_copy s_check_different_values.population """ Explanation: The != operator does the opposite of == operator: End of explanation """
quantumlib/ReCirq
docs/guide/data_analysis.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 Google End of explanation """ try: import recirq except ImportError: !pip install --quiet git+https://github.com/quantumlib/ReCirq """ Explanation: Data analysis <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://quantumai.google/cirq/experiments/guide/data_analysis"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/quantumlib/ReCirq/blob/master/docs/guide/data_analysis.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/quantumlib/ReCirq/blob/master/docs/guide/data_analysis.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/ReCirq/docs/guide/data_analysis.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a> </td> </table> This is the follow up to the data collection tutorial. We have measured bitstrings for the single-qubit circuit $R_y(\theta)$ for various thetas. 
In this analysis, we compute $\langle Z \rangle (\theta)$, compare to the analytically expected true value, and fit to a depolarizing noise model with T1 decay during readout. Setup Install the ReCirq package: End of explanation """ import cirq import recirq from recirq.readout_scan.tasks import EXPERIMENT_NAME, DEFAULT_BASE_DIR """ Explanation: Now import Cirq, ReCirq and the module dependencies: End of explanation """ recirq.fetch_guide_data_collection_data() """ Explanation: Load data We can use utilities in ReCirq to query the filesystem and load in a dataset. Please recall that all tasks have an associated EXPERIMENT_NAME and a dataset_id which define the top two hierarchies in the filesystem. We import these values from the data collection script to ensure consistency. If you're running this notebook in Colab or you haven't yet gone through the Data Collection tutorial, we will download a pre-generated copy of the data for analysis. End of explanation """ import numpy as np import pandas as pd records = [] # Load all data, do some light processing for record in recirq.iterload_records(dataset_id='2020-02-tutorial', base_dir=DEFAULT_BASE_DIR): # Expand task dataclass into columns recirq.flatten_dataclass_into_record(record, 'task') # Unwrap BitArray into np.ndarray all_bitstrings = [ba.bits for ba in record['all_bitstrings']] # Compute <Z> record['z_vals'] = [np.mean((-1)**bitstrings, axis=0).item() for bitstrings in all_bitstrings] # Don't need to carry around the full array of bits anymore del record['all_bitstrings'] records.append(record) df = pd.DataFrame(records) print(len(df)) df.head() """ Explanation: recirq.iterload_records uses these two bits of information to iterate over records saved using recirq.save (in the data collection script. This also gives you a chance to do post-processing on the data. In general, you should do some massaging of the data and put the results into a pandas DataFrame. 
DataFrames are great for doing statistics and visualizations across tabular data. End of explanation """ %matplotlib inline from matplotlib import pyplot as plt entry = df.iloc[0] # Pick the first qubit plt.plot([], []) # advance color cycle in anticipation of future analysis plt.plot(entry['thetas'], entry['z_vals'], 'o-') plt.xlabel('Theta', fontsize=14) plt.ylabel(r'$\langle Z \rangle$', fontsize=14) plt.title("Qubit {}".format(entry['qubit']), fontsize=14) plt.tight_layout() """ Explanation: Plot the data A good first step. End of explanation """ qubit = cirq.LineQubit(0) thetas = df.iloc[0]['thetas'] class _DummyMeasurementGate(cirq.IdentityGate): """A dummy measurement used to trick simulators into applying readout error when using PauliString.expectation_from_xxx.""" def _measurement_key_(self): return 'dummy!' def __repr__(self): if self.num_qubits() == 1: return '_DummyMeasurementGate' return '_DummyMeasurementGate({!r})'.format(self.num_qubits()) def __str__(self): if (self.num_qubits() == 1): return 'dummyM' else: return 'dummyM({})'.format(self.num_qubits()) def _circuit_diagram_info_(self, args): from cirq import protocols return protocols.CircuitDiagramInfo( wire_symbols=('dM',) * self.num_qubits(), connected=True) def dummy_measure(qubits): return _DummyMeasurementGate(num_qubits=len(qubits)).on(*qubits) def get_circuit(theta): return cirq.Circuit([ cirq.ry(theta).on(qubit), dummy_measure([qubit]) ]) true_z_vals = [] for theta in thetas: wf = cirq.final_state_vector(get_circuit(theta)) op = cirq.Z(qubit) * 1. 
true_z_val = op.expectation_from_state_vector(wf, qubit_map={qubit:0}, check_preconditions=False) true_z_vals.append(np.real_if_close(true_z_val).item()) true_z_vals = np.array(true_z_vals) true_z_vals fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11, 4)) ax1.plot(thetas, true_z_vals, '-', label='True') ax1.plot(entry['thetas'], entry['z_vals'], 'o-', label='Data') ax2.plot([], []) # advance color cycle ax2.plot(entry['thetas'], np.abs(true_z_vals - entry['z_vals']), 'o-', label='|Data - True|') ax1.legend(loc='best', frameon=False) ax2.legend(loc='best', frameon=False) ax1.set_xlabel('Theta', fontsize=14) ax2.set_xlabel('Theta', fontsize=14) fig.tight_layout() """ Explanation: How does it compare to analytical results? You could imagine setting up a separate task for computing and saving analytic results. For this single qubit example, we'll just compute it on the fly. End of explanation """ import scipy.optimize import cirq.contrib.noise_models as ccn def get_obj_func(data_expectations): all_results = [] def obj_func(x): depol_prob, decay_prob, readout_prob = x if depol_prob < 0 or decay_prob < 0 or readout_prob < 0: # emulate constraints by returning a high cost if we # stray into invalid territory return 1000 sim = cirq.DensityMatrixSimulator( noise=ccn.DepolarizingWithDampedReadoutNoiseModel( depol_prob=depol_prob, decay_prob=decay_prob, bitflip_prob=readout_prob)) results = [] for theta in thetas: density_result = sim.simulate(get_circuit(theta)) op = cirq.Z(qubit) * 1. 
true_z_val = op.expectation_from_state_vector( density_result.final_density_matrix, qubit_map=density_result.qubit_map, check_preconditions=False) results.append(np.real_if_close(true_z_val).item()) results = np.array(results) all_results.append(results) cost = np.sum(np.abs(results - data_expectations)) return cost return obj_func, all_results def print_result(x): depol_prob, decay_prob, readout_prob = x print(f'depol = {depol_prob:.2%}') print(f'decay = {decay_prob:.2%}') print(f'readout = {readout_prob:.2%}') dfb = df dfb = dfb.head(5) # Remove this to do all qubits len(dfb) # Initial values depol_prob = 0.01 decay_prob = 0.01 readout_prob = 0.01 opt_results = [] for i, entry in dfb.iterrows(): ofunc, results = get_obj_func(entry['z_vals']) opt_result = scipy.optimize.minimize(ofunc, [depol_prob, decay_prob, readout_prob], method='nelder-mead', options={'disp': True}) label = f"{entry['qubit'].row}, {entry['qubit'].col}" print("Qubit", label) print_result(opt_result.x) opt_results.append(opt_result) data_expectations = entry['z_vals'] fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11, 4)) ax1.plot(thetas, true_z_vals, label='True') ax1.plot(thetas, data_expectations, 'o-', label=f'{label} Data') ax1.plot(thetas, results[-1], '.-', label='Fit') ax2.plot([], []) # advance color cycle ax2.plot(thetas, np.abs(true_z_vals - data_expectations), 'o-', label='|Data - True|') ax2.plot(thetas, np.abs(true_z_vals - results[-1]), '-', label='|Fit - True|') ax1.legend(loc='best') ax2.legend(loc='best') fig.tight_layout() plt.show() """ Explanation: Learn a model Our experimental data has some wiggles in it, but it also has a clear pattern of deviation from the true values. We can hypothesize a (parameterized) noise model and then use function minimization to fit the noise model parameters. End of explanation """
vzygouras/personal
Reciprocity Analysis Data Cleaning.ipynb
mit
import pandas as pd import glob import os import matplotlib.pyplot as plt import seaborn as sns import warnings import numpy as np import statsmodels.api as sm warnings.filterwarnings('ignore') pd.set_option("display.max_rows", None, "display.max_columns", None) """ Explanation: Reciprocity Analysis: Data Cleaning Viki Zygouras - 5/19/2020 End of explanation """ path = r'reciprocityLargeFamilyData/' all_files = glob.glob(os.path.join(path, "*.xlsx")) df_from_each_file = [pd.read_excel(f) for f in all_files] totalData = pd.DataFrame() #clean/drop any unnecessary columns reciprocityData = totalData.append(df_from_each_file, ignore_index= False) reciprocityData = reciprocityData.drop(columns = ['anastasia code', 'emotion','positive/negative', 'Unnamed: 3', 'Anastasia Code','Postive or Negative?', 'Anastasia Coding', 'Anastasia Codes', 'Emotion', 'Positive / Negative']) reciprocityData = reciprocityData.reset_index(drop = True) reciprocityData = reciprocityData.drop_duplicates() reciprocityData = reciprocityData.dropna(subset=['Title']) """ Explanation: Read in files: End of explanation """ #Split the participant code from label reciprocityData["Participant"] = [val[0] for val in reciprocityData["Title"].str.partition("-", False)] #Split agent name from label reciprocityData["Agent"] = [val[2].partition("-")[0].lower() for val in reciprocityData["Title"].str.partition("-", False)] #Split either Active or Observing from the label reciprocityData['Activity'] = [val[2] for val in reciprocityData["Title"].str.split("-", False)] #Boolean 1 if verbal 0 if nonverbal reciprocityData['Verbal'] = [val[3] for val in reciprocityData["Title"].str.split("-")] reciprocityData['Verbal'] = [0 if elt == '26' else 1 for elt in reciprocityData['Verbal']] #Split behavior number from label reciprocityData['Behavior'] = [val[4] for val in reciprocityData["Title"].str.split("-")] reciprocityData = reciprocityData.astype({'Behavior': 'int64'}) """ Explanation: Data Cleaning Parse out 
interaction labels: End of explanation """ #Parse out positive and not positive - will be 1 if positive and 0 if not positive reciprocityData['Positive'] = ['E' if val[-2] != 'P' and val[-2] != 'NP' else val[-2] for val in reciprocityData["Title"].str.split("-")] reciprocityData['Positive'] = reciprocityData['Positive'].map({'P': 1, 'NP': 0, 'E': 999}) #Parse out if the agent and participant's sentiment matches or not reciprocityData['InSync'] = ['E' if val[-1] != 'M' and val[-1] != 'S' else val[-1] for val in reciprocityData["Title"].str.split("-")] reciprocityData['InSync'] = reciprocityData['InSync'].map({'S': 1, 'M': 0, 'E': 999}) """ Explanation: Will use 1 to indicate positive interaction and 0 to indicate negative. 999 means the value within our coding scheme should not be assigned to the following categories. End of explanation """ #assign 0 to physical behaviors except for 23 for i in range(7,29): if i != 23: reciprocityData['Verbal'] = np.where((reciprocityData.Behavior == i), 0, reciprocityData.Verbal) #assign 1 to verbal behaviors verbalBehaviorNum = [1,2,3,4,5,6,29,30,31,32,33,34,35,36,37] for i in verbalBehaviorNum: reciprocityData['Verbal'] = np.where((reciprocityData.Behavior == i), 1, reciprocityData.Verbal) #check that the above code corrected for human errors in behavior 11 reciprocityData[reciprocityData['Behavior'] == 11] """ Explanation: Correct previously defined verbal/nonverbal to behaviors account for human error: End of explanation """ notincluded = [7,8,9,12,27,29] for i in notincluded: reciprocityData['Positive'] = np.where((reciprocityData.Behavior == i), 999, reciprocityData.Positive) reciprocityData['InSync'] = np.where((reciprocityData.Behavior == i), 999, reciprocityData.InSync) reciprocityData['Verbal'] = np.where((reciprocityData.Behavior == i), 999, reciprocityData.Verbal) """ Explanation: Update tags above for behaviors that shouldn't be considered for P/NP and M/S: End of explanation """ reciprocityData['EndTime'] = 
pd.to_datetime(reciprocityData['End'], format = '%H:%M:%S:%f') reciprocityData['StartTime'] = pd.to_datetime(reciprocityData['Start'], format = '%H:%M:%S:%f') reciprocityData['Duration'] = reciprocityData['EndTime'] - reciprocityData['StartTime'] reciprocityData = reciprocityData.drop(['StartTime', 'EndTime', 'Title'], axis = 1) """ Explanation: Adding duration of a given interaction: End of explanation """ #drop subclips that weren't exported correctly from KYNO for average duration calculation incorrectlyExported = reciprocityData[reciprocityData['Duration'] > '00:01:00.000000'] #clips under the 1 minute threshold removes outlier duration clips actualDurations = reciprocityData[reciprocityData['Duration'] < '00:01:00.000000'] actualDurations['Duration'].mean() """ Explanation: Average Duration (Seconds) of an Interaction: End of explanation """ participants = pd.read_excel(r"Participants.xlsx") behaviors = pd.read_excel(r"behaviors.xlsx") pData = pd.DataFrame() pData = pData.append([participants['Participant'],participants['Type'], participants['Family'] , participants['Age']]).T pData = pData.iloc[:38] temp_combined = pd.merge(pData, reciprocityData, on=['Participant']) combined = pd.merge(temp_combined, behaviors, on = ['Behavior']) """ Explanation: Add info from "Participants.csv" in Study 3 Data Analysis and "behaviors.xls" in Behavioral Analysis: End of explanation """ combined['isJibo'] = [1 if value == 'jibo' else 0 for value in combined['Agent']] combined['isComputer'] = [1 if value == 'computer' else 0 for value in combined['Agent']] combined['isAlexa'] = [1 if value == 'alexa' else 0 for value in combined['Agent']] combined['isChild'] = [1 if value == 'C' else 0 for value in combined['Type']] combined['isAdult'] = [1 if value == 'A' else 0 for value in combined['Type']] combined['isActive'] = [1 if value == 'Active' else 0 for value in combined['Activity']] combined['isObserving'] = [1 if value == 'Obs' else 0 for value in combined['Activity']] """ 
Explanation: Convert categorical columns to numeric with one-hot encoding: End of explanation """ combined.tail(20) """ Explanation: View raw data: End of explanation """ combined.to_csv("ReciprocityAnalysis.csv", index = False) """ Explanation: Export dataframe as CSV to make a copy: End of explanation """ print("Number of clips coded: ", len(combined)) print("Number of Families: ", len(set(combined['Family']))) print("Families Included: ", set(combined['Family'])) print("Number of Participants: ", len(set(combined['Participant']))) print("Average # of subclips per family: ", len(combined)/12) set(combined['Description']) """ Explanation: General Metrics: End of explanation """ rel = combined[combined['Positive'] == 0] rel[rel['Family'] == 'F02'].tail(10) """ Explanation: View negative interactions from Family 2 End of explanation """ behaviors = list(set(combined['Description'])) #will make the count table for number of participants that exhibit a behavior with an agent (behavior x agent) agents = ['jibo', 'alexa', 'computer'] def makeContingencyTable(df): table = [] for agent in agents: agentCounts = pd.DataFrame(columns = [agent]) labels = pd.DataFrame(columns = ['Description']) for i in range(len(behaviors)): agentDf = df[df['Agent'] == agent] behaviorDf = agentDf[agentDf['Description'] == behaviors[i]] numParticipants = len(set(behaviorDf['Participant'])) agentCounts.loc[i] = [numParticipants] labels.loc[i] = [behaviors[i]] table.append(agentCounts) table.append(labels) combo = pd.concat(table, axis = 1) combo = combo[['Description', 'jibo', 'alexa', 'computer']] return combo #use this code to make contigency table based on behavior frequency and not particiants (behavior x agent) # def makeContingencyTable(df): # data = df[['Description', 'Agent']] # table = sm.stats.Table.from_data(data) # contingencyTable = table.table_orig # return contingencyTable #Run these to get the complement for behavior counts #calculate the total number of interactions per agent # 
alexaTotal = len(combined[combined['Agent'] == 'alexa']) # jiboTotal = len(combined[combined['Agent'] == 'jibo']) # computerTotal = len(combined[combined['Agent'] == 'computer']) # jiboComplement = general['jibo'].apply(lambda x: jiboTotal - x) # alexaComplement = general['alexa'].apply(lambda x: alexaTotal - x) # computerComplement = general['computer'].apply(lambda x: computerTotal - x) """ Explanation: Fisher Test Data: <span style="color:red">NOTE: There are no libraries in python that currently support Fisher tests for tables larger than 2x2 - need to use R instead. </span> Build and Export Contingency Tables for fisher testing in R: End of explanation """ #all behaviors no splits general = makeContingencyTable(combined) jiboComplement = general['jibo'].apply(lambda x: 33 - x) alexaComplement = general['alexa'].apply(lambda x: 33 - x) computerComplement = general['computer'].apply(lambda x: 33 - x) complement = pd.concat([general['Description'], jiboComplement, alexaComplement, computerComplement], axis = 1) """ Explanation: Create tables for each category: End of explanation """ notPositive = combined[combined['Positive'] == 0] notPositive = makeContingencyTable(notPositive) positive = combined[combined['Positive'] == 1] positive = makeContingencyTable(positive) verbal = combined[combined['Verbal'] == 1] verbal = makeContingencyTable(verbal) nonverbal = combined[combined['Verbal'] == 0] nonverbal = makeContingencyTable(nonverbal) sync = combined[combined['InSync'] == 1] sync = makeContingencyTable(sync) mismatch = combined[combined['InSync'] == 0] mismatch = makeContingencyTable(mismatch) tableNames = ['General','Complement', 'Positive', 'Not Positive', 'InSync', 'Mismatch', 'Verbal', 'NonVerbal'] tableList = [general, complement, positive, notPositive, sync, mismatch, verbal, nonverbal] positive """ Explanation: Using 1 and 0 to filter on category columns ensures that ineligible behaviors (999) aren't included End of explanation """ for i in 
range(len(tableList)): tableList[i].to_csv(tableNames[i] + "ContingencyTable.csv", index = False) """ Explanation: Export tables to be used in R: End of explanation """
jwjohnson314/data-803
notebooks/multiclass-classification-random-forests-cv.ipynb
mit
multinom = LogisticRegressionCV(n_jobs=-1, refit=True, multi_class='multinomial', random_state=0) multinom.fit(Xtr, ytr) multinom_preds = multinom.predict(Xte) print(accuracy_score(yte, multinom_preds)) # untuned (default) random forest model rf_model_1 = RF(n_jobs = -1, random_state = 0) rf_model_1.fit(Xtr, ytr) rf_preds_1 = rf_model_1.predict(Xte) print(accuracy_score(yte, rf_preds_1)) # adjusting the number of trees scores = [] for i in np.arange(10, 310, 10): rf_model = RF(n_estimators = i, max_features = 8, n_jobs = -1, random_state = 0) rf_model.fit(Xtr, ytr) rf_preds = rf_model.predict(Xte) scores.append(accuracy_score(yte, rf_preds)) figsize(12, 6) plt.plot(range(len(scores)), scores, marker='o') plt.xlabel('number of trees') labs = np.arange(10, 310, 10) plt.xticks(range(len(scores)), labs, rotation=90) plt.ylabel('accuracy'); print(np.argmax(scores)*10+10) # number of trees at argmax print(np.max(scores)) rf_model = RF(n_estimators = 110, n_jobs = -1, random_state = 0) rf_model.fit(Xtr, ytr) rf_preds = rf_model.predict(Xte) print(accuracy_score(yte, rf_preds)) """ Explanation: Very high accuracy for a linear model on a highly nonlinear problem. 
End of explanation """ # tuning max_features for i in [3, 5, 8, 9, 10, 12, 15]: rf_model = RF(n_estimators = 4000, max_features = i, n_jobs = -1, random_state = 0) # 4000 for stability rf_model.fit(Xtr, ytr) rf_preds = rf_model.predict(Xte) print('max_features = %s; accuracy score: %s' % (i, accuracy_score(yte, rf_preds))) final = RF(n_estimators=120, max_features=12, n_jobs=-1, random_state=0) final.fit(X, y) lm_cv_scores = cross_val_score(linear_model.fit(X, y), X=X, y=y, scoring='accuracy') print(lm_cv_scores, np.mean(lm_cv_scores)) mn_cv_scores = cross_val_score(multinom.fit(X, y), X=X, y=y, scoring='accuracy') print(mn_cv_scores, np.mean(mn_cv_scores)) rf_cv_scores = cross_val_score(final, X=X, y=y, scoring='accuracy') print(rf_cv_scores, np.mean(rf_cv_scores)) """ Explanation: Another parameter that is always worth an attempt at tuning is the 'max_features' parameter. This controls the number of predictors chosen (at random) at each node. The default, which is usually decent, is $$\text{max_features} = \sqrt{\text{number of predictors}}.$$ End of explanation """ np.mean(X) Xsc = X - np.mean(X) lm_cv_scaled_scores = cross_val_score(linear_model.fit(Xsc, y), X=Xsc, y=y, scoring='accuracy') print(lm_cv_scaled_scores, np.mean(lm_cv_scaled_scores)) mn_cv_scaled_scores = cross_val_score(multinom.fit(Xsc, y), X=Xsc, y=y, scoring='accuracy') print(mn_cv_scaled_scores, np.mean(mn_cv_scaled_scores)) """ Explanation: Other quick adjustments: for images, it's common to simply mean-center the data. 
Scaling is not typically done as the pixels in an image are already scaled: End of explanation """ pca = PCA().fit(X) pca.explained_variance_ratio_ Xre = PCA(n_components=2, whiten=True).fit_transform(X) Xretr, Xrete, ytr, yte = train_test_split(Xre, y, test_size=0.2, random_state=0) figsize(10, 10) plt.scatter(Xretr[:,0], Xretr[:, 1], color='blue', label='train') plt.scatter(Xrete[:,0], Xrete[:,1], color='red', label='test') plt.legend(loc='best'); Xval = np.vstack([Xtr, Xte]) print(Xtr.shape) print(Xval.shape) sets = np.zeros(1797) sets[1438:] = 1 # tag the test data val_mod = LogisticRegressionCV(scoring='accuracy', random_state=0, n_jobs=-1).fit(Xval, sets) roc_auc_score(sets, val_mod.predict(X)) # model can't distinguish train and test sets """ Explanation: Maybe we can understand the validation error with PCA? End of explanation """
vprusso/youtube_tutorials
data_structures/bloom_filter/Bloom Filters and Pokemon.ipynb
gpl-3.0
bit_vector = [0] * 20 print(bit_vector) """ Explanation: In this post, we will briefly go over the probabilistic data structure referred to as a Bloom filter. We'll be using Pokemon to help us in understanding the general concept of how to make use of such a data structure. TL;DR Bloom filters: "Lightweight" version of a hash table. Both hash tables and Bloom filters support efficient insertions and lookups. Bloom filters are more space efficient than hash tables, but this comes at the cost of having "false positives" for entry lookup. That is, Bloom filters can say with certainty that an element has not been inserted (no possibility of false negatives), but may indicate an element has been inserted when it has in fact not been (false positive). When should I use a Bloom filter? I want a data structure that allows for fast lookups and insertions. I care about how much space the data structure uses. I don't care if the data structure sometimes indicates an item is present when in fact it is not." Example: I run a website and want to keep track of IP addresses that are blocked. I don't particularly care if a blocked IP address is occasionally able to access my website, but I do care if someone not on the blocked list is unable to access the site. More examples found on Wikipedia. Bloom Filter: Toy Example in Python In order to illustrate how a Bloom filter works let's consider a toy example. We start with a bit vector; a vector whose elements are $0$ or $1$. To start, we initialize the bit vector to all zeros. For the purposes of this toy example, we will restrict our attention to a bit vector of size $20$. End of explanation """ import pyhash """ Explanation: The next ingredient we require is the use of a couple hash functions, that is, a function that maps data of arbitrary size to data of a fixed size. The types of hash functions used in Bloom filters are generally not of the "cryptographic variety", for example, one usually wouldn't use something like MD5. 
Non-cryptographic hash functions like Murmur and FNV are mostly used, primarily for their speed over most cryptographic hash functions. There is a nice module in Python called pyhash that consists solely of non-cryptographic hashes. End of explanation """ # Define FNV and Murmur hash functions from Pyhash. fnv_hasher = pyhash.fnv1_32() murmur_hasher = pyhash.murmur3_32() # Calculate output of FNV and Murmur hash for Pikachu and Charmander. fnv_hash_pikachu = fnv_hasher("Pikachu") % 20 murmur_hash_pikachu = murmur_hasher("Pikachu") % 20 fnv_hash_charmander = fnv_hasher("Charmander") % 20 murmur_hash_charmander = murmur_hasher("Charmander") % 20 # Print the output of FNV and Murmur hashes. print("FNV hash output for Pikachu: " + str(fnv_hash_pikachu)) print("Murmur hash output for Pikachu: " + str(murmur_hash_pikachu)) print("FNV hash output for Charmander: " + str(fnv_hash_charmander)) print("Murmur hash output for Charmander: " + str(murmur_hash_charmander)) # Flip the bits of bit_vector in the corresponding locations from above hashes. bit_vector[fnv_hash_pikachu] = 1 bit_vector[murmur_hash_pikachu] = 1 bit_vector[fnv_hash_charmander] = 1 bit_vector[murmur_hash_charmander] = 1 print(bit_vector) """ Explanation: Let's combine the bit vector and non-cryptographic hash functions to put together a toy example of a Bloom filter. In our example, let's say we're using our Bloom filter as a Pokedex; a device to keep track of the Pokemon we have caught. Each time we catch a Pokemon, we update our Pokedex by running the name of the Pokemon through two hash functions. The output of the hashes indicates which bits to flip in our bit vector. Hashing the strings "Pikachu" and "Charmander" using the FNV hash algorithm mod 20 (since 20 is the size of our bit vector in this example) results in 13 and 5. Likewise, hashing the same strings using the Murmur hashing algorithm mod 20 results in 10 and 9, respectively. 
We use the outputs of the above hash algorithms to flip the bits located at the respective indices. For instance: The following Python code achieves what we described above: End of explanation """ # Calculate output of FNV and Murmur hash for Bulbasaur. fnv_hash_bulbasaur = fnv_hasher("Bulbasaur") % 20 murmur_hash_bulbasaur = murmur_hasher("Bulbasaur") % 20 # Print the FNV and Murmur hashes of Bulbasaur. print("FNV hash output for Bulbasaur: " + str(fnv_hash_bulbasaur)) print("Murmur hash output for Bulbasaur: " + str(murmur_hash_bulbasaur)) """ Explanation: Walking in the tall grass, a wild Bulbasaur appears! Let's consult our Bloom filter Pokedex to see if we've already captured Bulbasaur or not End of explanation """ print(bit_vector[fnv_hash_bulbasaur]) print(bit_vector[murmur_hash_bulbasaur]) """ Explanation: Looks like the outputs of hashing "Bulbasaur" (mod 20) result in an output of $11$ and $8$ for the FNV and Murmur hashes respectively. Consulting our Bloom filter bit vector, we check whether the bits are flipped "on" or "off" at these indices: End of explanation """ # Flip the bits in the Bloom filter to indicate that we now have captured Bulbasaur. bit_vector[fnv_hash_bulbasaur] = 1 bit_vector[murmur_hash_bulbasaur] = 1 print(bit_vector) """ Explanation: Since the entries for both of the hash functions for Bulbasaur result in $0$, Bulbasaur is not in our Pokedex. Let's capture Bulbasaur and appropriately modify out Bloom filter Pokedex to reflect this. 
End of explanation """ # 150 of the 151 Pokemon (excluding Pidgey) caught_pokemon = ["Bulbasaur", "Ivysaur", "Venusaur", "Charmander", "Charmeleon", "Charizard", "Squirtle", "Wartortle", "Blastoise", "Caterpie", "Metapod", "Butterfree", "Weedle", "Kakuna", "Beedrill", "Pidgeotto", "Pidgeot", "Rattata", "Raticate", "Spearow", "Fearow", "Ekans", "Arbok", "Pikachu", "Raichu", "Sandshrew", "Sandslash", "Nidoran (female)", "Nidorina", "Nidoqueen", "Nidoran (male)", "Nidorino", "Nidoking", "Clefairy", "Clefable", "Vulpix", "Ninetales", "Jigglypuff", "Wigglytuff", "Zubat", "Golbat", "Oddish", "Gloom", "Vileplume", "Paras", "Parasect", "Venonat", "Venomoth", "Diglet", "Dugtrio", "Meowth", "Persian", "Psyduck", "Golduck", "Mankey", "Primeape", "Growlithe", "Arcanine", "Poliwag", "Poliwhirl", "Poliwrath", "Abra", "Kadabra", "Alakazam", "Machop", "Machoke", "Machamp", "Bellsprout", "Weepinbell", "Victreebel", "Tentacool", "Tentacruel", "Geodude", "Graveler", "Golem", "Ponyta", "Rapidash", "Slowpoke", "Slowbro", "Magnemite", "Magneton", "Farfetc'd", "Doduo", "Dodrio", "Seel", "Dewgong", "Grimer", "Muk", "Shellder", "Cloyster", "Gastly", "Haunter", "Gengar", "Onyx", "Drowzee", "Hypno", "Krabby", "Kingler", "Voltorb", "Electrode", "Exeggcute", "Exeggutor", "Cubone", "Marowak", "Hitmonlee", "Hitmonchan", "Lickitung", "Koffing", "Weezing", "Rhyhorn", "Rhydon", "Chansey", "Tangela", "Kangaskhan", "Horsea", "Seadra", "Goldeen", "Seaking", "Staryu", "Starmie", "Mr. Mime", "Scyther", "Jynx", "Electabuzz", "Magmar", "Pinsir", "Tauros", "Magikarp", "Gyrados", "Lapras", "Ditto", "Eevee", "Vaporeon", "Jolteon", "Flareon", "Porygon", "Omanyte", "Omastar", "Kabuto", "Kabutops", "Aerodactyl", "Snorlax", "Articuno", "Zapdos", "Moltres", "Dratini", "Dragonair", "Dragonite", "Mewtwo", "Mew" ] pokedex_bloom_filter = [0] * 20 # Update the Bloom filter positions of the bit vector. 
for pokemon in caught_pokemon: fnv_hash = fnv_hasher(pokemon) % 20 murmur_hash = murmur_hasher(pokemon) % 20 pokedex_bloom_filter[fnv_hash] = 1 pokedex_bloom_filter[murmur_hash] = 1 # The Pokedex Bloom filter. print(pokedex_bloom_filter) """ Explanation: BULBASAUR was caught! How big does our Bloom filter need to be? In our toy example, the size of the Bloom filter consists of 20 entries. For our Pokedexing needs, the length of the filter is most likely too small to be useful. To see why, let's assume we've caught 150 of the 151 of the Pokemon. End of explanation """ fnv_hash_pidgey = fnv_hasher("Pidgey") % 20 murmur_hash_pidgey = murmur_hasher("Pidgey") % 20 print(pokedex_bloom_filter[fnv_hash_pidgey]) print(pokedex_bloom_filter[murmur_hash_pidgey]) """ Explanation: Unbeknownst to our Pokemon trainer, the last Pokemon they have yet to catch is the elusive Pidgey. As luck should have it, the trainer walks into some tall grass and a wild Pidgey appears! Before deciding to catch Pidgey, the trainor pulls out their Pokedex to see if they're already caught it. End of explanation """ from __future__ import division import math # Total number of Pokemon in the Blue/Red universe. n = 151 # Selecting parameter m to be larger than what we require. m = 1000 # Calculating the optimal k to determine how many hash functions we should use. k = (m//n)*math.log(2,math.e) print(k) """ Explanation: Hmm, okay, well the entries in the Bloom filter indicate that Pidget may be in the Pokedex already. As mentioned before, Bloom filters will only give false positives, never false negatives. That is to say that our Pokedex will never say that Pidgey is not in our Pokedex if it actually is, but it may say that Pidgey is in our Pokedex, even if it actually isn't. This is precisely the issue we're having at the moment. We, the audience, know that Pidgey is not in the Pokedex. 
Since the size of the Bloom filter is only of size $20$ and the total number of Pokemon is $151$, there will inevitably be some hash collisions leading to a situation like the one we're encountering now. Unfortunately, our Pokemon trainer decides that Pokeballs are expensive and misses their opportunity to complete their Pokedex. If Professor Oak gave our trainer a Bloom filter Pokedex with a larger number of entries, he could have reduced the probability of this unfortunate event. As it happens, the rate of false positives is approximately $$(1 - e^{-kn/m})^k$$, where $k$ is the number of hash functions used, $n$ is the number of inserted elements, and $m$ is the total number of bits, or equivalently, the length of the bit vector used for the Bloom filter. More information on the above equation can be found on the Wikipedia page where there is a more in-depth discussion on the probability of obtaining false positives. So in order to reduce the probability of collisions, we need to tweak the parameters $k$ and $m$; the number of hash functions we use and the size of the bit vector, respectively. Assuming that the hash function selects an index in the bit vector completely at random, the probability that an element in the Bloom filter is not set to $1$ can be calculated by $$ 1 - \frac{1}{m}.$$ By taking this to the $k^{th}$ power, we have the probability that the elements in the Bloom filter are not set to $1$ which can be calculated by $$ \left( 1 - \frac{1}{m} \right)^k.$$ If we have inserted $n$ Pokemon into our Bloom filter, the probability that a certain bit in the vector is still $0$ is given by $$\left( 1 - \frac{1}{m} \right)^{kn}.$$ For instance, in our toy example, we had that the size of the bit vector for the Bloom filter was $m = 20$. We made use of $k = 2$ hash functions, FNV and Murmur. When we caught Pikachu and Charmander, this sets the value corresponding to the total number of Pokemon in our Pokedex to $n = 2$. 
Therefore the probability that a certain element in the bit vector is still $0$ before capturing Bulbasaur is given as $$ \left( 1 - \frac{1}{20} \right)^{4} \approx 0.81450. $$ Not terrible, but as we catch more Pokemon, this number drops quite a bit. Since there are 151 Pokemon in total, it doesn't take long for the probability of any element in the bit vector to still be $0$ to be quite low. For instance, even when we've caught 20 Pokemon, there's only a $0.1285$ chance of any element in the vector still being $0$ $$ \left( 1 - \frac{1}{20} \right)^{40} \approx 0.1285. $$ One thing that Professor Oak must do then is to stop being so stingy and allocate more space for the Bloom filter. The other important thing to keep note of is the number of hash functions to use. If we have too many hash functions, then our Bloom filter will be quite slow, not to mention the entries in our Bloom filter will fill up faster with more hash functions in use. Alternatively, if we limit our hash functions, that increases the chance that we obtain a higher number of false positives. Luckily, if we select both $m$ and $n$, the following formula is known to give us an optimal value for $k$, the number of hash functions to use $$ k = (m/n)\ln(2), $$ such that the probability that the Bloom filter erroneously claims that a Pokemon is in the Pokedex is minimized. Again, that function is given as $$ \left(1 - e^{-kn/m} \right)^k. $$ Using this, let's try to come up with a Bloom filter that performs better than our toy example for the purposes of being used for a Pokedex. First, (at least for Pokemon Red and Blue) the total number of Pokemon one can encounter is 151 So we can pick $n = 151$. For $m$, we can pick something quite a bit larger than that, just to be safe. Let's set $m = 1000$ as the number of entries in our bit vector for our Bloom filter. Using these parameters, we can compute $k$. 
End of explanation """ # How likely is it with the parameters n,m, and k that we encounter a false positive? (1 - math.e**(-k*n/m))**k """ Explanation: With these selected parameters, the probability that we encounter a false positive is End of explanation """
therealAJ/python-sandbox
data-science/learning/ud1/DataScience/MeanMedianMode.ipynb
gpl-3.0
import numpy as np incomes = np.random.normal(27000, 15000, 10000) np.mean(incomes) """ Explanation: Mean, Median, Mode, and introducing NumPy Mean vs. Median Let's create some fake income data, centered around 27,000 with a normal distribution and standard deviation of 15,000, with 10,000 data points. (We'll discuss those terms more later, if you're not familiar with them.) Then, compute the mean (average) - it should be close to 27,000: End of explanation """ %matplotlib inline import matplotlib.pyplot as plt plt.hist(incomes, 50) plt.show() """ Explanation: We can segment the income data into 50 buckets, and plot it as a histogram: End of explanation """ np.median(incomes) """ Explanation: Now compute the median - since we have a nice, even distribution it too should be close to 27,000: End of explanation """ incomes = np.append(incomes, [1000000000]) """ Explanation: Now we'll add Donald Trump into the mix. Darn income inequality! End of explanation """ np.median(incomes) np.mean(incomes) """ Explanation: The median won't change much, but the mean does: End of explanation """ ages = np.random.randint(18, high=90, size=500) ages from scipy import stats stats.mode(ages) """ Explanation: Mode Next, let's generate some fake age data for 500 people: End of explanation """
karthikrangarajan/intro-to-sklearn
Notebook_anatomy.ipynb
bsd-3-clause
print('hello world!') """ Explanation: Basic Anatomy of a Notebook and General Guide Note this a is Python 3-flavored Jupyter notebook My Disclaimers: Notebooks are no substitute for an IDE for developing apps. Notebooks are not suitable for debugging code (yet). They are no substitute for publication quality publishing, however they are very useful for interactive blogging My <b>main</b> use of notebooks are for interactive teaching mostly and as a playground for some code that I might like to share at some point (I can add useful and pretty markup text, pics, videos, etc). I'm a fan also because github render's ipynb files nicely (even better than r-markdown for some reason). Shortcuts!!! A complete list is here, but these are my favorites: Mode | What | Shortcut ------------- | ------------- | ------------- Either (Press Esc to enter) | Run cell | Shift-Enter Command | Add cell below | B Command | Add cell above | A Command | Delete a cell | d-d Command | Go into edit mode | Enter Edit (Press Enter to enable) | Indent | Clrl-] Edit | Unindent | Ctrl-[ Edit | Comment section | Ctrl-/ Edit | Function introspection | Shift-Tab Try some below End of explanation """ import json # hit Tab at end of this to see all methods json. 
# hit Shift-Tab within parenthesis of method to see full docstring json.loads() ?sum() import json ?json """ Explanation: In this figure are a few labels of notebook parts I will refer to OK, change this cell to markdown to see some examples (you'll recognize this if you speak markdown) This will be Heading1 first thing second thing third thing A horizontal rule: Indented text Code snippet: python import numpy as np a2d = np.random.randn(100).reshape(10, 10) LaTeX inline equation: $\Delta =\sum_{i=1}^N w_i (x_i - \bar{x})^2$ LaTeX table: First Header | Second Header ------------- | ------------- Content Cell | Content Cell Content Cell | Content Cell HTML: <img src='http://www.elm.org/wp-content/uploads/2014/05/gold-star.jpg' alt="You get a gold star" width="42" height="42" align="left"> As you can see on your jupyter homepage, you can open up any notebook <b>NB: You can return to the homepage by clicking the Jupyter icon in the very upper left corner at any time</b> You can also Upload a notebook (button on upper right) As well as start a new notebook with a specific kernel (button to the right of Upload) So, what's that number after In or Out? That's the order of running this cell relative to other cells (useful for keeping track of what order cells have been run). When you save this notebook that number along with any output shown will also be saved. To <b>reset</b> a notebook go to Cell -> All Output -> Clear and then Save it. 
You can do something like this to render a publicly available notebook on github statically (this I do as a backup for presentations and course stuff): http://nbviewer.jupyter.org/github/&lt;username&gt;/&lt;repo name&gt;/blob/master/&lt;notebook name&gt;.ipynb like:<br> http://nbviewer.jupyter.org/github/michhar/rpy2_sample_notebooks/blob/master/TestingRpy2.ipynb <br> Also, you can upload or start a new <b>interactive</b>, free notebook by going here:<br> https://tmpnb.org <br> The nifty thing about Jupyter notebooks (and the .ipynb files which you can download and upload) is that you can share these. They are just written in JSON language. I put them up in places like GitHub and point people in that direction. Some people (like this guy who misses the point I think) really dislike notebooks, but they are really good for what they are good at - sharing code ideas plus neat notes and stuff in dev, teaching interactively, even chaining languages together in a polyglot style. And doing all of this on github works really well (as long as you remember to always clear your output before checking in - version control can get a bit crazy otherwise). Some additional features tab completion function introspection help End of explanation """
par2/lamana
docs/demo.ipynb
bsd-3-clause
#------------------------------------------------------------------------------ import pandas as pd import lamana as la #import LamAna as la %matplotlib inline #%matplotlib nbagg # PARAMETERS ------------------------------------------------------------------ # Build dicts of geometric and material parameters load_params = {'R' : 12e-3, # specimen radius 'a' : 7.5e-3, # support ring radius 'r' : 2e-4, # radial distance from center loading 'P_a' : 1, # applied load 'p' : 5, # points/layer } # Quick Form: a dict of lists mat_props = {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33], } # Standard Form: a dict of dicts # mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, # 'Poissons': {'HA': 0.25, 'PSu': 0.33}} # What geometries to test? # Make tuples of desired geometeries to analyze: outer - {inner...-....}_i - middle # Current Style g1 = ('0-0-2000') # Monolith g2 = ('1000-0-0') # Bilayer g3 = ('600-0-800') # Trilayer g4 = ('500-500-0') # 4-ply g5 = ('400-200-800') # Short-hand; <= 5-ply g6 = ('400-200-400S') # Symmetric g7 = ('400-[200]-800') # General convention; 5-ply g8 = ('400-[100,100]-800') # General convention; 7-plys g9 = ('400-[100,100]-400S') # General and Symmetric convention; 7-plys '''Add to test set''' g13 = ('400-[150,50]-800') # Dissimilar inner_is g14 = ('400-[25,125,50]-800') geos_most = [g1, g2, g3, g4, g5] geos_special = [g6, g7, g8, g9] geos_full = [g1, g2, g3, g4, g5, g6, g7, g8, g9] geos_dissimilar = [g13, g14] # Future Style #geos1 = ((400-400-400),(400-200-800),(400-350-500)) # same total thickness #geos2 = ((400-400-400), (400-500-1600), (400-200-800)) # same outer thickness #import pandas as pd pd.set_option('display.max_columns', 10) pd.set_option('precision', 4) """ Explanation: Demonstration The following demonstration includes basic and intermediate uses of the LamAna Project library. It is intended to exhaustively reference all API features, therefore some advandced demonstrations will favor technical detail. 
Tutorial: Basic User Input Startup End of explanation """ case1 = la.distributions.Case(load_params, mat_props) # instantiate a User Input Case Object through distributions case1.apply(['400-200-800']) case1.plot() """ Explanation: Goal: Generate a Plot in 3 Lines of Code End of explanation """ # Original case1.load_params # Series View case1.parameters # Original case1.mat_props # DataFrame View case1.properties # Equivalent Standard Form case1.properties.to_dict() """ Explanation: That's it! The rest of this demonstration showcases API functionality of the LamAna project. Calling Case attributes Passed in arguments are acessible, but can be displayed as pandas Series and DataFrames. End of explanation """ case1.materials = ['PSu', 'HA'] case1.properties """ Explanation: Reset material order. Changes are relfected in the properties view and stacking order. End of explanation """ case1.materials = ['PSu', 'HA', 'HA'] case1.properties case1.materials # get reorderd list of materials case1._materials case1.apply(geos_full) case1.snapshots[-1] '''Need to bypass pandas abc ordering of indicies.''' """ Explanation: Serial resets End of explanation """ mat_props2 = {'HA' : [5.3e10, 0.25], 'PSu' : [2.8e9, 0.33], } case1 = la.distributions.Case(load_params, mat_props2) case1.properties """ Explanation: Reset the parameters End of explanation """ case2 = la.distributions.Case(load_params, mat_props) case2.apply(geos_full) # default model Wilson_LT """ Explanation: apply() Geometries and LaminateModels Construct a laminate using geometric, matrial paramaters and geometries. 
End of explanation """ case2.Geometries # using an attribute, __repr__ print(case2.Geometries) # uses __str__ case2.Geometries[0] # indexing """ Explanation: Access the user input geometries End of explanation """ bilayer = case2.Geometries[1] # (1000.0-[0.0]-0.0) trilayer = case2.Geometries[2] # (600.0-[0.0]-800.0) #bilayer == trilayer bilayer != trilayer """ Explanation: We can compare Geometry objects with builtin Python operators. This process directly compares GeometryTuples in the Geometry class. End of explanation """ case2.middle case2.inner case2.inner[-1] case2.inner[-1][0] # List indexing allowed [first[0] for first in case2.inner] # iterate case2.outer """ Explanation: Get all thicknesses for selected layers. End of explanation """ case2.LMs """ Explanation: A general and very important object is the LaminateModel. End of explanation """ fiveplys = ['400-[200]-800', '350-400-500', '200-100-1400'] oddplys = ['400-200-800', '350-400-500', '400.0-[100.0,100.0]-800.0'] mix = fiveplys + oddplys mix # Non-unique, repeated 5-plys case_ = la.distributions.Case(load_params, mat_props) case_.apply(mix) case_.LMs # Unique case_ = la.distributions.Case(load_params, mat_props) case_.apply(mix, unique=True) case_.LMs """ Explanation: Sometimes might you want to throw in a bunch of geometry strings from different groups. If there are repeated strings in different groups (set intersections), you can tell Case to only give a unique result. For instane, here we combine two groups of geometry strings, 5-plys and odd-plys. Clearly these two groups overlap, and there are some repeated geometries (one with different conventions). Using the unique keyword, Case only operates on a unique set of Geometry objects (independent of convention), resulting in a unique set of LaminateModels. End of explanation """ case2.snapshots[-1] """ Explanation: DataFrame Access You can get a quick view of the stack using the snapshot method. 
This gives access to a Construct - a DataFrame converted stack. End of explanation """ '''Consider head command for frames list''' #case2.frames ##with pd.set_option('display.max_columns', None): # display all columns, within this context manager ## case2.frames[5] case2.frames[5].head() '''Extend laminate attributes''' case3 = la.distributions.Case(load_params, mat_props) case3.apply(geos_dissimilar) #case3.frames """ Explanation: We can easily view entire laminate DataFrames using the frames attribute. This gives access to LaminateModels (DataFrame) objects, which extends the stack view so that laminate theory is applied to each row. End of explanation """ case4 = la.distributions.Case(load_params, mat_props) case4.apply(['400-[100,100,100]-0']) case4.frames[0][['layer', 'matl', 'type']] ; '''Add functionality to customize material type.''' """ Explanation: NOTE, for even plies, the material is set alternate for each layer. Thus outers layers may be different materials. End of explanation """ '''Show Geometry first then case use.''' """ Explanation: Totaling The distributions.Case class has useful properties available for totaling specific layers for a group of laminates as lists. As these properties return lists, these results can be sliced and iterated. End of explanation """ case2.total case2.total_middle case2.total_middle case2.total_inner_i case2.total_outer case2.total_outer[4:-1] # slicing [inner_i[-1]/2.0 for inner_i in case2.total_inner_i] # iterate """ Explanation: .total property End of explanation """ G1 = case2.Geometries[-1] G1 G1.total # laminate thickness (um) G1.total_inner_i # inner_i laminae G1.total_inner_i[0] # inner_i lamina pair sum(G1.total_inner_i) # inner total G1.total_inner # inner total """ Explanation: Geometry Totals The total attribute used in Case actually dervive from attributes for Geometry objects individually. On Geometry objects, they return specific thicknesses instead of lists of thicknesses. 
End of explanation """ case2.LMs[5].Middle case2.LMs[5].Inner_i """ Explanation: LaminateModel Attributes Access the LaminateModel object directly using the LMs attribute. End of explanation """ case2.LMs[5].tensile """ Explanation: Laminates are assumed mirrored at the neutral axis, but dissimilar inner_i thicknesses are allowed. End of explanation """ LM = case2.LMs[4] LM.LMFrame.tail(7) """ Explanation: Separate from the case attributes, Laminates have useful attributes also, such as nplies, p and its own total. End of explanation """ LM.extrema LM.p # number of rows per group LM.nplies # number of plies LM.total # total laminate thickness (m) LM.Geometry '''Overload the min and max special methods.''' LM.max_stress # max interfacial failure stress """ Explanation: Often the extreme stress values (those at the interfaces) are most important. This is equivalent to p=2. End of explanation """ LM.min_stress '''Redo tp return series of bool an index for has_attrs''' LM.has_neutaxis LM.has_discont LM.is_special LM.FeatureInput '''Need to fix FeatureInput and Geometry inside LaminateModel''' """ Explanation: NOTE: this feature gives a different result for p=1 since a single middle cannot report two interfacial values; INDET. End of explanation """ case2 = la.distributions.Case(load_params, mat_props) case2.apply(geos_full) bilayer_LM = case2.LMs[1] trilayer_LM = case2.LMs[2] trilayer_LM == trilayer_LM #bilayer_LM == trilayer_LM bilayer_LM != trilayer_LM """ Explanation: As with Geometry objects, we can compare LaminateModel objects also. ~~This process directly compares two defining components of a LaminateModel object: the LM DataFrame (LMFrame) and FeatureInput. 
If either is False, the equality returns False.~~ End of explanation """ #bilayer_LM.FeatureInput == trilayer_LM.FeatureInput # gives detailed traceback '''Fix FI DataFrame with dict.''' bilayer_LM.FeatureInput #bilayer_LM.LMFrame == trilayer_LM.LMFrame # gives detailed traceback """ Explanation: Use python and pandas native comparison tracebacks that to understand the errors directly by comparing FeatureInput dict and LaminateModel DataFrame. End of explanation """ '''Find a way to remove all but interfacial points.''' """ Explanation: plot() LT Geometries CAVEAT: it is recommended to use at least p=2 for calculating stress. Less than two points for odd plies is indeterminant in middle rows, which can raise exceptions. End of explanation """ from lamana.utils import tools as ut from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() #%matplotlib nbagg # Quick plotting case4 = ut.laminator(dft.geos_standard) for case in case4.values(): for LM in case.LMs: df = LM.LMFrame df.plot(x='stress_f (MPa/N)', y='d(m)', title='Unnormalized Distribution') df.plot(x='stress_f (MPa/N)', y='k', title='Normalized Distribution') """ Explanation: We try to quickly plot simple stress distriubtions with native pandas methods. We have two variants for displaying distributions: - Unnoormalized: plotted by the height (`d_`). Visaully: thicknesses vary, material slopes are constant. - Normalized: plotted by the relative fraction level (`k_`). Visually: thicknesses are constant, material slopes vary. Here we plot with the nbagg matplotlib backend to generatre interactive figures. NOTE: for Normalized plots, slope can vary for a given material. End of explanation """ case3 = la.distributions.Case(load_params, mat_props) case3.apply(['400-200-800'], model='Wilson_LT') case3.plot() """ Explanation: While we get reasonable stress distribution plots rather simply, LamAna offers some plotting methods pertinent to laminates than assisting with visualization. 
Demo - An example illustration of desired plotting of multiple geometries from distributions. This is image of results from legacy code used for comparison. We can plot the stress distribution for a case of a single geometry. End of explanation """ five_plies = ['350-400-500', '400-200-800', '200-200-1200', '200-100-1400', '100-100-1600', '100-200-1400', '300-400-600'] case4 = la.distributions.Case(load_params, mat_props) case4.apply(five_plies, model='Wilson_LT') case4.plot() '''If different plies or patterns, make new caselet (subplot)''' '''[400-200-800, '300-[400,200]-600'] # non-congruent? equi-ply''' '''[400-200-800, '400-200-0'] # odd/even ply''' # currently superimposes plots. Just needs to separate. """ Explanation: We can also plot multiple geometries of similar total thickness. End of explanation """ LM = case4.LMs[0] LM.to_xlsx(delete=True) # or `to_csv()` """ Explanation: Exporting Saving data is critical for future analysis. LamAna offers two formas for exporting your data and parameters. Parameters used to make calculations such as the FeatureInput information are saved as "dashboards" in different forms. - '.xlsx': (default); convient for storing multiple calculationa amd dashboards as se[arate worksheets in a Excel workbook. - '.csv': universal format; separate files for data and dashboard. The lowest level to export data is for a LaminateModel object. End of explanation """ case4.to_xlsx(temp=True, delete=True) # or `to_csv()` """ Explanation: <div class="alert alert-warning">**NOTE** For demonstration purposes, the `temp` and `delete` are activated. This will create temporary files in the OS temp directory and automatically delete them. For practical use, ignore setting these flags.</div> The latter LaminateModel data was saved to an .xlsx file in the default export folder. The filepath is returned (currently suppressed with the ; line). The next level to export data is for a case. This will save all files comprise in a case. 
If exported to csv format, files are saved seperately. In xlsx format, a single file is made where each LaminateModel data and dashboard are saved as seperate worksheets. End of explanation """ #------------------------------------------------------------------------------ import pandas as pd import lamana as la %matplotlib inline #%matplotlib nbagg # PARAMETERS ------------------------------------------------------------------ # Build dicts of loading parameters and and material properties load_params = {'R' : 12e-3, # specimen radius 'a' : 7.5e-3, # support ring radius 'r' : 2e-4, # radial distance from center loading 'P_a' : 1, # applied load 'p' : 5, # points/layer } # # Quick Form: a dict of lists # mat_props = {'HA' : [5.2e10, 0.25], # 'PSu' : [2.7e9, 0.33],} # Standard Form: a dict of dicts mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} # What geometries to test? # Make tuples of desired geometeries to analyze: outer - {inner...-....}_i - middle # Current Style g1 = ('0-0-2000') # Monolith g2 = ('1000-0-0') # Bilayer g3 = ('600-0-800') # Trilayer g4 = ('500-500-0') # 4-ply g5 = ('400-200-800') # Short-hand; <= 5-ply g6 = ('400-200-400S') # Symmetric g7 = ('400-[200]-800') # General convention; 5-ply g8 = ('400-[100,100]-800') # General convention; 7-plys g9 = ('400-[100,100]-400S') # General and Symmetric convention; 7-plys '''Add to test set''' g13 = ('400-[150,50]-800') # Dissimilar inner_is g14 = ('400-[25,125,50]-800') geos_most = [g1, g2, g3, g4, g5] geos_special = [g6, g7, g8, g9] geos_full = [g1, g2, g3, g4, g5, g6, g7, g8, g9] geos_dissimilar = [g13, g14] """ Explanation: Tutorial: Intermediate So far, the barebones objects have been discussed and a lot can be accomplished with the basics. For users who have some experience with Python and Pandas, here are some intermediate techniques to reduce repetitious actions. 
This section dicusses the use of abstract base classes intended for reducing redundant tasks such as multiple case creation and default parameter definitions. Custom model subclassing is also discussed. End of explanation """ # Geometry object la.input_.Geometry('100-200-1600') """ Explanation: Exploring LamAna Objects This is brief introduction to underlying objects in this package. We begin with an input string that is parsed and converted into a Geometry object. This is part of the input_ module. End of explanation """ # FeatureInput FI = { 'Geometry': la.input_.Geometry('400.0-[200.0]-800.0'), 'Materials': ['HA', 'PSu'], 'Model': 'Wilson_LT', 'Parameters': load_params, 'Properties': mat_props, 'Globals': None, } """ Explanation: This object has a number of handy methods. This information is shipped with parameters and properties in FeatureInput. A FeatureInput is simply a dict. This currently does have not an official class but is it import for other objects. End of explanation """ # Stack object la.constructs.Stack(FI) # Laminate object la.constructs.Laminate(FI) # LaminateModel object la.constructs.LaminateModel(FI) """ Explanation: The following objects are serially inherited and part of the constructs module. These construct the DataFrame represention of a laminate. The code to decouple LaminateModel from Laminate was merged in verions 0.4.13. End of explanation """ cases1 = la.distributions.Cases(['400-200-800', '350-400-500', '400-200-0', '1000-0-0'], load_params=load_params, mat_props=mat_props, model= 'Wilson_LT', ps=[3,4,5]) cases1 """ Explanation: The latter cells verify these objects are successfully decoupled. That's all for now. Generating Multiple Cases We've already seen we can generate a case object and plots with three lines of code. However, sometimes it is necessary to generate different cases. These invocations can be tedious with three lines of code per case. Have no fear. A simple way to produce more cases is to instantiate a Cases object. 
Below we will create a Cases which houses multiples cases that: - share similiar loading parameters/material properties and laminate theory model with - different numbers of datapoints, p End of explanation """ # Gettable cases1[0] # normal dict key selection cases1[-1] # negative indices cases1[-2] # negative indicies # Sliceable cases1[0:2] # range of dict keys cases1[0:3] # full range of dict keys cases1[:] # full range cases1[1:] # start:None cases1[:2] # None:stop cases1[:-1] # None:negative index cases1[:-2] # None:negative index #cases1[0:-1:-2] # start:stop:step; NotImplemented #cases1[::-1] # reverse; NotImplemented # Viewable cases1 cases1.LMs # Iterable for i, case in enumerate(cases1): # __iter__ values print(case) #print(case.LMs) # access LaminateModels # Writable #cases1.to_csv() # write to file # Selectable cases1.select(nplies=[2,4]) # by # plies cases1.select(ps=[3,4]) # by points/DataFrame rows cases1.select(nplies=[2,4], ps=[3,4], how='intersection') # by set operations """ Explanation: Cases() accepts a list of geometry strings. Given appropriate default keywords, this lone argument will return a dict-like object of cases with indicies as keys. The model and ps keywords have default values. 
A Cases() object has some interesting characteristics (this is not a dict): if user-defined, tries to import Defaults() to simplify instantiations dict-like storage and access of cases list-like ordering of cases gettable: list-like, get items by index (including negative indicies) sliceable: slices the dict keys of the Cases object viewable: contained LaminateModels iterable: by values (unlike normal dicts, not by keys) writable: write DataFrames to csv files selectable: perform set operations and return unique subsets End of explanation """ set(geos_most).issubset(geos_full) # confirm repeated items mix = geos_full + geos_most # contains repeated items # Repeated Subset cases2 = la.distributions.Cases(mix, load_params=load_params, mat_props=mat_props) cases2.LMs # Unique Subset cases2 = la.distributions.Cases(mix, load_params=load_params, mat_props=mat_props, unique=True) cases2.LMs """ Explanation: LamainateModels can be compared using set theory. Unique subsets of LaminateModels can be returned from a mix of repeated geometry strings. We will use the default model and ps values. End of explanation """ from lamana.input_ import BaseDefaults bdft = BaseDefaults() # geometry String Attributes bdft.geo_inputs # all dict key-values bdft.geos_all # all geo strings bdft.geos_standard # static bdft.geos_sample # active; grows # Geometry Object Attributes; mimics latter bdft.Geo_objects # all dict key-values bdft.Geos_all # all Geo objects # more ... # Custom FeatureInputs #bdft.get_FeatureInput() # quick builds #bdft.get_materials() # convert to std. form """ Explanation: Subclassing Custom Default Parameters We observed the benefits of using implicit, default keywords (models, ps) in simplifying the writing of Cases() instantiations. In general, the user can code explicit defaults for load_params and mat_props by subclassing BaseDefaults() from inputs_. 
While subclassing requires some extra Python knowledge, this is a relatively simple process that reduces a significant amount of redundant code, leading to a more effiencient anaytical setting. The BaseDefaults contains a dict various geometry strings and Geometry objects. Rather than defining examples for various geometry plies, the user can call from all or a groupings of geometries. End of explanation """ # Example Defaults from LamAna.models.Wilson_LT class Defaults(BaseDefaults): '''Return parameters for building distributions cases. Useful for consistent testing. Dimensional defaults are inheirited from utils.BaseDefaults(). Material-specific parameters are defined here by he user. - Default geometric and materials parameters - Default FeatureInputs Examples ======== >>>dft = Defaults() >>>dft.load_params {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,} >>>dft.mat_props {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} >>>dft.FeatureInput {'Geometry' : '400-[200]-800', 'Geometric' : {'R' : 12e-3, 'a' : 7.5e-3, 'p' : 1, 'P_a' : 1, 'r' : 2e-4,}, 'Materials' : {'HA' : [5.2e10, 0.25], 'PSu' : [2.7e9, 0.33],}, 'Custom' : None, 'Model' : Wilson_LT, } ''' def __init__(self): BaseDefaults.__init__(self) '''DEV: Add defaults first. 
Then adjust attributes.''' # DEFAULTS ------------------------------------------------------------ # Build dicts of geometric and material parameters self.load_params = {'R' : 12e-3, # specimen radius 'a' : 7.5e-3, # support ring radius 'p' : 5, # points/layer 'P_a' : 1, # applied load 'r' : 2e-4, # radial distance from center loading } self.mat_props = {'Modulus': {'HA': 5.2e10, 'PSu': 2.7e9}, 'Poissons': {'HA': 0.25, 'PSu': 0.33}} # ATTRIBUTES ---------------------------------------------------------- # FeatureInput self.FeatureInput = self.get_FeatureInput(self.Geo_objects['standard'][0], load_params=self.load_params, mat_props=self.mat_props, ##custom_matls=None, model='Wilson_LT', global_vars=None) '''Use Classic_LT here''' from lamana.distributions import Cases # Auto load_params and mat_params dft = Defaults() cases3 = Cases(dft.geos_full, model='Wilson_LT') #cases3 = la.distributions.Cases(dft.geos_full, model='Wilson_LT') cases3 '''Refine idiom for importing Cases ''' """ Explanation: The latter geometric defaults come out of the box when subclassed from BaseDefaults. If custom geometries are desired, the user can override the geo_inputs dict, which automatically builds the Geo_objects dict. Users can override three categories of defaults parameters: geometric variables loading parameters material properties As mentioned, some geometric variables are provided for general laminate dimensions. The other parameters cannot be predicted and need to be defined by the user. Below is an example of a Defaults() subclass. If a custom model has been implemented (see next section), it is convention to place Defaults() and all other custom code within this module. If a custom model is implemented an located in the models directory, Cases will automatically search will the designated model modules, locate the load_params and mat_props attributes and load them automatically for all Cases instantiations. 
End of explanation """ cases1.plot(extrema=False) """ Explanation: Subclassing Custom Models One of the most powerful feauteres of LamAna is the ability to define customized modifications to the Laminate Theory models. Code for laminate theories (i.e. Classic_LT, Wilson_LT) are are located in the models directory. These models can be simple functions or sublclass from BaseModels in the theories module. Either approach is acceptable (see narrative docs for more details on creating custom models. This ability to add custom code make this library extensibile to use a larger variety of models. Plotting Cases An example of multiple subplots is show below. Using a former case, notice each subplot is indepent, woth separate geometries for each. LamAna treats each subplot as a subset or "caselet": End of explanation """ const_total = ['350-400-500', '400-200-800', '200-200-1200', '200-100-1400', '100-100-1600', '100-200-1400',] const_outer = ['400-550-100', '400-500-200', '400-450-300', '400-400-400', '400-350-500', '400-300-600', '400-250-700', '400-200-800', '400-0.5-1199'] const_inner = ['400-400-400', '350-400-500', '300-400-600', '200-400-700', '200-400-800', '150-400-990', '100-400-1000', '50-400-1100',] const_middle = ['100-700-400', '150-650-400', '200-600-400', '250-550-400', '300-400-500', '350-450-400', '400-400-400', '450-350-400', '750-0.5-400'] case1_ = const_total case2_ = const_outer case3_ = const_inner case4_ = const_middle cases_ = [case1_, case2_, case3_, case4_] cases3 = la.distributions.Cases(cases_, load_params=load_params, mat_props=mat_props, model= 'Wilson_LT', ps=[2,3]) cases3.plot(extrema=False) """ Explanation: Each caselet can also be a separate case, plotting multiple geometries for each as accomplished with Case. End of explanation """ '''Fix importing cases''' from lamana.distributions import Cases """ Explanation: See Demo notebooks for more examples of plotting. 
More on Cases End of explanation """ from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() %matplotlib inline str_caselets = ['350-400-500', '400-200-800', '400-[200]-800'] list_caselets = [['400-400-400', '400-[400]-400'], ['200-100-1400', '100-200-1400',], ['400-400-400', '400-200-800','350-400-500',], ['350-400-500']] case1 = la.distributions.Case(dft.load_params, dft.mat_props) case2 = la.distributions.Case(dft.load_params, dft.mat_props) case3 = la.distributions.Case(dft.load_params, dft.mat_props) case1.apply(['400-200-800', '400-[200]-800']) case2.apply(['350-400-500', '400-200-800']) case3.apply(['350-400-500', '400-200-800', '400-400-400']) case_caselets = [case1, case2, case3] mixed_caselets = [['350-400-500', '400-200-800',], [['400-400-400', '400-[400]-400'], ['200-100-1400', '100-200-1400',]], [case1, case2,] ] dict_caselets = {0: ['350-400-500', '400-200-800', '200-200-1200', '200-100-1400', '100-100-1600', '100-200-1400'], 1: ['400-550-100', '400-500-200', '400-450-300', '400-400-400', '400-350-500', '400-300-600'], 2: ['400-400-400', '350-400-500', '300-400-600', '200-400-700', '200-400-800', '150-400-990'], 3: ['100-700-400', '150-650-400', '200-600-400', '250-550-400', '300-400-500', '350-450-400'], } cases = Cases(str_caselets) #cases = Cases(str_caselets, combine=True) #cases = Cases(list_caselets) #cases = Cases(list_caselets, combine=True) #cases = Cases(case_caselets) #cases = Cases(case_caselets, combine=True) # collapse to one plot #cases = Cases(str_caselets, ps=[2,5]) #cases = Cases(list_caselets, ps=[2,3,5,7]) #cases = Cases(case_caselets, ps=[2,5]) #cases = Cases([], combine=True) # test raises # For next versions #cases = Cases(dict_caselets) #cases = Cases(mixed_caselets) #cases = Cases(mixed_caselets, combine=True) cases cases.LMs '''BUG: Following cell raises an Exception in Python 2. 
Comment to pass nb reg test in pytest.''' cases.caselets '''get out tests from code''' '''run tests''' '''test set seletions''' """ Explanation: Applying caselets The term "caselet" is defined in LPEP 003. Most importantly, the various types a caselet represents is handled by Cases and discussed here. In 0.4.4b3+, caselets are contained in lists. LPEP entertains the idea of containing caselets in dicts. End of explanation """ from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() cases = Cases(dft.geo_inputs['5-ply'], ps=[2,3,4]) len(cases) # test __len__ cases.get(1) # __getitem__ #cases[2] = 'test' # __setitem__; not implemented cases[0] # select cases[0:2] # slice (__getitem__) del cases[1] # __delitem__ cases # test __repr__ print(cases) # test __str__ cases == cases # test __eq__ not cases != cases # test __ne__ for i, case in enumerate(cases): # __iter__ values print(case) #print(case.LMs) cases.LMs # peek inside cases cases.frames # get a list of DataFrames directly cases #cases.to_csv() # write to file """ Explanation: Characteristics End of explanation """ str_caselets = ['350-400-500', '400-200-800', '400-[200]-800'] str_caselets2 = [['350-400-500', '350-[400]-500'], ['400-200-800', '400-[200]-800']] list_caselets = [['400-400-400', '400-[400]-400'], ['200-100-1400', '100-200-1400',], ['400-400-400', '400-200-800','350-400-500',], ['350-400-500']] case1 = la.distributions.Case(dft.load_params, dft.mat_props) case2 = la.distributions.Case(dft.load_params, dft.mat_props) case3 = la.distributions.Case(dft.load_params, dft.mat_props) case1.apply(['400-200-800', '400-[200]-800']) case2.apply(['350-400-500', '400-200-800']) case3.apply(['350-400-500', '400-200-800', '400-400-400']) case_caselets = [case1, case2, case3] """ Explanation: Unique Cases from Intersecting Caselets Cases can check if caselet is unique by comparing the underlying geometry strings. Here we have a non-unique caselets of different types. 
We get unique results within each caselet using the unique keyword. Notice, different caselets could have similar LaminateModels. End of explanation """ #----------------------------------------------------------+ # Iterating Over Cases from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() # Multiple cases, Multiple LMs cases = Cases(dft.geos_full, ps=[2,5]) # two cases (p=2,5) for i, case in enumerate(cases): # iter case values() print('Case #: {}'.format(i)) for LM in case.LMs: print(LM) print("\nYou iterated several cases (ps=[2,5]) comprising many LaminateModels.") # A single case, single LM cases = Cases(['400-[200]-800']) # a single case and LM (manual) for i, case_ in enumerate(cases): # iter i and case for LM in case_.LMs: print(LM) print("\nYou processed a case and LaminateModel w/iteration. (Recommended)\n") # Single case, multiple LMs cases = Cases(dft.geos_full) # auto, default p=5 for case in cases: # iter case values() for LM in case.LMs: print(LM) print("\nYou iterated a single case of many LaminateModels.") """ Explanation: The following cells attempt to print the LM objects. Cases objects unordered and thus print in random orders. It is important to note that once set operations are performed, order is no longer a preserved. This is related to how Python handles hashes. This applies to Cases() in two areas: The unique keyword optionally invoked during instantiation. Any use of set operation via the how keyword within the Cases.select() method. Revamped Idioms Gotcha: Although a Cases instance is a dict, as if 0.4.4b3, it's __iter__ method has been overriden to iterate the values by default (not the keys as in Python). This choice was decided since keys are uninformative integers, while the values (curently cases )are of interest, which saves from typing .items() when interating a Cases instance. python &gt;&gt;&gt; cases = Cases() &gt;&gt;&gt; for i, case in cases.items() # python &gt;&gt;&gt; ... 
print(case) &gt;&gt;&gt; for case in cases: # modified &gt;&gt;&gt; ... print(case) This behavior may change in future versions. End of explanation """ # Iterating Over Cases from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() #geometries = set(dft.geos_symmetric).union(dft.geos_special + dft.geos_standard + dft.geos_dissimilar) #cases = Cases(geometries, ps=[2,3,4]) cases = Cases(dft.geos_special, ps=[2,3,4]) # Reveal the full listdft.geos_specia # for case in cases: # iter case values() # for LM in case.LMs: # print(LM) # Test union of lists #geometries cases '''Right now a case shares p, size. cases share geometries and size.''' cases[0:2] '''Hard to see where these comem from. Use dict?''' cases.LMs cases.LMs[0:6:2] cases.LMs[0:4] """ Explanation: Selecting From cases, subsets of LaminateModels can be chosen. select is a method that performs on and returns sets of LaminateModels. Plotting functions are not implement for this method directly, however the reulsts can be used to make new cases instances from which .plot() is accessible. Example access techniques using Cases. Access all cases : cases Access specific cases : cases[0:2] Access all LaminateModels : cases.LMs Access LaminateModels (within a case) : cases.LMs[0:2] Select a subset of LaminateModels from all cases : cases.select(ps=[3,4]) End of explanation """ cases.select(nplies=[2,4]) cases.select(ps=[2,4]) cases.select(nplies=4) cases.select(ps=3) """ Explanation: Selections from latter cases. End of explanation """ cases.select(nplies=4, ps=3) # union; default cases.select(nplies=4, ps=3, how='intersection') # intersection """ Explanation: Advanced techniques: multiple selections. Set operations have been implemented in the selection method of Cases which enables filtering of unique LaminateModels that meet given conditions for nplies and ps. 
union: all LMs that meet either conditions (or) intersection: LMs that meet both conditions (and) difference: LMs symmetric difference: End of explanation """ cases.select(nplies=4, ps=3, how='difference') # difference cases.select(nplies=4) - cases.select(ps=3) # set difference '''How does this work?''' cases.select(nplies=4, ps=3, how='symm diff') # symm difference cases.select(nplies=[2,4], ps=[3,4], how='union') cases.select(nplies=[2,4], ps=[3,4], how='intersection') cases.select(nplies=[2,4], ps=3, how='difference') cases.select(nplies=4, ps=[3,4], how='symmeric difference') """ Explanation: By default, difference is subtracted as set(ps) - set(nplies). Currently there is no implementation for the converse difference, but set operations still work. End of explanation """ import numpy as np a = [] b = 1 c = np.int64(1) d = [1,2] e = [1,2,3] f = [3,4] test = 1 test in a #test in b #test is a test is c # if test is a or test is c: # True from lamana.utils import tools as ut ut.compare_set(d, e) ut.compare_set(b, d, how='intersection') ut.compare_set(d, b, how='difference') ut.compare_set(e, f, how='symmertric difference') ut.compare_set(d, e, test='issubset') ut.compare_set(e, d, test='issuperset') ut.compare_set(d, f, test='isdisjoint') set(d) ^ set(e) ut.compare_set(d,e, how='symm') g1 = dft.Geo_objects['5-ply'][0] g2 = dft.Geo_objects['5-ply'][1] cases = Cases(dft.geos_full, ps=[2,5]) # two cases (p=2,5) for i, case in enumerate(cases): # iter case values() for LM in case.LMs: print(LM) """ Explanation: Current logic seems to return a union. Enhancing selection algorithms with set operations Need logic to append LM for the following: all, either, neither (and, or, not or) a, b are int a, b are list a, b are mixed b, a are mixed End of explanation """ #PYTEST_VALIDATE_IGNORE_OUTPUT hash('400-200-800') #PYTEST_VALIDATE_IGNORE_OUTPUT hash('400-[200]-800') """ Explanation: In order to compare objects in sets, they must be hashable. 
The simple requirement equality is include whatever makes the hash of a equal to the hash of b. Ideally, we should hash the Geometry object, but the inner values is a list which is unhashable due to its mutability. Conventiently however, strings are not hashable. We can try to hash the geometry input string once they have been converted to General Convention as unique identifiers for the geometry object. This requires some reorganization in Geometry. ~~isolate a converter function _to_gen_convention()~~ privative all functions invisible to the API ~~hash the converted geo_strings~~ ~~privatize _geo_strings. This cannot be altered by the user.~~ Here we see the advantage to using geo_strings as hashables. They are inheirently hashable. UPDATE: decided to make a hashalbe version of the GeometryTuple End of explanation """ #PYTEST_VALIDATE_IGNORE_OUTPUT hash((case.LMs[0].Geometry, case.LMs[0].p)) case.LMs[0] L = [LM for case in cases for LM in case.LMs] L[0] L[8] #PYTEST_VALIDATE_IGNORE_OUTPUT hash((L[0].Geometry, L[0].p)) #PYTEST_VALIDATE_IGNORE_OUTPUT hash((L[1].Geometry, L[1].p)) set([L[0]]) != set([L[8]]) """ Explanation: Need to make Laminate class hashable. Try to use unique identifiers such as Geometry and p. End of explanation """ from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() mix = dft.Geos_full + dft.Geos_all mix set(mix) """ Explanation: Use sets to filter unique geometry objects from Defaults(). End of explanation """ mix = dft.geos_most + dft.geos_standard # 400-[200]-800 common to both cases3a = Cases(mix, combine=True, unique=True) cases3a.LMs load_params['p'] = 5 cases3b5 = la.distributions.Case(load_params, dft.mat_props) cases3b5.apply(mix) cases3b5.LMs[:-1] """ Explanation: Mixing Geometries See above. Looks like comparing the order of these lists give different results. This test has been quarantine from the repo until a solution is found. 
End of explanation """ '''Add how to build Defaults()''' # Case Building from Defaults import lamana as la from lamana.utils import tools as ut from lamana.models import Wilson_LT as wlt dft = wlt.Defaults() ##dft = ut.Defaults() # user-definable case2 = la.distributions.Case(dft.load_params, dft.mat_props) case2.apply(dft.geos_full) # multi plies #LM = case2.LMs[0] #LM.LMFrame print("\nYou have built a case using user-defined defaults to set geometric \ loading and material parameters.") case2 """ Explanation: Idiomatic Case Making As we transition to more automated techniques, tf parameters are to be reused multiple times, it can be helpful to store them as default values. End of explanation """ # Automatic Case Building import lamana as la from lamana.utils import tools as ut #Single Case dft = wlt.Defaults() ##dft = ut.Defaults() case3 = ut.laminator(dft.geos_full) # auto, default p=5 case3 = ut.laminator(dft.geos_full, ps=[5]) # declared #case3 = ut.laminator(dft.geos_full, ps=[1]) # LFrame rollbacks print("\nYou have built a case using higher-level API functions.") case3 # How to get values from a single case (Python 3 compatible) list(case3.values()) """ Explanation: Finally, if building several cases is required for the same parameters, we can use higher-level API tools to help automate the process. Note, for every case that is created, a seperate Case() instantiation and Case.apply() call is required. These techniques obviate such redundancies. End of explanation """ # Multiple Cases cases1 = ut.laminator(dft.geos_full, ps=[2,3,4,5]) # multi ply, multi p print("\nYou have built many cases using higher-level API functions.") cases1 # How to get values from multiple cases (Python 3 compatible) list(cases1.values()) """ Explanation: Cases are differentiated by different ps. 
End of explanation """ # Iterating Over Cases # Latest style case4 = ut.laminator(['400-[200]-800']) # a sinle case and LM for i, case_ in case4.items(): # iter p and case for LM in case_.LMs: print(LM) print("\nYou processed a case and LaminateModel w/iteration. (Recommended)\n") case5 = ut.laminator(dft.geos_full) # auto, default p=5 for i, case in case5.items(): # iter p and case with .items() for LM in case.LMs: print(LM) for case in case5.values(): # iter case only with .values() for LM in case.LMs: print(LM) print("\nYou processed many cases using Case object methods.") # Convert case dict to generator case_gen1 = (LM for p, case in case4.items() for LM in case.LMs) # Generator without keys case_gen2 = (LM for case in case4.values() for LM in case.LMs) print("\nYou have captured a case in a generator for later, one-time use.") """ Explanation: Python 3 no longer returns a list for .values() method, so list used to evalate a the dictionary view. While consuming a case's, dict value view with list() works in Python 2 and 3, iteration with loops and comprehensions is a preferred technique for both single and mutiple case processing. After cases are accessed, iteration can access the contetnts of all cases. Iteration is the preferred technique for processing cases. It is most general, cleaner, Py2/3 compatible out of the box and agrees with The Zen of Python: There should be one-- and preferably only one --obvious way to do it. End of explanation """ # Style Comparisons dft = wlt.Defaults() ##dft = ut.Defaults() case1 = la.distributions.Case(load_params, mat_props) case1.apply(dft.geos_all) cases = ut.laminator(geos=dft.geos_all) case2 = cases # Equivalent calls print(case1) print(case2) print("\nYou have used classic and modern styles to build equivalent cases.") """ Explanation: We will demonstrate comparing two techniques for generating equivalent cases. End of explanation """
fierval/KaggleMalware
Learning/1DLBP with CUDA.ipynb
mit
from numba import * from timeit import default_timer as timer import numpy as np import matplotlib.pylab as plt """ Explanation: Extracting a 1D Local Binary Pattern Histogram on NVIDIA GPU with CUDA and Numbapro This was done for the Microsoft Malware competition on Kaggle. In this contest, a bunch of malware files needed to be classified in 9 categories. The files were presented in two sets, 10,868 each: - text (disassembly of the malware) - binary (actual binaries minus PE header to render them harmless) I chose the 1DLBP algorithm described in the linked paper to extract (hopefully meaningful) features from each binary file. These features, in combination with some text-based features extracted from the disassemblies, proved to be quite effective in training an accurate classifier Prerequisits basic knowledge of CUDA programming (kernels, grid, blocks, threads, etc) numba End of explanation """ c = np.random.randint(0, 256, size=8) #neighborhood bytes to the left and to the right contain these values b = np.random.randint(256) # center of the neighborhood digits = np.array([0 if d < b else 1 for d in c]) # this is the number that represents the pattern print "Center: ", b print "Bytes around it: ", c print "Pattern: ", digits # In order to compute the actual pattern, we just mulitply each of the digits by a power of two and add them up powers = 1 << np.arange(0, 8) print "As number base 10: ", np.dot(powers, digits.T) """ Explanation: Algorithm in Brief Since the paper (above) does a great job explaining the algorithm, I will only describe it very briefly. For every byte (b) in the image, we look at 4 (or any neighborhood) of bytes immediately to the "left" and at 4 bytes immediately to the "right" of the current byte. We now have 8 selected bytes, call them c[], where size(c) = 8. 
Each of the elements of array c is converted into a binary digit, based on the following rule: End of explanation """ def extract_1dlbp_cpu(input, neighborhood, p): """ Extract the 1d lbp pattern on CPU """ res = np.zeros(1 << (2 * neighborhood)) for i in range(neighborhood, len(input) - neighborhood): left = input[i - neighborhood : i] right = input[i + 1 : i + neighborhood + 1] both = np.r_[left, right] res[np.sum(p [both >= input[i]])] += 1 return res """ Explanation: Which is actually a non-negative integer less than $2^8$. Then a histogram of these values is built. It has the size of 256 ints and in training experiments proves to be much more effective than a simple byte histogram. The functions below translate this description into Python: End of explanation """ import numbapro numbapro.check_cuda() """ Explanation: With numba, this can be expressed on the GPU using CUDA. If you have numbapro, CUDA support can be validated: End of explanation """ @cuda.jit('void(uint8[:], int32, int32[:], int32[:])') def lbp_kernel(input, neighborhood, powers, h): i = cuda.grid(1) r = 0 if i < input.shape[0] - 2 * neighborhood: i += neighborhood for j in range(i - neighborhood, i): if input[j] >= input[i]: r += powers[j - i + neighborhood] for j in range(i + 1, i + neighborhood + 1): if input[j] >= input[i]: r += powers[j - i + neighborhood - 1] cuda.atomic.add(h, r, 1) """ Explanation: The Kernel End of explanation """ def extract_1dlbp_gpu(input, neighborhood, d_powers): ''' input - the input array neighborhood - size of the neighborhood d_powers - device address of the powers of two constants used to compute the final pattern ''' maxThread = 512 blockDim = maxThread d_input = cuda.to_device(input) hist = np.zeros(2 ** (2 * neighborhood), dtype='int32') gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim d_hist = cuda.to_device(hist) lbp_kernel[gridDim, blockDim](d_input, neighborhood, d_powers, d_hist) d_hist.to_host() return hist """ Explanation: This looks 
like a regular CUDA kernel. It almost looksy "C++y". Each thread is tasked with computing the pattern ("digits" above) around one single element of the original input array. We compute the pattern, then wait for all of the threads in the block to finish and update the histogram with their results. Note: implementation of histogramming using atomics is the most straightforward, but not the most efficient one. For our purposes it will do just fine. Calling the Kernel That is pretty standard: allocate memory and move it to the GPU. End of explanation """ %matplotlib inline X = np.arange(3, 7) X = 10 ** X neighborhood = 4 cpu_times = np.zeros(X.shape[0]) gpu_times = np.zeros(X.shape[0]) p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32') d_powers = cuda.to_device(p) for i, x in enumerate(X): input = np.random.randint(0, 256, size = x).astype(np.uint8) start = timer() h_cpu = extract_1dlbp_cpu(input, neighborhood, p) cpu_times[i] = timer() - start print "Finished on CPU: length: {0}, time: {1:3.4f}s".format(x, cpu_times[i]) start = timer() h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers) gpu_times[i] = timer() - start print "Finished on GPU: length: {0}, time: {1:3.4f}s".format(x, gpu_times[i]) print "All h_cpu == h_gpu: ", (h_cpu == h_gpu).all() # Here both axes are log-scaled. f = plt.figure(figsize=(10, 5)) plt.plot(X, cpu_times, label = "CPU") plt.plot(X, gpu_times, label = "GPU") plt.xlabel('input length') plt.ylabel('time, sec') plt.yscale('log') plt.xscale('log') plt.legend() plt.show() """ Explanation: Testing Running comparison for arrays of several lengths. 
End of explanation """ def extract_1dlbp_gpu_debug(input, neighborhood, powers, res): maxThread = 512 blockDim = maxThread gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim for block in range(0, gridDim): for thread in range(0, blockDim): r = 0 i = blockDim * block + thread if i < input.shape[0] - 2 * neighborhood: i += neighborhood for j in range(i - neighborhood, i): if input[j] >= input[i]: r += powers[j - i + neighborhood] for j in range(i + 1, i + neighborhood + 1): if input[j] >= input[i]: r += powers[j - i + neighborhood - 1] res[r] += 1 return res """ Explanation: Improvements The above results are a bit extreme and very boring. GPU is faster than CPU, however, 4 orders of magnitue?! Can we do better on CPU? Obviously the slowdown is due to Python complexity: array manipulations we are using for once... Here are things to try: - Mimic CUDA kernel on CPU: End of explanation """ @jit("int32[:](uint8[:], int64, int32[:], int32[:])", nopython=True) def extract_1dlbp_cpu_jit(input, neighborhood, powers, res): maxThread = 512 blockDim = maxThread gridDim = (len(input) - 2 * neighborhood + blockDim) / blockDim for block in range(0, gridDim): for thread in range(0, blockDim): r = 0 i = blockDim * block + thread if i < input.shape[0] - 2 * neighborhood: i += neighborhood for j in range(i - neighborhood, i): if input[j] >= input[i]: r += powers[j - i + neighborhood] for j in range(i + 1, i + neighborhood + 1): if input[j] >= input[i]: r += powers[j - i + neighborhood - 1] res[r] += 1 return res """ Explanation: A good thing, too, because this is actually a way to debug a CUDA kernel in Python Use numba to compile this simple code to something non-Python. 
For this we just decorate the definition like so: End of explanation """ X = np.arange(3, 7) X = 10 ** X neighborhood = 4 cpu_times = np.zeros(X.shape[0]) cpu_times_simple = cpu_times.copy() cpu_times_jit = cpu_times.copy() gpu_times = np.zeros(X.shape[0]) p = 1 << np.array(range(0, 2 * neighborhood), dtype='int32') d_powers = cuda.to_device(p) for i, x in enumerate(X): input = np.random.randint(0, 256, size = x).astype(np.uint8) print "Length: {0}".format(x) print "--------------" start = timer() h_cpu = extract_1dlbp_cpu(input, neighborhood, p) cpu_times[i] = timer() - start print "Finished on CPU: time: {0:3.5f}s".format(cpu_times[i]) res = np.zeros(1 << (2 * neighborhood), dtype='int32') start = timer() h_cpu_simple = extract_1dlbp_gpu_debug(input, neighborhood, p, res) cpu_times_simple[i] = timer() - start print "Finished on CPU (simple): time: {0:3.5f}s".format(cpu_times_simple[i]) res = np.zeros(1 << (2 * neighborhood), dtype='int32') start = timer() h_cpu_jit = extract_1dlbp_cpu_jit(input, neighborhood, p, res) cpu_times_jit[i] = timer() - start print "Finished on CPU (numba: jit): time: {0:3.5f}s".format(cpu_times_jit[i]) start = timer() h_gpu = extract_1dlbp_gpu(input, neighborhood, d_powers) gpu_times[i] = timer() - start print "Finished on GPU: time: {0:3.5f}s".format(gpu_times[i]) print "All h_cpu == h_gpu: ", (h_cpu_jit == h_gpu).all() and (h_cpu_simple == h_cpu_jit).all() and (h_cpu == h_cpu_jit).all() print '' f = plt.figure(figsize=(10, 5)) plt.plot(X, cpu_times, label = "CPU") plt.plot(X, cpu_times_simple, label = "CPU non-vectorized") plt.plot(X, cpu_times_jit, label = "CPU jit") plt.plot(X, gpu_times, label = "GPU") plt.yscale('log') plt.xscale('log') plt.xlabel('input length') plt.ylabel('time, sec') plt.legend() plt.show() """ Explanation: Now we modify the test: End of explanation """
ComputationalModeling/spring-2017-danielak
past-semesters/fall_2016/day-by-day/day15-Schelling-1-dimensional-segregation-day2/Day_15_Pre_Class_Notebook.ipynb
agpl-3.0
# Put your code here, using additional cells if necessary. """ Explanation: Getting ready to implement the Schelling model Goal for this assignment The goal of this assignment is to finish up the two functions that you started in class on the first day of this project, to ensure that you're ready to hit the ground running when you get back to together with your group. You are welcome to work with your group on this pre-class assignment - just make sure to list who you worked with below. Also, everybody needs to turn in their own solutions! Your name // put your name here! Function 1: Creating a game board Function 1: Write a function that creates a one-dimensional game board composed of agents of two different types (0 and 1, X and O, stars and pluses... whatever you want), where the agents are assigned to spots randomly with a 50% chance of being either type. As arguments to the function, take in (1) the number of spots in the game board (setting the default to 32) and (2) a random seed that you will use to initialize the board (again with some default number), and return your game board. (Hint: which makes more sense to describe the game board, a list or a Numpy array? What are the tradeoffs?) Show that your function is behaving correctly by printing out the returned game board. End of explanation """ # Put your code here, using additional cells if necessary. """ Explanation: Function 2: deciding if an agent is happy Write a function that takes the game board generated by the function you wrote above and determines whether an agent at position i in the game board of a specified type is happy for a game board of any size and a neighborhood of size N (i.e., from position i-N to i+N), and returns that information. Make sure to check that position i is actually inside the game board (i.e., make sure the request makes sense), and ensure that it behaves correctly for agents near the edges of the game board. 
Show that your function is behaving correctly by giving having it check every position in the game board you generated previously, and decide whether the agent in each spot is happy or not. Verify by eye that it's behaving correctly. (Hint: You're going to use this later, when you're trying to decide where to put an agent. Should you write the function assuming that the agent is already in the board, or that you're testing to see whether or not you've trying to decide whether to put it there?) End of explanation """ from IPython.display import HTML HTML( """ <iframe src="https://goo.gl/forms/M7YCyE1OLzyOK7gH3?embedded=true" width="80%" height="1200px" frameborder="0" marginheight="0" marginwidth="0"> Loading... </iframe> """ ) """ Explanation: Assignment wrapup Please fill out the form that appears when you run the code below. You must completely fill this out in order to receive credit for the assignment! End of explanation """
massimo-nocentini/on-python
calculus-I/Untitled.ipynb
mit
dis """ Explanation: $${{15}\over{0}} \quad\leftrightarrow\quad 15 = 0m + r = r \quad m, r\in\mathbb{Z} \wedge r < 15$$ Se scelgo $$m=1, r=0 \quad\rightarrow\quad 15 \not = 0*1 + 0 = 0$$ Ritornando al nostro problema iniziale, il denominatore $x+4\not=0$ per ogni $x\in\mathbb{R}$, quindi $x\not=-4$. End of explanation """ sqrt(x+1) """ Explanation: Intermezzo, questo che abbiamo scritto e' la condizione di esistenza rispetto alla divisione. Ce ne possono essere altre, ad esempio: End of explanation """ solve(dis) """ Explanation: La scrittura $\sqrt{y}$ significa che devo trovare un numero $z\in\mathbb{R}$ tale che $y = z^2$. Quindi, ha senso $\sqrt{-3}$? No, perche' non esiste $z\in\mathbb{R}$ tale che $-3=z^2$. Tornando alla scrittura sopra, devo trovare un numero $z\in\mathbb{R}$ tale che $x+1 = z^2$. Qual e' la condizione per cui riesco effettivamente a trovare questo $z$? Quando $x + 1\geq 0$, ovvero $x \geq -1$. End of explanation """ x expr = abs((x**2 - 9)/(x-1)) expr from sympy.plotting import plot plot(expr,) solve(expr > 5) """ Explanation: End of explanation """
DominikDitoIvosevic/Uni
STRUCE/2018/.ipynb_checkpoints/SU-2018-LAB04-Ansambli-i-procjena-parametara-checkpoint.ipynb
mit
# Učitaj osnovne biblioteke... import sklearn import mlutils import numpy as np import scipy as sp import matplotlib.pyplot as plt %pylab inline """ Explanation: Sveučilište u Zagrebu Fakultet elektrotehnike i računarstva Strojno učenje 2018/2019 http://www.fer.unizg.hr/predmet/su Laboratorijska vježba 4: Ansambli i procjena parametara Verzija: 0.2 Zadnji put ažurirano: 7. prosinca 2018. (c) 2015-2018 Jan Šnajder, Domagoj Alagić Objavljeno: 7. prosinca 2018. Rok za predaju: 17. prosinca 2018. u 07:00h Upute Četvrta laboratorijska vježba sastoji se od četiri zadatka. Kako bi kvalitetnije, ali i na manje zamoran način usvojili gradivo ovog kolegija, potrudili smo se uključiti tri vrste zadataka: 1) implementacija manjih algoritama, modela ili postupaka; 2) eksperimenti s raznim modelima te njihovim hiperparametrima, te 3) primjena modela na (stvarnim) podatcima. Ovim zadatcima pokrivamo dvije paradigme učenja: učenje izgradnjom (engl. learning by building) i učenje eksperimentiranjem (engl. learning by experimenting). U nastavku slijedite upute navedene u ćelijama s tekstom. Rješavanje vježbe svodi se na dopunjavanje ove bilježnice: umetanja ćelije ili više njih ispod teksta zadatka, pisanja odgovarajućeg kôda te evaluiranja ćelija. Osigurajte da u potpunosti razumijete kôd koji ste napisali. Kod predaje vježbe, morate biti u stanju na zahtjev asistenta (ili demonstratora) preinačiti i ponovno evaluirati Vaš kôd. Nadalje, morate razumjeti teorijske osnove onoga što radite, u okvirima onoga što smo obradili na predavanju. Ispod nekih zadataka možete naći i pitanja koja služe kao smjernice za bolje razumijevanje gradiva (nemojte pisati odgovore na pitanja u bilježnicu). Stoga se nemojte ograničiti samo na to da riješite zadatak, nego slobodno eksperimentirajte. To upravo i jest svrha ovih vježbi. Vježbe trebate raditi samostalno. Možete se konzultirati s drugima o načelnom načinu rješavanja, ali u konačnici morate sami odraditi vježbu. U protivnome vježba nema smisla. 
End of explanation """ from collections import Counter class VotingClassifierDIY(object): SCHEME_COUNTING = "counting" SCHEME_AVERAGING = "averaging" def __init__(self, clfs, voting_scheme=SCHEME_COUNTING): self.clfs = clfs self.voting_scheme = voting_scheme def fit(self, X, y): [ clf.fit(X,y) for clf in self.clfs ] def predict_proba(self, X): if self.voting_scheme == self.SCHEME_AVERAGING: pred = [ clf.predict_proba(X) for clf in self.clfs ] pred_avg = [] for i in range(0, len(pred[0])): prob = [ p[i] for p in pred ] pred_avg.append(mean(prod, axis = 0)) return pred_avg else: raise Exception("Nemoze") def predict(self, X): if self.voting_scheme == self.SCHEME_COUNTING: pred_cnt = []; pred = []; predd = []; for i in range(0, len(self.clfs)): predd.append(self.clfs[i].predict(X)) for i in range(0, shape(predd)[1]): # 0 - 1000 for j in range(0, len(predd)): # 0 - 3 pred.append(predd[j][i]) for i in range(0, len(pred), 3): pred_cnt.append((Counter(pred[i:i+3]).most_common()[0])[0]) return pred_cnt elif self.voting_scheme == self.SCHEME_AVERAGING: avg = self.predict_proba(X) pred_avg = [] for i in range(0, len(avg)): pred_avg.append(argmax(avg[i])) return pred_avg """ Explanation: 1. Ansambli (glasovanje) (a) Vaš je zadatak napisati razred VotingClassifierDIY koji implementira glasački ansambl. Konstruktor razreda ima dva parametra: clfs koji predstavlja listu klasifikatora (objekata iz paketa sklearn) i voting_scheme koji označava radi li se o glasovanju prebrojavanjem (SCHEME_COUNTING) ili usrednjavanjem (SCHEME_AVERAGING). Glasovanje prebrojavanjem jednostavno vraća najčešću oznaku klase, dok glasovanje usrednjavanjem uprosječuje pouzdanosti klasifikacije u neku klasu (po svim klasifikatorima) te vraća onu s najvećom pouzdanošću. Primijetite da svi klasifikatori imaju jednake težine. O komplementarnosti klasifikatora vodimo računa tako da koristimo jednake klasifikatore s različitim hiperparametrima. 
Razred sadržava metode fit(X, y) za učenje ansambla i dvije metode za predikciju: predict(X) i predict_proba(X). Prva vraća predviđene oznake klasa, a druga vjerojatnosti pripadanja svakoj od klasa za svaki od danih primjera iz X. NB: Jedan od razreda koji bi Vam mogao biti koristan jest collections.Counter. Također vrijedi i za funkcije numpy.argmax i numpy.dstack. End of explanation """ from sklearn.datasets import make_classification from sklearn.ensemble import VotingClassifier from sklearn.linear_model import LogisticRegression X_voting, y_voting = make_classification(n_samples=1000, n_features=4, n_redundant=0, n_informative=3, n_classes=3, n_clusters_per_class=2) Logisticka1 = LogisticRegression(multi_class = 'auto', solver = 'lbfgs', C = 1, max_iter = 1000).fit(X_voting, y_voting) Logisticka2 = LogisticRegression(multi_class = 'auto', solver = 'lbfgs', C = 100, max_iter = 1000).fit(X_voting, y_voting) Logisticka3 = LogisticRegression(multi_class = 'auto', solver = 'lbfgs', C = 1000, max_iter = 1000).fit(X_voting, y_voting) clfs = [Logisticka1, Logisticka2, Logisticka3] moj = VotingClassifierDIY(clfs = clfs) njihov = VotingClassifier(estimators= [('lr1', Logisticka1), ('lr2', Logisticka2), ('lr3', Logisticka3)]) a = moj.fit(X_voting, y_voting) a = njihov.fit(X_voting, y_voting) moj.voting_scheme = moj.SCHEME_COUNTING print(moj.voting_scheme) njihov.voting = 'hard' print(moj.predict(X_voting)) print(njihov.predict(X_voting).all()) moj.voting_scheme = moj.SCHEME_AVERAGING njihov.voting = 'soft' print(moj.predict_proba(X_voting) == njihov.predict_proba(X_voting).all) """ Explanation: (b) Uvjerite se da Vaša implementacija radi jednako onoj u razredu ensemble.VotingClassifier, i to pri oba načina glasovanja (parametar voting). Parametar weights ostavite na pretpostavljenoj vrijednosti. Za ovu provjeru koristite tri klasifikatora logističke regresije s različitom stopom regularizacije i brojem iteracija. Koristite skup podataka dan u nastavku. 
Ekvivalentnost implementacije najlakše je provjeriti usporedbom izlaza funkcije predict (kod prebrojavanja) i funkcije predict_proba (kod usrednjavanja). NB: Ne koristimo SVM jer njegova ugrađena (probabilistička) implementacija nije posve deterministička, što bi onemogućilo robusnu provjeru Vaše implementacije. End of explanation """ from sklearn.cross_validation import train_test_split X_bag, y_bag = make_classification(n_samples=1000, n_features=20, n_redundant=1, n_informative=17, n_classes=3, n_clusters_per_class=2) X_bag_train, X_bag_test, y_bag_train, y_bag_test = train_test_split(X_bag, y_bag, train_size=0.7, random_state=69) """ Explanation: Q: Kada je prebrojavanje bolje od usrednjavanja? Zašto? A obratno? Q: Bi li se ovakav algoritam mogao primijeniti na regresiju? Kako? 2. Ansambli (bagging) U ovom zadatku ćete isprobati tipičnog predstavnika bagging-algoritma, algoritam slučajnih šuma. Pitanje na koje želimo odgovoriti jest kako se ovakvi algoritmi nose s prenaučenošću, odnosno, smanjuje li bagging varijancu modela. Eksperiment ćete provesti na danom skupu podataka: End of explanation """ from sklearn.tree import DecisionTreeClassifier from sklearn.metrics import zero_one_loss # Vaš kôd ovdje... """ Explanation: Razred koji implementira stablo odluke jest tree.DecisionTreeClassifier. Prvo naučite stablo odluke (engl. decision tree) na skupu za učenje, ali tako da je taj model presložen. To možete postići tako da povećate najveću moguću dubinu stabla (parametar max_depth). Ispišite pogrešku na skupu za ispitivanje (pogrešku 0-1; pogledajte paket metrics). End of explanation """ from sklearn.ensemble import RandomForestClassifier # Vaš kôd ovdje... """ Explanation: Sada isprobajte algoritam slučajnih šuma (dostupan u razredu ensemble.RandomForestClassifier) za različit broj stabala $L \in [1, 30]$. Iscrtajte pogrešku na skupu za učenje i na skupu za ispitivanje u ovisnosti o tom hiperparametru. Ispišite najmanju pogrešku na skupu za ispitivanje. 
End of explanation """ from sklearn.datasets import make_circles circ_X, circ_y = make_circles(n_samples=400, noise=0.1, factor=0.4) mlutils.plot_2d_clf_problem(circ_X, circ_y) """ Explanation: Q: Što možete zaključiti iz ovih grafikona? Q: Kako bagging postiže diverzifikaciju pojedinačnih osnovnih modela? Q: Koristi li ovaj algoritam složeni ili jednostavni osnovni model? Zašto? 3. Ansambli (boosting) U ovom zadatku pogledat ćemo klasifikacijski algoritam AdaBoost, koji je implementiran u razredu ensemble.AdaBoostClassifier. Ovaj algoritam tipičan je predstavnik boosting-algoritama. Najprije ćemo generirati eksperimentalni skup podataka koristeći datasets.make_circles. Ova funkcija stvara dvodimenzijski klasifikacijski problem u kojem su dva razreda podataka raspoređena u obliku kružnica, tako da je jedan razred unutar drugog. End of explanation """ from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier # Vaš kôd ovdje... """ Explanation: (a) Boosting, kao vrsta ansambla, također se temelji na kombinaciji više klasifikatora s ciljem boljih prediktivnih sposobnosti. Međutim, ono što ovakav tip ansambla čini zanimljivim jest to da za osnovni klasifikator traži slabi klasifikator (engl. weak classifier), odnosno klasifikator koji radi tek malo bolje od nasumičnog pogađanja. Često korišteni klasifikator za tu svrhu jest panj odluke (engl. decision stump), koji radi predikciju na temelju samo jedne značajke ulaznih primjera. Panj odluke specijalizacija je stabla odluke (engl. decision tree) koje smo već spomenuli. Panj odluke stablo je dubine 1. Stabla odluke implementirana su u razredu tree.DecisionTreeClassifier. Radi ilustracije, naučite ansambl (AdaBoost) koristeći panj odluke kao osnovni klasifikator, ali pritom isprobavajući različit broj klasifikatora u ansamblu iz skupa $L \in {1, 2, 3, 50}$. 
Prikažite decizijske granice na danom skupu podataka za svaku od vrijednosti korištenjem pomoćne funkcije mlutils.plot_2d_clf_problem. NB: Još jedan dokaz da hrvatska terminologija zaista može biti smiješna. :) End of explanation """ from sklearn.cross_validation import train_test_split X_boost, y_boost = make_classification(n_samples=1000, n_features=20, n_redundant=0, n_informative=18, n_classes=3, n_clusters_per_class=1) X_boost_train, X_boost_test, y_boost_train, y_boost_test = train_test_split(X_boost, y_boost, train_size=0.7, random_state=69) """ Explanation: Q: Kako AdaBoost radi? Ovise li izlazi pojedinih osnovnih modela o onima drugih? Q: Je li AdaBoost linearan klasifikator? Pojasnite. (b) Kao što je i za očekivati, broj klasifikatora $L$ u ansamblu predstavlja hiperparametar algoritma AdaBoost. U ovom zadatku proučit ćete kako on utječe na generalizacijsku sposobnost Vašeg ansambla. Ponovno, koristite panj odluke kao osnovni klasifikator. Poslužite se skupom podataka koji je dan niže. End of explanation """ from sklearn.metrics import zero_one_loss # Vaš kôd ovdje... """ Explanation: Iscrtajte krivulje pogrešaka na skupu za učenje i ispitivanje u ovisnosti o hiperparametru $L \in [1,80]$. Koristite pogrešku 0-1 iz paketa metrics. Ispišite najmanju ostvarenu pogrešku na skupu za ispitivanje, te pripadajuću vrijednost hiperparametra $L$. End of explanation """ # Vaš kôd ovdje... """ Explanation: Q: Može li uopće doći do prenaučenosti pri korištenju boosting-algoritama? (c) Kao što je rečeno na početku, boosting-algoritmi traže slabe klasifikatore kako bi bili najefikasniji što mogu biti. Međutim, kako se takav ansambl mjeri s jednim jakim klasifikatorom (engl. strong classifier)? To ćemo isprobati na istom primjeru, ali korištenjem jednog optimalno naučenog stabla odluke. Ispišite pogrešku ispitivanja optimalnog stabla odluke. Glavni hiperparametar stabala odluka jest njihova maksimalna dubina $d$ (parametar max_depth). 
Iscrtajte krivulje pogrešaka na skupu za učenje i ispitivanje u ovisnosti o dubini stabla $d \in [1,20]$. End of explanation """ # Vaš kôd ovdje... """ Explanation: Q: Isplati li se koristiti ansambl u obliku boostinga? Idu li grafikoni tome u prilog?<br> Q: Koja je prednost boostinga nad korištenjem jednog jakog klasifikatora? 4. Procjena maksimalne izglednosti i procjena maksimalne aposteriorne vjerojatnosti (a) Definirajte funkciju izglednosti $\mathcal{L}(\mu|\mathcal{D})$ za skup $\mathcal{D}={x^{(i)}}_{i=1}^N$ Bernoullijevih varijabli. Neka od $N$ varijabli njih $m$ ima vrijednost 1 (npr. od $N$ bacanja novčića, $m$ puta smo dobili glavu). Definirajte funkciju izglednosti tako da je parametrizirana s $N$ i $m$, dakle definirajte funkciju $\mathcal{L}(\mu|N,m)$. End of explanation """ # Vaš kôd ovdje... """ Explanation: (b) Prikažite funkciju $\mathcal{L}(\mu|N,m)$ za (1) $N=10$ i $m=1,2,5,9$ te za (2) $N=100$ i $m=1,10,50,90$ (dva zasebna grafikona). End of explanation """ # Vaš kôd ovdje... """ Explanation: Q: Koja vrijednost odgovara ML-procjenama i zašto? (c) Prikažite funkciju $\mathcal{L}(\mu|N,m)$ za $N=10$ i $m={0,9}$. End of explanation """ # Vaš kôd ovdje... """ Explanation: Q: Koja je ML-procjena za $\mu$ i što je problem s takvom procjenom u ovome slučaju? (d) Prikažite beta-distribuciju $B(\mu|\alpha,\beta)$ za različite kombinacije parametara $\alpha$ i $\beta$, uključivo $\alpha=\beta=1$ te $\alpha=\beta=2$. End of explanation """ # Vaš kôd ovdje... """ Explanation: Q: Koje parametere biste odabrali za modeliranje apriornog znanja o parametru $\mu$ za novčić za koji mislite da je "donekle pravedan, ali malo češće pada na glavu"? Koje biste parametre odabrali za novčić za koji držite da je posve pravedan? Zašto uopće koristimo beta-distribuciju, a ne neku drugu? 
(e) Definirajte funkciju za izračun zajedničke vjerojatnosti $P(\mu,\mathcal{D}) = P(\mathcal{D}|\mu) \cdot P(\mu|\alpha,\beta)$ te prikažite tu funkciju za $N=10$ i $m=9$ i nekolicinu kombinacija parametara $\alpha$ i $\beta$. End of explanation """ # Vaš kôd ovdje... """ Explanation: Q: Koje vrijednosti odgovaraju MAP-procjeni za $\mu$? Usporedite ih sa ML-procjenama. (f) Za $N=10$ i $m=1$, na jednome grafikonu prikažite sve tri distribucije: $P(\mu,\mathcal{D})$, $P(\mu|\alpha,\beta)$ i $\mathcal{L}(\mu|\mathcal{D})$. End of explanation """ from sklearn.datasets import load_iris import itertools as it # Vaš kôd ovdje... """ Explanation: (g) Pročitajte ove upute o učitavanju oglednih skupova podataka u SciPy. Učitajte skup podataka Iris. Taj skup sadrži $n=4$ značajke i $K=3$ klase. Odaberite jednu klasu i odaberite sve primjere iz te klase, dok ostale primjere zanemarite (u nastavku radite isključivo s primjerima iz te jedne klase). Vizualizirajte podatke tako da načinite 2D-prikaze za svaki par značajki (šest grafikona; za prikaz je najjednostavnije koristiti funkciju scatter). NB: Mogla bi Vam dobro dući funkcija itertools.combinations. End of explanation """ # Vaš kôd ovdje... """ Explanation: (h) Implementirajte funkciju log-izglednosti za parametre $\mu$ i $\sigma^2$ normalne distribucije. End of explanation """ # Vaš kôd ovdje... """ Explanation: (i) Izračunajte ML-procjene za $(\mu, \sigma^2)$ za svaku od $n=4$ značajki iz skupa Iris. Ispišite log-izglednosti tih ML-procjena. End of explanation """ from scipy.stats import pearsonr # Vaš kôd ovdje... """ Explanation: Q: Možete li, na temelju dobivenih log-izglednosti, zaključiti koja se značajka najbolje pokorava normalnoj distribuciji? (j) Proučite funkciju pearsonr za izračun Pearsonovog koeficijenta korelacije. Izračunajte koeficijente korelacije između svih četiri značajki u skupu Iris. End of explanation """ # Vaš kôd ovdje... 
""" Explanation: (k) Proučite funkciju cov te izračunajte ML-procjenu za kovarijacijsku matricu za skup Iris. Usporedite pristranu i nepristranu procjenu. Pokažite da se razlika (srednja apsolutna i kvadratna) smanjuje s brojem primjera (npr. isprobajte za $N/4$ i $N/2$ i $N$ primjera). End of explanation """
Kunstenpunt/datakunstjes
corpusanalyse uitdatabank/corpusanalyse.ipynb
apache-2.0
from pandas import read_excel, read_csv, DataFrame, Series, concat from datetime import datetime from codecs import open from re import compile from json import dumps from datetime import datetime from random import sample from collections import Counter from itertools import combinations """ Explanation: Corpusanalyse UiTdatabank We hebben de volgende bibliotheken nodig: End of explanation """ df_podium = read_excel("ruwe data/podium.xlsx", sheetname='theaterdans1014') df_bk = read_excel("ruwe data/beeldendekunsten.xlsx", sheetname='UitRapport') df_muziek = read_excel("ruwe data/muziek.xlsx", sheetname='Int nat reg') df_podium["Organisator"] = df_podium["Typologie organisator"] df_podium = df_podium[df_podium["Datum"].between(datetime(2014, 1, 1), datetime(2014, 12, 31))] """ Explanation: Dan lezen we lezen de gegevens in End of explanation """ datumregex = compile(r"\d\d/\d\d/\d\d") df_muziek_expanded = df_muziek.copy() for row in df_muziek_expanded.iterrows(): speelmomenten = row[1]["Speelmomenten"] if str(speelmomenten) != "nan": for speelmoment in datumregex.findall(speelmomenten): speelmoment_dt = datetime(int("20" + speelmoment.split("/")[2]), int(speelmoment.split("/")[1]), int(speelmoment.split("/")[0])) if speelmoment_dt != row[1]["Datum"]: df_muziek_expanded = df_muziek_expanded.append( Series( {"Discipline": row[1]["Discipline"], "Subdiscipline": row[1]["Subdiscipline"], "Tekst": row[1]["Tekst"], "Datum": speelmoment_dt, "Gemeente": row[1]["Gemeente"] }, name=speelmoment_dt.isoformat() + " " + str(row[0]) ) ) df_muziek_expanded = df_muziek_expanded[df_muziek_expanded["Datum"].between(datetime(2014, 1, 1), datetime(2014, 12, 31))] """ Explanation: Samenbrengen en voorbereiden van de data Voor de muziekgegevens moeten we nog controleren dat concerten eventuele herhalingen hebben, door in de speelmomenten kolom na te gaan wat de speelmomenten zijn. 
Dit is niet nodig bij de beeldende kunsten, aangezien daar de unit of analysis de tentoonstelling is, onafhankelijk van hoelang die tentoonstelling loopt. Bij podiumkunsten zijn de speelmomenten manueel gecheckt, ocharme simon. End of explanation """ subdisciplines = ["Beeldhouwkunst", "Fotografie", "Grafiek", "Installatiekunst", "Kunst en kunsteducatie", "Meerdere kunstvormen", "Schilderkunst"] df_bk_filtered = df_bk[df_bk["Datum tot"] != datetime(1900, 1, 1)] df_bk_filtered = df_bk_filtered[df_bk_filtered["Subdiscipline"].isin(subdisciplines)] df_bk_filtered = df_bk_filtered[ (df_bk_filtered["Datum van"].between(datetime(2014, 1, 1), datetime(2014, 12, 31))) | (df_bk_filtered["Datum tot"].between(datetime(2014, 1, 1), datetime(2014, 12, 31))) ] df_bk_filtered["Datum"] = df_bk_filtered["Datum van"] df_bk_filtered = df_bk_filtered.drop(["Datum van", "Datum tot"], axis=1) """ Explanation: Voor de tentoonstellingen moeten we ook nog inperken op thema, en ook de permanente tentoonstellingen eruit zwieren. Bvendien lopen tentoonstellingen ook gedurende een zekere periode, dus we moeten ook controleren op tentoonstellingen die nog voor 1 januari 2014 beginnen, maar wel nog tijdens 2014 lopen. Idem voor einde van het jaar. 
End of explanation """ typering = read_excel("extra gegevens/typologie-organisatoren-plat.xlsx") def simplify_key(k): return str(str(k).lower().strip().replace(" ", "").encode("ascii", "replace")).replace("?", "").replace("_", "").replace('"', '').lstrip("b'").rstrip("'").replace('"', '').replace("'", "").replace(".", "").replace(",", "") def map_organisator_naar_typologie_plat(item, mapping): key = simplify_key(item) try: return mapping[mapping["key"] == key]["value"].values[0] except IndexError: onmapbaar.add(key) onmapbaar = set() df_bk_filtered["typering"] = df_bk_filtered["Organisator"].apply(map_organisator_naar_typologie_plat, args=(typering,)) df_muziek_expanded["typering"] = df_muziek_expanded["Organisator"].apply(map_organisator_naar_typologie_plat, args=(typering,)) df_podium["typering"] = df_podium["Organisator"].apply(map_organisator_naar_typologie_plat, args=(typering,)) """ Explanation: We gaan nu de de organisator omzetten naar een typering. We moeten eerst beginnen met die mapping van organisator naar typering op te bouwen op basis van de gegevens die Simon wist aan te leveren. End of explanation """ df_muziek_expanded_clean = df_muziek_expanded.drop("Speelmomenten", axis=1) df_podium_clean = df_podium.drop("Typologie organisator", axis=1) df = concat([df_podium_clean, df_bk_filtered, df_muziek_expanded_clean]) df.drop_duplicates(subset=["Datum", "Titel", "Discipline", "Subdiscipline", "Gemeente", "Tekst"], inplace=True) df.drop(["Titel", "Organisator"], axis=1, inplace=True) """ Explanation: Nu voegen we alles mooi samen, selecteren enkel de juiste kolommen, en gooien ook duplicaten op basis van datum, gemeente en tekst eruit. Bovendien hebben we de kolom met speelmomenten ook niet meer nodig. End of explanation """ df.head() """ Explanation: We kunnen kort inspecteren hoe deze data eruitzien. 
End of explanation """ df["Discipline"].value_counts() """ Explanation: We zien dat elke lijn een event beschrijving bevat, de plaats waar een event plaatsvindt, en ook de datum. Merk op dat events die op meerdere dagen plaatsvinden een aparte lijn krijgen. We zullen hiermee rekening houden in de interpretatie van de resultaten. Telling per discipline van het aantal unieke events: End of explanation """ print("podium", len(df[(df["Tekst"].str.strip() == "") & (df["Discipline"] == "podium")]["Tekst"]), "concert", len(df[(df["Tekst"].str.strip() == "") & (df["Discipline"] == "Concert")]["Tekst"]), "beeldend", len(df[(df["Tekst"].str.strip() == "") & (df["Discipline"] == "Tentoonstelling")]["Tekst"])) """ Explanation: En hoeveel daarvan hebben geen beschrijving? End of explanation """ def encode(item): return item.replace("\x08", "") df["Tekst"] = df["Tekst"].apply(encode) df.to_excel("samengevoegde data/df.xlsx") """ Explanation: Zo, we kunnen deze dataset nu mooi uitschrijven naar een Excel bestand, zodat Simon nog enkele laatste correcties en aanvullingen kan doorvoeren. End of explanation """ df = read_excel("samengevoegde data/df.xlsx") """ Explanation: Landsvermeldingen Na de manuele correctie van Simon kunnen we nu aan de slag met een propere dataset, die we dan hier nu ook inlezen. End of explanation """ df.groupby(["typering", "Discipline"]).size() """ Explanation: We kunnen voor de volledigheid eventjes een overzicht maken van (organisatoren, discipline). End of explanation """ typering = read_csv("extra gegevens/mapping_udb-gemeente_fusie-gemeente.csv", delimiter=';') coord = read_csv("extra gegevens/coordinaten.csv", delimiter=';') landen = read_excel("extra gegevens/landen.xlsx", sheetname="uitgebreide lijst 2014") """ Explanation: Voor onze analyse hebben we ook nood aan een lijst van namen van landen, coordinaten voor de plaatsnamen, en ook een manueel gemaakte mapping om de plaatsnamen in de UiTdatabank gegevens te normaliseren. 
End of explanation """ landen.head() """ Explanation: Laten we even in detail deze tabellen bekijken. De landen: End of explanation """ typering.tail() """ Explanation: Voor elk land weten we in welk (staatkundig) continent het ligt, en we hebben in de kolom 'Mention' verschillende manieren waarop dat land kan voorkomen in de tekst. Bij typering zien we het volgende: End of explanation """ coord.head() """ Explanation: De kolom Gemeente Origineel is de naam van de gemeente in de uitdatabank gegevens, en we kunnen de naam in de kolom Fusiegemeente en Province (English) gebruiken om een genormaliseerd zicht te krijgen. Tot slot hebben we nog de coordinaten: End of explanation """ niet_vlaams = ["Jodoigne", "Tournai", "Escanaffles", "Houffalize", "Haulchin", "Braine l'Alleud", "Tourinnes-la-Grosse", "Liège", "Marchienne-au-Pont", "Eupen", "Lessines", "Charleroi"] count = 1 kwic = [] aantal_treffers = 0 for row in df.iterrows(): if count % 5000 == 0: print(count, "of", len(df.index)) count += 1 tekst = row[1]["Tekst"] gemeente = row[1]["Gemeente"] organisatie = row[1]["typering"] if str(gemeente) != "nan" and str(gemeente) not in niet_vlaams: for land in set(landen["Land"].values): regex = compile(r"\b(" + r"|".join(landen[landen["Land"] == land]["Mention"]) + r")\b") matches = regex.finditer(str(tekst)) for match in matches: aantal_treffers += 1 typeringlijn = typering[typering["Gemeente Origineel"] == gemeente] fusiegemeente = typeringlijn["Fusiegemeente"].values[0] provincie = typeringlijn["Province (English)"].values[0] continent = landen[landen["Land"] == land]["Continent (staatkundig)"].values[0] discipline = row[1]["Discipline"] subdiscipline = row[1]["Subdiscipline"] uid = count kwic_lijn = [uid, tekst[:match.start()], tekst[match.start():match.end()], tekst[match.end():], gemeente, land, discipline, subdiscipline] kwic.append(kwic_lijn) DataFrame(kwic, columns=["uid", "left context", "keyword", "right context", "gemeente", "land", "discipline", 
"subdiscipline"]).to_excel("kwic.xlsx") """ Explanation: Hiermee kunnen we voor iedere Fusiegemeente (zie vorige tabel) de latitude en longitude ophalen. Nu gaan we voor ieder event in de UiTdatabankgegevens na welk land er vermeld wordt in de beschrijving van dat event. We houden ook al onmiddellijk bij wat de genormaliseerde naam is van de gemeente en de coordinaten van het centrum. Bovendien tellen we binnen het land ook nog de verschillende disciplines en subdisciplines. End of explanation """
tensorflow/docs-l10n
site/ja/guide/keras/rnn.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ import numpy as np import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers """ Explanation: Keras の再帰型ニューラルネットワーク(RNN) <table class="tfo-notebook-buttons" align="left"> <td><a target="_blank" href="https://www.tensorflow.org/guide/keras/rnn"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で実行</a></td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/guide/keras/rnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colabで実行</a> </td> <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/guide/keras/rnn.ipynb"> <img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png"> GitHubでソースを表示</a></td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/guide/keras/rnn.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png"> ノートブックをダウンロード</a> </td> </table> はじめに 再帰型ニューラルネットワーク(RNN)は、時系列や自然言語などのシーケンスデータのモデリングを強力に行うニューラルネットワークのクラスです。 概略的には、RNN レイヤーは for ループを使用して、それまでに確認した時間ステップに関する情報をエンコードする内部状態を維持しながらシーケンスの時間ステップをイテレートします。 Keras RNN API は、次に焦点を当てて設計されています。 使いやすさ: keras.layers.RNN、keras.layers.LSTM、keras.layers.GRU レイヤーがビルトインされているため、難しい構成選択を行わずに、再帰型モデルを素早く構築できます。 カスタマイズしやすさ: カスタムビヘイビアを使って独自の RNN セルレイヤーを構築し(for ループの内部)、一般的な 
keras.layers.RNN レイヤー(for ループ自体)で使用することもできます。このため、異なるリサーチアイデアを最小限のコードで柔軟に素早くプロトタイプすることができます。 セットアップ End of explanation """ model = keras.Sequential() # Add an Embedding layer expecting input vocab of size 1000, and # output embedding dimension of size 64. model.add(layers.Embedding(input_dim=1000, output_dim=64)) # Add a LSTM layer with 128 internal units. model.add(layers.LSTM(128)) # Add a Dense layer with 10 units. model.add(layers.Dense(10)) model.summary() """ Explanation: ビルトイン RNN レイヤー: 単純な例 Keras には、次の 3 つのビルトイン RNN レイヤーがあります。 keras.layers.SimpleRNN: 前の時間ステップの出力が次の時間ステップにフィードされる、完全に連結された RNN です。 keras.layers.GRU: Cho et al., 2014 で初めて提案されたレイヤー。 keras.layers.LSTM: Hochreiter &amp; Schmidhuber, 1997 で初めて提案されたレイヤー。 2015 年始めに、Keras に、LSTM および GRU の再利用可能なオープンソース Python 実装が導入されました。 整数のシーケンスを処理し、そのような整数を 64 次元ベクトルに埋め込み、LSTM レイヤーを使用してベクトルのシーケンスを処理する Sequential モデルの単純な例を次に示しています。 End of explanation """ model = keras.Sequential() model.add(layers.Embedding(input_dim=1000, output_dim=64)) # The output of GRU will be a 3D tensor of shape (batch_size, timesteps, 256) model.add(layers.GRU(256, return_sequences=True)) # The output of SimpleRNN will be a 2D tensor of shape (batch_size, 128) model.add(layers.SimpleRNN(128)) model.add(layers.Dense(10)) model.summary() """ Explanation: ビルトイン RNN は、多数の有益な特徴をサポートしています。 dropout および recurrent_dropout 引数を介した再帰ドロップアウト go_backwards 引数を介して、入力シーケンスを逆順に処理する能力 unroll 引数を介したループ展開(CPU で短いシーケンスを処理する際に大幅な高速化が得られる) など。 詳細については、「RNN API ドキュメント」を参照してください。 出力と状態 デフォルトでは、RNN レイヤーの出力には、サンプル当たり 1 つのベクトルが含まれます。このベクトルは、最後の時間ステップに対応する RNN セル出力で、入力シーケンス全体の情報が含まれます。この出力の形状は (batch_size, units) で、units はレイヤーのコンストラクタに渡される units 引数に対応します。 RNN レイヤーは、return_sequences=True に設定した場合、各サンプルに対する出力のシーケンス全体(各サンプルの時間ステップごとに 1 ベクトル)を返すこともできます。この出力の形状は (batch_size, timesteps, units) です。 End of explanation """ encoder_vocab = 1000 decoder_vocab = 2000 encoder_input = layers.Input(shape=(None,)) encoder_embedded = layers.Embedding(input_dim=encoder_vocab, 
output_dim=64)( encoder_input ) # Return states in addition to output output, state_h, state_c = layers.LSTM(64, return_state=True, name="encoder")( encoder_embedded ) encoder_state = [state_h, state_c] decoder_input = layers.Input(shape=(None,)) decoder_embedded = layers.Embedding(input_dim=decoder_vocab, output_dim=64)( decoder_input ) # Pass the 2 states to a new LSTM layer, as initial state decoder_output = layers.LSTM(64, name="decoder")( decoder_embedded, initial_state=encoder_state ) output = layers.Dense(10)(decoder_output) model = keras.Model([encoder_input, decoder_input], output) model.summary() """ Explanation: さらに、RNN レイヤーはその最終内部状態を返すことができます。返された状態は、後で RNN 実行を再開する際に使用するか、別の RNN を初期化するために使用できます。この設定は通常、エンコーダ・デコーダ方式の Sequence-to-Sequence モデルで使用され、エンコーダの最終状態がデコーダの初期状態として使用されます。 内部状態を返すように RNN レイヤーを構成するには、レイヤーを作成する際に、return_state パラメータを True に設定します。LSTM には状態テンソルが 2 つあるのに対し、GRU には 1 つしかないことに注意してください。 レイヤーの初期状態を構成するには、追加のキーワード引数 initial_state を使ってレイヤーを呼び出します。次の例に示すように、状態の形状は、レイヤーのユニットサイズに一致する必要があることに注意してください。 End of explanation """ paragraph1 = np.random.random((20, 10, 50)).astype(np.float32) paragraph2 = np.random.random((20, 10, 50)).astype(np.float32) paragraph3 = np.random.random((20, 10, 50)).astype(np.float32) lstm_layer = layers.LSTM(64, stateful=True) output = lstm_layer(paragraph1) output = lstm_layer(paragraph2) output = lstm_layer(paragraph3) # reset_states() will reset the cached state to the original initial_state. # If no initial_state was provided, zero-states will be used by default. 
lstm_layer.reset_states() """ Explanation: RNN レイヤーと RNN セル ビルトイン RNN レイヤーのほかに、RNN API は、セルレベルの API も提供しています。入力シーケンスの全バッチを処理する RNN レイヤーとは異なり、RNN セルは単一の時間ステップのみを処理します。 セルは、RNN レイヤーの for ループ内にあります。keras.layers.RNN レイヤー内のセルをラップすることで、シーケンスのバッチを処理できるレイヤー(RNN(LSTMCell(10)) など)を得られます。 数学的には、RNN(LSTMCell(10)) は LSTM(10) と同じ結果を出します。実際、TF v1.x でのこのレイヤーの実装は、対応する RNN セルを作成し、それを RNN レイヤーにラップするだけでした。ただし、ビルトインの GRU と LSTM レイヤーを使用すれば、CuDNN が使用できるようになり、パフォーマンスの改善を確認できることがあります。 ビルトイン RNN セルには 3 つあり、それぞれ、それに一致する RNN レイヤーに対応しています。 keras.layers.SimpleRNNCell は SimpleRNN レイヤーに対応します。 keras.layers.GRUCell は GRU レイヤーに対応します。 keras.layers.LSTMCell は LSTM レイヤーに対応します。 セルの抽象化とジェネリックな keras.layers.RNN クラスを合わせることで、リサーチ用のカスタム RNN アーキテクチャの実装を簡単に行えるようになります。 バッチ間のステートフルネス 非常に長い(無限の可能性のある)シーケンスを処理する場合は、バッチ間ステートフルネスのパターンを使用するとよいでしょう。 通常、RNN レイヤーの内部状態は、新しいバッチが確認されるたびにリセットされます(レイヤーが確認する各サンプルは、過去のサンプルとは無関係だと考えられます)。レイヤーは、あるサンプルを処理する間のみ状態を維持します。 ただし、非常に長いシーケンスがある場合、より短いシーケンスに分割し、レイヤーの状態をリセットせずにそれらの短いシーケンスを順次、RNN レイヤーにフィードすることができます。こうすると、レイヤーはサブシーケンスごとに確認していても、シーケンス全体の情報を維持することができます。 これは、コンストラクタに stateful=True を設定して行います。 シーケンス s = [t0, t1, ... t1546, t1547] があるとした場合、これを次のように分割します。 s1 = [t0, t1, ... t100] s2 = [t101, ... t201] ... s16 = [t1501, ... 
t1547] そして、次のようにして処理します。 python lstm_layer = layers.LSTM(64, stateful=True) for s in sub_sequences: output = lstm_layer(s) 状態をクリアする場合は、layer.reset_states() を使用できます。 注意: このセットアップでは、あるバッチのサンプル i は前のバッチのサンプル i の続きであることを前提としています。つまり、すべてのバッチには同じ数のサンプル(バッチサイズ)が含まれることになります。たとえば、バッチに [sequence_A_from_t0_to_t100, sequence_B_from_t0_to_t100] が含まれるとした場合、次のバッチには、[sequence_A_from_t101_to_t200, sequence_B_from_t101_to_t200] が含まれます。 完全な例を次に示します。 End of explanation """ paragraph1 = np.random.random((20, 10, 50)).astype(np.float32) paragraph2 = np.random.random((20, 10, 50)).astype(np.float32) paragraph3 = np.random.random((20, 10, 50)).astype(np.float32) lstm_layer = layers.LSTM(64, stateful=True) output = lstm_layer(paragraph1) output = lstm_layer(paragraph2) existing_state = lstm_layer.states new_lstm_layer = layers.LSTM(64) new_output = new_lstm_layer(paragraph3, initial_state=existing_state) """ Explanation: RNN 状態の再利用 <a id="rnn_state_reuse"></a> RNN の記録済みの状態は、layer.weights() には含まれません。RNN レイヤーの状態を再利用する場合は、layer.states によって状態の値を取得し、new_layer(inputs, initial_state=layer.states) などの Keras Functional API またはモデルのサブクラス化を通じて新しいレイヤーの初期状態として使用することができます。 この場合には、単一の入力と出力を持つレイヤーのみをサポートする Sequential モデルを使用できない可能性があることにも注意してください。このモデルでは追加入力としての初期状態を使用することができません。 End of explanation """ model = keras.Sequential() model.add( layers.Bidirectional(layers.LSTM(64, return_sequences=True), input_shape=(5, 10)) ) model.add(layers.Bidirectional(layers.LSTM(32))) model.add(layers.Dense(10)) model.summary() """ Explanation: 双方向性 RNN 時系列以外のシーケンスについては(テキストなど)、開始から終了までのシーケンスを処理だけでなく、逆順に処理する場合、RNN モデルの方がパフォーマンスに優れていることがほとんどです。たとえば、ある文で次に出現する単語を予測するには、その単語の前に出現した複数の単語だけでなく、その単語に関する文脈があると役立ちます。 Keras は、そのような双方向性のある RNN を構築するために、keras.layers.Bidirectional ラッパーという簡単な API を提供しています。 End of explanation """ batch_size = 64 # Each MNIST image batch is a tensor of shape (batch_size, 28, 28). # Each input sequence will be of size (28, 28) (height is treated like time). 
input_dim = 28 units = 64 output_size = 10 # labels are from 0 to 9 # Build the RNN model def build_model(allow_cudnn_kernel=True): # CuDNN is only available at the layer level, and not at the cell level. # This means `LSTM(units)` will use the CuDNN kernel, # while RNN(LSTMCell(units)) will run on non-CuDNN kernel. if allow_cudnn_kernel: # The LSTM layer with default options uses CuDNN. lstm_layer = keras.layers.LSTM(units, input_shape=(None, input_dim)) else: # Wrapping a LSTMCell in a RNN layer will not use CuDNN. lstm_layer = keras.layers.RNN( keras.layers.LSTMCell(units), input_shape=(None, input_dim) ) model = keras.models.Sequential( [ lstm_layer, keras.layers.BatchNormalization(), keras.layers.Dense(output_size), ] ) return model """ Explanation: 内部的には、Bidirectional は渡された RNN レイヤーをコピーし、新たにコピーされたレイヤーの go_backwards フィールドを転換して、入力が逆順に処理されるようにします。 Bidirectional RNN の出力は、デフォルトで、フォワードレイヤー出力とバックワードレイヤー出力の総和となります。これとは異なるマージ動作が必要な場合は(連結など)、Bidirectional ラッパーコンストラクタの merge_mode パラメータを変更します。Bidirectional の詳細については、API ドキュメントをご覧ください。 パフォーマンス最適化と CuDNN カーネル TensorFlow 2.0 では、ビルトインの LSTM と GRU レイヤーは、GPU が利用できる場合にデフォルトで CuDNN カーネルを活用するように更新されています。この変更により、以前の keras.layers.CuDNNLSTM/CuDNNGRU レイヤーは使用廃止となったため、実行するハードウェアを気にせずにモデルを構築することができます。 CuDNN カーネルは、特定の前提を以って構築されており、レイヤーはビルトイン LSTM または GRU レイヤーのデフォルト値を変更しない場合は CuDNN カーネルを使用できません。これらには次のような例があります。 activation 関数を tanh からほかのものに変更する。 recurrent_activation 関数を sigmoid からほかのものに変更する。 recurrent_dropout &gt; 0 を使用する。 unroll を True に設定する。LSTM/GRU によって内部 tf.while_loop は展開済み for ループに分解されます。 use_bias を False に設定する。 入力データが厳密に右詰でない場合にマスキングを使用する(マスクが厳密に右詰データに対応している場合でも、CuDNN は使用されます。これは最も一般的な事例です)。 制約の詳細については、LSTM および GRU レイヤーのドキュメントを参照してください。 利用できる場合に CuDNN カーネルを使用する パフォーマンスの違いを確認するために、単純な LSTM モデルを構築してみましょう。 入力シーケンスとして、MNIST 番号の行のシーケンスを使用し(ピクセルの各行を時間ステップとして扱います)、番号のラベルを予測します。 End of explanation """ mnist = keras.datasets.mnist (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train, x_test = x_train / 255.0, x_test / 255.0 
sample, sample_label = x_train[0], y_train[0] """ Explanation: MNIST データセットを読み込みましょう。 End of explanation """ model = build_model(allow_cudnn_kernel=True) model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer="sgd", metrics=["accuracy"], ) model.fit( x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1 ) """ Explanation: モデルのインスタンスを作成してトレーニングしましょう。 sparse_categorical_crossentropy をモデルの損失関数として選択します。モデルの出力形状は [batch_size, 10] です。モデルのターゲットは整数ベクトルで、各整数は 0 から 9 の範囲内にあります。 End of explanation """ noncudnn_model = build_model(allow_cudnn_kernel=False) noncudnn_model.set_weights(model.get_weights()) noncudnn_model.compile( loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), optimizer="sgd", metrics=["accuracy"], ) noncudnn_model.fit( x_train, y_train, validation_data=(x_test, y_test), batch_size=batch_size, epochs=1 ) """ Explanation: では、CuDNN カーネルを使用しないモデルと比較してみましょう。 End of explanation """ import matplotlib.pyplot as plt with tf.device("CPU:0"): cpu_model = build_model(allow_cudnn_kernel=True) cpu_model.set_weights(model.get_weights()) result = tf.argmax(cpu_model.predict_on_batch(tf.expand_dims(sample, 0)), axis=1) print( "Predicted result is: %s, target result is: %s" % (result.numpy(), sample_label) ) plt.imshow(sample, cmap=plt.get_cmap("gray")) """ Explanation: NVIDIA GPU と CuDNN がインストールされたマシンで実行すると、CuDNN で構築されたモデルの方が、通常の TensorFlow カーネルを使用するモデルに比べて非常に高速に実行されます。 CPU のみの環境で推論を実行する場合でも、同じ CuDNN 対応モデルを使用できます。次の tf.device 注釈は単にデバイスの交換を強制しています。GPU が利用できないな場合は、デフォルトで CPU で実行されます。 実行するハードウェアを気にする必要がなくなったのです。素晴らしいと思いませんか? 
End of explanation """ class NestedCell(keras.layers.Layer): def __init__(self, unit_1, unit_2, unit_3, **kwargs): self.unit_1 = unit_1 self.unit_2 = unit_2 self.unit_3 = unit_3 self.state_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])] self.output_size = [tf.TensorShape([unit_1]), tf.TensorShape([unit_2, unit_3])] super(NestedCell, self).__init__(**kwargs) def build(self, input_shapes): # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)] i1 = input_shapes[0][1] i2 = input_shapes[1][1] i3 = input_shapes[1][2] self.kernel_1 = self.add_weight( shape=(i1, self.unit_1), initializer="uniform", name="kernel_1" ) self.kernel_2_3 = self.add_weight( shape=(i2, i3, self.unit_2, self.unit_3), initializer="uniform", name="kernel_2_3", ) def call(self, inputs, states): # inputs should be in [(batch, input_1), (batch, input_2, input_3)] # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)] input_1, input_2 = tf.nest.flatten(inputs) s1, s2 = states output_1 = tf.matmul(input_1, self.kernel_1) output_2_3 = tf.einsum("bij,ijkl->bkl", input_2, self.kernel_2_3) state_1 = s1 + output_1 state_2_3 = s2 + output_2_3 output = (output_1, output_2_3) new_states = (state_1, state_2_3) return output, new_states def get_config(self): return {"unit_1": self.unit_1, "unit_2": unit_2, "unit_3": self.unit_3} """ Explanation: リスト/ディクショナリ入力、またはネストされた入力を使う RNN ネスト構造の場合、インプルメンターは単一の時間ステップにより多くの情報を含めることができます。たとえば、動画のフレームに、音声と動画の入力を同時に含めることができます。この場合のデータ形状は、次のようになります。 [batch, timestep, {"video": [height, width, channel], "audio": [frequency]}] 別の例では、手書きのデータに、現在のペンの位置を示す座標 x と y のほか、筆圧情報も含めることができます。データは次のように表現できます。 [batch, timestep, {"location": [x, y], "pressure": [force]}] 次のコードは、このような構造化された入力を受け入れるカスタム RNN セルの構築方法を例に示しています。 ネストされた入力/出力をサポートするカスタムセルを定義する 独自レイヤーの記述に関する詳細は、「サブクラス化による新規レイヤーとモデルの作成」を参照してください。 End of explanation """ unit_1 = 10 unit_2 = 20 unit_3 = 30 i1 = 32 i2 = 64 i3 = 32 batch_size = 64 num_batches = 10 timestep = 50 cell = 
NestedCell(unit_1, unit_2, unit_3) rnn = keras.layers.RNN(cell) input_1 = keras.Input((None, i1)) input_2 = keras.Input((None, i2, i3)) outputs = rnn((input_1, input_2)) model = keras.models.Model([input_1, input_2], outputs) model.compile(optimizer="adam", loss="mse", metrics=["accuracy"]) """ Explanation: ネストされた入力/出力で RNN モデルを構築する 上記で定義した keras.layers.RNN レイヤーとカスタムセルを使用する Keras モデルを構築しましょう。 End of explanation """ input_1_data = np.random.random((batch_size * num_batches, timestep, i1)) input_2_data = np.random.random((batch_size * num_batches, timestep, i2, i3)) target_1_data = np.random.random((batch_size * num_batches, unit_1)) target_2_data = np.random.random((batch_size * num_batches, unit_2, unit_3)) input_data = [input_1_data, input_2_data] target_data = [target_1_data, target_2_data] model.fit(input_data, target_data, batch_size=batch_size) """ Explanation: ランダムに生成されたデータでモデルをトレーニングする このモデルに適した候補データセットを持ち合わせていないため、ランダムな Numpy データを使って実演することにします。 End of explanation """
thisisbasil/SarcasmDetectionTwitter
plot_roc.ipynb
gpl-3.0
print(__doc__) import numpy as np import matplotlib.pyplot as plt from itertools import cycle from sklearn import svm, datasets from sklearn.metrics import roc_curve, auc from sklearn.model_selection import train_test_split from sklearn.preprocessing import label_binarize from sklearn.multiclass import OneVsRestClassifier from scipy import interp # Import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Binarize the output y = label_binarize(y, classes=[0, 1, 2]) n_classes = y.shape[1] # Add noisy features to make the problem harder random_state = np.random.RandomState(0) n_samples, n_features = X.shape X = np.c_[X, random_state.randn(n_samples, 200 * n_features)] # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5, random_state=0) # Learn to predict each class against the other classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True, random_state=random_state)) y_score = classifier.fit(X_train, y_train).decision_function(X_test) # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) """ Explanation: ======================================= Receiver Operating Characteristic (ROC) ======================================= Example of Receiver Operating Characteristic (ROC) metric to evaluate classifier output quality. ROC curves typically feature true positive rate on the Y axis, and false positive rate on the X axis. This means that the top left corner of the plot is the "ideal" point - a false positive rate of zero, and a true positive rate of one. 
This is not very realistic, but it does mean that a larger area under the curve (AUC) is usually better. The "steepness" of ROC curves is also important, since it is ideal to maximize the true positive rate while minimizing the false positive rate. Multiclass settings ROC curves are typically used in binary classification to study the output of a classifier. In order to extend ROC curve and ROC area to multi-class or multi-label classification, it is necessary to binarize the output. One ROC curve can be drawn per label, but one can also draw a ROC curve by considering each element of the label indicator matrix as a binary prediction (micro-averaging). Another evaluation measure for multi-class classification is macro-averaging, which gives equal weight to the classification of each label. .. note:: See also :func:`sklearn.metrics.roc_auc_score`, :ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`. End of explanation """ plt.figure() lw = 2 plt.plot(fpr[2], tpr[2], color='darkorange', lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() """ Explanation: Plot of a ROC curve for a specific class End of explanation """ # Compute macro-average ROC curve and ROC area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area 
= {0:0.2f})' ''.format(roc_auc["micro"]), color='deeppink', linestyle=':', linewidth=4) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), color='navy', linestyle=':', linewidth=4) colors = cycle(['aqua', 'darkorange', 'cornflowerblue']) for i, color in zip(range(n_classes), colors): plt.plot(fpr[i], tpr[i], color=color, lw=lw, label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--', lw=lw) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Some extension of Receiver operating characteristic to multi-class') plt.legend(loc="lower right") plt.show() """ Explanation: Plot ROC curves for the multiclass problem End of explanation """
craigrshenton/home
notebooks/notebook6.ipynb
mit
# code written in py_3.0 import pandas as pd import numpy as np """ Explanation: Load data from http://media.wiley.com/product_ancillary/6X/11186614/DOWNLOAD/ch06.zip, RetailMart.xlsx End of explanation """ # find path to your RetailMart.xlsx df_accounts = pd.read_excel(open('C:/Users/craigrshenton/Desktop/Dropbox/excel_data_sci/ch06/RetailMart.xlsx','rb'), sheetname=0) df_accounts = df_accounts.drop('Unnamed: 17', 1) # drop empty col df_accounts.rename(columns={'PREGNANT':'Pregnant'}, inplace=True) df_accounts.rename(columns={'Home/Apt/ PO Box':'Residency'}, inplace=True) # add simpler col name df_accounts.columns = [x.strip().replace(' ', '_') for x in df_accounts.columns] # python does not like spaces in var names df_accounts.head() """ Explanation: Load customer account data - i.e., past product sales data End of explanation """ df_accounts['Pregnant'] = df_accounts['Pregnant'].astype('category') # set col type """ Explanation: We need to categorise the 'Pregnant' column so that it can only take on one of two (in this case) possabilities. 
Here 1 = pregnant, and 0 = not pregnant End of explanation """ # dummify gender var dummy_gender = pd.get_dummies(df_accounts['Implied_Gender'], prefix='Gender') print(dummy_gender.head()) # dummify residency var dummy_resident = pd.get_dummies(df_accounts['Residency'], prefix='Resident') print(dummy_resident.head()) # make clean dataframe for regression model cols_to_keep = df_accounts.columns[2:len(df_accounts.columns)-1] # keep all but 'Pregnant' var # add dummy vars back in data = pd.concat([dummy_gender.ix[:, 'Gender_M':],dummy_resident.ix[:, 'Resident_H':],df_accounts[cols_to_keep]], axis=1) data.insert(0, 'Intercept', 1.0) # manually add the intercept data.head() from patsy import dmatrices import statsmodels.api as sm train_cols = data.columns[1:] logit = sm.Logit(df_accounts['Pregnant'], data[train_cols]) # fit the model result = logit.fit() print('Parameters:') print(result.params) print(result.summary()) """ Explanation: Following Greg Lamp over at the Yhat Blog (see here), we need to 'dummify' (i.e., separate out) the catagorical variables: gender and residency End of explanation """ # define X and y X = data[train_cols] y = df_accounts['Pregnant'] # train/test split from sklearn.cross_validation import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1) # train a logistic regression model from sklearn.linear_model import LogisticRegression logreg = LogisticRegression(C=1e9) logreg.fit(X_train, y_train) # make predictions for testing set y_pred_class = logreg.predict(X_test) # calculate testing accuracy from sklearn import metrics print(metrics.accuracy_score(y_test, y_pred_class)) # predict probability of survival y_pred_prob = logreg.predict_proba(X_test)[:, 1] import matplotlib.pyplot as plt # plot ROC curve fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred_prob) plt.plot(fpr, tpr) plt.plot([0, 1], [0, 1], color='navy', linestyle='--') plt.xlim([-0.05, 1.0]) plt.ylim([0.0, 1.05]) 
plt.gca().set_aspect('equal', adjustable='box') plt.xlabel('False Positive Rate (1 - Specificity)') plt.ylabel('True Positive Rate (Sensitivity)') plt.show() # calculate AUC print(metrics.roc_auc_score(y_test, y_pred_prob)) # histogram of predicted probabilities grouped by actual response value df = pd.DataFrame({'probability':y_pred_prob, 'actual':y_test}) df.hist(column='probability', by='actual', sharex=True, sharey=True) plt.show() # calculate cross-validated AUC from sklearn.cross_validation import cross_val_score cross_val_score(logreg, X, y, cv=10, scoring='roc_auc').mean() """ Explanation: logistic reg revisited with sklearn End of explanation """ from sklearn.ensemble import RandomForestClassifier import matplotlib.pyplot as plt clf = RandomForestClassifier(n_estimators = 500, n_jobs = -1) clf.fit(data[train_cols], df_accounts['Pregnant']) # sort the features by importance sorted_idx = clf.feature_importances_ df_features = pd.DataFrame({"Feature": train_cols}) df_features['Importance'] = sorted_idx df_features = df_features.sort_values(by=['Importance'], ascending=[True]) # sort my most important feature ax = df_features.plot(kind='barh', title ="Classification Feature Importance", figsize=(15, 10), legend=False, fontsize=12) ax.set_xlabel("Importance", fontsize=12) ax.set_yticklabels(df_features['Feature']) plt.show() """ Explanation: Random forest feature selection End of explanation """
sdpython/actuariat_python
_doc/notebooks/sessions/seance5_cube_multidimensionnel_correction.ipynb
mit
%matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') import pyensae from pyquickhelper.helpgen import NbImage from jyquickhelper import add_notebook_menu add_notebook_menu() """ Explanation: Cube multidimensionnel - correction Manipulation de tables de mortalités façon OLAP, correction des exercices. End of explanation """ from actuariat_python.data import table_mortalite_euro_stat table_mortalite_euro_stat() import pandas df = pandas.read_csv("mortalite.txt", sep="\t", encoding="utf8", low_memory=False) df2 = df[["annee", "age_num","indicateur","pays","genre","valeur"]].dropna().reset_index(drop=True) piv = df2.pivot_table(index=["annee", "age_num","pays","genre"], columns=["indicateur"], values="valeur") import xarray ds = xarray.Dataset.from_dataframe(piv) ds """ Explanation: On lit les données puis on recrée un DataSet : End of explanation """ ds.assign(LIFEEXP_add = ds.LIFEXP-1) meanp = ds.mean(dim="pays") ds1, ds2 = xarray.align(ds, meanp, join='outer') joined = ds1.assign(meanp = ds2["LIFEXP"]) joined.to_dataframe().head() """ Explanation: Exercice 1 : que font les lignes suivantes ? Le programme suivant uilise les fonctions align nad reindex pour faire une moyenne sur une des dimensions du DataSet (le pays) puis à ajouter une variable meanp contenant le résultat. End of explanation """ joined.sel(annee=2000, age_num=59, genre='F')["meanp"] """ Explanation: Les valeurs meanp sont constantes quelque soient le pays à annee, age_num, genre fixés. End of explanation """
angelmtenor/data-science-keras
titanic.ipynb
mit
import os import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import helper import keras helper.info_gpu() helper.reproducible(seed=0) # Setup reproducible results from run to run using Keras %matplotlib inline """ Explanation: Titanic Survival with DNN Predicting survival on the Titanic using an artificial neural network in Keras Supervised Learning. Binary classification This project is based on a dataset containing demographics and passenger information from 891 of the 2224 passengers and crew on board the Titanic. A description of this dataset is on the Kaggle website, where the data was obtained. End of explanation """ data_path = 'data/titanic_data.csv' target = ['Survived'] # the target will remain the same throughout the notebook df_original = pd.read_csv(data_path) print("{} rows \n{} columns \ntarget: {}".format(*df_original.shape, target)) """ Explanation: 1. Data Processing and Exploratory Data Analysis End of explanation """ df_original.head(3) """ Explanation: Show the data End of explanation """ df_original.describe(percentiles=[0.5]) """ Explanation: Numerical Data End of explanation """ df_original.describe(include=['O']) """ Explanation: Non-numerical Data End of explanation """ helper.missing(df_original) """ Explanation: Missing values End of explanation """ df = df_original.copy() # modified dataset def enhance_features(df, dict_categories=None): """ Enhance dataframe df """ df = df.copy() # filter Cabin to first letter df["Cabin"] = df["Cabin"].str[0] # get Title from Name df['Title'] = df["Name"].str.extract('([A-Za-z]+)\.', expand=False) # remove low frequency values for the new feautres fields = ['Cabin', 'Title'] df, dict_categories = helper.remove_categories(df, target=target, show=False) # Alone passenger df['Alone'] = ((df["SibSp"] + df["Parch"]) == 0).astype(int) return df, dict_categories df, dict_categories = enhance_features(df) """ Explanation: Binary target "Survived": ~38% ones; F1 score 
won't be used <br> Some values are missing for key values (e.g. Age) Some features (e.g. PassengerID, Name, Ticket) seem irelevant to survival probabilities <br> Transform the data Enhance and add new features End of explanation """ def drop_irrelevant_features(df, inplace=False): """ Remove non-relevant columns from dataftame df (inplace) """ if not inplace: df = df.copy() df.drop(['PassengerId', 'Name', 'Ticket'], axis='columns', inplace=True) if not inplace: return df drop_irrelevant_features(df, inplace=True) """ Explanation: Remove irrelevant features End of explanation """ df = helper.classify_data(df, target, numerical=["Age", "SibSp", "Parch", "Fare"]) pd.DataFrame(dict(df.dtypes), index=["Type"])[df.columns].head() # show data types """ Explanation: Classify variables Change categorical variables as dtype 'categorical' and sort columns: numerical + categorical + target End of explanation """ helper.show_categorical(df, target=target, sharey=True) """ Explanation: Visualize the data Categorical features End of explanation """ helper.show_target_vs_categorical(df, target) plt.ylim([0, 1]); """ Explanation: Target vs Categorical features End of explanation """ helper.show_numerical(df, kde=True) """ Explanation: Numerical features End of explanation """ helper.show_target_vs_numerical(df, target, jitter=0.2) plt.ylim([-0.4, 1.4]) plt.yticks([0, 1]); #df.groupby('Survived')['Age'].hist(alpha=0.4) # helper.show_target_vs_numerical(df_3sigma, target, numerical, jitter=0.2) """ Explanation: Target vs numerical features End of explanation """ helper.show_correlation(df, target) """ Explanation: Correlation between numerical features and target End of explanation """ sns.FacetGrid( df, row="Sex", col="Pclass", hue="Survived", size=3, margin_titles=True).map( plt.hist, "Age", alpha=.7).add_legend() plt.ylim([0, 70]); # df[['Title', 'Survived']].groupby(['Title'], as_index=False).mean().sort_values( # by='Survived', ascending=False) # 
helper.show_target_vs_categorical(df.loc[(df['Age']<12) | (df['Sex']=='female')], # target, categorical) """ Explanation: Most relevant features End of explanation """ helper.missing(df) plt.figure(figsize=(7, 3)) sns.countplot(data=df, x='Pclass', hue='Cabin'); helper.show_target_vs_categorical(df, ['Age'], figsize=(17, 2)) # Age vs categorical def fill_missing_values(df, inplace=False): """ Fill missing values of the dataframe df """ if not inplace: df = df.copy() # fill Embarked with mode df['Embarked'].fillna(df['Embarked'].mode()[0], inplace=True) # fill Cabin: the mode for grouped Pclass and Embarked ref = df.groupby(['Pclass', 'Embarked'])['Cabin'].transform(lambda x: x.mode()[0]) df['Cabin'].fillna(ref.iloc[0], inplace=True) # fill Age: the median for grouped Pclass and Title ref = df.groupby(['Pclass', 'Title'])['Age'].transform('median') df['Age'].fillna(ref, inplace=True) # fill Title: by age and sex only (not spouse or job) # df.loc[df['Title']=='Master','Age'].unique() # for idx, row in df.iterrows(): # if (pd.isnull(row['Title'])): # if row['Age'] >= 13: # if row['Sex'] == 'male': # df.loc[idx, 'Title'] = "Mr" # else: # df.loc[idx, 'Title'] = "Mrs" # else: # if row['Sex'] == 'male': # df.loc[idx, 'Title'] = "Master" # else: # df.loc[idx, 'Title'] = "Miss" # fill missing categorical values with the mode (if any) categorical = list(df.select_dtypes(include=['category'])) modes = df[categorical].mode() # this solves fillna issue with mode() for idx, f in enumerate(df[categorical]): df[f].fillna(modes.iloc[0, idx], inplace=True) # fill missing numeric NaN values with the median (if any) df.fillna(df.median(), inplace=True) if not inplace: return df # bins = list(range(0,80,10)) # # bins = (0, 5, 10, 15, 20, 30, 40, 50, 60) # labels = ["{}-{}".format(i, j) for i,j in zip(bins[:-1],bins[:-1])] # df['Age_cat'] = pd.cut(df['Age'], bins, labels=labels).astype('category') # df = df.drop(['Age'], axis='columns') fill_missing_values(df, inplace=True) """ 
Explanation: Unlike in third class, most children and women in first and second classes survived. Fill missing values End of explanation """ droplist = [] # features to drop from the model # For the model 'data' instead of 'df' data = df.copy() df.drop(droplist, axis='columns', inplace=True) data.head(3) """ Explanation: 2. Neural Network model Select the features End of explanation """ data, scale_param = helper.scale(data) """ Explanation: Scale numerical variables Shift and scale numerical variables to a standard normal distribution. The scaling factors are saved to be used for predictions. End of explanation """ data, dict_dummies = helper.replace_by_dummies(data, target) model_features = [f for f in data if f not in target] # sorted neural network inputs data.head(3) """ Explanation: Create dummy features Replace categorical features (no target) with dummy features End of explanation """ from sklearn.model_selection import train_test_split def split(data, target, test_size=0.15): train, test = train_test_split(data, test_size=test_size, random_state=9, stratify=data[target]) # Separate the data into features and target (x=features, y=target) x_train, y_train = train.drop(target, axis=1).values, train[target].values x_test, y_test = test.drop(target, axis=1).values, test[target].values # _nc: non-categorical yet (needs one-hot encoding) return x_train, y_train, x_test, y_test x_train, y_train, x_test, y_test = split(data, target, test_size=0.2) """ Explanation: Split the data into training and test sets Data leakage: Test set hidden when training the model, but seen when preprocessing the dataset End of explanation """ def one_hot_output(y_train, y_test): num_classes = len(np.unique(y_train)) y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) return y_train, y_test y_train, y_test = one_hot_output(y_train, y_test) print("train size \t X:{} \t Y:{}".format(x_train.shape, y_train.shape)) print("test 
size \t X:{} \t Y:{} ".format(x_test.shape, y_test.shape)) """ Explanation: One-hot encode the output End of explanation """ from keras.models import Sequential from keras.layers.core import Dense, Dropout def build_nn(input_size, output_size, summary=False): input_nodes = input_size weights = keras.initializers.RandomNormal(stddev=0.001) leaky_relu = keras.layers.advanced_activations.LeakyReLU(alpha=0.01) model = Sequential() model.add( Dense( input_nodes, input_dim=input_size, kernel_initializer=weights, activation='relu', bias_initializer='zero')) model.add(leaky_relu) model.add(Dropout(.3)) model.add( Dense( output_size, activation='softmax', kernel_initializer=weights, bias_initializer='zero')) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) if summary: model.summary() return model model = build_nn(x_train.shape[1], y_train.shape[1], summary=True) """ Explanation: Build the Neural Network for Binary Classification End of explanation """ from time import time model_path = os.path.join("models", "titanic.h5") def train_nn(model, x_train, y_train, validation_data=None, path=False, show=True): """ Train the neural network model. 
If no validation_data is provided, a split for validation will be used """ if show: print('Training ....') callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', patience=1, verbose=0)] t0 = time() history = model.fit( x_train, y_train, epochs=1000, batch_size=64, verbose=0, validation_split=0.25, validation_data = validation_data, callbacks=callbacks) if show: print("time: \t {:.1f} s".format(time() - t0)) helper.show_training(history) if path: model.save(path) print("\nModel saved at", path) return history model = None model = build_nn(x_train.shape[1], y_train.shape[1], summary=False) train_nn(model, x_train, y_train, path=model_path); """ Explanation: Train the Neural Network End of explanation """ from sklearn.model_selection import StratifiedKFold def cv_train_nn(x_train, y_train, n_splits): """ Create and Train models for cross validation. Return best model """ skf = StratifiedKFold(n_splits=n_splits, shuffle=True) score = [] best_model = None best_acc = 0 print('Training {} models for Cross Validation ...'.format(n_splits)) for train, val in skf.split(x_train[:, 0], y_train[:, 0]): model = None model = build_nn(x_train.shape[1], y_train.shape[1], summary=False) history = train_nn( model, x_train[train], y_train[train], show=False, validation_data=(x_train[val], y_train[val])) val_acc = history.history['val_acc'][-1] score.append(val_acc) if val_acc > best_acc: # save best model (fold) for evaluation and predictions best_model = model best_acc = val_acc model = best_model print('\nCross Validation accuracy: {:.3f}'.format(np.mean(score))) return best_model model = cv_train_nn(x_train, y_train, 4) """ Explanation: Train with Cross Validation End of explanation """ def evaluate_nn(model, x_test, y_test): score = model.evaluate(x_test, y_test, verbose=0) print("Test Accuracy: {:.3f}".format(score[1])) #model = keras.models.load_model(model_path) evaluate_nn(model, x_test, y_test) y_pred = model.predict(x_test, verbose=2) 
helper.binary_classification_scores( y_test[:, 1], y_pred[:, 1], return_dataframe=True, index="Neural Network") """ Explanation: Evaluate the model End of explanation """ def predict_manual(new_df): """ input: custom dataframe """ new_data = new_df.copy() # force data types to previous dataframe df for col in new_data: new_data[col] = new_data[col].astype(df.dtypes[col]) # standardize numerical variables new_data, _ = helper.scale(new_data, scale_param) # replace categorical features by dummy variables (using existing dummies) new_data, _ = helper.replace_by_dummies(new_data, target, dict_dummies) # sort columns to match with manual entries new_data = new_data[model_features] ## model_features: sorted list used in the model # make predictions prediction = model.predict(new_data.values)[:, 1] return (prediction) # for index, row in new_data.iterrows(): # single_pred = model.predict(np.array([row])) # print('{}:\t {:.0f}%'.format(index,single_pred[0,1] * 100)) # input data format df.describe() df.describe(include=['category']) print(list(df)) new_passengers = { 'Average man': [26, 1, 0, 14, 2, 'male', 'C', 'S', 'Mr', 0], 'Average woman': [26, 1, 0, 14, 2, 'female', 'C', 'S', 'Mrs', 0], 'Alone woman 3c': [26, 0, 2, 8, 3, 'female', 'C', 'S', 'Miss', 1], 'Boy 1c ': [7, 0, 2, 31, 1, 'male', 'C', 'S', 'Master', 0], 'Boy 2c ': [7, 0, 2, 14, 2, 'male', 'C', 'S', 'Master', 0], 'Boy 3c ': [7, 0, 2, 8, 3, 'male', 'C', 'S', 'Master', 0], } # create a dataframe with the new data new_df = pd.DataFrame( data=list(new_passengers.values()), index=new_passengers.keys(), columns= [f for f in list(df) if f not in target]) prediction = predict_manual(new_df) new_df['Survival prob. 
(%)'] = (prediction * 100).astype(int) new_df """ Explanation: Make predictions End of explanation """ # Same dataset without: # enhancing features # adding new features # filling missing values using grouped median def non_enhanced_pipeline(df): df = df.copy() # select features & classify features df.drop(['PassengerId', 'Name', 'Ticket', 'Cabin'], axis='columns', inplace=True) df = helper.classify_data(df, target, numerical=["Age", "SibSp", "Parch", "Fare"]) # fill NaN df.fillna(df.median(), inplace=True) # standardize and create dummies data, _ = helper.scale(df) data, _ = helper.replace_by_dummies(data, target) # split and one-hot output x_train, y_train, x_test, y_test = split(data, target, test_size=0.15) y_train, y_test = one_hot_output(y_train, y_test) # build, train and evaluate model model = build_nn(x_train.shape[1], y_train.shape[1], summary=False) train_nn(model, x_train, y_train, path=False, show=False) evaluate_nn(model, x_test, y_test) non_enhanced_pipeline(df_original) """ Explanation: The results predicted from the model confirm the impact of the sex for the survival probabilities, as well as the class for the survival of women and children. 
Compare with non-enhanced features End of explanation """ def remove_outliers_peline(df): df = df.copy() # transform features df, dict_categories = enhance_features(df) # select features & classify features df.drop(['PassengerId', 'Name', 'Ticket'], axis='columns', inplace=True) df = helper.classify_data(df, target, numerical=["Age", "SibSp", "Parch", "Fare"]) # remove outliers helper.remove_outliers(df, inplace=True) # remove default values above 3 times std # fill missing values (enhanced) fill_missing_values(df, inplace=True) # standardize and create dummies data, _ = helper.scale(df) data, _ = helper.replace_by_dummies(data, target) # split and one-hot output x_train, y_train, x_test, y_test = split(data, target, test_size=0.15) y_train, y_test = one_hot_output(y_train, y_test) # build, train and evaluate model model = build_nn(x_train.shape[1], y_train.shape[1], summary=False) train_nn(model, x_train, y_train, path=False, show=False) evaluate_nn(model, x_test, y_test) remove_outliers_peline(df_original) """ Explanation: Compare removing outliers End of explanation """ import warnings warnings.filterwarnings("ignore") helper.XGBClassifier( x_train, y_train[:,1], x_test, y_test[:,1], max_depth=4, n_estimators=400, learning_rate=0.1) """ Explanation: Compare with non-neural network models XGBoost End of explanation """ # enhanced features helper.ml_classification(x_train, y_train[:,1], x_test, y_test[:,1]) from sklearn.ensemble import RandomForestClassifier clf_random_forest = RandomForestClassifier(n_estimators = 30, max_depth=13, class_weight='balanced', n_jobs=-1, random_state=0).fit(x_train, np.ravel(y_train[:,1])) """ Explanation: Classical Machine Learning End of explanation """ y_pred = clf_random_forest.predict(x_test).reshape([-1, 1]) helper.binary_classification_scores( y_test[:, 1], y_pred, return_dataframe=True, index="Random Forest") """ Explanation: Best tree-based model End of explanation """ re = helper.feature_importances(model_features, 
clf_random_forest) """ Explanation: Feature importances End of explanation """
anhaidgroup/py_entitymatching
notebooks/guides/.ipynb_checkpoints/Down Sampling-checkpoint.ipynb
bsd-3-clause
import py_entitymatching as em
""" Explanation: This IPython notebook illustrates how to down sample two large tables that are loaded in the memory End of explanation """
# Read the CSV files (read_csv_metadata also registers table metadata with em).
# low_memory=False is forwarded to pandas.read_csv and parses the file in a
# single pass; NOTE(review): the original comment claimed it "speeds up
# loading" -- its documented effect is avoiding chunked mixed-dtype
# inference; confirm the intent.
A = em.read_csv_metadata('./citeseer.csv',low_memory=False)
B = em.read_csv_metadata('./dblp.csv', low_memory=False)
len(A), len(B)
A.head()
B.head()
# Set 'id' as the keys to the input tables
em.set_key(A, 'id')
em.set_key(B, 'id')
# Display the keys
em.get_key(A), em.get_key(B)
# Downsample the datasets: size=1000 tuples are sampled from B, and for each
# sampled B tuple, y_param=1 likely-matching tuple is picked from A
# (as described in the explanation below).
sample_A, sample_B = em.down_sample(A, B, size=1000, y_param=1)
""" Explanation: Down sampling is typically done when the input tables are large (e.g. each containing more than 100K tuples). For the purposes of this notebook we will use two large datasets: Citeseer and DBLP. You can download Citeseer dataset from http://pages.cs.wisc.edu/~anhai/data/falcon_data/citations/citeseer.csv and DBLP dataset from http://pages.cs.wisc.edu/~anhai/data/falcon_data/citations/dblp.csv. Once downloaded, save these datasets as 'citeseer.csv' and 'dblp.csv' in the current directory. End of explanation """
# Display the lengths of sampled datasets
len(sample_A), len(sample_B)
""" Explanation: In the down_sample command, set the size to the number of tuples that should be sampled from B (this would be the size of sampled B table) and set the y_param to be the number of matching tuples to be picked from A. In the above, we set the number of tuples to be sampled from B to be 1000. We set the y_param to 1 meaning that for each tuple sampled from B pick one matching tuple from A. End of explanation """
chemelnucfin/tensorflow
tensorflow/contrib/autograph/examples/notebooks/dev_summit_2018_demo.ipynb
apache-2.0
# Install TensorFlow; note that Colab notebooks run remotely, on virtual # instances provided by Google. !pip install -U -q tf-nightly import os import time import tensorflow as tf from tensorflow.contrib import autograph import matplotlib.pyplot as plt import numpy as np import six from google.colab import widgets """ Explanation: Experimental: TF AutoGraph TensorFlow Dev Summit, 2018. This interactive notebook demonstrates AutoGraph, an experimental source-code transformation library to automatically convert Python, TensorFlow and NumPy code to TensorFlow graphs. Note: this is pre-alpha software! The notebook works best with Python 2, for now. Table of Contents Write Eager code that is fast and scalable. Case study: complex control flow. Case study: training MNIST with Keras. Case study: building an RNN. End of explanation """ def g(x): if x > 0: x = x * x else: x = 0 return x """ Explanation: 1. Write Eager code that is fast and scalable TF.Eager gives you more flexibility while coding, but at the cost of losing the benefits of TensorFlow graphs. For example, Eager does not currently support distributed training, exporting models, and a variety of memory and computation optimizations. AutoGraph gives you the best of both worlds: you can write your code in an Eager style, and we will automatically transform it into the equivalent TF graph code. The graph code can be executed eagerly (as a single op), included as part of a larger graph, or exported. For example, AutoGraph can convert a function like this: End of explanation """ print(autograph.to_code(g)) """ Explanation: ... 
into a TF graph-building function: End of explanation """ tf_g = autograph.to_graph(g) with tf.Graph().as_default(): g_ops = tf_g(tf.constant(9)) with tf.Session() as sess: tf_g_result = sess.run(g_ops) print('g(9) = %s' % g(9)) print('tf_g(9) = %s' % tf_g_result) """ Explanation: You can then use the converted function as you would any regular TF op -- you can pass Tensor arguments and it will return Tensors: End of explanation """ def sum_even(numbers): s = 0 for n in numbers: if n % 2 > 0: continue s += n return s tf_sum_even = autograph.to_graph(sum_even) with tf.Graph().as_default(): with tf.Session() as sess: result = sess.run(tf_sum_even(tf.constant([10, 12, 15, 20]))) print('Sum of even numbers: %s' % result) # Uncomment the line below to print the generated graph code # print(autograph.to_code(sum_even)) """ Explanation: 2. Case study: complex control flow Autograph can convert a large subset of the Python language into graph-equivalent code, and we're adding new supported language features all the time. In this section, we'll give you a taste of some of the functionality in AutoGraph. AutoGraph will automatically convert most Python control flow statements into their graph equivalent. We support common statements like while, for, if, break, return and more. You can even nest them as much as you like. Imagine trying to write the graph version of this code by hand: End of explanation """ def f(x): assert x != 0, 'Do not pass zero!' return x * x tf_f = autograph.to_graph(f) with tf.Graph().as_default(): with tf.Session() as sess: try: print(sess.run(tf_f(tf.constant(0)))) except tf.errors.InvalidArgumentError as e: print('Got error message: %s' % e.message) # Uncomment the line below to print the generated graph code # print(autograph.to_code(f)) """ Explanation: Try replacing the continue in the above code with break -- Autograph supports that as well! The Python code above is much more readable than the matching graph code. 
Autograph takes care of tediously converting every piece of Python code into the matching TensorFlow graph version for you, so that you can quickly write maintainable code, but still benefit from the optimizations and deployment benefits of graphs. Let's try some other useful Python constructs, like print and assert. We automatically convert Python assert statements into the equivalent tf.Assert code. End of explanation """ def print_sign(n): if n >= 0: print(n, 'is positive!') else: print(n, 'is negative!') return n tf_print_sign = autograph.to_graph(print_sign) with tf.Graph().as_default(): with tf.Session() as sess: sess.run(tf_print_sign(tf.constant(1))) # Uncomment the line below to print the generated graph code # print(autograph.to_code(print_sign)) """ Explanation: You can also use print functions in-graph: End of explanation """ def f(n): numbers = [] # We ask you to tell us about the element dtype. autograph.set_element_type(numbers, tf.int32) for i in range(n): numbers.append(i) return autograph.stack(numbers) # Stack the list so that it can be used as a Tensor tf_f = autograph.to_graph(f) with tf.Graph().as_default(): with tf.Session() as sess: print(sess.run(tf_f(tf.constant(5)))) # Uncomment the line below to print the generated graph code # print(autograph.to_code(f)) """ Explanation: Appending to lists also works, with a few modifications: End of explanation """ def print_primes(n): """Returns all the prime numbers less than n.""" assert n > 0 primes = [] autograph.set_element_type(primes, tf.int32) for i in range(2, n): is_prime = True for k in range(2, i): if i % k == 0: is_prime = False break if not is_prime: continue primes.append(i) all_primes = autograph.stack(primes) print('The prime numbers less than', n, 'are:') print(all_primes) return tf.no_op() tf_print_primes = autograph.to_graph(print_primes) with tf.Graph().as_default(): with tf.Session() as sess: n = tf.constant(50) sess.run(tf_print_primes(n)) # Uncomment the line below to print the 
generated graph code # print(autograph.to_code(print_primes)) """ Explanation: And all of these functionalities, and more, can be composed into more complicated code: End of explanation """ import gzip import shutil from six.moves import urllib def download(directory, filename): filepath = os.path.join(directory, filename) if tf.gfile.Exists(filepath): return filepath if not tf.gfile.Exists(directory): tf.gfile.MakeDirs(directory) url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz' zipped_filepath = filepath + '.gz' print('Downloading %s to %s' % (url, zipped_filepath)) urllib.request.urlretrieve(url, zipped_filepath) with gzip.open(zipped_filepath, 'rb') as f_in, open(filepath, 'wb') as f_out: shutil.copyfileobj(f_in, f_out) os.remove(zipped_filepath) return filepath def dataset(directory, images_file, labels_file): images_file = download(directory, images_file) labels_file = download(directory, labels_file) def decode_image(image): # Normalize from [0, 255] to [0.0, 1.0] image = tf.decode_raw(image, tf.uint8) image = tf.cast(image, tf.float32) image = tf.reshape(image, [784]) return image / 255.0 def decode_label(label): label = tf.decode_raw(label, tf.uint8) label = tf.reshape(label, []) return tf.to_int32(label) images = tf.data.FixedLengthRecordDataset( images_file, 28 * 28, header_bytes=16).map(decode_image) labels = tf.data.FixedLengthRecordDataset( labels_file, 1, header_bytes=8).map(decode_label) return tf.data.Dataset.zip((images, labels)) def mnist_train(directory): return dataset(directory, 'train-images-idx3-ubyte', 'train-labels-idx1-ubyte') def mnist_test(directory): return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte') """ Explanation: 3. Case study: training MNIST with Keras As we've seen, writing control flow in AutoGraph is easy. So running a training loop in graph should be easy as well! Here, we show an example of such a training loop for a simple Keras model that trains on MNIST. 
End of explanation """ def mlp_model(input_shape): model = tf.keras.Sequential(( tf.keras.layers.Dense(100, activation='relu', input_shape=input_shape), tf.keras.layers.Dense(100, activation='relu'), tf.keras.layers.Dense(10, activation='softmax'), )) model.build() return model """ Explanation: First, we'll define a small three-layer neural network using the Keras API End of explanation """ def predict(m, x, y): y_p = m(x) losses = tf.keras.losses.categorical_crossentropy(y, y_p) l = tf.reduce_mean(losses) accuracies = tf.keras.metrics.categorical_accuracy(y, y_p) accuracy = tf.reduce_mean(accuracies) return l, accuracy """ Explanation: Let's connect the model definition (here abbreviated as m) to a loss function, so that we can train our model. End of explanation """ def fit(m, x, y, opt): l, accuracy = predict(m, x, y) opt.minimize(l) return l, accuracy """ Explanation: Now the final piece of the problem specification (before loading data, and clicking everything together) is backpropagating the loss through the model, and optimizing the weights using the gradient. 
End of explanation """ def setup_mnist_data(is_training, hp, batch_size): if is_training: ds = mnist_train('/tmp/autograph_mnist_data') ds = ds.shuffle(batch_size * 10) else: ds = mnist_test('/tmp/autograph_mnist_data') ds = ds.repeat() ds = ds.batch(batch_size) return ds def get_next_batch(ds): itr = ds.make_one_shot_iterator() image, label = itr.get_next() x = tf.to_float(tf.reshape(image, (-1, 28 * 28))) y = tf.one_hot(tf.squeeze(label), 10) return x, y """ Explanation: These are some utility functions to download data and generate batches for training End of explanation """ def train(train_ds, test_ds, hp): m = mlp_model((28 * 28,)) opt = tf.train.MomentumOptimizer(hp.learning_rate, 0.9) train_losses = [] autograph.set_element_type(train_losses, tf.float32) test_losses = [] autograph.set_element_type(test_losses, tf.float32) train_accuracies = [] autograph.set_element_type(train_accuracies, tf.float32) test_accuracies = [] autograph.set_element_type(test_accuracies, tf.float32) i = 0 while i < hp.max_steps: train_x, train_y = get_next_batch(train_ds) test_x, test_y = get_next_batch(test_ds) step_train_loss, step_train_accuracy = fit(m, train_x, train_y, opt) step_test_loss, step_test_accuracy = predict(m, test_x, test_y) if i % (hp.max_steps // 10) == 0: print('Step', i, 'train loss:', step_train_loss, 'test loss:', step_test_loss, 'train accuracy:', step_train_accuracy, 'test accuracy:', step_test_accuracy) train_losses.append(step_train_loss) test_losses.append(step_test_loss) train_accuracies.append(step_train_accuracy) test_accuracies.append(step_test_accuracy) i += 1 return (autograph.stack(train_losses), autograph.stack(test_losses), autograph.stack(train_accuracies), autograph.stack(test_accuracies)) """ Explanation: This function specifies the main training loop. 
We instantiate the model (using the code above), instantiate an optimizer (here we'll use SGD with momentum, nothing too fancy), and we'll instantiate some lists to keep track of training and test loss and accuracy over time. In the loop inside this function, we'll grab a batch of data, apply an update to the weights of our model to improve its performance, and then record its current training loss and accuracy. Every so often, we'll log some information about training as well. End of explanation """ def plot(train, test, label): plt.title('MNIST model %s' % label) plt.plot(train, label='train %s' % label) plt.plot(test, label='test %s' % label) plt.legend() plt.xlabel('Training step') plt.ylabel(label.capitalize()) plt.show() with tf.Graph().as_default(): hp = tf.contrib.training.HParams( learning_rate=0.05, max_steps=tf.constant(500), ) train_ds = setup_mnist_data(True, hp, 50) test_ds = setup_mnist_data(False, hp, 1000) tf_train = autograph.to_graph(train) all_losses = tf_train(train_ds, test_ds, hp) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) (train_losses, test_losses, train_accuracies, test_accuracies) = sess.run(all_losses) plot(train_losses, test_losses, 'loss') plot(train_accuracies, test_accuracies, 'accuracy') """ Explanation: Everything is ready to go, let's train the model and plot its performance! End of explanation """ def parse(line): """Parses a line from the colors dataset. Args: line: A comma-separated string containing four items: color_name, red, green, and blue, representing the name and respectively the RGB value of the color, as an integer between 0 and 255. Returns: A tuple of three tensors (rgb, chars, length), of shapes: (batch_size, 3), (batch_size, max_sequence_length, 256) and respectively (batch_size). 
""" items = tf.string_split(tf.expand_dims(line, 0), ",").values rgb = tf.string_to_number(items[1:], out_type=tf.float32) / 255.0 color_name = items[0] chars = tf.one_hot(tf.decode_raw(color_name, tf.uint8), depth=256) length = tf.cast(tf.shape(chars)[0], dtype=tf.int64) return rgb, chars, length def maybe_download(filename, work_directory, source_url): """Downloads the data from source url.""" if not tf.gfile.Exists(work_directory): tf.gfile.MakeDirs(work_directory) filepath = os.path.join(work_directory, filename) if not tf.gfile.Exists(filepath): temp_file_name, _ = six.moves.urllib.request.urlretrieve(source_url) tf.gfile.Copy(temp_file_name, filepath) with tf.gfile.GFile(filepath) as f: size = f.size() print('Successfully downloaded', filename, size, 'bytes.') return filepath def load_dataset(data_dir, url, batch_size, training=True): """Loads the colors data at path into a tf.PaddedDataset.""" path = maybe_download(os.path.basename(url), data_dir, url) dataset = tf.data.TextLineDataset(path) dataset = dataset.skip(1) dataset = dataset.map(parse) dataset = dataset.cache() dataset = dataset.repeat() if training: dataset = dataset.shuffle(buffer_size=3000) dataset = dataset.padded_batch(batch_size, padded_shapes=((None,), (None, None), ())) return dataset train_url = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/train.csv" test_url = "https://raw.githubusercontent.com/random-forests/tensorflow-workshop/master/archive/extras/colorbot/data/test.csv" data_dir = "tmp/rnn/data" """ Explanation: 4. Case study: building an RNN In this exercise we build and train a model similar to the RNNColorbot model that was used in the main Eager notebook. The model is adapted for converting and training in graph mode. To get started, we load the colorbot dataset. The code is identical to that used in the other exercise and its details are unimportant. 
End of explanation """ def model_components(): lower_cell = tf.contrib.rnn.LSTMBlockCell(256) lower_cell.build(tf.TensorShape((None, 256))) upper_cell = tf.contrib.rnn.LSTMBlockCell(128) upper_cell.build(tf.TensorShape((None, 256))) relu_layer = tf.layers.Dense(3, activation=tf.nn.relu) relu_layer.build(tf.TensorShape((None, 128))) return lower_cell, upper_cell, relu_layer def rnn_layer(chars, cell, batch_size, training): """A simple RNN layer. Args: chars: A Tensor of shape (max_sequence_length, batch_size, input_size) cell: An object of type tf.contrib.rnn.LSTMBlockCell batch_size: Int, the batch size to use training: Boolean, whether the layer is used for training Returns: A Tensor of shape (max_sequence_length, batch_size, output_size). """ hidden_outputs = tf.TensorArray(tf.float32, size=0, dynamic_size=True) state, output = cell.zero_state(batch_size, tf.float32) initial_state_shape = state.shape initial_output_shape = output.shape n = tf.shape(chars)[0] i = 0 while i < n: ch = chars[i] cell_output, (state, output) = cell.call(ch, (state, output)) hidden_outputs.append(cell_output) i += 1 hidden_outputs = autograph.stack(hidden_outputs) if training: hidden_outputs = tf.nn.dropout(hidden_outputs, 0.5) return hidden_outputs def model(inputs, lower_cell, upper_cell, relu_layer, batch_size, training): """RNNColorbot model. The model consists of two RNN layers (made by lower_cell and upper_cell), followed by a fully connected layer with ReLU activation. Args: inputs: A tuple (chars, length) lower_cell: An object of type tf.contrib.rnn.LSTMBlockCell upper_cell: An object of type tf.contrib.rnn.LSTMBlockCell relu_layer: An object of type tf.layers.Dense batch_size: Int, the batch size to use training: Boolean, whether the layer is used for training Returns: A Tensor of shape (batch_size, 3) - the model predictions. 
""" (chars, length) = inputs chars_time_major = tf.transpose(chars, (1, 0, 2)) chars_time_major.set_shape((None, batch_size, 256)) hidden_outputs = rnn_layer(chars_time_major, lower_cell, batch_size, training) final_outputs = rnn_layer(hidden_outputs, upper_cell, batch_size, training) # Grab just the end-of-sequence from each output. indices = tf.stack((length - 1, range(batch_size)), axis=1) sequence_ends = tf.gather_nd(final_outputs, indices) sequence_ends.set_shape((batch_size, 128)) return relu_layer(sequence_ends) def loss_fn(labels, predictions): return tf.reduce_mean((predictions - labels) ** 2) """ Explanation: Next, we set up the RNNColobot model, which is very similar to the one we used in the main exercise. Autograph doesn't fully support classes yet (but it will soon!), so we'll write the model using simple functions. End of explanation """ def train(optimizer, train_data, lower_cell, upper_cell, relu_layer, batch_size, num_steps): iterator = train_data.make_one_shot_iterator() step = 0 while step < num_steps: labels, chars, sequence_length = iterator.get_next() predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=True) loss = loss_fn(labels, predictions) optimizer.minimize(loss) if step % (num_steps // 10) == 0: print('Step', step, 'train loss', loss) step += 1 return step def test(eval_data, lower_cell, upper_cell, relu_layer, batch_size, num_steps): total_loss = 0.0 iterator = eval_data.make_one_shot_iterator() step = 0 while step < num_steps: labels, chars, sequence_length = iterator.get_next() predictions = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, batch_size, training=False) total_loss += loss_fn(labels, predictions) step += 1 print('Test loss', total_loss) return total_loss def train_model(train_data, eval_data, batch_size, lower_cell, upper_cell, relu_layer, train_steps): optimizer = tf.train.AdamOptimizer(learning_rate=0.01) train(optimizer, train_data, lower_cell, 
upper_cell, relu_layer, batch_size, num_steps=tf.constant(train_steps)) test(eval_data, lower_cell, upper_cell, relu_layer, 50, num_steps=tf.constant(2)) print('Colorbot is ready to generate colors!\n\n') # In graph mode, every op needs to be a dependent of another op. # Here, we create a no_op that will drive the execution of all other code in # this function. Autograph will add the necessary control dependencies. return tf.no_op() """ Explanation: The train and test functions are also similar to the ones used in the Eager notebook. Since the network requires a fixed batch size, we'll train in a single shot, rather than by epoch. End of explanation """ @autograph.do_not_convert(run_as=autograph.RunMode.PY_FUNC) def draw_prediction(color_name, pred): pred = pred * 255 pred = pred.astype(np.uint8) plt.axis('off') plt.imshow(pred) plt.title(color_name) plt.show() def inference(color_name, lower_cell, upper_cell, relu_layer): _, chars, sequence_length = parse(color_name) chars = tf.expand_dims(chars, 0) sequence_length = tf.expand_dims(sequence_length, 0) pred = model((chars, sequence_length), lower_cell, upper_cell, relu_layer, 1, training=False) pred = tf.minimum(pred, 1.0) pred = tf.expand_dims(pred, 0) draw_prediction(color_name, pred) # Create an op that will drive the entire function. return tf.no_op() """ Explanation: Finally, we add code to run inference on a single input, which we'll read from the input. Note the do_not_convert annotation that lets us disable conversion for certain functions and run them as a py_func instead, so you can still call them from compiled code. 
End of explanation """ def run_input_loop(sess, inference_ops, color_name_placeholder): """Helper function that reads from input and calls the inference ops in a loop.""" tb = widgets.TabBar(["RNN Colorbot"]) while True: with tb.output_to(0): try: color_name = six.moves.input("Give me a color name (or press 'enter' to exit): ") except (EOFError, KeyboardInterrupt): break if not color_name: break with tb.output_to(0): tb.clear_tab() sess.run(inference_ops, {color_name_placeholder: color_name}) plt.show() with tf.Graph().as_default(): # Read the data. batch_size = 64 train_data = load_dataset(data_dir, train_url, batch_size) eval_data = load_dataset(data_dir, test_url, 50, training=False) # Create the model components. lower_cell, upper_cell, relu_layer = model_components() # Create the helper placeholder for inference. color_name_placeholder = tf.placeholder(tf.string, shape=()) # Compile the train / test code. tf_train_model = autograph.to_graph(train_model) train_model_ops = tf_train_model( train_data, eval_data, batch_size, lower_cell, upper_cell, relu_layer, train_steps=100) # Compile the inference code. tf_inference = autograph.to_graph(inference) inference_ops = tf_inference(color_name_placeholder, lower_cell, upper_cell, relu_layer) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) # Run training and testing. sess.run(train_model_ops) # Run the inference loop. run_input_loop(sess, inference_ops, color_name_placeholder) """ Explanation: Finally, we put everything together. Note that the entire training and testing code is all compiled into a single op (tf_train_model) that you only execute once! We also still use a sess.run loop for the inference part, because that requires keyboard input. End of explanation """
kazzz24/deep-learning
tv-script-generation/dlnd_tv_script_generation.ipynb
mit
""" DON'T MODIFY ANYTHING IN THIS CELL """ import helper data_dir = './data/simpsons/moes_tavern_lines.txt' text = helper.load_data(data_dir) # Ignore notice, since we don't use it for analysing the data text = text[81:] """ Explanation: TV Script Generation In this project, you'll generate your own Simpsons TV scripts using RNNs. You'll be using part of the Simpsons dataset of scripts from 27 seasons. The Neural Network you'll build will generate a new TV script for a scene at Moe's Tavern. Get the Data The data is already provided for you. You'll be using a subset of the original dataset. It consists of only the scenes in Moe's Tavern. This doesn't include other versions of the tavern, like "Moe's Cavern", "Flaming Moe's", "Uncle Moe's Family Feed-Bag", etc.. End of explanation """ view_sentence_range = (0, 10) """ DON'T MODIFY ANYTHING IN THIS CELL """ import numpy as np print('Dataset Stats') print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()}))) scenes = text.split('\n\n') print('Number of scenes: {}'.format(len(scenes))) sentence_count_scene = [scene.count('\n') for scene in scenes] print('Average number of sentences in each scene: {}'.format(np.average(sentence_count_scene))) sentences = [sentence for scene in scenes for sentence in scene.split('\n')] print('Number of lines: {}'.format(len(sentences))) word_count_sentence = [len(sentence.split()) for sentence in sentences] print('Average number of words in each line: {}'.format(np.average(word_count_sentence))) print() print('The sentences {} to {}:'.format(*view_sentence_range)) print('\n'.join(text.split('\n')[view_sentence_range[0]:view_sentence_range[1]])) view_sentence_range[1] """ Explanation: Explore the Data Play around with view_sentence_range to view different parts of the data. 
End of explanation """ import numpy as np import problem_unittests as tests from collections import Counter def create_lookup_tables(text): """ Create lookup tables for vocabulary :param text: The text of tv scripts split into words :return: A tuple of dicts (vocab_to_int, int_to_vocab) """ # TODO: Implement Function #print(text) counts = Counter(text) vocab = sorted(counts, key=counts.get, reverse=True) vocab_to_int = {word: ii for ii, word in enumerate(vocab, 0)} int_to_vocab = {ii: word for ii, word in enumerate(vocab, 0)} print('int_to_vocab size:', len(int_to_vocab)) return vocab_to_int, int_to_vocab """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_create_lookup_tables(create_lookup_tables) """ Explanation: Implement Preprocessing Functions The first thing to do to any dataset is preprocessing. Implement the following preprocessing functions below: - Lookup Table - Tokenize Punctuation Lookup Table To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries: - Dictionary to go from the words to an id, we'll call vocab_to_int - Dictionary to go from the id to word, we'll call int_to_vocab Return these dictionaries in the following tuple (vocab_to_int, int_to_vocab) End of explanation """ def token_lookup(): """ Generate a dict to turn punctuation into a token. 
:return: Tokenize dictionary where the key is the punctuation and the value is the token """ # TODO: Implement Function punctuation_to_token = {} punctuation_to_token['.'] = '||period||' punctuation_to_token[','] = '||comma||' punctuation_to_token['"'] = '||quotation||' punctuation_to_token[';'] = '||semicolon||' punctuation_to_token['!'] = '||exclamation||' punctuation_to_token['?'] = '||question||' punctuation_to_token['('] = '||l-parentheses||' punctuation_to_token[')'] = '||r-parentheses||' punctuation_to_token['--'] = '||dash||' punctuation_to_token['\n'] = '||return||' return punctuation_to_token """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_tokenize(token_lookup) """ Explanation: Tokenize Punctuation We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks make it hard for the neural network to distinguish between the word "bye" and "bye!". Implement the function token_lookup to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token: - Period ( . ) - Comma ( , ) - Quotation Mark ( " ) - Semicolon ( ; ) - Exclamation mark ( ! ) - Question mark ( ? ) - Left Parentheses ( ( ) - Right Parentheses ( ) ) - Dash ( -- ) - Return ( \n ) This dictionary will be used to token the symbols and add the delimiter (space) around it. This separates the symbols as it's own word, making it easier for the neural network to predict on the next word. Make sure you don't use a token that could be confused as a word. Instead of using the token "dash", try using something like "||dash||". 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Preprocess Training, Validation, and Testing Data helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables) """ Explanation: Preprocess all the data and save it Running the code cell below will preprocess all the data and save it to file. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import helper import numpy as np import problem_unittests as tests int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() """ Explanation: Check Point This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk. End of explanation """ print(len(int_to_vocab)) print(int_to_vocab[6778]) """ DON'T MODIFY ANYTHING IN THIS CELL """ from distutils.version import LooseVersion import warnings import tensorflow as tf # Check TensorFlow Version assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer' print('TensorFlow Version: {}'.format(tf.__version__)) # Check for a GPU if not tf.test.gpu_device_name(): warnings.warn('No GPU found. Please use a GPU to train your neural network.') else: print('Default GPU Device: {}'.format(tf.test.gpu_device_name())) """ Explanation: Build the Neural Network You'll build the components necessary to build a RNN by implementing the following functions below: - get_inputs - get_init_cell - get_embed - build_rnn - build_nn - get_batches Check the Version of TensorFlow and Access to GPU End of explanation """ def get_inputs(): """ Create TF Placeholders for input, targets, and learning rate. 
:return: Tuple (input, targets, learning rate) """ # TODO: Implement Function input = tf.placeholder(tf.int32, [None, None], name='input') targets = tf.placeholder(tf.int32, [None, None], name='targets') learning_rate = tf.placeholder(tf.float32, name='learning_rate') return input, targets, learning_rate """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_inputs(get_inputs) """ Explanation: Input Implement the get_inputs() function to create TF Placeholders for the Neural Network. It should create the following placeholders: - Input text placeholder named "input" using the TF Placeholder name parameter. - Targets placeholder - Learning Rate placeholder Return the placeholders in the following the tuple (Input, Targets, LearingRate) End of explanation """ def get_init_cell(batch_size, rnn_size): """ Create an RNN Cell and initialize it. :param batch_size: Size of batches :param rnn_size: Size of RNNs :return: Tuple (cell, initialize state) """ # TODO: Implement Function # Your basic LSTM cell lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) cell = tf.contrib.rnn.MultiRNNCell([lstm] * 2) #drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=0.5) #lstm_layers = 1 #cell = tf.contrib.rnn.MultiRNNCell([drop] * lstm_layers) # Getting an initial state of all zeros initial_state = cell.zero_state(batch_size, tf.int32) initial_state = tf.identity(initial_state, name="initial_state") return cell, initial_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_init_cell(get_init_cell) """ Explanation: Build RNN Cell and Initialize Stack one or more BasicLSTMCells in a MultiRNNCell. 
- The Rnn size should be set using rnn_size - Initalize Cell State using the MultiRNNCell's zero_state() function - Apply the name "initial_state" to the initial state using tf.identity() Return the cell and initial state in the following tuple (Cell, InitialState) End of explanation """ def get_embed(input_data, vocab_size, embed_dim): """ Create embedding for <input_data>. :param input_data: TF placeholder for text input. :param vocab_size: Number of words in vocabulary. :param embed_dim: Number of embedding dimensions :return: Embedded input. """ # TODO: Implement Function #embedding = tf.Variable(tf.random_uniform((vocab_size+1, embed_dim), -1, 1)) embedding = tf.Variable(tf.truncated_normal((vocab_size+1, embed_dim), -1, 1)) embed = tf.nn.embedding_lookup(embedding, input_data) print("vocab_size:", vocab_size) print("embed.shape:", embed.shape) return embed """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_embed(get_embed) """ Explanation: Word Embedding Apply embedding to input_data using TensorFlow. Return the embedded sequence. End of explanation """ def build_rnn(cell, inputs): """ Create a RNN using a RNN Cell :param cell: RNN Cell :param inputs: Input text data :return: Tuple (Outputs, Final State) """ # TODO: Implement Function print("inputs.shape:", inputs.shape) outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) #need to specify dtype instead of initial_state final_state = tf.identity(final_state, name="final_state") return outputs, final_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_rnn(build_rnn) """ Explanation: Build RNN You created a RNN Cell in the get_init_cell() function. Time to use the cell to create a RNN. 
- Build the RNN using the tf.nn.dynamic_rnn() - Apply the name "final_state" to the final state using tf.identity() Return the outputs and final_state state in the following tuple (Outputs, FinalState) End of explanation """ def build_nn(cell, rnn_size, input_data, vocab_size): """ Build part of the neural network :param cell: RNN cell :param rnn_size: Size of rnns :param input_data: Input data :param vocab_size: Vocabulary size :return: Tuple (Logits, FinalState) """ # TODO: Implement Function #embed_dim = 300 #embed = get_embed(input_data, vocab_size, embed_dim) embed = get_embed(input_data, vocab_size, rnn_size) outputs, final_state = build_rnn(cell, embed) #logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=tf.nn.relu) logits = tf.contrib.layers.fully_connected(outputs, vocab_size, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(stddev=0.01), biases_initializer=tf.zeros_initializer()) return logits, final_state """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_build_nn(build_nn) """ Explanation: Build the Neural Network Apply the functions you implemented above to: - Apply embedding to input_data using your get_embed(input_data, vocab_size, embed_dim) function. - Build RNN using cell and your build_rnn(cell, inputs) function. - Apply a fully connected layer with a linear activation and vocab_size as the number of outputs. 
Return the logits and final state in the following tuple (Logits, FinalState) End of explanation """ data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] tmp = [] tmp = [[data[0:2]], data[2:4]] print(tmp) def get_batches(int_text, batch_size, seq_length): """ Return batches of input and target :param int_text: Text with the words replaced by their ids :param batch_size: The size of batch :param seq_length: The length of sequence :return: Batches as a Numpy array """ # TODO: Implement Function #print(int_text) #print(batch_size, seq_length) batches = [] num_of_batches = len(int_text) // (batch_size*seq_length) print("num_of_batches:", num_of_batches) for i in range(0, num_of_batches): batch_of_input = [] batch_of_output = [] for j in range(0, batch_size): top = i*seq_length + j*seq_length*num_of_batches batch_of_input.append(int_text[top : top+seq_length]) batch_of_output.append(int_text[top+1 :top+1+seq_length]) batch = [batch_of_input, batch_of_output] #print('batch', i, 'input:') #print(batch_of_input) #print('batch', i, 'output:') #print(batch_of_output) batches.append(batch) return np.array(batches) """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_batches(get_batches) #get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], 2, 3) """ Explanation: Batches Implement get_batches to create batches of input and targets using int_text. The batches should be a Numpy array with the shape (number of batches, 2, batch size, sequence length). Each batch contains two elements: - The first element is a single batch of input with the shape [batch size, sequence length] - The second element is a single batch of targets with the shape [batch size, sequence length] If you can't fill the last batch with enough data, drop the last batch. 
For exmple, get_batches([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], 3, 2) would return a Numpy array of the following: ``` [ # First Batch [ # Batch of Input [[ 1 2], [ 7 8], [13 14]] # Batch of targets [[ 2 3], [ 8 9], [14 15]] ] # Second Batch [ # Batch of Input [[ 3 4], [ 9 10], [15 16]] # Batch of targets [[ 4 5], [10 11], [16 17]] ] # Third Batch [ # Batch of Input [[ 5 6], [11 12], [17 18]] # Batch of targets [[ 6 7], [12 13], [18 1]] ] ] ``` Notice that the last target value in the last batch is the first input value of the first batch. In this case, 1. This is a common technique used when creating sequence batches, although it is rather unintuitive. End of explanation """ # Number of Epochs num_epochs = 200 # Batch Size batch_size = 128 # RNN Size rnn_size = 256 # Sequence Length seq_length = 10 # Learning Rate learning_rate = 0.002 # Show stats for every n number of batches show_every_n_batches = 53 """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ save_dir = './save' """ Explanation: Neural Network Training Hyperparameters Tune the following parameters: Set num_epochs to the number of epochs. Set batch_size to the batch size. Set rnn_size to the size of the RNNs. Set seq_length to the length of sequence. Set learning_rate to the learning rate. Set show_every_n_batches to the number of batches the neural network should print progress. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ from tensorflow.contrib import seq2seq train_graph = tf.Graph() with train_graph.as_default(): vocab_size = len(int_to_vocab) input_text, targets, lr = get_inputs() input_data_shape = tf.shape(input_text) cell, initial_state = get_init_cell(input_data_shape[0], rnn_size) logits, final_state = build_nn(cell, rnn_size, input_text, vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function cost = seq2seq.sequence_loss( logits, targets, tf.ones([input_data_shape[0], input_data_shape[1]])) # Optimizer optimizer = tf.train.AdamOptimizer(lr) # Gradient Clipping gradients = optimizer.compute_gradients(cost) capped_gradients = [(tf.clip_by_value(grad, -1., 1.), var) for grad, var in gradients] train_op = optimizer.apply_gradients(capped_gradients) """ Explanation: Build the Graph Build the graph using the neural network you implemented. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ batches = get_batches(int_text, batch_size, seq_length) with tf.Session(graph=train_graph) as sess: sess.run(tf.global_variables_initializer()) for epoch_i in range(num_epochs): state = sess.run(initial_state, {input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { input_text: x, targets: y, initial_state: state, lr: learning_rate} train_loss, state, _ = sess.run([cost, final_state, train_op], feed) # Show every <show_every_n_batches> batches if (epoch_i * len(batches) + batch_i) % show_every_n_batches == 0: print('Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format( epoch_i, batch_i, len(batches), train_loss)) # Save Model saver = tf.train.Saver() saver.save(sess, save_dir) print('Model Trained and Saved') """ Explanation: Train Train the neural network on the preprocessed data. If you have a hard time getting a good loss, check the forms to see if anyone is having the same problem. 
End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ # Save parameters for checkpoint helper.save_params((seq_length, save_dir)) """ Explanation: Save Parameters Save seq_length and save_dir for generating a new TV script. End of explanation """ """ DON'T MODIFY ANYTHING IN THIS CELL """ import tensorflow as tf import numpy as np import helper import problem_unittests as tests _, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess() seq_length, load_dir = helper.load_params() """ Explanation: Checkpoint End of explanation """ def get_tensors(loaded_graph): """ Get input, initial state, final state, and probabilities tensor from <loaded_graph> :param loaded_graph: TensorFlow graph loaded from file :return: Tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) """ # TODO: Implement Function input_tensor = loaded_graph.get_tensor_by_name('input:0') Initial_state_tensor = loaded_graph.get_tensor_by_name('initial_state:0') final_state_tensor = loaded_graph.get_tensor_by_name('final_state:0') probs_tensor = loaded_graph.get_tensor_by_name('probs:0') return input_tensor, Initial_state_tensor, final_state_tensor, probs_tensor """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_get_tensors(get_tensors) """ Explanation: Implement Generate Functions Get Tensors Get tensors from loaded_graph using the function get_tensor_by_name(). 
Get the tensors using the following names: - "input:0" - "initial_state:0" - "final_state:0" - "probs:0" Return the tensors in the following tuple (InputTensor, InitialStateTensor, FinalStateTensor, ProbsTensor) End of explanation """ def pick_word(probabilities, int_to_vocab): """ Pick the next word in the generated text :param probabilities: Probabilites of the next word :param int_to_vocab: Dictionary of word ids as the keys and words as the values :return: String of the predicted word """ # TODO: Implement Function #print(probabilities) #print(int_to_vocab) index = np.argmax(probabilities) word = int_to_vocab[index] #word = int_to_vocab.get(probabilities.argmax(axis=0)) return word """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ tests.test_pick_word(pick_word) """ Explanation: Choose Word Implement the pick_word() function to select the next word using probabilities. End of explanation """ gen_length = 200 # homer_simpson, moe_szyslak, or Barney_Gumble prime_word = 'moe_szyslak' """ DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE """ loaded_graph = tf.Graph() with tf.Session(graph=loaded_graph) as sess: # Load saved model loader = tf.train.import_meta_graph(load_dir + '.meta') loader.restore(sess, load_dir) # Get Tensors from loaded model input_text, initial_state, final_state, probs = get_tensors(loaded_graph) # Sentences generation setup gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {input_text: np.array([[1]])}) # Generate sentences for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [probs, final_state], {input_text: dyn_input, initial_state: prev_state}) pred_word = pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): 
ending = ' ' if key in ['\n', '(', '"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\n ', '\n') tv_script = tv_script.replace('( ', '(') print(tv_script) """ Explanation: Generate TV Script This will generate the TV script for you. Set gen_length to the length of TV script you want to generate. End of explanation """
molgor/spystats
notebooks/Sandboxes/GMRF/.ipynb_checkpoints/Stationary_with_fft-checkpoint.ipynb
bsd-2-clause
# Load Biospytial modules and etc. %matplotlib inline import sys sys.path.append('/apps') sys.path.append('..') #sys.path.append('../../spystats') import django django.setup() import pandas as pd import matplotlib.pyplot as plt import numpy as np ## Use the ggplot style plt.style.use('ggplot') from external_plugins.spystats.spystats import tools as sptools import scipy """ Explanation: Stationary GMRF simulation with Discrete Fourier Transformation End of explanation """ #c_delta = lambda d : np.hstack(((4 + d),-1,np.zeros(128 - 3),-1)) #c_delta = lambda d : np.hstack(((0),-1,np.zeros(128 - 3),-1)) #C = scipy.linalg.circulant(c_delta(0.1)) def createToroidalCircularBase(d=0.1,N=128): """ Creates a circular base similar to the one described in GMRF Rue and Held, 2005. """ c00 = np.hstack(((4 + d),-1,np.zeros(N - 3),-1)) c01 = np.hstack((-1,np.zeros(N - 1))) c0 = np.zeros((N - 2 ,N)) c1 = np.vstack((c00,c01)) c = np.vstack((c1,c0)) c[N -1, 0] = -1 return c %%time ## Create circular base d = 0.00001 N = 100 c = createToroidalCircularBase(d=d,N=N) ## Simulate random noise (Normal distributed) from scipy.fftpack import ifft2, fft2 zr = scipy.stats.norm.rvs(size=(c.size,2),loc=0,scale=1,random_state=1234) zr.dtype=np.complex_ #plt.hist(zr.real) #Lm = scipy.sqrt(C.shape[0]*C.shape[0]) * fft2(C) Lm = fft2(c) v = 1.0/ len(c) * fft2((Lm ** -0.5)* zr.reshape(Lm.shape)) x = v.real plt.imshow(x,interpolation='None') ## Calculate inverse of c C_inv = ifft2 ((fft2(c) ** -1)) plt.plot(C_inv[:,0]) """ Explanation: Algorithm to simulate GMRF with block-circulant Matrix. Taken from: Rue, H., & Held, L. (2005). Gaussian Markov random fields: theory and applications. CRC press. Algorithm 2.10 Now let's build the circulant matrix for the tourus Oke, for the moment I´ll follow the example in GMRF book. i.e. 
a Torus (stationary of 128x128) End of explanation """ %%time vm = sptools.ExponentialVariogram(sill=0.3,range_a=0.4) xx,yy,z = sptools.simulatedGaussianFieldAsPcolorMesh(vm,grid_sizex=100,grid_sizey=100,random_seed=1234) plt.imshow(z) """ Explanation: For benchmarking we will perfom a GF simulation. Based on non-conditional simulation. End of explanation """ 346 / 0.151 plt.figure(figsize=(10, 5)) plt.subplot(1,2,1) plt.imshow(z) plt.subplot(1,2,2) plt.imshow(x,interpolation='None') """ Explanation: comparison | Size | Method | Seconds | |----------|---------------|-----------| | 100x100 | Full Gaussian | 346 | | 100x100 | Markov FFT | 0.151 | The stationary circulant markov FFT method is 2291x faster End of explanation """
Elucidation/tensorflow_chessbot
tensorflow_learn.ipynb
mit
# Init and helper functions import tensorflow as tf import numpy as np import PIL import urllib, cStringIO import glob from IPython.core.display import Markdown from IPython.display import Image, display import helper_functions as hf import tensorflow_chessbot np.set_printoptions(precision=2, suppress=True) """ Explanation: Tensorflow Chessbot - Predicting chess pieces from images by training a single-layer classifier Link to Github source code Other IPython Notebooks for Tensorflow Chessbot: Computer Vision to turn a Chessboard image into chess tiles - Blog post #1 Programmatically generating training datasets Predicting chess pieces from images by training a single-layer classifier (This notebook) - Blog post #2 Chessboard Convolutional Neural Network classifier In this notebook we'll train a tensorflow neural network to tell what piece is on a chess square. In the previous notebook we wrote scripts that parsed input images which contained a chessboard into 32x32 grayscale chess squares. 
End of explanation """ # All tiles with pieces in random organizations all_paths = np.array(glob.glob("tiles/train_tiles_C/*/*.png")) # TODO : (set labels correctly) # Shuffle order of paths so when we split the train/test sets the order of files doesn't affect it np.random.shuffle(all_paths) ratio = 0.9 # training / testing ratio divider = int(len(all_paths) * ratio) train_paths = all_paths[:divider] test_paths = all_paths[divider:] # Training dataset # Generated by programmatic screenshots of lichess.org/editor/<FEN-string> print "Loading %d Training tiles" % train_paths.size train_images, train_labels = hf.loadFENtiles(train_paths) # Load from generated set # Test dataset, taken from screenshots of the starting position print "Loading %d Training tiles" % test_paths.size test_images, test_labels = hf.loadFENtiles(test_paths) # Load from generated set train_dataset = hf.DataSet(train_images, train_labels, dtype=tf.float32) test_dataset = hf.DataSet(test_images, test_labels, dtype=tf.float32) """ Explanation: Let's load the tiles in for the training and test dataset, and then split them in a 90/10 ratio End of explanation """ # Visualize a couple tiles for i in np.random.choice(train_dataset.num_examples, 5, replace=False): #for i in range(train_dataset.num_examples): #if hf.label2Name(train_dataset.labels[i]) == 'P': #print "%d: Piece(%s) : Label vector: %s" % (i, hf.label2Name(train_dataset.labels[i]), train_dataset.labels[i]) print "%d: Piece(%s)" % (i, hf.label2Name(train_dataset.labels[i])) hf.display_array(np.reshape(train_dataset.images[i,:],[32,32])) """ Explanation: Cool, lets look at a few images in the training set End of explanation """ x = tf.placeholder(tf.float32, [None, 32*32]) W = tf.Variable(tf.zeros([32*32, 13])) b = tf.Variable(tf.zeros([13])) y = tf.nn.softmax(tf.matmul(x, W) + b) y_ = tf.placeholder(tf.float32, [None, 13]) cross_entropy = -tf.reduce_sum(y_*tf.log(y)) train_step = 
tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy) # train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy) init = tf.initialize_all_variables() sess = tf.Session() sess.run(init) N = 6000 print "Training for %d steps..." % N for i in range(N): batch_xs, batch_ys = train_dataset.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) if ((i+1) % 500) == 0: print "\t%d/%d" % (i+1, N) print "Finished training." correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float")) print "Accuracy: %g\n" % sess.run(accuracy, feed_dict={x: test_dataset.images, y_: test_dataset.labels}) """ Explanation: Looks good. Now that we've loaded the data, let's build up a simple softmax regression classifier based off of this beginner tutorial on tensorflow. End of explanation """ print "Visualization of Weights as negative(Red) to positive(Blue)" for i in range(13): print "Piece: %s" % hf.labelIndex2Name(i) piece_weight = np.reshape(sess.run(W)[:,i], [32,32]) hf.display_weight(piece_weight,rng=[-0.2,0.2]) """ Explanation: Looks like it memorized everything from the datasets we collected, let's look at the weights to get an idea of what it sees for each piece. Weights End of explanation """ mistakes = tf.where(~correct_prediction) mistake_indices = sess.run(mistakes, feed_dict={x: test_dataset.images, y_: test_dataset.labels}).flatten() guess_prob, guessed = sess.run([y, tf.argmax(y,1)], feed_dict={x: test_dataset.images}) print "%d mistakes:" % mistake_indices.size for idx in np.random.choice(mistake_indices, 5, replace=False): a,b = test_dataset.labels[idx], guessed[idx] print "---" print "\t#%d | Actual: '%s', Guessed: '%s'" % (idx, hf.label2Name(a),hf.labelIndex2Name(b)) print "Actual:",a print " Guess:",guess_prob[idx,:] hf.display_array(np.reshape(test_dataset.images[idx,:],[32,32])) """ Explanation: Cool, you can see the shapes show up within the weights. 
Let's have a look at the failure cases to get a sense of what went wrong. End of explanation """ for idx in np.random.choice(test_dataset.num_examples,5,replace=False): a,b = test_dataset.labels[idx], guessed[idx] print "#%d | Actual: '%s', Guessed: '%s'" % (idx, hf.label2Name(a),hf.labelIndex2Name(b)) hf.display_array(np.reshape(test_dataset.images[idx,:],[32,32])) """ Explanation: It looks like it's been learning that pieces have black borders, and since this pieceSet didn't, and it was a small part of the training set, it just fails and thinks we're looking at blank squares, more training data! From the label probabilities, it did a reasonable job of thinking the pieces were white, and their second best guesses tended to be close to the right answer, the blank spaces just won out. Also, lets look at several random selections, including successes. End of explanation """ validate_img_path = 'chessboards/reddit/aL64q8w.png' img_arr = tensorflow_chessbot.loadImage(validate_img_path) tiles = tensorflow_chessbot.getTiles(img_arr) # See the screenshot display(Image(validate_img_path)) # see one of the tiles print "Let's see the 5th tile, corresponding to F1" hf.display_array(tiles[:,:,5]) validation_set = np.swapaxes(np.reshape(tiles, [32*32, 64]),0,1) guess_prob, guessed = sess.run([y, tf.argmax(y,1)], feed_dict={x: validation_set}) print "First 5 tiles" for idx in range(5): guess = guessed[idx] print "#%d | Actual: '?', Guessed: '%s'" % (idx, hf.labelIndex2Name(guess)) hf.display_array(np.reshape(validation_set[idx,:],[32,32])) """ Explanation: Manual validation via screenshots on reddit We'll eventually build a training/test/validation dataset of different proportions in one go, but for now, lets build a wrapper that given an image, returns a predicted FEN End of explanation """ # guessed is tiles A1-H8 rank-order, so to make a FEN we just need to flip the files from 1-8 to 8-1 pieceNames = map(lambda k: '1' if k == 0 else hf.labelIndex2Name(k), guessed) # exchange 
' ' for '1' for FEN fen = '/'.join([''.join(pieceNames[i*8:(i+1)*8]) for i in reversed(range(8))]) print "FEN:",fen # See our prediction as a chessboard display(Markdown("Prediction: [Lichess analysis](https://lichess.org/analysis/%s)" % fen)) display(Image(url='http://www.fen-to-image.com/image/%s' % fen)) # See the original screenshot we took from reddit print "Actual" display(Image(validate_img_path)) """ Explanation: Oh my, that looks correct, let's generate a FEN string from the guessed results, and view that side by side with the screenshot! End of explanation """ def getPrediction(img): """Run trained neural network on tiles generated from image""" # Convert to grayscale numpy array img_arr = np.asarray(img.convert("L"), dtype=np.float32) # Use computer vision to get the tiles tiles = tensorflow_chessbot.getTiles(img_arr) if tiles is []: print "Couldn't parse chessboard" return "" # Reshape into Nx1024 rows of input data, format used by neural network validation_set = np.swapaxes(np.reshape(tiles, [32*32, 64]),0,1) # Run neural network on data guess_prob, guessed = sess.run([y, tf.argmax(y,1)], feed_dict={x: validation_set}) # Convert guess into FEN string # guessed is tiles A1-H8 rank-order, so to make a FEN we just need to flip the files from 1-8 to 8-1 pieceNames = map(lambda k: '1' if k == 0 else hf.labelIndex2Name(k), guessed) # exchange ' ' for '1' for FEN fen = '/'.join([''.join(pieceNames[i*8:(i+1)*8]) for i in reversed(range(8))]) return fen def makePrediction(image_url): """Given image url to a chessboard image, return a visualization of FEN and link to a lichess analysis""" # Load image from url and display img = PIL.Image.open(cStringIO.StringIO(urllib.urlopen(image_url).read())) print "Image on which to make prediction: %s" % image_url hf.display_image(img.resize([200,200], PIL.Image.ADAPTIVE)) # Make prediction fen = getPrediction(img) display(Markdown("Prediction: [Lichess analysis](https://lichess.org/analysis/%s)" % fen)) 
display(Image(url='http://www.fen-to-image.com/image/%s' % fen)) print "FEN: %s" % fen """ Explanation: A perfect match! Awesome, at this point even though we have enough to make predictions from several lichess boards (not all of them yet) and return a result. We can build our reddit chatbot now. Predict from image url Let's wrap up predictions into a single function call from a URL, and test it on a few reddit posts. End of explanation """ makePrediction('http://i.imgur.com/x6lLQQK.png') """ Explanation: Make Predictions All the boilerplate is done, the model is trained, it's time. I chose the first post I saw on reddit.com/chess with a chessboard (something our CV algorithm can do also): https://www.reddit.com/r/chess/comments/45inab/moderate_black_to_play_and_win/ with an image url of http://i.imgur.com/x6lLQQK.png And awaayyy we gooo... End of explanation """ makePrediction('http://i.imgur.com/r2r43xA.png') """ Explanation: Fantastic, a perfect match! It was able to handle the highlighting on the pawn movement from G2 to F3 also. Now just for fun, let's try an image that is from a chessboard we've never seen before! Here's another on reddit: https://www.reddit.com/r/chess/comments/45c8ty/is_this_position_starting_move_36_a_win_for_white/ End of explanation """ makePrediction('http://i.imgur.com/gSFbM1d.png') """ Explanation: Hah, it thought the black pawns (on A3, B2, C4, and F2) were black bishops. Same for the white pawns. This would be a pretty bad situation for white. But amazingly it predicted all the other pieces and empty squares correctly! This is pretty great, let's look at a few more screenshots taken lichess. 
Here's https://www.reddit.com/r/chess/comments/44q2n6/tactic_from_a_game_i_just_played_white_to_move/ End of explanation """ makePrediction('http://imgur.com/oXpMSQI.png') """ Explanation: Perfect match, as expected, when the validation images are based off of what the model trains, it'll do great, but if we use images from chess boards we haven't trained on, we'll see lots of mistakes. Mistakes are fun, lets see some. Trying with non-lichess images End of explanation """ makePrediction('http://imgur.com/qk5xa6q.png') makePrediction('http://imgur.com/u4zF5Hj.png') makePrediction('http://imgur.com/CW675pw.png') makePrediction('https://i.ytimg.com/vi/pG1Uhw3pO8o/hqdefault.jpg') makePrediction('http://www.caissa.com/chess-openings/img/siciliandefense1.gif') makePrediction('http://www.jinchess.com/chessboard/?p=rnbqkbnrpPpppppp----------P----------------R----PP-PPPPPRNBQKBNR') """ Explanation: Ouch, it missed most of them there, the training data didn't contain images from this site, which looks somewhat like chess.com, need more DATA! End of explanation """
undercertainty/ou_nlp
semeval_experiments/fnn_beetles.ipynb
apache-2.0
# To support both python 2 and python 3 from __future__ import division, print_function, unicode_literals # Common imports import numpy as np import os # to make this notebook's output stable across runs def reset_graph(seed=42): tf.reset_default_graph() tf.set_random_seed(seed) np.random.seed(seed) # To plot pretty figures %matplotlib inline import matplotlib import matplotlib.pyplot as plt plt.rcParams['axes.labelsize'] = 14 plt.rcParams['xtick.labelsize'] = 12 plt.rcParams['ytick.labelsize'] = 12 # Where to save the figures PROJECT_ROOT_DIR = "." CHAPTER_ID = "ann" def save_fig(fig_id, tight_layout=True): path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png") print("Saving figure", fig_id) if tight_layout: plt.tight_layout() plt.savefig(path, format='png', dpi=300) """ Explanation: Chapter 10 – Introduction to Artificial Neural Networks This notebook contains all the sample code in chapter 10. Setup First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures: End of explanation """ import pandas as pd from sklearn.model_selection import train_test_split beetles_full = pd.read_csv('beetleTrainingData.csv') beetles_feature = beetles_full[beetles_full.columns[~beetles_full.columns.str.contains('_RA')]].drop(['accuracy_num', 'accuracy_txt'], axis=1).as_matrix() beetles_label = beetles_full.accuracy_txt.apply(lambda a: int(a == 'correct')).as_matrix() beetles_train_feature, beetles_test_feature, beetles_train_label, beetles_test_label = train_test_split( beetles_feature, beetles_label, test_size=0.2, random_state=42) beetles_train_feature.shape beetles_train_label.shape beetles_test_feature.shape beetles_test_label.shape """ Explanation: Loading the Data From the original data, we extract the student response tf-idf vector as the input feature, and the binary correctness value as the output label. 
In this way it becomes a binary classification task, where we aim to predict if the student's answer is correct or not based on the tf-idf vector of the answer. We randomly select 80% samples for training, and the remaining 20% as test set. End of explanation """ import numpy as np from sklearn.linear_model import Perceptron per_clf = Perceptron(random_state=42) # The random state is used to shuffle the data per_clf.fit(beetles_train_feature, beetles_train_label) training_accuracy = per_clf.score(beetles_train_feature, beetles_train_label) training_accuracy test_accuracy = per_clf.score(beetles_test_feature, beetles_test_label) test_accuracy per_clf.coef_[0].shape # the shape of w per_clf.intercept_ # the value of b """ Explanation: Perceptron A perceptron is the simplest type of neuron, where the input and output satisfy the relationship shown in the image below: <img src="perceptron.png"> End of explanation """ def logit(z): # logistic return 1 / (1 + np.exp(-z)) def relu(z): # rectified linear unit return np.maximum(0, z) def derivative(f, z, eps=0.000001): return (f(z + eps) - f(z - eps))/(2 * eps) z = np.linspace(-5, 5, 200) plt.figure(figsize=(11,4)) plt.subplot(121) plt.plot(z, np.sign(z), "r-", linewidth=2, label="Step") plt.plot(z, logit(z), "g--", linewidth=2, label="Logit") plt.plot(z, np.tanh(z), "b-", linewidth=2, label="Tanh") plt.plot(z, relu(z), "m-.", linewidth=2, label="ReLU") plt.grid(True) plt.legend(loc="center right", fontsize=14) plt.title("Activation functions", fontsize=14) plt.axis([-5, 5, -1.2, 1.2]) plt.subplot(122) plt.plot(z, derivative(np.sign, z), "r-", linewidth=2, label="Step") plt.plot(0, 0, "ro", markersize=5) plt.plot(0, 0, "rx", markersize=10) plt.plot(z, derivative(logit, z), "g--", linewidth=2, label="Logit") plt.plot(z, derivative(np.tanh, z), "b-", linewidth=2, label="Tanh") plt.plot(z, derivative(relu, z), "m-.", linewidth=2, label="ReLU") plt.grid(True) #plt.legend(loc="center right", fontsize=14) plt.title("Derivatives", 
fontsize=14) plt.axis([-5, 5, -0.2, 1.2]) # save_fig("activation_functions_plot") plt.show() def heaviside(z): return (z >= 0).astype(z.dtype) def sigmoid(z): return 1/(1+np.exp(-z)) def mlp_xor(x1, x2, activation=heaviside): return activation(-activation(x1 + x2 - 1.5) + activation(x1 + x2 - 0.5) - 0.5) x1s = np.linspace(-0.2, 1.2, 100) x2s = np.linspace(-0.2, 1.2, 100) x1, x2 = np.meshgrid(x1s, x2s) z1 = mlp_xor(x1, x2, activation=heaviside) z2 = mlp_xor(x1, x2, activation=sigmoid) plt.figure(figsize=(10,4)) plt.subplot(121) plt.contourf(x1, x2, z1) plt.plot([0, 1], [0, 1], "gs", markersize=20) plt.plot([0, 1], [1, 0], "y^", markersize=20) plt.title("Activation function: heaviside", fontsize=14) plt.grid(True) plt.subplot(122) plt.contourf(x1, x2, z2) plt.plot([0, 1], [0, 1], "gs", markersize=20) plt.plot([0, 1], [1, 0], "y^", markersize=20) plt.title("Activation function: sigmoid", fontsize=14) plt.grid(True) """ Explanation: Activation functions A perceptron uses the stepwise function to determine the output. In other neural networks, more complicated activation functions are used. End of explanation """ import tensorflow as tf config = tf.contrib.learn.RunConfig(tf_random_seed=42) feature_cols = tf.contrib.learn.infer_real_valued_columns_from_input(beetles_train_feature) #The network has two hidden layers, 150 and 50 dnn_clf = tf.contrib.learn.DNNClassifier(hidden_units=[150,50], n_classes=2,feature_columns=feature_cols, config=config) dnn_clf = tf.contrib.learn.SKCompat(dnn_clf) # if TensorFlow >= 1.1 #fit the model with data, i.e. 
backpropagation to update the weights dnn_clf.fit(beetles_train_feature, beetles_train_label, batch_size=50, steps=10000) from sklearn.metrics import accuracy_score pred_label = dnn_clf.predict(beetles_train_feature) training_accuracy = accuracy_score(beetles_train_label, pred_label['classes']) training_accuracy pred_label = dnn_clf.predict(beetles_test_feature) test_accuracy = accuracy_score(beetles_test_label, pred_label['classes']) test_accuracy from sklearn.metrics import log_loss pred_proba = pred_label['probabilities'] test_log_loss = log_loss(beetles_test_label, pred_proba) test_log_loss """ Explanation: Feedforward Neural Network (FNN) A feedforward neural network is an artificial neural network wherein connections between the units do not form a cycle.<img src="Feed_forward_neural_net.gif"> In the training phase, the connection weights are updated through the back-propagation algorithm. Using tf.learn End of explanation """ import tensorflow as tf n_inputs = len(beetles_train_feature[0]) n_hidden1 = 150 n_hidden2 = 50 n_outputs = 2 reset_graph() X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") y = tf.placeholder(tf.int64, shape=(None), name="y") def neuron_layer(X, n_neurons, name, activation=None): with tf.name_scope(name): n_inputs = int(X.get_shape()[1]) stddev = 2 / np.sqrt(n_inputs) init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev) W = tf.Variable(init, name="kernel") b = tf.Variable(tf.zeros([n_neurons]), name="bias") Z = tf.matmul(X, W) + b if activation is not None: return activation(Z) else: return Z with tf.name_scope("dnn"): hidden1 = neuron_layer(X, n_hidden1, name="hidden1", activation=tf.nn.relu) hidden2 = neuron_layer(hidden1, n_hidden2, name="hidden2", activation=tf.nn.relu) logits = neuron_layer(hidden2, n_outputs, name="outputs") with tf.name_scope("loss"): xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits) loss = tf.reduce_mean(xentropy, name="loss") learning_rate = 0.0003 
with tf.name_scope("train"): optimizer = tf.train.GradientDescentOptimizer(learning_rate) training_op = optimizer.minimize(loss) with tf.name_scope("eval"): correct = tf.nn.in_top_k(logits, y, 1) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) init = tf.global_variables_initializer() saver = tf.train.Saver() n_epochs = 20 batch_size = 100 from sklearn.utils import shuffle with tf.Session() as sess: init.run() for epoch in range(n_epochs): train_feature, train_label = shuffle(beetles_train_feature, beetles_train_label, random_state = epoch) for iteration in range(len(beetles_train_feature) // batch_size): # X_batch, y_batch = mnist.train.next_batch(batch_size) begin_index = iteration * batch_size end_index = min((iteration+1) * batch_size, len(beetles_train_feature)) # print(begin_index, end_index) X_batch = train_feature[begin_index:end_index] y_batch = train_label[begin_index:end_index] # print(X_batch.shape) # print(y_batch.shape) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch}) acc_test = accuracy.eval(feed_dict={X: beetles_test_feature, y: beetles_test_label}) print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test) save_path = saver.save(sess, "./my_model_plain.ckpt") with tf.Session() as sess: saver.restore(sess, "./my_model_plain.ckpt") # or better, use save_path X_new_scaled = beetles_test_feature[:20] Z = logits.eval(feed_dict={X: X_new_scaled}) y_pred = np.argmax(Z, axis=1) print("Predicted classes:", y_pred) print("Actual classes: ", beetles_test_label[:20]) from IPython.display import clear_output, Image, display, HTML def strip_consts(graph_def, max_const_size=32): """Strip large constant values from graph_def.""" strip_def = tf.GraphDef() for n0 in graph_def.node: n = strip_def.node.add() n.MergeFrom(n0) if n.op == 'Const': tensor = n.attr['value'].tensor size = len(tensor.tensor_content) if size > max_const_size: tensor.tensor_content = b"<stripped %d 
bytes>"%size return strip_def def show_graph(graph_def, max_const_size=32): """Visualize TensorFlow graph.""" if hasattr(graph_def, 'as_graph_def'): graph_def = graph_def.as_graph_def() strip_def = strip_consts(graph_def, max_const_size=max_const_size) code = """ <script> function load() {{ document.getElementById("{id}").pbtxt = {data}; }} </script> <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()> <div style="height:600px"> <tf-graph-basic id="{id}"></tf-graph-basic> </div> """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand())) iframe = """ <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe> """.format(code.replace('"', '&quot;')) display(HTML(iframe)) show_graph(tf.get_default_graph()) """ Explanation: Using plain TensorFlow End of explanation """ n_inputs = len(beetles_train_feature[0]) n_hidden1 = 150 n_hidden2 = 50 n_outputs = 2 reset_graph() X = tf.placeholder(tf.float32, shape=(None, n_inputs), name="X") y = tf.placeholder(tf.int64, shape=(None), name="y") with tf.name_scope("dnn"): hidden1 = tf.layers.dense(X, n_hidden1, name="hidden1", activation=tf.nn.relu) hidden2 = tf.layers.dense(hidden1, n_hidden2, name="hidden2", activation=tf.nn.relu) logits = tf.layers.dense(hidden2, n_outputs, name="outputs") with tf.name_scope("loss"): xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits) loss = tf.reduce_mean(xentropy, name="loss") learning_rate = 0.0003 with tf.name_scope("train"): optimizer = tf.train.GradientDescentOptimizer(learning_rate) training_op = optimizer.minimize(loss) with tf.name_scope("eval"): correct = tf.nn.in_top_k(logits, y, 1) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32)) init = tf.global_variables_initializer() saver = tf.train.Saver() n_epochs = 20 n_batches = 100 with tf.Session() as sess: init.run() for epoch in range(n_epochs): train_feature, train_label = shuffle(beetles_train_feature, 
beetles_train_label, random_state = epoch) for iteration in range(len(beetles_train_feature) // batch_size): begin_index = iteration * batch_size end_index = min((iteration+1) * batch_size, len(beetles_train_feature)) # print(begin_index, end_index) X_batch = train_feature[begin_index:end_index] y_batch = train_label[begin_index:end_index] # print(X_batch.shape) # print(y_batch.shape) #X_batch, y_batch = mnist.train.next_batch(batch_size) sess.run(training_op, feed_dict={X: X_batch, y: y_batch}) # print(X_batch.shape) # print(begin_index,end_index) acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch}) acc_test = accuracy.eval(feed_dict={X: beetles_test_feature, y: beetles_test_label}) print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test) save_path = saver.save(sess, "./my_model_dense.ckpt") show_graph(tf.get_default_graph()) """ Explanation: Using dense() instead of neuron_layer() Note: the book uses tensorflow.contrib.layers.fully_connected() rather than tf.layers.dense() (which did not exist when this chapter was written). It is now preferable to use tf.layers.dense(), because anything in the contrib module may change or be deleted without notice. The dense() function is almost identical to the fully_connected() function, except for a few minor differences: * several parameters are renamed: scope becomes name, activation_fn becomes activation (and similarly the _fn suffix is removed from other parameters such as normalizer_fn), weights_initializer becomes kernel_initializer, etc. * the default activation is now None rather than tf.nn.relu. * a few more differences are presented in chapter 11. End of explanation """
tpin3694/tpin3694.github.io
machine-learning/create_baseline_classification_model.ipynb
mit
# Load libraries from sklearn.datasets import load_iris from sklearn.dummy import DummyClassifier from sklearn.model_selection import train_test_split """ Explanation: Title: Create Baseline Classification Model Slug: create_baseline_classification_model Summary: How to create a baseline classification model in scikit-learn for machine learning in Python. Date: 2017-09-14 12:00 Category: Machine Learning Tags: Model Evaluation Authors: Chris Albon Preliminaries End of explanation """ # Load data iris = load_iris() # Create target vector and feature matrix X, y = iris.data, iris.target """ Explanation: Load Iris Flower Dataset End of explanation """ # Split into training and test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) """ Explanation: Split Data Into Training And Test Set End of explanation """ # Create dummy classifer dummy = DummyClassifier(strategy='uniform', random_state=1) # "Train" model dummy.fit(X_train, y_train) """ Explanation: Create Dummy Regression Always Predicts The Mean Value Of Target End of explanation """ # Get accuracy score dummy.score(X_test, y_test) """ Explanation: Evaluate Performance Metric End of explanation """
mkcor/datavis-tut
solutions/1D_solutions.ipynb
cc0-1.0
df = pd.read_csv('data/coherence_timeseries.csv', header=None) df.columns = ['time', 'signal'] df.head() import matplotlib %matplotlib inline matplotlib.style.use('ggplot') import matplotlib.pyplot as plt plt.plot(df['time'], df['signal']) plt.xlabel('time (fs)') plt.ylabel('signal (a.u.)') plt.title('Decoherence') """ Explanation: Alternatively, we can read the CSV file into a data frame. End of explanation """ ts.loc[:20.0].plot() """ Explanation: At first glance, we can identify two different parts in this time series: before and after roughly $t = 20$ fs. The signal looks pseudo-periodic before $20$ fs, and noisy after $20$ fs. End of explanation """ ts.loc[:8].plot() """ Explanation: There are about 10 pseudo-periods so one pseudo-period is $T \sim 2$ fs. Note that angles are easier on the eye when no sharper than $70$ degrees. End of explanation """ df[:20].plot(kind='scatter', x='time', y='signal') """ Explanation: What about a scatter plot? End of explanation """ # Main axes plt.plot(df['time'][:20], df['signal'][:20]) plt.xlabel('time (fs)') plt.ylabel('signal (a.u.)') plt.title('Decoherence') # This is an inset axes over the main axes inset = plt.axes([.67, .65, .2, .2], axisbg='w') inset.plot(df['time'][:120], df['signal'][:120]) plt.setp(inset, yticks=[]) """ Explanation: Maybe add a 'fitting' sine (line) as a guide to the eye? We may want to use an inset in order to visualize both time scales (pseudo-period $T$ in the main plot and decoherence duration $t$ in the inset). 
End of explanation """ import plotly plotly.__version__ import plotly.plotly as py import plotly.graph_objs as pgo periods_trace = pgo.Scatter(x=df['time'][:20], y=df['signal'][:20]) decoherence_trace = pgo.Scatter(x=df['time'][:120], y=df['signal'][:120], xaxis='x2', yaxis='y2') data = pgo.Data([periods_trace, decoherence_trace]) layout = pgo.Layout(xaxis2=pgo.XAxis(domain=[0.6, 0.95], anchor='y2', zeroline=False), yaxis2=pgo.YAxis(domain=[0.6, 0.95], anchor='x2', zeroline=False), showlegend=False, xaxis=pgo.XAxis(zeroline=False), yaxis=pgo.YAxis(zeroline=False)) fig = pgo.Figure(data=data, layout=layout) py.iplot(fig, filename = 'inset-two-timescales') """ Explanation: plotly offers a more intuitive syntax. End of explanation """
granttremblay/Meg_Urry_NSFprop
meg_plots.ipynb
mit
import os import glob import math import numpy as np import matplotlib.pyplot as plt from astropy.io import ascii from astropy.table import vstack from astropy import units as u from astropy import constants as const """ Explanation: Plots for Fig. 1 and Fig. 4b for Meg Urry's 2016 NSF Proposal Grant Tremblay, Yale University End of explanation """ # Plots should be pretty plt.style.use('ggplot') %matplotlib inline """ Explanation: For now, I'll use the matplotlib ggplot style from R. It's pretty. End of explanation """ lum_v_z_files = glob.glob('data/lum_v_z/*.txt') r_k_files = glob.glob('data/r-k/*.txt') # by globbing on .txt files, the output will be sorted alphabetically by name """ Explanation: Find the data tables and dump them into lists. End of explanation """ lum_v_z_files column_names_lum_v_z = ["z", "Xlum"] s82x_lum_v_z = ascii.read(lum_v_z_files[2], names=column_names_lum_v_z) cosmos_lum_v_z = ascii.read(lum_v_z_files[1], names=column_names_lum_v_z) cdfs_lum_v_z = ascii.read(lum_v_z_files[0], names=column_names_lum_v_z) """ Explanation: Populate the redshift-Luminosity tables. End of explanation """ r_k_files column_names_r_k = ["R-K", "X/O"] extragalactic_sources_r_k = ascii.read(r_k_files[0], names=column_names_r_k) stars_r_k = ascii.read(r_k_files[3], names=column_names_r_k) sources_lacking_redshifts_r_k = ascii.read(r_k_files[2], names=column_names_r_k) rw1_stars_r_k = ascii.read(r_k_files[1], names=column_names_r_k) targets_r_k = ascii.read(r_k_files[4], names=column_names_r_k) """ Explanation: Populate the R-K tables. 
End of explanation """ # Stack the two tables on top of each other stars = vstack([rw1_stars_r_k, stars_r_k]) """ Explanation: Steph asked that we just combine all stars into one category, for simplicity: End of explanation """ plt.figure() fig, ax = plt.subplots() ax.set_xlabel('Redshift (z)') ax.set_ylabel('Log X-ray Luminosity (0.5-2 keV)') ax.plot(cdfs_lum_v_z["z"], cdfs_lum_v_z["Xlum"], marker='s', linestyle="None", alpha=1.0, label="CDFS", color=plt.rcParams['axes.color_cycle'][2]) ax.plot(cosmos_lum_v_z["z"], cosmos_lum_v_z["Xlum"], marker='^', linestyle="None", alpha=1.0, label="COSMOS Legacy", color=plt.rcParams['axes.color_cycle'][1]) ax.plot(s82x_lum_v_z["z"], s82x_lum_v_z["Xlum"], marker='o', linestyle="None", alpha=1.0, label="Stripe 82X", color=plt.rcParams['axes.color_cycle'][0]) ax.legend(loc=4, frameon=True, numpoints=1, prop={'size':10}) plt.savefig("Fig1b.pdf") ax.set_aspect('equal') plt.savefig("Fig1b_equal_aspect.pdf") """ Explanation: These are now Astropy table objects. Make a L-z plot End of explanation """ plt.figure() fig, ax = plt.subplots() ax.set_xlabel('R-K color (Vega)') ax.set_ylabel('X-ray / Optical Ratio') ax.plot(extragalactic_sources_r_k["R-K"], extragalactic_sources_r_k["X/O"], marker='o', color="Gray", markeredgewidth=0, alpha=0.5, linestyle="None", label="Extragalactic Sources") ax.plot(targets_r_k["R-K"], targets_r_k["X/O"], marker='s', linestyle="None", label="NIR Spectroscopy Targets") ax.plot(sources_lacking_redshifts_r_k["R-K"], sources_lacking_redshifts_r_k["X/O"], marker='^', linestyle="None", label="Optical Spectroscopy Targets") ax.plot(stars["R-K"], stars["X/O"], marker='o', linestyle="None", label="Stars") ax.plot([4, 7], [0, 0], color='k', linestyle='-', linewidth=1) ax.plot([4, 4], [0, 4], color='k', linestyle='-', linewidth=1) ax.legend(loc=0, frameon=True, numpoints=1, prop={'size':10}) plt.savefig("Fig4b.pdf") """ Explanation: Make an r-k plot End of explanation """
Aniruddha-Tapas/Applied-Machine-Learning
Miscellaneous/Student-Performance-Evaluation-Classification-Regression.ipynb
mit
import os from sklearn.tree import DecisionTreeClassifier, export_graphviz import pandas as pd import numpy as np import matplotlib.pyplot as plt %matplotlib inline from sklearn.cross_validation import train_test_split from sklearn import cross_validation, metrics from sklearn.ensemble import RandomForestClassifier from sklearn.naive_bayes import BernoulliNB from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from time import time from sklearn.pipeline import Pipeline from sklearn.metrics import roc_auc_score , classification_report from sklearn.grid_search import GridSearchCV from sklearn.pipeline import Pipeline from sklearn.metrics import precision_score, recall_score, accuracy_score, classification_report # read .csv from provided dataset csv_filename="student/student-mat.csv" # df=pd.read_csv(csv_filename,index_col=0) df=pd.read_csv(csv_filename, sep=";") df.head() df.describe() """ Explanation: Student-Performance-Evaluation using Classification-Regression Here we would try to predict student performance in secondary education (high school). We would perform data analysis for 3 cases : Case 1: Binary-Classification :-G3>10:-1-else-0 Case 2: Multi-Class-Classification Case 3: Regression Data Set Information: This data approaches student achievement in secondary education of two Portuguese schools. The data attributes include student grades, demographic, social and school related features) and it was collected by using school reports and questionnaires. Two datasets are provided regarding the performance in two distinct subjects: Mathematics (mat) and Portuguese language (por). In [Cortez and Silva, 2008], the two datasets were modeled under binary/five-level classification and regression tasks. Important note: the target attribute G3 has a strong correlation with attributes G2 and G1. This occurs because G3 is the final year grade (issued at the 3rd period), while G1 and G2 correspond to the 1st and 2nd period grades. 
It is more difficult to predict G3 without G2 and G1, but such prediction is much more useful. Dataset : http://archive.ics.uci.edu/ml/datasets/Student+Performance Attributes for both student-mat.csv (Math course) and student-por.csv (Portuguese language course) datasets: school - student's school (binary: 'GP' - Gabriel Pereira or 'MS' - Mousinho da Silveira) sex - student's sex (binary: 'F' - female or 'M' - male) age - student's age (numeric: from 15 to 22) address - student's home address type (binary: 'U' - urban or 'R' - rural) famsize - family size (binary: 'LE3' - less or equal to 3 or 'GT3' - greater than 3) Pstatus - parent's cohabitation status (binary: 'T' - living together or 'A' - apart) Medu - mother's education (numeric: 0 - none, 1 - primary education (4th grade), 2 - 5th to 9th grade, 3 - secondary education or 4 - higher education) Fedu - father's education (numeric: 0 - none, 1 - primary education (4th grade), 2 - 5th to 9th grade, 3 - secondary education or 4 - higher education) Mjob - mother's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') Fjob - father's job (nominal: 'teacher', 'health' care related, civil 'services' (e.g. administrative or police), 'at_home' or 'other') reason - reason to choose this school (nominal: close to 'home', school 'reputation', 'course' preference or 'other') guardian - student's guardian (nominal: 'mother', 'father' or 'other') traveltime - home to school travel time (numeric: 1 - <15 min., 2 - 15 to 30 min., 3 - 30 min. 
to 1 hour, or 4 - >1 hour) studytime - weekly study time (numeric: 1 - <2 hours, 2 - 2 to 5 hours, 3 - 5 to 10 hours, or 4 - >10 hours) failures - number of past class failures (numeric: n if 1<=n\<3, else 4) schoolsup - extra educational support (binary: yes or no) famsup - family educational support (binary: yes or no) paid - extra paid classes within the course subject (Math or Portuguese) (binary: yes or no) activities - extra-curricular activities (binary: yes or no) nursery - attended nursery school (binary: yes or no) higher - wants to take higher education (binary: yes or no) internet - Internet access at home (binary: yes or no) romantic - with a romantic relationship (binary: yes or no) famrel - quality of family relationships (numeric: from 1 - very bad to 5 - excellent) freetime - free time after school (numeric: from 1 - very low to 5 - very high) goout - going out with friends (numeric: from 1 - very low to 5 - very high) Dalc - workday alcohol consumption (numeric: from 1 - very low to 5 - very high) Walc - weekend alcohol consumption (numeric: from 1 - very low to 5 - very high) health - current health status (numeric: from 1 - very bad to 5 - very good) absences - number of school absences (numeric: from 0 to 93) G1 - first period grade (numeric: from 0 to 20) G2 - second period grade (numeric: from 0 to 20) G3 - final grade (numeric: from 0 to 20, output target) these grades are related with the course subject, Math or Portuguese: End of explanation """ df.G3.describe() # handle G3 attrubte to binary high = df.G3 >= 10 low = df.G3 < 10 df.loc[high,'G3'] = 1 df.loc[low,'G3'] = 0 df.head() df.G3.describe() cols = list(df.columns) categorical_features = [] for f in cols: if df[f].dtype != 'int64': categorical_features.append(f) categorical_features for f in categorical_features: #Get binarized columns df[f] = pd.get_dummies(df[f]) df.head() features=list(df.columns[:-1]) X = df[features] y = df['G3'] # split dataset to 60% training and 40% testing 
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y, test_size=0.4, random_state=0) print (X_train.shape, y_train.shape) """ Explanation: CASE 1: Binary Classification : G3>10: 1 else 0 End of explanation """ import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import ExtraTreesClassifier # Build a classification task using 3 informative features # Build a forest and compute the feature importances forest = ExtraTreesClassifier(n_estimators=250, random_state=0) forest.fit(X, y) importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("Feature ranking:") for f in range(X.shape[1]): print("%d. feature %d - %s (%f) " % (f + 1, indices[f], features[indices[f]], importances[indices[f]])) # Plot the feature importances of the forest plt.figure(num=None, figsize=(14, 10), dpi=80, facecolor='w', edgecolor='k') plt.title("Feature importances") plt.bar(range(X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() importances[indices[:5]] for f in range(5): print("%d. feature %d - %s (%f)" % (f + 1, indices[f], features[indices[f]] ,importances[indices[f]])) best_features = [] for i in indices[:5]: best_features.append(features[i]) # Plot the top 5 feature importances of the forest plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k') plt.title("Feature importances") plt.bar(range(5), importances[indices][:5], color="r", yerr=std[indices][:5], align="center") plt.xticks(range(5), best_features) plt.xlim([-1, 5]) plt.show() """ Explanation: Feature importances with forests of trees This examples shows the use of forests of trees to evaluate the importance of features on an artificial classification task. 
The red bars are the feature importances of the forest, along with their inter-trees variability. End of explanation """ t0=time() print ("DecisionTree") dt = DecisionTreeClassifier(min_samples_split=20,random_state=99) # dt = DecisionTreeClassifier(min_samples_split=20,max_depth=5,random_state=99) clf_dt=dt.fit(X_train,y_train) print ("Acurracy: ", clf_dt.score(X_test,y_test)) t1=time() print ("time elapsed: ", t1-t0) """ Explanation: Decision Tree accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(dt, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross validation for DT End of explanation """ from sklearn.metrics import classification_report pipeline = Pipeline([ ('clf', DecisionTreeClassifier(criterion='entropy')) ]) parameters = { 'clf__max_depth': (5, 25 , 50), 'clf__min_samples_split': (1, 5, 10), 'clf__min_samples_leaf': (1, 2, 3) } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='f1') grid_search.fit(X_train, y_train) print 'Best score: %0.3f' % grid_search.best_score_ print 'Best parameters set:' best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid_search.predict(X_test) print classification_report(y_test, predictions) """ Explanation: Tuning our hyperparameters using GridSearch End of explanation """ t2=time() print ("RandomForest") rf = RandomForestClassifier(n_estimators=100,n_jobs=-1) clf_rf = rf.fit(X_train,y_train) print ("Acurracy: ", clf_rf.score(X_test,y_test)) t3=time() print ("time elapsed: ", t3-t2) """ Explanation: Random Forest accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(rf, X,y, cv=5) print (scores) print (scores.mean()) 
tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross validation for RF End of explanation """ roc_auc_score(y_test,rf.predict(X_test)) %matplotlib inline import matplotlib.pyplot as plt from sklearn.metrics import roc_curve, auc predictions = rf.predict_proba(X_test) false_positive_rate, recall, thresholds = roc_curve(y_test, predictions[:, 1]) roc_auc = auc(false_positive_rate, recall) plt.title('Receiver Operating Characteristic') plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc) plt.legend(loc='lower right') plt.plot([0, 1], [0, 1], 'r--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.ylabel('Recall') plt.xlabel('Fall-out') plt.show() """ Explanation: Receiver Operating Characteristic (ROC) curve End of explanation """ pipeline2 = Pipeline([ ('clf', RandomForestClassifier(criterion='entropy')) ]) parameters = { 'clf__n_estimators': (5, 25, 50, 100), 'clf__max_depth': (5, 25 , 50), 'clf__min_samples_split': (1, 5, 10), 'clf__min_samples_leaf': (1, 2, 3) } grid_search = GridSearchCV(pipeline2, parameters, n_jobs=-1, verbose=1, scoring='accuracy', cv=3) grid_search.fit(X_train, y_train) print 'Best score: %0.3f' % grid_search.best_score_ print 'Best parameters set:' best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid_search.predict(X_test) print 'Accuracy:', accuracy_score(y_test, predictions) print classification_report(y_test, predictions) """ Explanation: Tuning Models using GridSearch End of explanation """ t4=time() print ("NaiveBayes") nb = BernoulliNB() clf_nb=nb.fit(X_train,y_train) print ("Acurracy: ", clf_nb.score(X_test,y_test)) t5=time() print ("time elapsed: ", t5-t4) """ Explanation: Naive Bayes accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(nb, X,y, cv=5) print (scores) print 
(scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross-validation for NB End of explanation """ t6=time() print ("KNN") # knn = KNeighborsClassifier(n_neighbors=3) knn = KNeighborsClassifier() clf_knn=knn.fit(X_train, y_train) print ("Acurracy: ", clf_knn.score(X_test,y_test) ) t7=time() print ("time elapsed: ", t7-t6) """ Explanation: KNN accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(knn, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross validation for KNN End of explanation """ t7=time() print ("SVM") svc = SVC() clf_svc=svc.fit(X_train, y_train) print ("Acurracy: ", clf_svc.score(X_test,y_test) ) t8=time() print ("time elapsed: ", t8-t7) """ Explanation: SVM accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(svc, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) from sklearn.svm import SVC from sklearn.cross_validation import cross_val_score from sklearn.pipeline import Pipeline from sklearn import grid_search svc = SVC() parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} grid = grid_search.GridSearchCV(svc, parameters, n_jobs=-1, verbose=1, scoring='accuracy') grid.fit(X_train, y_train) print 'Best score: %0.3f' % grid.best_score_ print 'Best parameters set:' best_parameters = grid.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid.predict(X_test) print classification_report(y_test, predictions) pipeline = Pipeline([ ('clf', SVC(kernel='rbf', gamma=0.01, C=100)) ]) parameters = { 'clf__gamma': (0.01, 0.03, 0.1, 0.3, 1), 'clf__C': (0.1, 0.3, 1, 3, 10, 30), } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, 
scoring='accuracy') grid_search.fit(X_train, y_train) print 'Best score: %0.3f' % grid_search.best_score_ print 'Best parameters set:' best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid_search.predict(X_test) print classification_report(y_test, predictions) """ Explanation: cross validation for SVM End of explanation """ # read .csv from provided dataset csv_filename="student/student-mat.csv" # df=pd.read_csv(csv_filename,index_col=0) df=pd.read_csv(csv_filename, sep=";") df.head() df.describe() """ Explanation: End of explanation """ df.G3.describe() for i in range(len(df.G3)): if df.G3.loc[i] < 10: df.G3.loc[i] = 5 elif df.G3.loc[i] < 12: df.G3.loc[i] = 4 elif df.G3.loc[i] < 14: df.G3.loc[i] = 3 elif df.G3.loc[i] < 16: df.G3.loc[i] = 2 elif df.G3.loc[i] < 21: df.G3.loc[i] = 1 df.G3.unique() df.head() df.G3.describe() cols = list(df.columns) categorical_features = [] for f in cols: if df[f].dtype != 'int64': categorical_features.append(f) categorical_features for f in categorical_features: #Get binarized columns df[f] = pd.get_dummies(df[f]) df.head() features=list(df.columns[:-1]) X = df[features] y = df['G3'] # split dataset to 60% training and 40% testing X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y, test_size=0.4, random_state=0) print (X_train.shape, y_train.shape) """ Explanation: CASE 2: Multi Class Classification : <table> <tr> <th>Class</th><th>G3</th><th>Label</th> </tr> <tr> <td>I (excellent/very good)</td><td>16-20</td><td>A</td> </tr> <tr> <td>II (good)</td><td>14-15</td><td>B</td> </tr> <tr> <td>III (satisfactory)</td><td>12-13</td><td>C</td> </tr> <tr> <td>IV (sufficient)</td><td>10-11</td><td>D</td> </tr> <tr> <td>V (fail)</td><td>0-9</td><td>E</td> </tr> End of explanation """ import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import ExtraTreesClassifier # Build a 
classification task using 3 informative features # Build a forest and compute the feature importances forest = ExtraTreesClassifier(n_estimators=250, random_state=0) forest.fit(X, y) importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("Feature ranking:") for f in range(X.shape[1]): print("%d. feature %d - %s (%f) " % (f + 1, indices[f], features[indices[f]], importances[indices[f]])) # Plot the feature importances of the forest plt.figure(num=None, figsize=(14, 10), dpi=80, facecolor='w', edgecolor='k') plt.title("Feature importances") plt.bar(range(X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(X.shape[1]), indices) plt.xlim([-1, X.shape[1]]) plt.show() importances[indices[:5]] for f in range(5): print("%d. feature %d - %s (%f)" % (f + 1, indices[f], features[indices[f]] ,importances[indices[f]])) best_features = [] for i in indices[:5]: best_features.append(features[i]) # Plot the top 5 feature importances of the forest plt.figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k') plt.title("Feature importances") plt.bar(range(5), importances[indices][:5], color="r", yerr=std[indices][:5], align="center") plt.xticks(range(5), best_features) plt.xlim([-1, 5]) plt.show() """ Explanation: Feature importances with forests of trees This examples shows the use of forests of trees to evaluate the importance of features on an artificial classification task. The red bars are the feature importances of the forest, along with their inter-trees variability. 
End of explanation """ t0=time() print ("DecisionTree") dt = DecisionTreeClassifier(min_samples_split=20,random_state=99) # dt = DecisionTreeClassifier(min_samples_split=20,max_depth=5,random_state=99) clf_dt=dt.fit(X_train,y_train) print ("Acurracy: ", clf_dt.score(X_test,y_test)) t1=time() print ("time elapsed: ", t1-t0) """ Explanation: Decision Tree accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(dt, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross validation for DT End of explanation """ from sklearn.metrics import classification_report pipeline = Pipeline([ ('clf', DecisionTreeClassifier(criterion='entropy')) ]) parameters = { 'clf__max_depth': (5, 25 , 50), 'clf__min_samples_split': (1, 5, 10), 'clf__min_samples_leaf': (1, 2, 3) } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='f1') grid_search.fit(X_train, y_train) print 'Best score: %0.3f' % grid_search.best_score_ print 'Best parameters set:' best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid_search.predict(X_test) print classification_report(y_test, predictions) """ Explanation: Tuning our hyperparameters using GridSearch End of explanation """ t2=time() print ("RandomForest") rf = RandomForestClassifier(n_estimators=100,n_jobs=-1) clf_rf = rf.fit(X_train,y_train) print ("Acurracy: ", clf_rf.score(X_test,y_test)) t3=time() print ("time elapsed: ", t3-t2) """ Explanation: Random Forest accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(rf, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross validation for RF End of explanation 
""" pipeline2 = Pipeline([ ('clf', RandomForestClassifier(criterion='entropy')) ]) parameters = { 'clf__n_estimators': (5, 25, 50, 100), 'clf__max_depth': (5, 25 , 50), 'clf__min_samples_split': (1, 5, 10), 'clf__min_samples_leaf': (1, 2, 3) } grid_search = GridSearchCV(pipeline2, parameters, n_jobs=-1, verbose=1, scoring='accuracy', cv=3) grid_search.fit(X_train, y_train) print 'Best score: %0.3f' % grid_search.best_score_ print 'Best parameters set:' best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid_search.predict(X_test) print 'Accuracy:', accuracy_score(y_test, predictions) print classification_report(y_test, predictions) """ Explanation: Tuning Models using GridSearch End of explanation """ t4=time() print ("NaiveBayes") nb = BernoulliNB() clf_nb=nb.fit(X_train,y_train) print ("Acurracy: ", clf_nb.score(X_test,y_test)) t5=time() print ("time elapsed: ", t5-t4) """ Explanation: Naive Bayes accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(nb, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross-validation for NB End of explanation """ t6=time() print ("KNN") # knn = KNeighborsClassifier(n_neighbors=3) knn = KNeighborsClassifier() clf_knn=knn.fit(X_train, y_train) print ("Acurracy: ", clf_knn.score(X_test,y_test) ) t7=time() print ("time elapsed: ", t7-t6) """ Explanation: KNN accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(knn, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) """ Explanation: cross validation for KNN End of explanation """ t7=time() print ("SVM") svc = SVC() clf_svc=svc.fit(X_train, y_train) print ("Acurracy: ", 
clf_svc.score(X_test,y_test) ) t8=time() print ("time elapsed: ", t8-t7) """ Explanation: SVM accuracy and time elapsed caculation End of explanation """ tt0=time() print ("cross result========") scores = cross_validation.cross_val_score(svc, X,y, cv=5) print (scores) print (scores.mean()) tt1=time() print ("time elapsed: ", tt1-tt0) from sklearn.svm import SVC from sklearn.cross_validation import cross_val_score from sklearn.pipeline import Pipeline from sklearn import grid_search svc = SVC() parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]} grid = grid_search.GridSearchCV(svc, parameters, n_jobs=-1, verbose=1, scoring='accuracy') grid.fit(X_train, y_train) print 'Best score: %0.3f' % grid.best_score_ print 'Best parameters set:' best_parameters = grid.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid.predict(X_test) print classification_report(y_test, predictions) pipeline = Pipeline([ ('clf', SVC(kernel='rbf', gamma=0.01, C=100)) ]) parameters = { 'clf__gamma': (0.01, 0.03, 0.1, 0.3, 1), 'clf__C': (0.1, 0.3, 1, 3, 10, 30), } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1, scoring='accuracy') grid_search.fit(X_train, y_train) print 'Best score: %0.3f' % grid_search.best_score_ print 'Best parameters set:' best_parameters = grid_search.best_estimator_.get_params() for param_name in sorted(parameters.keys()): print '\t%s: %r' % (param_name, best_parameters[param_name]) predictions = grid_search.predict(X_test) print classification_report(y_test, predictions) """ Explanation: cross validation for SVM End of explanation """ import numpy as np from sklearn.linear_model import LinearRegression from sklearn.linear_model import SGDRegressor from sklearn.preprocessing import StandardScaler from sklearn.cross_validation import train_test_split from sklearn. 
cross_validation import cross_val_score from sklearn.feature_selection import * from sklearn import metrics # read .csv from provided dataset csv_filename="student/student-mat.csv" # df=pd.read_csv(csv_filename,index_col=0) df=pd.read_csv(csv_filename,sep=";") df.head() cols = list(df.columns) categorical_features = [] for f in cols: if df[f].dtype != 'int64': categorical_features.append(f) categorical_features for f in categorical_features: #Get binarized columns df[f] = pd.get_dummies(df[f]) df.head() features=list(df.columns[:-1]) X = df[features] y = df['G3'] X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.feature_selection import * fs=SelectKBest(score_func=f_regression,k=5) X_new=fs.fit_transform(X_train,y_train) z = zip(fs.get_support(),features) print z x_min, x_max = X_new[:,0].min() - .5, X_new[:, 0].max() + .5 y_min, y_max = y_train.min() - .5, y_train.max() + .5 #fig=plt.figure() #fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05) # Two subplots, unpack the axes array immediately fig, axes = plt.subplots(1,5) fig.set_size_inches(12,12) for i in range(5): axes[i].set_aspect('equal') axes[i].set_title('Feature {}'.format(i)) axes[i].set_xlabel('Feature') axes[i].set_ylabel('Grades') axes[i].set_xlim(x_min, x_max) axes[i].set_ylim(y_min, y_max) plt.sca(axes[i]) plt.scatter(X_new[:,i],y_train) best_features = [] for bool,feature in z: if bool: best_features.append(feature) correlated = best_features + ['G3'] correlated %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns sns.set(style='whitegrid', context='notebook') sns.pairplot(df[correlated], size=2.0); plt.tight_layout() # plt.savefig('./figures/scatter.png', dpi=300) plt.show() import numpy as np cm = np.corrcoef(df[correlated].values.T) sns.set(font_scale=1.5) hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 15}, yticklabels=correlated, xticklabels=correlated) 
plt.tight_layout() # plt.savefig('./figures/corr_mat.png', dpi=300) plt.show() %matplotlib inline import matplotlib.pyplot as plt plt.scatter(df['failures'], df['G3']) plt.xlabel('Failures') plt.ylabel('G3') plt.title('Failures Against G3') plt.show() from sklearn.cross_validation import train_test_split X = df[features].values y = df['G3'].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) slr = LinearRegression() slr.fit(X_train, y_train) y_train_pred = slr.predict(X_train) y_test_pred = slr.predict(X_test) plt.scatter(y_train_pred, y_train_pred - y_train, c='blue', marker='o', label='Training data') plt.scatter(y_test_pred, y_test_pred - y_test, c='lightgreen', marker='s', label='Test data') plt.xlabel('Predicted values') plt.ylabel('Residuals') plt.legend(loc='upper left') plt.hlines(y=0, xmin=0, xmax=20, lw=2, color='red') plt.xlim([0, 20]) plt.tight_layout() # plt.savefig('./figures/slr_residuals.png', dpi=300) plt.show() from sklearn.metrics import r2_score from sklearn.metrics import mean_squared_error print('MSE train: %.3f, test: %.3f' % ( mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred))) print('R^2 train: %.3f, test: %.3f' % ( r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred))) """ Explanation: Case 3 : Regression End of explanation """ from sklearn.linear_model import Lasso lasso = Lasso(alpha=0.1) lasso.fit(X_train, y_train) y_train_pred = lasso.predict(X_train) y_test_pred = lasso.predict(X_test) print(lasso.coef_) print('MSE train: %.3f, test: %.3f' % ( mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred))) print('R^2 train: %.3f, test: %.3f' % ( r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred))) """ Explanation: Using regularized methods for regression A Lasso Regression model can be initialized as follows: End of explanation """ from sklearn.linear_model import Ridge ridge = Ridge(alpha=1.0) ridge.fit(X_train, 
y_train) y_train_pred = ridge.predict(X_train) y_test_pred = ridge.predict(X_test) print(ridge.coef_) """ Explanation: Similiarly Ridge regression can be used: End of explanation """ from sklearn.linear_model import ElasticNet en = ElasticNet(alpha=1.0, l1_ratio=0.5) en.fit(X_train, y_train) y_train_pred = en.predict(X_train) y_test_pred = en.predict(X_test) print(en.coef_) """ Explanation: Lastly, the ElasticNet implementation allows us to vary the L1 to L2 ratio: End of explanation """ X = df[features].values y = df['G3'].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.tree import DecisionTreeRegressor tree = DecisionTreeRegressor(max_depth=3) tree.fit(X_train, y_train) y_train_pred = tree.predict(X_train) y_test_pred = tree.predict(X_test) print('MSE train: %.3f, test: %.3f' % ( mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred))) print('R^2 train: %.3f, test: %.3f' % ( r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred))) """ Explanation: For example, if we set l1_ratio to 1.0, the ElasticNet regressor would be equal to LASSO regression. 
Decision tree regression End of explanation """ X = df[features].values y = df['G3'].values X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0) from sklearn.ensemble import RandomForestRegressor forest = RandomForestRegressor(n_estimators=1000, criterion='mse', random_state=1, n_jobs=-1) forest.fit(X_train, y_train) y_train_pred = forest.predict(X_train) y_test_pred = forest.predict(X_test) print('MSE train: %.3f, test: %.3f' % ( mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred))) print('R^2 train: %.3f, test: %.3f' % ( r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred))) plt.scatter(y_train_pred, y_train_pred - y_train, c='black', marker='o', s=35, alpha=0.5, label='Training data') plt.scatter(y_test_pred, y_test_pred - y_test, c='lightgreen', marker='s', s=35, alpha=0.7, label='Test data') plt.xlabel('Predicted values') plt.ylabel('Residuals') plt.legend(loc='upper left') plt.hlines(y=0, xmin=0, xmax=22, lw=2, color='red') plt.xlim([0, 22]) plt.tight_layout() # plt.savefig('./figures/slr_residuals.png', dpi=300) plt.show() """ Explanation: Random forest regression End of explanation """ X_train, X_test, y_train, y_test = train_test_split(X, y) regressor = LinearRegression() regressor.fit(X_train, y_train) y_predictions = regressor.predict(X_test) print 'R-squared:', regressor.score(X_test, y_test) """ Explanation: Linear Regression End of explanation """ scores = cross_val_score(regressor, X, y, cv=5) print "Average of scores: ", scores.mean() print "Cross validation scores: ", scores plt.scatter(y_test,y_predictions) plt.xlabel('True Quality') plt.ylabel('Predicted Quality') plt.title('Predicted Quality Against True Quality') plt.show() """ Explanation: Cross Validation End of explanation """ # Scaling the features using StandardScaler: X_scaler = StandardScaler() y_scaler = StandardScaler() X_train = X_scaler.fit_transform(X_train) y_train = y_scaler.fit_transform(y_train) X_test 
= X_scaler.transform(X_test) y_test = y_scaler.transform(y_test) regressor = SGDRegressor(loss='squared_loss') scores = cross_val_score(regressor, X_train, y_train, cv=5) print 'Cross validation r-squared scores:', scores print 'Average cross validation r-squared score:', np.mean(scores) regressor.fit_transform(X_train, y_train) print 'Test set r-squared score', regressor.score(X_test, y_test) from sklearn.cross_validation import * def train_and_evaluate(clf, X_train, y_train): clf.fit(X_train, y_train) print "Coefficient of determination on training set:",clf.score(X_train, y_train) # create a k-fold croos validation iterator of k=5 folds cv = KFold(X_train.shape[0], 5, shuffle=True, random_state=33) scores = cross_val_score(clf, X_train, y_train, cv=cv) print "Average coefficient of determination using 5-fold crossvalidation:",np.mean(scores) """ Explanation: Fitting models with gradient descent SGDRegressor End of explanation """ from sklearn import svm clf_svr= svm.SVR(kernel='linear') train_and_evaluate(clf_svr,X_train,y_train) clf_svr_poly= svm.SVR(kernel='poly') train_and_evaluate(clf_svr_poly,X_train,y_train) clf_svr_rbf= svm.SVR(kernel='rbf') train_and_evaluate(clf_svr_rbf,X_train,y_train) clf_svr_poly2= svm.SVR(kernel='poly',degree=2) train_and_evaluate(clf_svr_poly2,X_train,y_train) """ Explanation: Support Vector Machines for regression The regression version of SVM can be used instead to find the hyperplane (note how easy is to change the classification method in scikit-learn!). We will try a linear kernel, a polynomial kernel, and finally, a rbf kernel. 
For more information on kernels, see http://scikit-learn.org/stable/modules/svm.html#svm-kernels End of explanation """ from sklearn import ensemble clf_et=ensemble.ExtraTreesRegressor(n_estimators=10,random_state=42) train_and_evaluate(clf_et,X_train,y_train) """ Explanation: Random Forests for Regression Finally, let's try again Random Forests, in their Extra Trees, and Regression version End of explanation """ print np.sort(zip(clf_et.feature_importances_,features),axis=0) """ Explanation: An interesting side effect of random forest classification, is that you can measure how 'important' each feature is when predicting the final result End of explanation """ from sklearn import metrics def measure_performance(X,y,clf, show_accuracy=True, show_classification_report=True, show_confusion_matrix=True, show_r2_score=False): y_pred=clf.predict(X) if show_accuracy: print "Accuracy:{0:.3f}".format(metrics.accuracy_score(y,y_pred)),"\n" if show_classification_report: print "Classification report" print metrics.classification_report(y,y_pred),"\n" if show_confusion_matrix: print "Confusion matrix" print metrics.confusion_matrix(y,y_pred),"\n" if show_r2_score: print "Coefficient of determination:{0:.3f}".format(metrics.r2_score(y,y_pred)),"\n" measure_performance(X_test,y_test,clf_et, show_accuracy=False, show_classification_report=False, show_confusion_matrix=False, show_r2_score=True) """ Explanation: Finally, evaluate our classifiers on the testing set End of explanation """
temmeand/scikit-rf
doc/source/examples/circuit/Lumped Element Circuits.ipynb
bsd-3-clause
import numpy as np # for np.allclose() to check that S-params are similar import skrf as rf rf.stylely() """ Explanation: Lumped Elements Circuits In this notebook, we construct various network from basic lumped elements (resistor, capacitor, inductor), with the 'classic' and the Circuit approach. Generally the Circuit approach is more verbose than the 'classic' way for building a circuit. However, as the circuit complexity increases, in particular when components are connected in parallel, the Circuit approach is interesting as it increases the readability of the code. Moreover, Circuit object can be plotted using its plot_graph() method, which is usefull to rapidly control if the circuit is built as expected. End of explanation """ # reference LC circuit made in Designer LC_designer = rf.Network('designer_capacitor_30_80MHz_simple.s2p') # scikit-rf: manually connecting networks line = rf.media.DefinedGammaZ0(frequency=LC_designer.frequency, z0=50) LC_manual = line.inductor(24e-9) ** line.capacitor(70e-12) # scikit-rf: using Circuit builder port1 = rf.Circuit.Port(frequency=LC_designer.frequency, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=LC_designer.frequency, name='port2', z0=50) cap = rf.Circuit.SeriesImpedance(frequency=LC_designer.frequency, name='cap', z0=50, Z=1/(1j*LC_designer.frequency.w*70e-12)) ind = rf.Circuit.SeriesImpedance(frequency=LC_designer.frequency, name='ind', z0=50, Z=1j*LC_designer.frequency.w*24e-9) # NB: it is also possible to create 2-port lumped elements like: # line = rf.media.DefinedGammaZ0(frequency=LC_designer.frequency, z0=50) # cap = line.capacitor(70e-12, name='cap') # ind = line.inductor(24e-9, name='ind') connections = [ [(port1, 0), (cap, 0)], [(cap, 1), (ind, 0)], [(ind, 1), (port2, 0)] ] circuit = rf.Circuit(connections) LC_from_circuit = circuit.network # testing the equivalence of the results print(np.allclose(LC_designer.s, LC_manual.s)) print(np.allclose(LC_designer.s, LC_from_circuit.s)) 
circuit.plot_graph(network_labels=True, edge_labels=True, port_labels=True) """ Explanation: LC Series Circuit In this section we reproduce a simple equivalent model of a capacitor $C$, as illustrated by the figure below: <img src="designer_capacitor_simple.png" width="700"> End of explanation """ # Reference results from ANSYS Designer LCC_designer = rf.Network('designer_capacitor_30_80MHz_adv.s2p') # scikit-rf: usual way, but this time this is more tedious to deal with connection and port number freq = LCC_designer.frequency line = rf.media.DefinedGammaZ0(frequency=freq, z0=50) elements1 = line.resistor(1e-2) ** line.inductor(24e-9) ** line.capacitor(70e-12) elements2 = line.resistor(20e6) T_in = line.tee() T_out = line.tee() ntw = rf.connect(T_in, 1, elements1, 0) ntw = rf.connect(ntw, 2, elements2, 0) ntw = rf.connect(ntw, 1, T_out, 1) ntw = rf.innerconnect(ntw, 1, 2) LCC_manual = ntw ** line.shunt_capacitor(50e-12) # scikit-rf: using Circuit builder freq = LCC_designer.frequency port1 = rf.Circuit.Port(frequency=freq, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=freq, name='port2', z0=50) line = rf.media.DefinedGammaZ0(frequency=freq, z0=50) cap = line.capacitor(70e-12, name='cap') ind = line.inductor(24e-9, name='ind') res_series = line.resistor(1e-2, name='res_series') res_parallel = line.resistor(20e6, name='res_parallel') cap_shunt = line.capacitor(50e-12, name='cap_shunt') ground = rf.Circuit.Ground(frequency=freq, name='ground', z0=50) connections = [ [(port1, 0), (res_series, 0), (res_parallel, 0)], [(res_series, 1), (cap, 0)], [(cap, 1), (ind, 0)], [(ind, 1), (cap_shunt, 0), (res_parallel, 1), (port2, 0)], [(cap_shunt, 1), (ground, 0)], ] circuit = rf.Circuit(connections) LCC_from_circuit = circuit.network # testing the equivalence of the results print(np.allclose(LCC_designer.s, LCC_manual.s)) print(np.allclose(LCC_designer.s, LCC_from_circuit.s)) circuit.plot_graph(network_labels=True, edge_labels=True, port_labels=True) """ Explanation: A 
More Advanced Equivalent Model In this section we reproduce an equivalent model of a capacitor $C$, as illustrated by the figure below: <img src="designer_capacitor_adv.png" width="800"> End of explanation """ # Reference result calculated from Designer passband_designer = rf.Network('designer_bandpass_filter_450_550MHz.s2p') # scikit-rf: the filter by cascading all lumped-elements freq = passband_designer.frequency passband_manual = line.shunt_capacitor(25.406e-12) ** line.shunt_inductor(4.154e-9) ** \ line.capacitor(2.419e-12) ** line.inductor(43.636e-9) ** \ line.shunt_capacitor(25.406e-12) ** line.shunt_inductor(4.154e-9) # scikit-rf: the filter with the Circuit builder freq = passband_designer.frequency line = rf.media.DefinedGammaZ0(frequency=freq) C1 = line.capacitor(25.406e-12, name='C1') C2 = line.capacitor(2.419e-12, name='C2') C3 = line.capacitor(25.406e-12, name='C3') L1 = line.inductor(4.154e-9, name='L1') L2 = line.inductor(43.636e-9, name='L2') L3 = line.inductor(4.154e-9, name='L3') port1 = rf.Circuit.Port(frequency=freq, name='port1', z0=50) port2 = rf.Circuit.Port(frequency=freq, name='port2', z0=50) ground1 = rf.Circuit.Ground(frequency=freq, name='ground1', z0=50) ground2 = rf.Circuit.Ground(frequency=freq, name='ground2', z0=50) ground3 = rf.Circuit.Ground(frequency=freq, name='ground3', z0=50) ground4 = rf.Circuit.Ground(frequency=freq, name='ground4', z0=50) connections = [ [(port1, 0), (C1, 0), (L1, 0), (C2, 0)], [(C2, 1), (L2, 0)], [(L2, 1), (C3, 0), (L3, 0), (port2, 0)], # grounding must be done on ground ntw having different names [(C1, 1), (ground1, 0)], [(C3, 1), (ground2, 0)], [(L1, 1), (ground3, 0)], [(L3, 1), (ground4, 0)], ] circuit = rf.Circuit(connections) passband_circuit = circuit.network passband_circuit.name = 'Pass-band circuit' passband_circuit.plot_s_db(m=0, n=0, lw=2) passband_circuit.plot_s_db(m=1, n=0, lw=2) passband_designer.plot_s_db(m=0, n=0, lw=2, ls='-.') passband_designer.plot_s_db(m=1, n=0, lw=2, ls='-.') 
circuit.plot_graph(network_labels=True, port_labels=True, edge_labels=True) """ Explanation: Pass band filter Below we construct a pass-band filter, from an example given in Microwaves101: <img src="designer_bandpass_filter_450_550MHz.png" width="800"> End of explanation """
mas-dse-greina/neon
luna16/old_code/AugmentCandidates.ipynb
apache-2.0
## Create new candidates file import pandas as pd import numpy as np DATA_DIR = "/Volumes/data/tonyr/dicom/LUNA16/" cand_path = 'CSVFILES/candidates_V2.csv' annotations_path = 'CSVFILES/annotations.csv' dfAnnotations = pd.read_csv(DATA_DIR+annotations_path).reset_index() dfAnnotations = dfAnnotations.rename(columns={'index': 'candidate'}) dfCandidates = pd.read_csv(DATA_DIR+cand_path).reset_index() dfCandidates = dfCandidates.rename(columns={'index': 'candidate'}) dfCandidates['diameter_mm'] = np.nan # Set a new column and fill with NaN until we know the true diameter of the candidate dfClass1 = dfCandidates[dfCandidates['class'] == 1].copy(deep=True) # Get only the class 1 (they are the only ones that are labeled) dfCandidates.shape """ Explanation: Combine the candidates.csv and annotations.csv files For some reason, the candidates and annotations files were never merged. The additional information in the annotation file is the nodule size. That could be useful in our models. The sizes are only included for class 1 nodules and not all class 1 nodules have annotations. Also, the annotations are out of order and the candidate centers are slightly different between the annotations and candidates files. I've asked on the LUNA16 mailing list which of the coordinates is more accurate. Still waiting for a response. This script goes through both files and tries to match up the annotation with the correct candidate. It then merges the information and outputs this to a new candidates_plus_annotations.csv file. 
End of explanation """ seriesuid = dfClass1['seriesuid'].unique() # Get the unique series names (subjects) for seriesNum in seriesuid: # Get the annotations for this candidate candAnnotations = dfAnnotations[dfAnnotations['seriesuid']==seriesNum]['candidate'].values candCandidates = dfClass1[dfClass1['seriesuid'] == seriesNum]['candidate'].values # Now loop through annotations to find closest candidate diameterArray = [] for ia in candAnnotations: # Loop through the annotation indices for this seriesuid annotatePoint = dfAnnotations[dfAnnotations['candidate']==ia][['coordX', 'coordY', 'coordZ']].values closestDist = 10000 for ic in candCandidates: # Loop through the candidate indices for this seriesuid candidatePoint = dfCandidates[dfCandidates['candidate']==ic][['coordX', 'coordY', 'coordZ']].values dist = np.linalg.norm(annotatePoint - candidatePoint) # Find euclidean distance between points if dist < closestDist: # If this distance is closer then update array closest = [ia, ic, dfAnnotations[dfAnnotations['candidate']==ia]['diameter_mm'].values[0], dfAnnotations[dfAnnotations['candidate']==ia]['coordX'].values[0], dfAnnotations[dfAnnotations['candidate']==ia]['coordY'].values[0], dfAnnotations[dfAnnotations['candidate']==ia]['coordZ'].values[0]] closestDist = dist # Update with new closest distance diameterArray.append(closest) # Update dfClass1 to include the annotated size of the nodule (diameter_mm) for row in diameterArray: dfClass1.set_value(row[1], 'diameter_mm', row[2]) dfClass1.set_value(row[1], 'coordX_annotated', row[3]) dfClass1.set_value(row[1], 'coordY_annotated', row[4]) dfClass1.set_value(row[1], 'coordZ_annotated', row[5]) """ Explanation: Append nodule size to candidates Loop through the annotations dataframe and look for the closest points to the ROI centers listed in the candidates file. Then update the candidates dataframe with the nodule size listed in the annotated file. 
End of explanation """ dfClass1.iloc[:10,:] del dfCandidates['diameter_mm'] dfOut = dfCandidates.join(dfClass1[['candidate', 'diameter_mm', 'coordX_annotated', 'coordY_annotated', 'coordZ_annotated']], on='candidate', rsuffix='_r') del dfOut['candidate_r'] del dfOut['candidate'] dfOut.to_csv('candidates_with_annotations.csv', index=False) """ Explanation: Not all candidates were annotated It looks like none of the class 0 candidates were annotated. 389 of the 1,557 class 1 nodules are also missing annotations. End of explanation """
tongwang01/tensorflow
tensorflow/examples/udacity/3_regularization-TongCopy1.ipynb
apache-2.0
# These are all the modules we'll be using later. Make sure you can import them # before proceeding further. from __future__ import print_function import numpy as np import tensorflow as tf from six.moves import cPickle as pickle import math """ Explanation: Deep Learning Assignment 3 Previously in 2_fullyconnected.ipynb, you trained a logistic regression and a neural network model. The goal of this assignment is to explore regularization techniques. End of explanation """ pickle_file = 'notMNIST.pickle' with open(pickle_file, 'rb') as f: save = pickle.load(f) train_dataset = save['train_dataset'] train_labels = save['train_labels'] valid_dataset = save['valid_dataset'] valid_labels = save['valid_labels'] test_dataset = save['test_dataset'] test_labels = save['test_labels'] del save # hint to help gc free up memory print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) """ Explanation: First reload the data we generated in notmist.ipynb. End of explanation """ image_size = 28 num_labels = 10 def reformat(dataset, labels): dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32) # Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...] 
labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32) return dataset, labels train_dataset, train_labels = reformat(train_dataset, train_labels) valid_dataset, valid_labels = reformat(valid_dataset, valid_labels) test_dataset, test_labels = reformat(test_dataset, test_labels) print('Training set', train_dataset.shape, train_labels.shape) print('Validation set', valid_dataset.shape, valid_labels.shape) print('Test set', test_dataset.shape, test_labels.shape) def accuracy(predictions, labels): return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0]) """ Explanation: Reformat into a shape that's more adapted to the models we're going to train: - data as a flat matrix, - labels as float 1-hot encodings. End of explanation """ batch_size = 128 beta = 0.001 graph = tf.Graph() with graph.as_default(): # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. weights = tf.Variable( tf.truncated_normal([image_size * image_size, num_labels])) biases = tf.Variable(tf.zeros([num_labels])) # Training computation. logits = tf.matmul(tf_train_dataset, weights) + biases loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + beta * tf.nn.l2_loss(weights) # Optimizer. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. 
train_prediction = tf.nn.softmax(logits) valid_prediction = tf.nn.softmax( tf.matmul(tf_valid_dataset, weights) + biases) test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases) """ Explanation: Problem 1 Introduce and tune L2 regularization for both logistic and neural network models. Remember that L2 amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor t using nn.l2_loss(t). The right amount of regularization should improve your validation / test accuracy. First introduce L2 for the logistic regression model. Build the graph: End of explanation """ num_steps = 3001 with tf.Session(graph=graph) as session: tf.initialize_all_variables().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset[offset:(offset + batch_size), :] batch_labels = train_labels[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run( [optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) """ Explanation: Run it: End of explanation """ batch_size = 128 relu_units = 1024 beta = 0.001 graph = tf.Graph() with graph.as_default(): # Input data. 
For the training data, we use a placeholder that will be fed # at run time with a training minibatch. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. Redefine the variables reflecting NN structure. # First layer w1 = tf.Variable( tf.truncated_normal([image_size * image_size, relu_units])) b1 = tf.Variable(tf.zeros([relu_units])) # Second layer w2 = tf.Variable( tf.truncated_normal([relu_units, num_labels])) b2 = tf.Variable( tf.zeros([num_labels])) # Training computation. logits1 = tf.matmul(tf_train_dataset, w1) + b1 relu1 = tf.nn.relu(logits1) logits2 = tf.matmul(relu1, w2) + b2 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + ( beta * tf.nn.l2_loss(w1)) + ( beta * tf.nn.l2_loss(w2)) # Optimizer. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. train_prediction = tf.nn.softmax(logits2) valid_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2) test_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2) """ Explanation: Introducing a suitable amount of regularization improves validation set and test set accuracy. (Didn't really tune beta; merely tried out a few examples.) Now let's introduce L2 to the 2-layer NN First build the graph: End of explanation """ num_steps = 3001 with tf.Session(graph=graph) as session: tf.initialize_all_variables().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels.shape[0] - batch_size) # Generate a minibatch. 
batch_data = train_dataset[offset:(offset + batch_size), :] batch_labels = train_labels[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) """ Explanation: Now run it: End of explanation """ #Restrict the training data to a few batches batch_size = 128 num_batch = 5 train_dataset_short = train_dataset[0:batch_size * num_batch + 1,] train_labels_short = train_labels[0:batch_size * num_batch + 1,] """ Explanation: 93.1%, the best results we've had so far. Problem 2 Let's demonstrate an extreme case of overfitting. Restrict your training data to just a few batches. What happens? End of explanation """ batch_size = 128 relu_units = 1024 beta = 0 graph = tf.Graph() with graph.as_default(): # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. Redefine the variables reflecting NN structure. 
# First layer w1 = tf.Variable( tf.truncated_normal([image_size * image_size, relu_units])) b1 = tf.Variable(tf.zeros([relu_units])) # Second layer w2 = tf.Variable( tf.truncated_normal([relu_units, num_labels])) b2 = tf.Variable( tf.zeros([num_labels])) # Training computation. logits1 = tf.matmul(tf_train_dataset, w1) + b1 relu1 = tf.nn.relu(logits1) logits2 = tf.matmul(relu1, w2) + b2 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + ( beta * tf.nn.l2_loss(w1)) + ( beta * tf.nn.l2_loss(w2)) # Optimizer. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. train_prediction = tf.nn.softmax(logits2) valid_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2) test_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2) num_steps = 3001 with tf.Session(graph=graph) as session: tf.initialize_all_variables().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels_short.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset_short[offset:(offset + batch_size), :] batch_labels = train_labels_short[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. 
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) """ Explanation: Now re-run the 2-layer NN, set regularization to 0, and see what happens. End of explanation """ batch_size = 128 relu_units = 1024 beta = 0.001 graph = tf.Graph() with graph.as_default(): # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. Redefine the variables reflecting NN structure. # First layer w1 = tf.Variable( tf.truncated_normal([image_size * image_size, relu_units])) b1 = tf.Variable(tf.zeros([relu_units])) # Second layer w2 = tf.Variable( tf.truncated_normal([relu_units, num_labels])) b2 = tf.Variable( tf.zeros([num_labels])) # Training computation. logits1 = tf.matmul(tf_train_dataset, w1) + b1 relu1 = tf.nn.relu(logits1) logits2 = tf.matmul(relu1, w2) + b2 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + ( beta * tf.nn.l2_loss(w1)) + ( beta * tf.nn.l2_loss(w2)) # Optimizer. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. 
train_prediction = tf.nn.softmax(logits2) valid_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2) test_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2) num_steps = 3001 with tf.Session(graph=graph) as session: tf.initialize_all_variables().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels_short.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset_short[offset:(offset + batch_size), :] batch_labels = train_labels_short[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) """ Explanation: The model quickly overfits the training set (actually 'remembers' the entire training set; shows how powerful nn can be). The model stopped improving after that and performs poorly on validation and test sets. Now, let's try to use L2 regularization for this. End of explanation """ #Introduce dropout to the nn batch_size = 128 relu_units = 1024 beta = 0.001 graph = tf.Graph() with graph.as_default(): # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. 
tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. Redefine the variables reflecting NN structure. # First layer w1 = tf.Variable( tf.truncated_normal([image_size * image_size, relu_units])) b1 = tf.Variable(tf.zeros([relu_units])) # Second layer w2 = tf.Variable( tf.truncated_normal([relu_units, num_labels])) b2 = tf.Variable( tf.zeros([num_labels])) # Training computation. logits1 = tf.matmul(tf_train_dataset, w1) + b1 relu1 = tf.nn.dropout(tf.nn.relu(logits1), keep_prob = 0.5) logits2 = tf.matmul(relu1, w2) + b2 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + ( beta * tf.nn.l2_loss(w1)) + ( beta * tf.nn.l2_loss(w2)) # Optimizer. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. train_prediction = tf.nn.softmax(logits2) valid_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2) test_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2) num_steps = 3001 with tf.Session(graph=graph) as session: tf.initialize_all_variables().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels_short.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset_short[offset:(offset + batch_size), :] batch_labels = train_labels_short[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. 
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) """ Explanation: Problem 3 Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation, otherwise your evaluation results would be stochastic as well. TensorFlow provides nn.dropout() for that, but you have to make sure it's only inserted during training. What happens to our extreme overfitting case? End of explanation """ #Introduce dropout to the nn batch_size = 128 relu_units = 1024 beta = 0.001 graph1 = tf.Graph() with graph1.as_default(): # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. Redefine the variables reflecting NN structure. # First layer w1 = tf.Variable( tf.truncated_normal([image_size * image_size, relu_units])) b1 = tf.Variable(tf.zeros([relu_units])) # Second layer w2 = tf.Variable( tf.truncated_normal([relu_units, num_labels])) b2 = tf.Variable( tf.zeros([num_labels])) # Training computation. 
logits1 = tf.matmul(tf_train_dataset, w1) + b1 relu1 = tf.nn.dropout(tf.nn.relu(logits1), keep_prob = 0.5) logits2 = tf.matmul(relu1, w2) + b2 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits2, tf_train_labels)) + ( beta * tf.nn.l2_loss(w1)) + ( beta * tf.nn.l2_loss(w2)) # Optimizer. Use learning rate decay. optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss) # Predictions for the training, validation, and test data. train_prediction = tf.nn.softmax(logits2) valid_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2) test_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2) """ Explanation: Improved somewhat, but not an aweful lot. Why? Problem 4 Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is 97.1%. One avenue you can explore is to add multiple layers. Another one is to use learning rate decay: global_step = tf.Variable(0) # count the number of steps taken. learning_rate = tf.train.exponential_decay(0.5, global_step, ...) optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step) Train with the full data set and use dropout. See how good we get. End of explanation """ num_steps = 3001 with tf.Session(graph=graph1) as session: tf.initialize_all_variables().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset[offset:(offset + batch_size), :] batch_labels = train_labels[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. 
# The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) """ Explanation: Train with full dataset End of explanation """ batch_size = 128 relu_units1 = 1024 relu_units2 = 512 #beta = 0 graph2 = tf.Graph() with graph2.as_default(): # Input data. For the training data, we use a placeholder that will be fed # at run time with a training minibatch. tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size)) tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels)) tf_valid_dataset = tf.constant(valid_dataset) tf_test_dataset = tf.constant(test_dataset) # Variables. Redefine the variables reflecting NN structure. # First layer w1 = tf.Variable( tf.truncated_normal([image_size * image_size, relu_units1], stddev=0.01)) b1 = tf.Variable(tf.zeros([relu_units1])) # Second layer w2 = tf.Variable( tf.truncated_normal([relu_units1, relu_units2], stddev=0.01)) b2 = tf.Variable( tf.zeros([relu_units2])) # Third layer w3 = tf.Variable( tf.truncated_normal([relu_units2, num_labels], stddev=0.01)) b3 = tf.Variable( tf.zeros([num_labels])) # Training computation. 
logits1 = tf.matmul(tf_train_dataset, w1) + b1 relu1 = tf.nn.dropout(tf.nn.relu(logits1), keep_prob = 0.5) logits2 = tf.matmul(relu1, w2) + b2 relu2 = tf.nn.dropout(tf.nn.relu(logits2), keep_prob = 0.5) logits3 = tf.matmul(relu2, w3) + b3 loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits3, tf_train_labels)) # Optimizer. Use learning rate decay. global_step = tf.Variable(0) # count the number of steps taken. learning_rate = tf.train.exponential_decay(learning_rate=0.3, global_step=global_step, decay_steps=1000, decay_rate=0.98) optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step) # optimizer = tf.train.GradientDescentOptimizer(0.3).minimize(loss) # Predictions for the training, validation, and test data. train_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_train_dataset, w1) + b1), w2) + b2), w3) + b3) valid_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_valid_dataset, w1) + b1), w2) + b2), w3) + b3) test_prediction = tf.nn.softmax( tf.matmul(tf.nn.relu(tf.matmul(tf.nn.relu(tf.matmul(tf_test_dataset, w1) + b1), w2) + b2), w3) + b3) num_steps = 50001 with tf.Session(graph=graph2) as session: tf.initialize_all_variables().run() print("Initialized") for step in range(num_steps): # Pick an offset within the training data, which has been randomized. # Note: we could use better randomization across epochs. offset = (step * batch_size) % (train_labels.shape[0] - batch_size) # Generate a minibatch. batch_data = train_dataset[offset:(offset + batch_size), :] batch_labels = train_labels[offset:(offset + batch_size), :] # Prepare a dictionary telling the session where to feed the minibatch. # The key of the dictionary is the placeholder node of the graph to be fed, # and the value is the numpy array to feed to it. 
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels} _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict) if (step % 500 == 0): print("Minibatch loss at step %d: %f" % (step, l)) print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)) print("Validation accuracy: %.1f%%" % accuracy( valid_prediction.eval(), valid_labels)) print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)) """ Explanation: 91.8%: actually worse than without dropout. Increased training to 20k steps, 93.8% test accuracy. Next steps, let's try: * Use a more complex network (add one more layer) * Use learning rate decay Add one more layer into the nn, same activation function; 512 relu units End of explanation """
Lstyle1/Deep_learning_projects
transfer-learning/Transfer_Learning_Solution.ipynb
mit
from urllib.request import urlretrieve from os.path import isfile, isdir from tqdm import tqdm vgg_dir = 'tensorflow_vgg/' # Make sure vgg exists if not isdir(vgg_dir): raise Exception("VGG directory doesn't exist!") class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile(vgg_dir + "vgg16.npy"): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='VGG16 Parameters') as pbar: urlretrieve( 'https://s3.amazonaws.com/content.udacity-data.com/nd101/vgg16.npy', vgg_dir + 'vgg16.npy', pbar.hook) else: print("Parameter file already exists!") """ Explanation: Transfer Learning Most of the time you won't want to train a whole convolutional network yourself. Modern ConvNets training on huge datasets like ImageNet take weeks on multiple GPUs. Instead, most people use a pretrained network either as a fixed feature extractor, or as an initial network to fine tune. In this notebook, you'll be using VGGNet trained on the ImageNet dataset as a feature extractor. Below is a diagram of the VGGNet architecture. <img src="assets/cnnarchitecture.jpg" width=700px> VGGNet is great because it's simple and has great performance, coming in second in the ImageNet competition. The idea here is that we keep all the convolutional layers, but replace the final fully connected layers with our own classifier. This way we can use VGGNet as a feature extractor for our images then easily train a simple classifier on top of that. What we'll do is take the first fully connected layer with 4096 units, including thresholding with ReLUs. We can use those values as a code for each image, then build a classifier on top of those codes. You can read more about transfer learning from the CS231n course notes. Pretrained VGGNet We'll be using a pretrained network from https://github.com/machrisaa/tensorflow-vgg. 
This is a really nice implementation of VGGNet, quite easy to work with. The network has already been trained and the parameters are available from this link. End of explanation """ import tarfile dataset_folder_path = 'flower_photos' class DLProgress(tqdm): last_block = 0 def hook(self, block_num=1, block_size=1, total_size=None): self.total = total_size self.update((block_num - self.last_block) * block_size) self.last_block = block_num if not isfile('flower_photos.tar.gz'): with DLProgress(unit='B', unit_scale=True, miniters=1, desc='Flowers Dataset') as pbar: urlretrieve( 'http://download.tensorflow.org/example_images/flower_photos.tgz', 'flower_photos.tar.gz', pbar.hook) if not isdir(dataset_folder_path): with tarfile.open('flower_photos.tar.gz') as tar: tar.extractall() tar.close() """ Explanation: Flower power Here we'll be using VGGNet to classify images of flowers. To get the flower dataset, run the cell below. This dataset comes from the TensorFlow inception tutorial. End of explanation """ import os import numpy as np import tensorflow as tf from tensorflow_vgg import vgg16 from tensorflow_vgg import utils data_dir = 'flower_photos/' contents = os.listdir(data_dir) classes = [each for each in contents if os.path.isdir(data_dir + each)] """ Explanation: ConvNet Codes Below, we'll run through all the images in our dataset and get codes for each of them. That is, we'll run the images through the VGGNet convolutional layers and record the values of the first fully connected layer. We can then write these to a file for later when we build our own classifier. Here we're using the vgg16 module from tensorflow_vgg. The network takes images of size $244 \times 224 \times 3$ as input. Then it has 5 sets of convolutional layers. 
The network implemented here has this structure (copied from the source code: ``` self.conv1_1 = self.conv_layer(bgr, "conv1_1") self.conv1_2 = self.conv_layer(self.conv1_1, "conv1_2") self.pool1 = self.max_pool(self.conv1_2, 'pool1') self.conv2_1 = self.conv_layer(self.pool1, "conv2_1") self.conv2_2 = self.conv_layer(self.conv2_1, "conv2_2") self.pool2 = self.max_pool(self.conv2_2, 'pool2') self.conv3_1 = self.conv_layer(self.pool2, "conv3_1") self.conv3_2 = self.conv_layer(self.conv3_1, "conv3_2") self.conv3_3 = self.conv_layer(self.conv3_2, "conv3_3") self.pool3 = self.max_pool(self.conv3_3, 'pool3') self.conv4_1 = self.conv_layer(self.pool3, "conv4_1") self.conv4_2 = self.conv_layer(self.conv4_1, "conv4_2") self.conv4_3 = self.conv_layer(self.conv4_2, "conv4_3") self.pool4 = self.max_pool(self.conv4_3, 'pool4') self.conv5_1 = self.conv_layer(self.pool4, "conv5_1") self.conv5_2 = self.conv_layer(self.conv5_1, "conv5_2") self.conv5_3 = self.conv_layer(self.conv5_2, "conv5_3") self.pool5 = self.max_pool(self.conv5_3, 'pool5') self.fc6 = self.fc_layer(self.pool5, "fc6") self.relu6 = tf.nn.relu(self.fc6) ``` So what we want are the values of the first fully connected layer, after being ReLUd (self.relu6). To build the network, we use with tf.Session() as sess: vgg = vgg16.Vgg16() input_ = tf.placeholder(tf.float32, [None, 224, 224, 3]) with tf.name_scope("content_vgg"): vgg.build(input_) This creates the vgg object, then builds the graph with vgg.build(input_). 
Then to get the values from the layer, feed_dict = {input_: images} codes = sess.run(vgg.relu6, feed_dict=feed_dict) End of explanation """ # Set the batch size higher if you can fit in in your GPU memory batch_size = 10 codes_list = [] labels = [] batch = [] codes = None with tf.Session() as sess: vgg = vgg16.Vgg16() input_ = tf.placeholder(tf.float32, [None, 224, 224, 3]) with tf.name_scope("content_vgg"): vgg.build(input_) for each in classes: print("Starting {} images".format(each)) class_path = data_dir + each files = os.listdir(class_path) for ii, file in enumerate(files, 1): # Add images to the current batch # utils.load_image crops the input images for us, from the center img = utils.load_image(os.path.join(class_path, file)) batch.append(img.reshape((1, 224, 224, 3))) labels.append(each) # Running the batch through the network to get the codes if ii % batch_size == 0 or ii == len(files): images = np.concatenate(batch) feed_dict = {input_: images} codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict) # Here I'm building an array of the codes if codes is None: codes = codes_batch else: codes = np.concatenate((codes, codes_batch)) # Reset to start building the next batch batch = [] print('{} images processed'.format(ii)) # write codes to file with open('codes', 'w') as f: codes.tofile(f) # write labels to file import csv with open('labels', 'w') as f: writer = csv.writer(f, delimiter='\n') writer.writerow(labels) """ Explanation: Below I'm running images through the VGG network in batches. End of explanation """ # read codes and labels from file import csv with open('labels') as f: reader = csv.reader(f, delimiter='\n') labels = np.array([each for each in reader if len(each) > 0]).squeeze() with open('codes') as f: codes = np.fromfile(f, dtype=np.float32) codes = codes.reshape((len(labels), -1)) """ Explanation: Building the Classifier Now that we have codes for all the images, we can build a simple classifier on top of them. 
The codes behave just like normal input into a simple neural network. Below I'm going to have you do most of the work. End of explanation """ from sklearn.preprocessing import LabelBinarizer lb = LabelBinarizer() lb.fit(labels) labels_vecs = lb.transform(labels) """ Explanation: Data prep As usual, now we need to one-hot encode our labels and create validation/test sets. First up, creating our labels! Exercise: From scikit-learn, use LabelBinarizer to create one-hot encoded vectors from the labels. End of explanation """ from sklearn.model_selection import StratifiedShuffleSplit ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2) train_idx, val_idx = next(ss.split(codes, labels)) half_val_len = int(len(val_idx)/2) val_idx, test_idx = val_idx[:half_val_len], val_idx[half_val_len:] train_x, train_y = codes[train_idx], labels_vecs[train_idx] val_x, val_y = codes[val_idx], labels_vecs[val_idx] test_x, test_y = codes[test_idx], labels_vecs[test_idx] print("Train shapes (x, y):", train_x.shape, train_y.shape) print("Validation shapes (x, y):", val_x.shape, val_y.shape) print("Test shapes (x, y):", test_x.shape, test_y.shape) """ Explanation: Now you'll want to create your training, validation, and test sets. An important thing to note here is that our labels and data aren't randomized yet. We'll want to shuffle our data so the validation and test sets contain data from all classes. Otherwise, you could end up with testing sets that are all one class. Typically, you'll also want to make sure that each smaller set has the same the distribution of classes as it is for the whole data set. The easiest way to accomplish both these goals is to use StratifiedShuffleSplit from scikit-learn. You can create the splitter like so: ss = StratifiedShuffleSplit(n_splits=1, test_size=0.2) Then split the data with splitter = ss.split(x, y) ss.split returns a generator of indices. You can pass the indices into the arrays to get the split sets. 
The fact that it's a generator means you either need to iterate over it, or use next(splitter) to get the indices. Be sure to read the documentation and the user guide. Exercise: Use StratifiedShuffleSplit to split the codes and labels into training, validation, and test sets. End of explanation """ inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]]) labels_ = tf.placeholder(tf.int64, shape=[None, labels_vecs.shape[1]]) fc = tf.contrib.layers.fully_connected(inputs_, 256) logits = tf.contrib.layers.fully_connected(fc, labels_vecs.shape[1], activation_fn=None) cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits) cost = tf.reduce_mean(cross_entropy) optimizer = tf.train.AdamOptimizer().minimize(cost) predicted = tf.nn.softmax(logits) correct_pred = tf.equal(tf.argmax(predicted, 1), tf.argmax(labels_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) """ Explanation: If you did it right, you should see these sizes for the training sets: Train shapes (x, y): (2936, 4096) (2936, 5) Validation shapes (x, y): (367, 4096) (367, 5) Test shapes (x, y): (367, 4096) (367, 5) Classifier layers Once you have the convolutional codes, you just need to build a classfier from some fully connected layers. You use the codes as the inputs and the image labels as targets. Otherwise the classifier is a typical neural network. Exercise: With the codes and labels loaded, build the classifier. Consider the codes as your inputs, each of them are 4096D vectors. You'll want to use a hidden layer and an output layer as your classifier. Remember that the output layer needs to have one unit for each class and a softmax activation function. Use the cross entropy to calculate the cost. End of explanation """ def get_batches(x, y, n_batches=10): """ Return a generator that yields batches from arrays x and y. 
""" batch_size = len(x)//n_batches for ii in range(0, n_batches*batch_size, batch_size): # If we're not on the last batch, grab data with size batch_size if ii != (n_batches-1)*batch_size: X, Y = x[ii: ii+batch_size], y[ii: ii+batch_size] # On the last batch, grab the rest of the data else: X, Y = x[ii:], y[ii:] # I love generators yield X, Y """ Explanation: Batches! Here is just a simple way to do batches. I've written it so that it includes all the data. Sometimes you'll throw out some data at the end to make sure you have full batches. Here I just extend the last batch to include the remaining data. End of explanation """ epochs = 10 iteration = 0 saver = tf.train.Saver() with tf.Session() as sess: sess.run(tf.global_variables_initializer()) for e in range(epochs): for x, y in get_batches(train_x, train_y): feed = {inputs_: x, labels_: y} loss, _ = sess.run([cost, optimizer], feed_dict=feed) print("Epoch: {}/{}".format(e+1, epochs), "Iteration: {}".format(iteration), "Training loss: {:.5f}".format(loss)) iteration += 1 if iteration % 5 == 0: feed = {inputs_: val_x, labels_: val_y} val_acc = sess.run(accuracy, feed_dict=feed) print("Epoch: {}/{}".format(e, epochs), "Iteration: {}".format(iteration), "Validation Acc: {:.4f}".format(val_acc)) saver.save(sess, "checkpoints/flowers.ckpt") """ Explanation: Training Here, we'll train the network. Exercise: So far we've been providing the training code for you. Here, I'm going to give you a bit more of a challenge and have you write the code to train the network. Of course, you'll be able to see my solution if you need help. End of explanation """ with tf.Session() as sess: saver.restore(sess, tf.train.latest_checkpoint('checkpoints')) feed = {inputs_: test_x, labels_: test_y} test_acc = sess.run(accuracy, feed_dict=feed) print("Test accuracy: {:.4f}".format(test_acc)) %matplotlib inline import matplotlib.pyplot as plt from scipy.ndimage import imread """ Explanation: Testing Below you see the test accuracy. 
You can also see the predictions returned for images.
End of explanation
"""
# Load a sample image to classify with the trained model.
# NOTE(review): `imread` here is scipy.ndimage.imread, which was removed in
# SciPy >= 1.2; with a modern SciPy substitute imageio.imread.
test_img_path = 'flower_photos/roses/10894627425_ec76bbc757_n.jpg'
test_img = imread(test_img_path)
plt.imshow(test_img)

# Run this cell if you don't have a vgg graph built
with tf.Session() as sess:
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16.Vgg16()
    vgg.build(input_)

# Run the single image through VGG16 up to relu6 to get its 4096-D code.
with tf.Session() as sess:
    img = utils.load_image(test_img_path)
    img = img.reshape((1, 224, 224, 3))

    feed_dict = {input_: img}
    code = sess.run(vgg.relu6, feed_dict=feed_dict)
        
# Restore the trained classifier and compute class probabilities for the code.
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    
    feed = {inputs_: code}
    prediction = sess.run(predicted, feed_dict=feed).squeeze()

plt.imshow(test_img)

# Horizontal bar chart of the five class probabilities, labeled with the
# class names learned by the LabelBinarizer.
plt.barh(np.arange(5), prediction)
_ = plt.yticks(np.arange(5), lb.classes_)
"""
Explanation: Below, feel free to choose images and see how the trained classifier predicts the flowers in them.
End of explanation
"""
poppy-project/community-notebooks
tutorials-education/poppy-torso__vrep_Prototype d'ininitiation à l'informatique pour les lycéens/dialogue/Dialogue TP1.ipynb
lgpl-3.0
# TP1 "Premier Contact": the PoppyHumanoid "messager" waves hello in the
# V-REP simulator, then shuts the simulation down cleanly.  (Python 2 code.)
import pypot,time
from poppy.creatures import PoppyHumanoid

messager = PoppyHumanoid(simulator='vrep')
time.sleep(1)
# goto_position(target_deg, duration_s[, wait=True]); wait=True blocks until
# the move completes (pypot convention -- confirm against pypot docs).
messager.r_shoulder_x.goto_position(-5,0.5)
messager.l_shoulder_x.goto_position(5,0.5)
messager.head_z.goto_position(30,1,wait=True)
# Raise the left arm and bend the elbow into a greeting pose...
messager.l_shoulder_x.goto_position(90,2)
messager.l_arm_z.goto_position(90,2)
messager.abs_z.goto_position(10,2)
messager.l_elbow_y.goto_position(-120,2,wait=True)
# ...wave three times by oscillating the elbow...
for i in range(3):
    messager.l_elbow_y.goto_position(-90,0.5,wait=True)
    messager.l_elbow_y.goto_position(-120,0.5,wait=True)
# ...then return to the rest pose and stop the simulation.
messager.l_shoulder_x.goto_position(5,2)
messager.l_arm_z.goto_position(0,2)
messager.l_elbow_y.goto_position(0,2,wait=True)
messager.head_y.goto_position(10,1)
messager.head_z.goto_position(0,1,wait=True)
time.sleep(1)
messager.stop_simulation()
pypot.vrep.close_all_connections()
"""
Explanation: ET – Niveau 1 - Python
TP1 Premier Contact
Le projet Seti vient de faire un communiqué: ils ont établi le contact! Grâce à la puissance cumulée de milliers d'ordinateurs, ils ont pu retranscrire visuellement une partie du message intercepté depuis l'espace. le voici:
éxécutes le code grâce au raccourci 'ctrl + enter'
End of explanation
"""
# Create our own simulated robot, a PoppyTorso, used as interpreter below.
import time
from poppy.creatures import PoppyTorso
poppy=PoppyTorso(simulator='vrep')
"""
Explanation: Ce messager semble amical ; nous devrions essayer de lui repondre. Mais pour communiquer il nous faut un langage commun...
<u>essayons:</u>
End of explanation
"""
# yes ("oui"): nod -- toggle the head pitch (head_y) between +/-20 degrees.
# goal_position moves at the motor's default speed (no duration argument).
poppy.head_y.goal_position = 20
time.sleep(1)
poppy.head_y.goal_position = -20
time.sleep(1)
poppy.head_y.goal_position = 20
time.sleep(1)
poppy.head_y.goal_position = -20
time.sleep(1)
poppy.head_y.goal_position = 0
"""
Explanation: Voilà notre interprète, Poppy. C'est lui qui va nous servir à communiquer, comment me direz vous : grâce au langage des signes!
C'est parti! Tu es prêt Poppy?
End of explanation
"""
# no ("non"): shake -- toggle the head yaw (head_z) between +/-40 degrees.
poppy.head_z.goal_position = 40
time.sleep(1)
poppy.head_z.goal_position = -40
time.sleep(1)
poppy.head_z.goal_position = 40
time.sleep(1)
poppy.head_z.goal_position = -40
time.sleep(1)
poppy.head_z.goal_position = 0
"""
Explanation: oh! tu bouges vraiment comme un robot... pas terrible tout ça ; tu n'es pas d'accord ?
End of explanation
"""
# NO ("NON"): emphatic shake -- wider (+/-90 degrees) and faster (0.5 s pauses).
poppy.head_z.goal_position = 90
time.sleep(0.5)
poppy.head_z.goal_position = -90
time.sleep(0.5)
poppy.head_z.goal_position = 90
time.sleep(0.5)
poppy.head_z.goal_position = -90
time.sleep(0.5)
poppy.head_z.goal_position = 0
"""
Explanation: Doucement, pas besoin de s'énerver... il n'y aurait pas un moyen de controler la vitesse de tes mouvements?
End of explanation
"""
# yes ("oui"): nod again, a little wider than the first time.
poppy.head_y.goal_position = 40
time.sleep(0.5)
poppy.head_y.goal_position = -40
time.sleep(1)
poppy.head_y.goal_position = 40
time.sleep(1)
poppy.head_y.goal_position = -40
time.sleep(1)
poppy.head_y.goal_position = 0
"""
Explanation: Comment ?!
End of explanation
"""
# shhh ("chutttt"): raise the left arm in a "quiet down" gesture using timed
# goto_position moves, then relax with instantaneous goal_position writes.
poppy.l_arm_z.goto_position(-40,1,wait=True)
poppy.l_shoulder_y.goto_position(-40,1,wait=True)
poppy.l_elbow_y.goto_position(40,1,wait=True)
time.sleep(1)
poppy.l_arm_z.goal_position=0
poppy.l_shoulder_y.goal_position=-90
poppy.l_elbow_y.goal_position=20
"""
Explanation: mais c'est quoi ce "goto" ???! tu m'expliques ?
End of explanation
"""
# NO ("NON"): head shakes with goto_position -- the second argument is the
# move duration, so each swing gets slower (0.3 s up to 5 s).
poppy.head_z.goto_position(40, 0.3 ,wait=True)
poppy.head_z.goto_position(-40, 0.3 ,wait=True)
poppy.head_z.goto_position(40, 0.9 ,wait=True)
poppy.head_z.goto_position(-40, .9 ,wait=True)
poppy.head_z.goto_position(40, 1 ,wait=True)
poppy.head_z.goto_position(-40, 1.5 ,wait=True)
poppy.head_z.goto_position(40, 3 ,wait=True)
poppy.head_z.goto_position(-40, 5 ,wait=True)
poppy.head_z.goal_position = 0
"""
Explanation: Pas grave, j'ai compris ^^ ! j'aimerais en savoir un peu plus sur toi, tu as beaucoup de moteurs ?
End of explanation
"""
# Python 2 print statement: how many motors the robot has.
print len(poppy.motors)
"""
Explanation: hey, mais c'est encore ce poppy.motors?
End of explanation
"""
# poppy.motors is a plain Python list of motor objects.
print type(poppy.motors)
"""
Explanation: C'est quoi une liste ?!
End of explanation
"""
# Iterate over the motor list and print each motor's name.
for m in poppy.motors:
    print m.name
print "----"
"""
Explanation: ah, c'est une sorte de boite qui contient de l'information ?
End of explanation
"""
# YES ("OUI"): nods at durations stepping from 0.50 s to 1.25 s
# (i runs over 50, 75, 100, 125 and is rescaled into seconds by 0.01).
for i in range(50,150,25):
    i= i*0.01
    poppy.head_y.goto_position(15, i ,wait=True)
    poppy.head_y.goto_position(-15, i ,wait=True)
    print i
poppy.head_y.goto_position(0,0.1,wait=True)
"""
Explanation: Et si on communiquait...
Trouves les commandes adéquates pour que Poppy te salue de la main.
End of explanation
"""
# try your own code here!
"""
Explanation: Tu as raté? c'est pas grâve, recommmence, essaies ces lignes pour redémarrer :
End of explanation
"""
# NOTE(review): this resets the "messager" (PoppyHumanoid) instance, not
# "poppy" -- kept as in the original tutorial, but worth verifying.
messager.reset_simulation()
"""
Explanation: Encore buger ? essaies celles-ci :
End of explanation
"""
# Harder restart: close the simulation and all connections, then re-create
# the PoppyTorso from scratch.
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()

from poppy.creatures import PoppyTorso
poppy=PoppyTorso(simulator='vrep')
"""
Explanation: Tu as fini? coupes la simulation ici:
End of explanation
"""
# Cleanup: stop the simulation and close every V-REP connection.
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()
"""
Explanation: ...
End of explanation
"""
site/en-snapshot/quantum/tutorials/barren_plateaus.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2020 The TensorFlow Authors. End of explanation """ !pip install tensorflow==2.7.0 """ Explanation: Barren plateaus <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/barren_plateaus"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/barren_plateaus.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/barren_plateaus.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/barren_plateaus.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In this example you will explore the result of <a href="https://www.nature.com/articles/s41467-018-07090-4" class="external">McClean, 2019</a> that says not just any quantum neural network structure will do well when it comes to learning. 
In particular you will see that a certain large family of random quantum circuits do not serve as good quantum neural networks, because they have gradients that vanish almost everywhere. In this example you won't be training any models for a specific learning problem, but instead focusing on the simpler problem of understanding the behaviors of gradients. Setup End of explanation """ !pip install tensorflow-quantum # Update package resources to account for version changes. import importlib, pkg_resources importlib.reload(pkg_resources) """ Explanation: Install TensorFlow Quantum: End of explanation """ import tensorflow as tf import tensorflow_quantum as tfq import cirq import sympy import numpy as np # visualization tools %matplotlib inline import matplotlib.pyplot as plt from cirq.contrib.svg import SVGCircuit np.random.seed(1234) """ Explanation: Now import TensorFlow and the module dependencies: End of explanation """ def generate_random_qnn(qubits, symbol, depth): """Generate random QNN's with the same structure from McClean et al.""" circuit = cirq.Circuit() for qubit in qubits: circuit += cirq.ry(np.pi / 4.0)(qubit) for d in range(depth): # Add a series of single qubit rotations. for i, qubit in enumerate(qubits): random_n = np.random.uniform() random_rot = np.random.uniform( ) * 2.0 * np.pi if i != 0 or d != 0 else symbol if random_n > 2. / 3.: # Add a Z. circuit += cirq.rz(random_rot)(qubit) elif random_n > 1. / 3.: # Add a Y. circuit += cirq.ry(random_rot)(qubit) else: # Add a X. circuit += cirq.rx(random_rot)(qubit) # Add CZ ladder. for src, dest in zip(qubits, qubits[1:]): circuit += cirq.CZ(src, dest) return circuit generate_random_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2) """ Explanation: 1. Summary Random quantum circuits with many blocks that look like this ($R_{P}(\theta)$ is a random Pauli rotation):<br/> <img src="./images/barren_2.png" width=700> Where if $f(x)$ is defined as the expectation value w.r.t. 
$Z_{a}Z_{b}$ for any qubits $a$ and $b$, then there is a problem that $f'(x)$ has a mean very close to 0 and does not vary much. You will see this below: 2. Generating random circuits The construction from the paper is straightforward to follow. The following implements a simple function that generates a random quantum circuit—sometimes referred to as a quantum neural network (QNN)—with the given depth on a set of qubits: End of explanation """ def process_batch(circuits, symbol, op): """Compute the variance of a batch of expectations w.r.t. op on each circuit that contains `symbol`. Note that this method sets up a new compute graph every time it is called so it isn't as performant as possible.""" # Setup a simple layer to batch compute the expectation gradients. expectation = tfq.layers.Expectation() # Prep the inputs as tensors circuit_tensor = tfq.convert_to_tensor(circuits) values_tensor = tf.convert_to_tensor( np.random.uniform(0, 2 * np.pi, (n_circuits, 1)).astype(np.float32)) # Use TensorFlow GradientTape to track gradients. with tf.GradientTape() as g: g.watch(values_tensor) forward = expectation(circuit_tensor, operators=op, symbol_names=[symbol], symbol_values=values_tensor) # Return variance of gradients across all circuits. grads = g.gradient(forward, values_tensor) grad_var = tf.math.reduce_std(grads, axis=0) return grad_var.numpy()[0] """ Explanation: The authors investigate the gradient of a single parameter $\theta_{1,1}$. Let's follow along by placing a sympy.Symbol in the circuit where $\theta_{1,1}$ would be. Since the authors do not analyze the statistics for any other symbols in the circuit, let's replace them with random values now instead of later. 3. Running the circuits Generate a few of these circuits along with an observable to test the claim that the gradients don't vary much. First, generate a batch of random circuits. Choose a random ZZ observable and batch calculate the gradients and variance using TensorFlow Quantum. 
3.1 Batch variance computation Let's write a helper function that computes the variance of the gradient of a given observable over a batch of circuits: End of explanation """ n_qubits = [2 * i for i in range(2, 7) ] # Ranges studied in paper are between 2 and 24. depth = 50 # Ranges studied in paper are between 50 and 500. n_circuits = 200 theta_var = [] for n in n_qubits: # Generate the random circuits and observable for the given n. qubits = cirq.GridQubit.rect(1, n) symbol = sympy.Symbol('theta') circuits = [ generate_random_qnn(qubits, symbol, depth) for _ in range(n_circuits) ] op = cirq.Z(qubits[0]) * cirq.Z(qubits[1]) theta_var.append(process_batch(circuits, symbol, op)) plt.semilogy(n_qubits, theta_var) plt.title('Gradient Variance in QNNs') plt.xlabel('n_qubits') plt.xticks(n_qubits) plt.ylabel('$\\partial \\theta$ variance') plt.show() """ Explanation: 3.1 Set up and run Choose the number of random circuits to generate along with their depth and the amount of qubits they should act on. Then plot the results. End of explanation """ def generate_identity_qnn(qubits, symbol, block_depth, total_depth): """Generate random QNN's with the same structure from Grant et al.""" circuit = cirq.Circuit() # Generate initial block with symbol. prep_and_U = generate_random_qnn(qubits, symbol, block_depth) circuit += prep_and_U # Generate dagger of initial block without symbol. U_dagger = (prep_and_U[1:])**-1 circuit += cirq.resolve_parameters( U_dagger, param_resolver={symbol: np.random.uniform() * 2 * np.pi}) for d in range(total_depth - 1): # Get a random QNN. 
prep_and_U_circuit = generate_random_qnn( qubits, np.random.uniform() * 2 * np.pi, block_depth) # Remove the state-prep component U_circuit = prep_and_U_circuit[1:] # Add U circuit += U_circuit # Add U^dagger circuit += U_circuit**-1 return circuit generate_identity_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2, 2) """ Explanation: This plot shows that for quantum machine learning problems, you can't simply guess a random QNN ansatz and hope for the best. Some structure must be present in the model circuit in order for gradients to vary to the point where learning can happen. 4. Heuristics An interesting heuristic by <a href="https://arxiv.org/pdf/1903.05076.pdf" class="external">Grant, 2019</a> allows one to start very close to random, but not quite. Using the same circuits as McClean et al., the authors propose a different initialization technique for the classical control parameters to avoid barren plateaus. The initialization technique starts some layers with totally random control parameters—but, in the layers immediately following, choose parameters such that the initial transformation made by the first few layers is undone. The authors call this an identity block. The advantage of this heuristic is that by changing just a single parameter, all other blocks outside of the current block will remain the identity—and the gradient signal comes through much stronger than before. This allows the user to pick and choose which variables and blocks to modify to get a strong gradient signal. This heuristic does not prevent the user from falling in to a barren plateau during the training phase (and restricts a fully simultaneous update), it just guarantees that you can start outside of a plateau. 4.1 New QNN construction Now construct a function to generate identity block QNNs. This implementation is slightly different than the one from the paper. 
For now, look at the behavior of the gradient of a single parameter so it is consistent with McClean et al, so some simplifications can be made. To generate an identity block and train the model, generally you need $U1(\theta_{1a}) U1(\theta_{1b})^{\dagger}$ and not $U1(\theta_1) U1(\theta_1)^{\dagger}$. Initially $\theta_{1a}$ and $\theta_{1b}$ are the same angles but they are learned independently. Otherwise, you will always get the identity even after training. The choice for the number of identity blocks is empirical. The deeper the block, the smaller the variance in the middle of the block. But at the start and end of the block, the variance of the parameter gradients should be large. End of explanation """ block_depth = 10 total_depth = 5 heuristic_theta_var = [] for n in n_qubits: # Generate the identity block circuits and observable for the given n. qubits = cirq.GridQubit.rect(1, n) symbol = sympy.Symbol('theta') circuits = [ generate_identity_qnn(qubits, symbol, block_depth, total_depth) for _ in range(n_circuits) ] op = cirq.Z(qubits[0]) * cirq.Z(qubits[1]) heuristic_theta_var.append(process_batch(circuits, symbol, op)) plt.semilogy(n_qubits, theta_var) plt.semilogy(n_qubits, heuristic_theta_var) plt.title('Heuristic vs. Random') plt.xlabel('n_qubits') plt.xticks(n_qubits) plt.ylabel('$\\partial \\theta$ variance') plt.show() """ Explanation: 4.2 Comparison Here you can see that the heuristic does help to keep the variance of the gradient from vanishing as quickly: End of explanation """
VectorBlox/PYNQ
Pynq-Z1/notebooks/examples/pmod_grove_adc.ipynb
bsd-3-clause
from pynq import Overlay Overlay("base.bit").download() from pynq.iop import Grove_ADC from pynq.iop import PMODA from pynq.iop import PMOD_GROVE_G4 grove_adc = Grove_ADC(PMODA, PMOD_GROVE_G4) print("{} V".format(round(grove_adc.read(),4))) """ Explanation: Grove ADC Example This example shows how to use the Grove ADC. A Grove I2C ADC (v1.2) and PYNQ Grove Adapter are required. An analog input is also required. In this example, the Grove slide potentiometer was used. In the example, the ADC is initialized, a test read is done, and then the sensor is set to log a reading every 100 milliseconds. The ADC can be connected to any Grove peripheral that provides an analog voltage. 1. Using Pmod to Grove Adapter This example uses the PYNQ Pmod to Grove adapter. The adapter is connected to PMODA, and the grove ADC is connected to group G4 on adapter. 1. Simple ADC read() End of explanation """ grove_adc.set_log_interval_ms(100) grove_adc.start_log() """ Explanation: 2. Starting logging once every 100 milliseconds End of explanation """ log = grove_adc.get_log() """ Explanation: 3. Try to change the input signal during the logging. For example, if using the Grove slide potentiometer, move the slider back and forth (slowly). Stop the logging whenever done trying to change sensor's value. End of explanation """ %matplotlib inline import matplotlib.pyplot as plt plt.plot(range(len(log)), log, 'ro') plt.title('Grove ADC Voltage Log') plt.axis([0, len(log), min(log), max(log)]) plt.show() """ Explanation: 4. Plot values over time The voltage values can be logged and displayed. End of explanation """ from pynq.iop import Grove_ADC from pynq.iop import ARDUINO from pynq.iop import ARDUINO_GROVE_I2C grove_adc = Grove_ADC(ARDUINO, ARDUINO_GROVE_I2C) print("{} V".format(round(grove_adc.read(),4))) """ Explanation: 2. Using Arduino Shield This example uses the PYNQ Arduino shield. The grove ADC can be connected to any of the I2C groups on the shield. 1. 
Instantiation and read a single value End of explanation """ grove_adc.set_log_interval_ms(100) grove_adc.start_log() """ Explanation: 2. Starting logging once every 100 milliseconds End of explanation """ log = grove_adc.get_log() """ Explanation: 3. Try to change the input signal during the logging. For example, if using the Grove slide potentiometer, move the slider back and forth (slowly). Stop the logging whenever done trying to change sensor's value. End of explanation """ %matplotlib inline import matplotlib.pyplot as plt plt.plot(range(len(log)), log, 'ro') plt.title('Grove ADC Voltage Log') plt.axis([0, len(log), min(log), max(log)]) plt.show() """ Explanation: 4. Plot values over time The voltage values can be logged and displayed. End of explanation """
ljvmiranda921/pyswarms
docs/examples/usecases/electric_circuit_problem.ipynb
mit
# Import modules import sys import numpy as np import matplotlib.pyplot as plt # Import PySwarms import pyswarms as ps print('Running on Python version: {}'.format(sys.version)) """ Explanation: Solving an electric circuit using Particle Swarm Optimization Introduction PSO can be utilized in a wide variety of fields. In this example, the problem consists of analysing a given electric circuit and finding the electric current that flows through it. To accomplish this, the pyswarms library will be used to solve a non-linear equation by restructuring it as an optimization problem. The circuit is composed by a source, a resistor and a diode, as shown below. Mathematical Formulation Kirchhoff's voltage law states that the directed sum of the voltages around any closed loop is zero. In other words, the sum of the voltages of the passive elements must be equal to the sum of the voltages of the active elements, as expressed by the following equation: $U = v_D + v_R $, where $U$ represents the voltage of the source and, $v_D$ and $v_R$ represent the voltage of the diode and the resistor, respectively. To determine the current flowing through the circuit, $v_D$ and $v_R$ need to be defined as functions of $I$. A simplified Shockley equation will be used to formulate the current-voltage characteristic function of the diode. This function relates the current that flows through the diode with the voltage across it. Both $I_s$ and $v_T$ are known properties. 
$I = I_s e^{\frac{v_D}{v_T}}$ Where: $I$ : diode current $I_s$ : reverse bias saturation current $v_D$ : diode voltage $v_T$ : thermal voltage Which can be formulated over $v_D$: $v_D = v_T \log{\left |\frac{I}{I_s}\right |}$ The voltage over the resistor can be written as a function of the resistor's resistance $R$ and the current $I$: $v_R = R I$ And by replacing these expressions on the Kirschhoff's voltage law equation, the following equation is obtained: $U = v_T \log{\left |\frac{I}{I_s}\right |} + R I$ To find the solution of the problem, the previous equation needs to be solved for $I$, which is the same as finding $I$ such that the cost function $c$ equals zero, as shown below. By doing this, solving for $I$ is restructured as a minimization problem. The absolute value is necessary because we don't want to obtain negative currents. $c = \left | U - v_T \log{\left | \frac{I}{I_s} \right |} - RI \right |$ Known parameter values The voltage of the source is $10 \space V$ and the resistance of the resistor is $100 \space \Omega$. The diode is a silicon diode and it is assumed to be at room temperature. $U = 10 \space V$ $R = 100 \space \Omega$ $I_s = 9.4 \space pA = 9.4 \times 10^{-12} \space A$ (reverse bias saturation current of silicon diodes at room temperature, $T=300 \space K$) $v_T = 25.85 \space mV = 25.85 \times 10^{-3} \space V$ (thermal voltage at room temperature, $T=300 \space K$) Optimization End of explanation """ def cost_function(I): #Fixed parameters U = 10 R = 100 I_s = 9.4e-12 v_t = 25.85e-3 c = abs(U - v_t * np.log(abs(I[:, 0] / I_s)) - R * I[:, 0]) return c """ Explanation: Defining the cost fuction The first argument of the cost function is a numpy.ndarray. Each dimension of this array represents an unknown variable. In this problem, the unknown variable is just $I$, thus the first argument is a unidimensional array. As default, the thermal voltage is assumed to be $25.85 \space mV$. 
End of explanation """ %%time # Set-up hyperparameters options = {'c1': 0.5, 'c2': 0.3, 'w':0.3} # Call instance of PSO optimizer = ps.single.GlobalBestPSO(n_particles=10, dimensions=1, options=options) # Perform optimization cost, pos = optimizer.optimize(cost_function, iters=30) print(pos[0]) print(cost) """ Explanation: Setting the optimizer To solve this problem, the global-best optimizer is going to be used. End of explanation """ x = np.linspace(0.001, 0.1, 100).reshape(100, 1) y = cost_function(x) plt.plot(x, y) plt.xlabel('Current I [A]') plt.ylabel('Cost'); """ Explanation: Checking the solution The current flowing through the circuit is approximately $0.094 \space A$ which yields a cost of almost zero. The graph below illustrates the relationship between the cost $c$ and the current $I$. As shown, the cost reaches its minimum value of zero when $I$ is somewhere close to $0.09$. The use of reshape(100, 1) is required since np.linspace(0.001, 0.1, 100) returns an array with shape (100,) and first argument of the cost function must be a unidimensional array, that is, an array with shape (100, 1). End of explanation """ # Import non-linear solver from scipy.optimize import fsolve c = lambda I: abs(10 - 25.85e-3 * np.log(abs(I / 9.4e-12)) - 100 * I) initial_guess = 0.09 current_I = fsolve(func=c, x0=initial_guess) print(current_I[0]) """ Explanation: Another way of solving non-linear equations is by using non-linear solvers implemented in libraries such as scipy. There are different solvers that one can choose which correspond to different numerical methods. We are going to use fsolve, which is a general non-linear solver that finds the root of a given function. Unlike pyswarms, the function (in this case, the cost function) to be used in fsolve must have as first argument a single value. Moreover, numerical methods need an initial guess for the solution, which can be made from the graph above. End of explanation """
tensorflow/docs
site/en/tutorials/generative/adversarial_fgsm.ipynb
apache-2.0
#@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Explanation: Copyright 2019 The TensorFlow Authors. Licensed under the Apache License, Version 2.0 (the "License"); End of explanation """ import tensorflow as tf import matplotlib as mpl import matplotlib.pyplot as plt mpl.rcParams['figure.figsize'] = (8, 8) mpl.rcParams['axes.grid'] = False """ Explanation: Adversarial example using FGSM <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/tutorials/generative/adversarial_fgsm"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/adversarial_fgsm.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> This tutorial creates an adversarial example using the Fast Gradient Signed Method (FGSM) attack as described in Explaining and Harnessing Adversarial 
Examples by Goodfellow et al. This was one of the first and most popular attacks to fool a neural network. What is an adversarial example? Adversarial examples are specialised inputs created with the purpose of confusing a neural network, resulting in the misclassification of a given input. These notorious inputs are indistinguishable to the human eye, but cause the network to fail to identify the contents of the image. There are several types of such attacks, however, here the focus is on the fast gradient sign method attack, which is a white box attack whose goal is to ensure misclassification. A white box attack is where the attacker has complete access to the model being attacked. One of the most famous examples of an adversarial image shown below is taken from the aforementioned paper. Here, starting with the image of a panda, the attacker adds small perturbations (distortions) to the original image, which results in the model labelling this image as a gibbon, with high confidence. The process of adding these perturbations is explained below. Fast gradient sign method The fast gradient sign method works by using the gradients of the neural network to create an adversarial example. For an input image, the method uses the gradients of the loss with respect to the input image to create a new image that maximises the loss. This new image is called the adversarial image. This can be summarised using the following expression: $$adv_x = x + \epsilon*\text{sign}(\nabla_xJ(\theta, x, y))$$ where adv_x : Adversarial image. x : Original input image. y : Original input label. $\epsilon$ : Multiplier to ensure the perturbations are small. $\theta$ : Model parameters. $J$ : Loss. An intriguing property here, is the fact that the gradients are taken with respect to the input image. This is done because the objective is to create an image that maximises the loss. 
A method to accomplish this is to find how much each pixel in the image contributes to the loss value, and add a perturbation accordingly. This works pretty fast because it is easy to find how each input pixel contributes to the loss by using the chain rule and finding the required gradients. Hence, the gradients are taken with respect to the image. In addition, since the model is no longer being trained (thus the gradient is not taken with respect to the trainable variables, i.e., the model parameters), and so the model parameters remain constant. The only goal is to fool an already trained model. So let's try and fool a pretrained model. In this tutorial, the model is MobileNetV2 model, pretrained on ImageNet. End of explanation """ pretrained_model = tf.keras.applications.MobileNetV2(include_top=True, weights='imagenet') pretrained_model.trainable = False # ImageNet labels decode_predictions = tf.keras.applications.mobilenet_v2.decode_predictions # Helper function to preprocess the image so that it can be inputted in MobileNetV2 def preprocess(image): image = tf.cast(image, tf.float32) image = tf.image.resize(image, (224, 224)) image = tf.keras.applications.mobilenet_v2.preprocess_input(image) image = image[None, ...] return image # Helper function to extract labels from probability vector def get_imagenet_label(probs): return decode_predictions(probs, top=1)[0][0] """ Explanation: Let's load the pretrained MobileNetV2 model and the ImageNet class names. End of explanation """ image_path = tf.keras.utils.get_file('YellowLabradorLooking_new.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/YellowLabradorLooking_new.jpg') image_raw = tf.io.read_file(image_path) image = tf.image.decode_image(image_raw) image = preprocess(image) image_probs = pretrained_model.predict(image) """ Explanation: Original image Let's use a sample image of a Labrador Retriever by Mirko CC-BY-SA 3.0 from Wikimedia Common and create adversarial examples from it. 
The first step is to preprocess it so that it can be fed as an input to the MobileNetV2 model. End of explanation """ plt.figure() plt.imshow(image[0] * 0.5 + 0.5) # To change [-1, 1] to [0,1] _, image_class, class_confidence = get_imagenet_label(image_probs) plt.title('{} : {:.2f}% Confidence'.format(image_class, class_confidence*100)) plt.show() """ Explanation: Let's have a look at the image. End of explanation """ loss_object = tf.keras.losses.CategoricalCrossentropy() def create_adversarial_pattern(input_image, input_label): with tf.GradientTape() as tape: tape.watch(input_image) prediction = pretrained_model(input_image) loss = loss_object(input_label, prediction) # Get the gradients of the loss w.r.t to the input image. gradient = tape.gradient(loss, input_image) # Get the sign of the gradients to create the perturbation signed_grad = tf.sign(gradient) return signed_grad """ Explanation: Create the adversarial image Implementing fast gradient sign method The first step is to create perturbations which will be used to distort the original image resulting in an adversarial image. As mentioned, for this task, the gradients are taken with respect to the image. End of explanation """ # Get the input label of the image. labrador_retriever_index = 208 label = tf.one_hot(labrador_retriever_index, image_probs.shape[-1]) label = tf.reshape(label, (1, image_probs.shape[-1])) perturbations = create_adversarial_pattern(image, label) plt.imshow(perturbations[0] * 0.5 + 0.5); # To change [-1, 1] to [0,1] """ Explanation: The resulting perturbations can also be visualised. 
End of explanation """ def display_images(image, description): _, label, confidence = get_imagenet_label(pretrained_model.predict(image)) plt.figure() plt.imshow(image[0]*0.5+0.5) plt.title('{} \n {} : {:.2f}% Confidence'.format(description, label, confidence*100)) plt.show() epsilons = [0, 0.01, 0.1, 0.15] descriptions = [('Epsilon = {:0.3f}'.format(eps) if eps else 'Input') for eps in epsilons] for i, eps in enumerate(epsilons): adv_x = image + eps*perturbations adv_x = tf.clip_by_value(adv_x, -1, 1) display_images(adv_x, descriptions[i]) """ Explanation: Let's try this out for different values of epsilon and observe the resultant image. You'll notice that as the value of epsilon is increased, it becomes easier to fool the network. However, this comes as a trade-off which results in the perturbations becoming more identifiable. End of explanation """
ktakagaki/kt-2015-DSPHandsOn
MedianFilter/.ipynb_checkpoints/Basic Test Error of the Median filter with different wave number-checkpoint.ipynb
gpl-2.0
import numpy as np import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages % matplotlib inline """ Explanation: Basic Test: Error rate with different wave number and window length 5 End of explanation """ def ErrorPlot( wavenumber,windowLength ): data = np.fromfunction( lambda x: np.sin((x-windowLength / 2)/128 * 2 * np.pi * waveNumber), (128 + windowLength / 2, ) ) #creating an array with a sine wave datafiltered = medianFilter(data, windowLength) #calculate the filtered wave with the medianFiltered function data = data[ windowLength / 2 : - windowLength ] # slice the data array to synchronize both waves datafiltered = datafiltered[ : len(data) ] # cut the filtered wave to the same length as the data wave error = ErrorRate(data,datafiltered,windowLength,wavenumber) #calculate the error with the ErrorRate function plt.axis([0, y + 1, 0, 1.2]) plt.xlabel('Wave number', fontsize = 20) plt.ylabel('Error rate', fontsize = 20) plt.scatter(*error) def ErrorRate(data,datafiltered,windowLength, wavenumber): errorrate = data-datafiltered #calculate the difference between the sine wave and the filtered wave error = [] #creating a list and save the error rate with the matching wavenumber in it errorrate = np.abs(errorrate) error.append([wavenumber ,np.mean(errorrate)])# fill the list with the errorrate and corresponding wave number error = zip(*error) #zip the error ([1,1],[2,2],[3,3]) = ([1,2,3],[1,2,3]) return error def medianFilter( data, windowLength ): if (windowLength < len(data)and data.ndim == 1): tempret = np.zeros(len(data)-windowLength+1) # creating an array where the filtered values will be saved in if windowLength % 2 ==0: # check if the window length is odd or even because with even window length we get an unsynchrone filtered wave for c in range(0, len(tempret)): tempret[c] = np.median( data[ c : c + windowLength +1 ] ) # write the values of the median filtered wave in tempret, calculate the median of all values in the window return 
tempret else: for c in range(0, len(tempret)): tempret[c] = np.median( data[ c : c + windowLength ] ) return tempret else: raise ValueError("windowLength must be smaller than len(data) and data must be a 1D array") """ Explanation: I plot the error of the filtered wave. I use the absulte values of the difference between sine wave and median filtered wave and calculate the mean, to get the error. I use a window length of 5 and different sine wave numbers. Funcitions End of explanation """ fig = plt.figure() for y in range (0,40): ErrorPlot(y,5) """ Explanation: Plot End of explanation """ pp = PdfPages( 'Error of the median filtered sine waves with different wave numbers.pdf') pp.savefig(fig) pp.close() """ Explanation: With a higher wave number the error rate continues to rise. The error rate at wave number 16 and 32 is unexpected. End of explanation """
brandoncgay/deep-learning
first-neural-network/Your_first_neural_network.ipynb
mit
%matplotlib inline %config InlineBackend.figure_format = 'retina' import numpy as np import pandas as pd import matplotlib.pyplot as plt """ Explanation: Your first neural network In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more. End of explanation """ data_path = 'Bike-Sharing-Dataset/hour.csv' rides = pd.read_csv(data_path) rides.head() """ Explanation: Load and prepare the data A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon! End of explanation """ rides[:24*10].plot(x='dteday', y='cnt') """ Explanation: Checking out the data This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above. Below is a plot showing the number of bike riders over the first 10 days or so in the data set. (Some days don't have exactly 24 entries in the data set, so it's not exactly 10 days.) You can see the hourly rentals here. This data is pretty complicated! The weekends have lower over all ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of these likely affecting the number of riders. You'll be trying to capture all this with your model. 
End of explanation """ dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday'] for each in dummy_fields: dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False) rides = pd.concat([rides, dummies], axis=1) fields_to_drop = ['instant', 'dteday', 'season', 'weathersit', 'weekday', 'atemp', 'mnth', 'workingday', 'hr'] data = rides.drop(fields_to_drop, axis=1) data.head() """ Explanation: Dummy variables Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to get_dummies(). End of explanation """ quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed'] # Store scalings in a dictionary so we can convert back later scaled_features = {} for each in quant_features: mean, std = data[each].mean(), data[each].std() scaled_features[each] = [mean, std] data.loc[:, each] = (data[each] - mean)/std """ Explanation: Scaling target variables To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1. The scaling factors are saved so we can go backwards when we use the network for predictions. End of explanation """ # Save data for approximately the last 21 days test_data = data[-21*24:] # Now remove the test data from the data set data = data[:-21*24] # Separate the data into features and targets target_fields = ['cnt', 'casual', 'registered'] features, targets = data.drop(target_fields, axis=1), data[target_fields] test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields] """ Explanation: Splitting the data into training, testing, and validation sets We'll save the data for the last approximately 21 days to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders. 
End of explanation """ # Hold out the last 60 days or so of the remaining data as a validation set train_features, train_targets = features[:-60*24], targets[:-60*24] val_features, val_targets = features[-60*24:], targets[-60*24:] """ Explanation: We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set). End of explanation """ class NeuralNetwork(object): def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate): # Set number of nodes in input, hidden and output layers. self.input_nodes = input_nodes self.hidden_nodes = hidden_nodes self.output_nodes = output_nodes # Initialize weights self.weights_input_to_hidden = np.random.normal(0.0, self.input_nodes**-0.5, (self.input_nodes, self.hidden_nodes)) self.weights_hidden_to_output = np.random.normal(0.0, self.hidden_nodes**-0.5, (self.hidden_nodes, self.output_nodes)) self.lr = learning_rate #### TODO: Set self.activation_function to your implemented sigmoid function #### # # Note: in Python, you can define a function with a lambda expression, # as shown below. self.activation_function = lambda x : 0 # Replace 0 with your sigmoid calculation. ### If the lambda code above is not something you're familiar with, # You can uncomment out the following three lines and put your # implementation there instead. # #def sigmoid(x): # return 0 # Replace 0 with your sigmoid calculation here #self.activation_function = sigmoid def train(self, features, targets): ''' Train the network on batch of features and targets. 
Arguments --------- features: 2D array, each row is one data record, each column is a feature targets: 1D array of target values ''' n_records = features.shape[0] delta_weights_i_h = np.zeros(self.weights_input_to_hidden.shape) delta_weights_h_o = np.zeros(self.weights_hidden_to_output.shape) for X, y in zip(features, targets): #### Implement the forward pass here #### ### Forward pass ### # TODO: Hidden layer - Replace these values with your calculations. hidden_inputs = None # signals into hidden layer hidden_outputs = None # signals from hidden layer # TODO: Output layer - Replace these values with your calculations. final_inputs = None # signals into final output layer final_outputs = None # signals from final output layer #### Implement the backward pass here #### ### Backward pass ### # TODO: Output error - Replace this value with your calculations. error = None # Output layer error is the difference between desired target and actual output. # TODO: Calculate the backpropagated error term (delta) for the output output_error_term = None # TODO: Calculate the hidden layer's contribution to the error hidden_error = None # TODO: Calculate the backpropagated error term (delta) for the hidden layer hidden_error_term = None # Weight step (input to hidden) delta_weights_i_h += None # Weight step (hidden to output) delta_weights_h_o += None # TODO: Update the weights - Replace these values with your calculations. self.weights_hidden_to_output += None # update hidden-to-output weights with gradient descent step self.weights_input_to_hidden += None # update input-to-hidden weights with gradient descent step def run(self, features): ''' Run a forward pass through the network with input features Arguments --------- features: 1D array of feature values ''' #### Implement the forward pass here #### # TODO: Hidden layer - replace these values with the appropriate calculations. 
hidden_inputs = None # signals into hidden layer hidden_outputs = None # signals from hidden layer # TODO: Output layer - Replace these values with the appropriate calculations. final_inputs = None # signals into final output layer final_outputs = None # signals from final output layer return final_outputs def MSE(y, Y): return np.mean((y-Y)**2) """ Explanation: Time to build the network Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes. <img src="assets/neural_network.png" width=300px> The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression, the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, but takes into account the threshold, is called an activation function. We work through each layer of our network calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called forward propagation. We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called backpropagation. Hint: You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$. Below, you have these tasks: 1. Implement the sigmoid function to use as the activation function. 
Set self.activation_function in __init__ to your sigmoid function. 2. Implement the forward pass in the train method. 3. Implement the backpropagation algorithm in the train method, including calculating the output error. 4. Implement the forward pass in the run method. End of explanation """ import unittest inputs = np.array([[0.5, -0.2, 0.1]]) targets = np.array([[0.4]]) test_w_i_h = np.array([[0.1, -0.2], [0.4, 0.5], [-0.3, 0.2]]) test_w_h_o = np.array([[0.3], [-0.1]]) class TestMethods(unittest.TestCase): ########## # Unit tests for data loading ########## def test_data_path(self): # Test that file path to dataset has been unaltered self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv') def test_data_loaded(self): # Test that data frame loaded self.assertTrue(isinstance(rides, pd.DataFrame)) ########## # Unit tests for network functionality ########## def test_activation(self): network = NeuralNetwork(3, 2, 1, 0.5) # Test that the activation function is a sigmoid self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5)))) def test_train(self): # Test that weights are updated correctly on training network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() network.train(inputs, targets) self.assertTrue(np.allclose(network.weights_hidden_to_output, np.array([[ 0.37275328], [-0.03172939]]))) self.assertTrue(np.allclose(network.weights_input_to_hidden, np.array([[ 0.10562014, -0.20185996], [0.39775194, 0.50074398], [-0.29887597, 0.19962801]]))) def test_run(self): # Test correctness of run method network = NeuralNetwork(3, 2, 1, 0.5) network.weights_input_to_hidden = test_w_i_h.copy() network.weights_hidden_to_output = test_w_h_o.copy() self.assertTrue(np.allclose(network.run(inputs), 0.09998924)) suite = unittest.TestLoader().loadTestsFromModule(TestMethods()) unittest.TextTestRunner().run(suite) """ Explanation: Unit tests Run these unit tests to 
check the correctness of your network implementation. This will help you be sure your network was implemented correctly befor you starting trying to train it. These tests must all be successful to pass the project. End of explanation """ import sys ### Set the hyperparameters here ### iterations = 100 learning_rate = 0.1 hidden_nodes = 2 output_nodes = 1 N_i = train_features.shape[1] network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate) losses = {'train':[], 'validation':[]} for ii in range(iterations): # Go through a random batch of 128 records from the training data set batch = np.random.choice(train_features.index, size=128) X, y = train_features.ix[batch].values, train_targets.ix[batch]['cnt'] network.train(X, y) # Printing out the training progress train_loss = MSE(network.run(train_features).T, train_targets['cnt'].values) val_loss = MSE(network.run(val_features).T, val_targets['cnt'].values) sys.stdout.write("\rProgress: {:2.1f}".format(100 * ii/float(iterations)) \ + "% ... Training loss: " + str(train_loss)[:5] \ + " ... Validation loss: " + str(val_loss)[:5]) sys.stdout.flush() losses['train'].append(train_loss) losses['validation'].append(val_loss) plt.plot(losses['train'], label='Training loss') plt.plot(losses['validation'], label='Validation loss') plt.legend() _ = plt.ylim() """ Explanation: Training the network Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops. You'll also be using a method know as Stochastic Gradient Descent (SGD) to train the network. 
The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later. Choose the number of iterations This is the number of batches of samples from the training data we'll use to train the network. The more iterations you use, the better the model will fit the data. However, this process can have sharply diminishing returns and can waste computational resources if you use too many iterations. You want to find a number here where the network has a low training loss, and the validation loss is at a minimum. The ideal number of iterations would be a level that stops shortly after the validation loss is no longer decreasing. Choose the learning rate This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. Normally a good choice to start at is 0.1; however, if you effectively divide the learning rate by n_records, try starting out with a learning rate of 1. In either case, if the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge. Choose the number of hidden nodes In a model where all the weights are optimized, the more hidden nodes you have, the more accurate the predictions of the model will be. (A fully optimized model could have weights of zero, after all.) However, the more hidden nodes you have, the harder it will be to optimize the weights of the model, and the more likely it will be that suboptimal weights will lead to overfitting. With overfitting, the model will memorize the training data instead of learning the true pattern, and won't generalize well to unseen data. 
Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose. You'll generally find that the best number of hidden nodes to use ends up being between the number of input and output nodes. End of explanation """ fig, ax = plt.subplots(figsize=(8,4)) mean, std = scaled_features['cnt'] predictions = network.run(test_features).T*std + mean ax.plot(predictions[0], label='Prediction') ax.plot((test_targets['cnt']*std + mean).values, label='Data') ax.set_xlim(right=len(predictions)) ax.legend() dates = pd.to_datetime(rides.ix[test_data.index]['dteday']) dates = dates.apply(lambda d: d.strftime('%b %d')) ax.set_xticks(np.arange(len(dates))[12::24]) _ = ax.set_xticklabels(dates[12::24], rotation=45) """ Explanation: Check out your predictions Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly. End of explanation """
GoogleCloudPlatform/vertex-ai-samples
community-content/pytorch_image_classification_distributed_data_parallel_training_with_vertex_sdk/multi_node_ddp_nccl_vertex_training_with_custom_container.ipynb
apache-2.0
PROJECT_ID = "YOUR PROJECT ID" BUCKET_NAME = "gs://YOUR BUCKET NAME" REGION = "YOUR REGION" SERVICE_ACCOUNT = "YOUR SERVICE ACCOUNT" ! gsutil ls -al $BUCKET_NAME content_name = "pt-img-cls-multi-node-ddp-cust-cont" """ Explanation: PyTorch Image Classification Multi-Node Distributed Data Parallel Training on GPU using Vertex Training with Custom Container <table align="left"> <td> <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/community-content/pytorch_image_classification_distributed_data_parallel_training_with_vertex_sdk/multi_node_ddp_nccl_vertex_training_with_custom_container.ipynb"> <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo"> View on GitHub </a> </td> </table> Setup End of explanation """ hostname = "gcr.io" image_name = content_name tag = "latest" custom_container_image_uri = f"{hostname}/{PROJECT_ID}/{image_name}:{tag}" """ Explanation: Vertex Training using Vertex SDK and Custom Container Built Custom Container End of explanation """ ! 
pip install -r requirements.txt from google.cloud import aiplatform aiplatform.init( project=PROJECT_ID, staging_bucket=BUCKET_NAME, location=REGION, ) """ Explanation: Initialize Vertex SDK End of explanation """ content_name = content_name + "-gpu" tensorboard = aiplatform.Tensorboard.create( display_name=content_name, ) """ Explanation: Create a Vertex Tensorboard Instance End of explanation """ display_name = content_name gcs_output_uri_prefix = f"{BUCKET_NAME}/{display_name}" replica_count = 1 machine_type = "n1-standard-4" accelerator_count = 4 accelerator_type = "NVIDIA_TESLA_K80" args = [ "--backend", "nccl", "--batch-size", "128", "--epochs", "25", ] custom_container_training_job = aiplatform.CustomContainerTrainingJob( display_name=display_name, container_uri=custom_container_image_uri, ) custom_container_training_job.run( args=args, base_output_dir=gcs_output_uri_prefix, replica_count=replica_count, machine_type=machine_type, accelerator_count=accelerator_count, accelerator_type=accelerator_type, tensorboard=tensorboard.resource_name, service_account=SERVICE_ACCOUNT, ) print(f"Custom Training Job Name: {custom_container_training_job.resource_name}") print(f"GCS Output URI Prefix: {gcs_output_uri_prefix}") """ Explanation: Option: Use a Previously Created Vertex Tensorboard Instance tensorboard_name = "Your Tensorboard Resource Name or Tensorboard ID" tensorboard = aiplatform.Tensorboard(tensorboard_name=tensorboard_name) Run a Vertex SDK CustomContainerTrainingJob End of explanation """ ! gsutil ls $gcs_output_uri_prefix """ Explanation: Training Output Artifact End of explanation """ ! gsutil rm -rf $gcs_output_uri_prefix """ Explanation: Clean Up Artifact End of explanation """
mikekestemont/wuerzb15
Chapter 3 - First steps in sklearn.ipynb
mit
clf = SomeClassifier(arg1='foo', arg2='foo2') clf.fit(X_train, y_train) predictions = clf.predict(X_test) """ Explanation: Chapter 3 - First steps in Sklearn In this chapter, we make our first steps using scikit-learn (commonly abbreviated to sklearn), a marvellous Python library for Machine Learning, which is actively being developed by a large and enthousiastic community. The excellent documentation of the package etc. can be found here. The library that comes with sklearn is excellent for supervised as well as unsupervised procedures. In this chapter, we will have a look at amongst others Principal Components Analysis, as a representative example of unsupervised learning. In the next chapter, we will focus on how sklearn can be used for and Nearest Neighbour Classification (AKA Burrows's Delta), as well as vectorization and feature extraction. Sklearn offers fast and efficient implementations of an impressive series of state-of-the-art algorithms in Machine Learning. Its main strength is that all these methods can used using the same function calls. Take the example of a traditional classifier, which as a supervised algorithm is first trained on some annotated data and then applied to some unseen data. All classifiers in sklearn can be used in the following way (don't execute the following code blocks, they are just mockups): End of explanation """ m = SomeMethod(arg1='foo', arg2='foo2') clf.fit(X) X_bar = clf.transform(X) """ Explanation: This is the standard procedure for supervised algorithms for unsupervised algorithms, such as Principal Components Analysis a similar procedure exists: End of explanation """ m = SomeMethod(arg1='foo', arg2='foo2') X_bar = clf.fit_transform(X) """ Explanation: Here, we instantiate the method and then apply it to the data that we have (X) via the fit() method. After fitting our method on the data, we can use it to transform() our data to some new format. 
Conveniently, there is also a convenience method fit_transform(): End of explanation """ import pickle titles, authors, words, X = pickle.load(open("dummy.p", "rb")) print(X.shape) """ Explanation: This will probably look a bit abstract for now, so let's delve into Principal Components Analysis as a case study to introduce sklearn's powerful capabilities. Principal Components Analysis The basics Principal Components Analysis or PCA is a commonly used exploratory technique in stylometry. Let us have a look at our Victorian data matrix again: End of explanation """ from sklearn.decomposition import PCA pca = PCA(n_components=2) """ Explanation: As you can see, this data matrix is fairly high-dimensional, since we work with 100 word frequencies: while we can easily plot texts in two, or perhaps three dimensions, it is much more difficult to think about data in 100 dimensions. PCA is one of the techniques for so-called 'dimension reduction' which is commonly used: it will attempt to convert our original 9 by 100 matrix, to a much lower-dimensional matrix, such as a 9 x 2 matrix. The newly created columns are called principal components and the idea is that they offer a summary of the original 9 x 100 matrix that is maximally faithful to the original structure. This 'column reduction' is easily achieved in sklearn: End of explanation """ pca.fit(X) """ Explanation: We first instantiate a PCA object, indicating that we wish to reduce our 9 x 100 matrix to a 9 x 2 matrix, only retaining the 2 most important components. We can fit our method to our data: End of explanation """ X_bar = pca.transform(X) print(X_bar.shape) """ Explanation: After running the PCA algorithm, we can apply the reduction to our original data: End of explanation """ X_bar = pca.fit_transform(X) print(X_bar.shape) """ Explanation: By inspecting the newly created object's shape, we can see that we indeed obtained the desired reduced matrix. 
In shorthand, we could also have used: End of explanation """ print(X_bar) """ Explanation: This yields the exact same result. Let us now have a look at X_bar: End of explanation """ %matplotlib inline import matplotlib.pyplot as plt import seaborn as sns fig, ax1 = sns.plt.subplots() x1, x2 = X_bar[:,0], X_bar[:,1] # select coordinates via numpy indexing! ax1.scatter(x1, x2, 100) """ Explanation: We see that we have obtained a score for each text in both of the two principal components. As is common in stylometry, we will now plot the texts in the newly created space defined by the two principal components. For this, we can again use matplotlib, in tandem with seaborn: End of explanation """ fig, ax1 = sns.plt.subplots() x1, x2 = X_bar[:,0], X_bar[:,1] # select coordinates via numpy indexing! ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none') """ Explanation: As you can see, we see a canvas appear with a number of dots. In our case, however, it will be more informative to remove the dots, and annotate them with the titles corresponding with each dot. We therefore first plot the position labels without any dot: End of explanation """ fig, ax1 = sns.plt.subplots() x1, x2 = X_bar[:,0], X_bar[:,1] # select coordinates via numpy indexing! ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none') for x, y, title in zip(x1, x2, titles): ax1.text(x, y, title, ha='center', va="center") """ Explanation: And now we add the text labels: End of explanation """ from sklearn.manifold import MDS mds = MDS(n_components=2) X_bar = mds.fit_transform(X) print(X_bar.shape) fig, ax1 = sns.plt.subplots() x1, x2 = X_bar[:,0], X_bar[:,1] # select coordinates via numpy indexing! 
ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none') for x, y, title in zip(x1, x2, titles): ax1.text(x, y, title, ha='center', va="center") """ Explanation: Here, we can see now how the horizontal spread captures the distinction between Austen's novels and the author author (which by convention represents the first components). The second, vertically displayed components, on the other hand, seems more sensitive to the difference between Thackeray and Dickens. PCA is definitely not the only technique which is nowadays used for dimension reduction, and a bunch of other techniques are equally useful for this problem of dimension reduction. One good example is Multi-Dimensional Scaling, which will result in a similarly reduced or condesed matrix. Luckily, because of sklearn's strict interfacing, we only have to change minor details in our code to re-run the procedure for MDS: End of explanation """ pca = PCA(n_components=2) X_bar = pca.fit_transform(X) loadings = pca.components_ print(loadings.shape) """ Explanation: As you can see, a very similar structure arises from this analysis. Adding loadings One particular advantage of such techniques that they do not need to function as black boxes, but they can actually inform us a lot about as to why this specific structure arises. In PCA, this can be achieved by overlaying our plot with the so-called 'loadings' for the original words: these will show us how specific words contribute to the oppositions created in the plots above. The loadings can be retrieved from the PCA object as follows: End of explanation """ loadings = loadings.transpose() print(loadings.shape) """ Explanation: As you can see, the loadings_ property of the pca object returns a 2 x 100 matrix which holds for each of the 2 principal components a score for each word in our vocabulary of 100. These scores will tell us how important the contribution of specific words was, when it comes to the creation of the principal components. 
To be able to interact more intuitively with these loadings, we now take the transpose the matrix: End of explanation """ sns.set_style('white') # remove gridlines because of dual axes # we repeat the previous bit: fig, ax1 = sns.plt.subplots() x1, x2 = X_bar[:,0], X_bar[:,1] # select coordinates via numpy indexing! ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none') for x, y, title in zip(x1, x2, titles): ax1.text(x, y, title, ha='center', va="center") ax2 = ax1.twinx().twiny() l1, l2 = loadings[:,0], loadings[:,1] ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none') # first empty plot! for x, y, l in zip(l1, l2, words): ax2.text(x, y, l ,ha='center', va="center", size=8, color="darkgrey") """ Explanation: Let us now add these word loadings to our plots, and overlay our original graph with the word loadings in a lighter colour. To achieve this we actually need to add a second axis to the plot, because the word loadings have a scale that is different from the text's position. We can do that in a way that is precisely parallel to plotting the title labels: End of explanation """ color_dict = {'Austen':'r', 'Dickens':'b', 'Thackeray': 'y'} print(color_dict) """ Explanation: In this plot, the loadings now reveal why the PCA came up with this sort of clustering: appararently Jane Austen uses a of personal pronouns (her, she), whereas Thackeray is a fervent used of the article the. Some more eye candy Our plot is starting to look nice. One additional feature we could add is some colouring. We could add this colour information on the basis of our author labels, and give the text of every author a distinct colour in the plot. This easy to achieve. We first define a color dictionary, on the basis of the author labels: End of explanation """ sns.set_style('white') # remove gridlines because of dual axes # we repeat the previous bit: fig, ax1 = sns.plt.subplots() x1, x2 = X_bar[:,0], X_bar[:,1] # select coordinates via numpy indexing! 
ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none') for x, y, title, author in zip(x1, x2, titles, authors): ax1.text(x, y, title, ha='center', va="center", color=color_dict[author]) # overlay the loadings: ax2 = ax1.twinx().twiny() l1, l2 = loadings[:,0], loadings[:,1] ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none') # first empty plot! for x, y, l in zip(l1, l2, words): ax2.text(x, y, l ,ha='center', va="center", size=8, color="darkgrey") """ Explanation: We now add the relevant color when looping through the text labels at plotting time End of explanation """ from sklearn.cluster import AgglomerativeClustering """ Explanation: This surely offers a nice reading aid when inspecting such scatterplots! Another possibility is to run a clustering algorithm on top of the samples' position in the PC space. In this case, our analysis is even less 'supervised', and we will be less tempted to interpret specific distances as 'significant', on the basis of the author labels. Additionally, this setup allows us to demonstrate another set of unsupervised routines in sklearn, namely the type of clustering, which we already worked with in the previous chapter on scipy. Let us import some ('agglomerative') clustering functionality from sklearn as follows: End of explanation """ clust = AgglomerativeClustering(linkage='ward', n_clusters=2) clust.fit(X_bar) """ Explanation: Again, we first need to instantiate the clusters, before we can run it on our data. Note that we have the algorithm discern only the two most distinct clusters, using the n_clusters parameter. 
End of explanation """ cl_labels = clust.labels_ print(cl_labels) """ Explanation: After running the clustering algorithm, we can now access the cluster labels, indicating to which of the two clusters each inidividual text has been assigned: End of explanation """ sns.set_style('white') # remove gridlines because of dual axes clust_colors = ('r', 'b') # we repeat the previous bit: fig, ax1 = sns.plt.subplots() x1, x2 = X_bar[:,0], X_bar[:,1] # select coordinates via numpy indexing! ax1.scatter(x1, x2, 100, edgecolors='none', facecolors='none') for x, y, title, c_idx in zip(x1, x2, titles, cl_labels): ax1.text(x, y, title, ha='center', va="center", color=clust_colors[c_idx]) # overlay the loadings: ax2 = ax1.twinx().twiny() l1, l2 = loadings[:,0], loadings[:,1] ax2.scatter(l1, l2, 100, edgecolors='none', facecolors='none') # first empty plot! for x, y, l in zip(l1, l2, words): ax2.text(x, y, l ,ha='center', va="center", size=8, color="darkgrey") """ Explanation: Notice the underscore at the end of the .label_ property. If you scroll up, you can see that the .loadings_ property of the PCA object has it too. The fact that this property ends with an underscore, is a convention which sklearn uses to indicate that this is a property, which only becomes available after fitting the algorithm. We can now use these labels to obtain a more objective coloring of our samples: End of explanation """
rfinn/LCS
notebooks/GIM2DvsNSA.ipynb
gpl-3.0
import numpy as np from matplotlib import pyplot as plt %matplotlib inline import warnings warnings.filterwarnings('ignore') import sys sys.path.append("/Users/rfinn/Dropbox/pythonCode/") sys.path.append("/anaconda/lib/python2.7/site-packages") sys.path.append("/Users/rfinn/Ureka/variants/common/lib/python2.7/site-packages") from astropy.io import fits #infile='/Users/rfinn/research/LocalClusters/NSAmastertables/LCS_all_size.fits' #s=fits.getdata(infile) #flag=s['matchflag'] %run ~/Dropbox/pythonCode/LCSanalyzeblue.py """ Explanation: Comparing sizes of GIM2D vs NSA Sersic fits In GIM2D table 1 (devauc + exp fit) * Rhlr_1 = 1/2 light radius is r-band in kpc * Re = effective radius of bulge in kpc * Rd = effective radius of disk in kpc Setting up python and reading in data End of explanation """ plt.figure() plt.plot(s.s.SERSIC_TH50[s.gim2dflag]*s.DA[s.gim2dflag],s.s.Rhlr_1[s.gim2dflag],'ko') #plt.plot(s.s.SERSIC_TH50[s.gim2dflag]*s.DA[s.gim2dflag],s.s.Rd[s.gim2dflag],'bo') xl=np.linspace(0,20,2) plt.plot(xl,xl,'r-') plt.xlabel('NSA SERSIC_TH50*DA') plt.ylabel('GIM2D half light radius') """ Explanation: Plotting 1/2 light radius from GIM2D vs NSA End of explanation """ plt.figure() plt.plot(s.s.Rhlr_1[s.gim2dflag],s.s.Rd[s.gim2dflag],'bo',label='Disk') plt.plot(s.s.Rhlr_1[s.gim2dflag],s.s.Re[s.gim2dflag],'ro',label='Bulge') xl=np.linspace(0,20,2) plt.plot(xl,xl,'k--',label='1:1') plt.ylabel('GIM2D half light radius') plt.xlabel('GIM2D galaxy half light radius') plt.legend(numpoints=1,loc='upper left') """ Explanation: Conclusion two measures of radius are comparable, expect for the NSA galaxies with very large radii. I think I cut these out of the sample. If I used GIM2D fits, I could include them! Right now I have: self.sizeflag=(self.s.SERSIC_TH50*self.DA &gt; minsize_kpc) &amp; (self.s.SERSIC_TH50 &lt; 20.) Using GIM2D instead will add an additional 16 galaxies. 
Comparing GIM2D bulge and disk fits to overall half-light radius End of explanation """ plt.figure() bins=np.arange(0,2.5,.1) plt.hist(s.SIZE_RATIO_gim2d[s.sampleflag],histtype='step',hatch='///',color='r',bins=bins,label='GIM2D') plt.hist(s.SIZE_RATIO_DISK[s.sampleflag],histtype='step',color='b',hatch='o',bins=bins,label='GIM2D Disk Only') plt.hist(s.s.SIZE_RATIO[s.sampleflag],histtype='step',color='k',hatch='\\\\',bins=bins,label='NSA') plt.legend(loc='upper right') plt.xlabel('Normalized 24um Size') """ Explanation: CONCLUSION Both bulge and disk 1/2 light radii are less than 1/2 light radius for entire galaxy. This surprises me. I would think that the disk is more extended and flatter, so the disk should have a larger half-light radius. What am I missing? Comparing Size Ratios (Re(24)/Re(r)) derived from GIM2D vs NSA End of explanation """
metpy/MetPy
v0.8/_downloads/Simple_Sounding.ipynb
bsd-3-clause
import matplotlib.pyplot as plt import numpy as np import pandas as pd import metpy.calc as mpcalc from metpy.cbook import get_test_data from metpy.plots import add_metpy_logo, SkewT from metpy.units import units # Change default to be better for skew-T plt.rcParams['figure.figsize'] = (9, 9) # Upper air data can be obtained using the siphon package, but for this example we will use # some of MetPy's sample data. col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed'] df = pd.read_fwf(get_test_data('jan20_sounding.txt', as_file_obj=False), skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names) df['u_wind'], df['v_wind'] = mpcalc.get_wind_components(df['speed'], np.deg2rad(df['direction'])) # Drop any rows with all NaN values for T, Td, winds df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed', 'u_wind', 'v_wind'), how='all').reset_index(drop=True) """ Explanation: Simple Sounding Use MetPy as straightforward as possible to make a Skew-T LogP plot. End of explanation """ p = df['pressure'].values * units.hPa T = df['temperature'].values * units.degC Td = df['dewpoint'].values * units.degC wind_speed = df['speed'].values * units.knots wind_dir = df['direction'].values * units.degrees u, v = mpcalc.get_wind_components(wind_speed, wind_dir) skew = SkewT() # Plot the data using normal plotting functions, in this case using # log scaling in Y, as dictated by the typical meteorological plot skew.plot(p, T, 'r') skew.plot(p, Td, 'g') skew.plot_barbs(p, u, v) # Add the relevant special lines skew.plot_dry_adiabats() skew.plot_moist_adiabats() skew.plot_mixing_lines() skew.ax.set_ylim(1000, 100) # Add the MetPy logo! 
fig = plt.gcf() add_metpy_logo(fig, 115, 100) # Example of defining your own vertical barb spacing skew = SkewT() # Plot the data using normal plotting functions, in this case using # log scaling in Y, as dictated by the typical meteorological plot skew.plot(p, T, 'r') skew.plot(p, Td, 'g') # Set spacing interval--Every 50 mb from 1000 to 100 mb my_interval = np.arange(100, 1000, 50) * units('mbar') # Get indexes of values closest to defined interval ix = mpcalc.resample_nn_1d(p, my_interval) # Plot only values nearest to defined interval values skew.plot_barbs(p[ix], u[ix], v[ix]) # Add the relevant special lines skew.plot_dry_adiabats() skew.plot_moist_adiabats() skew.plot_mixing_lines() skew.ax.set_ylim(1000, 100) # Add the MetPy logo! fig = plt.gcf() add_metpy_logo(fig, 115, 100) # Show the plot plt.show() """ Explanation: We will pull the data out of the example dataset into individual variables and assign units. End of explanation """
peendebak/SPI-rack
examples/S5k_Low_Level.ipynb
mit
from spirack import SPI_rack, S5k_module, version import numpy as np from scipy import signal import matplotlib.pyplot as plt %matplotlib notebook #assert version.__version__ >= '0.1.4', 'spirack version needs to be >= 0.1.4' print("SPI-rack Code Version: " + version.__version__) """ Explanation: S5k example/demo notebook Demo/example of the S5k module summing fast and slow ramps with individual amplitude control. End of explanation """ spi = SPI_rack("COM4", 1000000, 1) spi.unlock() """ Explanation: Open SPI rack connection and unlock (necessary after bootup of the controller module). End of explanation """ spi.get_battery() s5k = S5k_module(spi, 1) s5k.set_clock_source('internal') s5k.set_clock_division(1, 4) s5k.run_module(False) s5k.run_module(True) s5k.sync_clock() for DAC in range(1,9): s5k.set_clock_division(DAC, 4) for DAC in range(9, 17): s5k.set_clock_division(DAC, 400) """ Explanation: Create new S5k module object at correct address and set clock source to internal clock. The clock can be divided by all even numbers between 2-510. We'll set DAC 1-8 at 50 MHz and DAC 9-16 at 500 KHz. This allows us to play the same waveform on both, with a factor 100 time difference. All these settings are base on a 200 MHz internal oscillator. End of explanation """ for DAC in range(1, 9): s5k.set_waveform_mode(DAC, 'AWG') s5k.set_digital_gain(DAC, 0.45) for DAC in range(1, 9): s5k.set_digital_gain(DAC, 1) for DAC in range(9, 17): s5k.set_digital_gain(DAC, 0) """ Explanation: Set all the DACs to AWG mode. This allows us to write to the internal 4096k samples RAM. End of explanation """ wv_len = 4000 max_val = 2047 width = 0.5 t = np.linspace(0, 1, 4000) sawtooth = signal.square(2*np.pi*t, width) * max_val sawtooth = sawtooth.astype(int) plt.figure() plt.plot(sawtooth) plt.title('Sawtooth RAM data') plt.xlabel('Samples') plt.ylabel('RAM values') plt.show() """ Explanation: The ramp in both the slow and fast DAC's will be the same: 4000 samples long. 
To create the sawtooth we use the sawtooth function from the scipy signal library. The width argument allows us to define the width of the ramp as a fraction of the total waveform width: creates a ramp down. End of explanation """ s5k.upload_waveform(1, sawtooth, 0, set_pattern_length = True) s5k.upload_waveform(5, sawtooth, 0, set_pattern_length = True) s5k.upload_waveform(9, sawtooth, 0, set_pattern_length = True) s5k.upload_waveform(13, sawtooth, 0, set_pattern_length = True) for DAC in range(1,17): s5k.set_RAM_address(DAC, 0, len(sawtooth)) """ Explanation: We now have to upload the waveform to all DAC's. It only needs to be uploaded once to each DAC chip (each chip contains for DACs with shared memory). We will then simply point all the DAC's in the chip to use the same block of RAM. End of explanation """ s5k.set_pattern_length_trigger(len(sawtooth)-1) """ Explanation: We also have to set the length of the trigger period. It runs on the slowest clock used in the system, in this case at 500kHz. The period length is equal to the slow sawtooth End of explanation """ s5k.run_module(False) fast_period = 1/50e6 slow_period = 1/500e3 delay_necessary = 15*slow_period delay_cycles = round(delay_necessary/fast_period) delay_cycles = int(delay_cycles) s5k.write_AD9106(s5k.DAreg.PATTERN_DLY, delay_cycles-1, 3) s5k.write_AD9106(s5k.DAreg.PATTERN_DLY, delay_cycles-1, 1) s5k.run_module(True) """ Explanation: One issue we now run into is the trigger delay. Each chip has a delay of 15 clock cycles from trigger in, to start outputting. This is especially noticable in this case where half is running at 500 kHz and the other half at 50 MHz. To compensate for this (to get them to start at the same time), we delay the start of the fast running DACs. The delay is 15 clock cycles at 500 kHz. This gives a delay of 30 us. As the fast DACs are running at 50 MHz, we need to delay by 1500 clock cycles. We write this (minus 1) to the necessary DAC chips. 
End of explanation """ s5k.run_module(True) """ Explanation: Now we can start the module, either by running from software or giving a gate on the front of the module. End of explanation """ for DAC in range(1, 9): s5k.set_digital_gain(DAC, 1) for DAC in range(9, 17): s5k.set_digital_gain(DAC, 0.0) s5k.set_digital_gain(4, -0.1) #s5k.set_digital_gain(12, -0.7) """ Explanation: Set the gain of the slow ramp to 0.5x, and of the fast ramp to 0.1x. Gain can go to 2x, but both channels can max out the swing of the output at a gain of 1x. End of explanation """