| markdown (string, 0-37k chars) | code (string, 1-33.3k chars) | path (string, 8-215 chars) | repo_name (string, 6-77 chars) | license (15 classes) |
|---|---|---|---|---|
4.2. Selected Population
4.2.1. Remove Excluded Population, Remove Unused Features
<i>Nothing to do!</i>
<br/>
<font style="font-weight:bold;color:red">Notes: </font>
- Ideally, the features should be configured before generating the CSV feature file, as it is very inefficient to derive new features at this stage
- This step is not necessary if all the features are generated prior to the generation of the CSV feature file
|
# Exclusion of unused features
# excluded = [name for name in features_input.columns if name not in features_names_group.keys()]
# features_input = features_input.drop(excluded, axis=1)
# print("Number of columns: ", len(features_input.columns), "; Total records: ", len(features_input.index))
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<br/><br/>
5. Set Samples & Target Features
5.1. Set Features
5.1.1. Train & Test Samples
Set the samples
|
frac_train = 0.50
replace = False
random_state = 100
nrows = len(features_input.index)
features = {"train": dict(), "test": dict()}
features["train"] = features_input.sample(frac=frac_train, replace=False, random_state=100)
features["test"] = features_input.drop(features["train"].index)
features["train"] = features["train"].reset_index(drop=True)
features["test"] = features["test"].reset_index(drop=True)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:red">Clean-Up</font>
|
features_input = None
gc.collect()
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
5.1.2. Independent & Target Variables
Set independent, target & ID features
|
target_labels = list(features_types_group["TARGET"])
target_id = ["patientID"]
features["train_indep"] = dict()
features["train_target"] = dict()
features["train_id"] = dict()
features["test_indep"] = dict()
features["test_target"] = dict()
features["test_id"] = dict()
# Independent and target features
def set_features_indep_target(df):
    df_targets = pd.DataFrame(dict(zip(target_labels, [[]] * len(target_labels))))
    for i in range(len(target_labels)):
        df_targets[target_labels[i]] = df[target_labels[i]]
    df_indep = df.drop(target_labels + target_id, axis=1)
    df_id = pd.DataFrame({target_id[0]: df[target_id[0]]})
    return df_indep, df_targets, df_id
# train & test sets
features["train_indep"], features["train_target"], features["train_id"] = set_features_indep_target(features["train"])
features["test_indep"], features["test_target"], features["test_id"] = set_features_indep_target(features["test"])
# print
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Verify features visually
|
display(pd.concat([features["train_id"].head(), features["train_target"].head(), features["train_indep"].head()], axis=1))
display(pd.concat([features["test_id"].head(), features["test_target"].head(), features["test_indep"].head()], axis=1))
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:red">Clean-Up</font>
|
del features["train"]
del features["test"]
gc.collect()
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
5.5. Save Samples
Serialise & save the samples before any feature transformation.
<br/>This snapshot of the samples may be used for the population profiling
|
file_name = "Step_05_Features"
readers_writers.save_serialised_compressed(path=CONSTANTS.io_path, title=file_name, objects=features)
# print
print("Number of columns: ", len(features["train_indep"].columns),
"features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
5.2. Remove - Near Zero Variance
In order to reduce sparseness and invalid features, highly stationary ones were withdrawn. Features whose constant counts were less than or equal to a threshold were filtered out, to exclude highly constant and near-zero-variance features.
The near-zero variance rules are presented below (and illustrated in the sketch at the end of this cell):
- Frequency ratio: the frequency of the most prevalent value divided by the frequency of the second most prevalent value must be greater than a threshold;
- Percent of unique values: the number of unique values divided by the total number of samples must be below an upper-limit threshold
<font style="font-weight:bold;color:red">Configure:</font> the function
- The cutoff for the percentage of distinct values out of the number of total samples (upper limit). e.g. 10 * 100 / 100
<br/>	 → thresh_unique_cut
- The cutoff for the ratio of the most common value to the second most common value (lower limit). eg. 95/5
<br/>	 → thresh_freq_cut
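As a rough illustration of these two rules (not the T-CARER implementation, whose `preprocess.near_zero_var_df` call follows below), the statistics can be computed per column with pandas; the function and column names here are hypothetical:

```python
import pandas as pd

def near_zero_variance_flags(df, thresh_freq_cut=1000, thresh_unique_cut=100):
    """Illustrative sketch: flag columns that look nearly constant."""
    flags = {}
    for col in df.columns:
        counts = df[col].value_counts()
        # ratio of the most common value to the second most common value (lower limit)
        freq_ratio = counts.iloc[0] / counts.iloc[1] if len(counts) > 1 else float("inf")
        # percentage of distinct values out of the number of total samples (upper limit)
        pct_unique = 100.0 * df[col].nunique() / len(df)
        flags[col] = (freq_ratio > thresh_freq_cut) and (pct_unique < thresh_unique_cut)
    return pd.Series(flags)

# Example: "a" is nearly constant and is flagged with a 95/5 frequency cut; "b" is not.
demo = pd.DataFrame({"a": [0] * 99 + [1], "b": range(100)})
print(near_zero_variance_flags(demo, thresh_freq_cut=95 / 5))
```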
|
thresh_unique_cut = 100
thresh_freq_cut = 1000
excludes = []
file_name = "Step_05_Preprocess_NZV_config"
features["train_indep"], o_summaries = preprocess.near_zero_var_df(df=features["train_indep"],
excludes=excludes,
file_name=file_name,
thresh_unique_cut=thresh_unique_cut,
thresh_freq_cut=thresh_freq_cut,
to_search=True)
file_name = "Step_05_Preprocess_NZV"
readers_writers.save_text(path=CONSTANTS.io_path, title=file_name, data=o_summaries, append=False, ext="log")
file_name = "Step_05_Preprocess_NZV_config"
features["test_indep"], o_summaries = preprocess.near_zero_var_df(df=features["test_indep"],
excludes=excludes,
file_name=file_name,
thresh_unique_cut=thresh_unique_cut,
thresh_freq_cut=thresh_freq_cut,
to_search=False)
# print
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
5.3. Remove Highly Linearly Correlated
In this step, features that were highly linearly correlated were excluded.
<font style="font-weight:bold;color:red">Configure:</font> the function
- A numeric value for the pair-wise absolute correlation cutoff. e.g. 0.95
<br/>	 → thresh_corr_cut
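A rough pandas sketch of this cutoff rule (the T-CARER helper `preprocess.high_linear_correlation_df`, used below, may resolve ties between correlated pairs differently; names here are illustrative):

```python
import pandas as pd

def drop_highly_correlated(df, thresh_corr_cut=0.95):
    """Illustrative sketch: drop one feature from each pair whose absolute correlation exceeds the cutoff."""
    corr = df.corr().abs()
    cols = list(corr.columns)
    to_drop = set()
    for i in range(len(cols)):
        for j in range(i + 1, len(cols)):
            if cols[i] not in to_drop and cols[j] not in to_drop and corr.iloc[i, j] > thresh_corr_cut:
                to_drop.add(cols[j])  # keep the first feature of the pair, drop its partner
    return df.drop(columns=sorted(to_drop))

# Example: "y" is a rescaled copy of "x" and gets dropped; "z" is kept.
demo = pd.DataFrame({"x": [1, 2, 3, 4], "y": [2, 4, 6, 8], "z": [4, 1, 3, 2]})
print(drop_highly_correlated(demo).columns.tolist())
```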
|
thresh_corr_cut = 0.95
excludes = list(features_types_group["CATEGORICAL"])
file_name = "Step_05_Preprocess_Corr_config"
features["train_indep"], o_summaries = preprocess.high_linear_correlation_df(df=features["train_indep"],
excludes=excludes,
file_name=file_name,
thresh_corr_cut=thresh_corr_cut,
to_search=True)
file_name = "Step_05_Preprocess_Corr"
readers_writers.save_text(path=CONSTANTS.io_path, title=file_name, data=o_summaries, append=False, ext="log")
file_name = "Step_05_Preprocess_Corr_config"
features["test_indep"], o_summaries = preprocess.high_linear_correlation_df(df=features["test_indep"],
excludes=excludes,
file_name=file_name,
thresh_corr_cut=thresh_corr_cut,
to_search=False)
# print
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
5.4. Descriptive Statistics
Produce a descriptive stat report of 'Categorical', 'Continuous', & 'TARGET' features
|
# columns
file_name = "Step_05_Data_ColumnNames_Train"
readers_writers.save_csv(path=CONSTANTS.io_path, title=file_name,
data=list(features["train_indep"].columns.values), append=False)
# Sample - Train
file_name = "Step_05_Stats_Categorical_Train"
o_stats = preprocess.stats_discrete_df(df=features["train_indep"], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_05_Stats_Continuous_Train"
o_stats = preprocess.stats_continuous_df(df=features["train_indep"], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
# Sample - Test
file_name = "Step_05_Stats_Categorical_Test"
o_stats = preprocess.stats_discrete_df(df=features["test_indep"], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_05_Stats_Continuous_Test"
o_stats = preprocess.stats_continuous_df(df=features["test_indep"], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<br/><br/>
6. Recategorise & Transform
Verify features visually
|
display(pd.concat([features["train_id"].head(), features["train_target"].head(), features["train_indep"].head()], axis=1))
display(pd.concat([features["test_id"].head(), features["test_target"].head(), features["test_indep"].head()], axis=1))
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
6.1. Recategorise
Define the factorisation function to generate dummy features for the categorical features.
|
def factorise_settings(max_categories_frac, min_categories_num, exclude_zero):
    categories_dic = dict()
    labels_dic = dict()
    dtypes_dic = dict()
    dummies = []
    for f_name in features_types_group["CATEGORICAL"]:
        if f_name in features["train_indep"]:
            # find top & valid states
            summaries = stats.itemfreq(features["train_indep"][f_name])
            summaries = pd.DataFrame({"value": summaries[:, 0], "freq": summaries[:, 1]})
            summaries["value"] = list(map(int, summaries["value"]))
            summaries = summaries.sort_values("freq", ascending=False)
            summaries = list(summaries["value"])
            # exclude zero state
            if exclude_zero is True and len(summaries) > 1:
                summaries = [s for s in summaries if s != 0]
            # keep only states included in the configured valid states
            summaries = [v for v in summaries if v in set(features_states_values[f_name])]
            # limit number of states
            max_cnt = max(int(len(summaries) * max_categories_frac), min_categories_num)
            # set states
            categories_dic[f_name] = summaries[0:max_cnt]
            labels_dic[f_name] = [f_name + "_" + str(c) for c in categories_dic[f_name]]
            dtypes_dic = {**dtypes_dic,
                          **dict(zip(labels_dic[f_name],
                                     [pd.Series(dtype='i') for _ in range(len(categories_dic[f_name]))]))}
            dummies += labels_dic[f_name]
    dtypes_dic = pd.DataFrame(dtypes_dic).dtypes
    # print
    print("Total Categorical Variables : ", len(categories_dic.keys()),
          "; Total Number of Dummy Variables: ", sum([len(categories_dic[f_name]) for f_name in categories_dic.keys()]))
    # return the list of dummy labels so it can be stored as features_types_group["DUMMIES"]
    return categories_dic, labels_dic, dtypes_dic, dummies
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Select categories by order of frequency, limited by max_categories_frac & min_categories_num
<br/><font style="font-weight:bold;color:red">Configure:</font> The input arguments are:
- Specify the maximum fraction of a feature's categories to keep
<br/> → max_categories_frac
- Specify the minimum number of categories a feature must keep
<br/>	 → min_categories_num
- Specify to exclude the state '0' (zero). State zero in our features represents 'any other state', including NULL
<br/>	 → exclude_zero = False
|
max_categories_frac = 0.90
min_categories_num = 1
exclude_zero = False # if possible remove state zero
categories_dic, labels_dic, dtypes_dic, features_types_group["DUMMIES"] = \
factorise_settings(max_categories_frac, min_categories_num, exclude_zero)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Manually add dummy variables to the dataframe & remove the original Categorical variables
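The heavy lifting is done by the T-CARER helper `preprocess.factoring_feature_wise` in the next cell; purely as an illustration of the idea (one 0/1 indicator per selected state, then the original column is dropped), a hypothetical pandas equivalent might look like:

```python
import pandas as pd

def add_dummies_for_selected_categories(df, categories_dic, labels_dic):
    """Illustrative sketch: replace each categorical column by indicators for its selected states."""
    out = df.copy()
    for f_name, categories in categories_dic.items():
        for category, label in zip(categories, labels_dic[f_name]):
            out[label] = (out[f_name] == category).astype(int)
        out = out.drop(columns=[f_name])
    return out

# Tiny example: one categorical column with two selected states.
demo = pd.DataFrame({"colour": [1, 2, 1, 3]})
print(add_dummies_for_selected_categories(demo, {"colour": [1, 2]},
                                           {"colour": ["colour_1", "colour_2"]}))
```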
|
features["train_indep_temp"] = preprocess.factoring_feature_wise(features["train_indep"], categories_dic, labels_dic, dtypes_dic, threaded=False)
features["test_indep_temp"] = preprocess.factoring_feature_wise(features["test_indep"], categories_dic, labels_dic, dtypes_dic, threaded=False)
# print
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Verify features visually
|
display(pd.concat([features["train_id"].head(), features["train_target"].head(), features["train_indep_temp"].head()], axis=1))
display(pd.concat([features["test_id"].head(), features["test_target"].head(), features["test_indep_temp"].head()], axis=1))
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Set
|
features["train_indep"] = features["train_indep_temp"].copy(True)
features["test_indep"] = features["test_indep_temp"].copy(True)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:red">Clean-Up</font>
|
del features["train_indep_temp"]
del features["test_indep_temp"]
gc.collect()
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
6.2. Remove - Near Zero Variance
Optional: Remove more features with near zero variance, after the factorisation step.
<font style="font-weight:bold;color:red">Configure:</font> the function
|
# the cutoff for the percentage of distinct values out of the number of total samples (upper limit). e.g. 10 * 100 / 100
thresh_unique_cut = 100
# the cutoff for the ratio of the most common value to the second most common value (lower limit). eg. 95/5
thresh_freq_cut = 1000
excludes = []
file_name = "Step_06_Preprocess_NZV_config"
features["train_indep"], o_summaries = preprocess.near_zero_var_df(df=features["train_indep"],
excludes=excludes,
file_name=file_name,
thresh_unique_cut=thresh_unique_cut,
thresh_freq_cut=thresh_freq_cut,
to_search=True)
file_name = "Step_06_Preprocess_NZV"
readers_writers.save_text(path=CONSTANTS.io_path, title=file_name, data=o_summaries, append=False, ext="log")
file_name = "Step_06_Preprocess_NZV_config"
features["test_indep"], o_summaries = preprocess.near_zero_var_df(df=features["test_indep"],
excludes=excludes,
file_name=file_name,
thresh_unique_cut=thresh_unique_cut,
thresh_freq_cut=thresh_freq_cut,
to_search=False)
# print
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
6.3. Remove Highly Linearly Correlated
Optional: Remove more highly linearly correlated features after the factorisation step.
<font style="font-weight:bold;color:red">Configure:</font> the function
|
# A numeric value for the pair-wise absolute correlation cutoff. e.g. 0.95
thresh_corr_cut = 0.95
excludes = []
file_name = "Step_06_Preprocess_Corr_config"
features["train_indep"], o_summaries = preprocess.high_linear_correlation_df(df=features["train_indep"],
excludes=excludes,
file_name=file_name,
thresh_corr_cut=thresh_corr_cut,
to_search=True)
file_name = "Step_06_Preprocess_Corr"
readers_writers.save_text(path=CONSTANTS.io_path, title=file_name, data=o_summaries, append=False, ext="log")
file_name = "Step_06_Preprocess_Corr_config"
features["test_indep"], o_summaries = preprocess.high_linear_correlation_df(df=features["test_indep"],
excludes=excludes,
file_name=file_name,
thresh_corr_cut=thresh_corr_cut,
to_search=False)
# print
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
6.4. Descriptive Statistics
Produce a descriptive stat report of 'Categorical', 'Continuous', & 'TARGET' features
|
# columns
file_name = "Step_06_4_Data_ColumnNames_Train"
readers_writers.save_csv(path=CONSTANTS.io_path, title=file_name,
data=list(features["train_indep"].columns.values), append=False)
# Sample - Train
file_name = "Step_06_4_Stats_Categorical_Train"
o_stats = preprocess.stats_discrete_df(df=features["train_indep"], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_06_4_Stats_Continuous_Train"
o_stats = preprocess.stats_continuous_df(df=features["train_indep"], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
# Sample - Test
file_name = "Step_06_4_Stats_Categorical_Test"
o_stats = preprocess.stats_discrete_df(df=features["test_indep"], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_06_4_Stats_Continuous_Test"
o_stats = preprocess.stats_continuous_df(df=features["test_indep"], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
6.5. Transformations
Verify features visually
|
display(pd.concat([features["train_id"].head(), features["train_target"].head(), features["train_indep"].head()], axis=1))
display(pd.concat([features["test_id"].head(), features["test_target"].head(), features["test_indep"].head()], axis=1))
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:blue">Tranformation:</font> scale
<font style="font-weight:bold;color:brown">Note:</font>: It is highly resource intensive
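The internals of `preprocess.transform_df` are not shown here; assuming it wraps a standard scaler, the pattern of fitting on the train sample and reusing the fitted parameters on the test sample (which is what passing `method_args` from the first call to the second suggests) looks like this in plain scikit-learn; the data frames are made up for illustration:

```python
import pandas as pd
from sklearn.preprocessing import StandardScaler

# Fit on the training sample only, then apply the same centring/scaling to the test sample.
scaler = StandardScaler(with_mean=True)
train_demo = pd.DataFrame({"age": [20, 35, 50, 65], "visits": [1, 3, 2, 8]})
test_demo = pd.DataFrame({"age": [30, 40], "visits": [2, 5]})
train_scaled = pd.DataFrame(scaler.fit_transform(train_demo), columns=train_demo.columns)
test_scaled = pd.DataFrame(scaler.transform(test_demo), columns=test_demo.columns)
print(train_scaled.round(2))
print(test_scaled.round(2))
```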
|
transform_type = "scale"
kwargs = {"with_mean": True}
method_args = dict()
excludes = list(features_types_group["CATEGORICAL"]) + list(features_types_group["DUMMIES"])
features["train_indep"], method_args = preprocess.transform_df(df=features["train_indep"], excludes=excludes,
transform_type=transform_type, threaded=False,
method_args=method_args, **kwargs)
features["test_indep"], _ = preprocess.transform_df(df=features["test_indep"], excludes=excludes,
transform_type=transform_type, threaded=False,
method_args=method_args, **kwargs)
# print("Metod arguments:", method_args)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:blue">Transformation:</font> Yeo-Johnson
<font style="font-weight:bold;color:brown">Note:</font> It is highly resource intensive
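For reference, the standard Yeo-Johnson transform of a value $y$ with parameter $\lambda$ (the cell below fixes lmbda = -0.5) is:
<br/>$\displaystyle{\psi(y,\lambda)=\begin{cases}\dfrac{(y+1)^{\lambda}-1}{\lambda} & y\ge 0,\ \lambda\ne 0\\ \ln(y+1) & y\ge 0,\ \lambda=0\\ -\dfrac{(1-y)^{2-\lambda}-1}{2-\lambda} & y<0,\ \lambda\ne 2\\ -\ln(1-y) & y<0,\ \lambda=2\end{cases}}$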
|
transform_type = "yeo_johnson"
kwargs = {"lmbda": -0.5, "derivative": 0, "epsilon": np.finfo(np.float).eps, "inverse": False}
method_args = dict()
excludes = list(features_types_group["CATEGORICAL"]) + list(features_types_group["DUMMIES"])
features["train_indep"], method_args = preprocess.transform_df(df=features["train_indep"], excludes=excludes,
transform_type=transform_type, threaded=False,
method_args=method_args, **kwargs)
features["test_indep"], _ = preprocess.transform_df(df=features["test_indep"], excludes=excludes,
transform_type=transform_type, threaded=False,
method_args=method_args, **kwargs)
# print("Metod arguments:", method_args)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
6.6. Summary Statistics
Produce a descriptive stat report of 'Categorical', 'Continuous', & 'TARGET' features
|
# Statistics report for 'Categorical', 'Continuous', & 'TARGET' variables
# columns
file_name = "Step_06_6_Data_ColumnNames_Train"
readers_writers.save_csv(path=CONSTANTS.io_path, title=file_name,
data=list(features["train_indep"].columns.values), append=False)
# Sample - Train
file_name = "Step_06_6_Stats_Categorical_Train"
o_stats = preprocess.stats_discrete_df(df=features["train_indep"], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_06_6_Stats_Continuous_Train"
o_stats = preprocess.stats_continuous_df(df=features["train_indep"], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
# Sample - Test
file_name = "Step_06_6_Stats_Categorical_Test"
o_stats = preprocess.stats_discrete_df(df=features["test_indep"], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_06_6_Stats_Continuous_Test"
o_stats = preprocess.stats_continuous_df(df=features["test_indep"], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<br/><br/>
7. Rank & Select Features
<font style="font-weight:bold;color:red">Configure:</font> the general settings
|
# select the target variable
target_feature = "label365" # "label30", "label365"
# number of trials
num_trials = 1
model_rank = dict()
o_summaries_df = dict()
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
7.1. Define
<font style="font-weight:bold;color:blue">Ranking Method:</font> Random forest classifier (Breiman)
<br/>Define a set of classifiers with different settings, to be used in feature ranking trials.
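The wrapper `feature_selection.rank_random_forest_breiman` is part of T-CARER and is not reproduced here; its keyword arguments match scikit-learn's RandomForestClassifier, so a minimal stand-alone sketch of one ranking run (with made-up data) would be:

```python
import pandas as pd
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

# One ranking run: fit a forest and order the features by impurity-based importance.
X, y = make_classification(n_samples=500, n_features=8, random_state=0)
X = pd.DataFrame(X, columns=["feat_%d" % i for i in range(8)])
model = RandomForestClassifier(n_estimators=10, criterion="gini", n_jobs=-1)
model.fit(X.values, y)
ranking = (pd.DataFrame({"Name": X.columns, "Importance": model.feature_importances_})
           .sort_values("Importance", ascending=False)
           .reset_index(drop=True))
ranking["Order"] = range(1, len(ranking) + 1)
print(ranking)
```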
|
def rank_random_forest_brieman(features_indep_arg, features_target_arg, num_trials):
    num_settings = 3
    o_summaries_df = [pd.DataFrame({'Name': list(features_indep_arg.columns.values)}) for _ in range(num_trials * num_settings)]
    model_rank = [None] * (num_trials * num_settings)
    # trials
    for i in range(num_trials):
        print("Trial: " + str(i))
        # setting-1
        s_i = i
        model_rank[s_i] = feature_selection.rank_random_forest_breiman(
            features_indep_arg.values, features_target_arg.values,
            **{"n_estimators": 10, "criterion": 'gini', "max_depth": None, "min_samples_split": 2, "min_samples_leaf": 1,
               "min_weight_fraction_leaf": 0.0, "max_features": 'auto', "max_leaf_nodes": None, "bootstrap": True,
               "oob_score": False, "n_jobs": -1, "random_state": None, "verbose": 0, "warm_start": False, "class_weight": None})
        # setting-2
        s_i = num_trials + i
        model_rank[s_i] = feature_selection.rank_random_forest_breiman(
            features_indep_arg.values, features_target_arg.values,
            **{"n_estimators": 10, "criterion": 'gini', "max_depth": None, "min_samples_split": 50, "min_samples_leaf": 25,
               "min_weight_fraction_leaf": 0.0, "max_features": 'auto', "max_leaf_nodes": None, "bootstrap": True,
               "oob_score": False, "n_jobs": -1, "random_state": None, "verbose": 0, "warm_start": False, "class_weight": None})
        # setting-3
        s_i = (num_trials * 2) + i
        model_rank[s_i] = feature_selection.rank_random_forest_breiman(
            features_indep_arg.values, features_target_arg.values,
            **{"n_estimators": 10, "criterion": 'gini', "max_depth": None, "min_samples_split": 40, "min_samples_leaf": 20,
               "min_weight_fraction_leaf": 0.0, "max_features": 'auto', "max_leaf_nodes": None, "bootstrap": True,
               "oob_score": False, "n_jobs": -1, "random_state": None, "verbose": 0, "warm_start": True, "class_weight": None})
    for i in range(num_trials * num_settings):
        o_summaries_df[i]['Importance'] = list(model_rank[i].feature_importances_)
        o_summaries_df[i] = o_summaries_df[i].sort_values(['Importance'], ascending=[False])
        o_summaries_df[i] = o_summaries_df[i].reset_index(drop=True)
        o_summaries_df[i]['Order'] = range(1, len(o_summaries_df[i]['Importance']) + 1)
    return model_rank, o_summaries_df
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:blue">Ranking Method:</font> Gradient Boosted Regression Trees (GBRT)
<br/>Define a set of classifiers with different settings, to be used in feature ranking trials.
|
def rank_gbrt(features_indep_arg, features_target_arg, num_trials):
    num_settings = 3
    o_summaries_df = [pd.DataFrame({'Name': list(features_indep_arg.columns.values)}) for _ in range(num_trials * num_settings)]
    model_rank = [None] * (num_trials * num_settings)
    # trials
    for i in range(num_trials):
        print("Trial: " + str(i))
        # setting-1
        s_i = i
        model_rank[s_i] = feature_selection.rank_tree_gbrt(
            features_indep_arg.values, features_target_arg.values,
            **{"loss": 'ls', "learning_rate": 0.1, "n_estimators": 100, "subsample": 1.0, "min_samples_split": 2, "min_samples_leaf": 1,
               "min_weight_fraction_leaf": 0.0, "max_depth": 10, "init": None, "random_state": None, "max_features": None, "alpha": 0.9,
               "verbose": 0, "max_leaf_nodes": None, "warm_start": False, "presort": True})
        # setting-2
        s_i = num_trials + i
        model_rank[s_i] = feature_selection.rank_tree_gbrt(
            features_indep_arg.values, features_target_arg.values,
            **{"loss": 'ls', "learning_rate": 0.1, "n_estimators": 100, "subsample": 1.0, "min_samples_split": 2, "min_samples_leaf": 1,
               "min_weight_fraction_leaf": 0.0, "max_depth": 5, "init": None, "random_state": None, "max_features": None, "alpha": 0.9,
               "verbose": 0, "max_leaf_nodes": None, "warm_start": False, "presort": True})
        # setting-3
        s_i = (num_trials * 2) + i
        model_rank[s_i] = feature_selection.rank_tree_gbrt(
            features_indep_arg.values, features_target_arg.values,
            **{"loss": 'ls', "learning_rate": 0.1, "n_estimators": 100, "subsample": 1.0, "min_samples_split": 2, "min_samples_leaf": 1,
               "min_weight_fraction_leaf": 0.0, "max_depth": 3, "init": None, "random_state": None, "max_features": None, "alpha": 0.9,
               "verbose": 0, "max_leaf_nodes": None, "warm_start": False, "presort": True})
    for i in range(num_trials * num_settings):
        o_summaries_df[i]['Importance'] = list(model_rank[i].feature_importances_)
        o_summaries_df[i] = o_summaries_df[i].sort_values(['Importance'], ascending=[False])
        o_summaries_df[i] = o_summaries_df[i].reset_index(drop=True)
        o_summaries_df[i]['Order'] = range(1, len(o_summaries_df[i]['Importance']) + 1)
    return model_rank, o_summaries_df
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:blue">Ranking Method:</font> Randomized Logistic Regression
<br/>Define a set of classifiers with different settings, to be used in feature ranking trials.
|
def rank_randLogit(features_indep_arg, features_target_arg, num_trials):
    num_settings = 3
    o_summaries_df = [pd.DataFrame({'Name': list(features_indep_arg.columns.values)}) for _ in range(num_trials * num_settings)]
    model_rank = [None] * (num_trials * num_settings)
    # trials
    for i in range(num_trials):
        print("Trial: " + str(i))
        # setting-1
        s_i = i
        model_rank[s_i] = feature_selection.rank_random_logistic_regression(
            features_indep_arg.values, features_target_arg.values,
            **{"C": 1, "scaling": 0.5, "sample_fraction": 0.75, "n_resampling": 200, "selection_threshold": 0.25, "tol": 0.001,
               "fit_intercept": True, "verbose": False, "normalize": True, "random_state": None, "n_jobs": 1, "pre_dispatch": '3*n_jobs'})
        # setting-2
        s_i = num_trials + i
        model_rank[s_i] = feature_selection.rank_random_logistic_regression(
            features_indep_arg.values, features_target_arg.values,
            **{"C": 1, "scaling": 0.5, "sample_fraction": 0.50, "n_resampling": 200, "selection_threshold": 0.25, "tol": 0.001,
               "fit_intercept": True, "verbose": False, "normalize": True, "random_state": None, "n_jobs": 1, "pre_dispatch": '3*n_jobs'})
        # setting-3
        s_i = (num_trials * 2) + i
        model_rank[s_i] = feature_selection.rank_random_logistic_regression(
            features_indep_arg.values, features_target_arg.values,
            **{"C": 1, "scaling": 0.5, "sample_fraction": 0.90, "n_resampling": 200, "selection_threshold": 0.25, "tol": 0.001,
               "fit_intercept": True, "verbose": False, "normalize": True, "random_state": None, "n_jobs": 1, "pre_dispatch": '3*n_jobs'})
    for i in range(num_trials * num_settings):
        o_summaries_df[i]['Importance'] = list(model_rank[i].scores_)
        o_summaries_df[i] = o_summaries_df[i].sort_values(['Importance'], ascending=[False])
        o_summaries_df[i] = o_summaries_df[i].reset_index(drop=True)
        o_summaries_df[i]['Order'] = range(1, len(o_summaries_df[i]['Importance']) + 1)
    return model_rank, o_summaries_df
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
7.2. Run
Run one or more feature ranking methods and trials
<font style="font-weight:bold;color:blue">Ranking Method:</font> Random forest classifier (Breiman)
<font style="font-weight:bold;color:brown">Note:</font> It is moderately resource intensive
|
rank_model = "rfc"
model_rank[rank_model] = dict()
o_summaries_df[rank_model] = dict()
model_rank[rank_model], o_summaries_df[rank_model] = rank_random_forest_brieman(
features["train_indep"], features["train_target"][target_feature], num_trials)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:blue">Ranking Method:</font> Gradient Boosted Regression Trees (GBRT)
<font style="font-weight:bold;color:brown">Note:</font> It is moderately resource intensive
|
rank_model = "gbrt"
model_rank[rank_model] = dict()
o_summaries_df[rank_model] = dict()
model_rank[rank_model], o_summaries_df[rank_model] = rank_gbrt(
features["train_indep"], features["train_target"][target_feature], num_trials)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:blue">Ranking Method</font>: Randomized Logistic Regression
<font style="font-weight:bold;color:brown">Note:</font> It is moderately resource intensive
|
rank_model = "randLogit"
model_rank[rank_model] = dict()
o_summaries_df[rank_model] = dict()
model_rank[rank_model], o_summaries_df[rank_model] = rank_randLogit(
features["train_indep"], features["train_target"][target_feature], num_trials)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
7.3. Summaries
|
# combine scores
def rank_summarise(features_arg, o_summaries_df_arg):
    summaries_temp = {'Order_avg': [], 'Order_max': [], 'Order_min': [], 'Importance_avg': []}
    for f_name in list(features_arg.columns.values):
        summary_order = []  # reset per feature, so the averages use only this feature's ranks
        summary_importance = []
        for i in range(len(o_summaries_df_arg)):
            summary_order.append(o_summaries_df_arg[i][o_summaries_df_arg[i]['Name'] == f_name]['Order'].values)
            summary_importance.append(o_summaries_df_arg[i][o_summaries_df_arg[i]['Name'] == f_name]['Importance'].values)
        summaries_temp['Order_avg'].append(statistics.mean(np.concatenate(summary_order)))
        summaries_temp['Order_max'].append(max(np.concatenate(summary_order)))
        summaries_temp['Order_min'].append(min(np.concatenate(summary_order)))
        summaries_temp['Importance_avg'].append(statistics.mean(np.concatenate(summary_importance)))
    summaries_df = pd.DataFrame({'Name': list(features_arg.columns.values)})
    summaries_df['Order_avg'] = summaries_temp['Order_avg']
    summaries_df['Order_max'] = summaries_temp['Order_max']
    summaries_df['Order_min'] = summaries_temp['Order_min']
    summaries_df['Importance_avg'] = summaries_temp['Importance_avg']
    summaries_df = summaries_df.sort_values(['Order_avg'], ascending=[True])
    return summaries_df

# combine scores
summaries_df = dict()
for rank_model in o_summaries_df.keys():
    summaries_df[rank_model] = dict()
    summaries_df[rank_model] = rank_summarise(features["train_indep"], o_summaries_df[rank_model])
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Save
|
for rank_model in model_rank.keys():
    file_name = "Step_07_Model_Train_model_rank_" + rank_model
    readers_writers.save_serialised_compressed(path=CONSTANTS.io_path, title=file_name, objects=model_rank[rank_model])
    file_name = "Step_07_Model_Train_model_rank_summaries_" + rank_model
    readers_writers.save_serialised_compressed(path=CONSTANTS.io_path, title=file_name, objects=o_summaries_df[rank_model])
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
7.4. Select Top Features
<font style="font-weight:bold;color:red">Configure:</font> the selection method
|
rank_model = "rfc"
file_name = "Step_07_Top_Features_" + rank_model
rank_top_features_max = 400
rank_top_features_score_min = 0.1 * (10 ** -20)
# sort features
features_names_selected = summaries_df[rank_model]['Name'][summaries_df[rank_model]['Order_avg'] >= rank_top_features_score_min]
features_names_selected = (features_names_selected[0:rank_top_features_max]).tolist()
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Save
|
# save to CSV
readers_writers.save_csv(path=CONSTANTS.io_path, title=file_name, data=features_names_selected, append=False, header=False)
# print
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
print("List of sorted features, which can be modified:\n " + CONSTANTS.io_path + file_name + "csv")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:red">Configure</font>: the selected feature manually if it isnecessary!
|
file_name = "Step_07_Top_Features_rfc_adhoc"
features_names_selected = readers_writers.load_csv(path=CONSTANTS.io_path, title=file_name, dataframing=False)[0]
features_names_selected = [f.replace("\n", "") for f in features_names_selected]
display(pd.DataFrame(features_names_selected))
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Verify the top features visually
|
# print
print("Number of columns: ", len(features["train_indep"].columns),
";\nNumber of top columns: ", len(features["train_indep"][features_names_selected].columns))
print("features: {train: ", len(features["train_indep"][features_names_selected]), ", test: ", len(features["test_indep"][features_names_selected]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
7.5. Summary Statistics
Produce a descriptive stat report of 'Categorical', 'Continuous', & 'TARGET' features
|
# columns
file_name = "Step_07_Data_ColumnNames_Train"
readers_writers.save_csv(path=CONSTANTS.io_path, title=file_name,
data=list(features["train_indep"][features_names_selected].columns.values), append=False)
# Sample - Train
file_name = "Step_07_Stats_Categorical_Train"
o_stats = preprocess.stats_discrete_df(df=features["train_indep"][features_names_selected], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_07_Stats_Continuous_Train"
o_stats = preprocess.stats_continuous_df(df=features["train_indep"][features_names_selected], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
# Sample - Test
file_name = "Step_07_Stats_Categorical_Test"
o_stats = preprocess.stats_discrete_df(df=features["test_indep"][features_names_selected], includes=features_types_group["CATEGORICAL"],
file_name=file_name)
file_name = "Step_07_Stats_Continuous_Test"
o_stats = preprocess.stats_continuous_df(df=features["test_indep"][features_names_selected], includes=features_types_group["CONTINUOUS"],
file_name=file_name)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
7.6. Save Features
|
file_name = "Step_07_Features"
readers_writers.save_serialised_compressed(path=CONSTANTS.io_path, title=file_name, objects=features)
# print
print("File size: ", os.stat(os.path.join(CONSTANTS.io_path, file_name + ".bz2")).st_size)
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<br/><br/>
<br/><br/>
8. Model
<font style="font-weight:bold;color:orange">Load a Saved Samples and Features Ranking:</font>
<br/> It is an optional step. The step loads the serialised & compressed outputs of Step-7.
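The `readers_writers` helper belongs to T-CARER; judging from the `.bz2` extension used in the size check below, "serialised & compressed" most likely means a bzip2-compressed pickle, for which a minimal stand-alone sketch (hypothetical file name) is:

```python
import bz2
import pickle

def save_compressed(path, obj):
    """Write an object as a bzip2-compressed pickle (illustrative sketch)."""
    with bz2.open(path, "wb") as handle:
        pickle.dump(obj, handle)

def load_compressed(path):
    with bz2.open(path, "rb") as handle:
        return pickle.load(handle)

save_compressed("Step_07_Features_demo.bz2", {"train_indep": [1, 2, 3]})
print(load_compressed("Step_07_Features_demo.bz2"))
```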
|
# open features
file_name = "Step_07_Features"
features = readers_writers.load_serialised_compressed(path=CONSTANTS.io_path, title=file_name)
# print
print("File size: ", os.stat(os.path.join(CONSTANTS.io_path, file_name + ".bz2")).st_size)
print("Number of columns: ", len(features["train_indep"].columns))
print("features: {train: ", len(features["train_indep"]), ", test: ", len(features["test_indep"]), "}")
# open scoring model files
rank_models = ["rfc", "gbrt", "randLogit"]
model_rank = dict()
o_summaries_df = dict()
for rank_model in rank_models:
    file_name = "Step_07_Model_Train_model_rank_" + rank_model
    if not readers_writers.exists_serialised(path=CONSTANTS.io_path, title=file_name, ext="bz2"):
        continue
    file_name = "Step_07_Model_Train_model_rank_" + rank_model
    model_rank[rank_model] = readers_writers.load_serialised_compressed(path=CONSTANTS.io_path, title=file_name)
    file_name = "Step_07_Model_Train_model_rank_summaries_" + rank_model
    o_summaries_df[rank_model] = readers_writers.load_serialised_compressed(path=CONSTANTS.io_path, title=file_name)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<br/><br/>
8.1. Initialise
8.1.1. Algorithms
<font style="font-weight:bold;color:red">Configure:</font> the training algorithm
<font style="font-weight:bold;color:brown">Algorithm 1</font>: Random Forest
|
method_name = "rfc"
kwargs = {"n_estimators": 20, "criterion": 'gini', "max_depth": None, "min_samples_split": 100,
"min_samples_leaf": 50, "min_weight_fraction_leaf": 0.0, "max_features": 'auto',
"max_leaf_nodes": None, "bootstrap": True, "oob_score": False, "n_jobs": -1, "random_state": None,
"verbose": 0, "warm_start": False, "class_weight": "balanced_subsample"}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:brown">Algorithm 2</font>: Logistic Regression
|
method_name = "lr"
kwargs = {"penalty": 'l1', "dual": False, "tol": 0.0001, "C": 1, "fit_intercept": True, "intercept_scaling": 1,
"class_weight": None, "random_state": None, "solver": 'liblinear', "max_iter": 100, "multi_class": 'ovr',
"verbose": 0, "warm_start": False, "n_jobs": -1}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:brown">Algorithm 3</font>: Logistic Cross-Validation
|
method_name = "lr_cv"
kwargs = {"Cs": 10, "fit_intercept": True, "cv": None, "dual": False, "penalty": 'l2', "scoring": None,
"solver": 'lbfgs', "tol": 0.0001, "max_iter": 10, "class_weight": None, "n_jobs": -1, "verbose": 0,
"refit": True, "intercept_scaling": 1.0, "multi_class": "ovr", "random_state": None}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:brown">Algorithm 4</font>: Neural Network
|
method_name = "nn"
kwargs = {"solver": 'lbfgs', "alpha": 1e-5, "hidden_layer_sizes": (5, 2), "random_state": 1}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:brown">Algorithm 5</font>: k-Nearest Neighbourhood
|
method_name = "knc"
kwargs = {"n_neighbors": 5, "weights": 'distance', "algorithm": 'auto', "leaf_size": 30,
"p": 2, "metric": 'minkowski', "metric_params": None, "n_jobs": -1}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:brown">Algorithm 6</font>: Decision Tree
|
method_name = "dtc"
kwargs = {"criterion": 'gini', "splitter": 'best', "max_depth": None, "min_samples_split": 30,
"min_samples_leaf": 30, "min_weight_fraction_leaf": 0.0, "max_features": None,
"random_state": None, "max_leaf_nodes": None, "class_weight": None, "presort": False}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:brown">Algorithm 7</font>: Gradient Boosting Classifier
|
method_name = "gbc"
kwargs = {"loss": 'deviance', "learning_rate": 0.1, "n_estimators": 100, "subsample": 1.0, "min_samples_split": 30,
"min_samples_leaf": 30, "min_weight_fraction_leaf": 0.0, "max_depth": 3, "init": None, "random_state": None,
"max_features": None, "verbose": 0, "max_leaf_nodes": None, "warm_start": False, "presort": 'auto'}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<font style="font-weight:bold;color:brown">Algorithm 8</font>: Naive Bayes<br/>
Note: features must be positive
|
method_name = "nb"
training_method = TrainingMethod(method_name)
kwargs = {"alpha": 1.0, "fit_prior": True, "class_prior": None}
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
<br/><br/>
8.1.2. Other Settings
<font style="font-weight:bold;color:red">Configure:</font> other modelling settings
|
# select the target variable
target_feature = "label365" # "label30" , "label365"
# file name
file_name = "Step_09_Model_" + method_name + "_" + target_feature
# initialise
training_method = TrainingMethod(method_name)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
8.1.3. Features
|
sample_train = features["train_indep"][features_names_selected] # features["train_indep"][features_names_selected], features["train_indep"]
sample_train_target = features["train_target"][target_feature] # features["train_target"][target_feature]
sample_test = features["test_indep"][features_names_selected] # features["test_indep"][features_names_selected], features["test_indep"]
sample_test_target = features["test_target"][target_feature] # features["test_target"][target_feature]
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
8.3. Fit
Fit the model, using the train sample
|
o_summaries = dict()
# Fit
model = training_method.train(sample_train, sample_train_target, **kwargs)
training_method.save_model(path=CONSTANTS.io_path, title=file_name)
# load model
# training_method.load(path=CONSTANTS.io_path, title=file_name)
# short summary
o_summaries = training_method.train_summaries()
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Predict & report performance, using the train sample
|
o_summaries = dict()
# predict
model = training_method.predict(sample_train, "train")
# short summary
o_summaries = training_method.predict_summaries(pd.Series(sample_train_target), "train")
# Print the main performance statistics
for k in o_summaries.keys():
    print(k, o_summaries[k])
# Print a selection of statistics by risk band
o_summaries = training_method.predict_summaries_risk_bands(pd.Series(sample_train_target), "train", np.arange(0, 1.05, 0.05))
display(o_summaries)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
8.4. Predict
Predict & report performance, using the test sample
|
o_summaries = dict()
# predict
model = training_method.predict(sample_test, "test")
# short summary
o_summaries = training_method.predict_summaries(pd.Series(sample_test_target), "test")
# Print the main performance statistics
for k in o_summaries.keys():
    print(k, o_summaries[k])
# Print a selection of statistics by risk band
o_summaries = training_method.predict_summaries_risk_bands(pd.Series(sample_test_target), "test", np.arange(0, 1.05, 0.05))
display(o_summaries)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
8.5. Cross-Validation
Perform k-fold cross-validation
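`training_method.cross_validate` is a T-CARER wrapper; a bare scikit-learn sketch of 10-fold cross-validation with the same scoring string (estimator and data are illustrative, not the fitted model above) would be:

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score

# 10-fold cross-validation, scored with negative mean squared error.
X, y = make_classification(n_samples=300, n_features=6, random_state=0)
estimator = LogisticRegression(max_iter=200)
scores = cross_val_score(estimator, X, y, scoring="neg_mean_squared_error", cv=10)
print("Scores:", np.round(scores, 3), "mean:", round(scores.mean(), 3))
```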
|
o_summaries = dict()
score = training_method.cross_validate(sample_test, sample_test_target, scoring="neg_mean_squared_error", cv=10)
# short summary
o_summaries = training_method.cross_validate_summaries()
print("Scores: ", o_summaries)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
8.6. Save
Save the training model.
|
training_method.save_model(path=CONSTANTS.io_path, title=file_name)
|
TCARER_Basic.ipynb
|
mesgarpour/T-CARER
|
apache-2.0
|
Computing the adjacency matrix
Compute the mean radius that defines Rome within the ring road (raccordo anulare)
NB: it should be verified that the Euclidean distance does not cause too many problems
|
colosseo = (41.890173, 12.492331)
raccordo = [(41.914456, 12.615807),(41.990672, 12.502714),(41.793883, 12.511297),(41.812566, 12.396628),(41.956277, 12.384611)]
raggi = []
def geodesicDistance(A, B=colosseo):
    return geopy.distance.vincenty(A, B).meters

raggioTerra = 6372795

def euclidDistance(A, B=colosseo):
    latitudine1 = math.radians(A[0])
    latitudine2 = math.radians(B[0])
    longitudine1 = math.radians(A[1])
    longitudine2 = math.radians(B[1])
    x1 = raggioTerra*math.sin(math.pi-latitudine1)*math.cos(longitudine1)
    y1 = raggioTerra*math.sin(math.pi-latitudine1)*math.sin(longitudine1)
    z1 = raggioTerra*math.cos(math.pi-latitudine1)
    x2 = raggioTerra*math.sin(math.pi-latitudine2)*math.cos(longitudine2)
    y2 = raggioTerra*math.sin(math.pi-latitudine2)*math.sin(longitudine2)
    z2 = raggioTerra*math.cos(math.pi-latitudine2)
    return math.sqrt((x1-x2)**2+(y1-y2)**2+(z1-z2)**2)
raggi = map(geodesicDistance, raccordo)
print raggi
raggi1= []
raggi1 = map(euclidDistance, raccordo)
print raggi1
raggiomedioGeo = 0
raggiomedioEuclid = 0
for i in raggi:
    raggiomedioGeo += i
for i in raggi1:
    raggiomedioEuclid += i
raggiomedioGeo /= len(raggi)
raggiomedioEuclid /= len(raggi1)
print raggiomedioGeo
print raggiomedioEuclid
|
src/Adiacenza, grafo e grado.ipynb
|
FedericoMuciaccia/SistemiComplessi
|
mit
|
Populate the dataframe and do a first coarse filtering
|
dataframe = pandas.read_csv("/home/protoss/Documenti/Siscomp_datas/data/cell_towers.csv")
#dataframe = pandas.read_csv("/home/protoss/Documenti/SistemiComplessi/data/cell_towers_diff-2016012100.csv")
#dataframe
criterioMCC = dataframe.mcc == 222
criterioMinsamples = dataframe.samples > 1
italydoitcleaner = dataframe[criterioMCC & criterioMinsamples]
italydoitcleaner
italydoitcleaner = italydoitcleaner.reset_index(drop=True)
italydoitcleaner.drop(italydoitcleaner.columns[[1, 3, 5, 10, 11, 12, 13]], axis = 1, inplace=True)
#italydoitcleaner
|
src/Adiacenza, grafo e grado.ipynb
|
FedericoMuciaccia/SistemiComplessi
|
mit
|
Select the antennas in Rome and write them to dedicated .csv files
|
#instruction that selects some rows using criteria on some columns,
#picks some of them and returns a numpy array of the desired values
coordinate = dataframe[criterioMCC & criterioMinsamples][['lat', 'lon']].values
%time distanza = numpy.array(map(geodesicDistance, coordinate), dtype = int)
raggiomedioGeo = 12000
italydoitcleaner['distance'] = distanza
criterioRaccordo = italydoitcleaner.distance < raggiomedioGeo
romaCell = italydoitcleaner[criterioRaccordo]
romaCell = romaCell.reset_index(drop=True)
romaCell.to_csv("../../data/Roma_towers.csv", index= False)
criterioTim = romaCell.net == 1
criterioWind = romaCell.net == 88
criterioVoda = romaCell.net == 10
criterioTre = romaCell.net == 99
timCell = romaCell[criterioTim]
timCell = timCell.reset_index(drop=True)
timCell.to_csv("../../data/Tim_towers.csv", index= False)
windCell = romaCell[criterioWind]
windCell = windCell.reset_index(drop=True)
windCell.to_csv("../../data/Wind_towers.csv", index= False)
vodaCell = romaCell[criterioVoda]
vodaCell = vodaCell.reset_index(drop=True)
vodaCell.to_csv("../../data/Vodafone_towers.csv", index= False)
treCell = romaCell[criterioTre]
treCell = treCell.reset_index(True)
treCell.to_csv("../../data/Tre_towers.csv", index= False)
#instruction that selects some rows using criteria on some columns,
#and returns a numpy array of the desired values
coordinate = dataframe[criterioMCC & criterioMinsamples][['lat', 'lon']].values
%time distanza = numpy.array(map(euclidDistance, coordinate), dtype=int)
raggiomedioEuclid = 12000
italydoitcleaner['distance'] = distanza
criterioRaccordo = italydoitcleaner.distance < raggiomedioEuclid
romaCell = italydoitcleaner[criterioRaccordo]
romaCell = romaCell.reset_index(drop=True)
romaCell.to_csv("../../data/Roma_towersEuc.csv", index= False)
criterioTim = romaCell.net == 1
criterioWind = romaCell.net == 88
criterioVoda = romaCell.net == 10
criterioTre = romaCell.net == 99
timCell = romaCell[criterioTim]
timCell = timCell.reset_index(drop=True)
timCell.to_csv("../../data/Tim_towersEuc.csv", index= False)
windCell = romaCell[criterioWind]
windCell = windCell.reset_index(drop=True)
windCell.to_csv("../../data/Wind_towersEuc.csv", index= False)
vodaCell = romaCell[criterioVoda]
vodaCell = vodaCell.reset_index(drop=True)
vodaCell.to_csv("../../data/Vodafone_towersEuc.csv", index= False)
treCell = romaCell[criterioTre]
treCell = treCell.reset_index(True)
treCell.to_csv("../../data/Tre_towersEuc.csv", index= False)
|
src/Adiacenza, grafo e grado.ipynb
|
FedericoMuciaccia/SistemiComplessi
|
mit
|
Take the Rome antennas and build the adjacency matrix
|
#define the function that computes the adjacency matrix
def matriceSupEuclid(datiCoordinate, datiRaggi):
    a = numpy.zeros((numdati,numdati), dtype=int)
    for i in xrange(numdati):
        for j in xrange(numdati-i-1):
            sommaraggi = datiRaggi[i] + datiRaggi[j+i+1]
            #this is equivalent to an if
            a[i,j+i+1] = a[j+i+1,i] = (euclidDistance(datiCoordinate[i], datiCoordinate[j+i+1]) <= 0.8*sommaraggi)
    return a

#warning: very slow!
def matriceSupGeodetic(datiCoordinate, datiRaggi):
    a = numpy.zeros((numdati,numdati))
    for i in xrange(numdati):
        for j in xrange(numdati-i-1):
            if geodesicDistance(datiCoordinate[i], datiCoordinate[j+i+1]) <= datiRaggi[i] + datiRaggi[j+i+1]:
                a[i,j+i+1] = 1
                a[j+i+1,i] = 1
    return a
gestore = ["Roma", "Tim", "Vodafone", "Wind", "Tre"]
for aziende in gestore:
    dataframe = pandas.read_csv("../../data/{0}_towers.csv".format(aziende))
    coordinate = dataframe[['lat', 'lon']].values
    raggio = dataframe['range'].values
    # loop that would clamp every radius below 500 metres to a minimum of 500 metres
    # for i in range(len(raggio)):
    #     if(raggio[i] < 500):
    #         raggio[i] = 500
    numdati = raggio.size
    #%time adiacenzaGeo = matriceSupGeodetic(coordinate, raggio)
    %time adiacenzaEuclid = matriceSupEuclid(coordinate, raggio)
    numpy.savetxt(("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_{0}.csv".format(aziende)),adiacenzaEuclid, fmt='%d',delimiter=',',newline='\n')
|
src/Adiacenza, grafo e grado.ipynb
|
FedericoMuciaccia/SistemiComplessi
|
mit
|
Build the graph and compute the degree distribution
|
#for azienda in gestore:
#italydoitcleaner['distanze'] = distanza
#romaCell.to_csv("../data/Roma_towers.csv")
adiacenzaRoma = numpy.genfromtxt("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_Roma.csv",delimiter=',',dtype='int')
adiacenzaTim = numpy.genfromtxt("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_Tim.csv",delimiter=',',dtype='int')
adiacenzaVoda = numpy.genfromtxt("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_Vodafone.csv",delimiter=',',dtype='int')
adiacenzaWind = numpy.genfromtxt("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_Wind.csv",delimiter=',',dtype='int')
adiacenzaTre = numpy.genfromtxt("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_Tre.csv",delimiter=',',dtype='int')
%time grafoRoma = networkx.Graph(adiacenzaRoma)
%time grafoTim = networkx.Graph(adiacenzaTim)
%time grafoVoda = networkx.Graph(adiacenzaVoda)
%time grafoWind = networkx.Graph(adiacenzaWind)
%time grafoTre = networkx.Graph(adiacenzaTre)
gradoRoma = grafoRoma.degree().values()
numpy.savetxt("../../data/DistrGrado_Roma",gradoRoma,fmt='%d',newline='\n')
istoGradoRoma = networkx.degree_histogram(grafoRoma)
#numpy.savetxt("../../data/IstoGrado_Roma",istoGradoRoma,fmt='%d',newline='\n')
romaCell["degree"] = gradoRoma
romaCell.to_csv("../../data/Roma_towers.csv", index= False)
gradoTim = grafoTim.degree().values()
numpy.savetxt("../../data/DistrGrado_Tim",gradoTim,fmt='%d',newline='\n')
istoGradoTim = networkx.degree_histogram(grafoTim)
#numpy.savetxt("../../data/IstoGrado_Tim",istoGradoTim,fmt='%d',newline='\n')
timCell["degree"] = gradoTim
timCell.to_csv("../../data/Tim_towers.csv", index= False)
gradoVoda = grafoVoda.degree().values()
numpy.savetxt("../../data/DistrGrado_Vodafone",gradoVoda,fmt='%d',newline='\n')
istoGradoVoda = networkx.degree_histogram(grafoVoda)
#numpy.savetxt("../../data/IstoGrado_Voda",istoGradoVoda,fmt='%d',newline='\n')
vodaCell["degree"] = gradoVoda
vodaCell.to_csv("../../data/Vodafone_towers.csv", index= False)
gradoWind = grafoWind.degree().values()
numpy.savetxt("../../data/DistrGrado_Wind",gradoWind,fmt='%d',newline='\n')
istoGradoWind = networkx.degree_histogram(grafoWind)
#numpy.savetxt("../../data/IstoGrado_Wind",istoGradoWind,fmt='%d',newline='\n')
windCell["degree"] = gradoWind
windCell.to_csv("../../data/Wind_towers.csv", index= False)
gradoTre = grafoTre.degree().values()
numpy.savetxt("../../data/DistrGrado_Tre",gradoTre,fmt='%d',newline='\n')
istoGradoTre = networkx.degree_histogram(grafoTre)
#numpy.savetxt("../../data/IstoGrado_Tre",istoGradoTre,fmt='%d',newline='\n')
treCell["degree"] = gradoTre
treCell.to_csv("../../data/Tre_towers.csv", index= False)
|
src/Adiacenza, grafo e grado.ipynb
|
FedericoMuciaccia/SistemiComplessi
|
mit
|
Initial topology with networkx (very slow)
|
gestore = ["Tim", "Vodafone", "Wind", "Tre"]
def topologyNetx(gestore):
    adiacenza = numpy.genfromtxt("/home/protoss/Documenti/Siscomp_datas/data/AdiacenzaEuclidea_{0}.csv".format(gestore),delimiter=',',dtype='int')
    grafo = networkx.Graph(adiacenza)
    c = networkx.average_clustering(grafo)
    d = networkx.diameter(grafo)
    l = networkx.average_shortest_path_length(grafo)
    return c, d, l

for compagnia in gestore:
    print compagnia
    topo = %time topologyNetx(compagnia)
    print topo, "\n"
|
src/Adiacenza, grafo e grado.ipynb
|
FedericoMuciaccia/SistemiComplessi
|
mit
|
NB: number of antennas
* TIM - 1756
* Vodafone - 1771
* Wind - 2365
* 3 - 1395
Total antennas: 6571
TODO:
Get the array of coordinates ✔
Build the array of distances ✔
Put the distance column into the dataframe ✔
Select the rows whose value falls within the mean radius ✔
Build a new dataframe ✔
Exclude all nodes with only 1 sample ✔
Compute P(k) ✔
Log binning ✔
MAKE BETTER PLOTS ✔
Fit P(k)
Variation of D under random or preferential removal ✔
Variation of the GC under random or preferential removal ✔
Look deeper into the percolation-threshold conditions (see the professor's lecture and the papers)
Barabási and Albert say that the relative giant-cluster behaviour is independent of the network size, not only for scale-free (fractal) networks but also for exponential ones (are those fractal as well?). Check this by comparing the GC behaviour of the full network with that of the individual operators' networks
Build some Barabási and Erdős graphs and add those model graphs to the attack and failure plots for comparison
NB: the giant cluster is the cluster that scales with N.
E.g., if the giant cluster is made of N/10 of the network, doubling or halving the network must leave it at 1/10 of the total number of nodes. Likewise if it is N/100 or N/0.9
To read (lecture material on percolation, attacks and epidemics):
http://www.nature.com/nature/journal/v406/n6794/pdf/406378a0.pdf
http://arxiv.org/pdf/cond-mat/0010317.pdf
http://arxiv.org/pdf/cond-mat/0007048.pdf
http://arxiv.org/pdf/cond-mat/0010251.pdf
Other possibly useful material:
http://www.renyi.hu/~p_erdos/1959-11.pdf (Erdős and Rényi)
http://arxiv.org/pdf/cond-mat/0106096.pdf (Stat mec scale free network)
http://arxiv.org/pdf/cond-mat/9910332.pdf
http://arxiv.org/pdf/cond-mat/9907068.pdf
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.71.8276&rep=rep1&type=pdf
Federico notes that the range distribution seems to follow a sort of Zipf law. NOTE WELL: OUR DATA ARE NOT OFFICIAL, BUT BUILT BY PEOPLE WALKING AROUND, SO THE PROBABILITY OF FINDING A NEW ANTENNA COULD BE SIMILAR TO THE PROBABILITY OF FINDING A NEW WORD; but I think that is not relevant, because we are only looking at the lengths of the words. What law governs the probability of word lengths?
The first attempt was to build the adjacency matrix by brute force. With a sample of only 50 nodes it took a few microseconds, so we tried to build the adjacency matrix of the 7000 antennas within the ring road and noticed that the run took very long; scaling up proportionally, we estimated 2.5 hours of computation time. The first thing we fixed was, obviously, a loop that computes only the upper half of the matrix, halving the computation time.
The next idea was to block-diagonalise the matrix, or to run a very low-level loop setting to 0 every element belonging to antennas with $\Delta$Latitude and/or $\Delta$Longitude larger than the maximum range in the data sample. The problem is that the antenna ranges tend to be large, with some reaching 10 km (with raggioRoma at 11 km) (and many samples too), so there was no way to reduce the computation.
The only other idea we had was to drop the complicated geoid-distance calculation with the vincenty method. The first step was to use the great-circles method; the other was to treat the selected portion of Rome as a flat disc, computing the Euclidean distance between geographic coordinates and converting it into metres. It takes MUCH less time, about 10 times less, giving an estimate of 10 minutes of computation time instead of an hour and a half.
TODO: look into parallelisation
Various notes on computation times
Preliminary test with 50 data points
with vincenty
$\sim$45 ms
with great circles
$\sim$25 ms
with euclid
$\sim$5 ms
Test with 50 data points
CPU times: user 32 ms, sys: 0 ns, total: 32 ms
Wall time: 31.8 ms
CPU times: user 4 ms, sys: 0 ns, total: 4 ms
Wall time: 4.3 ms
CPU times: user 32 ms, sys: 0 ns, total: 32 ms
Wall time: 33.6 ms
CPU times: user 4 ms, sys: 0 ns, total: 4 ms
Wall time: 4.2 ms
CPU times: user 32 ms, sys: 0 ns, total: 32 ms
Wall time: 31.2 ms
CPU times: user 4 ms, sys: 0 ns, total: 4 ms
Wall time: 4.24 ms
CPU times: user 32 ms, sys: 0 ns, total: 32 ms
Wall time: 31 ms
CPU times: user 4 ms, sys: 0 ns, total: 4 ms
Wall time: 4.29 ms
Test with 100 data points
CPU times: user 132 ms, sys: 0 ns, total: 132 ms
Wall time: 133 ms
CPU times: user 12 ms, sys: 16 ms, total: 28 ms
Wall time: 21.5 ms
CPU times: user 124 ms, sys: 0 ns, total: 124 ms
Wall time: 126 ms
CPU times: user 20 ms, sys: 0 ns, total: 20 ms
Wall time: 16.6 ms
CPU times: user 132 ms, sys: 0 ns, total: 132 ms
Wall time: 126 ms
CPU times: user 16 ms, sys: 8 ms, total: 24 ms
Wall time: 21.9 ms
CPU times: user 128 ms, sys: 0 ns, total: 128 ms
Wall time: 127 ms
CPU times: user 16 ms, sys: 0 ns, total: 16 ms
Wall time: 16.8 ms
with 500
CPU times: user 3.28 s, sys: 0 ns, total: 3.28 s
Wall time: 3.27 s
CPU times: user 404 ms, sys: 0 ns, total: 404 ms
Wall time: 403 ms
CPU times: user 3.26 s, sys: 20 ms, total: 3.28 s
Wall time: 3.23 s
CPU times: user 404 ms, sys: 0 ns, total: 404 ms
Wall time: 401 ms
with 1000
CPU times: user 12.6 s, sys: 32 ms, total: 12.6 s
Wall time: 12.5 s
CPU times: user 1.62 s, sys: 16 ms, total: 1.64 s
Wall time: 1.62 s
CPU times: user 12.5 s, sys: 48 ms, total: 12.5 s
Wall time: 12.5 s
CPU times: user 1.62 s, sys: 16 ms, total: 1.64 s
Wall time: 1.62 s
with 2000
CPU times: user 49.7 s, sys: 160 ms, total: 49.9 s
Wall time: 49.6 s
CPU times: user 6.47 s, sys: 40 ms, total: 6.51 s
Wall time: 6.44 s
CPU times: user 51.2 s, sys: 232 ms, total: 51.4 s
Wall time: 51.1 s
CPU times: user 6.67 s, sys: 24 ms, total: 6.7 s
Wall time: 6.65 s
Geo dist
Expected computation time with $\sim$ 7000 data points: $\sim$ 620 sec $\sim$ 10 minutes
Euclid dist
Expected computation time with $\sim$ 7000 data points: $\sim$ 80 sec $\sim$ 1.3 minutes
|
colori = ['#4d4d4d', '#004184','#ff3300','#ff8000','#018ECC']
paletta = seaborn.color_palette(palette = colori)
seaborn.palplot(paletta)
paletta = seaborn.color_palette(palette = 'muted')
seaborn.palplot(paletta)
paletta = seaborn.color_palette(palette = 'bright')
seaborn.palplot(paletta)
paletta = seaborn.color_palette(palette = 'pastel')
seaborn.palplot(paletta)
paletta = seaborn.color_palette(palette = 'dark')
seaborn.palplot(paletta)
paletta = seaborn.color_palette(palette = 'colorblind')
seaborn.palplot(paletta)
paletta = seaborn.color_palette()  # call the function to get the default palette
print paletta
|
src/Adiacenza, grafo e grado.ipynb
|
FedericoMuciaccia/SistemiComplessi
|
mit
|
Kruk-Jaroniec-Sayari
$\displaystyle{t_\mathrm{i} = \left(\frac{C_\mathrm{1}}{C_\mathrm{2}-\log{(P_\mathrm{rel,\ i})}}\right)^{C_\mathrm{3}}}$
where typically $C_\mathrm{1} = 60.6500$, $C_\mathrm{2} = 0.03071$, and $C_\mathrm{3} = 0.3968$
M. Kruk, M. Jaroniec, and A. Sayari. "Application of Large Pore MCM-41 Molecular Sieves To Improve Pore Size Analysis Using Nitrogen Adsorption Measurements." Langmuir, 1997, 13 (23), pp 6267–6273
M. Kruk, M. Jaroniec, and A. Sayari. "Adsorption Study of Surface and Structural Properties of MCM-41 Materials of Different Pore Sizes." J. Phys. Chem. B, 1997, 101 (4), pp 583–589
Michal Kruk, Mietek Jaroniec, and Abdelhamid Sayari. "Relations between Pore Structure Parameters and Their Implications for Characterization of MCM-41 Using Gas Adsorption and X-ray Diffraction." Chem. Mater., 1999, 11 (2), pp 492–500
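A direct NumPy transcription of the equation above (not the library's t.KrukJaroniecSayari class), assuming the base-10 logarithm as in the Harkins-Jura form:

```python
import numpy as np

def kruk_jaroniec_sayari(p_rel, C1=60.65, C2=0.03071, C3=0.3968):
    """Statistical film thickness t_i at relative pressures p_rel (base-10 log assumed)."""
    p_rel = np.asarray(p_rel, dtype=float)
    return (C1 / (C2 - np.log10(p_rel))) ** C3

print(kruk_jaroniec_sayari([0.1, 0.3, 0.5, 0.7, 0.9]))
```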
|
plots.plotThickness( Prel, t.KrukJaroniecSayari()(Prel), 'Kruk-Jaroniec-Sayari' )
|
documentation/thickness.ipynb
|
lowks/micromeritics
|
gpl-3.0
|
Halsey
The Halsey equation assumes the adsorbed liquid monolayer has the same density and packing as the normal liquid. The values 3.54
and 5.00 are empirical and are user adjustable.
$\displaystyle{t_\mathrm{i} = C_\mathrm{1}\left(\frac{C_\mathrm{2}}{\ln{(P_\mathrm{rel,\ i})}}\right)^{C_\mathrm{3}}}$
where typically $C_\mathrm{1} = 3.540$, $C_\mathrm{2} = -5.0001$, and $C_\mathrm{3} = 0.333$
Halsey, G.D., J. Chem. Phys., 16, 931 (1948).
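A direct NumPy transcription of the Halsey equation above (a sketch, not the library's t.Halsey class):

```python
import numpy as np

def halsey(p_rel, C1=3.54, C2=-5.0001, C3=0.333):
    """Halsey statistical thickness; ln(p_rel) < 0 for p_rel < 1, so C2/ln(p_rel) stays positive."""
    p_rel = np.asarray(p_rel, dtype=float)
    return C1 * (C2 / np.log(p_rel)) ** C3
```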
|
plots.plotThickness( Prel, t.Halsey()(Prel), 'Halsey')
|
documentation/thickness.ipynb
|
lowks/micromeritics
|
gpl-3.0
|
Harkins and Jura
The work of Harkins and Jura has shown that a plot of $\log{(P_\mathrm{rel})} = B - A/V_\mathrm{a}^\mathrm{2}$ returns a linear region where the film is condensed (where B is the intercept and A is the slope of the linear region). The relative pressure can be related to the statistical thickness $(t_\mathrm{i})$ of the adsorbed film with the following association:
$\displaystyle{t_\mathrm{i} = \left(\frac{13.99}{0.034-\log{(P_\mathrm{rel,\ i})}}\right)^{\frac{1}{2}}}$
where the empirical values 13.99 and 0.034 are substituted for the slope A and intercept B, respectively. Note that these values are user adjustable, which gives the general form:
$\displaystyle{t_\mathrm{i} = \left(\frac{C_\mathrm{1}}{C_\mathrm{2}-\log{(P_\mathrm{rel,\ i})}}\right)^{C_\mathrm{3}}}$
Harkins, W.D. and Jura, G., J. Am. Chem. Soc., 66, 1366 (1944).
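The same equation written out in NumPy (a sketch, not the library's t.HarkinsJura class), with the base-10 logarithm:

```python
import numpy as np

def harkins_jura(p_rel, C1=13.99, C2=0.034, C3=0.5):
    """Harkins-Jura statistical thickness at relative pressures p_rel."""
    p_rel = np.asarray(p_rel, dtype=float)
    return (C1 / (C2 - np.log10(p_rel))) ** C3
```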
|
plots.plotThickness( Prel, t.HarkinsJura()(Prel), 'Harkins and Jura')
|
documentation/thickness.ipynb
|
lowks/micromeritics
|
gpl-3.0
|
Broekhoff-de Boer
$\displaystyle{\log{(P_\mathrm{rel,\ i})} = \frac{C_\mathrm{1}}{t_\mathrm{i}^{2}}+C_\mathrm{2}e^{C_\mathrm{3}t_\mathrm{i}}}$
where typically $C_\mathrm{1} = -16.1100$, $C_\mathrm{2} = 0.1682$, and $C_\mathrm{3} = -0.1137$
Broekhoff, J.C.P. and de Boer, J.H., "The Surface Area in Intermediate Pores," Proceedings of the International Symposium on Surface Area Determination, D.H. Everett, R.H. Ottwill, eds., U.K. (1969).
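Unlike the previous equations, this one is implicit in t, so it has to be inverted numerically; a minimal sketch using SciPy root-finding (assuming the base-10 logarithm), not the library's t.BroekhoffDeBoer class:

```python
import numpy as np
from scipy.optimize import brentq

def broekhoff_de_boer(p_rel, C1=-16.11, C2=0.1682, C3=-0.1137):
    """Solve log10(p_rel) = C1/t**2 + C2*exp(C3*t) for the thickness t at each pressure."""
    def residual(t, p):
        return C1 / t**2 + C2 * np.exp(C3 * t) - np.log10(p)
    # the bracket [1e-3, 1e3] covers relative pressures comfortably below saturation
    return np.array([brentq(residual, 1e-3, 1e3, args=(p,)) for p in np.atleast_1d(p_rel)])
```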
|
plots.plotThickness( Prel, t.BroekhoffDeBoer()(Prel), 'Broekhoff-de Boer')
|
documentation/thickness.ipynb
|
lowks/micromeritics
|
gpl-3.0
|
Carbon Black STSA
$\displaystyle{t_\mathrm{i} = C_\mathrm{1}(P_\mathrm{rel,\ i})^2+C_\mathrm{2}(P_\mathrm{rel,\ i})+C_\mathrm{3}}$
where typically $C_\mathrm{1} = 2.9800$, $C_\mathrm{2} = 6.4500$, and $C_\mathrm{3} = 0.8800$
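This one is just a quadratic in the relative pressure; a direct NumPy sketch (not the library's t.CarbonBlackSTSA class):

```python
import numpy as np

def carbon_black_stsa(p_rel, C1=2.98, C2=6.45, C3=0.88):
    """Carbon Black STSA thickness as a quadratic in relative pressure."""
    p_rel = np.asarray(p_rel, dtype=float)
    return C1 * p_rel**2 + C2 * p_rel + C3
```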
|
plots.plotThickness( Prel, t.CarbonBlackSTSA()(Prel), 'Carbon Black STSA')
|
documentation/thickness.ipynb
|
lowks/micromeritics
|
gpl-3.0
|
We'll use some of the data that comes pre-packaged with statsmodels to demonstrate the library functionality. The data set below comprises incomplete, weekly measurements of CO2 levels in Hawaii.
Note: at the time of this writing, the current release of statsmodels includes a utility method for loading these datasets as a pandas.DataFrame which appears to be broken. Below is a short hack inspired by the current master branch on the statsmodels GitHub page.
|
def get_statsmodels_df():
"""Return packaged data in a pandas.DataFrame"""
# some hijinks to get around outdated statsmodels code
dataset = sm.datasets.co2.load()
start = dataset.data['date'][0].decode('utf-8')
index = pd.date_range(start=start, periods=len(dataset.data), freq='W-SAT')
obs = pd.DataFrame(dataset.data['co2'], index=index, columns=['co2'])
return obs
obs = get_statsmodels_df()
obs.head()
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
Because it's based on some existing statsmodels functionality, STLDecompose requires two things of the input dataframe:
1. continuous observations (no missing data points)
2. a pandas DateTimeIndex
Since these are both very situation-dependent, we leave it to the user to define how they want to achieve these goals - pandas provides a number of ways to work with missing data. In particular, the functions shown below make these steps relatively straightforward. Below, we use linear interpolation and resample to daily observations. The resulting frame meets both of our criteria.
|
obs = (obs
.resample('D')
.mean()
.interpolate('linear'))
obs.head(10)
obs.index
obs.head(1000).plot()
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
Decompose
One of the primary pieces of functionality is the STL decomposition. The associated method requires the observation frame, and the primary (largest) period of seasonality. This period is specified in terms of index positions, and so care is needed for the user to correctly specify the periodicity in terms of their observations.
For example, with daily observations and large annual cycles, period=365. For hourly observations with large daily cycles, period=24. Some inspection, and trial and error may be helpful.
|
decomp = decompose(obs, period=365)
decomp
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
The resulting object is an extended version of the statsmodels.tsa.seasonal.DecomposeResult. Like the statsmodels object, the arrays of values are available on the object (the observations; and the trend, seasonal, and residual components). An extra attribute (the average seasonal cycle) has been added for the purpose of forecasting.
We inherit the built-in .plot() method on the object.
|
decomp.plot();
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
Forecast
While the STL decomposition is interesting on its own, STLDecompose also provides some relatively naive capabilities for using the decomposition to forecast based on our observations.
We'll use the same data set, but pretend that we only had the first two-thirds of the observations. Then we can compare our forecast to the real observation data.
|
len(obs)
short_obs = obs.head(10000)
# apply the decomp to the truncated observation
short_decomp = decompose(short_obs, period=365)
short_decomp
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
The forecast() method requires the following arguments:
- the previously fit DecomposeResult
- the number of steps forward for which we'd like the forecast
- the specific forecasting function to be applied to the decomposition
There are a handful of predefined functions that can be imported from the stldecompose.forecast_funcs module. These implementations are based on Hyndman's online textbook. The user can also define their own forecast function, following the patterns demonstrated in the predefined functions.
The return type of the forecast() method is a pandas.Dataframe with a column name that represents the forecast function and an appropriate DatetimeIndex.
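As a hedged illustration of the custom forecast function mentioned above: the predefined functions appear to take the array of values observed so far and return the next forecast value, so a user-defined one could look roughly like this (a sketch under that assumption, not part of the library):

```python
import numpy as np

def mean_of_last_n(data, n=30, **kwargs):
    """Hypothetical custom forecast function: predict the mean of the last n observations."""
    data = np.asarray(data)
    return np.mean(data[-n:])

# usage would then mirror the predefined functions, e.g.:
# fcast = forecast(short_decomp, steps=8000, fc_func=mean_of_last_n, n=60)
```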
|
fcast = forecast(short_decomp, steps=8000, fc_func=drift)
fcast.head()
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
If desired, we can then plot the corresponding components of the observation and forecast to check and verify the results.
|
plt.plot(obs, '--', label='truth')
plt.plot(short_obs, '--', label='obs')
plt.plot(short_decomp.trend, ':', label='decomp.trend')
plt.plot(fcast, '-', label=fcast.columns[0])
plt.xlim('1970','2004'); plt.ylim(330,380);
plt.legend();
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
To include the estimated seasonal component in the forecast, use the boolean seasonal keyword.
|
fcast = forecast(short_decomp, steps=8000, fc_func=drift, seasonal=True)
plt.plot(obs, '--', label='truth')
plt.plot(short_obs, '--', label='obs')
plt.plot(short_decomp.trend, ':', label='decomp.trend')
plt.plot(fcast, '-', label=fcast.columns[0])
plt.xlim('1970','2004'); plt.ylim(330,380);
plt.legend();
fcast.head()
|
STL-usage-example.ipynb
|
jrmontag/STLDecompose
|
mit
|
RiiDataFrame has an attribute named catalog, which is a Pandas DataFrame providing the catalog of experimental data as shown below.
The columns formula and tabulated indicate the type of data. If n or k is included in the column tabulated, the experimentally observed refractive index n or extinction coefficient k is given in tabulated form, respectively. If tabulated is f, only the coefficients of a formula are given.
On the other hand, the number written in the column formula indicates which dispersion formula fits the experimental data. If the number is 0, only tabulated data are given.
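As an illustration of these columns, plain pandas filtering works on the catalog; a small sketch (the exact strings stored in tabulated, such as 'n', 'k', 'nk' or 'f', are assumed from the description above):

```python
# materials with a tabulated refractive index n
tab_n = ri.catalog[ri.catalog["tabulated"].astype(str).str.contains("n")]
# materials given only as dispersion-formula coefficients (tabulated is "f")
formula_only = ri.catalog[ri.catalog["tabulated"].astype(str) == "f"]
tab_n.head(3), formula_only.head(3)
```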
|
ri.catalog.head(3)
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
The experimental data are given by raw_data:
|
ri.raw_data.loc[3].head(5) # first 5 rows for the material whose id is 3
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
where n is the refractive index and k is the extinction coefficient at the vacuum wavelength wl_n (wl_k) in the unit of μm.
The column c gives the coefficients for the dielectric function model.
In the above example, no coefficient is given because only the tabulated data are given (formula number in catalog is 0).
On the other hand, if the formula number is not 0, the coefficients are given in the column c as shown below.
In this case, formula 21 means the Drude-Lorentz model, which is explained in Dispersion formulas.
|
ri.catalog.tail(3)
ri.raw_data.loc[2911].head(5) # first 5 rows for the material whose id is 2911
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
Using the method load_grid_data(), you can get grid data calculated at 200 wavelength values in the range [wl_min, wl_max], which is the intersection between the domain of n [wl_n_min, wl_n_max] and the domain of k [wl_k_min, wl_k_max]. These values are shown in catalog.
|
grid_data = ri.load_grid_data(3)
grid_data
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
Helper Methods
Using the functionality of Pandas, you can usually find what you want easily, but some simple helper methods are also implemented here.
plot
plot(id: int, comp: str = "n", fmt1: Optional[str] = "-", fmt2: Optional[str] = "--", **kwargs)
* id (int): ID number.
* comp (str): 'n', 'k' or 'eps'.
* fmt1 (Union[str, None]): Plot format for n and Re(eps).
* fmt2 (Union[str, None]): Plot format for k and Im(eps).
Plot the refractive index (comp="n"), extinction coefficient (comp="k"), or permittivity (comp="eps").
|
import matplotlib.pyplot as plt
ri.plot(3, "n")
plt.show()
ri.plot(3, "k")
plt.show()
ri.plot(3, "eps")
plt.show()
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
search
search(name: str) -> DataFrame
This method searches for data whose book or book_name contains the given name and returns a simplified catalog for them.
|
ri.search("NaCl")
ri.search("sodium") # upper or lower case is not significant
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
select
select(condition: str) -> DataFrame
This method makes a query with the given condition and returns a simplified catalog. It picks up materials whose experimental data contain at least one point that fulfills the given condition.
|
ri.select("2.5 < n < 3 and 0.4 < wl < 0.8").head(10)
ri.plot(157)
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
show
show(id: int | Sequence[int]) -> DataFrame
This method shows a simplified catalog for the given id.
|
ri.show(1)
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
read
read(id, as_dict=False)
This method returns the contents of a page associated with the id.
If you want the page contents as a python dict, pass True for the as_dict argument.
|
print(ri.read(0))
ri.read(0, as_dict=True)
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
references
references(id: int)
This method returns the REFERENCES of a page associated with the id.
|
ri.references(20)
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
material
material(params: dict) -> Material
Create a Material-class instance for the given parameter dict params.
params can include the following parameters:
* 'id': ID number. (int)
* 'book': book value in catalog of RiiDataFrame. (str)
* 'page': page value in catalog of RiiDataFrame. (str)
* 'RI': Constant refractive index. (complex)
* 'e': Constant permittivity. (complex)
* 'bound_check': True if bound check should be done. Defaults to True. (bool)
* 'im_factor': A magnification factor multiplied by the imaginary part of the permittivity. Defaults to 1.0. (float)
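For instance, besides looking a material up by its id (as in the cell below), the parameter list above suggests a constant-index material can be created directly; a minimal hedged sketch:

```python
# hypothetical usage based on the parameter list above:
# a dispersion-free material with a constant refractive index of 1.5
const_glass = ri.material({"RI": 1.5})
```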
|
water = ri.material({'id': 428})
water.catalog
|
docs/notebooks/02_RiiDataFrame.ipynb
|
mnishida/RII_Pandas
|
mit
|
Python depends on packages for most of its functionality; these can be either built-in (such as sys), or third-party (like all the packages below). Either way you need to import the packages you need before using them.
The Notebook
Look up http://www.google.com. Let's eat a burrito. $\alpha = \frac{\beta}{\gamma}$
Longer:
$$\alpha = \frac{\beta}{\gamma}$$
an item
another item
i like items
Pandas
Get Cheatsheet:
from https://drive.google.com/folderview?id=0ByIrJAE4KMTtaGhRcXkxNHhmY2M&usp=sharing
We read in some data from a CSV file. CSV files can be output by any spreadsheet software, and are plain text, so they make a great way to share data. This dataset is from Goodreads: I scraped the highest regarded (according to Goodreads' proprietary algorithm) books on that site. You'll see how to do such a scraping in the next lab.
|
df=pd.read_csv("all.csv", header=None,
names=["rating", 'review_count', 'isbn', 'booktype','author_url', 'year', 'genre_urls', 'dir','rating_count', 'name'],
)
df.head()
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
Notice we have a table! A spreadsheet! And it indexed the rows. Pandas (borrowing from R) calls it a DataFrame. Let's see the types of the columns...
df, in python parlance, is an instance of the pd.DataFrame class, created by calling the pd.read_csv function, which calls the DataFrame constructor inside of it. If you don't understand this sentence, don't worry, it will become clearer later. What you need to take away is that df is a dataframe object, and it has methods, or functions belonging to it, which allow it to do things. For example df.head() is a method that shows the first 5 rows of the dataframe.
The basics
|
df.dtypes
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
This gives us Trues and Falses. Such a series is called a mask. If we count the number of Trues and divide by the total, we'll get the fraction of ratings $\lt$ 3. To do this numerically, see the following:
|
np.sum(df.rating < 3)
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
Notice that you could just find the average since the Trues map to 1s.
|
np.mean(df.rating < 3.0)
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
Oops, we got an error. Something is not right. It's trying to convert the Python datatype None into an int. This usually means data was missing. Was it?
|
df[df.year.isnull()]
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
Much cleaner now!
Visualizing
Pandas has handy built in visualization.
|
df.rating.hist();
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
We can do this in more detail, plotting against a mean, with a custom bin size or number of bins. Note how to label axes and create legends.
|
sns.set_context("notebook")
meanrat=df.rating.mean()
#you can get means and medians in different ways
print meanrat, np.mean(df.rating), df.rating.median()
with sns.axes_style("whitegrid"):
df.rating.hist(bins=30, alpha=0.4);
plt.axvline(meanrat, 0, 0.75, color='r', label='Mean')
plt.xlabel("average rating of book")
plt.ylabel("Counts")
plt.title("Ratings Histogram")
plt.legend()
#sns.despine()
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
One can see the sparseness of review counts. This will be important when we learn about recommendations: we'll have to regularize our models to deal with it.
|
df.review_count.hist(bins=np.arange(0, 40000, 400))
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
Here we make a scatterplot in matplotlib of rating against year. By setting the alpha transparency low we can see how the density of highly rated books on goodreads has changed.
|
plt.scatter(df.year, df.rating, lw=0, alpha=.08)
plt.xlim([1900,2010])
plt.xlabel("Year")
plt.ylabel("Rating")
|
Lab1-pythonpandas_original.ipynb
|
stevenydc/2015lab1
|
mit
|
In the cell below
In the previous cell I calculated the image-wise log-likelihood score of the partitioned and the vanilla sampling techniques, so I have one score per image. I did this for all the MNIST digits that have been 'corrupted' by the bar images, i.e. for the RBMs trained on the digit models 1-9 and for an RBM trained on 2's and 3's.
The wins for a given model are the images where the partitioned technique scored better than the vanilla sampling technique.
Conversely, losses are the images where the vanilla technique scored better.
Intuitively, ties are where they scored the same, which can only really occur when the correction is zero or ultimately cancels out.
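A minimal sketch of this win/lose/tie bookkeeping, assuming two arrays of per-image log-likelihood scores where higher is better (the notebook's results objects wrap logic of this kind):

```python
import numpy as np

def win_lose_tie(partitioned_scores, vanilla_scores):
    """Count images where partitioned sampling beats, loses to, or ties the vanilla technique."""
    p = np.asarray(partitioned_scores)
    v = np.asarray(vanilla_scores)
    return int(np.sum(p > v)), int(np.sum(p < v)), int(np.sum(p == v))
```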
|
for key in results:
logging.info("Plotting, win, lose and tie images for the {}".format(key))
results[key].plot_various_images()
|
Max/MNIST-ORBM-Inference.ipynb
|
garibaldu/multicauseRBM
|
mit
|
Thoughts
So on a dataset of size 50, with 100 Gibbs alternations, we see in all cases for the digit models 1, 2, 3, ..., 9 that the partitioned sampling technique does better or the same more often than the vanilla one does. Let's try some different configurations.
|
results.update(results_for_models(models, corruption_model_name, 400, 500))
|
Max/MNIST-ORBM-Inference.ipynb
|
garibaldu/multicauseRBM
|
mit
|
results.update(results_for_models(models, corruption_model_name, 10, 1))
|
results
# with open('results_dict', 'wb') as f3le:
# pickle.dump(results,f3le, protocol = None)
with open('results_dict', 'rb') as f4le:
results = pickle.load(f4le)
# for key in results:
# if key.startswith('400'):
# logging.info("Results for hiddens")
# r = results[key].stored_hiddens
# for i in range(len(r)):
# print(results[key].imagewise_score())
|
Max/MNIST-ORBM-Inference.ipynb
|
garibaldu/multicauseRBM
|
mit
|
1b. KNN (K=5)
cf. Machine Learning with Scikit-Learn
|
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X,y)
y_pred=knn.predict(X)
# compute classification accuracy for the KNN model
from sklearn import metrics
print(metrics.accuracy_score(y,y_pred))
### 1c. KNN (K=1)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X,y)
y_pred=knn.predict(X)
print(metrics.accuracy_score(y,y_pred))
|
coursera_Ng/KNN.ipynb
|
ernestyalumni/MLgrabbag
|
mit
|
2. Evaluation procedure 2 - Train/test split
Data is randomly assigned unless you use the random_state hyperparameter
* If you use random_state=4
  - your data will be split in exactly the same way every run
|
# STEP 1: split X and y into training and testing sets
# note: train_test_split lives in sklearn.model_selection in current scikit-learn
# (it was in sklearn.cross_validation when this notebook was written)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.4,random_state=4)
knn=KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train,y_train)
y_pred=knn.predict(X_test)
print(metrics.accuracy_score(y_test,y_pred))
knn=KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train,y_train)
y_pred=knn.predict(X_test)
print(metrics.accuracy_score(y_test,y_pred))
# try K=1 through K=25 and record testing accuracy
k_range=range(1,26)
# We can create a Python list using [] or list()
scores=[]
# We loop over K = 1 to 25 (range(1, 26))
# and append each accuracy score to the list
for k in k_range:
knn=KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train,y_train)
y_pred=knn.predict(X_test)
scores.append(metrics.accuracy_score(y_test,y_pred))
print(scores)
# import Matplotlib (scientific plotting library)
#import matplotlib.pyplot as plt
# allow plots to appear within the notebook
#%matplotlib inline
# plot the relationship between $K$ and testing accuracy
# plt.plot(x_axis,y_axis)
plt.plot(k_range,scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Testing Accuracy')
print(pd.DataFrame(X).describe())
print(pd.DataFrame(X).head())
pd.DataFrame(X).head(10)
|
coursera_Ng/KNN.ipynb
|
ernestyalumni/MLgrabbag
|
mit
|